//===- Local.cpp - Functions to perform local transformations ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This family of functions performs various local transformations to the
// program.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalObject.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <optional>
#include <utility>
using namespace llvm;
using namespace llvm::PatternMatch;

extern cl::opt<bool> UseNewDbgInfoFormat;

#define DEBUG_TYPE "local"

STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");
STATISTIC(NumPHICSEs, "Number of PHIs that got CSE'd");

static cl::opt<bool> PHICSEDebugHash(
    "phicse-debug-hash",
#ifdef EXPENSIVE_CHECKS
    cl::init(true),
#else
    cl::init(false),
#endif
    cl::Hidden,
    cl::desc("Perform extra assertion checking to verify that the PHINode's "
             "hash function is well-behaved w.r.t. its isEqual predicate"));
static cl::opt<unsigned> PHICSENumPHISmallSize(
    "phicse-num-phi-smallsize", cl::init(32), cl::Hidden,
    cl::desc(
        "When the basic block contains not more than this number of PHI nodes, "
        "perform a (faster!) exhaustive search instead of a set-driven one."));

// Max recursion depth for collectBitParts used when detecting bswap and
// bitreverse idioms.
static const unsigned BitPartRecursionMaxDepth = 48;
//===----------------------------------------------------------------------===//
//  Local constant propagation.
//

/// ConstantFoldTerminator - If a terminator instruction is predicated on a
/// constant value, convert it into an unconditional branch to the constant
/// destination. This is a nontrivial operation because the successors of this
/// basic block must have their PHI nodes updated.
/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
/// conditions and indirectbr addresses this might make dead if
/// DeleteDeadConditions is true.
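///
/// For illustration, a branch on a constant condition such as:
///     br i1 true, label %live, label %dead
/// becomes:
///     br label %live
/// and %dead has this block removed from its predecessor list (updating any
/// PHI nodes in %dead accordingly).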
bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
                                  const TargetLibraryInfo *TLI,
                                  DomTreeUpdater *DTU) {
  Instruction *T = BB->getTerminator();
  IRBuilder<> Builder(T);

  // Branch - See if we are conditional jumping on constant
  if (auto *BI = dyn_cast<BranchInst>(T)) {
    if (BI->isUnconditional()) return false; // Can't optimize uncond branch

    BasicBlock *Dest1 = BI->getSuccessor(0);
    BasicBlock *Dest2 = BI->getSuccessor(1);

    if (Dest2 == Dest1) { // Conditional branch to same location?
      // This branch matches something like this:
      //     br bool %cond, label %Dest, label %Dest
      // and changes it into:  br label %Dest

      // Let the basic block know that we are letting go of one copy of it.
      assert(BI->getParent() && "Terminator not inserted in block!");
      Dest1->removePredecessor(BI->getParent());

      // Replace the conditional branch with an unconditional one.
      BranchInst *NewBI = Builder.CreateBr(Dest1);

      // Transfer the metadata to the new branch instruction.
      NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
                                LLVMContext::MD_annotation});

      Value *Cond = BI->getCondition();
      BI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
      return true;
    }

    if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
      // Are we branching on constant?
      // YES. Change to unconditional branch...
      BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
      BasicBlock *OldDest = Cond->getZExtValue() ? Dest2 : Dest1;

      // Let the basic block know that we are letting go of it. Based on this,
      // it will adjust its PHI nodes.
      OldDest->removePredecessor(BB);

      // Replace the conditional branch with an unconditional one.
      BranchInst *NewBI = Builder.CreateBr(Destination);

      // Transfer the metadata to the new branch instruction.
      NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
                                LLVMContext::MD_annotation});

      BI->eraseFromParent();
      if (DTU)
        DTU->applyUpdates({{DominatorTree::Delete, BB, OldDest}});
      return true;
    }

    return false;
  }
  if (auto *SI = dyn_cast<SwitchInst>(T)) {
    // If we are switching on a constant, we can convert the switch to an
    // unconditional branch.
    auto *CI = dyn_cast<ConstantInt>(SI->getCondition());
    BasicBlock *DefaultDest = SI->getDefaultDest();
    BasicBlock *TheOnlyDest = DefaultDest;

    // If the default is unreachable, ignore it when searching for TheOnlyDest.
    if (isa<UnreachableInst>(DefaultDest->getFirstNonPHIOrDbg()) &&
        SI->getNumCases() > 0) {
      TheOnlyDest = SI->case_begin()->getCaseSuccessor();
    }

    bool Changed = false;

    // Figure out which case it goes to.
    for (auto It = SI->case_begin(), End = SI->case_end(); It != End;) {
      // Found case matching a constant operand?
      if (It->getCaseValue() == CI) {
        TheOnlyDest = It->getCaseSuccessor();
        break;
      }

      // Check to see if this branch is going to the same place as the default
      // dest. If so, eliminate it as an explicit compare.
      if (It->getCaseSuccessor() == DefaultDest) {
        MDNode *MD = getValidBranchWeightMDNode(*SI);
        unsigned NCases = SI->getNumCases();
        // Fold the case metadata into the default if there will be any branches
        // left, unless the metadata doesn't match the switch.
        if (NCases > 1 && MD) {
          // Collect branch weights into a vector.
          SmallVector<uint32_t, 8> Weights;
          extractBranchWeights(MD, Weights);

          // Merge weight of this case to the default weight.
          unsigned Idx = It->getCaseIndex();
          // TODO: Add overflow check.
          Weights[0] += Weights[Idx + 1];
          // Remove weight for this case.
          std::swap(Weights[Idx + 1], Weights.back());
          Weights.pop_back();
          setBranchWeights(*SI, Weights);
        }
        // Remove this entry.
        BasicBlock *ParentBB = SI->getParent();
        DefaultDest->removePredecessor(ParentBB);
        It = SI->removeCase(It);
        End = SI->case_end();

        // Removing this case may have made the condition constant. In that
        // case, update CI and restart iteration through the cases.
        if (auto *NewCI = dyn_cast<ConstantInt>(SI->getCondition())) {
          CI = NewCI;
          It = SI->case_begin();
        }

        Changed = true;
        continue;
      }

      // Otherwise, check to see if the switch only branches to one destination.
      // We do this by resetting "TheOnlyDest" to null when we find two non-equal
      // destinations.
      if (It->getCaseSuccessor() != TheOnlyDest)
        TheOnlyDest = nullptr;

      // Increment this iterator as we haven't removed the case.
      ++It;
    }

    if (CI && !TheOnlyDest) {
      // Branching on a constant, but not any of the cases, go to the default
      // successor.
      TheOnlyDest = SI->getDefaultDest();
    }

    // If we found a single destination that we can fold the switch into, do so
    // now.
    if (TheOnlyDest) {
      // Insert the new branch.
      Builder.CreateBr(TheOnlyDest);
      BasicBlock *BB = SI->getParent();

      SmallSet<BasicBlock *, 8> RemovedSuccessors;

      // Remove entries from PHI nodes which we no longer branch to...
      BasicBlock *SuccToKeep = TheOnlyDest;
      for (BasicBlock *Succ : successors(SI)) {
        if (DTU && Succ != TheOnlyDest)
          RemovedSuccessors.insert(Succ);
        // Found case matching a constant operand?
        if (Succ == SuccToKeep) {
          SuccToKeep = nullptr; // Don't modify the first branch to TheOnlyDest
        } else {
          Succ->removePredecessor(BB);
        }
      }

      // Delete the old switch.
      Value *Cond = SI->getCondition();
      SI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
      if (DTU) {
        std::vector<DominatorTree::UpdateType> Updates;
        Updates.reserve(RemovedSuccessors.size());
        for (auto *RemovedSuccessor : RemovedSuccessors)
          Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
        DTU->applyUpdates(Updates);
      }
      return true;
    }

    if (SI->getNumCases() == 1) {
      // Otherwise, we can fold this switch into a conditional branch
      // instruction if it has only one non-default destination.
      auto FirstCase = *SI->case_begin();
      Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
                                         FirstCase.getCaseValue(), "cond");

      // Insert the new branch.
      BranchInst *NewBr = Builder.CreateCondBr(Cond,
                                               FirstCase.getCaseSuccessor(),
                                               SI->getDefaultDest());
      SmallVector<uint32_t> Weights;
      if (extractBranchWeights(*SI, Weights) && Weights.size() == 2) {
        uint32_t DefWeight = Weights[0];
        uint32_t CaseWeight = Weights[1];
        // The TrueWeight should be the weight for the single case of SI.
        NewBr->setMetadata(LLVMContext::MD_prof,
                           MDBuilder(BB->getContext())
                               .createBranchWeights(CaseWeight, DefWeight));
      }

      // Update make.implicit metadata to the newly-created conditional branch.
      MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
      if (MakeImplicitMD)
        NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);

      // Delete the old switch.
      SI->eraseFromParent();
      return true;
    }
    return Changed;
  }
  if (auto *IBI = dyn_cast<IndirectBrInst>(T)) {
    // indirectbr blockaddress(@F, @BB) -> br label @BB
    if (auto *BA =
            dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
      BasicBlock *TheOnlyDest = BA->getBasicBlock();
      SmallSet<BasicBlock *, 8> RemovedSuccessors;

      // Insert the new branch.
      Builder.CreateBr(TheOnlyDest);

      BasicBlock *SuccToKeep = TheOnlyDest;
      for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
        BasicBlock *DestBB = IBI->getDestination(i);
        if (DTU && DestBB != TheOnlyDest)
          RemovedSuccessors.insert(DestBB);
        if (IBI->getDestination(i) == SuccToKeep) {
          SuccToKeep = nullptr;
        } else {
          DestBB->removePredecessor(BB);
        }
      }

      Value *Address = IBI->getAddress();
      IBI->eraseFromParent();
      if (DeleteDeadConditions)
        // Delete pointer cast instructions.
        RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);

      // Also zap the blockaddress constant if there are no users remaining,
      // otherwise the destination is still marked as having its address taken.
      if (BA->use_empty())
        BA->destroyConstant();

      // If we didn't find our destination in the IBI successor list, then we
      // have undefined behavior. Replace the unconditional branch with an
      // 'unreachable' instruction.
      if (SuccToKeep) {
        BB->getTerminator()->eraseFromParent();
        new UnreachableInst(BB->getContext(), BB);
      }

      if (DTU) {
        std::vector<DominatorTree::UpdateType> Updates;
        Updates.reserve(RemovedSuccessors.size());
        for (auto *RemovedSuccessor : RemovedSuccessors)
          Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
        DTU->applyUpdates(Updates);
      }
      return true;
    }
  }

  return false;
}
//===----------------------------------------------------------------------===//
//  Local dead code elimination.
//

/// isInstructionTriviallyDead - Return true if the result produced by the
/// instruction is not used, and the instruction has no side effects.
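///
/// For example (illustrative), an `add` whose result has no uses is trivially
/// dead, while a volatile load or a call with side effects is not, even if
/// its result is unused.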
bool llvm::isInstructionTriviallyDead(Instruction *I,
                                      const TargetLibraryInfo *TLI) {
  if (!I->use_empty())
    return false;
  return wouldInstructionBeTriviallyDead(I, TLI);
}

bool llvm::wouldInstructionBeTriviallyDeadOnUnusedPaths(
    Instruction *I, const TargetLibraryInfo *TLI) {
  // Instructions that are "markers" and have implied meaning on code around
  // them (without explicit uses), are not dead on unused paths.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
    if (II->getIntrinsicID() == Intrinsic::stacksave ||
        II->getIntrinsicID() == Intrinsic::launder_invariant_group ||
        II->isLifetimeStartOrEnd())
      return false;
  return wouldInstructionBeTriviallyDead(I, TLI);
}
bool llvm::wouldInstructionBeTriviallyDead(const Instruction *I,
                                           const TargetLibraryInfo *TLI) {
  if (I->isTerminator())
    return false;

  // We don't want the landingpad-like instructions removed by anything this
  // general.
  if (I->isEHPad())
    return false;

  // We don't want debug info removed by anything this general.
  if (isa<DbgVariableIntrinsic>(I))
    return false;

  if (const DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(I)) {
    if (DLI->getLabel())
      return false;
    return true;
  }

  if (auto *CB = dyn_cast<CallBase>(I))
    if (isRemovableAlloc(CB, TLI))
      return true;

  if (!I->willReturn()) {
    auto *II = dyn_cast<IntrinsicInst>(I);
    if (!II)
      return false;

    switch (II->getIntrinsicID()) {
    case Intrinsic::experimental_guard: {
      // Guards on true are operationally no-ops. In the future we can
      // consider more sophisticated tradeoffs for guards considering potential
      // for check widening, but for now we keep things simple.
      auto *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0));
      return Cond && Cond->isOne();
    }
    // TODO: These intrinsics are not safe to remove, because this may remove
    // a well-defined trap.
    case Intrinsic::wasm_trunc_signed:
    case Intrinsic::wasm_trunc_unsigned:
    case Intrinsic::ptrauth_auth:
    case Intrinsic::ptrauth_resign:
      return true;
    default:
      return false;
    }
  }

  if (!I->mayHaveSideEffects())
    return true;

  // Special case intrinsics that "may have side effects" but can be deleted
  // when dead.
  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    // Safe to delete llvm.stacksave and launder.invariant.group if dead.
    if (II->getIntrinsicID() == Intrinsic::stacksave ||
        II->getIntrinsicID() == Intrinsic::launder_invariant_group)
      return true;

    if (II->isLifetimeStartOrEnd()) {
      auto *Arg = II->getArgOperand(1);
      // Lifetime intrinsics are dead when their right-hand operand is undef.
      if (isa<UndefValue>(Arg))
        return true;

      // If the right-hand operand is an alloca, global, or argument and the
      // only uses are lifetime intrinsics, then the intrinsics are dead.
      if (isa<AllocaInst>(Arg) || isa<GlobalValue>(Arg) || isa<Argument>(Arg))
        return llvm::all_of(Arg->uses(), [](Use &Use) {
          if (IntrinsicInst *IntrinsicUse =
                  dyn_cast<IntrinsicInst>(Use.getUser()))
            return IntrinsicUse->isLifetimeStartOrEnd();
          return false;
        });
      return false;
    }

    // Assumptions are dead if their condition is trivially true.
    if (II->getIntrinsicID() == Intrinsic::assume &&
        isAssumeWithEmptyBundle(cast<AssumeInst>(*II))) {
      if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
        return !Cond->isZero();

      return false;
    }

    if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(I)) {
      std::optional<fp::ExceptionBehavior> ExBehavior =
          FPI->getExceptionBehavior();
      return *ExBehavior != fp::ebStrict;
    }
  }

  if (auto *Call = dyn_cast<CallBase>(I)) {
    if (Value *FreedOp = getFreedOperand(Call, TLI))
      if (Constant *C = dyn_cast<Constant>(FreedOp))
        return C->isNullValue() || isa<UndefValue>(C);
    if (isMathLibCallNoop(Call, TLI))
      return true;
  }

  // Non-volatile atomic loads from constants can be removed.
  if (auto *LI = dyn_cast<LoadInst>(I))
    if (auto *GV = dyn_cast<GlobalVariable>(
            LI->getPointerOperand()->stripPointerCasts()))
      if (!LI->isVolatile() && GV->isConstant())
        return true;

  return false;
}
/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
/// trivially dead instruction, delete it. If that makes any of its operands
/// trivially dead, delete them too, recursively. Return true if any
/// instructions were deleted.
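///
/// For illustration, if `%a = add i32 %x, 1` is used only by an unused
/// `%b = mul i32 %a, 2`, deleting %b makes %a trivially dead, and both are
/// removed in turn.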
bool llvm::RecursivelyDeleteTriviallyDeadInstructions(
    Value *V, const TargetLibraryInfo *TLI, MemorySSAUpdater *MSSAU,
    std::function<void(Value *)> AboutToDeleteCallback) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !isInstructionTriviallyDead(I, TLI))
    return false;

  SmallVector<WeakTrackingVH, 16> DeadInsts;
  DeadInsts.push_back(I);
  RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
                                             AboutToDeleteCallback);

  return true;
}
bool llvm::RecursivelyDeleteTriviallyDeadInstructionsPermissive(
    SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
    MemorySSAUpdater *MSSAU,
    std::function<void(Value *)> AboutToDeleteCallback) {
  unsigned S = 0, E = DeadInsts.size(), Alive = 0;
  for (; S != E; ++S) {
    auto *I = dyn_cast_or_null<Instruction>(DeadInsts[S]);
    if (!I || !isInstructionTriviallyDead(I)) {
      DeadInsts[S] = nullptr;
      ++Alive;
    }
  }
  if (Alive == E)
    return false;
  RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
                                             AboutToDeleteCallback);
  return true;
}
void llvm::RecursivelyDeleteTriviallyDeadInstructions(
    SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
    MemorySSAUpdater *MSSAU,
    std::function<void(Value *)> AboutToDeleteCallback) {
  // Process the dead instruction list until empty.
  while (!DeadInsts.empty()) {
    Value *V = DeadInsts.pop_back_val();
    Instruction *I = cast_or_null<Instruction>(V);
    if (!I)
      continue;
    assert(isInstructionTriviallyDead(I, TLI) &&
           "Live instruction found in dead worklist!");
    assert(I->use_empty() && "Instructions with uses are not dead.");

    // Don't lose the debug info while deleting the instructions.
    salvageDebugInfo(*I);

    if (AboutToDeleteCallback)
      AboutToDeleteCallback(I);

    // Null out all of the instruction's operands to see if any operand becomes
    // dead as we go.
    for (Use &OpU : I->operands()) {
      Value *OpV = OpU.get();
      OpU.set(nullptr);

      if (!OpV->use_empty())
        continue;

      // If the operand is an instruction that became dead as we nulled out the
      // operand, and if it is 'trivially' dead, delete it in a future loop
      // iteration.
      if (Instruction *OpI = dyn_cast<Instruction>(OpV))
        if (isInstructionTriviallyDead(OpI, TLI))
          DeadInsts.push_back(OpI);
    }
    if (MSSAU)
      MSSAU->removeMemoryAccess(I);

    I->eraseFromParent();
  }
}
bool llvm::replaceDbgUsesWithUndef(Instruction *I) {
  SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
  SmallVector<DPValue *, 1> DPUsers;
  findDbgUsers(DbgUsers, I, &DPUsers);
  for (auto *DII : DbgUsers)
    DII->setKillLocation();
  for (auto *DPV : DPUsers)
    DPV->setKillLocation();
  return !DbgUsers.empty() || !DPUsers.empty();
}
/// areAllUsesEqual - Check whether the uses of a value are all the same.
/// This is similar to Instruction::hasOneUse() except this will also return
/// true when there are no uses or multiple uses that all refer to the same
/// value.
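///
/// For example (illustrative), a value used three times by the same PHI node
/// has all-equal users, even though hasOneUse() would return false.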
static bool areAllUsesEqual(Instruction *I) {
  Value::user_iterator UI = I->user_begin();
  Value::user_iterator UE = I->user_end();
  if (UI == UE)
    return true;

  User *TheUse = *UI;
  for (++UI; UI != UE; ++UI) {
    if (*UI != TheUse)
      return false;
  }
  return true;
}
/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
/// dead PHI node, due to being a def-use chain of single-use nodes that
/// either forms a cycle or is terminated by a trivially dead instruction,
/// delete it. If that makes any of its operands trivially dead, delete them
/// too, recursively. Return true if a change was made.
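///
/// A minimal illustration: two single-use PHIs that only feed each other,
///     %p = phi i32 [ %q, %bb1 ]
///     %q = phi i32 [ %p, %bb2 ]
/// form a dead cycle that this routine breaks and deletes.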
bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
                                        const TargetLibraryInfo *TLI,
                                        llvm::MemorySSAUpdater *MSSAU) {
  SmallPtrSet<Instruction*, 4> Visited;
  for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
       I = cast<Instruction>(*I->user_begin())) {
    if (I->use_empty())
      return RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);

    // If we find an instruction more than once, we're on a cycle that
    // won't prove fruitful.
    if (!Visited.insert(I).second) {
      // Break the cycle and delete the instruction and its operands.
      I->replaceAllUsesWith(PoisonValue::get(I->getType()));
      (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);
      return true;
    }
  }
  return false;
}
static bool
simplifyAndDCEInstruction(Instruction *I,
                          SmallSetVector<Instruction *, 16> &WorkList,
                          const DataLayout &DL,
                          const TargetLibraryInfo *TLI) {
  if (isInstructionTriviallyDead(I, TLI)) {
    salvageDebugInfo(*I);

    // Null out all of the instruction's operands to see if any operand becomes
    // dead as we go.
    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      Value *OpV = I->getOperand(i);
      I->setOperand(i, nullptr);

      if (!OpV->use_empty() || I == OpV)
        continue;

      // If the operand is an instruction that became dead as we nulled out the
      // operand, and if it is 'trivially' dead, delete it in a future loop
      // iteration.
      if (Instruction *OpI = dyn_cast<Instruction>(OpV))
        if (isInstructionTriviallyDead(OpI, TLI))
          WorkList.insert(OpI);
    }

    I->eraseFromParent();

    return true;
  }

  if (Value *SimpleV = simplifyInstruction(I, DL)) {
    // Add the users to the worklist. CAREFUL: an instruction can use itself,
    // in the case of a phi node.
    for (User *U : I->users()) {
      if (U != I) {
        WorkList.insert(cast<Instruction>(U));
      }
    }

    // Replace the instruction with its simplified value.
    bool Changed = false;
    if (!I->use_empty()) {
      I->replaceAllUsesWith(SimpleV);
      Changed = true;
    }
    if (isInstructionTriviallyDead(I, TLI)) {
      I->eraseFromParent();
      Changed = true;
    }
    return Changed;
  }
  return false;
}
/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
/// simplify any instructions in it and recursively delete dead instructions.
///
/// This returns true if it changed the code. Note that it can delete
/// instructions in other blocks as well as in this block.
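///
/// For instance (illustrative), `%y = add i32 %x, 0` simplifies to %x; its
/// uses are rewritten and the now-dead add is deleted, possibly cascading to
/// its operands.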
bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB,
                                       const TargetLibraryInfo *TLI) {
  bool MadeChange = false;
  const DataLayout &DL = BB->getModule()->getDataLayout();

#ifndef NDEBUG
  // In debug builds, ensure that the terminator of the block is never replaced
  // or deleted by these simplifications. The idea of simplification is that it
  // cannot introduce new instructions, and there is no way to replace the
  // terminator of a block without introducing a new instruction.
  AssertingVH<Instruction> TerminatorVH(&BB->back());
#endif

  SmallSetVector<Instruction *, 16> WorkList;
  // Iterate over the original function, only adding insts to the worklist
  // if they actually need to be revisited. This avoids having to pre-init
  // the worklist with the entire function's worth of instructions.
  for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end());
       BI != E;) {
    assert(!BI->isTerminator());
    Instruction *I = &*BI;
    ++BI;

    // We're visiting this instruction now, so make sure it's not in the
    // worklist from an earlier visit.
    if (!WorkList.count(I))
      MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
  }

  while (!WorkList.empty()) {
    Instruction *I = WorkList.pop_back_val();
    MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
  }
  return MadeChange;
}
//===----------------------------------------------------------------------===//
//  Control Flow Graph Restructuring.
//

void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB,
                                       DomTreeUpdater *DTU) {

  // If BB has single-entry PHI nodes, fold them.
  while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
    Value *NewVal = PN->getIncomingValue(0);
    // Replace self referencing PHI with poison, it must be dead.
    if (NewVal == PN) NewVal = PoisonValue::get(PN->getType());
    PN->replaceAllUsesWith(NewVal);
    PN->eraseFromParent();
  }

  BasicBlock *PredBB = DestBB->getSinglePredecessor();
  assert(PredBB && "Block doesn't have a single predecessor!");

  bool ReplaceEntryBB = PredBB->isEntryBlock();

  // DTU updates: Collect all the edges that enter
  // PredBB. These dominator edges will be redirected to DestBB.
  SmallVector<DominatorTree::UpdateType, 32> Updates;

  if (DTU) {
    // To avoid processing the same predecessor more than once.
    SmallPtrSet<BasicBlock *, 2> SeenPreds;
    Updates.reserve(Updates.size() + 2 * pred_size(PredBB) + 1);
    for (BasicBlock *PredOfPredBB : predecessors(PredBB))
      // This predecessor of PredBB may already have DestBB as a successor.
      if (PredOfPredBB != PredBB)
        if (SeenPreds.insert(PredOfPredBB).second)
          Updates.push_back({DominatorTree::Insert, PredOfPredBB, DestBB});
    SeenPreds.clear();
    for (BasicBlock *PredOfPredBB : predecessors(PredBB))
      if (SeenPreds.insert(PredOfPredBB).second)
        Updates.push_back({DominatorTree::Delete, PredOfPredBB, PredBB});
    Updates.push_back({DominatorTree::Delete, PredBB, DestBB});
  }

  // Zap anything that took the address of DestBB. Not doing this will give the
  // address an invalid value.
  if (DestBB->hasAddressTaken()) {
    BlockAddress *BA = BlockAddress::get(DestBB);
    Constant *Replacement =
        ConstantInt::get(Type::getInt32Ty(BA->getContext()), 1);
    BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
                                                     BA->getType()));
    BA->destroyConstant();
  }

  // Anything that branched to PredBB now branches to DestBB.
  PredBB->replaceAllUsesWith(DestBB);

  // Splice all the instructions from PredBB to DestBB.
  PredBB->getTerminator()->eraseFromParent();
  DestBB->splice(DestBB->begin(), PredBB);
  new UnreachableInst(PredBB->getContext(), PredBB);

  // If the PredBB is the entry block of the function, move DestBB up to
  // become the entry block after we erase PredBB.
  if (ReplaceEntryBB)
    DestBB->moveAfter(PredBB);

  if (DTU) {
    assert(PredBB->size() == 1 &&
           isa<UnreachableInst>(PredBB->getTerminator()) &&
           "The successor list of PredBB isn't empty before "
           "applying corresponding DTU updates.");
    DTU->applyUpdatesPermissive(Updates);
    DTU->deleteBB(PredBB);
    // Recalculation of DomTree is needed when updating a forward DomTree and
    // the Entry BB is replaced.
    if (ReplaceEntryBB && DTU->hasDomTree()) {
      // The entry block was removed and there is no external interface for
      // the dominator tree to be notified of this change. In this corner-case
      // we recalculate the entire tree.
      DTU->recalculate(*(DestBB->getParent()));
    }
  } else {
    PredBB->eraseFromParent(); // Nuke BB if DTU is nullptr.
  }
}
/// Return true if we can choose one of these values to use in place of the
/// other. Note that we will always choose the non-undef value to keep.
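///
/// E.g. (illustrative) merging undef with %v keeps %v; merging %v with itself
/// is trivially fine.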
static bool CanMergeValues(Value *First, Value *Second) {
  return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
}
/// Return true if we can fold BB, an almost-empty BB ending in an unconditional
/// branch to Succ, into Succ.
///
/// Assumption: Succ is the single successor for BB.
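///
/// An illustrative conflict: if block %p is a predecessor of both BB and Succ,
/// and a PHI in Succ already has an incoming value for %p that differs from
/// the value it would receive through BB, the blocks cannot be folded.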
static bool
CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ,
                                const SmallPtrSetImpl<BasicBlock *> &BBPreds) {
  assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");

  LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
                    << Succ->getName() << "\n");
  // Shortcut: if there is only a single predecessor, it must be BB and merging
  // is always safe.
  if (Succ->getSinglePredecessor())
    return true;

  // Look at all the phi nodes in Succ, to see if they present a conflict when
  // merging these blocks.
  for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);

    // If the incoming value from BB is again a PHINode in
    // BB which has the same incoming value for *PI as PN does, we can
    // merge the phi nodes and then the blocks can still be merged.
    PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
    if (BBPN && BBPN->getParent() == BB) {
      for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
        BasicBlock *IBB = PN->getIncomingBlock(PI);
        if (BBPreds.count(IBB) &&
            !CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
                            PN->getIncomingValue(PI))) {
          LLVM_DEBUG(dbgs()
                     << "Can't fold, phi node " << PN->getName() << " in "
                     << Succ->getName() << " is conflicting with "
                     << BBPN->getName() << " with regard to common predecessor "
                     << IBB->getName() << "\n");
          return false;
        }
      }
    } else {
      Value* Val = PN->getIncomingValueForBlock(BB);
      for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
        // See if the incoming value for the common predecessor is equal to the
        // one for BB, in which case this phi node will not prevent the merging
        // of the block.
        BasicBlock *IBB = PN->getIncomingBlock(PI);
        if (BBPreds.count(IBB) &&
            !CanMergeValues(Val, PN->getIncomingValue(PI))) {
          LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName()
                            << " in " << Succ->getName()
                            << " is conflicting with regard to common "
                            << "predecessor " << IBB->getName() << "\n");
          return false;
        }
      }
    }
  }

  return true;
}
using PredBlockVector = SmallVector<BasicBlock *, 16>;
using IncomingValueMap = DenseMap<BasicBlock *, Value *>;

/// Determines the value to use as the phi node input for a block.
///
/// Select between \p OldVal and any value that we know flows from \p BB
/// to a particular phi on the basis of which one (if either) is not
/// undef. Update IncomingValues based on the selected value.
///
/// \param OldVal The value we are considering selecting.
/// \param BB The block that the value flows in from.
/// \param IncomingValues A map from block-to-value for other phi inputs
/// that we have examined.
///
/// \returns the selected value.
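///
/// Illustrative behavior: if \p OldVal is undef but a defined value was
/// previously recorded for \p BB, the recorded value is returned instead of
/// the undef.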
static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
                                          IncomingValueMap &IncomingValues) {
  if (!isa<UndefValue>(OldVal)) {
    assert((!IncomingValues.count(BB) ||
            IncomingValues.find(BB)->second == OldVal) &&
           "Expected OldVal to match incoming value from BB!");

    IncomingValues.insert(std::make_pair(BB, OldVal));
    return OldVal;
  }

  IncomingValueMap::const_iterator It = IncomingValues.find(BB);
  if (It != IncomingValues.end()) return It->second;

  return OldVal;
}
/// Create a map from block to value for the operands of a
/// given phi.
///
/// Create a map from block to value for each non-undef value flowing
/// into \p PN.
///
/// \param PN The phi we are collecting the map for.
/// \param IncomingValues [out] The map from block to value for this phi.
static void gatherIncomingValuesToPhi(PHINode *PN,
                                      IncomingValueMap &IncomingValues) {
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *BB = PN->getIncomingBlock(i);
    Value *V = PN->getIncomingValue(i);

    if (!isa<UndefValue>(V))
      IncomingValues.insert(std::make_pair(BB, V));
  }
}
/// Replace the incoming undef values to a phi with the values
/// from a block-to-value map.
///
/// \param PN The phi we are replacing the undefs in.
/// \param IncomingValues A map from block to value.
static void replaceUndefValuesInPhi(PHINode *PN,
                                    const IncomingValueMap &IncomingValues) {
  SmallVector<unsigned> TrueUndefOps;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);

    if (!isa<UndefValue>(V)) continue;

    BasicBlock *BB = PN->getIncomingBlock(i);
    IncomingValueMap::const_iterator It = IncomingValues.find(BB);

    // Keep track of undef/poison incoming values. Those must match, so we fix
    // them up below if needed.
    // Note: this is conservatively correct, but we could try harder and group
    // the undef values per incoming basic block.
    if (It == IncomingValues.end()) {
      TrueUndefOps.push_back(i);
      continue;
    }

    // There is a defined value for this incoming block, so map this undef
    // incoming value to the defined value.
    PN->setIncomingValue(i, It->second);
  }

  // If there are both undef and poison values incoming, then convert those
  // values to undef. It is invalid to have different values for the same
  // incoming block.
  unsigned PoisonCount = count_if(TrueUndefOps, [&](unsigned i) {
    return isa<PoisonValue>(PN->getIncomingValue(i));
  });
  if (PoisonCount != 0 && PoisonCount != TrueUndefOps.size()) {
    for (unsigned i : TrueUndefOps)
      PN->setIncomingValue(i, UndefValue::get(PN->getType()));
  }
}
// Return true only when BB and Succ share a single common predecessor.
// Only handles cases when BB can't be merged while its predecessors can be
// redirected.
static bool
CanRedirectPredsOfEmptyBBToSucc(BasicBlock *BB, BasicBlock *Succ,
                                const SmallPtrSetImpl<BasicBlock *> &BBPreds,
                                const SmallPtrSetImpl<BasicBlock *> &SuccPreds,
                                BasicBlock *&CommonPred) {

  // There must be phis in BB, otherwise BB will be merged into Succ directly.
  if (BB->phis().empty() || Succ->phis().empty())
    return false;

  // BB must have unshared predecessors that can be redirected to Succ.
  if (!BB->hasNPredecessorsOrMore(2))
    return false;

  // Get the single common predecessor of both BB and Succ; fail if there is
  // more than one.
  for (BasicBlock *SuccPred : SuccPreds) {
    if (BBPreds.count(SuccPred)) {
      if (CommonPred)
        return false;
      CommonPred = SuccPred;
    }
  }

  return true;
}
/// Replace a value flowing from a block to a phi with
/// potentially multiple instances of that value flowing from the
/// block's predecessors to the phi.
///
/// \param BB The block with the value flowing into the phi.
/// \param BBPreds The predecessors of BB.
/// \param PN The phi that we are updating.
/// \param CommonPred The common predecessor of BB and PN's BasicBlock
static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
                                                const PredBlockVector &BBPreds,
                                                PHINode *PN,
                                                BasicBlock *CommonPred) {
  Value *OldVal = PN->removeIncomingValue(BB, false);
  assert(OldVal && "No entry in PHI for Pred BB!");

  IncomingValueMap IncomingValues;

  // We are merging two blocks - BB, and the block containing PN - and
  // as a result we need to redirect edges from the predecessors of BB
  // to go to the block containing PN, and update PN
  // accordingly. Since we allow merging blocks in the case where the
  // predecessor and successor blocks both share some predecessors,
  // and where some of those common predecessors might have undef
  // values flowing into PN, we want to rewrite those values to be
  // consistent with the non-undef values.

  gatherIncomingValuesToPhi(PN, IncomingValues);

  // If this incoming value is one of the PHI nodes in BB, the new entries
  // in the PHI node are the entries from the old PHI.
  if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
    PHINode *OldValPN = cast<PHINode>(OldVal);
    for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
      // Note that, since we are merging phi nodes and BB and Succ might
      // have common predecessors, we could end up with a phi node with
      // identical incoming branches. This will be cleaned up later (and
      // will trigger asserts if we try to clean it up now, without also
      // simplifying the corresponding conditional branch).
      BasicBlock *PredBB = OldValPN->getIncomingBlock(i);

      if (PredBB == CommonPred)
        continue;

      Value *PredVal = OldValPN->getIncomingValue(i);
      Value *Selected =
          selectIncomingValueForBlock(PredVal, PredBB, IncomingValues);

      // And add a new incoming value for this predecessor for the
      // newly retargeted branch.
      PN->addIncoming(Selected, PredBB);
    }
    if (CommonPred)
      PN->addIncoming(OldValPN->getIncomingValueForBlock(CommonPred), BB);

  } else {
    for (unsigned i = 0, e = BBPreds.size(); i != e; ++i) {
      // Update existing incoming values in PN for this
      // predecessor of BB.
      BasicBlock *PredBB = BBPreds[i];

      if (PredBB == CommonPred)
        continue;

      Value *Selected =
          selectIncomingValueForBlock(OldVal, PredBB, IncomingValues);

      // And add a new incoming value for this predecessor for the
      // newly retargeted branch.
      PN->addIncoming(Selected, PredBB);
    }
    if (CommonPred)
      PN->addIncoming(OldVal, BB);
  }

  replaceUndefValuesInPhi(PN, IncomingValues);
}
bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
                                                   DomTreeUpdater *DTU) {
  assert(BB != &BB->getParent()->getEntryBlock() &&
         "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");

  // We can't simplify infinite loops.
  BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
  if (BB == Succ)
    return false;

  SmallPtrSet<BasicBlock *, 16> BBPreds(pred_begin(BB), pred_end(BB));
  SmallPtrSet<BasicBlock *, 16> SuccPreds(pred_begin(Succ), pred_end(Succ));

  // The single common predecessor of BB and Succ when BB cannot be killed.
  BasicBlock *CommonPred = nullptr;

  bool BBKillable = CanPropagatePredecessorsForPHIs(BB, Succ, BBPreds);

  // Even if we cannot fold BB into Succ, we may be able to redirect the
  // predecessors of BB to Succ.
  bool BBPhisMergeable =
      BBKillable ||
      CanRedirectPredsOfEmptyBBToSucc(BB, Succ, BBPreds, SuccPreds, CommonPred);

  if (!BBKillable && !BBPhisMergeable)
    return false;

  // Check to see if merging these blocks/phis would cause conflicts for any of
  // the phi nodes in BB or Succ. If not, we can safely merge.

  // Check for cases where Succ has multiple predecessors and a PHI node in BB
  // has uses which will not disappear when the PHI nodes are merged. It is
  // possible to handle such cases, but difficult: it requires checking whether
  // BB dominates Succ, which is non-trivial to calculate in the case where
  // Succ has multiple predecessors. Also, it requires checking whether
  // constructing the necessary self-referential PHI node doesn't introduce any
  // conflicts; this isn't too difficult, but the previous code for doing this
  // was incorrect.
  //
  // Note that if this check finds a live use, BB dominates Succ, so BB is
  // something like a loop pre-header (or rarely, a part of an irreducible CFG);
  // folding the branch isn't profitable in that case anyway.
  if (!Succ->getSinglePredecessor()) {
    BasicBlock::iterator BBI = BB->begin();
    while (isa<PHINode>(*BBI)) {
      for (Use &U : BBI->uses()) {
        if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
          if (PN->getIncomingBlock(U) != BB)
            return false;
        } else {
          return false;
        }
      }
      ++BBI;
    }
  }
  if (BBPhisMergeable && CommonPred)
    LLVM_DEBUG(dbgs() << "Found Common Predecessor between: " << BB->getName()
                      << " and " << Succ->getName() << " : "
                      << CommonPred->getName() << "\n");

  // If 'BB' and 'BB->Pred' are loop latches, bail out to preserve inner loop
  // metadata.
  //
  // FIXME: This is a stop-gap solution to preserve inner-loop metadata given
  // current status (that loop metadata is implemented as metadata attached to
  // the branch instruction in the loop latch block). To quote from review
  // comments, "the current representation of loop metadata (using a loop latch
  // terminator attachment) is known to be fundamentally broken. Loop latches
  // are not uniquely associated with loops (both in that a latch can be part of
  // multiple loops and a loop may have multiple latches). Loop headers are. The
  // solution to this problem is also known: Add support for basic block
  // metadata, and attach loop metadata to the loop header."
  //
  // Why bail out:
  // In this case, we expect 'BB' is the latch for the outer loop and 'BB->Pred'
  // is the latch for the inner loop (see reason below), so bail out to preserve
  // inner-loop metadata rather than eliminating 'BB' and attaching its metadata
  // to this inner loop.
  // - The reason we believe 'BB' and 'BB->Pred' have different inner-most
  //   loops: assuming 'BB' and 'BB->Pred' are from the same inner-most loop L,
  //   then 'BB' is the header and latch of 'L' and thereby 'L' must consist of
  //   one self-looping basic block, which contradicts the assumption.
  //
  // To illustrate how inner-loop metadata is dropped:
  //
  // CFG Before
  //
  // BB is while.cond.exit, attached with loop metadata md2.
  // BB->Pred is for.body, attached with loop metadata md1.
  //
  //      entry
  //        |
  //        v
  // ---> while.cond   ------------->  while.end
  // |       |
  // |       v
  // |   while.body
  // |       |
  // |       v
  // |    for.body   <---- (md1)
  // |       |  |______|
  // |       v
  // |    while.cond.exit (md2)
  // |       |
  // |_______|
  //
  // CFG After
  //
  // while.cond1 is the merge of while.cond.exit and while.cond above.
  // for.body is attached with md2, and md1 is dropped.
  // If LoopSimplify runs later (as a part of loop pass), it could create
  // dedicated exits for the inner loop (essentially adding `while.cond.exit`
  // back), but it won't see 'md1' nor restore it for the inner loop.
  //
  //      entry
  //        |
  //        v
  // ---> while.cond1  ------------->  while.end
  // |       |
  // |       v
  // |   while.body
  // |       |
  // |       v
  // |    for.body   <---- (md2)
  // |_______|  |______|
  if (Instruction *TI = BB->getTerminator())
    if (TI->hasMetadata(LLVMContext::MD_loop))
      for (BasicBlock *Pred : predecessors(BB))
        if (Instruction *PredTI = Pred->getTerminator())
          if (PredTI->hasMetadata(LLVMContext::MD_loop))
            return false;

  if (BBKillable)
    LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);
  else if (BBPhisMergeable)
    LLVM_DEBUG(dbgs() << "Merge Phis in Trivial BB: \n" << *BB);
  SmallVector<DominatorTree::UpdateType, 32> Updates;

  if (DTU) {
    // To avoid processing the same predecessor more than once.
    SmallPtrSet<BasicBlock *, 8> SeenPreds;
    // All predecessors of BB (except the common predecessor) will be moved to
    // Succ.
    Updates.reserve(Updates.size() + 2 * pred_size(BB) + 1);

    for (auto *PredOfBB : predecessors(BB)) {
      // Do not modify those common predecessors of BB and Succ.
      if (!SuccPreds.contains(PredOfBB))
        if (SeenPreds.insert(PredOfBB).second)
          Updates.push_back({DominatorTree::Insert, PredOfBB, Succ});
    }

    SeenPreds.clear();

    for (auto *PredOfBB : predecessors(BB))
      // When BB cannot be killed, do not remove the edge between BB and
      // CommonPred.
      if (SeenPreds.insert(PredOfBB).second && PredOfBB != CommonPred)
        Updates.push_back({DominatorTree::Delete, PredOfBB, BB});

    if (BBKillable)
      Updates.push_back({DominatorTree::Delete, BB, Succ});
  }

  if (isa<PHINode>(Succ->begin())) {
    // If there is more than one pred of succ, and there are PHI nodes in
    // the successor, then we need to add incoming edges for the PHI nodes.
    const PredBlockVector BBPreds(predecessors(BB));

    // Loop over all of the PHI nodes in the successor of BB.
    for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
      PHINode *PN = cast<PHINode>(I);
      redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN, CommonPred);
    }
  }

  if (Succ->getSinglePredecessor()) {
    // BB is the only predecessor of Succ, so Succ will end up with exactly
    // the same predecessors BB had.
    // Copy over any phi, debug or lifetime instruction.
    BB->getTerminator()->eraseFromParent();
    Succ->splice(Succ->getFirstNonPHIIt(), BB);
  } else {
    while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
      // We explicitly check for such uses for merging phis.
      assert(PN->use_empty() && "There shouldn't be any uses here!");
      PN->eraseFromParent();
    }
  }

  // If the unconditional branch we replaced contains llvm.loop metadata, we
  // add the metadata to the branch instructions in the predecessors.
  if (Instruction *TI = BB->getTerminator())
    if (MDNode *LoopMD = TI->getMetadata(LLVMContext::MD_loop))
      for (BasicBlock *Pred : predecessors(BB))
        Pred->getTerminator()->setMetadata(LLVMContext::MD_loop, LoopMD);

  if (BBKillable) {
    // Everything that jumped to BB now goes to Succ.
    BB->replaceAllUsesWith(Succ);

    if (!Succ->hasName())
      Succ->takeName(BB);

    // Clear the successor list of BB to match updates applying to DTU later.
    if (BB->getTerminator())
      BB->back().eraseFromParent();

    new UnreachableInst(BB->getContext(), BB);
    assert(succ_empty(BB) && "The successor list of BB isn't empty before "
                             "applying corresponding DTU updates.");
  } else if (BBPhisMergeable) {
    // Everything except CommonPred that jumped to BB now goes to Succ.
    BB->replaceUsesWithIf(Succ, [BBPreds, CommonPred](Use &U) -> bool {
      if (Instruction *UseInst = dyn_cast<Instruction>(U.getUser()))
        return UseInst->getParent() != CommonPred &&
               BBPreds.contains(UseInst->getParent());
      return false;
    });
  }

  if (DTU)
    DTU->applyUpdates(Updates);

  if (BBKillable)
    DeleteDeadBlock(BB, DTU);

  return true;
}
static bool
EliminateDuplicatePHINodesNaiveImpl(BasicBlock *BB,
                                    SmallPtrSetImpl<PHINode *> &ToRemove) {
  // This implementation doesn't currently consider undef operands
  // specially. Theoretically, two phis which are identical except for
  // one having an undef where the other doesn't could be collapsed.

  bool Changed = false;

  // Examine each PHI.
  // Note that increment of I must *NOT* be in the iteration_expression, since
  // we don't want to immediately advance when we restart from the beginning.
  for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I);) {
    ++I;
    // Is there an identical PHI node in this basic block?
    // Note that we only look in the upper square's triangle;
    // we already checked that the lower-triangle PHIs aren't identical.
    for (auto J = I; PHINode *DuplicatePN = dyn_cast<PHINode>(J); ++J) {
      if (ToRemove.contains(DuplicatePN))
        continue;
      if (!DuplicatePN->isIdenticalToWhenDefined(PN))
        continue;
      // A duplicate. Replace this PHI with the base PHI.
      ++NumPHICSEs;
      DuplicatePN->replaceAllUsesWith(PN);
      ToRemove.insert(DuplicatePN);
      Changed = true;

      // The RAUW can change PHIs that we already visited.
      I = BB->begin();
      break; // Start over from the beginning.
    }
  }
  return Changed;
}
static bool
EliminateDuplicatePHINodesSetBasedImpl(BasicBlock *BB,
                                       SmallPtrSetImpl<PHINode *> &ToRemove) {
  // This implementation doesn't currently consider undef operands
  // specially. Theoretically, two phis which are identical except for
  // one having an undef where the other doesn't could be collapsed.

  struct PHIDenseMapInfo {
    static PHINode *getEmptyKey() {
      return DenseMapInfo<PHINode *>::getEmptyKey();
    }

    static PHINode *getTombstoneKey() {
      return DenseMapInfo<PHINode *>::getTombstoneKey();
    }

    static bool isSentinel(PHINode *PN) {
      return PN == getEmptyKey() || PN == getTombstoneKey();
    }

    // WARNING: this logic must be kept in sync with
    //          Instruction::isIdenticalToWhenDefined()!
    static unsigned getHashValueImpl(PHINode *PN) {
      // Compute a hash value on the operands. Instcombine will likely have
      // sorted them, which helps expose duplicates, but we have to check all
      // the operands to be safe in case instcombine hasn't run.
      return static_cast<unsigned>(hash_combine(
          hash_combine_range(PN->value_op_begin(), PN->value_op_end()),
          hash_combine_range(PN->block_begin(), PN->block_end())));
    }

    static unsigned getHashValue(PHINode *PN) {
#ifndef NDEBUG
      // If -phicse-debug-hash was specified, return a constant -- this
      // will force all hashing to collide, so we'll exhaustively search
      // the table for a match, and the assertion in isEqual will fire if
      // there's a bug causing equal keys to hash differently.
      if (PHICSEDebugHash)
        return 0;
#endif
      return getHashValueImpl(PN);
    }

    static bool isEqualImpl(PHINode *LHS, PHINode *RHS) {
      if (isSentinel(LHS) || isSentinel(RHS))
        return LHS == RHS;
      return LHS->isIdenticalTo(RHS);
    }

    static bool isEqual(PHINode *LHS, PHINode *RHS) {
      // These comparisons are nontrivial, so assert that equality implies
      // hash equality (DenseMap demands this as an invariant).
      bool Result = isEqualImpl(LHS, RHS);
      assert(!Result || (isSentinel(LHS) && LHS == RHS) ||
             getHashValueImpl(LHS) == getHashValueImpl(RHS));
      return Result;
    }
  };

  // Set of unique PHINodes.
  DenseSet<PHINode *, PHIDenseMapInfo> PHISet;
  PHISet.reserve(4 * PHICSENumPHISmallSize);

  // Examine each PHI.
  bool Changed = false;
  for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
    if (ToRemove.contains(PN))
      continue;
    auto Inserted = PHISet.insert(PN);
    if (!Inserted.second) {
      // A duplicate. Replace this PHI with its duplicate.
      ++NumPHICSEs;
      PN->replaceAllUsesWith(*Inserted.first);
      ToRemove.insert(PN);
      Changed = true;

      // The RAUW can change PHIs that we already visited. Start over from the
      // beginning.
      PHISet.clear();
      I = BB->begin();
    }
  }

  return Changed;
}
bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB,
                                      SmallPtrSetImpl<PHINode *> &ToRemove) {
  if (
#ifndef NDEBUG
      !PHICSEDebugHash &&
#endif
      hasNItemsOrLess(BB->phis(), PHICSENumPHISmallSize))
    return EliminateDuplicatePHINodesNaiveImpl(BB, ToRemove);
  return EliminateDuplicatePHINodesSetBasedImpl(BB, ToRemove);
}

bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
  SmallPtrSet<PHINode *, 8> ToRemove;
  bool Changed = EliminateDuplicatePHINodes(BB, ToRemove);
  for (PHINode *PN : ToRemove)
    PN->eraseFromParent();
  return Changed;
}
Align llvm::tryEnforceAlignment(Value *V, Align PrefAlign,
                                const DataLayout &DL) {
  V = V->stripPointerCasts();

  if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    // TODO: Ideally, this function would not be called if PrefAlign is smaller
    // than the current alignment, as the known bits calculation should have
    // already taken it into account. However, this is not always the case,
    // as computeKnownBits() has a depth limit, while stripPointerCasts()
    // doesn't.
    Align CurrentAlign = AI->getAlign();
    if (PrefAlign <= CurrentAlign)
      return CurrentAlign;

    // If the preferred alignment is greater than the natural stack alignment
    // then don't round up. This avoids dynamic stack realignment.
    if (DL.exceedsNaturalStackAlignment(PrefAlign))
      return CurrentAlign;
    AI->setAlignment(PrefAlign);
    return PrefAlign;
  }

  if (auto *GO = dyn_cast<GlobalObject>(V)) {
    // TODO: as above, this shouldn't be necessary.
    Align CurrentAlign = GO->getPointerAlignment(DL);
    if (PrefAlign <= CurrentAlign)
      return CurrentAlign;

    // If there is a large requested alignment and we can, bump up the alignment
    // of the global. If the memory we set aside for the global may not be the
    // memory used by the final program then it is impossible for us to reliably
    // enforce the preferred alignment.
    if (!GO->canIncreaseAlignment())
      return CurrentAlign;

    if (GO->isThreadLocal()) {
      unsigned MaxTLSAlign = GO->getParent()->getMaxTLSAlignment() / CHAR_BIT;
      if (MaxTLSAlign && PrefAlign > Align(MaxTLSAlign))
        PrefAlign = Align(MaxTLSAlign);
    }

    GO->setAlignment(PrefAlign);
    return PrefAlign;
  }

  return Align(1);
}
Align llvm::getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign,
                                       const DataLayout &DL,
                                       const Instruction *CxtI,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT) {
  assert(V->getType()->isPointerTy() &&
         "getOrEnforceKnownAlignment expects a pointer!");

  KnownBits Known = computeKnownBits(V, DL, 0, AC, CxtI, DT);
  unsigned TrailZ = Known.countMinTrailingZeros();

  // Avoid trouble with ridiculously large TrailZ values, such as
  // those computed from a null pointer.
  // LLVM doesn't support alignments larger than (1 << MaxAlignmentExponent).
  TrailZ = std::min(TrailZ, +Value::MaxAlignmentExponent);

  Align Alignment = Align(1ull << std::min(Known.getBitWidth() - 1, TrailZ));

  if (PrefAlign && *PrefAlign > Alignment)
    Alignment = std::max(Alignment, tryEnforceAlignment(V, *PrefAlign, DL));

  // We don't need to make any adjustment.
  return Alignment;
}
///===---------------------------------------------------------------------===//
///  Dbg Intrinsic utilities
///

/// See if there is a dbg.value intrinsic for DIVar for the PHI node.
static bool PhiHasDebugValue(DILocalVariable *DIVar,
                             DIExpression *DIExpr,
                             PHINode *APN) {
  // Since we can't guarantee that the original dbg.declare intrinsic
  // is removed by LowerDbgDeclare(), we need to make sure that we are
  // not inserting the same dbg.value intrinsic over and over.
  SmallVector<DbgValueInst *, 1> DbgValues;
  SmallVector<DPValue *, 1> DPValues;
  findDbgValues(DbgValues, APN, &DPValues);
  for (auto *DVI : DbgValues) {
    assert(is_contained(DVI->getValues(), APN));
    if ((DVI->getVariable() == DIVar) && (DVI->getExpression() == DIExpr))
      return true;
  }
  for (auto *DPV : DPValues) {
    assert(is_contained(DPV->location_ops(), APN));
    if ((DPV->getVariable() == DIVar) && (DPV->getExpression() == DIExpr))
      return true;
  }
  return false;
}
1588 /// Check if the alloc size of \p ValTy is large enough to cover the variable
1589 /// (or fragment of the variable) described by \p DII.
1591 /// This is primarily intended as a helper for the different
1592 /// ConvertDebugDeclareToDebugValue functions. The dbg.declare that is converted
1593 /// describes an alloca'd variable, so we need to use the alloc size of the
1594 /// value when doing the comparison. E.g. an i1 value will be identified as
1595 // covering an n-bit fragment if the store size of i1 is at least n bits.
1596 static bool valueCoversEntireFragment(Type *ValTy, DbgVariableIntrinsic *DII) {
1597 const DataLayout &DL = DII->getModule()->getDataLayout();
1598 TypeSize ValueSize = DL.getTypeAllocSizeInBits(ValTy);
1599 if (std::optional<uint64_t> FragmentSize = DII->getFragmentSizeInBits())
1600 return TypeSize::isKnownGE(ValueSize, TypeSize::getFixed(*FragmentSize));
1602 // We can't always calculate the size of the DI variable (e.g. if it is a
1603 // VLA). Try to use the size of the alloca that the dbg intrinsic describes
1604 // instead.
1605 if (DII->isAddressOfVariable()) {
1606 // DII should have exactly 1 location when it is an address.
1607 assert(DII->getNumVariableLocationOps() == 1 &&
1608 "address of variable must have exactly 1 location operand.");
1609 if (auto *AI =
1610 dyn_cast_or_null<AllocaInst>(DII->getVariableLocationOp(0))) {
1611 if (std::optional<TypeSize> FragmentSize =
1612 AI->getAllocationSizeInBits(DL)) {
1613 return TypeSize::isKnownGE(ValueSize, *FragmentSize);
1617 // Could not determine size of variable. Conservatively return false.
1618 return false;
1620 // RemoveDIs: duplicate implementation of the above, using DPValues, the
1621 // replacement for dbg.values.
1622 static bool valueCoversEntireFragment(Type *ValTy, DPValue *DPV) {
1623 const DataLayout &DL = DPV->getModule()->getDataLayout();
1624 TypeSize ValueSize = DL.getTypeAllocSizeInBits(ValTy);
1625 if (std::optional<uint64_t> FragmentSize = DPV->getFragmentSizeInBits())
1626 return TypeSize::isKnownGE(ValueSize, TypeSize::getFixed(*FragmentSize));
1628 // We can't always calculate the size of the DI variable (e.g. if it is a
1629 // VLA). Try to use the size of the alloca that the dbg intrinsic describes
1630 // instead.
1631 if (DPV->isAddressOfVariable()) {
1632 // DPV should have exactly 1 location when it is an address.
1633 assert(DPV->getNumVariableLocationOps() == 1 &&
1634 "address of variable must have exactly 1 location operand.");
1635 if (auto *AI =
1636 dyn_cast_or_null<AllocaInst>(DPV->getVariableLocationOp(0))) {
1637 if (std::optional<TypeSize> FragmentSize = AI->getAllocationSizeInBits(DL)) {
1638 return TypeSize::isKnownGE(ValueSize, *FragmentSize);
1642 // Could not determine size of variable. Conservatively return false.
1643 return false;
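/// Insert a dbg.value describing \p DV before \p Instr: as a dbg.value
/// intrinsic in the old debug-info format, or as a DPValue when
/// UseNewDbgInfoFormat is set.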
1646 static void insertDbgValueOrDPValue(DIBuilder &Builder, Value *DV,
1647 DILocalVariable *DIVar,
1648 DIExpression *DIExpr,
1649 const DebugLoc &NewLoc,
1650 BasicBlock::iterator Instr) {
1651 if (!UseNewDbgInfoFormat) {
1652 auto *DbgVal = Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc,
1653 (Instruction *)nullptr);
1654 DbgVal->insertBefore(Instr);
1655 } else {
1656 // RemoveDIs: if we're using the new debug-info format, allocate a
1657 // DPValue directly instead of a dbg.value intrinsic.
1658 ValueAsMetadata *DVAM = ValueAsMetadata::get(DV);
1659 DPValue *NewDPV = new DPValue(DVAM, DIVar, DIExpr, NewLoc.get());
1660 Instr->getParent()->insertDPValueBefore(NewDPV, Instr);
1664 static void insertDbgValueOrDPValueAfter(DIBuilder &Builder, Value *DV,
1665 DILocalVariable *DIVar,
1666 DIExpression *DIExpr,
1667 const DebugLoc &NewLoc,
1668 BasicBlock::iterator Instr) {
1669 if (!UseNewDbgInfoFormat) {
1670 auto *DbgVal = Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc,
1671 (Instruction *)nullptr);
1672 DbgVal->insertAfter(&*Instr);
1673 } else {
1674 // RemoveDIs: if we're using the new debug-info format, allocate a
1675 // DPValue directly instead of a dbg.value intrinsic.
1676 ValueAsMetadata *DVAM = ValueAsMetadata::get(DV);
1677 DPValue *NewDPV = new DPValue(DVAM, DIVar, DIExpr, NewLoc.get());
1678 Instr->getParent()->insertDPValueAfter(NewDPV, &*Instr);
1682 /// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
1683 /// that has an associated llvm.dbg.declare intrinsic.
1684 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1685 StoreInst *SI, DIBuilder &Builder) {
1686 assert(DII->isAddressOfVariable() || isa<DbgAssignIntrinsic>(DII));
1687 auto *DIVar = DII->getVariable();
1688 assert(DIVar && "Missing variable");
1689 auto *DIExpr = DII->getExpression();
1690 Value *DV = SI->getValueOperand();
1692 DebugLoc NewLoc = getDebugValueLoc(DII);
1694 // If the alloca describes the variable itself, i.e. the expression in the
1695 // dbg.declare doesn't start with a dereference, we can perform the
1696 // conversion if the value covers the entire fragment of DII.
1697 // If the alloca describes the *address* of DIVar, i.e. DIExpr is
1698 // *just* a DW_OP_deref, we use DV as is for the dbg.value.
1699 // We conservatively ignore other dereferences, because the following two are
1700 // not equivalent:
1701 // dbg.declare(alloca, ..., !Expr(deref, plus_uconstant, 2))
1702 // dbg.value(DV, ..., !Expr(deref, plus_uconstant, 2))
1703 // The former is adding 2 to the address of the variable, whereas the latter
1704 // is adding 2 to the value of the variable. As such, we insist on just a
1705 // deref expression.
1706 bool CanConvert =
1707 DIExpr->isDeref() || (!DIExpr->startsWithDeref() &&
1708 valueCoversEntireFragment(DV->getType(), DII));
1709 if (CanConvert) {
1710 insertDbgValueOrDPValue(Builder, DV, DIVar, DIExpr, NewLoc,
1711 SI->getIterator());
1712 return;
1715 // FIXME: If storing to a part of the variable described by the dbg.declare,
1716 // then we want to insert a dbg.value for the corresponding fragment.
1717 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DII
1718 << '\n');
1719 // For now, when there is a store to parts of the variable (but we do not
1720 // know which part) we insert a dbg.value intrinsic to indicate that we
1721 // know nothing about the variable's content.
1722 DV = UndefValue::get(DV->getType());
1723 insertDbgValueOrDPValue(Builder, DV, DIVar, DIExpr, NewLoc,
1724 SI->getIterator());
1727 /// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
1728 /// that has an associated llvm.dbg.declare intrinsic.
1729 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1730 LoadInst *LI, DIBuilder &Builder) {
1731 auto *DIVar = DII->getVariable();
1732 auto *DIExpr = DII->getExpression();
1733 assert(DIVar && "Missing variable");
1735 if (!valueCoversEntireFragment(LI->getType(), DII)) {
1736 // FIXME: If only referring to a part of the variable described by the
1737 // dbg.declare, then we want to insert a dbg.value for the corresponding
1738 // fragment.
1739 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1740 << *DII << '\n');
1741 return;
1744 DebugLoc NewLoc = getDebugValueLoc(DII);
1746 // We are now tracking the loaded value instead of the address. In the
1747 // future if multi-location support is added to the IR, it might be
1748 // preferable to keep tracking both the loaded value and the original
1749 // address in case the alloca can not be elided.
1750 insertDbgValueOrDPValueAfter(Builder, LI, DIVar, DIExpr, NewLoc,
1751 LI->getIterator());
1754 void llvm::ConvertDebugDeclareToDebugValue(DPValue *DPV, StoreInst *SI,
1755 DIBuilder &Builder) {
1756 assert(DPV->isAddressOfVariable() || DPV->isDbgAssign());
1757 auto *DIVar = DPV->getVariable();
1758 assert(DIVar && "Missing variable");
1759 auto *DIExpr = DPV->getExpression();
1760 Value *DV = SI->getValueOperand();
1762 DebugLoc NewLoc = getDebugValueLoc(DPV);
1764 // If the alloca describes the variable itself, i.e. the expression in the
1765 // dbg.declare doesn't start with a dereference, we can perform the
1766 // conversion if the value covers the entire fragment of DPV.
1767 // If the alloca describes the *address* of DIVar, i.e. DIExpr is
1768 // *just* a DW_OP_deref, we use DV as is for the dbg.value.
1769 // We conservatively ignore other dereferences, because the following two are
1770 // not equivalent:
1771 // dbg.declare(alloca, ..., !Expr(deref, plus_uconstant, 2))
1772 // dbg.value(DV, ..., !Expr(deref, plus_uconstant, 2))
1773 // The former is adding 2 to the address of the variable, whereas the latter
1774 // is adding 2 to the value of the variable. As such, we insist on just a
1775 // deref expression.
1776 bool CanConvert =
1777 DIExpr->isDeref() || (!DIExpr->startsWithDeref() &&
1778 valueCoversEntireFragment(DV->getType(), DPV));
1779 if (CanConvert) {
1780 insertDbgValueOrDPValue(Builder, DV, DIVar, DIExpr, NewLoc,
1781 SI->getIterator());
1782 return;
1785 // FIXME: If storing to a part of the variable described by the dbg.declare,
1786 // then we want to insert a dbg.value for the corresponding fragment.
1787 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DPV
1788 << '\n');
1789 assert(UseNewDbgInfoFormat);
1791 // For now, when there is a store to parts of the variable (but we do not
1792 // know which part) we insert a dbg.value intrinsic to indicate that we
1793 // know nothing about the variable's content.
1794 DV = UndefValue::get(DV->getType());
1795 ValueAsMetadata *DVAM = ValueAsMetadata::get(DV);
1796 DPValue *NewDPV = new DPValue(DVAM, DIVar, DIExpr, NewLoc.get());
1797 SI->getParent()->insertDPValueBefore(NewDPV, SI->getIterator());
1800 /// Inserts a llvm.dbg.value intrinsic after a phi that has an associated
1801 /// llvm.dbg.declare intrinsic.
1802 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1803 PHINode *APN, DIBuilder &Builder) {
1804 auto *DIVar = DII->getVariable();
1805 auto *DIExpr = DII->getExpression();
1806 assert(DIVar && "Missing variable");
1808 if (PhiHasDebugValue(DIVar, DIExpr, APN))
1809 return;
1811 if (!valueCoversEntireFragment(APN->getType(), DII)) {
1812 // FIXME: If only referring to a part of the variable described by the
1813 // dbg.declare, then we want to insert a dbg.value for the corresponding
1814 // fragment.
1815 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1816 << *DII << '\n');
1817 return;
1820 BasicBlock *BB = APN->getParent();
1821 auto InsertionPt = BB->getFirstInsertionPt();
1823 DebugLoc NewLoc = getDebugValueLoc(DII);
1825 // The block may be a catchswitch block, which does not have a valid
1826 // insertion point.
1827 // FIXME: Insert dbg.value markers in the successors when appropriate.
1828 if (InsertionPt != BB->end()) {
1829 insertDbgValueOrDPValue(Builder, APN, DIVar, DIExpr, NewLoc, InsertionPt);
1833 void llvm::ConvertDebugDeclareToDebugValue(DPValue *DPV, LoadInst *LI,
1834 DIBuilder &Builder) {
1835 auto *DIVar = DPV->getVariable();
1836 auto *DIExpr = DPV->getExpression();
1837 assert(DIVar && "Missing variable");
1839 if (!valueCoversEntireFragment(LI->getType(), DPV)) {
1840 // FIXME: If only referring to a part of the variable described by the
1841 // dbg.declare, then we want to insert a DPValue for the corresponding
1842 // fragment.
1843 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to DPValue: " << *DPV
1844 << '\n');
1845 return;
1848 DebugLoc NewLoc = getDebugValueLoc(DPV);
1850 // We are now tracking the loaded value instead of the address. In the
1851 // future if multi-location support is added to the IR, it might be
1852 // preferable to keep tracking both the loaded value and the original
1853 // address in case the alloca can not be elided.
1854 assert(UseNewDbgInfoFormat);
1856 // Create a DPValue directly and insert.
1857 ValueAsMetadata *LIVAM = ValueAsMetadata::get(LI);
1858 DPValue *DV = new DPValue(LIVAM, DIVar, DIExpr, NewLoc.get());
1859 LI->getParent()->insertDPValueAfter(DV, LI);
1862 /// Determine whether this alloca is either a VLA or an array.
1863 static bool isArray(AllocaInst *AI) {
1864 return AI->isArrayAllocation() ||
1865 (AI->getAllocatedType() && AI->getAllocatedType()->isArrayTy());
1868 /// Determine whether this alloca is a structure.
1869 static bool isStructure(AllocaInst *AI) {
1870 return AI->getAllocatedType() && AI->getAllocatedType()->isStructTy();
1872 void llvm::ConvertDebugDeclareToDebugValue(DPValue *DPV, PHINode *APN,
1873 DIBuilder &Builder) {
1874 auto *DIVar = DPV->getVariable();
1875 auto *DIExpr = DPV->getExpression();
1876 assert(DIVar && "Missing variable");
1878 if (PhiHasDebugValue(DIVar, DIExpr, APN))
1879 return;
1881 if (!valueCoversEntireFragment(APN->getType(), DPV)) {
1882 // FIXME: If only referring to a part of the variable described by the
1883 // dbg.declare, then we want to insert a DPValue for the corresponding
1884 // fragment.
1885 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to DPValue: " << *DPV
1886 << '\n');
1887 return;
1890 BasicBlock *BB = APN->getParent();
1891 auto InsertionPt = BB->getFirstInsertionPt();
1893 DebugLoc NewLoc = getDebugValueLoc(DPV);
1895 // The block may be a catchswitch block, which does not have a valid
1896 // insertion point.
1897 // FIXME: Insert DPValue markers in the successors when appropriate.
1898 if (InsertionPt != BB->end()) {
1899 insertDbgValueOrDPValue(Builder, APN, DIVar, DIExpr, NewLoc, InsertionPt);
1903 /// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set
1904 /// of llvm.dbg.value intrinsics.
1905 bool llvm::LowerDbgDeclare(Function &F) {
1906 bool Changed = false;
1907 DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
1908 SmallVector<DbgDeclareInst *, 4> Dbgs;
1909 SmallVector<DPValue *> DPVs;
1910 for (auto &FI : F) {
1911 for (Instruction &BI : FI) {
1912 if (auto *DDI = dyn_cast<DbgDeclareInst>(&BI))
1913 Dbgs.push_back(DDI);
1914 for (DPValue &DPV : BI.getDbgValueRange()) {
1915 if (DPV.getType() == DPValue::LocationType::Declare)
1916 DPVs.push_back(&DPV);
1921 if (Dbgs.empty() && DPVs.empty())
1922 return Changed;
1924 auto LowerOne = [&](auto *DDI) {
1925 AllocaInst *AI =
1926 dyn_cast_or_null<AllocaInst>(DDI->getVariableLocationOp(0));
1927 // If this is an alloca for a scalar variable, insert a dbg.value
1928 // at each load and store to the alloca and erase the dbg.declare.
1929 // The dbg.values allow tracking a variable even if it is not
1930 // stored on the stack, while the dbg.declare can only describe
1931 // the stack slot (and at a lexical-scope granularity). Later
1932 // passes will attempt to elide the stack slot.
1933 if (!AI || isArray(AI) || isStructure(AI))
1934 return;
1936 // A volatile load/store means that the alloca can't be elided anyway.
1937 if (llvm::any_of(AI->users(), [](User *U) -> bool {
1938 if (LoadInst *LI = dyn_cast<LoadInst>(U))
1939 return LI->isVolatile();
1940 if (StoreInst *SI = dyn_cast<StoreInst>(U))
1941 return SI->isVolatile();
1942 return false;
1944 return;
1946 SmallVector<const Value *, 8> WorkList;
1947 WorkList.push_back(AI);
1948 while (!WorkList.empty()) {
1949 const Value *V = WorkList.pop_back_val();
1950 for (const auto &AIUse : V->uses()) {
1951 User *U = AIUse.getUser();
1952 if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
1953 if (AIUse.getOperandNo() == 1)
1954 ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
1955 } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
1956 ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
1957 } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
1958 // This is a call by-value or some other instruction that takes a
1959 // pointer to the variable. Insert a *value* intrinsic that describes
1960 // the variable by dereferencing the alloca.
1961 if (!CI->isLifetimeStartOrEnd()) {
1962 DebugLoc NewLoc = getDebugValueLoc(DDI);
1963 auto *DerefExpr =
1964 DIExpression::append(DDI->getExpression(), dwarf::DW_OP_deref);
1965 insertDbgValueOrDPValue(DIB, AI, DDI->getVariable(), DerefExpr,
1966 NewLoc, CI->getIterator());
1968 } else if (BitCastInst *BI = dyn_cast<BitCastInst>(U)) {
1969 if (BI->getType()->isPointerTy())
1970 WorkList.push_back(BI);
1974 DDI->eraseFromParent();
1975 Changed = true;
1978 for_each(Dbgs, LowerOne);
1979 for_each(DPVs, LowerOne);
1981 if (Changed)
1982 for (BasicBlock &BB : F)
1983 RemoveRedundantDbgInstrs(&BB);
1985 return Changed;
1988 // RemoveDIs: re-implementation of insertDebugValuesForPHIs, but which pulls the
1989 // debug-info out of the block's DPValues rather than dbg.value intrinsics.
1990 static void insertDPValuesForPHIs(BasicBlock *BB,
1991 SmallVectorImpl<PHINode *> &InsertedPHIs) {
1992 assert(BB && "No BasicBlock to clone DPValue(s) from.");
1993 if (InsertedPHIs.size() == 0)
1994 return;
1996 // Map existing PHI nodes to their DPValues.
1997 DenseMap<Value *, DPValue *> DbgValueMap;
1998 for (auto &I : *BB) {
1999 for (auto &DPV : I.getDbgValueRange()) {
2000 for (Value *V : DPV.location_ops())
2001 if (auto *Loc = dyn_cast_or_null<PHINode>(V))
2002 DbgValueMap.insert({Loc, &DPV});
2005 if (DbgValueMap.size() == 0)
2006 return;
2008 // Map a pair of the destination BB and old DPValue to the new DPValue,
2009 // so that if a DPValue is being rewritten to use more than one of the
2010 // inserted PHIs in the same destination BB, we can update the same DPValue
2011 // with all the new PHIs instead of creating one copy for each.
2012 MapVector<std::pair<BasicBlock *, DPValue *>, DPValue *> NewDbgValueMap;
2013 // Then iterate through the new PHIs and look to see if they use one of the
2014 // previously mapped PHIs. If so, create a new DPValue that will propagate
2015 // the info through the new PHI. If we use more than one new PHI in a single
2016 // destination BB with the same old dbg.value, merge the updates so that we
2017 // get a single new DPValue with all the new PHIs.
2018 for (auto PHI : InsertedPHIs) {
2019 BasicBlock *Parent = PHI->getParent();
2020 // Avoid inserting a debug-info record into an EH block.
2021 if (Parent->getFirstNonPHI()->isEHPad())
2022 continue;
2023 for (auto VI : PHI->operand_values()) {
2024 auto V = DbgValueMap.find(VI);
2025 if (V != DbgValueMap.end()) {
2026 DPValue *DbgII = cast<DPValue>(V->second);
2027 auto NewDI = NewDbgValueMap.find({Parent, DbgII});
2028 if (NewDI == NewDbgValueMap.end()) {
2029 DPValue *NewDbgII = DbgII->clone();
2030 NewDI = NewDbgValueMap.insert({{Parent, DbgII}, NewDbgII}).first;
2032 DPValue *NewDbgII = NewDI->second;
2033 // If PHI contains VI as an operand more than once, we may have
2034 // already replaced it in NewDbgII; confirm that it is still present.
2035 if (is_contained(NewDbgII->location_ops(), VI))
2036 NewDbgII->replaceVariableLocationOp(VI, PHI);
2040 // Insert the new DPValues into their destination blocks.
2041 for (auto DI : NewDbgValueMap) {
2042 BasicBlock *Parent = DI.first.first;
2043 DPValue *NewDbgII = DI.second;
2044 auto InsertionPt = Parent->getFirstInsertionPt();
2045 assert(InsertionPt != Parent->end() && "Ill-formed basic block");
2047 InsertionPt->DbgMarker->insertDPValue(NewDbgII, true);
2051 /// Propagate dbg.value intrinsics through the newly inserted PHIs.
2052 void llvm::insertDebugValuesForPHIs(BasicBlock *BB,
2053 SmallVectorImpl<PHINode *> &InsertedPHIs) {
2054 assert(BB && "No BasicBlock to clone dbg.value(s) from.");
2055 if (InsertedPHIs.size() == 0)
2056 return;
2058 insertDPValuesForPHIs(BB, InsertedPHIs);
2060 // Map existing PHI nodes to their dbg.values.
2061 ValueToValueMapTy DbgValueMap;
2062 for (auto &I : *BB) {
2063 if (auto DbgII = dyn_cast<DbgVariableIntrinsic>(&I)) {
2064 for (Value *V : DbgII->location_ops())
2065 if (auto *Loc = dyn_cast_or_null<PHINode>(V))
2066 DbgValueMap.insert({Loc, DbgII});
2069 if (DbgValueMap.size() == 0)
2070 return;
2072 // Map a pair of the destination BB and old dbg.value to the new dbg.value,
2073 // so that if a dbg.value is being rewritten to use more than one of the
2074 // inserted PHIs in the same destination BB, we can update the same dbg.value
2075 // with all the new PHIs instead of creating one copy for each.
2076 MapVector<std::pair<BasicBlock *, DbgVariableIntrinsic *>,
2077 DbgVariableIntrinsic *>
2078 NewDbgValueMap;
2079 // Then iterate through the new PHIs and look to see if they use one of the
2080 // previously mapped PHIs. If so, create a new dbg.value intrinsic that will
2081 // propagate the info through the new PHI. If we use more than one new PHI in
2082 // a single destination BB with the same old dbg.value, merge the updates so
2083 // that we get a single new dbg.value with all the new PHIs.
2084 for (auto *PHI : InsertedPHIs) {
2085 BasicBlock *Parent = PHI->getParent();
2086 // Avoid inserting an intrinsic into an EH block.
2087 if (Parent->getFirstNonPHI()->isEHPad())
2088 continue;
2089 for (auto *VI : PHI->operand_values()) {
2090 auto V = DbgValueMap.find(VI);
2091 if (V != DbgValueMap.end()) {
2092 auto *DbgII = cast<DbgVariableIntrinsic>(V->second);
2093 auto NewDI = NewDbgValueMap.find({Parent, DbgII});
2094 if (NewDI == NewDbgValueMap.end()) {
2095 auto *NewDbgII = cast<DbgVariableIntrinsic>(DbgII->clone());
2096 NewDI = NewDbgValueMap.insert({{Parent, DbgII}, NewDbgII}).first;
2098 DbgVariableIntrinsic *NewDbgII = NewDI->second;
2099 // If PHI contains VI as an operand more than once, we may have
2100 // already replaced it in NewDbgII; confirm that it is still present.
2101 if (is_contained(NewDbgII->location_ops(), VI))
2102 NewDbgII->replaceVariableLocationOp(VI, PHI);
2106 // Insert the new dbg.values into their destination blocks.
2107 for (auto DI : NewDbgValueMap) {
2108 BasicBlock *Parent = DI.first.first;
2109 auto *NewDbgII = DI.second;
2110 auto InsertionPt = Parent->getFirstInsertionPt();
2111 assert(InsertionPt != Parent->end() && "Ill-formed basic block");
2112 NewDbgII->insertBefore(&*InsertionPt);
2116 bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
2117 DIBuilder &Builder, uint8_t DIExprFlags,
2118 int Offset) {
2119 TinyPtrVector<DbgDeclareInst *> DbgDeclares = findDbgDeclares(Address);
2120 TinyPtrVector<DPValue *> DPVDeclares = findDPVDeclares(Address);
2122 auto ReplaceOne = [&](auto *DII) {
2123 assert(DII->getVariable() && "Missing variable");
2124 auto *DIExpr = DII->getExpression();
2125 DIExpr = DIExpression::prepend(DIExpr, DIExprFlags, Offset);
2126 DII->setExpression(DIExpr);
2127 DII->replaceVariableLocationOp(Address, NewAddress);
2130 for_each(DbgDeclares, ReplaceOne);
2131 for_each(DPVDeclares, ReplaceOne);
2133 return !DbgDeclares.empty() || !DPVDeclares.empty();
2136 static void updateOneDbgValueForAlloca(const DebugLoc &Loc,
2137 DILocalVariable *DIVar,
2138 DIExpression *DIExpr, Value *NewAddress,
2139 DbgValueInst *DVI, DPValue *DPV,
2140 DIBuilder &Builder, int Offset) {
2141 assert(DIVar && "Missing variable");
2143 // This is an alloca-based dbg.value/DPValue. The first thing it should do
2144 // with the alloca pointer is dereference it. Otherwise we don't know how to
2145 // handle it and give up.
2146 if (!DIExpr || DIExpr->getNumElements() < 1 ||
2147 DIExpr->getElement(0) != dwarf::DW_OP_deref)
2148 return;
2150 // Insert the offset before the first deref.
2151 if (Offset)
2152 DIExpr = DIExpression::prepend(DIExpr, 0, Offset);
2154 if (DVI) {
2155 DVI->setExpression(DIExpr);
2156 DVI->replaceVariableLocationOp(0u, NewAddress);
2157 } else {
2158 assert(DPV);
2159 DPV->setExpression(DIExpr);
2160 DPV->replaceVariableLocationOp(0u, NewAddress);
2164 void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
2165 DIBuilder &Builder, int Offset) {
2166 SmallVector<DbgValueInst *, 1> DbgUsers;
2167 SmallVector<DPValue *, 1> DPUsers;
2168 findDbgValues(DbgUsers, AI, &DPUsers);
2170 // Attempt to replace dbg.values that use this alloca.
2171 for (auto *DVI : DbgUsers)
2172 updateOneDbgValueForAlloca(DVI->getDebugLoc(), DVI->getVariable(),
2173 DVI->getExpression(), NewAllocaAddress, DVI,
2174 nullptr, Builder, Offset);
2176 // Replace any DPValues that use this alloca.
2177 for (DPValue *DPV : DPUsers)
2178 updateOneDbgValueForAlloca(DPV->getDebugLoc(), DPV->getVariable(),
2179 DPV->getExpression(), NewAllocaAddress, nullptr,
2180 DPV, Builder, Offset);
2183 /// Where possible to salvage debug information for \p I do so.
2184 /// If not possible mark undef.
2185 void llvm::salvageDebugInfo(Instruction &I) {
2186 SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
2187 SmallVector<DPValue *, 1> DPUsers;
2188 findDbgUsers(DbgUsers, &I, &DPUsers);
2189 salvageDebugInfoForDbgValues(I, DbgUsers, DPUsers);
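/// Salvage the address component of \p Assign (a dbg.assign intrinsic or the
/// DPValue equivalent) when the instruction computing its address is being
/// removed: rewrite the address expression in terms of that instruction's
/// operands, or kill the address if that is not possible.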
2192 template <typename T> static void salvageDbgAssignAddress(T *Assign) {
2193 Instruction *I = dyn_cast<Instruction>(Assign->getAddress());
2194 // Only instructions can be salvaged at the moment.
2195 if (!I)
2196 return;
2198 assert(!Assign->getAddressExpression()->getFragmentInfo().has_value() &&
2199 "address-expression shouldn't have fragment info");
2201 // The address component of a dbg.assign cannot be variadic.
2202 uint64_t CurrentLocOps = 0;
2203 SmallVector<Value *, 4> AdditionalValues;
2204 SmallVector<uint64_t, 16> Ops;
2205 Value *NewV = salvageDebugInfoImpl(*I, CurrentLocOps, Ops, AdditionalValues);
2207 // Check if the salvage failed.
2208 if (!NewV)
2209 return;
2211 DIExpression *SalvagedExpr = DIExpression::appendOpsToArg(
2212 Assign->getAddressExpression(), Ops, 0, /*StackValue=*/false);
2213 assert(!SalvagedExpr->getFragmentInfo().has_value() &&
2214 "address-expression shouldn't have fragment info");
2216 // Salvage succeeds if no additional values are required.
2217 if (AdditionalValues.empty()) {
2218 Assign->setAddress(NewV);
2219 Assign->setAddressExpression(SalvagedExpr);
2220 } else {
2221 Assign->setKillAddress();
2225 void llvm::salvageDebugInfoForDbgValues(
2226 Instruction &I, ArrayRef<DbgVariableIntrinsic *> DbgUsers,
2227 ArrayRef<DPValue *> DPUsers) {
2228 // These are arbitrarily chosen limits on the maximum number of values and the
2229 // maximum size of a debug expression we can salvage up to, used for
2230 // performance reasons.
2231 const unsigned MaxDebugArgs = 16;
2232 const unsigned MaxExpressionSize = 128;
2233 bool Salvaged = false;
2235 for (auto *DII : DbgUsers) {
2236 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(DII)) {
2237 if (DAI->getAddress() == &I) {
2238 salvageDbgAssignAddress(DAI);
2239 Salvaged = true;
2241 if (DAI->getValue() != &I)
2242 continue;
2245 // Do not add DW_OP_stack_value for DbgDeclare, because it implicitly
2246 // points out the value as a DWARF memory location description.
2247 bool StackValue = isa<DbgValueInst>(DII);
2248 auto DIILocation = DII->location_ops();
2249 assert(
2250 is_contained(DIILocation, &I) &&
2251 "DbgVariableIntrinsic must use salvaged instruction as its location");
2252 SmallVector<Value *, 4> AdditionalValues;
2253 // `I` may appear more than once in DII's location ops, and each use of `I`
2254 // must be updated in the DIExpression and potentially have additional
2255 // values added; thus we call salvageDebugInfoImpl for each `I` instance in
2256 // DIILocation.
2257 Value *Op0 = nullptr;
2258 DIExpression *SalvagedExpr = DII->getExpression();
2259 auto LocItr = find(DIILocation, &I);
2260 while (SalvagedExpr && LocItr != DIILocation.end()) {
2261 SmallVector<uint64_t, 16> Ops;
2262 unsigned LocNo = std::distance(DIILocation.begin(), LocItr);
2263 uint64_t CurrentLocOps = SalvagedExpr->getNumLocationOperands();
2264 Op0 = salvageDebugInfoImpl(I, CurrentLocOps, Ops, AdditionalValues);
2265 if (!Op0)
2266 break;
2267 SalvagedExpr =
2268 DIExpression::appendOpsToArg(SalvagedExpr, Ops, LocNo, StackValue);
2269 LocItr = std::find(++LocItr, DIILocation.end(), &I);
2271 // salvageDebugInfoImpl should either fail when it examines the first
2272 // use of I, or not fail at all.
2273 if (!Op0)
2274 break;
2276 DII->replaceVariableLocationOp(&I, Op0);
2277 bool IsValidSalvageExpr = SalvagedExpr->getNumElements() <= MaxExpressionSize;
2278 if (AdditionalValues.empty() && IsValidSalvageExpr) {
2279 DII->setExpression(SalvagedExpr);
2280 } else if (isa<DbgValueInst>(DII) && IsValidSalvageExpr &&
2281 DII->getNumVariableLocationOps() + AdditionalValues.size() <=
2282 MaxDebugArgs) {
2283 DII->addVariableLocationOps(AdditionalValues, SalvagedExpr);
2284 } else {
2285 // Do not salvage using DIArgList for dbg.declare, as it is not currently
2286 // supported in those instructions. Also do not salvage if the resulting
2287 // DIArgList would contain an unreasonably large number of values.
2288 DII->setKillLocation();
2290 LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
2291 Salvaged = true;
2293 // Duplicate of above block for DPValues.
2294 for (auto *DPV : DPUsers) {
2295 if (DPV->isDbgAssign()) {
2296 if (DPV->getAddress() == &I) {
2297 salvageDbgAssignAddress(DPV);
2298 Salvaged = true;
2300 if (DPV->getValue() != &I)
2301 continue;
2304 // Do not add DW_OP_stack_value for DbgDeclare and DbgAddr, because they
2305 // implicitly point out the value as a DWARF memory location
2306 // description.
2307 bool StackValue = DPV->getType() != DPValue::LocationType::Declare;
2308 auto DPVLocation = DPV->location_ops();
2309 assert(
2310 is_contained(DPVLocation, &I) &&
2311 "DbgVariableIntrinsic must use salvaged instruction as its location");
2312 SmallVector<Value *, 4> AdditionalValues;
2313 // 'I' may appear more than once in DPV's location ops, and each use of 'I'
2314 // must be updated in the DIExpression and potentially have additional
2315 // values added; thus we call salvageDebugInfoImpl for each 'I' instance in
2316 // DPVLocation.
2317 Value *Op0 = nullptr;
2318 DIExpression *SalvagedExpr = DPV->getExpression();
2319 auto LocItr = find(DPVLocation, &I);
2320 while (SalvagedExpr && LocItr != DPVLocation.end()) {
2321 SmallVector<uint64_t, 16> Ops;
2322 unsigned LocNo = std::distance(DPVLocation.begin(), LocItr);
2323 uint64_t CurrentLocOps = SalvagedExpr->getNumLocationOperands();
2324 Op0 = salvageDebugInfoImpl(I, CurrentLocOps, Ops, AdditionalValues);
2325 if (!Op0)
2326 break;
2327 SalvagedExpr =
2328 DIExpression::appendOpsToArg(SalvagedExpr, Ops, LocNo, StackValue);
2329 LocItr = std::find(++LocItr, DPVLocation.end(), &I);
2331 // salvageDebugInfoImpl should either fail when it examines the first
2332 // use of I, or not fail at all.
2333 if (!Op0)
2334 break;
2336 DPV->replaceVariableLocationOp(&I, Op0);
2337 bool IsValidSalvageExpr =
2338 SalvagedExpr->getNumElements() <= MaxExpressionSize;
2339 if (AdditionalValues.empty() && IsValidSalvageExpr) {
2340 DPV->setExpression(SalvagedExpr);
2341 } else if (DPV->getType() != DPValue::LocationType::Declare &&
2342 IsValidSalvageExpr &&
2343 DPV->getNumVariableLocationOps() + AdditionalValues.size() <=
2344 MaxDebugArgs) {
2345 DPV->addVariableLocationOps(AdditionalValues, SalvagedExpr);
2346 } else {
2347 // Do not salvage using DIArgList for dbg.addr/dbg.declare, as it is
2348 // currently only valid for stack value expressions.
2349 // Also do not salvage if the resulting DIArgList would contain an
2350 // unreasonably large number of values.
2351 DPV->setKillLocation();
2353 LLVM_DEBUG(dbgs() << "SALVAGE: " << *DPV << '\n');
2354 Salvaged = true;
2357 if (Salvaged)
2358 return;
2360 for (auto *DII : DbgUsers)
2361 DII->setKillLocation();
2363 for (auto *DPV : DPUsers)
2364 DPV->setKillLocation();
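/// Rewrite a GEP into DIExpression operations: the constant offset is folded
/// into the expression, and each variable index becomes an additional
/// location operand scaled with DW_OP_mul and added with DW_OP_plus. For
/// example, a GEP with a single variable index and a 4-byte element size
/// roughly yields:
///   DW_OP_LLVM_arg 0, DW_OP_LLVM_arg 1, DW_OP_constu 4, DW_OP_mul,
///   DW_OP_plus
/// with the index value appended to AdditionalValues.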
2367 Value *getSalvageOpsForGEP(GetElementPtrInst *GEP, const DataLayout &DL,
2368 uint64_t CurrentLocOps,
2369 SmallVectorImpl<uint64_t> &Opcodes,
2370 SmallVectorImpl<Value *> &AdditionalValues) {
2371 unsigned BitWidth = DL.getIndexSizeInBits(GEP->getPointerAddressSpace());
2372 // Rewrite a GEP into a DIExpression.
2373 MapVector<Value *, APInt> VariableOffsets;
2374 APInt ConstantOffset(BitWidth, 0);
2375 if (!GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset))
2376 return nullptr;
2377 if (!VariableOffsets.empty() && !CurrentLocOps) {
2378 Opcodes.insert(Opcodes.begin(), {dwarf::DW_OP_LLVM_arg, 0});
2379 CurrentLocOps = 1;
2381 for (const auto &Offset : VariableOffsets) {
2382 AdditionalValues.push_back(Offset.first);
2383 assert(Offset.second.isStrictlyPositive() &&
2384 "Expected strictly positive multiplier for offset.");
2385 Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps++, dwarf::DW_OP_constu,
2386 Offset.second.getZExtValue(), dwarf::DW_OP_mul,
2387 dwarf::DW_OP_plus});
2389 DIExpression::appendOffset(Opcodes, ConstantOffset.getSExtValue());
2390 return GEP->getOperand(0);
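/// Map an LLVM binary opcode to the equivalent DWARF expression operator,
/// returning 0 when there is no direct equivalent.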
2393 uint64_t getDwarfOpForBinOp(Instruction::BinaryOps Opcode) {
2394 switch (Opcode) {
2395 case Instruction::Add:
2396 return dwarf::DW_OP_plus;
2397 case Instruction::Sub:
2398 return dwarf::DW_OP_minus;
2399 case Instruction::Mul:
2400 return dwarf::DW_OP_mul;
2401 case Instruction::SDiv:
2402 return dwarf::DW_OP_div;
2403 case Instruction::SRem:
2404 return dwarf::DW_OP_mod;
2405 case Instruction::Or:
2406 return dwarf::DW_OP_or;
2407 case Instruction::And:
2408 return dwarf::DW_OP_and;
2409 case Instruction::Xor:
2410 return dwarf::DW_OP_xor;
2411 case Instruction::Shl:
2412 return dwarf::DW_OP_shl;
2413 case Instruction::LShr:
2414 return dwarf::DW_OP_shr;
2415 case Instruction::AShr:
2416 return dwarf::DW_OP_shra;
2417 default:
2418 // TODO: Salvage from each kind of binop we know about.
2419 return 0;
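/// Append a DW_OP_LLVM_arg reference to the second operand of \p I, first
/// emitting DW_OP_LLVM_arg 0 if the expression does not yet reference any
/// location operand.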
2423 static void handleSSAValueOperands(uint64_t CurrentLocOps,
2424 SmallVectorImpl<uint64_t> &Opcodes,
2425 SmallVectorImpl<Value *> &AdditionalValues,
2426 Instruction *I) {
2427 if (!CurrentLocOps) {
2428 Opcodes.append({dwarf::DW_OP_LLVM_arg, 0});
2429 CurrentLocOps = 1;
2431 Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps});
2432 AdditionalValues.push_back(I->getOperand(1));
2435 Value *getSalvageOpsForBinOp(BinaryOperator *BI, uint64_t CurrentLocOps,
2436 SmallVectorImpl<uint64_t> &Opcodes,
2437 SmallVectorImpl<Value *> &AdditionalValues) {
2438 // Handle binary operations with constant integer operands as a special case.
2439 auto *ConstInt = dyn_cast<ConstantInt>(BI->getOperand(1));
2440 // Values wider than 64 bits cannot be represented within a DIExpression.
2441 if (ConstInt && ConstInt->getBitWidth() > 64)
2442 return nullptr;
2444 Instruction::BinaryOps BinOpcode = BI->getOpcode();
2445 // Push any Constant Int operand onto the expression stack.
2446 if (ConstInt) {
2447 uint64_t Val = ConstInt->getSExtValue();
2448 // Add or Sub Instructions with a constant operand can potentially be
2449 // simplified.
2450 if (BinOpcode == Instruction::Add || BinOpcode == Instruction::Sub) {
2451 uint64_t Offset = BinOpcode == Instruction::Add ? Val : -int64_t(Val);
2452 DIExpression::appendOffset(Opcodes, Offset);
2453 return BI->getOperand(0);
2455 Opcodes.append({dwarf::DW_OP_constu, Val});
2456 } else {
2457 handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, BI);
2460 // Add salvaged binary operator to expression stack, if it has a valid
2461 // representation in a DIExpression.
2462 uint64_t DwarfBinOp = getDwarfOpForBinOp(BinOpcode);
2463 if (!DwarfBinOp)
2464 return nullptr;
2465 Opcodes.push_back(DwarfBinOp);
2466 return BI->getOperand(0);
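/// Map an icmp predicate to the equivalent DWARF comparison operator,
/// returning 0 when there is none. Signed and unsigned predicates share an
/// opcode, as signedness is implicit in DWARF's typed stack.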
2469 uint64_t getDwarfOpForIcmpPred(CmpInst::Predicate Pred) {
2470 // The signedness of the operation is implicit in the typed stack, signed and
2471 // unsigned instructions map to the same DWARF opcode.
2472 switch (Pred) {
2473 case CmpInst::ICMP_EQ:
2474 return dwarf::DW_OP_eq;
2475 case CmpInst::ICMP_NE:
2476 return dwarf::DW_OP_ne;
2477 case CmpInst::ICMP_UGT:
2478 case CmpInst::ICMP_SGT:
2479 return dwarf::DW_OP_gt;
2480 case CmpInst::ICMP_UGE:
2481 case CmpInst::ICMP_SGE:
2482 return dwarf::DW_OP_ge;
2483 case CmpInst::ICMP_ULT:
2484 case CmpInst::ICMP_SLT:
2485 return dwarf::DW_OP_lt;
2486 case CmpInst::ICMP_ULE:
2487 case CmpInst::ICMP_SLE:
2488 return dwarf::DW_OP_le;
2489 default:
2490 return 0;
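/// Rewrite an icmp into DIExpression operations: push the constant or SSA
/// second operand, then the DWARF comparison operator. Returns the first
/// operand on success, nullptr if the compare cannot be encoded.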
2494 Value *getSalvageOpsForIcmpOp(ICmpInst *Icmp, uint64_t CurrentLocOps,
2495 SmallVectorImpl<uint64_t> &Opcodes,
2496 SmallVectorImpl<Value *> &AdditionalValues) {
2497 // Handle icmp operations with constant integer operands as a special case.
2498 auto *ConstInt = dyn_cast<ConstantInt>(Icmp->getOperand(1));
2499 // Values wider than 64 bits cannot be represented within a DIExpression.
2500 if (ConstInt && ConstInt->getBitWidth() > 64)
2501 return nullptr;
2502 // Push any Constant Int operand onto the expression stack.
2503 if (ConstInt) {
2504 if (Icmp->isSigned())
2505 Opcodes.push_back(dwarf::DW_OP_consts);
2506 else
2507 Opcodes.push_back(dwarf::DW_OP_constu);
2508 uint64_t Val = ConstInt->getSExtValue();
2509 Opcodes.push_back(Val);
2510 } else {
2511 handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, Icmp);
2514 // Add salvaged binary operator to expression stack, if it has a valid
2515 // representation in a DIExpression.
2516 uint64_t DwarfIcmpOp = getDwarfOpForIcmpPred(Icmp->getPredicate());
2517 if (!DwarfIcmpOp)
2518 return nullptr;
2519 Opcodes.push_back(DwarfIcmpOp);
2520 return Icmp->getOperand(0);
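/// Compute DIExpression operations that reconstruct the value of \p I from
/// its operands, appending them to \p Ops and any extra location operands to
/// \p AdditionalValues. Returns the value the rewritten expression is based
/// on, or nullptr if \p I cannot be salvaged.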
2523 Value *llvm::salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps,
2524 SmallVectorImpl<uint64_t> &Ops,
2525 SmallVectorImpl<Value *> &AdditionalValues) {
2526 auto &M = *I.getModule();
2527 auto &DL = M.getDataLayout();
2529 if (auto *CI = dyn_cast<CastInst>(&I)) {
2530 Value *FromValue = CI->getOperand(0);
2531 // No-op casts are irrelevant for debug info.
2532 if (CI->isNoopCast(DL)) {
2533 return FromValue;
2536 Type *Type = CI->getType();
2537 if (Type->isPointerTy())
2538 Type = DL.getIntPtrType(Type);
2539 // Casts other than Trunc, SExt, or ZExt to scalar types cannot be salvaged.
2540 if (Type->isVectorTy() ||
2541 !(isa<TruncInst>(&I) || isa<SExtInst>(&I) || isa<ZExtInst>(&I) ||
2542 isa<IntToPtrInst>(&I) || isa<PtrToIntInst>(&I)))
2543 return nullptr;
2545 llvm::Type *FromType = FromValue->getType();
2546 if (FromType->isPointerTy())
2547 FromType = DL.getIntPtrType(FromType);
2549 unsigned FromTypeBitSize = FromType->getScalarSizeInBits();
2550 unsigned ToTypeBitSize = Type->getScalarSizeInBits();
2552 auto ExtOps = DIExpression::getExtOps(FromTypeBitSize, ToTypeBitSize,
2553 isa<SExtInst>(&I));
2554 Ops.append(ExtOps.begin(), ExtOps.end());
2555 return FromValue;
2558 if (auto *GEP = dyn_cast<GetElementPtrInst>(&I))
2559 return getSalvageOpsForGEP(GEP, DL, CurrentLocOps, Ops, AdditionalValues);
2560 if (auto *BI = dyn_cast<BinaryOperator>(&I))
2561 return getSalvageOpsForBinOp(BI, CurrentLocOps, Ops, AdditionalValues);
2562 if (auto *IC = dyn_cast<ICmpInst>(&I))
2563 return getSalvageOpsForIcmpOp(IC, CurrentLocOps, Ops, AdditionalValues);
2565 // *Not* to do: we should not attempt to salvage load instructions,
2566 // because the validity and lifetime of a dbg.value containing
2567 // DW_OP_deref becomes difficult to analyze. See PR40628 for examples.
2568 return nullptr;
2571 /// A replacement for a dbg.value expression.
2572 using DbgValReplacement = std::optional<DIExpression *>;
2574 /// Point debug users of \p From to \p To using exprs given by \p RewriteExpr,
2575 /// possibly moving/undefing users to prevent use-before-def. Returns true if
2576 /// changes are made.
2577 static bool rewriteDebugUsers(
2578 Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT,
2579 function_ref<DbgValReplacement(DbgVariableIntrinsic &DII)> RewriteExpr,
2580 function_ref<DbgValReplacement(DPValue &DPV)> RewriteDPVExpr) {
2581 // Find debug users of From.
2582 SmallVector<DbgVariableIntrinsic *, 1> Users;
2583 SmallVector<DPValue *, 1> DPUsers;
2584 findDbgUsers(Users, &From, &DPUsers);
2585 if (Users.empty() && DPUsers.empty())
2586 return false;
2588 // Prevent use-before-def of To.
2589 bool Changed = false;
2591 SmallPtrSet<DbgVariableIntrinsic *, 1> UndefOrSalvage;
2592 SmallPtrSet<DPValue *, 1> UndefOrSalvageDPV;
2593 if (isa<Instruction>(&To)) {
2594 bool DomPointAfterFrom = From.getNextNonDebugInstruction() == &DomPoint;
2596 for (auto *DII : Users) {
2597 // It's common to see a debug user between From and DomPoint. Move it
2598 // after DomPoint to preserve the variable update without any reordering.
2599 if (DomPointAfterFrom && DII->getNextNonDebugInstruction() == &DomPoint) {
2600 LLVM_DEBUG(dbgs() << "MOVE: " << *DII << '\n');
2601 DII->moveAfter(&DomPoint);
2602 Changed = true;
2604 // Users which otherwise aren't dominated by the replacement value must
2605 // be salvaged or deleted.
2606 } else if (!DT.dominates(&DomPoint, DII)) {
2607 UndefOrSalvage.insert(DII);
2611 // DPValue implementation of the above.
2612 for (auto *DPV : DPUsers) {
2613 Instruction *MarkedInstr = DPV->getMarker()->MarkedInstr;
2614 Instruction *NextNonDebug = MarkedInstr;
2615 // The next instruction might still be a dbg.declare, skip over it.
2616 if (isa<DbgVariableIntrinsic>(NextNonDebug))
2617 NextNonDebug = NextNonDebug->getNextNonDebugInstruction();
2619 if (DomPointAfterFrom && NextNonDebug == &DomPoint) {
2620 LLVM_DEBUG(dbgs() << "MOVE: " << *DPV << '\n');
2621 DPV->removeFromParent();
2622 // Ensure there's a marker.
2623 DomPoint.getParent()->insertDPValueAfter(DPV, &DomPoint);
2624 Changed = true;
2625 } else if (!DT.dominates(&DomPoint, MarkedInstr)) {
2626 UndefOrSalvageDPV.insert(DPV);
2631 // Update debug users without use-before-def risk.
2632 for (auto *DII : Users) {
2633 if (UndefOrSalvage.count(DII))
2634 continue;
2636 DbgValReplacement DVR = RewriteExpr(*DII);
2637 if (!DVR)
2638 continue;
2640 DII->replaceVariableLocationOp(&From, &To);
2641 DII->setExpression(*DVR);
2642 LLVM_DEBUG(dbgs() << "REWRITE: " << *DII << '\n');
2643 Changed = true;
2645 for (auto *DPV : DPUsers) {
2646 if (UndefOrSalvageDPV.count(DPV))
2647 continue;
2649 DbgValReplacement DVR = RewriteDPVExpr(*DPV);
2650 if (!DVR)
2651 continue;
2653 DPV->replaceVariableLocationOp(&From, &To);
2654 DPV->setExpression(*DVR);
2655 LLVM_DEBUG(dbgs() << "REWRITE: " << *DPV << '\n');
2656 Changed = true;
2659 if (!UndefOrSalvage.empty() || !UndefOrSalvageDPV.empty()) {
2660 // Try to salvage the remaining debug users.
2661 salvageDebugInfo(From);
2662 Changed = true;
2665 return Changed;
2668 /// Check if a bitcast between a value of type \p FromTy to type \p ToTy would
2669 /// losslessly preserve the bits and semantics of the value. This predicate is
2670 /// symmetric, i.e. swapping \p FromTy and \p ToTy should give the same result.
2672 /// Note that Type::canLosslesslyBitCastTo is not suitable here because it
2673 /// allows semantically inequivalent bitcasts, such as <2 x i64> -> <4 x i32>,
2674 /// and also does not allow lossless pointer <-> integer conversions.
2675 static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy,
2676 Type *ToTy) {
2677 // Trivially compatible types.
2678 if (FromTy == ToTy)
2679 return true;
2681 // Handle compatible pointer <-> integer conversions.
2682 if (FromTy->isIntOrPtrTy() && ToTy->isIntOrPtrTy()) {
2683 bool SameSize = DL.getTypeSizeInBits(FromTy) == DL.getTypeSizeInBits(ToTy);
2684 bool LosslessConversion = !DL.isNonIntegralPointerType(FromTy) &&
2685 !DL.isNonIntegralPointerType(ToTy);
2686 return SameSize && LosslessConversion;
2689 // TODO: This is not exhaustive.
2690 return false;
2693 bool llvm::replaceAllDbgUsesWith(Instruction &From, Value &To,
2694 Instruction &DomPoint, DominatorTree &DT) {
2695 // Exit early if From has no debug users.
2696 if (!From.isUsedByMetadata())
2697 return false;
2699 assert(&From != &To && "Can't replace something with itself");
2701 Type *FromTy = From.getType();
2702 Type *ToTy = To.getType();
2704 auto Identity = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
2705 return DII.getExpression();
2707 auto IdentityDPV = [&](DPValue &DPV) -> DbgValReplacement {
2708 return DPV.getExpression();
2711 // Handle no-op conversions.
2712 Module &M = *From.getModule();
2713 const DataLayout &DL = M.getDataLayout();
2714 if (isBitCastSemanticsPreserving(DL, FromTy, ToTy))
2715 return rewriteDebugUsers(From, To, DomPoint, DT, Identity, IdentityDPV);
2717 // Handle integer-to-integer widening and narrowing.
2718 // FIXME: Use DW_OP_convert when it's available everywhere.
2719 if (FromTy->isIntegerTy() && ToTy->isIntegerTy()) {
2720 uint64_t FromBits = FromTy->getPrimitiveSizeInBits();
2721 uint64_t ToBits = ToTy->getPrimitiveSizeInBits();
2722 assert(FromBits != ToBits && "Unexpected no-op conversion");
2724 // When the width of the result grows, assume that a debugger will only
2725 // access the low `FromBits` bits when inspecting the source variable.
2726 if (FromBits < ToBits)
2727 return rewriteDebugUsers(From, To, DomPoint, DT, Identity, IdentityDPV);
2729 // The width of the result has shrunk. Use sign/zero extension to describe
2730 // the source variable's high bits.
2731 auto SignOrZeroExt = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
2732 DILocalVariable *Var = DII.getVariable();
2734 // Without knowing signedness, sign/zero extension isn't possible.
2735 auto Signedness = Var->getSignedness();
2736 if (!Signedness)
2737 return std::nullopt;
2739 bool Signed = *Signedness == DIBasicType::Signedness::Signed;
2740 return DIExpression::appendExt(DII.getExpression(), ToBits, FromBits,
2741 Signed);
2743 // RemoveDIs: duplicate implementation working on DPValues rather than on
2744 // dbg.value intrinsics.
2745 auto SignOrZeroExtDPV = [&](DPValue &DPV) -> DbgValReplacement {
2746 DILocalVariable *Var = DPV.getVariable();
2748 // Without knowing signedness, sign/zero extension isn't possible.
2749 auto Signedness = Var->getSignedness();
2750 if (!Signedness)
2751 return std::nullopt;
2753 bool Signed = *Signedness == DIBasicType::Signedness::Signed;
2754 return DIExpression::appendExt(DPV.getExpression(), ToBits, FromBits,
2755 Signed);
2757 return rewriteDebugUsers(From, To, DomPoint, DT, SignOrZeroExt,
2758 SignOrZeroExtDPV);
2761 // TODO: Floating-point conversions, vectors.
2762 return false;
2765 std::pair<unsigned, unsigned>
2766 llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) {
2767 unsigned NumDeadInst = 0;
2768 unsigned NumDeadDbgInst = 0;
2769 // Delete the instructions backwards, as it has a reduced likelihood of
2770 // having to update as many def-use and use-def chains.
2771 Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
2772 // RemoveDIs: erasing debug-info must be done manually.
2773 EndInst->dropDbgValues();
2774 while (EndInst != &BB->front()) {
2775 // Delete the next to last instruction.
2776 Instruction *Inst = &*--EndInst->getIterator();
2777 if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
2778 Inst->replaceAllUsesWith(PoisonValue::get(Inst->getType()));
2779 if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
2780 // EHPads can't have DPValues attached to them, but it might be possible
2781 // for things with token type.
2782 Inst->dropDbgValues();
2783 EndInst = Inst;
2784 continue;
2786 if (isa<DbgInfoIntrinsic>(Inst))
2787 ++NumDeadDbgInst;
2788 else
2789 ++NumDeadInst;
2790 // RemoveDIs: erasing debug-info must be done manually.
2791 Inst->dropDbgValues();
2792 Inst->eraseFromParent();
2794 return {NumDeadInst, NumDeadDbgInst};
2797 unsigned llvm::changeToUnreachable(Instruction *I, bool PreserveLCSSA,
2798 DomTreeUpdater *DTU,
2799 MemorySSAUpdater *MSSAU) {
2800 BasicBlock *BB = I->getParent();
2802 if (MSSAU)
2803 MSSAU->changeToUnreachable(I);
2805 SmallSet<BasicBlock *, 8> UniqueSuccessors;
2807 // Loop over all of the successors, removing BB's entry from any PHI
2808 // nodes.
2809 for (BasicBlock *Successor : successors(BB)) {
2810 Successor->removePredecessor(BB, PreserveLCSSA);
2811 if (DTU)
2812 UniqueSuccessors.insert(Successor);
2814 auto *UI = new UnreachableInst(I->getContext(), I);
2815 UI->setDebugLoc(I->getDebugLoc());
2817 // All instructions after this are dead.
2818 unsigned NumInstrsRemoved = 0;
2819 BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
2820 while (BBI != BBE) {
2821 if (!BBI->use_empty())
2822 BBI->replaceAllUsesWith(PoisonValue::get(BBI->getType()));
2823 BBI++->eraseFromParent();
2824 ++NumInstrsRemoved;
2826 if (DTU) {
2827 SmallVector<DominatorTree::UpdateType, 8> Updates;
2828 Updates.reserve(UniqueSuccessors.size());
2829 for (BasicBlock *UniqueSuccessor : UniqueSuccessors)
2830 Updates.push_back({DominatorTree::Delete, BB, UniqueSuccessor});
2831 DTU->applyUpdates(Updates);
2833 BB->flushTerminatorDbgValues();
2834 return NumInstrsRemoved;
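/// Create (without inserting) a CallInst that mirrors \p II: same callee,
/// arguments, operand bundles, attributes, and metadata, with any invoke
/// profile weights collapsed to a single total weight where representable.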
2837 CallInst *llvm::createCallMatchingInvoke(InvokeInst *II) {
2838 SmallVector<Value *, 8> Args(II->args());
2839 SmallVector<OperandBundleDef, 1> OpBundles;
2840 II->getOperandBundlesAsDefs(OpBundles);
2841 CallInst *NewCall = CallInst::Create(II->getFunctionType(),
2842 II->getCalledOperand(), Args, OpBundles);
2843 NewCall->setCallingConv(II->getCallingConv());
2844 NewCall->setAttributes(II->getAttributes());
2845 NewCall->setDebugLoc(II->getDebugLoc());
2846 NewCall->copyMetadata(*II);
2848 // If the invoke had profile metadata, try converting them for CallInst.
2849 uint64_t TotalWeight;
2850 if (NewCall->extractProfTotalWeight(TotalWeight)) {
2851 // Set the total weight if it fits into i32, otherwise reset.
2852 MDBuilder MDB(NewCall->getContext());
2853 auto NewWeights = uint32_t(TotalWeight) != TotalWeight
2854 ? nullptr
2855 : MDB.createBranchWeights({uint32_t(TotalWeight)});
2856 NewCall->setMetadata(LLVMContext::MD_prof, NewWeights);
2859 return NewCall;
2862 /// Convert the specified invoke into a normal call.
2863 CallInst *llvm::changeToCall(InvokeInst *II, DomTreeUpdater *DTU) {
2864 CallInst *NewCall = createCallMatchingInvoke(II);
2865 NewCall->takeName(II);
2866 NewCall->insertBefore(II);
2867 II->replaceAllUsesWith(NewCall);
2869 // Follow the call by a branch to the normal destination.
2870 BasicBlock *NormalDestBB = II->getNormalDest();
2871 BranchInst::Create(NormalDestBB, II);
2873 // Update PHI nodes in the unwind destination
2874 BasicBlock *BB = II->getParent();
2875 BasicBlock *UnwindDestBB = II->getUnwindDest();
2876 UnwindDestBB->removePredecessor(BB);
2877 II->eraseFromParent();
2878 if (DTU)
2879 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
2880 return NewCall;
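/// Convert the call \p CI into an invoke: split its block after the call so
/// the remainder becomes the invoke's normal destination, with \p UnwindEdge
/// as the unwind destination. Returns the split-off block.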
2883 BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
2884 BasicBlock *UnwindEdge,
2885 DomTreeUpdater *DTU) {
2886 BasicBlock *BB = CI->getParent();
2888 // Convert this function call into an invoke instruction. First, split the
2889 // basic block.
2890 BasicBlock *Split = SplitBlock(BB, CI, DTU, /*LI=*/nullptr, /*MSSAU*/ nullptr,
2891 CI->getName() + ".noexc");
2893 // Delete the unconditional branch inserted by SplitBlock
2894 BB->back().eraseFromParent();
2896 // Create the new invoke instruction.
2897 SmallVector<Value *, 8> InvokeArgs(CI->args());
2898 SmallVector<OperandBundleDef, 1> OpBundles;
2900 CI->getOperandBundlesAsDefs(OpBundles);
2902 // Note: we're round tripping operand bundles through memory here, and that
2903 // can potentially be avoided with a cleverer API design that we do not have
2904 // as of this time.
2906 InvokeInst *II =
2907 InvokeInst::Create(CI->getFunctionType(), CI->getCalledOperand(), Split,
2908 UnwindEdge, InvokeArgs, OpBundles, CI->getName(), BB);
2909 II->setDebugLoc(CI->getDebugLoc());
2910 II->setCallingConv(CI->getCallingConv());
2911 II->setAttributes(CI->getAttributes());
2912 II->setMetadata(LLVMContext::MD_prof, CI->getMetadata(LLVMContext::MD_prof));
2914 if (DTU)
2915 DTU->applyUpdates({{DominatorTree::Insert, BB, UnwindEdge}});
2917 // Make sure that anything using the call now uses the invoke! This also
2918 // updates the CallGraph if present, because it uses a WeakTrackingVH.
2919 CI->replaceAllUsesWith(II);
2921 // Delete the original call
2922 Split->front().eraseFromParent();
2923 return Split;
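/// Walk the CFG from the entry block, recording every reachable block in
/// \p Reachable while canonicalizing obviously unreachable code (false
/// assumes, calls or stores through null, no-return calls) into
/// 'unreachable' instructions along the way. Returns true if any change
/// was made.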
2926 static bool markAliveBlocks(Function &F,
2927 SmallPtrSetImpl<BasicBlock *> &Reachable,
2928 DomTreeUpdater *DTU = nullptr) {
2929 SmallVector<BasicBlock*, 128> Worklist;
2930 BasicBlock *BB = &F.front();
2931 Worklist.push_back(BB);
2932 Reachable.insert(BB);
2933 bool Changed = false;
2934 do {
2935 BB = Worklist.pop_back_val();
2937 // Do a quick scan of the basic block, turning any obviously unreachable
2938 // instructions into LLVM unreachable insts. The instruction combining pass
2939 // canonicalizes unreachable insts into stores to null or undef.
2940 for (Instruction &I : *BB) {
2941 if (auto *CI = dyn_cast<CallInst>(&I)) {
2942 Value *Callee = CI->getCalledOperand();
2943 // Handle intrinsic calls.
2944 if (Function *F = dyn_cast<Function>(Callee)) {
2945 auto IntrinsicID = F->getIntrinsicID();
2946 // Assumptions that are known to be false are equivalent to
2947 // unreachable. Also, if the condition is undefined, then we make the
2948 // choice most beneficial to the optimizer, and choose that to also be
2949 // unreachable.
2950 if (IntrinsicID == Intrinsic::assume) {
2951 if (match(CI->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) {
2952 // Don't insert a call to llvm.trap right before the unreachable.
2953 changeToUnreachable(CI, false, DTU);
2954 Changed = true;
2955 break;
2957 } else if (IntrinsicID == Intrinsic::experimental_guard) {
2958 // A call to the guard intrinsic bails out of the current
2959 // compilation unit if the predicate passed to it is false. If the
2960 // predicate is a constant false, then we know the guard will bail
2961 // out of the current compile unconditionally, so all code following
2962 // it is dead.
2964 // Note: unlike in llvm.assume, it is not "obviously profitable" for
2965 // guards to treat `undef` as `false` since a guard on `undef` can
2966 // still be useful for widening.
2967 if (match(CI->getArgOperand(0), m_Zero()))
2968 if (!isa<UnreachableInst>(CI->getNextNode())) {
2969 changeToUnreachable(CI->getNextNode(), false, DTU);
2970 Changed = true;
2971 break;
2974 } else if ((isa<ConstantPointerNull>(Callee) &&
2975 !NullPointerIsDefined(CI->getFunction(),
2976 cast<PointerType>(Callee->getType())
2977 ->getAddressSpace())) ||
2978 isa<UndefValue>(Callee)) {
2979 changeToUnreachable(CI, false, DTU);
2980 Changed = true;
2981 break;
2983 if (CI->doesNotReturn() && !CI->isMustTailCall()) {
2984 // If we found a call to a no-return function, insert an unreachable
2985 // instruction after it. Make sure there isn't *already* one there
2986 // though.
2987 if (!isa<UnreachableInst>(CI->getNextNonDebugInstruction())) {
2988 // Don't insert a call to llvm.trap right before the unreachable.
2989 changeToUnreachable(CI->getNextNonDebugInstruction(), false, DTU);
2990 Changed = true;
2992 break;
2994 } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
2995 // Store to undef and store to null are undefined and used to signal
2996 // that they should be changed to unreachable by passes that can't
2997 // modify the CFG.
2999 // Don't touch volatile stores.
3000 if (SI->isVolatile()) continue;
3002 Value *Ptr = SI->getOperand(1);
3004 if (isa<UndefValue>(Ptr) ||
3005 (isa<ConstantPointerNull>(Ptr) &&
3006 !NullPointerIsDefined(SI->getFunction(),
3007 SI->getPointerAddressSpace()))) {
3008 changeToUnreachable(SI, false, DTU);
3009 Changed = true;
3010 break;
3015 Instruction *Terminator = BB->getTerminator();
3016 if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
3017 // Turn invokes that call 'nounwind' functions into ordinary calls.
3018 Value *Callee = II->getCalledOperand();
3019 if ((isa<ConstantPointerNull>(Callee) &&
3020 !NullPointerIsDefined(BB->getParent())) ||
3021 isa<UndefValue>(Callee)) {
3022 changeToUnreachable(II, false, DTU);
3023 Changed = true;
3024 } else {
3025 if (II->doesNotReturn() &&
3026 !isa<UnreachableInst>(II->getNormalDest()->front())) {
3027 // If we found an invoke of a no-return function,
3028 // create a new empty basic block with an `unreachable` terminator,
3029 // and set it as the normal destination for the invoke,
3030 // unless that is already the case.
3031 // Note that the original normal destination could have other uses.
3032 BasicBlock *OrigNormalDest = II->getNormalDest();
3033 OrigNormalDest->removePredecessor(II->getParent());
3034 LLVMContext &Ctx = II->getContext();
3035 BasicBlock *UnreachableNormalDest = BasicBlock::Create(
3036 Ctx, OrigNormalDest->getName() + ".unreachable",
3037 II->getFunction(), OrigNormalDest);
3038 new UnreachableInst(Ctx, UnreachableNormalDest);
3039 II->setNormalDest(UnreachableNormalDest);
3040 if (DTU)
3041 DTU->applyUpdates(
3042 {{DominatorTree::Delete, BB, OrigNormalDest},
3043 {DominatorTree::Insert, BB, UnreachableNormalDest}});
3044 Changed = true;
3046 if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) {
3047 if (II->use_empty() && !II->mayHaveSideEffects()) {
3048 // Replace the dead invoke with a branch to its normal destination.
3049 BasicBlock *NormalDestBB = II->getNormalDest();
3050 BasicBlock *UnwindDestBB = II->getUnwindDest();
3051 BranchInst::Create(NormalDestBB, II);
3052 UnwindDestBB->removePredecessor(II->getParent());
3053 II->eraseFromParent();
3054 if (DTU)
3055 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
3056 } else
3057 changeToCall(II, DTU);
3058 Changed = true;
    } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) {
      // Remove catchpads which cannot be reached.
      struct CatchPadDenseMapInfo {
        static CatchPadInst *getEmptyKey() {
          return DenseMapInfo<CatchPadInst *>::getEmptyKey();
        }

        static CatchPadInst *getTombstoneKey() {
          return DenseMapInfo<CatchPadInst *>::getTombstoneKey();
        }

        static unsigned getHashValue(CatchPadInst *CatchPad) {
          return static_cast<unsigned>(hash_combine_range(
              CatchPad->value_op_begin(), CatchPad->value_op_end()));
        }

        static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
          if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
              RHS == getEmptyKey() || RHS == getTombstoneKey())
            return LHS == RHS;
          return LHS->isIdenticalTo(RHS);
        }
      };

      SmallDenseMap<BasicBlock *, int, 8> NumPerSuccessorCases;
      // Set of unique CatchPads.
      SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4,
                    CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
          HandlerSet;
      detail::DenseSetEmpty Empty;
      for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
                                             E = CatchSwitch->handler_end();
           I != E; ++I) {
        BasicBlock *HandlerBB = *I;
        if (DTU)
          ++NumPerSuccessorCases[HandlerBB];
        auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHI());
        if (!HandlerSet.insert({CatchPad, Empty}).second) {
          if (DTU)
            --NumPerSuccessorCases[HandlerBB];
          CatchSwitch->removeHandler(I);
          --I;
          --E;
          Changed = true;
        }
      }
      if (DTU) {
        std::vector<DominatorTree::UpdateType> Updates;
        for (const std::pair<BasicBlock *, int> &I : NumPerSuccessorCases)
          if (I.second == 0)
            Updates.push_back({DominatorTree::Delete, BB, I.first});
        DTU->applyUpdates(Updates);
      }
    }
    Changed |= ConstantFoldTerminator(BB, true, nullptr, DTU);
    for (BasicBlock *Successor : successors(BB))
      if (Reachable.insert(Successor).second)
        Worklist.push_back(Successor);
  } while (!Worklist.empty());
  return Changed;
}
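// A sketch of the effect of removeUnwindEdge (illustrative IR): for an
// invoke, removing the unwind edge degrades it to an ordinary call,
//   invoke void @f() to label %normal unwind label %lpad
// becoming
//   call void @f()
//   br label %normal
// while cleanupret and catchswitch terminators are rewritten to
// "unwind to caller".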
Instruction *llvm::removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU) {
  Instruction *TI = BB->getTerminator();

  if (auto *II = dyn_cast<InvokeInst>(TI))
    return changeToCall(II, DTU);

  Instruction *NewTI;
  BasicBlock *UnwindDest;

  if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
    NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI);
    UnwindDest = CRI->getUnwindDest();
  } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
    auto *NewCatchSwitch = CatchSwitchInst::Create(
        CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(),
        CatchSwitch->getName(), CatchSwitch);
    for (BasicBlock *PadBB : CatchSwitch->handlers())
      NewCatchSwitch->addHandler(PadBB);

    NewTI = NewCatchSwitch;
    UnwindDest = CatchSwitch->getUnwindDest();
  } else {
    llvm_unreachable("Could not find unwind successor");
  }

  NewTI->takeName(TI);
  NewTI->setDebugLoc(TI->getDebugLoc());
  UnwindDest->removePredecessor(BB);
  TI->replaceAllUsesWith(NewTI);
  TI->eraseFromParent();
  if (DTU)
    DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDest}});
  return NewTI;
}
/// removeUnreachableBlocks - Remove blocks that are not reachable, even
/// if they are in a dead cycle. Return true if a change was made, false
/// otherwise.
bool llvm::removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU,
                                   MemorySSAUpdater *MSSAU) {
  SmallPtrSet<BasicBlock *, 16> Reachable;
  bool Changed = markAliveBlocks(F, Reachable, DTU);

  // If every block is reachable, there is nothing to remove.
  if (Reachable.size() == F.size())
    return Changed;

  assert(Reachable.size() < F.size());

  // Are there any blocks left to actually delete?
  SmallSetVector<BasicBlock *, 8> BlocksToRemove;
  for (BasicBlock &BB : F) {
    // Skip reachable basic blocks.
    if (Reachable.count(&BB))
      continue;
    // Skip already-deleted blocks.
    if (DTU && DTU->isBBPendingDeletion(&BB))
      continue;
    BlocksToRemove.insert(&BB);
  }

  if (BlocksToRemove.empty())
    return Changed;

  Changed = true;
  NumRemoved += BlocksToRemove.size();

  if (MSSAU)
    MSSAU->removeBlocks(BlocksToRemove);

  DeleteDeadBlocks(BlocksToRemove.takeVector(), DTU);

  return Changed;
}
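// combineMetadata merges the metadata of two instructions that are about to
// be folded into one (K survives, J goes away). For example (illustrative
// IR), when CSE combines two loads of the same location
//   %a = load i32, ptr %p, !range !{i32 0, i32 10}
//   %b = load i32, ptr %p, !range !{i32 5, i32 20}
// the surviving load may only keep the most generic claim, here
// !range !{i32 0, i32 20}, as computed by MDNode::getMostGenericRange.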
void llvm::combineMetadata(Instruction *K, const Instruction *J,
                           ArrayRef<unsigned> KnownIDs, bool DoesKMove) {
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  K->dropUnknownNonDebugMetadata(KnownIDs);
  K->getAllMetadataOtherThanDebugLoc(Metadata);
  for (const auto &MD : Metadata) {
    unsigned Kind = MD.first;
    MDNode *JMD = J->getMetadata(Kind);
    MDNode *KMD = MD.second;

    switch (Kind) {
      default:
        K->setMetadata(Kind, nullptr); // Remove unknown metadata
        break;
      case LLVMContext::MD_dbg:
        llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
      case LLVMContext::MD_DIAssignID:
        K->mergeDIAssignID(J);
        break;
      case LLVMContext::MD_tbaa:
        K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
        break;
      case LLVMContext::MD_alias_scope:
        K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
        break;
      case LLVMContext::MD_noalias:
      case LLVMContext::MD_mem_parallel_loop_access:
        K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
        break;
      case LLVMContext::MD_access_group:
        K->setMetadata(LLVMContext::MD_access_group,
                       intersectAccessGroups(K, J));
        break;
      case LLVMContext::MD_range:
        if (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef))
          K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
        break;
      case LLVMContext::MD_fpmath:
        K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
        break;
      case LLVMContext::MD_invariant_load:
        // If K moves, only set the !invariant.load if it is present in both
        // instructions.
        if (DoesKMove)
          K->setMetadata(Kind, JMD);
        break;
      case LLVMContext::MD_nonnull:
        if (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef))
          K->setMetadata(Kind, JMD);
        break;
      case LLVMContext::MD_invariant_group:
        // Preserve !invariant.group in K.
        break;
      case LLVMContext::MD_align:
        if (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef))
          K->setMetadata(
              Kind, MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
        break;
      case LLVMContext::MD_dereferenceable:
      case LLVMContext::MD_dereferenceable_or_null:
        if (DoesKMove)
          K->setMetadata(Kind,
            MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
        break;
      case LLVMContext::MD_preserve_access_index:
        // Preserve !preserve.access.index in K.
        break;
      case LLVMContext::MD_noundef:
        // If K does move, keep noundef if it is present in both instructions.
        if (DoesKMove)
          K->setMetadata(Kind, JMD);
        break;
      case LLVMContext::MD_nontemporal:
        // Preserve !nontemporal if it is present on both instructions.
        K->setMetadata(Kind, JMD);
        break;
      case LLVMContext::MD_prof:
        if (DoesKMove)
          K->setMetadata(Kind, MDNode::getMergedProfMetadata(KMD, JMD, K, J));
        break;
    }
  }
  // Set !invariant.group from J if J has it. If both instructions have it
  // then we will just pick it from J - even when they are different.
  // Also make sure that K is load or store - f.e. combining bitcast with load
  // could produce bitcast with invariant.group metadata, which is invalid.
  // FIXME: we should try to preserve both invariant.group md if they are
  // different, but right now instruction can only have one invariant.group.
  if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
    if (isa<LoadInst>(K) || isa<StoreInst>(K))
      K->setMetadata(LLVMContext::MD_invariant_group, JMD);
}
void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J,
                                 bool KDominatesJ) {
  unsigned KnownIDs[] = {LLVMContext::MD_tbaa,
                         LLVMContext::MD_alias_scope,
                         LLVMContext::MD_noalias,
                         LLVMContext::MD_range,
                         LLVMContext::MD_fpmath,
                         LLVMContext::MD_invariant_load,
                         LLVMContext::MD_nonnull,
                         LLVMContext::MD_invariant_group,
                         LLVMContext::MD_align,
                         LLVMContext::MD_dereferenceable,
                         LLVMContext::MD_dereferenceable_or_null,
                         LLVMContext::MD_access_group,
                         LLVMContext::MD_preserve_access_index,
                         LLVMContext::MD_prof,
                         LLVMContext::MD_nontemporal,
                         LLVMContext::MD_noundef};
  combineMetadata(K, J, KnownIDs, KDominatesJ);
}
void llvm::copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source) {
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  Source.getAllMetadata(MD);
  MDBuilder MDB(Dest.getContext());
  Type *NewType = Dest.getType();
  const DataLayout &DL = Source.getModule()->getDataLayout();
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a load instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to loads, you almost certainly want to add it here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
    case LLVMContext::MD_access_group:
    case LLVMContext::MD_noundef:
      // All of these directly apply.
      Dest.setMetadata(ID, N);
      break;

    case LLVMContext::MD_nonnull:
      copyNonnullMetadata(Source, N, Dest);
      break;

    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These only directly apply if the new type is also a pointer.
      if (NewType->isPointerTy())
        Dest.setMetadata(ID, N);
      break;

    case LLVMContext::MD_range:
      copyRangeMetadata(DL, Source, N, Dest);
      break;
    }
  }
}
void llvm::patchReplacementInstruction(Instruction *I, Value *Repl) {
  auto *ReplInst = dyn_cast<Instruction>(Repl);
  if (!ReplInst)
    return;

  // Patch the replacement so that it is not more restrictive than the value
  // being replaced.
  WithOverflowInst *UnusedWO;
  // When replacing the result of a llvm.*.with.overflow intrinsic with an
  // overflowing binary operator, nuw/nsw flags may no longer hold.
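  //
  // For example (illustrative IR), if
  //   %wo = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
  //   %v  = extractvalue { i32, i1 } %wo, 0
  // is replaced by an existing `add nsw i32 %a, %b`, the `nsw` must be
  // dropped: the intrinsic's result is well defined on overflow, while the
  // flagged add would be poison.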
  if (isa<OverflowingBinaryOperator>(ReplInst) &&
      match(I, m_ExtractValue<0>(m_WithOverflowInst(UnusedWO))))
    ReplInst->dropPoisonGeneratingFlags();
  // Note that if 'I' is a load being replaced by some operation,
  // for example, by an arithmetic operation, then andIRFlags()
  // would just erase all math flags from the original arithmetic
  // operation, which is clearly not wanted and not needed.
  else if (!isa<LoadInst>(I))
    ReplInst->andIRFlags(I);

  // FIXME: If both the original and replacement value are part of the
  // same control-flow region (meaning that the execution of one
  // guarantees the execution of the other), then we can combine the
  // noalias scopes here and do better than the general conservative
  // answer used in combineMetadata().
  //
  // In general, GVN unifies expressions over different control-flow
  // regions, and so we need a conservative combination of the noalias
  // scopes.
  combineMetadataForCSE(ReplInst, I, false);
}
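// A hypothetical usage sketch (the names A, B, DT, BB0, and BB1 are not from
// this file): once a pass has proved that A may be replaced by B whenever the
// edge BB0 -> BB1 is taken, it can rewrite exactly the dominated uses with
//   replaceDominatedUsesWith(A, B, DT, BasicBlockEdge(BB0, BB1));
// leaving all other uses of A untouched.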
template <typename RootType, typename DominatesFn>
static unsigned replaceDominatedUsesWith(Value *From, Value *To,
                                         const RootType &Root,
                                         const DominatesFn &Dominates) {
  assert(From->getType() == To->getType());

  unsigned Count = 0;
  for (Use &U : llvm::make_early_inc_range(From->uses())) {
    if (!Dominates(Root, U))
      continue;
    LLVM_DEBUG(dbgs() << "Replace dominated use of '";
               From->printAsOperand(dbgs());
               dbgs() << "' with " << *To << " in " << *U.getUser() << "\n");
    U.set(To);
    ++Count;
  }
  return Count;
}
unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To) {
  assert(From->getType() == To->getType());
  auto *BB = From->getParent();
  unsigned Count = 0;

  for (Use &U : llvm::make_early_inc_range(From->uses())) {
    auto *I = cast<Instruction>(U.getUser());
    if (I->getParent() == BB)
      continue;
    U.set(To);
    ++Count;
  }
  return Count;
}
unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
                                        DominatorTree &DT,
                                        const BasicBlockEdge &Root) {
  auto Dominates = [&DT](const BasicBlockEdge &Root, const Use &U) {
    return DT.dominates(Root, U);
  };
  return ::replaceDominatedUsesWith(From, To, Root, Dominates);
}

unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
                                        DominatorTree &DT,
                                        const BasicBlock *BB) {
  auto Dominates = [&DT](const BasicBlock *BB, const Use &U) {
    return DT.dominates(BB, U);
  };
  return ::replaceDominatedUsesWith(From, To, BB, Dominates);
}
bool llvm::callsGCLeafFunction(const CallBase *Call,
                               const TargetLibraryInfo &TLI) {
  // Check if the function is specifically marked as a gc leaf function.
  if (Call->hasFnAttr("gc-leaf-function"))
    return true;
  if (const Function *F = Call->getCalledFunction()) {
    if (F->hasFnAttribute("gc-leaf-function"))
      return true;

    if (auto IID = F->getIntrinsicID()) {
      // Most LLVM intrinsics do not take safepoints.
      return IID != Intrinsic::experimental_gc_statepoint &&
             IID != Intrinsic::experimental_deoptimize &&
             IID != Intrinsic::memcpy_element_unordered_atomic &&
             IID != Intrinsic::memmove_element_unordered_atomic;
    }
  }

  // Lib calls can be materialized by some passes, and won't be
  // marked as 'gc-leaf-function.' All available Libcalls are
  // GC-leaf.
  LibFunc LF;
  if (TLI.getLibFunc(*Call, LF)) {
    return TLI.has(LF);
  }

  return false;
}
void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N,
                               LoadInst &NewLI) {
  auto *NewTy = NewLI.getType();

  // This only directly applies if the new type is also a pointer.
  if (NewTy->isPointerTy()) {
    NewLI.setMetadata(LLVMContext::MD_nonnull, N);
    return;
  }

  // The only other translation we can do is to integral loads with !range
  // metadata.
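  //
  // For example (illustrative IR), a pointer load
  //   %p = load ptr, ptr %pp, !nonnull !0
  // re-materialized as an integer load of pointer width becomes
  //   %v = load i64, ptr %pp, !range !1   ; !1 = !{i64 1, i64 0}
  // i.e. the wrapping range [1, 0), which excludes exactly the value zero.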
  if (!NewTy->isIntegerTy())
    return;

  MDBuilder MDB(NewLI.getContext());
  const Value *Ptr = OldLI.getPointerOperand();
  auto *ITy = cast<IntegerType>(NewTy);
  auto *NullInt = ConstantExpr::getPtrToInt(
      ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
  auto *NonNullInt = ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
  NewLI.setMetadata(LLVMContext::MD_range,
                    MDB.createRange(NonNullInt, NullInt));
}
void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
                             MDNode *N, LoadInst &NewLI) {
  auto *NewTy = NewLI.getType();
  // Simply copy the metadata if the type did not change.
  if (NewTy == OldLI.getType()) {
    NewLI.setMetadata(LLVMContext::MD_range, N);
    return;
  }

  // Give up unless it is converted to a pointer where there is a single very
  // valuable mapping we can do reliably.
  // FIXME: It would be nice to propagate this in more ways, but the type
  // conversions make it hard.
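  //
  // For example (illustrative IR), an integer load known to be non-zero
  //   %v = load i64, ptr %pp, !range !0   ; !0 = !{i64 1, i64 0}
  // reloaded at pointer type of the same width can keep that fact as
  //   %p = load ptr, ptr %pp, !nonnull !1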
  if (!NewTy->isPointerTy())
    return;

  unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
  if (BitWidth == OldLI.getType()->getScalarSizeInBits() &&
      !getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
    MDNode *NN = MDNode::get(OldLI.getContext(), std::nullopt);
    NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
  }
}
void llvm::dropDebugUsers(Instruction &I) {
  SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
  SmallVector<DPValue *, 1> DPUsers;
  findDbgUsers(DbgUsers, &I, &DPUsers);
  for (auto *DII : DbgUsers)
    DII->eraseFromParent();
  for (auto *DPV : DPUsers)
    DPV->eraseFromParent();
}
void llvm::hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt,
                                    BasicBlock *BB) {
  // Since we are moving the instructions out of their basic block, we do not
  // retain their original debug locations (DILocations) and debug intrinsic
  // instructions.
  //
  // Doing so would degrade the debugging experience and adversely affect the
  // accuracy of profiling information.
  //
  // Currently, when hoisting the instructions, we take the following actions:
  // - Remove their debug intrinsic instructions.
  // - Set their debug locations to the values from the insertion point.
  //
  // As per PR39141 (comment #8), the more fundamental reason why the dbg.values
  // need to be deleted, is because there will not be any instructions with a
  // DILocation in either branch left after performing the transformation. We
  // can only insert a dbg.value after the two branches are joined again.
  //
  // See PR38762, PR39243 for more details.
  //
  // TODO: Extend llvm.dbg.value to take more than one SSA Value (PR39141) to
  // encode predicated DIExpressions that yield different results on different
  // code paths.

  for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
    Instruction *I = &*II;
    I->dropUBImplyingAttrsAndMetadata();
    if (I->isUsedByMetadata())
      dropDebugUsers(*I);
    // RemoveDIs: drop debug-info too, as the following code does.
    I->dropDbgValues();
    if (I->isDebugOrPseudoInst()) {
      // Remove DbgInfo and pseudo probe Intrinsics.
      II = I->eraseFromParent();
      continue;
    }
    I->setDebugLoc(InsertPt->getDebugLoc());
    ++II;
  }
  DomBlock->splice(InsertPt->getIterator(), BB, BB->begin(),
                   BB->getTerminator()->getIterator());
}
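// For example (illustrative; the exact operand encoding is up to DIBuilder),
// for the constant `i32 42` the helper below returns the expression built by
// DIBuilder::createConstantValueExpression, conceptually
//   !DIExpression(DW_OP_constu, 42, DW_OP_stack_value)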
DIExpression *llvm::getExpressionForConstant(DIBuilder &DIB, const Constant &C,
                                             Type &Ty) {
  // Create integer constant expression.
  auto createIntegerExpression = [&DIB](const Constant &CV) -> DIExpression * {
    const APInt &API = cast<ConstantInt>(&CV)->getValue();
    std::optional<int64_t> InitIntOpt = API.trySExtValue();
    return InitIntOpt ? DIB.createConstantValueExpression(
                            static_cast<uint64_t>(*InitIntOpt))
                      : nullptr;
  };

  if (isa<ConstantInt>(C))
    return createIntegerExpression(C);

  auto *FP = dyn_cast<ConstantFP>(&C);
  if (FP && (Ty.isFloatTy() || Ty.isDoubleTy())) {
    const APFloat &APF = FP->getValueAPF();
    return DIB.createConstantValueExpression(
        APF.bitcastToAPInt().getZExtValue());
  }

  if (!Ty.isPointerTy())
    return nullptr;

  if (isa<ConstantPointerNull>(C))
    return DIB.createConstantValueExpression(0);

  if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(&C))
    if (CE->getOpcode() == Instruction::IntToPtr) {
      const Value *V = CE->getOperand(0);
      if (auto CI = dyn_cast_or_null<ConstantInt>(V))
        return createIntegerExpression(*CI);
    }

  return nullptr;
}
namespace {

/// A potential constituent of a bitreverse or bswap expression. See
/// collectBitParts for a fuller explanation.
struct BitPart {
  BitPart(Value *P, unsigned BW) : Provider(P) {
    Provenance.resize(BW);
  }

  /// The Value that this is a bitreverse/bswap of.
  Value *Provider;

  /// The "provenance" of each bit. Provenance[A] = B means that bit A
  /// in Provider becomes bit B in the result of this expression.
  SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.

  enum { Unset = -1 };
};

} // end anonymous namespace
/// Analyze the specified subexpression and see if it is capable of providing
/// pieces of a bswap or bitreverse. The subexpression provides a potential
/// piece of a bswap or bitreverse if it can be proved that each non-zero bit in
/// the output of the expression came from a corresponding bit in some other
/// value. This function is recursive, and the end result is a mapping of
/// bitnumber to bitnumber. It is the caller's responsibility to validate that
/// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
///
/// For example, if the current subexpression is "(shl i32 %X, 24)" then we
/// know that the expression deposits the low byte of %X into the high byte of
/// the result and that all other bits are zero. This expression is accepted
/// and a BitPart is returned with Provider set to %X and Provenance[24-31]
/// set to [0-7].
///
/// For vector types, all analysis is performed at the per-element level. No
/// cross-element analysis is supported (shuffle/insertion/reduction), and all
/// constant masks must be splatted across all elements.
///
/// To avoid revisiting values, the BitPart results are memoized into the
/// provided map. To avoid unnecessary copying of BitParts, BitParts are
/// constructed in-place in the \c BPS map. Because of this \c BPS needs to
/// store BitParts objects, not pointers. As we need the concept of a nullptr
/// BitParts (Value has been analyzed and the analysis failed), we use an
/// optional type instead to provide the same functionality.
///
/// Because we pass around references into \c BPS, we must use a container that
/// does not invalidate internal references (std::map instead of DenseMap).
static const std::optional<BitPart> &
collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
                std::map<Value *, std::optional<BitPart>> &BPS, int Depth,
                bool &FoundRoot) {
  auto I = BPS.find(V);
  if (I != BPS.end())
    return I->second;

  auto &Result = BPS[V] = std::nullopt;
  auto BitWidth = V->getType()->getScalarSizeInBits();

  // Can't do integer/elements > 128 bits.
  if (BitWidth > 128)
    return Result;

  // Prevent stack overflow by limiting the recursion depth
  if (Depth == BitPartRecursionMaxDepth) {
    LLVM_DEBUG(dbgs() << "collectBitParts max recursion depth reached.\n");
    return Result;
  }

  if (auto *I = dyn_cast<Instruction>(V)) {
    Value *X, *Y;
    const APInt *C;

    // If this is an or instruction, it may be an inner node of the bswap.
    if (match(V, m_Or(m_Value(X), m_Value(Y)))) {
      // Check we have both sources and they are from the same provider.
      const auto &A = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
                                      Depth + 1, FoundRoot);
      if (!A || !A->Provider)
        return Result;

      const auto &B = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS,
                                      Depth + 1, FoundRoot);
      if (!B || A->Provider != B->Provider)
        return Result;

      // Try and merge the two together.
      Result = BitPart(A->Provider, BitWidth);
      for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) {
        if (A->Provenance[BitIdx] != BitPart::Unset &&
            B->Provenance[BitIdx] != BitPart::Unset &&
            A->Provenance[BitIdx] != B->Provenance[BitIdx])
          return Result = std::nullopt;

        if (A->Provenance[BitIdx] == BitPart::Unset)
          Result->Provenance[BitIdx] = B->Provenance[BitIdx];
        else
          Result->Provenance[BitIdx] = A->Provenance[BitIdx];
      }

      return Result;
    }
    // If this is a logical shift by a constant, recurse then shift the result.
    if (match(V, m_LogicalShift(m_Value(X), m_APInt(C)))) {
      const APInt &BitShift = *C;

      // Ensure the shift amount is defined.
      if (BitShift.uge(BitWidth))
        return Result;

      // For bswap-only, limit shift amounts to whole bytes, for an early exit.
      if (!MatchBitReversals && (BitShift.getZExtValue() % 8) != 0)
        return Result;

      const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth + 1, FoundRoot);
      if (!Res)
        return Result;
      Result = Res;

      // Perform the "shift" on BitProvenance.
      auto &P = Result->Provenance;
      if (I->getOpcode() == Instruction::Shl) {
        P.erase(std::prev(P.end(), BitShift.getZExtValue()), P.end());
        P.insert(P.begin(), BitShift.getZExtValue(), BitPart::Unset);
      } else {
        P.erase(P.begin(), std::next(P.begin(), BitShift.getZExtValue()));
        P.insert(P.end(), BitShift.getZExtValue(), BitPart::Unset);
      }

      return Result;
    }
    // If this is a logical 'and' with a mask that clears bits, recurse then
    // unset the appropriate bits.
    if (match(V, m_And(m_Value(X), m_APInt(C)))) {
      const APInt &AndMask = *C;

      // Check that the mask allows a multiple of 8 bits for a bswap, for an
      // early exit.
      unsigned NumMaskedBits = AndMask.popcount();
      if (!MatchBitReversals && (NumMaskedBits % 8) != 0)
        return Result;

      const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth + 1, FoundRoot);
      if (!Res)
        return Result;
      Result = Res;

      for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
        // If the AndMask is zero for this bit, clear the bit.
        if (AndMask[BitIdx] == 0)
          Result->Provenance[BitIdx] = BitPart::Unset;
      return Result;
    }
    // If this is a zext instruction, zero extend the result.
    if (match(V, m_ZExt(m_Value(X)))) {
      const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth + 1, FoundRoot);
      if (!Res)
        return Result;

      Result = BitPart(Res->Provider, BitWidth);
      auto NarrowBitWidth = X->getType()->getScalarSizeInBits();
      for (unsigned BitIdx = 0; BitIdx < NarrowBitWidth; ++BitIdx)
        Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
      for (unsigned BitIdx = NarrowBitWidth; BitIdx < BitWidth; ++BitIdx)
        Result->Provenance[BitIdx] = BitPart::Unset;
      return Result;
    }

    // If this is a truncate instruction, extract the lower bits.
    if (match(V, m_Trunc(m_Value(X)))) {
      const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth + 1, FoundRoot);
      if (!Res)
        return Result;

      Result = BitPart(Res->Provider, BitWidth);
      for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
        Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
      return Result;
    }
    // BITREVERSE - most likely due to us previously matching a partial
    // bitreverse.
    if (match(V, m_BitReverse(m_Value(X)))) {
      const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth + 1, FoundRoot);
      if (!Res)
        return Result;

      Result = BitPart(Res->Provider, BitWidth);
      for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
        Result->Provenance[(BitWidth - 1) - BitIdx] = Res->Provenance[BitIdx];
      return Result;
    }

    // BSWAP - most likely due to us previously matching a partial bswap.
    if (match(V, m_BSwap(m_Value(X)))) {
      const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth + 1, FoundRoot);
      if (!Res)
        return Result;

      unsigned ByteWidth = BitWidth / 8;
      Result = BitPart(Res->Provider, BitWidth);
      for (unsigned ByteIdx = 0; ByteIdx < ByteWidth; ++ByteIdx) {
        unsigned ByteBitOfs = ByteIdx * 8;
        for (unsigned BitIdx = 0; BitIdx < 8; ++BitIdx)
          Result->Provenance[(BitWidth - 8 - ByteBitOfs) + BitIdx] =
              Res->Provenance[ByteBitOfs + BitIdx];
      }
      return Result;
    }
    // Funnel 'double' shifts take 3 operands, 2 inputs and the shift
    // amount (modulo).
    // fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
    // fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
    if (match(V, m_FShl(m_Value(X), m_Value(Y), m_APInt(C))) ||
        match(V, m_FShr(m_Value(X), m_Value(Y), m_APInt(C)))) {
      // We can treat fshr as a fshl by flipping the modulo amount.
      unsigned ModAmt = C->urem(BitWidth);
      if (cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::fshr)
        ModAmt = BitWidth - ModAmt;

      // For bswap-only, limit shift amounts to whole bytes, for an early exit.
      if (!MatchBitReversals && (ModAmt % 8) != 0)
        return Result;

      // Check we have both sources and they are from the same provider.
      const auto &LHS = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth + 1, FoundRoot);
      if (!LHS || !LHS->Provider)
        return Result;

      const auto &RHS = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS,
                                        Depth + 1, FoundRoot);
      if (!RHS || LHS->Provider != RHS->Provider)
        return Result;

      unsigned StartBitRHS = BitWidth - ModAmt;
      Result = BitPart(LHS->Provider, BitWidth);
      for (unsigned BitIdx = 0; BitIdx < StartBitRHS; ++BitIdx)
        Result->Provenance[BitIdx + ModAmt] = LHS->Provenance[BitIdx];
      for (unsigned BitIdx = 0; BitIdx < ModAmt; ++BitIdx)
        Result->Provenance[BitIdx] = RHS->Provenance[BitIdx + StartBitRHS];
      return Result;
    }
  }
  // If we've already found a root input value then we're never going to merge
  // these back together.
  if (FoundRoot)
    return Result;

  // Okay, we got to something that isn't a shift, 'or', 'and', etc. This must
  // be the root input value to the bswap/bitreverse.
  FoundRoot = true;
  Result = BitPart(V, BitWidth);
  for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
    Result->Provenance[BitIdx] = BitIdx;
  return Result;
}
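// A worked example for the byte-reversal check below (illustrative): in a
// 32-bit bswap, output bit 0 must come from input bit 24, so From = 24 and
// To = 0. The low three bits agree (24 % 8 == 0 % 8), and in byte indices
// From = 3, To = 0, BitWidth = 4, so the test 3 == 4 - 0 - 1 succeeds.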
static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
                                          unsigned BitWidth) {
  if (From % 8 != To % 8)
    return false;
  // Convert from bit indices to byte indices and check for a byte reversal.
  From >>= 3;
  To >>= 3;
  BitWidth >>= 3;
  return From == BitWidth - To - 1;
}

static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
                                               unsigned BitWidth) {
  return From == BitWidth - To - 1;
}
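// For example (illustrative IR), the classic i16 byteswap idiom
//   %hi = shl i16 %x, 8
//   %lo = lshr i16 %x, 8
//   %r  = or i16 %hi, %lo
// is recognized below and rewritten into
//   %r = call i16 @llvm.bswap.i16(i16 %x)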
bool llvm::recognizeBSwapOrBitReverseIdiom(
    Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
    SmallVectorImpl<Instruction *> &InsertedInsts) {
  if (!match(I, m_Or(m_Value(), m_Value())) &&
      !match(I, m_FShl(m_Value(), m_Value(), m_Value())) &&
      !match(I, m_FShr(m_Value(), m_Value(), m_Value())) &&
      !match(I, m_BSwap(m_Value())))
    return false;
  if (!MatchBSwaps && !MatchBitReversals)
    return false;
  Type *ITy = I->getType();
  if (!ITy->isIntOrIntVectorTy() || ITy->getScalarSizeInBits() > 128)
    return false; // Can't do integer/elements > 128 bits.

  // Try to find all the pieces corresponding to the bswap.
  bool FoundRoot = false;
  std::map<Value *, std::optional<BitPart>> BPS;
  const auto &Res =
      collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS, 0, FoundRoot);
  if (!Res)
    return false;
  ArrayRef<int8_t> BitProvenance = Res->Provenance;
  assert(all_of(BitProvenance,
                [](int8_t I) { return I == BitPart::Unset || 0 <= I; }) &&
         "Illegal bit provenance index");

  // If the upper bits are zero, then attempt to perform as a truncated op.
  Type *DemandedTy = ITy;
  if (BitProvenance.back() == BitPart::Unset) {
    while (!BitProvenance.empty() && BitProvenance.back() == BitPart::Unset)
      BitProvenance = BitProvenance.drop_back();
    if (BitProvenance.empty())
      return false; // TODO - handle null value?
    DemandedTy = Type::getIntNTy(I->getContext(), BitProvenance.size());
    if (auto *IVecTy = dyn_cast<VectorType>(ITy))
      DemandedTy = VectorType::get(DemandedTy, IVecTy);
  }

  // Check BitProvenance hasn't found a source larger than the result type.
  unsigned DemandedBW = DemandedTy->getScalarSizeInBits();
  if (DemandedBW > ITy->getScalarSizeInBits())
    return false;

  // Now, is the bit permutation correct for a bswap or a bitreverse? We can
  // only byteswap values with an even number of bytes.
  APInt DemandedMask = APInt::getAllOnes(DemandedBW);
  bool OKForBSwap = MatchBSwaps && (DemandedBW % 16) == 0;
  bool OKForBitReverse = MatchBitReversals;
  for (unsigned BitIdx = 0;
       (BitIdx < DemandedBW) && (OKForBSwap || OKForBitReverse); ++BitIdx) {
    if (BitProvenance[BitIdx] == BitPart::Unset) {
      DemandedMask.clearBit(BitIdx);
      continue;
    }
    OKForBSwap &= bitTransformIsCorrectForBSwap(BitProvenance[BitIdx], BitIdx,
                                                DemandedBW);
    OKForBitReverse &= bitTransformIsCorrectForBitReverse(BitProvenance[BitIdx],
                                                          BitIdx, DemandedBW);
  }

  Intrinsic::ID Intrin;
  if (OKForBSwap)
    Intrin = Intrinsic::bswap;
  else if (OKForBitReverse)
    Intrin = Intrinsic::bitreverse;
  else
    return false;

  Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, DemandedTy);
  Value *Provider = Res->Provider;

  // We may need to truncate the provider.
  if (DemandedTy != Provider->getType()) {
    auto *Trunc =
        CastInst::CreateIntegerCast(Provider, DemandedTy, false, "trunc", I);
    InsertedInsts.push_back(Trunc);
    Provider = Trunc;
  }

  Instruction *Result = CallInst::Create(F, Provider, "rev", I);
  InsertedInsts.push_back(Result);

  if (!DemandedMask.isAllOnes()) {
    auto *Mask = ConstantInt::get(DemandedTy, DemandedMask);
    Result = BinaryOperator::Create(Instruction::And, Result, Mask, "mask", I);
    InsertedInsts.push_back(Result);
  }

  // We may need to zeroextend back to the result type.
  if (ITy != Result->getType()) {
    auto *ExtInst = CastInst::CreateIntegerCast(Result, ITy, false, "zext", I);
    InsertedInsts.push_back(ExtInst);
  }

  return true;
}
// CodeGen has special handling for some string functions that may replace
// them with target-specific intrinsics. Since that'd skip our interceptors
// in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
// we mark affected calls as NoBuiltin, which will disable optimization
// in CodeGen.
void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(
    CallInst *CI, const TargetLibraryInfo *TLI) {
  Function *F = CI->getCalledFunction();
  LibFunc Func;
  if (F && !F->hasLocalLinkage() && F->hasName() &&
      TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) &&
      !F->doesNotAccessMemory())
    CI->addFnAttr(Attribute::NoBuiltin);
}
bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
  // We can't have a PHI with a metadata type.
  if (I->getOperand(OpIdx)->getType()->isMetadataTy())
    return false;

  // Early exit.
  if (!isa<Constant>(I->getOperand(OpIdx)))
    return true;

  switch (I->getOpcode()) {
  default:
    return true;
  case Instruction::Call:
  case Instruction::Invoke: {
    const auto &CB = cast<CallBase>(*I);

    // Can't handle inline asm. Skip it.
    if (CB.isInlineAsm())
      return false;

    // Constant bundle operands may need to retain their constant-ness for
    // correctness.
    if (CB.isBundleOperand(OpIdx))
      return false;

    if (OpIdx < CB.arg_size()) {
      // Some variadic intrinsics require constants in the variadic arguments,
      // which currently aren't markable as immarg.
      if (isa<IntrinsicInst>(CB) &&
          OpIdx >= CB.getFunctionType()->getNumParams()) {
        // This is known to be OK for stackmap.
        return CB.getIntrinsicID() == Intrinsic::experimental_stackmap;
      }

      // gcroot is a special case, since it requires a constant argument which
      // isn't also required to be a simple ConstantInt.
      if (CB.getIntrinsicID() == Intrinsic::gcroot)
        return false;

      // Some intrinsic operands are required to be immediates.
      return !CB.paramHasAttr(OpIdx, Attribute::ImmArg);
    }

    // Past the arguments, OpIdx names the callee; replacing that is never
    // allowed for an intrinsic, but it may be possible for an ordinary call.
    return !isa<IntrinsicInst>(CB);
  }
  case Instruction::ShuffleVector:
    // Shufflevector masks are constant.
    return OpIdx != 2;
  case Instruction::Switch:
  case Instruction::ExtractValue:
    // All operands apart from the first are constant.
    return OpIdx == 0;
  case Instruction::InsertValue:
    // All operands apart from the first and the second are constant.
    return OpIdx < 2;
  case Instruction::Alloca:
    // Static allocas (constant size in the entry block) are handled by
    // prologue/epilogue insertion so they're free anyway. We definitely don't
    // want to make them non-constant.
    return !cast<AllocaInst>(I)->isStaticAlloca();
  case Instruction::GetElementPtr:
    if (OpIdx == 0)
      return true;
    gep_type_iterator It = gep_type_begin(I);
    for (auto E = std::next(It, OpIdx); It != E; ++It)
      if (It.isStruct())
        return false;
    return true;
  }
}
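// For example (illustrative IR), to invert
//   %c = icmp eq i32 %a, %b
// the helper below first tries constant folding, then looks for an existing
//   %not = xor i1 %c, true
// in the same block to reuse, and only as a last resort materializes a new
//   %c.inv = xor i1 %c, true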
Value *llvm::invertCondition(Value *Condition) {
  // First: Check if it's a constant
  if (Constant *C = dyn_cast<Constant>(Condition))
    return ConstantExpr::getNot(C);

  // Second: If the condition is already inverted, return the original value
  Value *NotCondition;
  if (match(Condition, m_Not(m_Value(NotCondition))))
    return NotCondition;

  BasicBlock *Parent = nullptr;
  Instruction *Inst = dyn_cast<Instruction>(Condition);
  if (Inst)
    Parent = Inst->getParent();
  else if (Argument *Arg = dyn_cast<Argument>(Condition))
    Parent = &Arg->getParent()->getEntryBlock();
  assert(Parent && "Unsupported condition to invert");

  // Third: Check all the users for an invert
  for (User *U : Condition->users())
    if (Instruction *I = dyn_cast<Instruction>(U))
      if (I->getParent() == Parent && match(I, m_Not(m_Specific(Condition))))
        return I;

  // Last option: Create a new instruction
  auto *Inverted =
      BinaryOperator::CreateNot(Condition, Condition->getName() + ".inv");
  if (Inst && !isa<PHINode>(Inst))
    Inverted->insertAfter(Inst);
  else
    Inverted->insertBefore(&*Parent->getFirstInsertionPt());
  return Inverted;
}
bool llvm::inferAttributesFromOthers(Function &F) {
  // Note: We explicitly check for attributes rather than using cover functions
  // because some of the cover functions include the logic being implemented.

  bool Changed = false;
  // readnone + not convergent implies nosync
  if (!F.hasFnAttribute(Attribute::NoSync) &&
      F.doesNotAccessMemory() && !F.isConvergent()) {
    F.setNoSync();
    Changed = true;
  }

  // readonly implies nofree
  if (!F.hasFnAttribute(Attribute::NoFree) && F.onlyReadsMemory()) {
    F.setDoesNotFreeMemory();
    Changed = true;
  }

  // willreturn implies mustprogress
  if (!F.hasFnAttribute(Attribute::MustProgress) && F.willReturn()) {
    F.setMustProgress();
    Changed = true;
  }

  // TODO: There are a bunch of cases of restrictive memory effects we
  // can infer by inspecting arguments of argmemonly-ish functions.

  return Changed;
}