// Provenance: llvm-project, llvm/lib/IR/Value.cpp
// (git web-view header and blob hash removed as extraction artifacts)
//===-- Value.cpp - Implement the Value class -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Value, ValueHandle, and User classes.
//
//===----------------------------------------------------------------------===//
#include "llvm/IR/Value.h"
#include "LLVMContextImpl.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DerivedUser.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/TypedPointerType.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueSymbolTable.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>

using namespace llvm;
39 static cl::opt<unsigned> UseDerefAtPointSemantics(
40 "use-dereferenceable-at-point-semantics", cl::Hidden, cl::init(false),
41 cl::desc("Deref attributes and metadata infer facts at definition only"));
//===----------------------------------------------------------------------===//
//                                Value Class
//===----------------------------------------------------------------------===//
46 static inline Type *checkType(Type *Ty) {
47 assert(Ty && "Value defined with a null type: Error!");
48 assert(!isa<TypedPointerType>(Ty->getScalarType()) &&
49 "Cannot have values with typed pointer types");
50 return Ty;
53 Value::Value(Type *ty, unsigned scid)
54 : VTy(checkType(ty)), UseList(nullptr), SubclassID(scid), HasValueHandle(0),
55 SubclassOptionalData(0), SubclassData(0), NumUserOperands(0),
56 IsUsedByMD(false), HasName(false), HasMetadata(false) {
57 static_assert(ConstantFirstVal == 0, "!(SubclassID < ConstantFirstVal)");
58 // FIXME: Why isn't this in the subclass gunk??
59 // Note, we cannot call isa<CallInst> before the CallInst has been
60 // constructed.
61 unsigned OpCode = 0;
62 if (SubclassID >= InstructionVal)
63 OpCode = SubclassID - InstructionVal;
64 if (OpCode == Instruction::Call || OpCode == Instruction::Invoke ||
65 OpCode == Instruction::CallBr)
66 assert((VTy->isFirstClassType() || VTy->isVoidTy() || VTy->isStructTy()) &&
67 "invalid CallBase type!");
68 else if (SubclassID != BasicBlockVal &&
69 (/*SubclassID < ConstantFirstVal ||*/ SubclassID > ConstantLastVal))
70 assert((VTy->isFirstClassType() || VTy->isVoidTy()) &&
71 "Cannot create non-first-class values except for constants!");
72 static_assert(sizeof(Value) == 2 * sizeof(void *) + 2 * sizeof(unsigned),
73 "Value too big");
76 Value::~Value() {
77 // Notify all ValueHandles (if present) that this value is going away.
78 if (HasValueHandle)
79 ValueHandleBase::ValueIsDeleted(this);
80 if (isUsedByMetadata())
81 ValueAsMetadata::handleDeletion(this);
83 // Remove associated metadata from context.
84 if (HasMetadata)
85 clearMetadata();
87 #ifndef NDEBUG // Only in -g mode...
88 // Check to make sure that there are no uses of this value that are still
89 // around when the value is destroyed. If there are, then we have a dangling
90 // reference and something is wrong. This code is here to print out where
91 // the value is still being referenced.
93 // Note that use_empty() cannot be called here, as it eventually downcasts
94 // 'this' to GlobalValue (derived class of Value), but GlobalValue has already
95 // been destructed, so accessing it is UB.
97 if (!materialized_use_empty()) {
98 dbgs() << "While deleting: " << *VTy << " %" << getName() << "\n";
99 for (auto *U : users())
100 dbgs() << "Use still stuck around after Def is destroyed:" << *U << "\n";
102 #endif
103 assert(materialized_use_empty() && "Uses remain when a value is destroyed!");
105 // If this value is named, destroy the name. This should not be in a symtab
106 // at this point.
107 destroyValueName();
110 void Value::deleteValue() {
111 switch (getValueID()) {
112 #define HANDLE_VALUE(Name) \
113 case Value::Name##Val: \
114 delete static_cast<Name *>(this); \
115 break;
116 #define HANDLE_MEMORY_VALUE(Name) \
117 case Value::Name##Val: \
118 static_cast<DerivedUser *>(this)->DeleteValue( \
119 static_cast<DerivedUser *>(this)); \
120 break;
121 #define HANDLE_CONSTANT(Name) \
122 case Value::Name##Val: \
123 llvm_unreachable("constants should be destroyed with destroyConstant"); \
124 break;
125 #define HANDLE_INSTRUCTION(Name) /* nothing */
126 #include "llvm/IR/Value.def"
128 #define HANDLE_INST(N, OPC, CLASS) \
129 case Value::InstructionVal + Instruction::OPC: \
130 delete static_cast<CLASS *>(this); \
131 break;
132 #define HANDLE_USER_INST(N, OPC, CLASS)
133 #include "llvm/IR/Instruction.def"
135 default:
136 llvm_unreachable("attempting to delete unknown value kind");
140 void Value::destroyValueName() {
141 ValueName *Name = getValueName();
142 if (Name) {
143 MallocAllocator Allocator;
144 Name->Destroy(Allocator);
146 setValueName(nullptr);
149 bool Value::hasNUses(unsigned N) const {
150 return hasNItems(use_begin(), use_end(), N);
153 bool Value::hasNUsesOrMore(unsigned N) const {
154 return hasNItemsOrMore(use_begin(), use_end(), N);
157 bool Value::hasOneUser() const {
158 if (use_empty())
159 return false;
160 if (hasOneUse())
161 return true;
162 return std::equal(++user_begin(), user_end(), user_begin());
165 static bool isUnDroppableUser(const User *U) { return !U->isDroppable(); }
167 Use *Value::getSingleUndroppableUse() {
168 Use *Result = nullptr;
169 for (Use &U : uses()) {
170 if (!U.getUser()->isDroppable()) {
171 if (Result)
172 return nullptr;
173 Result = &U;
176 return Result;
179 User *Value::getUniqueUndroppableUser() {
180 User *Result = nullptr;
181 for (auto *U : users()) {
182 if (!U->isDroppable()) {
183 if (Result && Result != U)
184 return nullptr;
185 Result = U;
188 return Result;
191 bool Value::hasNUndroppableUses(unsigned int N) const {
192 return hasNItems(user_begin(), user_end(), N, isUnDroppableUser);
195 bool Value::hasNUndroppableUsesOrMore(unsigned int N) const {
196 return hasNItemsOrMore(user_begin(), user_end(), N, isUnDroppableUser);
199 void Value::dropDroppableUses(
200 llvm::function_ref<bool(const Use *)> ShouldDrop) {
201 SmallVector<Use *, 8> ToBeEdited;
202 for (Use &U : uses())
203 if (U.getUser()->isDroppable() && ShouldDrop(&U))
204 ToBeEdited.push_back(&U);
205 for (Use *U : ToBeEdited)
206 dropDroppableUse(*U);
209 void Value::dropDroppableUsesIn(User &Usr) {
210 assert(Usr.isDroppable() && "Expected a droppable user!");
211 for (Use &UsrOp : Usr.operands()) {
212 if (UsrOp.get() == this)
213 dropDroppableUse(UsrOp);
217 void Value::dropDroppableUse(Use &U) {
218 U.removeFromList();
219 if (auto *Assume = dyn_cast<AssumeInst>(U.getUser())) {
220 unsigned OpNo = U.getOperandNo();
221 if (OpNo == 0)
222 U.set(ConstantInt::getTrue(Assume->getContext()));
223 else {
224 U.set(UndefValue::get(U.get()->getType()));
225 CallInst::BundleOpInfo &BOI = Assume->getBundleOpInfoForOperand(OpNo);
226 BOI.Tag = Assume->getContext().pImpl->getOrInsertBundleTag("ignore");
228 return;
231 llvm_unreachable("unkown droppable use");
234 bool Value::isUsedInBasicBlock(const BasicBlock *BB) const {
235 // This can be computed either by scanning the instructions in BB, or by
236 // scanning the use list of this Value. Both lists can be very long, but
237 // usually one is quite short.
239 // Scan both lists simultaneously until one is exhausted. This limits the
240 // search to the shorter list.
241 BasicBlock::const_iterator BI = BB->begin(), BE = BB->end();
242 const_user_iterator UI = user_begin(), UE = user_end();
243 for (; BI != BE && UI != UE; ++BI, ++UI) {
244 // Scan basic block: Check if this Value is used by the instruction at BI.
245 if (is_contained(BI->operands(), this))
246 return true;
247 // Scan use list: Check if the use at UI is in BB.
248 const auto *User = dyn_cast<Instruction>(*UI);
249 if (User && User->getParent() == BB)
250 return true;
252 return false;
255 unsigned Value::getNumUses() const {
256 return (unsigned)std::distance(use_begin(), use_end());
259 static bool getSymTab(Value *V, ValueSymbolTable *&ST) {
260 ST = nullptr;
261 if (Instruction *I = dyn_cast<Instruction>(V)) {
262 if (BasicBlock *P = I->getParent())
263 if (Function *PP = P->getParent())
264 ST = PP->getValueSymbolTable();
265 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(V)) {
266 if (Function *P = BB->getParent())
267 ST = P->getValueSymbolTable();
268 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
269 if (Module *P = GV->getParent())
270 ST = &P->getValueSymbolTable();
271 } else if (Argument *A = dyn_cast<Argument>(V)) {
272 if (Function *P = A->getParent())
273 ST = P->getValueSymbolTable();
274 } else {
275 assert(isa<Constant>(V) && "Unknown value type!");
276 return true; // no name is setable for this.
278 return false;
281 ValueName *Value::getValueName() const {
282 if (!HasName) return nullptr;
284 LLVMContext &Ctx = getContext();
285 auto I = Ctx.pImpl->ValueNames.find(this);
286 assert(I != Ctx.pImpl->ValueNames.end() &&
287 "No name entry found!");
289 return I->second;
292 void Value::setValueName(ValueName *VN) {
293 LLVMContext &Ctx = getContext();
295 assert(HasName == Ctx.pImpl->ValueNames.count(this) &&
296 "HasName bit out of sync!");
298 if (!VN) {
299 if (HasName)
300 Ctx.pImpl->ValueNames.erase(this);
301 HasName = false;
302 return;
305 HasName = true;
306 Ctx.pImpl->ValueNames[this] = VN;
309 StringRef Value::getName() const {
310 // Make sure the empty string is still a C string. For historical reasons,
311 // some clients want to call .data() on the result and expect it to be null
312 // terminated.
313 if (!hasName())
314 return StringRef("", 0);
315 return getValueName()->getKey();
318 void Value::setNameImpl(const Twine &NewName) {
319 bool NeedNewName =
320 !getContext().shouldDiscardValueNames() || isa<GlobalValue>(this);
322 // Fast-path: LLVMContext can be set to strip out non-GlobalValue names
323 // and there is no need to delete the old name.
324 if (!NeedNewName && !hasName())
325 return;
327 // Fast path for common IRBuilder case of setName("") when there is no name.
328 if (NewName.isTriviallyEmpty() && !hasName())
329 return;
331 SmallString<256> NameData;
332 StringRef NameRef = NeedNewName ? NewName.toStringRef(NameData) : "";
333 assert(NameRef.find_first_of(0) == StringRef::npos &&
334 "Null bytes are not allowed in names");
336 // Name isn't changing?
337 if (getName() == NameRef)
338 return;
340 assert(!getType()->isVoidTy() && "Cannot assign a name to void values!");
342 // Get the symbol table to update for this object.
343 ValueSymbolTable *ST;
344 if (getSymTab(this, ST))
345 return; // Cannot set a name on this value (e.g. constant).
347 if (!ST) { // No symbol table to update? Just do the change.
348 // NOTE: Could optimize for the case the name is shrinking to not deallocate
349 // then reallocated.
350 destroyValueName();
352 if (!NameRef.empty()) {
353 // Create the new name.
354 assert(NeedNewName);
355 MallocAllocator Allocator;
356 setValueName(ValueName::create(NameRef, Allocator));
357 getValueName()->setValue(this);
359 return;
362 // NOTE: Could optimize for the case the name is shrinking to not deallocate
363 // then reallocated.
364 if (hasName()) {
365 // Remove old name.
366 ST->removeValueName(getValueName());
367 destroyValueName();
369 if (NameRef.empty())
370 return;
373 // Name is changing to something new.
374 assert(NeedNewName);
375 setValueName(ST->createValueName(NameRef, this));
378 void Value::setName(const Twine &NewName) {
379 setNameImpl(NewName);
380 if (Function *F = dyn_cast<Function>(this))
381 F->recalculateIntrinsicID();
384 void Value::takeName(Value *V) {
385 assert(V != this && "Illegal call to this->takeName(this)!");
386 ValueSymbolTable *ST = nullptr;
387 // If this value has a name, drop it.
388 if (hasName()) {
389 // Get the symtab this is in.
390 if (getSymTab(this, ST)) {
391 // We can't set a name on this value, but we need to clear V's name if
392 // it has one.
393 if (V->hasName()) V->setName("");
394 return; // Cannot set a name on this value (e.g. constant).
397 // Remove old name.
398 if (ST)
399 ST->removeValueName(getValueName());
400 destroyValueName();
403 // Now we know that this has no name.
405 // If V has no name either, we're done.
406 if (!V->hasName()) return;
408 // Get this's symtab if we didn't before.
409 if (!ST) {
410 if (getSymTab(this, ST)) {
411 // Clear V's name.
412 V->setName("");
413 return; // Cannot set a name on this value (e.g. constant).
417 // Get V's ST, this should always succeed, because V has a name.
418 ValueSymbolTable *VST;
419 bool Failure = getSymTab(V, VST);
420 assert(!Failure && "V has a name, so it should have a ST!"); (void)Failure;
422 // If these values are both in the same symtab, we can do this very fast.
423 // This works even if both values have no symtab yet.
424 if (ST == VST) {
425 // Take the name!
426 setValueName(V->getValueName());
427 V->setValueName(nullptr);
428 getValueName()->setValue(this);
429 return;
432 // Otherwise, things are slightly more complex. Remove V's name from VST and
433 // then reinsert it into ST.
435 if (VST)
436 VST->removeValueName(V->getValueName());
437 setValueName(V->getValueName());
438 V->setValueName(nullptr);
439 getValueName()->setValue(this);
441 if (ST)
442 ST->reinsertValue(this);
445 #ifndef NDEBUG
446 std::string Value::getNameOrAsOperand() const {
447 if (!getName().empty())
448 return std::string(getName());
450 std::string BBName;
451 raw_string_ostream OS(BBName);
452 printAsOperand(OS, false);
453 return OS.str();
455 #endif
457 void Value::assertModuleIsMaterializedImpl() const {
458 #ifndef NDEBUG
459 const GlobalValue *GV = dyn_cast<GlobalValue>(this);
460 if (!GV)
461 return;
462 const Module *M = GV->getParent();
463 if (!M)
464 return;
465 assert(M->isMaterialized());
466 #endif
469 #ifndef NDEBUG
470 static bool contains(SmallPtrSetImpl<ConstantExpr *> &Cache, ConstantExpr *Expr,
471 Constant *C) {
472 if (!Cache.insert(Expr).second)
473 return false;
475 for (auto &O : Expr->operands()) {
476 if (O == C)
477 return true;
478 auto *CE = dyn_cast<ConstantExpr>(O);
479 if (!CE)
480 continue;
481 if (contains(Cache, CE, C))
482 return true;
484 return false;
487 static bool contains(Value *Expr, Value *V) {
488 if (Expr == V)
489 return true;
491 auto *C = dyn_cast<Constant>(V);
492 if (!C)
493 return false;
495 auto *CE = dyn_cast<ConstantExpr>(Expr);
496 if (!CE)
497 return false;
499 SmallPtrSet<ConstantExpr *, 4> Cache;
500 return contains(Cache, CE, C);
502 #endif // NDEBUG
504 void Value::doRAUW(Value *New, ReplaceMetadataUses ReplaceMetaUses) {
505 assert(New && "Value::replaceAllUsesWith(<null>) is invalid!");
506 assert(!contains(New, this) &&
507 "this->replaceAllUsesWith(expr(this)) is NOT valid!");
508 assert(New->getType() == getType() &&
509 "replaceAllUses of value with new value of different type!");
511 // Notify all ValueHandles (if present) that this value is going away.
512 if (HasValueHandle)
513 ValueHandleBase::ValueIsRAUWd(this, New);
514 if (ReplaceMetaUses == ReplaceMetadataUses::Yes && isUsedByMetadata())
515 ValueAsMetadata::handleRAUW(this, New);
517 while (!materialized_use_empty()) {
518 Use &U = *UseList;
519 // Must handle Constants specially, we cannot call replaceUsesOfWith on a
520 // constant because they are uniqued.
521 if (auto *C = dyn_cast<Constant>(U.getUser())) {
522 if (!isa<GlobalValue>(C)) {
523 C->handleOperandChange(this, New);
524 continue;
528 U.set(New);
531 if (BasicBlock *BB = dyn_cast<BasicBlock>(this))
532 BB->replaceSuccessorsPhiUsesWith(cast<BasicBlock>(New));
535 void Value::replaceAllUsesWith(Value *New) {
536 doRAUW(New, ReplaceMetadataUses::Yes);
539 void Value::replaceNonMetadataUsesWith(Value *New) {
540 doRAUW(New, ReplaceMetadataUses::No);
543 void Value::replaceUsesWithIf(Value *New,
544 llvm::function_ref<bool(Use &U)> ShouldReplace) {
545 assert(New && "Value::replaceUsesWithIf(<null>) is invalid!");
546 assert(New->getType() == getType() &&
547 "replaceUses of value with new value of different type!");
549 SmallVector<TrackingVH<Constant>, 8> Consts;
550 SmallPtrSet<Constant *, 8> Visited;
552 for (Use &U : llvm::make_early_inc_range(uses())) {
553 if (!ShouldReplace(U))
554 continue;
555 // Must handle Constants specially, we cannot call replaceUsesOfWith on a
556 // constant because they are uniqued.
557 if (auto *C = dyn_cast<Constant>(U.getUser())) {
558 if (!isa<GlobalValue>(C)) {
559 if (Visited.insert(C).second)
560 Consts.push_back(TrackingVH<Constant>(C));
561 continue;
564 U.set(New);
567 while (!Consts.empty()) {
568 // FIXME: handleOperandChange() updates all the uses in a given Constant,
569 // not just the one passed to ShouldReplace
570 Consts.pop_back_val()->handleOperandChange(this, New);
574 /// Replace llvm.dbg.* uses of MetadataAsValue(ValueAsMetadata(V)) outside BB
575 /// with New.
576 static void replaceDbgUsesOutsideBlock(Value *V, Value *New, BasicBlock *BB) {
577 SmallVector<DbgVariableIntrinsic *> DbgUsers;
578 findDbgUsers(DbgUsers, V);
579 for (auto *DVI : DbgUsers) {
580 if (DVI->getParent() != BB)
581 DVI->replaceVariableLocationOp(V, New);
585 // Like replaceAllUsesWith except it does not handle constants or basic blocks.
586 // This routine leaves uses within BB.
587 void Value::replaceUsesOutsideBlock(Value *New, BasicBlock *BB) {
588 assert(New && "Value::replaceUsesOutsideBlock(<null>, BB) is invalid!");
589 assert(!contains(New, this) &&
590 "this->replaceUsesOutsideBlock(expr(this), BB) is NOT valid!");
591 assert(New->getType() == getType() &&
592 "replaceUses of value with new value of different type!");
593 assert(BB && "Basic block that may contain a use of 'New' must be defined\n");
595 replaceDbgUsesOutsideBlock(this, New, BB);
596 replaceUsesWithIf(New, [BB](Use &U) {
597 auto *I = dyn_cast<Instruction>(U.getUser());
598 // Don't replace if it's an instruction in the BB basic block.
599 return !I || I->getParent() != BB;
603 namespace {
604 // Various metrics for how much to strip off of pointers.
605 enum PointerStripKind {
606 PSK_ZeroIndices,
607 PSK_ZeroIndicesAndAliases,
608 PSK_ZeroIndicesSameRepresentation,
609 PSK_ForAliasAnalysis,
610 PSK_InBoundsConstantIndices,
611 PSK_InBounds
614 template <PointerStripKind StripKind> static void NoopCallback(const Value *) {}
616 template <PointerStripKind StripKind>
617 static const Value *stripPointerCastsAndOffsets(
618 const Value *V,
619 function_ref<void(const Value *)> Func = NoopCallback<StripKind>) {
620 if (!V->getType()->isPointerTy())
621 return V;
623 // Even though we don't look through PHI nodes, we could be called on an
624 // instruction in an unreachable block, which may be on a cycle.
625 SmallPtrSet<const Value *, 4> Visited;
627 Visited.insert(V);
628 do {
629 Func(V);
630 if (auto *GEP = dyn_cast<GEPOperator>(V)) {
631 switch (StripKind) {
632 case PSK_ZeroIndices:
633 case PSK_ZeroIndicesAndAliases:
634 case PSK_ZeroIndicesSameRepresentation:
635 case PSK_ForAliasAnalysis:
636 if (!GEP->hasAllZeroIndices())
637 return V;
638 break;
639 case PSK_InBoundsConstantIndices:
640 if (!GEP->hasAllConstantIndices())
641 return V;
642 [[fallthrough]];
643 case PSK_InBounds:
644 if (!GEP->isInBounds())
645 return V;
646 break;
648 V = GEP->getPointerOperand();
649 } else if (Operator::getOpcode(V) == Instruction::BitCast) {
650 V = cast<Operator>(V)->getOperand(0);
651 if (!V->getType()->isPointerTy())
652 return V;
653 } else if (StripKind != PSK_ZeroIndicesSameRepresentation &&
654 Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
655 // TODO: If we know an address space cast will not change the
656 // representation we could look through it here as well.
657 V = cast<Operator>(V)->getOperand(0);
658 } else if (StripKind == PSK_ZeroIndicesAndAliases && isa<GlobalAlias>(V)) {
659 V = cast<GlobalAlias>(V)->getAliasee();
660 } else if (StripKind == PSK_ForAliasAnalysis && isa<PHINode>(V) &&
661 cast<PHINode>(V)->getNumIncomingValues() == 1) {
662 V = cast<PHINode>(V)->getIncomingValue(0);
663 } else {
664 if (const auto *Call = dyn_cast<CallBase>(V)) {
665 if (const Value *RV = Call->getReturnedArgOperand()) {
666 V = RV;
667 continue;
669 // The result of launder.invariant.group must alias it's argument,
670 // but it can't be marked with returned attribute, that's why it needs
671 // special case.
672 if (StripKind == PSK_ForAliasAnalysis &&
673 (Call->getIntrinsicID() == Intrinsic::launder_invariant_group ||
674 Call->getIntrinsicID() == Intrinsic::strip_invariant_group)) {
675 V = Call->getArgOperand(0);
676 continue;
679 return V;
681 assert(V->getType()->isPointerTy() && "Unexpected operand type!");
682 } while (Visited.insert(V).second);
684 return V;
686 } // end anonymous namespace
688 const Value *Value::stripPointerCasts() const {
689 return stripPointerCastsAndOffsets<PSK_ZeroIndices>(this);
692 const Value *Value::stripPointerCastsAndAliases() const {
693 return stripPointerCastsAndOffsets<PSK_ZeroIndicesAndAliases>(this);
696 const Value *Value::stripPointerCastsSameRepresentation() const {
697 return stripPointerCastsAndOffsets<PSK_ZeroIndicesSameRepresentation>(this);
700 const Value *Value::stripInBoundsConstantOffsets() const {
701 return stripPointerCastsAndOffsets<PSK_InBoundsConstantIndices>(this);
704 const Value *Value::stripPointerCastsForAliasAnalysis() const {
705 return stripPointerCastsAndOffsets<PSK_ForAliasAnalysis>(this);
708 const Value *Value::stripAndAccumulateConstantOffsets(
709 const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
710 bool AllowInvariantGroup,
711 function_ref<bool(Value &, APInt &)> ExternalAnalysis) const {
712 if (!getType()->isPtrOrPtrVectorTy())
713 return this;
715 unsigned BitWidth = Offset.getBitWidth();
716 assert(BitWidth == DL.getIndexTypeSizeInBits(getType()) &&
717 "The offset bit width does not match the DL specification.");
719 // Even though we don't look through PHI nodes, we could be called on an
720 // instruction in an unreachable block, which may be on a cycle.
721 SmallPtrSet<const Value *, 4> Visited;
722 Visited.insert(this);
723 const Value *V = this;
724 do {
725 if (auto *GEP = dyn_cast<GEPOperator>(V)) {
726 // If in-bounds was requested, we do not strip non-in-bounds GEPs.
727 if (!AllowNonInbounds && !GEP->isInBounds())
728 return V;
730 // If one of the values we have visited is an addrspacecast, then
731 // the pointer type of this GEP may be different from the type
732 // of the Ptr parameter which was passed to this function. This
733 // means when we construct GEPOffset, we need to use the size
734 // of GEP's pointer type rather than the size of the original
735 // pointer type.
736 APInt GEPOffset(DL.getIndexTypeSizeInBits(V->getType()), 0);
737 if (!GEP->accumulateConstantOffset(DL, GEPOffset, ExternalAnalysis))
738 return V;
740 // Stop traversal if the pointer offset wouldn't fit in the bit-width
741 // provided by the Offset argument. This can happen due to AddrSpaceCast
742 // stripping.
743 if (GEPOffset.getSignificantBits() > BitWidth)
744 return V;
746 // External Analysis can return a result higher/lower than the value
747 // represents. We need to detect overflow/underflow.
748 APInt GEPOffsetST = GEPOffset.sextOrTrunc(BitWidth);
749 if (!ExternalAnalysis) {
750 Offset += GEPOffsetST;
751 } else {
752 bool Overflow = false;
753 APInt OldOffset = Offset;
754 Offset = Offset.sadd_ov(GEPOffsetST, Overflow);
755 if (Overflow) {
756 Offset = OldOffset;
757 return V;
760 V = GEP->getPointerOperand();
761 } else if (Operator::getOpcode(V) == Instruction::BitCast ||
762 Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
763 V = cast<Operator>(V)->getOperand(0);
764 } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
765 if (!GA->isInterposable())
766 V = GA->getAliasee();
767 } else if (const auto *Call = dyn_cast<CallBase>(V)) {
768 if (const Value *RV = Call->getReturnedArgOperand())
769 V = RV;
770 if (AllowInvariantGroup && Call->isLaunderOrStripInvariantGroup())
771 V = Call->getArgOperand(0);
773 assert(V->getType()->isPtrOrPtrVectorTy() && "Unexpected operand type!");
774 } while (Visited.insert(V).second);
776 return V;
779 const Value *
780 Value::stripInBoundsOffsets(function_ref<void(const Value *)> Func) const {
781 return stripPointerCastsAndOffsets<PSK_InBounds>(this, Func);
784 bool Value::canBeFreed() const {
785 assert(getType()->isPointerTy());
787 // Cases that can simply never be deallocated
788 // *) Constants aren't allocated per se, thus not deallocated either.
789 if (isa<Constant>(this))
790 return false;
792 // Handle byval/byref/sret/inalloca/preallocated arguments. The storage
793 // lifetime is guaranteed to be longer than the callee's lifetime.
794 if (auto *A = dyn_cast<Argument>(this)) {
795 if (A->hasPointeeInMemoryValueAttr())
796 return false;
797 // A pointer to an object in a function which neither frees, nor can arrange
798 // for another thread to free on its behalf, can not be freed in the scope
799 // of the function. Note that this logic is restricted to memory
800 // allocations in existance before the call; a nofree function *is* allowed
801 // to free memory it allocated.
802 const Function *F = A->getParent();
803 if (F->doesNotFreeMemory() && F->hasNoSync())
804 return false;
807 const Function *F = nullptr;
808 if (auto *I = dyn_cast<Instruction>(this))
809 F = I->getFunction();
810 if (auto *A = dyn_cast<Argument>(this))
811 F = A->getParent();
813 if (!F)
814 return true;
816 // With garbage collection, deallocation typically occurs solely at or after
817 // safepoints. If we're compiling for a collector which uses the
818 // gc.statepoint infrastructure, safepoints aren't explicitly present
819 // in the IR until after lowering from abstract to physical machine model.
820 // The collector could chose to mix explicit deallocation and gc'd objects
821 // which is why we need the explicit opt in on a per collector basis.
822 if (!F->hasGC())
823 return true;
825 const auto &GCName = F->getGC();
826 if (GCName == "statepoint-example") {
827 auto *PT = cast<PointerType>(this->getType());
828 if (PT->getAddressSpace() != 1)
829 // For the sake of this example GC, we arbitrarily pick addrspace(1) as
830 // our GC managed heap. This must match the same check in
831 // RewriteStatepointsForGC (and probably needs better factored.)
832 return true;
834 // It is cheaper to scan for a declaration than to scan for a use in this
835 // function. Note that gc.statepoint is a type overloaded function so the
836 // usual trick of requesting declaration of the intrinsic from the module
837 // doesn't work.
838 for (auto &Fn : *F->getParent())
839 if (Fn.getIntrinsicID() == Intrinsic::experimental_gc_statepoint)
840 return true;
841 return false;
843 return true;
846 uint64_t Value::getPointerDereferenceableBytes(const DataLayout &DL,
847 bool &CanBeNull,
848 bool &CanBeFreed) const {
849 assert(getType()->isPointerTy() && "must be pointer");
851 uint64_t DerefBytes = 0;
852 CanBeNull = false;
853 CanBeFreed = UseDerefAtPointSemantics && canBeFreed();
854 if (const Argument *A = dyn_cast<Argument>(this)) {
855 DerefBytes = A->getDereferenceableBytes();
856 if (DerefBytes == 0) {
857 // Handle byval/byref/inalloca/preallocated arguments
858 if (Type *ArgMemTy = A->getPointeeInMemoryValueType()) {
859 if (ArgMemTy->isSized()) {
860 // FIXME: Why isn't this the type alloc size?
861 DerefBytes = DL.getTypeStoreSize(ArgMemTy).getKnownMinValue();
866 if (DerefBytes == 0) {
867 DerefBytes = A->getDereferenceableOrNullBytes();
868 CanBeNull = true;
870 } else if (const auto *Call = dyn_cast<CallBase>(this)) {
871 DerefBytes = Call->getRetDereferenceableBytes();
872 if (DerefBytes == 0) {
873 DerefBytes = Call->getRetDereferenceableOrNullBytes();
874 CanBeNull = true;
876 } else if (const LoadInst *LI = dyn_cast<LoadInst>(this)) {
877 if (MDNode *MD = LI->getMetadata(LLVMContext::MD_dereferenceable)) {
878 ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
879 DerefBytes = CI->getLimitedValue();
881 if (DerefBytes == 0) {
882 if (MDNode *MD =
883 LI->getMetadata(LLVMContext::MD_dereferenceable_or_null)) {
884 ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
885 DerefBytes = CI->getLimitedValue();
887 CanBeNull = true;
889 } else if (auto *IP = dyn_cast<IntToPtrInst>(this)) {
890 if (MDNode *MD = IP->getMetadata(LLVMContext::MD_dereferenceable)) {
891 ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
892 DerefBytes = CI->getLimitedValue();
894 if (DerefBytes == 0) {
895 if (MDNode *MD =
896 IP->getMetadata(LLVMContext::MD_dereferenceable_or_null)) {
897 ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
898 DerefBytes = CI->getLimitedValue();
900 CanBeNull = true;
902 } else if (auto *AI = dyn_cast<AllocaInst>(this)) {
903 if (!AI->isArrayAllocation()) {
904 DerefBytes =
905 DL.getTypeStoreSize(AI->getAllocatedType()).getKnownMinValue();
906 CanBeNull = false;
907 CanBeFreed = false;
909 } else if (auto *GV = dyn_cast<GlobalVariable>(this)) {
910 if (GV->getValueType()->isSized() && !GV->hasExternalWeakLinkage()) {
911 // TODO: Don't outright reject hasExternalWeakLinkage but set the
912 // CanBeNull flag.
913 DerefBytes = DL.getTypeStoreSize(GV->getValueType()).getFixedValue();
914 CanBeNull = false;
915 CanBeFreed = false;
918 return DerefBytes;
/// Determine the best-known alignment of this pointer value.
///
/// Inspects the kinds of values that can carry alignment facts — global
/// objects (including functions), arguments, allocas, call return values,
/// loads with !align metadata, and constant pointers — and returns the
/// strongest alignment that can be proven. Falls back to Align(1) when
/// nothing better is known.
Align Value::getPointerAlignment(const DataLayout &DL) const {
  assert(getType()->isPointerTy() && "must be pointer");
  if (auto *GO = dyn_cast<GlobalObject>(this)) {
    if (isa<Function>(GO)) {
      // Function pointer alignment is a target property, not a per-function
      // one; consult the DataLayout's function-pointer alignment policy.
      Align FunctionPtrAlign = DL.getFunctionPtrAlign().valueOrOne();
      switch (DL.getFunctionPtrAlignType()) {
      case DataLayout::FunctionPtrAlignType::Independent:
        return FunctionPtrAlign;
      case DataLayout::FunctionPtrAlignType::MultipleOfFunctionAlign:
        return std::max(FunctionPtrAlign, GO->getAlign().valueOrOne());
      }
      llvm_unreachable("Unhandled FunctionPtrAlignType");
    }
    const MaybeAlign Alignment(GO->getAlign());
    if (!Alignment) {
      if (auto *GVar = dyn_cast<GlobalVariable>(GO)) {
        Type *ObjectType = GVar->getValueType();
        if (ObjectType->isSized()) {
          // If the object is defined in the current Module, we'll be giving
          // it the preferred alignment. Otherwise, we have to assume that it
          // may only have the minimum ABI alignment.
          if (GVar->isStrongDefinitionForLinker())
            return DL.getPreferredAlign(GVar);
          else
            return DL.getABITypeAlign(ObjectType);
        }
      }
    }
    return Alignment.valueOrOne();
  } else if (const Argument *A = dyn_cast<Argument>(this)) {
    const MaybeAlign Alignment = A->getParamAlign();
    if (!Alignment && A->hasStructRetAttr()) {
      // An sret parameter has at least the ABI alignment of the return type.
      Type *EltTy = A->getParamStructRetType();
      if (EltTy->isSized())
        return DL.getABITypeAlign(EltTy);
    }
    return Alignment.valueOrOne();
  } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(this)) {
    return AI->getAlign();
  } else if (const auto *Call = dyn_cast<CallBase>(this)) {
    // Prefer an alignment attribute on the call site; otherwise fall back to
    // the return-value alignment declared on the callee, if there is one.
    MaybeAlign Alignment = Call->getRetAlign();
    if (!Alignment && Call->getCalledFunction())
      Alignment = Call->getCalledFunction()->getAttributes().getRetAlignment();
    return Alignment.valueOrOne();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(this)) {
    // !align metadata on a load of a pointer asserts the loaded value's
    // alignment.
    if (MDNode *MD = LI->getMetadata(LLVMContext::MD_align)) {
      ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
      return Align(CI->getLimitedValue());
    }
  } else if (auto *CstPtr = dyn_cast<Constant>(this)) {
    // Strip pointer casts to avoid creating unnecessary ptrtoint expression
    // if the only "reduction" is combining a bitcast + ptrtoint.
    CstPtr = CstPtr->stripPointerCasts();
    if (auto *CstInt = dyn_cast_or_null<ConstantInt>(ConstantExpr::getPtrToInt(
            const_cast<Constant *>(CstPtr), DL.getIntPtrType(getType()),
            /*OnlyIfReduced=*/true))) {
      // A constant integer address is aligned to the largest power of two
      // that divides it, i.e. 1 << (number of trailing zero bits).
      size_t TrailingZeros = CstInt->getValue().countr_zero();
      // While the actual alignment may be large, elsewhere we have
      // an arbitrary upper alignment limit, so let's clamp to it.
      return Align(TrailingZeros < Value::MaxAlignmentExponent
                       ? uint64_t(1) << TrailingZeros
                       : Value::MaximumAlignment);
    }
  }
  return Align(1);
}
989 static std::optional<int64_t>
990 getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL) {
991 // Skip over the first indices.
992 gep_type_iterator GTI = gep_type_begin(GEP);
993 for (unsigned i = 1; i != Idx; ++i, ++GTI)
994 /*skip along*/;
996 // Compute the offset implied by the rest of the indices.
997 int64_t Offset = 0;
998 for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
999 ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
1000 if (!OpC)
1001 return std::nullopt;
1002 if (OpC->isZero())
1003 continue; // No offset.
1005 // Handle struct indices, which add their field offset to the pointer.
1006 if (StructType *STy = GTI.getStructTypeOrNull()) {
1007 Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
1008 continue;
1011 // Otherwise, we have a sequential type like an array or fixed-length
1012 // vector. Multiply the index by the ElementSize.
1013 TypeSize Size = DL.getTypeAllocSize(GTI.getIndexedType());
1014 if (Size.isScalable())
1015 return std::nullopt;
1016 Offset += Size.getFixedValue() * OpC->getSExtValue();
1019 return Offset;
/// Compute the constant byte distance (this - Other), if it can be proven.
///
/// Handles two cases: (1) both pointers strip (with constant offsets) down to
/// the same base value, and (2) both are GEPs off the same base with the same
/// source element type, sharing a common index prefix followed by
/// all-constant indices. Returns std::nullopt otherwise.
std::optional<int64_t> Value::getPointerOffsetFrom(const Value *Other,
                                                   const DataLayout &DL) const {
  const Value *Ptr1 = Other;
  const Value *Ptr2 = this;
  APInt Offset1(DL.getIndexTypeSizeInBits(Ptr1->getType()), 0);
  APInt Offset2(DL.getIndexTypeSizeInBits(Ptr2->getType()), 0);
  Ptr1 = Ptr1->stripAndAccumulateConstantOffsets(DL, Offset1, true);
  Ptr2 = Ptr2->stripAndAccumulateConstantOffsets(DL, Offset2, true);

  // Handle the trivial case first.
  if (Ptr1 == Ptr2)
    return Offset2.getSExtValue() - Offset1.getSExtValue();

  const GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
  const GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);

  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
  // base.  After that base, they may have some number of common (and
  // potentially variable) indices.  After that they handle some constant
  // offset, which determines their offset from each other.  At this point, we
  // handle no other case.
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0) ||
      GEP1->getSourceElementType() != GEP2->getSourceElementType())
    return std::nullopt;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  // The remaining (differing) indices must be all-constant on both sides.
  auto IOffset1 = getOffsetFromIndex(GEP1, Idx, DL);
  auto IOffset2 = getOffsetFromIndex(GEP2, Idx, DL);
  if (!IOffset1 || !IOffset2)
    return std::nullopt;
  return *IOffset2 - *IOffset1 + Offset2.getSExtValue() -
         Offset1.getSExtValue();
}
1061 const Value *Value::DoPHITranslation(const BasicBlock *CurBB,
1062 const BasicBlock *PredBB) const {
1063 auto *PN = dyn_cast<PHINode>(this);
1064 if (PN && PN->getParent() == CurBB)
1065 return PN->getIncomingValueForBlock(PredBB);
1066 return this;
1069 LLVMContext &Value::getContext() const { return VTy->getContext(); }
/// Reverse this value's use list in place.
///
/// The use list is an intrusive singly-linked list where each Use's Prev
/// field points at the *slot* that holds it (either the previous node's Next
/// field or the value's UseList head). Both Next and Prev must therefore be
/// rewired for every node.
void Value::reverseUseList() {
  if (!UseList || !UseList->Next)
    // No need to reverse 0 or 1 uses.
    return;

  // Classic in-place reversal: pop nodes off the remainder of the list
  // (Current) and push them onto the front of the reversed prefix (Head).
  Use *Head = UseList;
  Use *Current = UseList->Next;
  Head->Next = nullptr;
  while (Current) {
    Use *Next = Current->Next;
    Current->Next = Head;
    // Head is now held by Current's Next slot.
    Head->Prev = &Current->Next;
    Head = Current;
    Current = Next;
  }
  UseList = Head;
  // The new front of the list is held by the UseList head slot itself.
  Head->Prev = &UseList;
}
1090 bool Value::isSwiftError() const {
1091 auto *Arg = dyn_cast<Argument>(this);
1092 if (Arg)
1093 return Arg->hasSwiftErrorAttr();
1094 auto *Alloca = dyn_cast<AllocaInst>(this);
1095 if (!Alloca)
1096 return false;
1097 return Alloca->isSwiftError();
1100 //===----------------------------------------------------------------------===//
1101 // ValueHandleBase Class
1102 //===----------------------------------------------------------------------===//
/// Add this handle to the front of the use list rooted at *List.
///
/// Handle lists use the same slot-pointer scheme as Use lists: each node's
/// Prev points at the pointer that holds it, so the new front's Prev is the
/// list head slot and the old front's Prev becomes our Next field.
void ValueHandleBase::AddToExistingUseList(ValueHandleBase **List) {
  assert(List && "Handle list is null?");

  // Splice ourselves into the list.
  Next = *List;
  *List = this;
  setPrevPtr(List);
  if (Next) {
    Next->setPrevPtr(&Next);
    // All handles on one list must watch the same Value.
    assert(getValPtr() == Next->getValPtr() && "Added to wrong list?");
  }
}
/// Insert this handle into a use list immediately after the given node
/// (used by the deletion/RAUW iteration below to re-link the iterator).
void ValueHandleBase::AddToExistingUseListAfter(ValueHandleBase *List) {
  assert(List && "Must insert after existing node");

  Next = List->Next;
  // We are held by List's Next slot.
  setPrevPtr(&List->Next);
  List->Next = this;
  if (Next)
    Next->setPrevPtr(&Next);
}
/// Register this handle on its value's handle list, creating the list (and
/// the per-context ValueHandles map entry) if this is the first handle.
void ValueHandleBase::AddToUseList() {
  assert(getValPtr() && "Null pointer doesn't have a use list!");

  LLVMContextImpl *pImpl = getValPtr()->getContext().pImpl;

  if (getValPtr()->HasValueHandle) {
    // If this value already has a ValueHandle, then it must be in the
    // ValueHandles map already.
    ValueHandleBase *&Entry = pImpl->ValueHandles[getValPtr()];
    assert(Entry && "Value doesn't have any handles?");
    AddToExistingUseList(&Entry);
    return;
  }

  // Ok, it doesn't have any handles yet, so we must insert it into the
  // DenseMap.  However, doing this insertion could cause the DenseMap to
  // reallocate itself, which would invalidate all of the PrevP pointers that
  // point into the old table.  Handle this by checking for reallocation and
  // updating the stale pointers only if needed.
  DenseMap<Value*, ValueHandleBase*> &Handles = pImpl->ValueHandles;
  const void *OldBucketPtr = Handles.getPointerIntoBucketsArray();

  // operator[] may grow the map; Entry references the (possibly new) bucket.
  ValueHandleBase *&Entry = Handles[getValPtr()];
  assert(!Entry && "Value really did already have handles?");
  AddToExistingUseList(&Entry);
  getValPtr()->HasValueHandle = true;

  // If reallocation didn't happen or if this was the first insertion, don't
  // walk the table.
  if (Handles.isPointerIntoBucketsArray(OldBucketPtr) ||
      Handles.size() == 1) {
    return;
  }

  // Okay, reallocation did happen.  Fix the Prev Pointers.
  for (DenseMap<Value*, ValueHandleBase*>::iterator I = Handles.begin(),
       E = Handles.end(); I != E; ++I) {
    assert(I->second && I->first == I->second->getValPtr() &&
           "List invariant broken!");
    // Re-point each list head's Prev at its (moved) bucket slot.
    I->second->setPrevPtr(&I->second);
  }
}
/// Unregister this handle from its value's handle list, erasing the map
/// entry (and clearing HasValueHandle) when the list becomes empty.
void ValueHandleBase::RemoveFromUseList() {
  assert(getValPtr() && getValPtr()->HasValueHandle &&
         "Pointer doesn't have a use list!");

  // Unlink this from its use list.
  ValueHandleBase **PrevPtr = getPrevPtr();
  assert(*PrevPtr == this && "List invariant broken");

  *PrevPtr = Next;
  if (Next) {
    assert(Next->getPrevPtr() == &Next && "List invariant broken");
    Next->setPrevPtr(PrevPtr);
    return;
  }

  // If the Next pointer was null, then it is possible that this was the last
  // ValueHandle watching VP.  If so, delete its entry from the ValueHandles
  // map.
  LLVMContextImpl *pImpl = getValPtr()->getContext().pImpl;
  DenseMap<Value*, ValueHandleBase*> &Handles = pImpl->ValueHandles;
  // PrevPtr pointing into the bucket array means we were the list head, so
  // the list is now empty and the entry can be dropped.
  if (Handles.isPointerIntoBucketsArray(PrevPtr)) {
    Handles.erase(getValPtr());
    getValPtr()->HasValueHandle = false;
  }
}
/// Notify all handles watching V that V is being deleted, dropping or
/// calling back each handle as appropriate for its kind.
void ValueHandleBase::ValueIsDeleted(Value *V) {
  assert(V->HasValueHandle && "Should only be called if ValueHandles present");

  // Get the linked list base, which is guaranteed to exist since the
  // HasValueHandle flag is set.
  LLVMContextImpl *pImpl = V->getContext().pImpl;
  ValueHandleBase *Entry = pImpl->ValueHandles[V];
  assert(Entry && "Value bit set but no entries exist");

  // We use a local ValueHandleBase as an iterator so that ValueHandles can add
  // and remove themselves from the list without breaking our iteration.  This
  // is not really an AssertingVH; we just have to give ValueHandleBase a kind.
  // Note that we deliberately do not support the case when dropping a value
  // handle results in a new value handle being permanently added to the list
  // (as might occur in theory for CallbackVH's): the new value handle will not
  // be processed and the checking code will mete out righteous punishment if
  // the handle is still present once we have finished processing all the other
  // value handles (it is fine to momentarily add then remove a value handle).
  for (ValueHandleBase Iterator(Assert, *Entry); Entry; Entry = Iterator.Next) {
    // Park the iterator immediately after Entry so we can find the next
    // handle even if processing Entry unlinks it.
    Iterator.RemoveFromUseList();
    Iterator.AddToExistingUseListAfter(Entry);
    assert(Entry->Next == &Iterator && "Loop invariant broken.");

    switch (Entry->getKind()) {
    case Assert:
      break;
    case Weak:
    case WeakTracking:
      // WeakTracking and Weak just go to null, which unlinks them
      // from the list.
      Entry->operator=(nullptr);
      break;
    case Callback:
      // Forward to the subclass's implementation.
      static_cast<CallbackVH*>(Entry)->deleted();
      break;
    }
  }

  // All callbacks, weak references, and assertingVHs should be dropped by now.
  if (V->HasValueHandle) {
#ifndef NDEBUG      // Only in +Asserts mode...
    dbgs() << "While deleting: " << *V->getType() << " %" << V->getName()
           << "\n";
    if (pImpl->ValueHandles[V]->getKind() == Assert)
      llvm_unreachable("An asserting value handle still pointed to this"
                       " value!");

#endif
    llvm_unreachable("All references to V were not removed?");
  }
}
/// Notify all handles watching Old that it is being replaced by New
/// (replaceAllUsesWith); tracking handles follow to New, callbacks fire.
void ValueHandleBase::ValueIsRAUWd(Value *Old, Value *New) {
  assert(Old->HasValueHandle &&"Should only be called if ValueHandles present");
  assert(Old != New && "Changing value into itself!");
  assert(Old->getType() == New->getType() &&
         "replaceAllUses of value with new value of different type!");

  // Get the linked list base, which is guaranteed to exist since the
  // HasValueHandle flag is set.
  LLVMContextImpl *pImpl = Old->getContext().pImpl;
  ValueHandleBase *Entry = pImpl->ValueHandles[Old];

  assert(Entry && "Value bit set but no entries exist");

  // We use a local ValueHandleBase as an iterator so that
  // ValueHandles can add and remove themselves from the list without
  // breaking our iteration.  This is not really an AssertingVH; we
  // just have to give ValueHandleBase some kind.
  for (ValueHandleBase Iterator(Assert, *Entry); Entry; Entry = Iterator.Next) {
    // Park the iterator immediately after Entry so the walk survives Entry
    // unlinking itself (e.g. when a WeakTrackingVH moves to New's list).
    Iterator.RemoveFromUseList();
    Iterator.AddToExistingUseListAfter(Entry);
    assert(Entry->Next == &Iterator && "Loop invariant broken.");

    switch (Entry->getKind()) {
    case Assert:
    case Weak:
      // Asserting and Weak handles do not follow RAUW implicitly.
      break;
    case WeakTracking:
      // Weak goes to the new value, which will unlink it from Old's list.
      Entry->operator=(New);
      break;
    case Callback:
      // Forward to the subclass's implementation.
      static_cast<CallbackVH*>(Entry)->allUsesReplacedWith(New);
      break;
    }
  }

#ifndef NDEBUG
  // If any new weak value handles were added while processing the
  // list, then complain about it now.
  if (Old->HasValueHandle)
    for (Entry = pImpl->ValueHandles[Old]; Entry; Entry = Entry->Next)
      switch (Entry->getKind()) {
      case WeakTracking:
        dbgs() << "After RAUW from " << *Old->getType() << " %"
               << Old->getName() << " to " << *New->getType() << " %"
               << New->getName() << "\n";
        llvm_unreachable(
            "A weak tracking value handle still pointed to the old value!\n");
      default:
        break;
      }
#endif
}
// Pin the vtable to this file: defining one virtual method out-of-line gives
// the compiler a single home translation unit for CallbackVH's vtable.
void CallbackVH::anchor() {}