//===-- Value.cpp - Implement the Value class -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Value, ValueHandle, and User classes.
//
//===----------------------------------------------------------------------===//
13 #include "llvm/IR/Value.h"
14 #include "LLVMContextImpl.h"
15 #include "llvm/ADT/DenseMap.h"
16 #include "llvm/ADT/SmallString.h"
17 #include "llvm/IR/Constant.h"
18 #include "llvm/IR/Constants.h"
19 #include "llvm/IR/DataLayout.h"
20 #include "llvm/IR/DebugInfo.h"
21 #include "llvm/IR/DerivedTypes.h"
22 #include "llvm/IR/DerivedUser.h"
23 #include "llvm/IR/GetElementPtrTypeIterator.h"
24 #include "llvm/IR/InstrTypes.h"
25 #include "llvm/IR/Instructions.h"
26 #include "llvm/IR/IntrinsicInst.h"
27 #include "llvm/IR/Module.h"
28 #include "llvm/IR/Operator.h"
29 #include "llvm/IR/TypedPointerType.h"
30 #include "llvm/IR/ValueHandle.h"
31 #include "llvm/IR/ValueSymbolTable.h"
32 #include "llvm/Support/CommandLine.h"
33 #include "llvm/Support/ErrorHandling.h"
34 #include "llvm/Support/raw_ostream.h"

static cl::opt<unsigned> UseDerefAtPointSemantics(
    "use-dereferenceable-at-point-semantics", cl::Hidden, cl::init(false),
    cl::desc("Deref attributes and metadata infer facts at definition only"));

//===----------------------------------------------------------------------===//
//                                Value Class
//===----------------------------------------------------------------------===//
static inline Type *checkType(Type *Ty) {
  assert(Ty && "Value defined with a null type: Error!");
  assert(!isa<TypedPointerType>(Ty->getScalarType()) &&
         "Cannot have values with typed pointer types");
  return Ty;
}

Value::Value(Type *ty, unsigned scid)
    : SubclassID(scid), HasValueHandle(0), SubclassOptionalData(0),
      SubclassData(0), NumUserOperands(0), IsUsedByMD(false), HasName(false),
      HasMetadata(false), VTy(checkType(ty)), UseList(nullptr) {
  static_assert(ConstantFirstVal == 0, "!(SubclassID < ConstantFirstVal)");
  // FIXME: Why isn't this in the subclass gunk??
  // Note, we cannot call isa<CallInst> before the CallInst has been
  // constructed.
  unsigned OpCode = 0;
  if (SubclassID >= InstructionVal)
    OpCode = SubclassID - InstructionVal;
  if (OpCode == Instruction::Call || OpCode == Instruction::Invoke ||
      OpCode == Instruction::CallBr)
    assert((VTy->isFirstClassType() || VTy->isVoidTy() || VTy->isStructTy()) &&
           "invalid CallBase type!");
  else if (SubclassID != BasicBlockVal &&
           (/*SubclassID < ConstantFirstVal ||*/ SubclassID > ConstantLastVal))
    assert((VTy->isFirstClassType() || VTy->isVoidTy()) &&
           "Cannot create non-first-class values except for constants!");
  static_assert(sizeof(Value) == 2 * sizeof(void *) + 2 * sizeof(unsigned),
                "Value too big");
}

Value::~Value() {
  // Notify all ValueHandles (if present) that this value is going away.
  if (HasValueHandle)
    ValueHandleBase::ValueIsDeleted(this);
  if (isUsedByMetadata())
    ValueAsMetadata::handleDeletion(this);

  // Remove associated metadata from context.
  if (HasMetadata)
    clearMetadata();

#ifndef NDEBUG // Only in -g mode...
  // Check to make sure that there are no uses of this value that are still
  // around when the value is destroyed. If there are, then we have a dangling
  // reference and something is wrong. This code is here to print out where
  // the value is still being referenced.
  //
  // Note that use_empty() cannot be called here, as it eventually downcasts
  // 'this' to GlobalValue (derived class of Value), but GlobalValue has already
  // been destructed, so accessing it is UB.
  if (!materialized_use_empty()) {
    dbgs() << "While deleting: " << *VTy << " %" << getName() << "\n";
    for (auto *U : users())
      dbgs() << "Use still stuck around after Def is destroyed:" << *U << "\n";
  }
#endif
  assert(materialized_use_empty() && "Uses remain when a value is destroyed!");

  // If this value is named, destroy the name. This should not be in a symtab
  // at this point.
  destroyValueName();
}

void Value::deleteValue() {
  switch (getValueID()) {
#define HANDLE_VALUE(Name)                                                     \
  case Value::Name##Val:                                                       \
    delete static_cast<Name *>(this);                                          \
    break;
#define HANDLE_MEMORY_VALUE(Name)                                              \
  case Value::Name##Val:                                                       \
    static_cast<DerivedUser *>(this)->DeleteValue(                             \
        static_cast<DerivedUser *>(this));                                     \
    break;
#define HANDLE_CONSTANT(Name)                                                  \
  case Value::Name##Val:                                                       \
    llvm_unreachable("constants should be destroyed with destroyConstant");    \
    break;
#define HANDLE_INSTRUCTION(Name) /* nothing */
#include "llvm/IR/Value.def"

#define HANDLE_INST(N, OPC, CLASS)                                             \
  case Value::InstructionVal + Instruction::OPC:                               \
    delete static_cast<CLASS *>(this);                                         \
    break;
#define HANDLE_USER_INST(N, OPC, CLASS)
#include "llvm/IR/Instruction.def"

  default:
    llvm_unreachable("attempting to delete unknown value kind");
  }
}

void Value::destroyValueName() {
  ValueName *Name = getValueName();
  if (Name) {
    MallocAllocator Allocator;
    Name->Destroy(Allocator);
  }
  setValueName(nullptr);
}

bool Value::hasNUses(unsigned N) const {
  return hasNItems(use_begin(), use_end(), N);
}

bool Value::hasNUsesOrMore(unsigned N) const {
  return hasNItemsOrMore(use_begin(), use_end(), N);
}

bool Value::hasOneUser() const {
  if (use_empty())
    return false;
  if (hasOneUse())
    return true;
  return std::equal(++user_begin(), user_end(), user_begin());
}

static bool isUnDroppableUser(const User *U) { return !U->isDroppable(); }

Use *Value::getSingleUndroppableUse() {
  Use *Result = nullptr;
  for (Use &U : uses()) {
    if (!U.getUser()->isDroppable()) {
      if (Result)
        return nullptr;
      Result = &U;
    }
  }
  return Result;
}

User *Value::getUniqueUndroppableUser() {
  User *Result = nullptr;
  for (auto *U : users()) {
    if (!U->isDroppable()) {
      if (Result && Result != U)
        return nullptr;
      Result = U;
    }
  }
  return Result;
}

bool Value::hasNUndroppableUses(unsigned int N) const {
  return hasNItems(user_begin(), user_end(), N, isUnDroppableUser);
}

bool Value::hasNUndroppableUsesOrMore(unsigned int N) const {
  return hasNItemsOrMore(user_begin(), user_end(), N, isUnDroppableUser);
}

void Value::dropDroppableUses(
    llvm::function_ref<bool(const Use *)> ShouldDrop) {
  SmallVector<Use *, 8> ToBeEdited;
  for (Use &U : uses())
    if (U.getUser()->isDroppable() && ShouldDrop(&U))
      ToBeEdited.push_back(&U);
  for (Use *U : ToBeEdited)
    dropDroppableUse(*U);
}

void Value::dropDroppableUsesIn(User &Usr) {
  assert(Usr.isDroppable() && "Expected a droppable user!");
  for (Use &UsrOp : Usr.operands()) {
    if (UsrOp.get() == this)
      dropDroppableUse(UsrOp);
  }
}

void Value::dropDroppableUse(Use &U) {
  U.removeFromList();
  if (auto *Assume = dyn_cast<AssumeInst>(U.getUser())) {
    unsigned OpNo = U.getOperandNo();
    if (OpNo == 0)
      U.set(ConstantInt::getTrue(Assume->getContext()));
    else {
      U.set(UndefValue::get(U.get()->getType()));
      CallInst::BundleOpInfo &BOI = Assume->getBundleOpInfoForOperand(OpNo);
      BOI.Tag = Assume->getContext().pImpl->getOrInsertBundleTag("ignore");
    }
    return;
  }

  llvm_unreachable("unknown droppable use");
}

bool Value::isUsedInBasicBlock(const BasicBlock *BB) const {
  // This can be computed either by scanning the instructions in BB, or by
  // scanning the use list of this Value. Both lists can be very long, but
  // usually one is quite short.
  //
  // Scan both lists simultaneously until one is exhausted. This limits the
  // search to the shorter list.
  BasicBlock::const_iterator BI = BB->begin(), BE = BB->end();
  const_user_iterator UI = user_begin(), UE = user_end();
  for (; BI != BE && UI != UE; ++BI, ++UI) {
    // Scan basic block: Check if this Value is used by the instruction at BI.
    if (is_contained(BI->operands(), this))
      return true;
    // Scan use list: Check if the use at UI is in BB.
    const auto *User = dyn_cast<Instruction>(*UI);
    if (User && User->getParent() == BB)
      return true;
  }
  return false;
}

unsigned Value::getNumUses() const {
  return (unsigned)std::distance(use_begin(), use_end());
}

static bool getSymTab(Value *V, ValueSymbolTable *&ST) {
  ST = nullptr;
  if (Instruction *I = dyn_cast<Instruction>(V)) {
    if (BasicBlock *P = I->getParent())
      if (Function *PP = P->getParent())
        ST = PP->getValueSymbolTable();
  } else if (BasicBlock *BB = dyn_cast<BasicBlock>(V)) {
    if (Function *P = BB->getParent())
      ST = P->getValueSymbolTable();
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    if (Module *P = GV->getParent())
      ST = &P->getValueSymbolTable();
  } else if (Argument *A = dyn_cast<Argument>(V)) {
    if (Function *P = A->getParent())
      ST = P->getValueSymbolTable();
  } else {
    assert(isa<Constant>(V) && "Unknown value type!");
    return true; // no name is settable for this.
  }
  return false;
}

ValueName *Value::getValueName() const {
  if (!HasName)
    return nullptr;

  LLVMContext &Ctx = getContext();
  auto I = Ctx.pImpl->ValueNames.find(this);
  assert(I != Ctx.pImpl->ValueNames.end() &&
         "No name entry found!");

  return I->second;
}

void Value::setValueName(ValueName *VN) {
  LLVMContext &Ctx = getContext();

  assert(HasName == Ctx.pImpl->ValueNames.count(this) &&
         "HasName bit out of sync!");

  if (!VN) {
    if (HasName)
      Ctx.pImpl->ValueNames.erase(this);
    HasName = false;
    return;
  }

  HasName = true;
  Ctx.pImpl->ValueNames[this] = VN;
}

StringRef Value::getName() const {
  // Make sure the empty string is still a C string. For historical reasons,
  // some clients want to call .data() on the result and expect it to be null
  // terminated.
  if (!hasName())
    return StringRef("", 0);
  return getValueName()->getKey();
}

void Value::setNameImpl(const Twine &NewName) {
  bool NeedNewName =
      !getContext().shouldDiscardValueNames() || isa<GlobalValue>(this);

  // Fast-path: LLVMContext can be set to strip out non-GlobalValue names
  // and there is no need to delete the old name.
  if (!NeedNewName && !hasName())
    return;

  // Fast path for common IRBuilder case of setName("") when there is no name.
  if (NewName.isTriviallyEmpty() && !hasName())
    return;

  SmallString<256> NameData;
  StringRef NameRef = NeedNewName ? NewName.toStringRef(NameData) : "";
  assert(!NameRef.contains(0) && "Null bytes are not allowed in names");

  // Name isn't changing?
  if (getName() == NameRef)
    return;

  assert(!getType()->isVoidTy() && "Cannot assign a name to void values!");

  // Get the symbol table to update for this object.
  ValueSymbolTable *ST;
  if (getSymTab(this, ST))
    return; // Cannot set a name on this value (e.g. constant).

  if (!ST) { // No symbol table to update? Just do the change.
    // NOTE: Could optimize for the case the name is shrinking to not deallocate
    // and reallocate the name data.
    destroyValueName();

    if (!NameRef.empty()) {
      // Create the new name.
      MallocAllocator Allocator;
      setValueName(ValueName::create(NameRef, Allocator));
      getValueName()->setValue(this);
    }
    return;
  }

  // NOTE: Could optimize for the case the name is shrinking to not deallocate
  // and reallocate the name data.
  if (hasName()) {
    // Remove the old name.
    ST->removeValueName(getValueName());
    destroyValueName();

    if (NameRef.empty())
      return;
  }

  // Name is changing to something new.
  setValueName(ST->createValueName(NameRef, this));
}

void Value::setName(const Twine &NewName) {
  setNameImpl(NewName);
  if (Function *F = dyn_cast<Function>(this))
    F->updateAfterNameChange();
}

void Value::takeName(Value *V) {
  assert(V != this && "Illegal call to this->takeName(this)!");
  ValueSymbolTable *ST = nullptr;
  // If this value has a name, drop it.
  if (hasName()) {
    // Get the symtab this is in.
    if (getSymTab(this, ST)) {
      // We can't set a name on this value, but we need to clear V's name if
      // it has one.
      if (V->hasName()) V->setName("");
      return; // Cannot set a name on this value (e.g. constant).
    }

    if (ST)
      ST->removeValueName(getValueName());
    destroyValueName();
  }

  // Now we know that this has no name.

  // If V has no name either, we're done.
  if (!V->hasName()) return;

  // Get this's symtab if we didn't before.
  if (!ST) {
    if (getSymTab(this, ST)) {
      // Clear V's name; we cannot take a name onto this value.
      if (V->hasName()) V->setName("");
      return; // Cannot set a name on this value (e.g. constant).
    }
  }

  // Get V's ST, this should always succeed, because V has a name.
  ValueSymbolTable *VST;
  bool Failure = getSymTab(V, VST);
  assert(!Failure && "V has a name, so it should have a ST!"); (void)Failure;

  // If these values are both in the same symtab, we can do this very fast.
  // This works even if both values have no symtab yet.
  if (ST == VST) {
    // Take the name!
    setValueName(V->getValueName());
    V->setValueName(nullptr);
    getValueName()->setValue(this);
    return;
  }

  // Otherwise, things are slightly more complex. Remove V's name from VST and
  // then reinsert it into ST.
  if (VST)
    VST->removeValueName(V->getValueName());
  setValueName(V->getValueName());
  V->setValueName(nullptr);
  getValueName()->setValue(this);

  if (ST)
    ST->reinsertValue(this);
}

std::string Value::getNameOrAsOperand() const {
  if (!getName().empty())
    return std::string(getName());

  std::string BBName;
  raw_string_ostream OS(BBName);
  printAsOperand(OS, false);
  return OS.str();
}

void Value::assertModuleIsMaterializedImpl() const {
#ifndef NDEBUG
  const GlobalValue *GV = dyn_cast<GlobalValue>(this);
  if (!GV)
    return;
  const Module *M = GV->getParent();
  if (!M)
    return;
  assert(M->isMaterialized());
#endif
}

#ifndef NDEBUG
static bool contains(SmallPtrSetImpl<ConstantExpr *> &Cache, ConstantExpr *Expr,
                     Constant *C) {
  if (!Cache.insert(Expr).second)
    return false;

  for (auto &O : Expr->operands()) {
    if (O == C)
      return true;
    auto *CE = dyn_cast<ConstantExpr>(O);
    if (!CE)
      continue;
    if (contains(Cache, CE, C))
      return true;
  }
  return false;
}

static bool contains(Value *Expr, Value *V) {
  if (Expr == V)
    return true;

  auto *C = dyn_cast<Constant>(V);
  if (!C)
    return false;

  auto *CE = dyn_cast<ConstantExpr>(Expr);
  if (!CE)
    return false;

  SmallPtrSet<ConstantExpr *, 4> Cache;
  return contains(Cache, CE, C);
}
#endif // NDEBUG

void Value::doRAUW(Value *New, ReplaceMetadataUses ReplaceMetaUses) {
  assert(New && "Value::replaceAllUsesWith(<null>) is invalid!");
  assert(!contains(New, this) &&
         "this->replaceAllUsesWith(expr(this)) is NOT valid!");
  assert(New->getType() == getType() &&
         "replaceAllUses of value with new value of different type!");

  // Notify all ValueHandles (if present) that this value is going away.
  if (HasValueHandle)
    ValueHandleBase::ValueIsRAUWd(this, New);
  if (ReplaceMetaUses == ReplaceMetadataUses::Yes && isUsedByMetadata())
    ValueAsMetadata::handleRAUW(this, New);

  while (!materialized_use_empty()) {
    Use &U = *UseList;
    // Must handle Constants specially, we cannot call replaceUsesOfWith on a
    // constant because they are uniqued.
    if (auto *C = dyn_cast<Constant>(U.getUser())) {
      if (!isa<GlobalValue>(C)) {
        C->handleOperandChange(this, New);
        continue;
      }
    }

    U.set(New);
  }

  if (BasicBlock *BB = dyn_cast<BasicBlock>(this))
    BB->replaceSuccessorsPhiUsesWith(cast<BasicBlock>(New));
}

void Value::replaceAllUsesWith(Value *New) {
  doRAUW(New, ReplaceMetadataUses::Yes);
}

void Value::replaceNonMetadataUsesWith(Value *New) {
  doRAUW(New, ReplaceMetadataUses::No);
}

void Value::replaceUsesWithIf(Value *New,
                              llvm::function_ref<bool(Use &U)> ShouldReplace) {
  assert(New && "Value::replaceUsesWithIf(<null>) is invalid!");
  assert(New->getType() == getType() &&
         "replaceUses of value with new value of different type!");

  SmallVector<TrackingVH<Constant>, 8> Consts;
  SmallPtrSet<Constant *, 8> Visited;

  for (Use &U : llvm::make_early_inc_range(uses())) {
    if (!ShouldReplace(U))
      continue;
    // Must handle Constants specially, we cannot call replaceUsesOfWith on a
    // constant because they are uniqued.
    if (auto *C = dyn_cast<Constant>(U.getUser())) {
      if (!isa<GlobalValue>(C)) {
        if (Visited.insert(C).second)
          Consts.push_back(TrackingVH<Constant>(C));
        continue;
      }
    }
    U.set(New);
  }

  while (!Consts.empty()) {
    // FIXME: handleOperandChange() updates all the uses in a given Constant,
    // not just the one passed to ShouldReplace.
    Consts.pop_back_val()->handleOperandChange(this, New);
  }
}

/// Replace llvm.dbg.* uses of MetadataAsValue(ValueAsMetadata(V)) outside BB
/// with New.
static void replaceDbgUsesOutsideBlock(Value *V, Value *New, BasicBlock *BB) {
  SmallVector<DbgVariableIntrinsic *> DbgUsers;
  SmallVector<DPValue *> DPUsers;
  findDbgUsers(DbgUsers, V, &DPUsers);
  for (auto *DVI : DbgUsers) {
    if (DVI->getParent() != BB)
      DVI->replaceVariableLocationOp(V, New);
  }
  for (auto *DPV : DPUsers) {
    DPMarker *Marker = DPV->getMarker();
    if (Marker->getParent() != BB)
      DPV->replaceVariableLocationOp(V, New);
  }
}

// Like replaceAllUsesWith except it does not handle constants or basic blocks.
// This routine leaves uses within BB.
void Value::replaceUsesOutsideBlock(Value *New, BasicBlock *BB) {
  assert(New && "Value::replaceUsesOutsideBlock(<null>, BB) is invalid!");
  assert(!contains(New, this) &&
         "this->replaceUsesOutsideBlock(expr(this), BB) is NOT valid!");
  assert(New->getType() == getType() &&
         "replaceUses of value with new value of different type!");
  assert(BB && "Basic block that may contain a use of 'New' must be defined\n");

  replaceDbgUsesOutsideBlock(this, New, BB);
  replaceUsesWithIf(New, [BB](Use &U) {
    auto *I = dyn_cast<Instruction>(U.getUser());
    // Don't replace if it's an instruction in the BB basic block.
    return !I || I->getParent() != BB;
  });
}

namespace {
// Various metrics for how much to strip off of pointers.
enum PointerStripKind {
  PSK_ZeroIndices,
  PSK_ZeroIndicesAndAliases,
  PSK_ZeroIndicesSameRepresentation,
  PSK_ForAliasAnalysis,
  PSK_InBoundsConstantIndices,
  PSK_InBounds
};

template <PointerStripKind StripKind> static void NoopCallback(const Value *) {}

template <PointerStripKind StripKind>
static const Value *stripPointerCastsAndOffsets(
    const Value *V,
    function_ref<void(const Value *)> Func = NoopCallback<StripKind>) {
  if (!V->getType()->isPointerTy())
    return V;

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<const Value *, 4> Visited;

  Visited.insert(V);
  do {
    Func(V);
    if (auto *GEP = dyn_cast<GEPOperator>(V)) {
      switch (StripKind) {
      case PSK_ZeroIndices:
      case PSK_ZeroIndicesAndAliases:
      case PSK_ZeroIndicesSameRepresentation:
      case PSK_ForAliasAnalysis:
        if (!GEP->hasAllZeroIndices())
          return V;
        break;
      case PSK_InBoundsConstantIndices:
        if (!GEP->hasAllConstantIndices())
          return V;
        [[fallthrough]];
      case PSK_InBounds:
        if (!GEP->isInBounds())
          return V;
        break;
      }
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
      if (!V->getType()->isPointerTy())
        return V;
    } else if (StripKind != PSK_ZeroIndicesSameRepresentation &&
               Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
      // TODO: If we know an address space cast will not change the
      // representation we could look through it here as well.
      V = cast<Operator>(V)->getOperand(0);
    } else if (StripKind == PSK_ZeroIndicesAndAliases && isa<GlobalAlias>(V)) {
      V = cast<GlobalAlias>(V)->getAliasee();
    } else if (StripKind == PSK_ForAliasAnalysis && isa<PHINode>(V) &&
               cast<PHINode>(V)->getNumIncomingValues() == 1) {
      V = cast<PHINode>(V)->getIncomingValue(0);
    } else {
      if (const auto *Call = dyn_cast<CallBase>(V)) {
        if (const Value *RV = Call->getReturnedArgOperand()) {
          V = RV;
          continue;
        }
        // The result of launder.invariant.group must alias its argument,
        // but it can't be marked with the returned attribute, which is why it
        // needs a special case.
        if (StripKind == PSK_ForAliasAnalysis &&
            (Call->getIntrinsicID() == Intrinsic::launder_invariant_group ||
             Call->getIntrinsicID() == Intrinsic::strip_invariant_group)) {
          V = Call->getArgOperand(0);
          continue;
        }
      }
      return V;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  } while (Visited.insert(V).second);

  return V;
}
} // end anonymous namespace

const Value *Value::stripPointerCasts() const {
  return stripPointerCastsAndOffsets<PSK_ZeroIndices>(this);
}

const Value *Value::stripPointerCastsAndAliases() const {
  return stripPointerCastsAndOffsets<PSK_ZeroIndicesAndAliases>(this);
}

const Value *Value::stripPointerCastsSameRepresentation() const {
  return stripPointerCastsAndOffsets<PSK_ZeroIndicesSameRepresentation>(this);
}

const Value *Value::stripInBoundsConstantOffsets() const {
  return stripPointerCastsAndOffsets<PSK_InBoundsConstantIndices>(this);
}

const Value *Value::stripPointerCastsForAliasAnalysis() const {
  return stripPointerCastsAndOffsets<PSK_ForAliasAnalysis>(this);
}

const Value *Value::stripAndAccumulateConstantOffsets(
    const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
    bool AllowInvariantGroup,
    function_ref<bool(Value &, APInt &)> ExternalAnalysis) const {
  if (!getType()->isPtrOrPtrVectorTy())
    return this;

  unsigned BitWidth = Offset.getBitWidth();
  assert(BitWidth == DL.getIndexTypeSizeInBits(getType()) &&
         "The offset bit width does not match the DL specification.");

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<const Value *, 4> Visited;
  Visited.insert(this);
  const Value *V = this;
  do {
    if (auto *GEP = dyn_cast<GEPOperator>(V)) {
      // If in-bounds was requested, we do not strip non-in-bounds GEPs.
      if (!AllowNonInbounds && !GEP->isInBounds())
        return V;

      // If one of the values we have visited is an addrspacecast, then
      // the pointer type of this GEP may be different from the type
      // of the Ptr parameter which was passed to this function. This
      // means when we construct GEPOffset, we need to use the size
      // of GEP's pointer type rather than the size of the original
      // pointer type.
      APInt GEPOffset(DL.getIndexTypeSizeInBits(V->getType()), 0);
      if (!GEP->accumulateConstantOffset(DL, GEPOffset, ExternalAnalysis))
        return V;

      // Stop traversal if the pointer offset wouldn't fit in the bit-width
      // provided by the Offset argument. This can happen due to AddrSpaceCast
      // stripping.
      if (GEPOffset.getSignificantBits() > BitWidth)
        return V;

      // External Analysis can return a result higher/lower than the value
      // represents. We need to detect overflow/underflow.
      APInt GEPOffsetST = GEPOffset.sextOrTrunc(BitWidth);
      if (!ExternalAnalysis) {
        Offset += GEPOffsetST;
      } else {
        bool Overflow = false;
        APInt OldOffset = Offset;
        Offset = Offset.sadd_ov(GEPOffsetST, Overflow);
        if (Overflow) {
          Offset = OldOffset;
          return V;
        }
      }
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast ||
               Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
      if (!GA->isInterposable())
        V = GA->getAliasee();
    } else if (const auto *Call = dyn_cast<CallBase>(V)) {
      if (const Value *RV = Call->getReturnedArgOperand())
        V = RV;
      if (AllowInvariantGroup && Call->isLaunderOrStripInvariantGroup())
        V = Call->getArgOperand(0);
    }
    assert(V->getType()->isPtrOrPtrVectorTy() && "Unexpected operand type!");
  } while (Visited.insert(V).second);

  return V;
}

const Value *
Value::stripInBoundsOffsets(function_ref<void(const Value *)> Func) const {
  return stripPointerCastsAndOffsets<PSK_InBounds>(this, Func);
}

bool Value::canBeFreed() const {
  assert(getType()->isPointerTy());

  // Cases that can simply never be deallocated
  // *) Constants aren't allocated per se, thus not deallocated either.
  if (isa<Constant>(this))
    return false;

  // Handle byval/byref/sret/inalloca/preallocated arguments. The storage
  // lifetime is guaranteed to be longer than the callee's lifetime.
  if (auto *A = dyn_cast<Argument>(this)) {
    if (A->hasPointeeInMemoryValueAttr())
      return false;
    // A pointer to an object in a function which neither frees, nor can arrange
    // for another thread to free on its behalf, can not be freed in the scope
    // of the function. Note that this logic is restricted to memory
    // allocations in existence before the call; a nofree function *is* allowed
    // to free memory it allocated.
    const Function *F = A->getParent();
    if (F->doesNotFreeMemory() && F->hasNoSync())
      return false;
  }

  const Function *F = nullptr;
  if (auto *I = dyn_cast<Instruction>(this))
    F = I->getFunction();
  if (auto *A = dyn_cast<Argument>(this))
    F = A->getParent();

  if (!F)
    return true;

  // With garbage collection, deallocation typically occurs solely at or after
  // safepoints. If we're compiling for a collector which uses the
  // gc.statepoint infrastructure, safepoints aren't explicitly present
  // in the IR until after lowering from abstract to physical machine model.
  // The collector could choose to mix explicit deallocation and gc'd objects
  // which is why we need the explicit opt in on a per collector basis.
  if (!F->hasGC())
    return true;

  const auto &GCName = F->getGC();
  if (GCName == "statepoint-example") {
    auto *PT = cast<PointerType>(this->getType());
    if (PT->getAddressSpace() != 1)
      // For the sake of this example GC, we arbitrarily pick addrspace(1) as
      // our GC managed heap. This must match the same check in
      // RewriteStatepointsForGC (and probably needs better factored.)
      return true;

    // It is cheaper to scan for a declaration than to scan for a use in this
    // function. Note that gc.statepoint is a type overloaded function so the
    // usual trick of requesting declaration of the intrinsic from the module
    // doesn't work.
    for (auto &Fn : *F->getParent())
      if (Fn.getIntrinsicID() == Intrinsic::experimental_gc_statepoint)
        return true;
    return false;
  }
  return true;
}

uint64_t Value::getPointerDereferenceableBytes(const DataLayout &DL,
                                               bool &CanBeNull,
                                               bool &CanBeFreed) const {
  assert(getType()->isPointerTy() && "must be pointer");

  uint64_t DerefBytes = 0;
  CanBeNull = false;
  CanBeFreed = UseDerefAtPointSemantics && canBeFreed();
  if (const Argument *A = dyn_cast<Argument>(this)) {
    DerefBytes = A->getDereferenceableBytes();
    if (DerefBytes == 0) {
      // Handle byval/byref/inalloca/preallocated arguments
      if (Type *ArgMemTy = A->getPointeeInMemoryValueType()) {
        if (ArgMemTy->isSized()) {
          // FIXME: Why isn't this the type alloc size?
          DerefBytes = DL.getTypeStoreSize(ArgMemTy).getKnownMinValue();
        }
      }
    }

    if (DerefBytes == 0) {
      DerefBytes = A->getDereferenceableOrNullBytes();
      CanBeNull = true;
    }
  } else if (const auto *Call = dyn_cast<CallBase>(this)) {
    DerefBytes = Call->getRetDereferenceableBytes();
    if (DerefBytes == 0) {
      DerefBytes = Call->getRetDereferenceableOrNullBytes();
      CanBeNull = true;
    }
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(this)) {
    if (MDNode *MD = LI->getMetadata(LLVMContext::MD_dereferenceable)) {
      ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
      DerefBytes = CI->getLimitedValue();
    }
    if (DerefBytes == 0) {
      if (MDNode *MD =
              LI->getMetadata(LLVMContext::MD_dereferenceable_or_null)) {
        ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
        DerefBytes = CI->getLimitedValue();
      }
      CanBeNull = true;
    }
  } else if (auto *IP = dyn_cast<IntToPtrInst>(this)) {
    if (MDNode *MD = IP->getMetadata(LLVMContext::MD_dereferenceable)) {
      ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
      DerefBytes = CI->getLimitedValue();
    }
    if (DerefBytes == 0) {
      if (MDNode *MD =
              IP->getMetadata(LLVMContext::MD_dereferenceable_or_null)) {
        ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
        DerefBytes = CI->getLimitedValue();
      }
      CanBeNull = true;
    }
  } else if (auto *AI = dyn_cast<AllocaInst>(this)) {
    if (!AI->isArrayAllocation()) {
      DerefBytes =
          DL.getTypeStoreSize(AI->getAllocatedType()).getKnownMinValue();
      CanBeNull = false;
      CanBeFreed = false;
    }
  } else if (auto *GV = dyn_cast<GlobalVariable>(this)) {
    if (GV->getValueType()->isSized() && !GV->hasExternalWeakLinkage()) {
      // TODO: Don't outright reject hasExternalWeakLinkage but set the
      // CanBeNull flag.
      DerefBytes = DL.getTypeStoreSize(GV->getValueType()).getFixedValue();
      CanBeNull = false;
      CanBeFreed = false;
    }
  }
  return DerefBytes;
}

Align Value::getPointerAlignment(const DataLayout &DL) const {
  assert(getType()->isPointerTy() && "must be pointer");
  if (auto *GO = dyn_cast<GlobalObject>(this)) {
    if (isa<Function>(GO)) {
      Align FunctionPtrAlign = DL.getFunctionPtrAlign().valueOrOne();
      switch (DL.getFunctionPtrAlignType()) {
      case DataLayout::FunctionPtrAlignType::Independent:
        return FunctionPtrAlign;
      case DataLayout::FunctionPtrAlignType::MultipleOfFunctionAlign:
        return std::max(FunctionPtrAlign, GO->getAlign().valueOrOne());
      }
      llvm_unreachable("Unhandled FunctionPtrAlignType");
    }
    const MaybeAlign Alignment(GO->getAlign());
    if (!Alignment) {
      if (auto *GVar = dyn_cast<GlobalVariable>(GO)) {
        Type *ObjectType = GVar->getValueType();
        if (ObjectType->isSized()) {
          // If the object is defined in the current Module, we'll be giving
          // it the preferred alignment. Otherwise, we have to assume that it
          // may only have the minimum ABI alignment.
          if (GVar->isStrongDefinitionForLinker())
            return DL.getPreferredAlign(GVar);
          else
            return DL.getABITypeAlign(ObjectType);
        }
      }
    }
    return Alignment.valueOrOne();
  } else if (const Argument *A = dyn_cast<Argument>(this)) {
    const MaybeAlign Alignment = A->getParamAlign();
    if (!Alignment && A->hasStructRetAttr()) {
      // An sret parameter has at least the ABI alignment of the return type.
      Type *EltTy = A->getParamStructRetType();
      if (EltTy->isSized())
        return DL.getABITypeAlign(EltTy);
    }
    return Alignment.valueOrOne();
  } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(this)) {
    return AI->getAlign();
  } else if (const auto *Call = dyn_cast<CallBase>(this)) {
    MaybeAlign Alignment = Call->getRetAlign();
    if (!Alignment && Call->getCalledFunction())
      Alignment = Call->getCalledFunction()->getAttributes().getRetAlignment();
    return Alignment.valueOrOne();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(this)) {
    if (MDNode *MD = LI->getMetadata(LLVMContext::MD_align)) {
      ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
      return Align(CI->getLimitedValue());
    }
  } else if (auto *CstPtr = dyn_cast<Constant>(this)) {
    // Strip pointer casts to avoid creating unnecessary ptrtoint expression
    // if the only "reduction" is combining a bitcast + ptrtoint.
    CstPtr = CstPtr->stripPointerCasts();
    if (auto *CstInt = dyn_cast_or_null<ConstantInt>(ConstantExpr::getPtrToInt(
            const_cast<Constant *>(CstPtr), DL.getIntPtrType(getType()),
            /*OnlyIfReduced=*/true))) {
      size_t TrailingZeros = CstInt->getValue().countr_zero();
      // While the actual alignment may be large, elsewhere we have
      // an arbitrary upper alignment limit, so let's clamp to it.
      return Align(TrailingZeros < Value::MaxAlignmentExponent
                       ? uint64_t(1) << TrailingZeros
                       : Value::MaximumAlignment);
    }
  }
  return Align(1);
}

static std::optional<int64_t>
getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!OpC)
      return std::nullopt;
    if (OpC->isZero())
      continue; // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or fixed-length
    // vector. Multiply the index by the ElementSize.
    TypeSize Size = GTI.getSequentialElementStride(DL);
    if (Size.isScalable())
      return std::nullopt;
    Offset += Size.getFixedValue() * OpC->getSExtValue();
  }

  return Offset;
}

std::optional<int64_t> Value::getPointerOffsetFrom(const Value *Other,
                                                   const DataLayout &DL) const {
  const Value *Ptr1 = Other;
  const Value *Ptr2 = this;
  APInt Offset1(DL.getIndexTypeSizeInBits(Ptr1->getType()), 0);
  APInt Offset2(DL.getIndexTypeSizeInBits(Ptr2->getType()), 0);
  Ptr1 = Ptr1->stripAndAccumulateConstantOffsets(DL, Offset1, true);
  Ptr2 = Ptr2->stripAndAccumulateConstantOffsets(DL, Offset2, true);

  // Handle the trivial case first.
  if (Ptr1 == Ptr2)
    return Offset2.getSExtValue() - Offset1.getSExtValue();

  const GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
  const GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);

  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
  // base. After that base, they may have some number of common (and
  // potentially variable) indices. After that they handle some constant
  // offset, which determines their offset from each other. At this point, we
  // handle no other case.
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0) ||
      GEP1->getSourceElementType() != GEP2->getSourceElementType())
    return std::nullopt;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  auto IOffset1 = getOffsetFromIndex(GEP1, Idx, DL);
  auto IOffset2 = getOffsetFromIndex(GEP2, Idx, DL);
  if (!IOffset1 || !IOffset2)
    return std::nullopt;
  return *IOffset2 - *IOffset1 + Offset2.getSExtValue() -
         Offset1.getSExtValue();
}

const Value *Value::DoPHITranslation(const BasicBlock *CurBB,
                                     const BasicBlock *PredBB) const {
  auto *PN = dyn_cast<PHINode>(this);
  if (PN && PN->getParent() == CurBB)
    return PN->getIncomingValueForBlock(PredBB);
  return this;
}

LLVMContext &Value::getContext() const { return VTy->getContext(); }

void Value::reverseUseList() {
  if (!UseList || !UseList->Next)
    // No need to reverse 0 or 1 uses.
    return;

  Use *Head = UseList;
  Use *Current = UseList->Next;
  Head->Next = nullptr;
  while (Current) {
    Use *Next = Current->Next;
    Current->Next = Head;
    Head->Prev = &Current->Next;
    Head = Current;
    Current = Next;
  }
  UseList = Head;
  Head->Prev = &UseList;
}

bool Value::isSwiftError() const {
  auto *Arg = dyn_cast<Argument>(this);
  if (Arg)
    return Arg->hasSwiftErrorAttr();
  auto *Alloca = dyn_cast<AllocaInst>(this);
  if (!Alloca)
    return false;
  return Alloca->isSwiftError();
}

//===----------------------------------------------------------------------===//
//                           ValueHandleBase Class
//===----------------------------------------------------------------------===//

void ValueHandleBase::AddToExistingUseList(ValueHandleBase **List) {
  assert(List && "Handle list is null?");

  // Splice ourselves into the list.
  Next = *List;
  *List = this;
  setPrevPtr(List);
  if (Next) {
    Next->setPrevPtr(&Next);
    assert(getValPtr() == Next->getValPtr() && "Added to wrong list?");
  }
}

void ValueHandleBase::AddToExistingUseListAfter(ValueHandleBase *List) {
  assert(List && "Must insert after existing node");

  // Splice ourselves in immediately after List.
  Next = List->Next;
  setPrevPtr(&List->Next);
  List->Next = this;
  if (Next)
    Next->setPrevPtr(&Next);
}

void ValueHandleBase::AddToUseList() {
  assert(getValPtr() && "Null pointer doesn't have a use list!");

  LLVMContextImpl *pImpl = getValPtr()->getContext().pImpl;

  if (getValPtr()->HasValueHandle) {
    // If this value already has a ValueHandle, then it must be in the
    // ValueHandles map already.
    ValueHandleBase *&Entry = pImpl->ValueHandles[getValPtr()];
    assert(Entry && "Value doesn't have any handles?");
    AddToExistingUseList(&Entry);
    return;
  }

  // Ok, it doesn't have any handles yet, so we must insert it into the
  // DenseMap. However, doing this insertion could cause the DenseMap to
  // reallocate itself, which would invalidate all of the PrevP pointers that
  // point into the old table. Handle this by checking for reallocation and
  // updating the stale pointers only if needed.
  DenseMap<Value *, ValueHandleBase *> &Handles = pImpl->ValueHandles;
  const void *OldBucketPtr = Handles.getPointerIntoBucketsArray();

  ValueHandleBase *&Entry = Handles[getValPtr()];
  assert(!Entry && "Value really did already have handles?");
  AddToExistingUseList(&Entry);
  getValPtr()->HasValueHandle = true;

  // If reallocation didn't happen or if this was the first insertion, don't
  // walk the table.
  if (Handles.isPointerIntoBucketsArray(OldBucketPtr) ||
      Handles.size() == 1) {
    return;
  }

  // Okay, reallocation did happen. Fix the Prev Pointers.
  for (DenseMap<Value *, ValueHandleBase *>::iterator I = Handles.begin(),
       E = Handles.end(); I != E; ++I) {
    assert(I->second && I->first == I->second->getValPtr() &&
           "List invariant broken!");
    I->second->setPrevPtr(&I->second);
  }
}

void ValueHandleBase::RemoveFromUseList() {
  assert(getValPtr() && getValPtr()->HasValueHandle &&
         "Pointer doesn't have a use list!");

  // Unlink this from its use list.
  ValueHandleBase **PrevPtr = getPrevPtr();
  assert(*PrevPtr == this && "List invariant broken");

  *PrevPtr = Next;
  if (Next) {
    assert(Next->getPrevPtr() == &Next && "List invariant broken");
    Next->setPrevPtr(PrevPtr);
    return;
  }

  // If the Next pointer was null, then it is possible that this was the last
  // ValueHandle watching VP. If so, delete its entry from the ValueHandles
  // map.
  LLVMContextImpl *pImpl = getValPtr()->getContext().pImpl;
  DenseMap<Value *, ValueHandleBase *> &Handles = pImpl->ValueHandles;
  if (Handles.isPointerIntoBucketsArray(PrevPtr)) {
    Handles.erase(getValPtr());
    getValPtr()->HasValueHandle = false;
  }
}

void ValueHandleBase::ValueIsDeleted(Value *V) {
  assert(V->HasValueHandle && "Should only be called if ValueHandles present");

  // Get the linked list base, which is guaranteed to exist since the
  // HasValueHandle flag is set.
  LLVMContextImpl *pImpl = V->getContext().pImpl;
  ValueHandleBase *Entry = pImpl->ValueHandles[V];
  assert(Entry && "Value bit set but no entries exist");

  // We use a local ValueHandleBase as an iterator so that ValueHandles can add
  // and remove themselves from the list without breaking our iteration. This
  // is not really an AssertingVH; we just have to give ValueHandleBase a kind.
  // Note that we deliberately do not support the case when dropping a value
  // handle results in a new value handle being permanently added to the list
  // (as might occur in theory for CallbackVH's): the new value handle will not
  // be processed and the checking code will mete out righteous punishment if
  // the handle is still present once we have finished processing all the other
  // value handles (it is fine to momentarily add then remove a value handle).
  for (ValueHandleBase Iterator(Assert, *Entry); Entry; Entry = Iterator.Next) {
    Iterator.RemoveFromUseList();
    Iterator.AddToExistingUseListAfter(Entry);
    assert(Entry->Next == &Iterator && "Loop invariant broken.");

    switch (Entry->getKind()) {
    case Assert:
      break;
    case Weak:
    case WeakTracking:
      // WeakTracking and Weak just go to null, which unlinks them
      // from the list.
      Entry->operator=(nullptr);
      break;
    case Callback:
      // Forward to the subclass's implementation.
      static_cast<CallbackVH *>(Entry)->deleted();
      break;
    }
  }

  // All callbacks, weak references, and assertingVHs should be dropped by now.
  if (V->HasValueHandle) {
#ifndef NDEBUG // Only in +Asserts mode...
    dbgs() << "While deleting: " << *V->getType() << " %" << V->getName()
           << "\n";
    if (pImpl->ValueHandles[V]->getKind() == Assert)
      llvm_unreachable("An asserting value handle still pointed to this"
                       " value!");
#endif
    llvm_unreachable("All references to V were not removed?");
  }
}
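
// Illustrative behavior (a sketch, not part of the implementation): a WeakVH
// or WeakTrackingVH watching a value is nulled out by ValueIsDeleted(), e.g.
//   WeakVH Handle(I);        // I is some Instruction*
//   I->eraseFromParent();    // triggers ValueIsDeleted(I)
//   assert(Handle == nullptr);
// whereas an AssertingVH in the same situation aborts in +Asserts builds.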

void ValueHandleBase::ValueIsRAUWd(Value *Old, Value *New) {
  assert(Old->HasValueHandle && "Should only be called if ValueHandles present");
  assert(Old != New && "Changing value into itself!");
  assert(Old->getType() == New->getType() &&
         "replaceAllUses of value with new value of different type!");

  // Get the linked list base, which is guaranteed to exist since the
  // HasValueHandle flag is set.
  LLVMContextImpl *pImpl = Old->getContext().pImpl;
  ValueHandleBase *Entry = pImpl->ValueHandles[Old];

  assert(Entry && "Value bit set but no entries exist");

  // We use a local ValueHandleBase as an iterator so that
  // ValueHandles can add and remove themselves from the list without
  // breaking our iteration. This is not really an AssertingVH; we
  // just have to give ValueHandleBase some kind.
  for (ValueHandleBase Iterator(Assert, *Entry); Entry; Entry = Iterator.Next) {
    Iterator.RemoveFromUseList();
    Iterator.AddToExistingUseListAfter(Entry);
    assert(Entry->Next == &Iterator && "Loop invariant broken.");

    switch (Entry->getKind()) {
    case Assert:
    case Weak:
      // Asserting and Weak handles do not follow RAUW implicitly.
      break;
    case WeakTracking:
      // Weak goes to the new value, which will unlink it from Old's list.
      Entry->operator=(New);
      break;
    case Callback:
      // Forward to the subclass's implementation.
      static_cast<CallbackVH *>(Entry)->allUsesReplacedWith(New);
      break;
    }
  }

#ifndef NDEBUG
  // If any new weak value handles were added while processing the
  // list, then complain about it now.
  if (Old->HasValueHandle)
    for (Entry = pImpl->ValueHandles[Old]; Entry; Entry = Entry->Next)
      switch (Entry->getKind()) {
      case WeakTracking:
        dbgs() << "After RAUW from " << *Old->getType() << " %"
               << Old->getName() << " to " << *New->getType() << " %"
               << New->getName() << "\n";
        llvm_unreachable(
            "A weak tracking value handle still pointed to the old value!\n");
      default:
        break;
      }
#endif
}

// Pin the vtable to this file.
void CallbackVH::anchor() {}