//==- llvm/CodeGen/MachineMemOperand.h - MachineMemOperand class -*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the MachineMemOperand class, which is a
// description of a memory reference. It is used to help track dependencies
// in the backend.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEMEMOPERAND_H
#define LLVM_CODEGEN_MACHINEMEMOPERAND_H

#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/IR/Value.h" // PointerLikeTypeTraits<Value*>
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/DataTypes.h"

namespace llvm {

class FoldingSetNodeID;
class MDNode;
class raw_ostream;
class MachineFunction;
class ModuleSlotTracker;

/// This class contains a discriminated union of information about pointers in
/// memory operands, relating them back to LLVM IR or to virtual locations (such
/// as frame indices) that are exposed during codegen.
struct MachinePointerInfo {
  /// This is the IR pointer value for the access, or it is null if unknown.
  /// If this is null, then the access is to a pointer in the default address
  /// space.
  PointerUnion<const Value *, const PseudoSourceValue *> V;

  /// Offset - This is an offset from the base Value*.
  int64_t Offset;

  uint8_t StackID;

  unsigned AddrSpace = 0;

  explicit MachinePointerInfo(const Value *v, int64_t offset = 0,
                              uint8_t ID = 0)
      : V(v), Offset(offset), StackID(ID) {
    AddrSpace = v ? v->getType()->getPointerAddressSpace() : 0;
  }

  explicit MachinePointerInfo(const PseudoSourceValue *v, int64_t offset = 0,
                              uint8_t ID = 0)
      : V(v), Offset(offset), StackID(ID) {
    AddrSpace = v ? v->getAddressSpace() : 0;
  }

  explicit MachinePointerInfo(unsigned AddressSpace = 0)
      : V((const Value *)nullptr), Offset(0), StackID(0),
        AddrSpace(AddressSpace) {}

  explicit MachinePointerInfo(
      PointerUnion<const Value *, const PseudoSourceValue *> v,
      int64_t offset = 0, uint8_t ID = 0)
      : V(v), Offset(offset), StackID(ID) {
    if (V) {
      if (const auto *ValPtr = V.dyn_cast<const Value *>())
        AddrSpace = ValPtr->getType()->getPointerAddressSpace();
      else
        AddrSpace = V.get<const PseudoSourceValue *>()->getAddressSpace();
    }
  }

  MachinePointerInfo getWithOffset(int64_t O) const {
    if (V.isNull())
      return MachinePointerInfo(AddrSpace);
    if (V.is<const Value *>())
      return MachinePointerInfo(V.get<const Value *>(), Offset + O, StackID);
    return MachinePointerInfo(V.get<const PseudoSourceValue *>(), Offset + O,
                              StackID);
  }

  /// Return true if memory region [V, V+Offset+Size) is known to be
  /// dereferenceable.
  bool isDereferenceable(unsigned Size, LLVMContext &C,
                         const DataLayout &DL) const;

  /// Return the LLVM IR address space number that this pointer points into.
  unsigned getAddrSpace() const;

  /// Return a MachinePointerInfo record that refers to the constant pool.
  static MachinePointerInfo getConstantPool(MachineFunction &MF);

  /// Return a MachinePointerInfo record that refers to the specified
  /// FrameIndex.
  static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI,
                                          int64_t Offset = 0);

  /// Return a MachinePointerInfo record that refers to a jump table entry.
  static MachinePointerInfo getJumpTable(MachineFunction &MF);

  /// Return a MachinePointerInfo record that refers to a GOT entry.
  static MachinePointerInfo getGOT(MachineFunction &MF);

  /// Stack pointer relative access.
  static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset,
                                     uint8_t ID = 0);

  /// Stack memory without other information.
  static MachinePointerInfo getUnknownStack(MachineFunction &MF);
};
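
// Example (illustrative sketch, not part of this header's API): building a
// MachinePointerInfo for a fixed stack slot and deriving the info for an
// access 8 bytes further into the same object. MF and FI stand for a
// MachineFunction and frame index assumed to be provided by the surrounding
// code.
//
//   MachinePointerInfo Base = MachinePointerInfo::getFixedStack(MF, FI);
//   MachinePointerInfo High = Base.getWithOffset(8);
//   // High keeps Base's V and StackID; only the Offset grows by 8.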

//===----------------------------------------------------------------------===//
/// A description of a memory reference used in the backend.
/// Instead of holding a StoreInst or LoadInst, this class holds the address
/// Value of the reference along with a byte size and offset. This allows it
/// to describe lowered loads and stores. Also, the special PseudoSourceValue
/// objects can be used to represent loads and stores to memory locations
/// that aren't explicit in the regular LLVM IR.
///
class MachineMemOperand {
public:
  /// Flags values. These may be or'd together.
  enum Flags : uint16_t {
    // No flags set.
    MONone = 0,
    /// The memory access reads data.
    MOLoad = 1u << 0,
    /// The memory access writes data.
    MOStore = 1u << 1,
    /// The memory access is volatile.
    MOVolatile = 1u << 2,
    /// The memory access is non-temporal.
    MONonTemporal = 1u << 3,
    /// The memory access is dereferenceable (i.e., doesn't trap).
    MODereferenceable = 1u << 4,
    /// The memory access always returns the same value (or traps).
    MOInvariant = 1u << 5,

    // Reserved for use by target-specific passes.
    // Targets may override getSerializableMachineMemOperandTargetFlags() to
    // enable MIR serialization/parsing of these flags. If more of these flags
    // are added, the MIR printing/parsing code will need to be updated as well.
    MOTargetFlag1 = 1u << 6,
    MOTargetFlag2 = 1u << 7,
    MOTargetFlag3 = 1u << 8,

    LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ MOTargetFlag3)
  };
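
  // Example (illustrative sketch): LLVM_MARK_AS_BITMASK_ENUM above makes Flags
  // usable with the ordinary bitwise operators, so flags are combined and
  // tested directly, e.g. for a load from read-only, non-trapping memory:
  //
  //   MachineMemOperand::Flags F = MachineMemOperand::MOLoad |
  //                                MachineMemOperand::MOInvariant |
  //                                MachineMemOperand::MODereferenceable;
  //   bool Reads = (F & MachineMemOperand::MOLoad) != 0;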

private:
  /// Atomic information for this memory operation.
  struct MachineAtomicInfo {
    /// Synchronization scope ID for this memory operation.
    unsigned SSID : 8;            // SyncScope::ID
    /// Atomic ordering requirements for this memory operation. For cmpxchg
    /// atomic operations, atomic ordering requirements when store occurs.
    unsigned Ordering : 4;        // enum AtomicOrdering
    /// For cmpxchg atomic operations, atomic ordering requirements when store
    /// does not occur.
    unsigned FailureOrdering : 4; // enum AtomicOrdering
  };

  MachinePointerInfo PtrInfo;
  uint64_t Size;
  Flags FlagVals;
  uint16_t BaseAlignLog2; // log_2(base_alignment) + 1
  MachineAtomicInfo AtomicInfo;
  AAMDNodes AAInfo;
  const MDNode *Ranges;

public:
  /// Construct a MachineMemOperand object with the specified PtrInfo, flags,
  /// size, and base alignment. For atomic operations the synchronization scope
  /// and atomic ordering requirements must also be specified. For cmpxchg
  /// atomic operations the atomic ordering requirements when store does not
  /// occur must also be specified.
  MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, uint64_t s,
                    uint64_t a, const AAMDNodes &AAInfo = AAMDNodes(),
                    const MDNode *Ranges = nullptr,
                    SyncScope::ID SSID = SyncScope::System,
                    AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
                    AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
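
  // Sketch (assumes a MachineFunction MF and frame index FI from the
  // surrounding pass): rather than being constructed directly, a
  // MachineMemOperand is normally obtained from
  // MachineFunction::getMachineMemOperand so that it is allocated and owned
  // by the function. For example, a 4-byte, 4-byte-aligned load of a fixed
  // stack slot:
  //
  //   MachineMemOperand *MMO = MF.getMachineMemOperand(
  //       MachinePointerInfo::getFixedStack(MF, FI),
  //       MachineMemOperand::MOLoad, /*Size=*/4, /*Alignment=*/4);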

  const MachinePointerInfo &getPointerInfo() const { return PtrInfo; }

  /// Return the base address of the memory access. This may either be a normal
  /// LLVM IR Value, or one of the special values used in CodeGen.
  /// Special values are those obtained via
  /// PseudoSourceValue::getFixedStack(int), PseudoSourceValue::getStack, and
  /// other PseudoSourceValue member functions which return objects which stand
  /// for frame/stack pointer relative references and other special references
  /// which are not representable in the high-level IR.
  const Value *getValue() const { return PtrInfo.V.dyn_cast<const Value *>(); }

  const PseudoSourceValue *getPseudoValue() const {
    return PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
  }

  const void *getOpaqueValue() const { return PtrInfo.V.getOpaqueValue(); }
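
  // Sketch: at most one of getValue()/getPseudoValue() is non-null, so alias
  // queries typically branch on which side of the union is populated. MMO is
  // an assumed MachineMemOperand pointer.
  //
  //   if (const Value *V = MMO->getValue()) {
  //     // The access traces back to IR; V can feed IR-level alias analysis.
  //   } else if (const PseudoSourceValue *PSV = MMO->getPseudoValue()) {
  //     // The access targets a codegen-only location (stack, GOT, ...).
  //   }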

  /// Return the raw flags of the source value, \see Flags.
  Flags getFlags() const { return FlagVals; }

  /// Bitwise OR the current flags with the given flags.
  void setFlags(Flags f) { FlagVals |= f; }

  /// For normal values, this is a byte offset added to the base address.
  /// For PseudoSourceValue::FPRel values, this is the FrameIndex number.
  int64_t getOffset() const { return PtrInfo.Offset; }

  unsigned getAddrSpace() const { return PtrInfo.getAddrSpace(); }

  /// Return the size in bytes of the memory reference.
  uint64_t getSize() const { return Size; }

  /// Return the size in bits of the memory reference.
  uint64_t getSizeInBits() const { return Size * 8; }

  /// Return the minimum known alignment in bytes of the actual memory
  /// reference.
  uint64_t getAlignment() const;

  /// Return the minimum known alignment in bytes of the base address, without
  /// the offset.
  uint64_t getBaseAlignment() const { return (1u << BaseAlignLog2) >> 1; }
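
  // Worked example of the BaseAlignLog2 encoding (log_2(base alignment) + 1):
  // a base alignment of 8 bytes is stored as BaseAlignLog2 == 4, and
  // (1u << 4) >> 1 == 8 recovers it; a stored value of 0 decodes to
  // (1u << 0) >> 1 == 0, i.e. an alignment of 0.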

  /// Return the AA tags for the memory reference.
  AAMDNodes getAAInfo() const { return AAInfo; }

  /// Return the range tag for the memory reference.
  const MDNode *getRanges() const { return Ranges; }

  /// Returns the synchronization scope ID for this memory operation.
  SyncScope::ID getSyncScopeID() const {
    return static_cast<SyncScope::ID>(AtomicInfo.SSID);
  }

  /// Return the atomic ordering requirements for this memory operation. For
  /// cmpxchg atomic operations, return the atomic ordering requirements when
  /// store occurs.
  AtomicOrdering getOrdering() const {
    return static_cast<AtomicOrdering>(AtomicInfo.Ordering);
  }

  /// For cmpxchg atomic operations, return the atomic ordering requirements
  /// when store does not occur.
  AtomicOrdering getFailureOrdering() const {
    return static_cast<AtomicOrdering>(AtomicInfo.FailureOrdering);
  }
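
  // Sketch: for a cmpxchg-style operation, getOrdering() describes the
  // success path and getFailureOrdering() the failure path. MMO is an assumed
  // MachineMemOperand pointer for such an operation.
  //
  //   bool SeqCstOnSuccess =
  //       MMO->getOrdering() == AtomicOrdering::SequentiallyConsistent;
  //   bool AcquireOnFailure =
  //       MMO->getFailureOrdering() == AtomicOrdering::Acquire;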

  bool isLoad() const { return FlagVals & MOLoad; }
  bool isStore() const { return FlagVals & MOStore; }
  bool isVolatile() const { return FlagVals & MOVolatile; }
  bool isNonTemporal() const { return FlagVals & MONonTemporal; }
  bool isDereferenceable() const { return FlagVals & MODereferenceable; }
  bool isInvariant() const { return FlagVals & MOInvariant; }

  /// Returns true if this operation has an atomic ordering requirement of
  /// unordered or higher, false otherwise.
  bool isAtomic() const { return getOrdering() != AtomicOrdering::NotAtomic; }

  /// Returns true if this memory operation doesn't have any ordering
  /// constraints other than normal aliasing. Volatile and (ordered) atomic
  /// memory operations can't be reordered.
  bool isUnordered() const {
    return (getOrdering() == AtomicOrdering::NotAtomic ||
            getOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }
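
  // Sketch of the intended use: a pass only reorders two memory accesses when
  // both report no ordering constraints; aliasing still has to be checked
  // separately. A and B are assumed MachineMemOperand pointers taken from the
  // instructions being considered.
  //
  //   bool OrderingAllowsSwap = A->isUnordered() && B->isUnordered();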

  /// Update this MachineMemOperand to reflect the alignment of MMO, if it has
  /// a greater alignment. This must only be used when the new alignment
  /// applies to all users of this MachineMemOperand.
  void refineAlignment(const MachineMemOperand *MMO);

  /// Change the SourceValue for this MachineMemOperand. This should only be
  /// used when an object is being relocated and all references to it are being
  /// updated.
  void setValue(const Value *NewSV) { PtrInfo.V = NewSV; }
  void setValue(const PseudoSourceValue *NewSV) { PtrInfo.V = NewSV; }
  void setOffset(int64_t NewOffset) { PtrInfo.Offset = NewOffset; }

  /// Profile - Gather unique data for the object.
  void Profile(FoldingSetNodeID &ID) const;

  /// Support for operator<<.
  void print(raw_ostream &OS) const;
  void print(raw_ostream &OS, ModuleSlotTracker &MST) const;
  void print(raw_ostream &OS, ModuleSlotTracker &MST,
             SmallVectorImpl<StringRef> &SSNs, const LLVMContext &Context,
             const MachineFrameInfo *MFI, const TargetInstrInfo *TII) const;

  friend bool operator==(const MachineMemOperand &LHS,
                         const MachineMemOperand &RHS) {
    return LHS.getValue() == RHS.getValue() &&
           LHS.getPseudoValue() == RHS.getPseudoValue() &&
           LHS.getSize() == RHS.getSize() &&
           LHS.getOffset() == RHS.getOffset() &&
           LHS.getFlags() == RHS.getFlags() &&
           LHS.getAAInfo() == RHS.getAAInfo() &&
           LHS.getRanges() == RHS.getRanges() &&
           LHS.getAlignment() == RHS.getAlignment() &&
           LHS.getAddrSpace() == RHS.getAddrSpace();
  }

  friend bool operator!=(const MachineMemOperand &LHS,
                         const MachineMemOperand &RHS) {
    return !(LHS == RHS);
  }
};

inline raw_ostream &operator<<(raw_ostream &OS, const MachineMemOperand &MRO) {
  MRO.print(OS);
  return OS;
}

} // End llvm namespace

#endif