//===-- xray_buffer_queue.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// Defines the interface for a buffer queue implementation.
//
//===----------------------------------------------------------------------===//
#ifndef XRAY_BUFFER_QUEUE_H
#define XRAY_BUFFER_QUEUE_H

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "xray_defs.h"

#include <cstddef>
#include <cstdint>

namespace __xray {

/// BufferQueue implements a circular queue of fixed sized buffers (much like a
/// freelist) but is concerned with making it quick to initialise, finalise, and
/// get from or return buffers to the queue. This is one key component of the
/// "flight data recorder" (FDR) mode to support ongoing XRay function call
/// trace collection.
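///
/// A minimal usage sketch (illustrative only; the sizes, names, and error
/// handling here are assumptions, not requirements of the interface):
///
///   bool Success = false;
///   BufferQueue BQ(/*BufferSize=*/4096, /*BufferCount=*/16, Success);
///   if (Success) {
///     BufferQueue::Buffer Buf;
///     if (BQ.getBuffer(Buf) == BufferQueue::ErrorCode::Ok) {
///       // ... write up to Buf.Size bytes of records into Buf.Data ...
///       BQ.releaseBuffer(Buf);
///     }
///     BQ.finalize();
///   }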
class BufferQueue {
public:
  /// ControlBlock represents the memory layout of how we interpret the backing
  /// store for all buffers and extents managed by a BufferQueue instance. The
  /// ControlBlock has the reference count as the first member, sized according
  /// to the platform-specific cache-line size. We never use the Buffer member
  /// of the union, which is only there for compiler-supported alignment and
  /// sizing.
  ///
  /// This ensures that the `Data` member will be placed at least
  /// kCacheLineSize bytes from the beginning of the structure.
  struct ControlBlock {
    union {
      atomic_uint64_t RefCount;
      char Buffer[kCacheLineSize];
    };

    /// We need to make this size 1, to conform to the C++ rules for array data
    /// members. Typically, we want to subtract this 1 byte for sizing
    /// information.
    char Data[1];
  };
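
  // A layout sketch (illustrative, not an interface guarantee): because the
  // union above occupies at least one cache line, the usable data region of a
  // backing allocation begins at least kCacheLineSize bytes in, e.g.
  //
  //   static_assert(offsetof(BufferQueue::ControlBlock, Data) >= kCacheLineSize,
  //                 "Data begins at least one cache line into the block");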

  struct Buffer {
    atomic_uint64_t *Extents = nullptr;
    uint64_t Generation{0};
    void *Data = nullptr;
    size_t Size = 0;

  private:
    friend class BufferQueue;
    ControlBlock *BackingStore = nullptr;
    ControlBlock *ExtentsBackingStore = nullptr;
    size_t Count = 0;
  };
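
  // A hedged sketch of reading a buffer's extents, e.g. to see how much of a
  // returned buffer has been written; the acquire ordering mirrors the
  // accessors further below:
  //
  //   uint64_t BytesWritten = atomic_load(Buf.Extents, memory_order_acquire);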

  struct BufferRep {
    // The managed buffer.
    Buffer Buff;

    // This is true if the buffer has been returned to the available queue, and
    // is considered "used" by another thread.
    bool Used = false;
  };

private:
  // This models a ForwardIterator. |T| Must be either a `Buffer` or `const
  // Buffer`. Note that we only advance to the "used" buffers, when
  // incrementing, so that at dereference we're always at a valid point.
  template <class T> class Iterator {
  public:
    BufferRep *Buffers = nullptr;
    size_t Offset = 0;
    size_t Max = 0;

    Iterator &operator++() {
      DCHECK_NE(Offset, Max);
      do {
        ++Offset;
      } while (!Buffers[Offset].Used && Offset != Max);
      return *this;
    }

    Iterator operator++(int) {
      Iterator C = *this;
      ++(*this);
      return C;
    }

    T &operator*() const { return Buffers[Offset].Buff; }

    T *operator->() const { return &(Buffers[Offset].Buff); }

    Iterator(BufferRep *Root, size_t O, size_t M) XRAY_NEVER_INSTRUMENT
        : Buffers(Root),
          Offset(O),
          Max(M) {
      // We want to advance to the first Offset where the 'Used' property is
      // true, or to the end of the list/queue.
      while (!Buffers[Offset].Used && Offset != Max) {
        ++Offset;
      }
    }

    Iterator() = default;
    Iterator(const Iterator &) = default;
    Iterator(Iterator &&) = default;
    Iterator &operator=(const Iterator &) = default;
    Iterator &operator=(Iterator &&) = default;
    ~Iterator() = default;

    template <class V>
    friend bool operator==(const Iterator &L, const Iterator<V> &R) {
      DCHECK_EQ(L.Max, R.Max);
      return L.Buffers == R.Buffers && L.Offset == R.Offset;
    }

    template <class V>
    friend bool operator!=(const Iterator &L, const Iterator<V> &R) {
      return !(L == R);
    }
  };

  // Size of each individual Buffer.
  size_t BufferSize;

  // Amount of pre-allocated buffers.
  size_t BufferCount;

  SpinMutex Mutex;
  atomic_uint8_t Finalizing;

  // The collocated ControlBlock and buffer storage.
  ControlBlock *BackingStore;

  // The collocated ControlBlock and extents storage.
  ControlBlock *ExtentsBackingStore;

  // A dynamically allocated array of BufferRep instances.
  BufferRep *Buffers;

  // Pointer to the next buffer to be handed out.
  BufferRep *Next;

  // Pointer to the entry in the array where the next released buffer will be
  // placed.
  BufferRep *First;

  // Count of buffers that have been handed out through 'getBuffer'.
  size_t LiveBuffers;

  // We use a generation number to identify buffers and which generation
  // they're associated with.
  atomic_uint64_t Generation;

  /// Releases references to the buffers backed by the current buffer queue.
  void cleanupBuffers();

public:
  enum class ErrorCode : unsigned {
    Ok,
    NotEnoughMemory,
    QueueFinalizing,
    UnrecognizedBuffer,
    AlreadyFinalized,
    AlreadyInitialized,
  };

  static const char *getErrorString(ErrorCode E) {
    switch (E) {
    case ErrorCode::Ok:
      return "(none)";
    case ErrorCode::NotEnoughMemory:
      return "no available buffers in the queue";
    case ErrorCode::QueueFinalizing:
      return "queue already finalizing";
    case ErrorCode::UnrecognizedBuffer:
      return "buffer being returned not owned by buffer queue";
    case ErrorCode::AlreadyFinalized:
      return "queue already finalized";
    case ErrorCode::AlreadyInitialized:
      return "queue already initialized";
    }
    return "unknown error";
  }
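
  // For example, a sketch of reporting a failed operation (the names BQ and
  // Buf are illustrative only):
  //
  //   auto EC = BQ.getBuffer(Buf);
  //   if (EC != BufferQueue::ErrorCode::Ok)
  //     Report("XRay FDR: failed to get a buffer: %s\n",
  //            BufferQueue::getErrorString(EC));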

  /// Initialise a queue of size |N| with buffers of size |B|. We report success
  /// through |Success|.
  BufferQueue(size_t B, size_t N, bool &Success);

  /// Updates |Buf| to contain the pointer to an appropriate buffer. Returns an
  /// error in case there are no available buffers to return, i.e. when we
  /// would run over the upper bound for the total buffers.
  ///
  /// Requirements:
  ///   - BufferQueue is not finalising.
  ///
  /// Returns:
  ///   - ErrorCode::NotEnoughMemory on exceeding MaxSize.
  ///   - ErrorCode::Ok when we find a Buffer.
  ///   - ErrorCode::QueueFinalizing or ErrorCode::AlreadyFinalized on a
  ///     finalizing/finalized BufferQueue.
  ErrorCode getBuffer(Buffer &Buf);

  /// Updates |Buf| to point to nullptr, with size 0.
  ///
  /// Returns:
  ///   - ErrorCode::Ok when we successfully release the buffer.
  ///   - ErrorCode::UnrecognizedBuffer for when this BufferQueue does not own
  ///     the buffer being released.
  ErrorCode releaseBuffer(Buffer &Buf);

  /// Initializes the buffer queue, starting a new generation. We can re-set
  /// the size of buffers with |BS| along with the buffer count with |BC|.
  ///
  /// Returns:
  ///   - ErrorCode::Ok when we successfully initialize the buffer. This
  ///     requires that the buffer queue is previously finalized.
  ///   - ErrorCode::AlreadyInitialized when the buffer queue is not finalized.
  ErrorCode init(size_t BS, size_t BC);

  bool finalizing() const {
    return atomic_load(&Finalizing, memory_order_acquire);
  }

  uint64_t generation() const {
    return atomic_load(&Generation, memory_order_acquire);
  }

  /// Returns the configured size of the buffers in the buffer queue.
  size_t ConfiguredBufferSize() const { return BufferSize; }

  /// Sets the state of the BufferQueue to finalizing, which ensures that:
  ///
  ///   - All subsequent attempts to retrieve a Buffer will fail.
  ///   - All releaseBuffer operations will not fail.
  ///
  /// After a call to finalize succeeds, all subsequent calls to finalize will
  /// fail with ErrorCode::QueueFinalizing.
  ErrorCode finalize();
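
  // A sketch of rotating to a new generation (illustrative; NewBufferSize and
  // NewBufferCount are placeholder names):
  //
  //   if (BQ.finalize() == BufferQueue::ErrorCode::Ok) {
  //     // ... drain the used buffers, e.g. via apply() below ...
  //     BQ.init(NewBufferSize, NewBufferCount);
  //   }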

  /// Applies the provided function F to each Buffer in the queue, only if the
  /// Buffer is marked 'used' (i.e. has been the result of getBuffer(...) and a
  /// releaseBuffer(...) operation).
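  ///
  /// For example (a sketch; the lambda body is illustrative only):
  ///
  ///   BQ.apply([](const BufferQueue::Buffer &B) {
  ///     // Inspect B.Data, B.Size, or B.Extents here.
  ///   });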
  template <class F> void apply(F Fn) XRAY_NEVER_INSTRUMENT {
    SpinMutexLock G(&Mutex);
    for (auto I = begin(), E = end(); I != E; ++I)
      Fn(*I);
  }

  using const_iterator = Iterator<const Buffer>;
  using iterator = Iterator<Buffer>;

  /// Provides iterator access to the raw Buffer instances.
  iterator begin() const { return iterator(Buffers, 0, BufferCount); }
  const_iterator cbegin() const {
    return const_iterator(Buffers, 0, BufferCount);
  }
  iterator end() const { return iterator(Buffers, BufferCount, BufferCount); }
  const_iterator cend() const {
    return const_iterator(Buffers, BufferCount, BufferCount);
  }

  // Cleans up allocated buffers.
  ~BufferQueue();
};

} // namespace __xray

#endif // XRAY_BUFFER_QUEUE_H