//===-- xray_buffer_queue.cpp ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// Defines the interface for a buffer queue implementation.
//
//===----------------------------------------------------------------------===//
#include "xray_buffer_queue.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#if !SANITIZER_FUCHSIA
#include "sanitizer_common/sanitizer_posix.h"
#endif
#include "xray_allocator.h"
#include "xray_defs.h"

using namespace __xray;

namespace {

BufferQueue::ControlBlock *allocControlBlock(size_t Size, size_t Count) {
  auto B =
      allocateBuffer((sizeof(BufferQueue::ControlBlock) - 1) + (Size * Count));
  return B == nullptr ? nullptr
                      : reinterpret_cast<BufferQueue::ControlBlock *>(B);
}

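// Note on the sizing arithmetic above: a ControlBlock carries its payload in a
// trailing one-byte Data member (see xray_buffer_queue.h), so an allocation of
// (sizeof(ControlBlock) - 1) + (Size * Count) bytes yields exactly
// Size * Count usable payload bytes starting at the block's Data member.
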
void deallocControlBlock(BufferQueue::ControlBlock *C, size_t Size,
                         size_t Count) {
  deallocateBuffer(reinterpret_cast<unsigned char *>(C),
                   (sizeof(BufferQueue::ControlBlock) - 1) + (Size * Count));
}

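// The two helpers below implement the shared-ownership scheme for control
// blocks: the queue holds one reference, and every buffer handed out to a
// user holds another, so a block stays alive until the last holder (possibly
// from an older generation) lets go.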
void decRefCount(BufferQueue::ControlBlock *C, size_t Size, size_t Count) {
  if (C == nullptr)
    return;
  if (atomic_fetch_sub(&C->RefCount, 1, memory_order_acq_rel) == 1)
    deallocControlBlock(C, Size, Count);
}

void incRefCount(BufferQueue::ControlBlock *C) {
  if (C == nullptr)
    return;
  atomic_fetch_add(&C->RefCount, 1, memory_order_acq_rel);
}

// We use a struct to ensure that we are allocating one atomic_uint64_t per
// cache line. This allows us to not worry about false-sharing among atomic
// objects being updated (constantly) by different threads.
struct ExtentsPadded {
  union {
    atomic_uint64_t Extents;
    unsigned char Storage[kCacheLineSize];
  };
};

constexpr size_t kExtentsSize = sizeof(ExtentsPadded);

} // namespace

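// Sanity check: the union above forces each ExtentsPadded instance to span
// exactly one cache line, so neighboring extents counters never share one
// (assuming atomic_uint64_t's alignment does not exceed kCacheLineSize).
static_assert(kExtentsSize == kCacheLineSize,
              "ExtentsPadded must occupy exactly one cache line");
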
BufferQueue::ErrorCode BufferQueue::init(size_t BS, size_t BC) {
  SpinMutexLock Guard(&Mutex);

  // Re-initialization is only allowed once the queue has been finalized.
  if (!finalizing())
    return BufferQueue::ErrorCode::AlreadyInitialized;

  cleanupBuffers();

  bool Success = false;
  BufferSize = BS;
  BufferCount = BC;

  BackingStore = allocControlBlock(BufferSize, BufferCount);
  if (BackingStore == nullptr)
    return BufferQueue::ErrorCode::NotEnoughMemory;

  auto CleanupBackingStore = at_scope_exit([&, this] {
    if (Success)
      return;
    deallocControlBlock(BackingStore, BufferSize, BufferCount);
    BackingStore = nullptr;
  });

  // Initialize enough atomic_uint64_t instances, each tracking the extents of
  // one buffer.
  ExtentsBackingStore = allocControlBlock(kExtentsSize, BufferCount);
  if (ExtentsBackingStore == nullptr)
    return BufferQueue::ErrorCode::NotEnoughMemory;

  auto CleanupExtentsBackingStore = at_scope_exit([&, this] {
    if (Success)
      return;
    deallocControlBlock(ExtentsBackingStore, kExtentsSize, BufferCount);
    ExtentsBackingStore = nullptr;
  });

  Buffers = initArray<BufferRep>(BufferCount);
  if (Buffers == nullptr)
    return BufferQueue::ErrorCode::NotEnoughMemory;

  // At this point we increment the generation number to associate the buffers
  // with the new generation.
  atomic_fetch_add(&Generation, 1, memory_order_acq_rel);

  // First, we initialize the refcount in the ControlBlock, which we treat as
  // being at the start of the BackingStore pointer.
  atomic_store(&BackingStore->RefCount, 1, memory_order_release);
  atomic_store(&ExtentsBackingStore->RefCount, 1, memory_order_release);

  // Then we initialize the individual buffers that sub-divide the whole
  // backing store. Each buffer will start at the `Data` member of the
  // ControlBlock, and will be offsets from these locations.
  for (size_t i = 0; i < BufferCount; ++i) {
    auto &T = Buffers[i];
    auto &Buf = T.Buff;
    auto *E = reinterpret_cast<ExtentsPadded *>(&ExtentsBackingStore->Data +
                                                (kExtentsSize * i));
    Buf.Extents = &E->Extents;
    atomic_store(Buf.Extents, 0, memory_order_release);
    Buf.Generation = generation();
    Buf.Data = &BackingStore->Data + (BufferSize * i);
    Buf.Size = BufferSize;
    Buf.BackingStore = BackingStore;
    Buf.ExtentsBackingStore = ExtentsBackingStore;
    Buf.Count = BufferCount;
    T.Used = false;
  }

  Next = Buffers;
  First = Buffers;
  LiveBuffers = 0;
  atomic_store(&Finalizing, 0, memory_order_release);
  Success = true;
  return BufferQueue::ErrorCode::Ok;
}

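// The resulting layout of the backing stores after init(), for reference:
//
//   BackingStore:        [ RefCount | Buf0 data | Buf1 data | ... ]
//   ExtentsBackingStore: [ RefCount | Extents0 (padded) | Extents1 | ... ]
//
// Each Buffer handed out by getBuffer() points into these two blocks rather
// than owning memory of its own.
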
BufferQueue::BufferQueue(size_t B, size_t N,
                         bool &Success) XRAY_NEVER_INSTRUMENT
    : BufferSize(B),
      BufferCount(N),
      Mutex(),
      Finalizing{1},
      BackingStore(nullptr),
      ExtentsBackingStore(nullptr),
      Buffers(nullptr),
      Next(Buffers),
      First(Buffers),
      LiveBuffers(0),
      Generation{0} {
  Success = init(B, N) == BufferQueue::ErrorCode::Ok;
}

BufferQueue::ErrorCode BufferQueue::getBuffer(Buffer &Buf) {
  if (atomic_load(&Finalizing, memory_order_acquire))
    return ErrorCode::QueueFinalizing;

  BufferRep *B = nullptr;
  {
    SpinMutexLock Guard(&Mutex);
    if (LiveBuffers == BufferCount)
      return ErrorCode::NotEnoughMemory;

    // Advance the round-robin cursor, wrapping at the end of the array.
    B = Next++;
    if (Next == (Buffers + BufferCount))
      Next = Buffers;
    ++LiveBuffers;
  }

  incRefCount(BackingStore);
  incRefCount(ExtentsBackingStore);
  Buf = B->Buff;
  Buf.Generation = generation();
  B->Used = true;
  return ErrorCode::Ok;
}

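// Releases a buffer back to the queue. Buffers from an older generation (or
// released after the queue has emptied) are simply dropped once their
// reference counts are given up; buffers whose data pointer falls outside the
// current backing store are rejected as unrecognized.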
BufferQueue::ErrorCode BufferQueue::releaseBuffer(Buffer &Buf) {
  // Check whether the buffer being referred to is within the bounds of the
  // backing store's range.
  BufferRep *B = nullptr;
  {
    SpinMutexLock Guard(&Mutex);
    if (Buf.Generation != generation() || LiveBuffers == 0) {
      decRefCount(Buf.BackingStore, Buf.Size, Buf.Count);
      decRefCount(Buf.ExtentsBackingStore, kExtentsSize, Buf.Count);
      return BufferQueue::ErrorCode::Ok;
    }

    if (Buf.Data < &BackingStore->Data ||
        Buf.Data > &BackingStore->Data + (BufferCount * BufferSize))
      return BufferQueue::ErrorCode::UnrecognizedBuffer;

    --LiveBuffers;
    B = First++;
    if (First == (Buffers + BufferCount))
      First = Buffers;
  }

  // Now that the buffer has been released, we mark it as "used".
  B->Buff = Buf;
  B->Used = true;
  decRefCount(Buf.BackingStore, Buf.Size, Buf.Count);
  decRefCount(Buf.ExtentsBackingStore, kExtentsSize, Buf.Count);
  atomic_store(B->Buff.Extents, atomic_load(Buf.Extents, memory_order_acquire),
               memory_order_release);
  Buf = {};
  return ErrorCode::Ok;
}

BufferQueue::ErrorCode BufferQueue::finalize() {
  if (atomic_exchange(&Finalizing, 1, memory_order_acq_rel))
    return ErrorCode::QueueFinalizing;
  return ErrorCode::Ok;
}

void BufferQueue::cleanupBuffers() {
  for (auto B = Buffers, E = Buffers + BufferCount; B != E; ++B)
    B->~BufferRep();
  deallocateBuffer(Buffers, BufferCount);
  decRefCount(BackingStore, BufferSize, BufferCount);
  decRefCount(ExtentsBackingStore, kExtentsSize, BufferCount);
  BackingStore = nullptr;
  ExtentsBackingStore = nullptr;
  Buffers = nullptr;
  BufferCount = 0;
  BufferSize = 0;
}

BufferQueue::~BufferQueue() { cleanupBuffers(); }
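
// An illustrative sketch of how this API fits together (the buffer sizes and
// error handling here are hypothetical; the real driver for these queues is
// the FDR logging implementation):
//
//   bool Success = false;
//   BufferQueue Queue(/*BufferSize=*/4096, /*BufferCount=*/8, Success);
//   if (!Success)
//     return;
//
//   BufferQueue::Buffer Buf;
//   if (Queue.getBuffer(Buf) == BufferQueue::ErrorCode::Ok) {
//     // ... write records into Buf.Data, updating *Buf.Extents ...
//     Queue.releaseBuffer(Buf);
//   }
//
//   Queue.finalize();  // Stop handing out buffers before flushing.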