#ifndef RINGBUFFER_H
#define RINGBUFFER_H

#include <array>
#include <atomic>
#include <cassert>
#include <cstddef>
#include <memory>
#include <new>

#include "almalloc.h"
#include "flexarray.h"

/* NOTE: This lockless ringbuffer implementation is copied from JACK, extended
 * to include an element size. Consequently, parameters and return values for a
 * size or count are in 'elements', not bytes. Additionally, it only supports
 * single-consumer/single-provider operation.
 */
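
/* Illustrative sketch (not part of the original header): a minimal
 * single-producer/single-consumer exchange of float samples using the API
 * declared below. The variable names are hypothetical; each side must run on
 * exactly one thread.
 *
 *   RingBufferPtr rb{RingBuffer::Create(1024, sizeof(float), false)};
 *
 *   // Producer thread: copy in as many samples as currently fit.
 *   std::array<float,256> input{};
 *   const std::size_t written{rb->write(input.data(), input.size())};
 *
 *   // Consumer thread: copy out whatever has been written so far.
 *   std::array<float,256> output{};
 *   const std::size_t got{rb->read(output.data(), output.size())};
 */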

class RingBuffer {
#if defined(__cpp_lib_hardware_interference_size) && !defined(_LIBCPP_VERSION)
    static constexpr std::size_t sCacheAlignment{std::hardware_destructive_interference_size};
#else
    /* Assume a 64-byte cache line, the most common/likely value. */
    static constexpr std::size_t sCacheAlignment{64};
#endif

    alignas(sCacheAlignment) std::atomic<std::size_t> mWriteCount{0u};
    alignas(sCacheAlignment) std::atomic<std::size_t> mReadCount{0u};

    alignas(sCacheAlignment) const std::size_t mWriteSize;
    const std::size_t mSizeMask;
    const std::size_t mElemSize;

    al::FlexArray<std::byte, 16> mBuffer;

public:
    struct Data {
        std::byte *buf;
        std::size_t len;
    };
    using DataPair = std::array<Data,2>;

    RingBuffer(const std::size_t writesize, const std::size_t mask, const std::size_t elemsize,
        const std::size_t numbytes)
        : mWriteSize{writesize}, mSizeMask{mask}, mElemSize{elemsize}, mBuffer{numbytes}
    { }

    /** Reset the read and write pointers to zero. This is not thread safe. */
    auto reset() noexcept -> void;

    /**
     * Return the number of elements available for reading. This is the number
     * of elements in front of the read pointer and behind the write pointer.
     */
    [[nodiscard]] auto readSpace() const noexcept -> std::size_t
    {
        const std::size_t w{mWriteCount.load(std::memory_order_acquire)};
        const std::size_t r{mReadCount.load(std::memory_order_acquire)};
        /* mWriteCount is never more than mWriteSize greater than mReadCount. */
        return w - r;
    }
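
    /* Note (added for clarity, not from the original): the counters grow
     * monotonically, so `w - r' relies on well-defined unsigned wraparound. For
     * example, with 8-bit counters, w == 2 and r == 250 gives 2 - 250 == 8
     * readable elements, which stays correct because the true difference can
     * never exceed mWriteSize.
     */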

    /**
     * The copying data reader. Copy at most `count' elements into `dest'.
     * Returns the actual number of elements copied.
     */
    [[nodiscard]] auto read(void *dest, std::size_t count) noexcept -> std::size_t;

    /**
     * The copying data reader w/o read pointer advance. Copy at most `count'
     * elements into `dest'. Returns the actual number of elements copied.
     */
    [[nodiscard]] auto peek(void *dest, std::size_t count) const noexcept -> std::size_t;

    /**
     * The non-copying data reader. Returns two ringbuffer data pointers that
     * hold the current readable data. If the readable data is in one segment
     * the second segment has zero length.
     */
    [[nodiscard]] auto getReadVector() noexcept -> DataPair;

    /** Advance the read pointer `count' places. */
    auto readAdvance(std::size_t count) noexcept -> void
    {
        const std::size_t w{mWriteCount.load(std::memory_order_acquire)};
        const std::size_t r{mReadCount.load(std::memory_order_relaxed)};
        [[maybe_unused]] const std::size_t readable{w - r};
        assert(readable >= count);
        mReadCount.store(r+count, std::memory_order_release);
    }
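
    /* Illustrative sketch (not part of the original header): a zero-copy read
     * draining both readable segments. The names `rb' and `consume' are
     * hypothetical; `consume' is assumed to take a byte pointer and a byte
     * count.
     *
     *   auto data = rb->getReadVector();
     *   consume(data[0].buf, data[0].len * rb->getElemSize());
     *   consume(data[1].buf, data[1].len * rb->getElemSize());
     *   rb->readAdvance(data[0].len + data[1].len);
     */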

    /**
     * Return the number of elements available for writing. This is the total
     * number of writable elements excluding what's readable (already written).
     */
    [[nodiscard]] auto writeSpace() const noexcept -> std::size_t
    { return mWriteSize - readSpace(); }

    /**
     * The copying data writer. Copy at most `count' elements from `src'. Returns
     * the actual number of elements copied.
     */
    [[nodiscard]] auto write(const void *src, std::size_t count) noexcept -> std::size_t;

    /**
     * The non-copying data writer. Returns two ringbuffer data pointers that
     * hold the current writeable data. If the writeable data is in one segment
     * the second segment has zero length.
     */
    [[nodiscard]] auto getWriteVector() noexcept -> DataPair;

    /** Advance the write pointer `count' places. */
    auto writeAdvance(std::size_t count) noexcept -> void
    {
        const std::size_t w{mWriteCount.load(std::memory_order_relaxed)};
        const std::size_t r{mReadCount.load(std::memory_order_acquire)};
        [[maybe_unused]] const std::size_t writable{mWriteSize - (w - r)};
        assert(writable >= count);
        mWriteCount.store(w+count, std::memory_order_release);
    }
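
    /* Illustrative sketch (not part of the original header): a zero-copy write.
     * The names `rb' and `produce' are hypothetical; `produce' is assumed to
     * fill a byte buffer and return how many whole elements it produced.
     *
     *   auto data = rb->getWriteVector();
     *   std::size_t done{produce(data[0].buf, data[0].len * rb->getElemSize())};
     *   if(done == data[0].len)
     *       done += produce(data[1].buf, data[1].len * rb->getElemSize());
     *   rb->writeAdvance(done);
     */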

    [[nodiscard]] auto getElemSize() const noexcept -> std::size_t { return mElemSize; }

    /**
     * Create a new ringbuffer to hold at least `sz' elements of `elem_sz'
     * bytes. The number of elements is rounded up to a power of two. If
     * `limit_writes' is true, the writable space will be limited to `sz'
     * elements regardless of the rounded size.
     */
    [[nodiscard]] static auto Create(std::size_t sz, std::size_t elem_sz, bool limit_writes)
        -> std::unique_ptr<RingBuffer>;
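
    /* Illustrative sketch (not part of the original header): creating a buffer
     * for at least 1000 stereo float frames. The element count is rounded up to
     * 1024; passing true for `limit_writes' would cap the writable space at
     * 1000 elements despite the rounding.
     *
     *   RingBufferPtr rb{RingBuffer::Create(1000, 2*sizeof(float), false)};
     */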

    DEF_FAM_NEWDEL(RingBuffer, mBuffer)
};
using RingBufferPtr = std::unique_ptr<RingBuffer>;

#endif /* RINGBUFFER_H */