/* NOTE: This lockless ringbuffer implementation is copied from JACK, extended
 * to include an element size. Consequently, parameters and return values for a
 * size or count are in 'elements', not bytes. Additionally, it only supports
 * single-consumer/single-provider operation.
/* Free-running write/read element indices; they are masked with mSizeMask on
 * use (see readSpace()/writeSpace()). Acquire loads pair with the acq_rel
 * fetch_adds in readAdvance()/writeAdvance(), allowing one reader thread and
 * one writer thread without locks. */
std::atomic<size_t> mWritePtr{0u};
std::atomic<size_t> mReadPtr{0u};
/* Bias applied to the read index in writeSpace() (as mWriteSize - mSizeMask).
 * NOTE(review): presumably set when Create()'s `limit_writes' is used to cap
 * the writable space -- confirm against the out-of-line definitions. */
size_t mWriteSize{0u};

/* Backing storage for the ring; a flexible-array-style member at the end of
 * the object (see DEF_FAM_NEWDEL below). The 16 is presumably the alignment
 * of the storage -- verify against al::FlexArray. */
al::FlexArray<al::byte,16> mBuffer;
/* Two data segments as returned by getReadVector()/getWriteVector(); the
 * second segment has zero length when the span does not wrap around the end
 * of the buffer. `Data' is declared elsewhere in this header (presumably a
 * pointer+length pair -- verify). */
using DataPair = std::pair<Data,Data>;
/* Constructs the ring with `count' elements of byte storage in mBuffer.
 * NOTE(review): single-argument ctor is not explicit; normally Create() is
 * the intended entry point -- confirm no caller relies on implicit
 * conversion before marking it explicit. */
RingBuffer(const size_t count) : mBuffer{count} { }
/** Reset the read and write pointers to zero. This is not thread safe. */
void reset() noexcept;
/**
 * The non-copying data reader. Returns two ringbuffer data pointers that
 * hold the current readable data. If the readable data is in one segment
 * the second segment has zero length.
 */
DataPair getReadVector() const noexcept;
/**
 * The non-copying data writer. Returns two ringbuffer data pointers that
 * hold the current writeable data. If the writeable data is in one segment
 * the second segment has zero length.
 */
DataPair getWriteVector() const noexcept;
56 * Return the number of elements available for reading. This is the number
57 * of elements in front of the read pointer and behind the write pointer.
59 size_t readSpace() const noexcept
61 const size_t w
{mWritePtr
.load(std::memory_order_acquire
)};
62 const size_t r
{mReadPtr
.load(std::memory_order_acquire
)};
63 return (w
-r
) & mSizeMask
;
/**
 * The copying data reader. Copy at most `cnt' elements into `dest'.
 * Returns the actual number of elements copied.
 */
size_t read(void *dest, size_t cnt) noexcept;
/**
 * The copying data reader w/o read pointer advance. Copy at most `cnt'
 * elements into `dest'. Returns the actual number of elements copied.
 */
size_t peek(void *dest, size_t cnt) const noexcept;
76 /** Advance the read pointer `cnt' places. */
77 void readAdvance(size_t cnt
) noexcept
78 { mReadPtr
.fetch_add(cnt
, std::memory_order_acq_rel
); }
82 * Return the number of elements available for writing. This is the number
83 * of elements in front of the write pointer and behind the read pointer.
85 size_t writeSpace() const noexcept
87 const size_t w
{mWritePtr
.load(std::memory_order_acquire
)};
88 const size_t r
{mReadPtr
.load(std::memory_order_acquire
) + mWriteSize
- mSizeMask
};
89 return (r
-w
-1) & mSizeMask
;
/**
 * The copying data writer. Copy at most `cnt' elements from `src'. Returns
 * the actual number of elements copied.
 */
size_t write(const void *src, size_t cnt) noexcept;
97 /** Advance the write pointer `cnt' places. */
98 void writeAdvance(size_t cnt
) noexcept
99 { mWritePtr
.fetch_add(cnt
, std::memory_order_acq_rel
); }
/** Return the size of an element in bytes (presumably the `elem_sz' given
 * to Create() -- confirm against the out-of-line definition). */
size_t getElemSize() const noexcept { return mElemSize; }
/**
 * Create a new ringbuffer to hold at least `sz' elements of `elem_sz'
 * bytes. The number of elements is rounded up to the next power of two
 * (even if it is already a power of two, to ensure the requested amount
 * can be written).
 *
 * NOTE(review): `limit_writes' is an int used as a boolean flag; a `bool'
 * would be clearer, but changing the signature would break callers.
 */
static std::unique_ptr<RingBuffer> Create(size_t sz, size_t elem_sz, int limit_writes);
/* Presumably defines class-specific operator new/delete that size the
 * allocation for the flexible array member `mBuffer' trailing the object --
 * verify against the macro's definition. */
DEF_FAM_NEWDEL(RingBuffer, mBuffer)
/* Owning pointer for ringbuffers returned by RingBuffer::Create(). */
using RingBufferPtr = std::unique_ptr<RingBuffer>;
115 #endif /* RINGBUFFER_H */