#ifndef RINGBUFFER_H
#define RINGBUFFER_H

#include <atomic>
#include <cassert>
#include <cstddef>
#include <memory>
#include <new>
#include <utility>

#include "almalloc.h"
#include "flexarray.h"

/* NOTE: This lockless ringbuffer implementation is copied from JACK, extended
 * to include an element size. Consequently, parameters and return values for a
 * size or count are in 'elements', not bytes. Additionally, it only supports
 * single-consumer/single-provider operation.
 */
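
/* As an illustration (arbitrary example values, not from the original source):
 * with an element size of sizeof(float), a count of 512 passed to read() or
 * write() refers to 512 floats (typically 2048 bytes), not 512 bytes.
 */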

class RingBuffer {
#if defined(__cpp_lib_hardware_interference_size) && !defined(_LIBCPP_VERSION)
    static constexpr std::size_t sCacheAlignment{std::hardware_destructive_interference_size};
#else
    /* Assume a 64-byte cache line, the most common/likely value. */
    static constexpr std::size_t sCacheAlignment{64};
#endif
    alignas(sCacheAlignment) std::atomic<std::size_t> mWriteCount{0u};
    alignas(sCacheAlignment) std::atomic<std::size_t> mReadCount{0u};

    alignas(sCacheAlignment) const std::size_t mWriteSize;
    const std::size_t mSizeMask;
    const std::size_t mElemSize;

    al::FlexArray<std::byte, 16> mBuffer;

public:
    struct Data {
        std::byte *buf;
        std::size_t len;
    };
    using DataPair = std::pair<Data,Data>;

    RingBuffer(const std::size_t writesize, const std::size_t mask, const std::size_t elemsize,
        const std::size_t numbytes)
        : mWriteSize{writesize}, mSizeMask{mask}, mElemSize{elemsize}, mBuffer{numbytes}
    { }

    /** Reset the read and write pointers to zero. This is not thread safe. */
    auto reset() noexcept -> void;

    /**
     * Return the number of elements available for reading. This is the number
     * of elements in front of the read pointer and behind the write pointer.
     */
    [[nodiscard]] auto readSpace() const noexcept -> std::size_t
    {
        const std::size_t w{mWriteCount.load(std::memory_order_acquire)};
        const std::size_t r{mReadCount.load(std::memory_order_acquire)};
        /* mWriteCount is never more than mWriteSize greater than mReadCount. */
        return w - r;
    }

    /**
     * The copying data reader. Copy at most `count' elements into `dest'.
     * Returns the actual number of elements copied.
     */
    [[nodiscard]] auto read(void *dest, std::size_t count) noexcept -> std::size_t;

    /**
     * The copying data reader w/o read pointer advance. Copy at most `count'
     * elements into `dest'. Returns the actual number of elements copied.
     */
    [[nodiscard]] auto peek(void *dest, std::size_t count) const noexcept -> std::size_t;

    /**
     * The non-copying data reader. Returns two ringbuffer data pointers that
     * hold the current readable data. If the readable data is in one segment
     * the second segment has zero length.
     */
    [[nodiscard]] auto getReadVector() noexcept -> DataPair;
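
    /* Illustrative sketch (not from the original source): a consumer might
     * drain the readable data in place, assuming rb points to a RingBuffer and
     * a hypothetical process() helper consumes whole elements:
     *
     *   auto [first, second] = rb->getReadVector();
     *   process(first.buf, first.len);
     *   process(second.buf, second.len);
     *   rb->readAdvance(first.len + second.len);
     */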

    /** Advance the read pointer `count' places. */
    auto readAdvance(std::size_t count) noexcept -> void
    {
        const std::size_t w{mWriteCount.load(std::memory_order_acquire)};
        const std::size_t r{mReadCount.load(std::memory_order_relaxed)};
        [[maybe_unused]] const std::size_t readable{w - r};
        assert(readable >= count);
        mReadCount.store(r+count, std::memory_order_release);
    }
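
    /* Added commentary: the release store of mReadCount above pairs with the
     * acquire loads of mReadCount on the writer side (writeSpace/writeAdvance),
     * so the producer only reuses space the consumer is finished with. */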

    /**
     * Return the number of elements available for writing. This is the total
     * number of writable elements excluding what's readable (already written).
     */
    [[nodiscard]] auto writeSpace() const noexcept -> std::size_t
    { return mWriteSize - readSpace(); }

    /**
     * The copying data writer. Copy at most `count' elements from `src'. Returns
     * the actual number of elements copied.
     */
    [[nodiscard]] auto write(const void *src, std::size_t count) noexcept -> std::size_t;

    /**
     * The non-copying data writer. Returns two ringbuffer data pointers that
     * hold the current writeable data. If the writeable data is in one segment
     * the second segment has zero length.
     */
    [[nodiscard]] auto getWriteVector() noexcept -> DataPair;
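
    /* Illustrative sketch (not from the original source): a producer might fill
     * the writable space in place and then publish it, assuming rb points to a
     * RingBuffer and a hypothetical produce() helper fills whole elements and
     * returns how many it wrote:
     *
     *   auto [first, second] = rb->getWriteVector();
     *   std::size_t n{produce(first.buf, first.len)};
     *   if(n == first.len) n += produce(second.buf, second.len);
     *   rb->writeAdvance(n);
     */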

    /** Advance the write pointer `count' places. */
    auto writeAdvance(std::size_t count) noexcept -> void
    {
        const std::size_t w{mWriteCount.load(std::memory_order_relaxed)};
        const std::size_t r{mReadCount.load(std::memory_order_acquire)};
        [[maybe_unused]] const std::size_t writable{mWriteSize - (w - r)};
        assert(writable >= count);
        mWriteCount.store(w+count, std::memory_order_release);
    }
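
    /* Added commentary: the release store of mWriteCount above pairs with the
     * acquire loads of mWriteCount on the reader side (readSpace/readAdvance),
     * so the consumer only sees new data after the writes that produced it. */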

    [[nodiscard]] auto getElemSize() const noexcept -> std::size_t { return mElemSize; }

    /**
     * Create a new ringbuffer to hold at least `sz' elements of `elem_sz'
     * bytes. The number of elements is rounded up to a power of two. If
     * `limit_writes' is true, the writable space will be limited to `sz'
     * elements regardless of the rounded size.
     */
    [[nodiscard]] static
    auto Create(std::size_t sz, std::size_t elem_sz, bool limit_writes)
        -> std::unique_ptr<RingBuffer>;

    DEF_FAM_NEWDEL(RingBuffer, mBuffer)
};
using RingBufferPtr = std::unique_ptr<RingBuffer>;
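
/* Illustrative sketch (not from the original source): creating a buffer for at
 * least 1024 float elements and using the copying interface. The sizes and
 * counts here are arbitrary example values; counts are in elements, not bytes.
 *
 *   RingBufferPtr rb{RingBuffer::Create(1024, sizeof(float), true)};
 *   float samples[256]{};
 *   const std::size_t wrote{rb->write(samples, 256)};
 *   const std::size_t got{rb->read(samples, 256)};
 */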

#endif /* RINGBUFFER_H */