openal-soft.git: common/ringbuffer.h
#ifndef RINGBUFFER_H
#define RINGBUFFER_H

#include <atomic>
#include <cassert>
#include <cstddef>
#include <memory>
#include <new>
#include <utility>

#include "almalloc.h"
#include "flexarray.h"


/* NOTE: This lockless ringbuffer implementation is copied from JACK, extended
 * to include an element size. Consequently, parameters and return values for a
 * size or count are in 'elements', not bytes. Additionally, it only supports
 * single-consumer/single-provider operation.
 */
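/* Illustrative usage sketch of the copying interface: one producer thread
 * writes while one consumer thread reads. The names `rb', `samples', and
 * `outbuf' are hypothetical; counts are in elements of the configured size.
 *
 *   // Producer: copy up to 256 elements into the ringbuffer.
 *   const std::size_t wrote{rb->write(samples.data(), 256)};
 *
 *   // Consumer: copy out whatever is currently readable.
 *   const std::size_t got{rb->read(outbuf.data(), rb->readSpace())};
 *
 * Both calls return the number of elements actually transferred, which may be
 * less than requested when the buffer is full or empty, respectively.
 */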
struct RingBuffer {
private:
#if defined(__cpp_lib_hardware_interference_size) && !defined(_LIBCPP_VERSION)
    static constexpr std::size_t sCacheAlignment{std::hardware_destructive_interference_size};
#else
    /* Assume a 64-byte cache line, the most common/likely value. */
    static constexpr std::size_t sCacheAlignment{64};
#endif
    alignas(sCacheAlignment) std::atomic<std::size_t> mWriteCount{0u};
    alignas(sCacheAlignment) std::atomic<std::size_t> mReadCount{0u};

    alignas(sCacheAlignment) const std::size_t mWriteSize;
    const std::size_t mSizeMask;
    const std::size_t mElemSize;

    al::FlexArray<std::byte, 16> mBuffer;

public:
    struct Data {
        std::byte *buf;
        std::size_t len;
    };
    using DataPair = std::pair<Data,Data>;

    RingBuffer(const std::size_t writesize, const std::size_t mask, const std::size_t elemsize,
        const std::size_t numbytes)
        : mWriteSize{writesize}, mSizeMask{mask}, mElemSize{elemsize}, mBuffer{numbytes}
    { }

    /** Reset the read and write pointers to zero. This is not thread safe. */
    auto reset() noexcept -> void;

    /**
     * Return the number of elements available for reading. This is the number
     * of elements in front of the read pointer and behind the write pointer.
     */
    [[nodiscard]] auto readSpace() const noexcept -> std::size_t
    {
        const std::size_t w{mWriteCount.load(std::memory_order_acquire)};
        const std::size_t r{mReadCount.load(std::memory_order_acquire)};
        /* mWriteCount is never more than mWriteSize greater than mReadCount. */
        return w - r;
    }

    /**
     * The copying data reader. Copy at most `count' elements into `dest'.
     * Returns the actual number of elements copied.
     */
    [[nodiscard]] auto read(void *dest, std::size_t count) noexcept -> std::size_t;
    /**
     * The copying data reader w/o read pointer advance. Copy at most `count'
     * elements into `dest'. Returns the actual number of elements copied.
     */
    [[nodiscard]] auto peek(void *dest, std::size_t count) const noexcept -> std::size_t;

    /**
     * The non-copying data reader. Returns two ringbuffer data pointers that
     * hold the current readable data. If the readable data is in one segment
     * the second segment has zero length.
     */
    [[nodiscard]] auto getReadVector() noexcept -> DataPair;
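    /* Illustrative sketch of the non-copying read path; `rb' and `process'
     * are hypothetical. The second segment holds the wrap-around portion and
     * may be empty.
     *
     *   auto [first, second] = rb->getReadVector();
     *   process(first.buf, first.len);
     *   if(second.len > 0)
     *       process(second.buf, second.len);
     *   rb->readAdvance(first.len + second.len);
     *
     * The data stays valid until readAdvance() moves the read pointer.
     */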
    /** Advance the read pointer `count' places. */
    auto readAdvance(std::size_t count) noexcept -> void
    {
        const std::size_t w{mWriteCount.load(std::memory_order_acquire)};
        const std::size_t r{mReadCount.load(std::memory_order_relaxed)};
        [[maybe_unused]] const std::size_t readable{w - r};
        assert(readable >= count);
        mReadCount.store(r+count, std::memory_order_release);
    }


    /**
     * Return the number of elements available for writing. This is the total
     * number of writable elements excluding what's readable (already written).
     */
    [[nodiscard]] auto writeSpace() const noexcept -> std::size_t
    { return mWriteSize - readSpace(); }

    /**
     * The copying data writer. Copy at most `count' elements from `src'. Returns
     * the actual number of elements copied.
     */
    [[nodiscard]] auto write(const void *src, std::size_t count) noexcept -> std::size_t;

    /**
     * The non-copying data writer. Returns two ringbuffer data pointers that
     * hold the current writeable data. If the writeable data is in one segment
     * the second segment has zero length.
     */
    [[nodiscard]] auto getWriteVector() noexcept -> DataPair;
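    /* Illustrative sketch of the non-copying write path; `rb', `generate' and
     * `count' are hypothetical. Fill the first segment, spill into the second
     * if needed, then publish with writeAdvance().
     *
     *   auto [first, second] = rb->getWriteVector();
     *   const std::size_t todo{std::min(count, first.len + second.len)};
     *   const std::size_t n1{std::min(todo, first.len)};
     *   generate(first.buf, n1);
     *   if(todo > n1)
     *       generate(second.buf, todo - n1);
     *   rb->writeAdvance(todo);
     *
     * The written elements become visible to the reader only after
     * writeAdvance() updates the write count.
     */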
    /** Advance the write pointer `count' places. */
    auto writeAdvance(std::size_t count) noexcept -> void
    {
        const std::size_t w{mWriteCount.load(std::memory_order_relaxed)};
        const std::size_t r{mReadCount.load(std::memory_order_acquire)};
        [[maybe_unused]] const std::size_t writable{mWriteSize - (w - r)};
        assert(writable >= count);
        mWriteCount.store(w+count, std::memory_order_release);
    }

    [[nodiscard]] auto getElemSize() const noexcept -> std::size_t { return mElemSize; }

    /**
     * Create a new ringbuffer to hold at least `sz' elements of `elem_sz'
     * bytes. The number of elements is rounded up to a power of two. If
     * `limit_writes' is true, the writable space will be limited to `sz'
     * elements regardless of the rounded size.
     */
    [[nodiscard]] static
    auto Create(std::size_t sz, std::size_t elem_sz, bool limit_writes) -> std::unique_ptr<RingBuffer>;
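    /* Illustrative sketch of creating a buffer for 1000 stereo float frames;
     * the element count is rounded up to the next power of two (1024 here),
     * and with `limit_writes' true the writable space is still capped at the
     * requested 1000 elements.
     *
     *   RingBufferPtr rb{RingBuffer::Create(1000, sizeof(float)*2, true)};
     *   assert(rb->getElemSize() == sizeof(float)*2);
     */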
    DEF_FAM_NEWDEL(RingBuffer, mBuffer)
};
using RingBufferPtr = std::unique_ptr<RingBuffer>;

#endif /* RINGBUFFER_H */