[supercollider.git] / common / scope_buffer.hpp
// Stethoscope shared memory buffer implementation
// This file is part of SuperCollider
//
// Copyright (C) 2011 Jakob Leben
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; see the file COPYING. If not, write to
// the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
// Boston, MA 02111-1307, USA.

#ifndef SC_SCOPE_BUFFER_HPP
#define SC_SCOPE_BUFFER_HPP

#include <cstring> // memset

#include <boost/interprocess/offset_ptr.hpp>
#include <boost/atomic.hpp>

extern "C" {
#include "tlsf.h"
}

namespace detail_server_shm {

using boost::interprocess::offset_ptr;
using boost::atomic;

class scope_buffer_writer;
class scope_buffer_reader;

class scope_buffer_pool
{
public:
    void init (void * pool, size_t size_of_pool)
    {
        pool_ = (char*)pool;
        memset(pool_, 0, size_of_pool);
        init_memory_pool(size_of_pool, pool_);
    }

    void * allocate (size_t bytes)
    {
        return malloc_ex(bytes, pool_);
    }

    void deallocate (void * ptr)
    {
        free_ex(ptr, pool_);
    }

private:
    friend class server_shared_memory;
    char * pool_;
};
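
// Minimal usage sketch for scope_buffer_pool (illustrative only; `region` and
// `chunk` are made-up names, and in the server the backing memory would
// presumably come from the shared memory segment owned by the friend class
// server_shared_memory rather than from the heap):
//
//   std::vector<char> region( 1 << 20 );
//   scope_buffer_pool pool;
//   pool.init( region.data(), region.size() );   // zeroes the region, sets up the TLSF pool
//   void * chunk = pool.allocate( 4096 );         // malloc_ex from the pool
//   pool.deallocate( chunk );                     // free_ex back to the pool
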
class scope_buffer
{
    friend class scope_buffer_writer;
    friend class scope_buffer_reader;

    typedef offset_ptr<float> sh_float_ptr;

    enum status
    {
        free = 0,
        initialized
    };

    atomic<int> _status;

    unsigned int _size;
    unsigned int _channels;
    sh_float_ptr _data;

    /*
    Reader/writer synchronization mechanism:

    _stage, _in and _out are indexes into _state - an array of 3 equal data regions.

    _in denotes the region where the writer writes.
    _out denotes the region where the reader reads.
    _stage denotes the region where data is exchanged between the writer and the reader.

    After the writer is done writing, it sets the changed flag of the _in region,
    and swaps _in with _stage.

    The reader polls the changed flag of the _stage region. If it is set, it swaps _out
    with _stage, reads the new _out region, and unsets its changed flag.
    */
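
    /*
    Example of how the indexes move (illustrative; follows directly from push()
    and pull() below), starting from the constructor state _stage = 0, _in = 1,
    _out = 2:

      writer push():  sets _state[1].changed;  exchange -> _in = 0, _stage = 1
      reader pull():  sees _state[1].changed;  clears _state[2].changed;
                      exchange -> _out = 1, _stage = 2;  reads region 1
      writer push():  sets _state[0].changed;  exchange -> _in = 2, _stage = 0

    _in, _stage and _out always remain a permutation of {0, 1, 2}, so the
    writer and the reader never write to the same sample region at the same
    time; only the atomic _stage index is exchanged between them.
    */
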
    atomic<int> _stage;
    int _in;
    int _out;

    struct data_desc {
        data_desc(): data(0), frames(0), changed(false) {}
        sh_float_ptr data;
        unsigned int frames;
        atomic<bool> changed;
    } _state [3];

public:

    scope_buffer():
        _status(free),
        _stage(0),
        _in(1),
        _out(2)
    {}

private:

    // writer interface

    bool allocate( scope_buffer_pool & pool, unsigned int channels, unsigned int size )
    {
        bool available = _status.load( boost::memory_order_relaxed ) == free;
        if( !available ) return false;

        _size = size;
        _channels = channels;

        unsigned int asset_size = channels * size;
        _data = (float*)pool.allocate( asset_size * 3 * sizeof(float) );
        if (_data == NULL)
            return false;

        _state[0].data = _data;
        _state[1].data = _data + asset_size;
        _state[2].data = _data + asset_size + asset_size;

        _status.store( initialized, boost::memory_order_release );

        return true;
    }

    void release( scope_buffer_pool & pool )
    {
        bool allocated = _status.load( boost::memory_order_relaxed ) != free;
        if( !allocated ) return;

        pool.deallocate( _data.get() );

        _status.store( free, boost::memory_order_release );
    }

    float * write_address() { return _state[_in].data.get(); }

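    // Publishes the frames just written at write_address(): marks the _in
    // region as changed and hands it to the stage slot. The release ordering
    // on the exchange is intended to pair with the acquire in pull(), so a
    // reader that picks the region up also sees the written samples.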
    void push( unsigned int frames )
    {
        _state[_in].frames = frames;
        _state[_in].changed.store( true, boost::memory_order_relaxed );
        _in = _stage.exchange( _in, boost::memory_order_release );
    }

    // reader interface

    bool valid()
    {
        return _status.load( boost::memory_order_acquire ) == initialized;
    }

    float * read_address() { return _state[_out].data.get(); }

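    // Polls the changed flag of the staged region. If the writer has published
    // new data, the current _out region is handed back (with its flag cleared)
    // in exchange for the staged one; the acquire on the exchange pairs with
    // the release in push(). Returns the frame count of whatever region
    // read_address() now points to.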
    unsigned int pull()
    {
        int stage = _stage.load( boost::memory_order_relaxed );
        bool changed = _state[stage].changed.load( boost::memory_order_relaxed );

        if( changed )
        {
            _state[_out].changed.store( false, boost::memory_order_relaxed );
            _out = _stage.exchange( _out, boost::memory_order_acquire );
        }

        return _state[_out].frames;
    }
};

class scope_buffer_writer
{
public:
    scope_buffer *buffer;

    scope_buffer_writer( scope_buffer *buffer = 0 ):
        buffer(buffer)
    {}

    scope_buffer_writer( scope_buffer *buf, scope_buffer_pool & pool, unsigned int channels, unsigned int size ):
        buffer(buf)
    {
        if( !buffer->allocate( pool, channels, size ) )
            buffer = 0;
    }

    bool valid()
    {
        return buffer != 0;
    }

    float *data()
    {
        return buffer->write_address();
    }

    unsigned int max_size()
    {
        return buffer->_size;
    }

    void push( unsigned int frames )
    {
        buffer->push( frames );
    }

    void release( scope_buffer_pool & pool )
    {
        buffer->release( pool );
    }
};
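
// Writer-side sketch (illustrative; `buf`, `pool` and the frame counts are
// assumptions, not defined in this header). The constructor allocates the
// three regions from the pool; each cycle fills data() and publishes the
// result with push():
//
//   scope_buffer_writer writer( buf, pool, 2, 4096 );
//   if( writer.valid() )
//   {
//       float * dst = writer.data();
//       // ... fill dst with at most writer.max_size() frames of scope data ...
//       writer.push( frames_written );
//   }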

// FIXME: how do we ensure that scope_buffer data members used in the reader
// are consistent among themselves at all times???

class scope_buffer_reader
{
    scope_buffer *buffer;

public:
    scope_buffer_reader( scope_buffer *buffer_ = 0 ):
        buffer(buffer_)
    {}

    bool valid()
    {
        // places an acquire memory ordering fence
        return (buffer && buffer->valid());
    }

    bool pull( unsigned int &frames )
    {
        unsigned int new_frames = buffer->pull();

        if( new_frames )
            frames = new_frames;

        return new_frames != 0;
    }

    float *data()
    {
        return buffer->read_address();
    }

    unsigned int max_frames()
    {
        return buffer->_size;
    }

    unsigned int channels()
    {
        return buffer->_channels;
    }
};
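
// Reader-side sketch (illustrative; `buf` is an assumption and would normally
// be obtained through the shared memory interface). valid() checks that the
// writer has allocated the buffer, pull() reports whether frames are
// available, and data() exposes the most recently published region:
//
//   scope_buffer_reader reader( buf );
//   unsigned int frames = 0;
//   if( reader.valid() && reader.pull( frames ) )
//   {
//       const float * samples = reader.data();
//       // ... display up to `frames` frames across reader.channels() channels ...
//   }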

} /* namespace detail_server_shm */

#endif