pv/data/segment.cpp

/*
 * This file is part of the PulseView project.
 *
 * Copyright (C) 2017 Soeren Apel <soeren@apelpie.net>
 * Copyright (C) 2012 Joel Holdsworth <joel@airwebreathe.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "segment.hpp"

#include <cassert>
#include <cstdlib>
#include <cstring>

using std::lock_guard;
using std::min;
using std::recursive_mutex;

namespace pv {
namespace data {

const uint64_t Segment::MaxChunkSize = 10*1024*1024; /* 10MiB */

Segment::Segment(uint64_t samplerate, unsigned int unit_size) :
    sample_count_(0),
    start_time_(0),
    samplerate_(samplerate),
    unit_size_(unit_size),
    iterator_count_(0),
    mem_optimization_requested_(false)
{
    lock_guard<recursive_mutex> lock(mutex_);
    assert(unit_size_ > 0);

    // Determine the number of samples we can fit in one chunk
    // without exceeding MaxChunkSize
    chunk_size_ = min(MaxChunkSize, (MaxChunkSize / unit_size_) * unit_size_);

    // Create the initial chunk
    current_chunk_ = new uint8_t[chunk_size_];
    data_chunks_.push_back(current_chunk_);
    used_samples_ = 0;
    unused_samples_ = chunk_size_ / unit_size_;
}

Segment::~Segment()
{
    lock_guard<recursive_mutex> lock(mutex_);

    for (uint8_t* chunk : data_chunks_)
        delete[] chunk;
}

uint64_t Segment::get_sample_count() const
{
    lock_guard<recursive_mutex> lock(mutex_);
    return sample_count_;
}

const pv::util::Timestamp& Segment::start_time() const
{
    return start_time_;
}

double Segment::samplerate() const
{
    return samplerate_;
}

void Segment::set_samplerate(double samplerate)
{
    samplerate_ = samplerate;
}

unsigned int Segment::unit_size() const
{
    return unit_size_;
}

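// Shrink the most recent chunk so it only occupies the space actually used.
// If raw sample iterators are active, the optimization is deferred until the
// last of them is released (see end_raw_sample_iteration()).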
void Segment::free_unused_memory()
{
    lock_guard<recursive_mutex> lock(mutex_);

    // Do not mess with the data chunks if we have iterators pointing at them
    if (iterator_count_ > 0) {
        mem_optimization_requested_ = true;
        return;
    }

    // No more data will come in, so re-create the last chunk accordingly
    uint8_t* resized_chunk = new uint8_t[used_samples_ * unit_size_];
    memcpy(resized_chunk, current_chunk_, used_samples_ * unit_size_);

    delete[] current_chunk_;
    current_chunk_ = resized_chunk;

    data_chunks_.pop_back();
    data_chunks_.push_back(resized_chunk);
}

void Segment::append_single_sample(void *data)
{
    lock_guard<recursive_mutex> lock(mutex_);

    // There will always be space for at least one sample in
    // the current chunk, so we do not need to test for space

    memcpy(current_chunk_ + (used_samples_ * unit_size_),
        data, unit_size_);
    used_samples_++;
    unused_samples_--;

    if (unused_samples_ == 0) {
        current_chunk_ = new uint8_t[chunk_size_];
        data_chunks_.push_back(current_chunk_);
        used_samples_ = 0;
        unused_samples_ = chunk_size_ / unit_size_;
    }

    sample_count_++;
}

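// Append a block of samples, splitting it across the chunk boundary when
// needed. Note: as written, this assumes 'samples' never exceeds the free
// space in the current chunk plus one full chunk; a larger block would
// overrun the newly allocated chunk.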
void Segment::append_samples(void* data, uint64_t samples)
{
    lock_guard<recursive_mutex> lock(mutex_);

    if (unused_samples_ >= samples) {
        // All samples fit into the current chunk
        memcpy(current_chunk_ + (used_samples_ * unit_size_),
            data, (samples * unit_size_));
        used_samples_ += samples;
        unused_samples_ -= samples;
    } else {
        // Only a part of the samples fit, split data up between chunks
        memcpy(current_chunk_ + (used_samples_ * unit_size_),
            data, (unused_samples_ * unit_size_));
        const uint64_t remaining_samples = samples - unused_samples_;

        // If we're out of memory, this will throw std::bad_alloc
        current_chunk_ = new uint8_t[chunk_size_];
        data_chunks_.push_back(current_chunk_);
        memcpy(current_chunk_, (uint8_t*)data + (unused_samples_ * unit_size_),
            (remaining_samples * unit_size_));

        used_samples_ = remaining_samples;
        unused_samples_ = (chunk_size_ / unit_size_) - remaining_samples;
    }

    if (unused_samples_ == 0) {
        // If we're out of memory, this will throw std::bad_alloc
        current_chunk_ = new uint8_t[chunk_size_];
        data_chunks_.push_back(current_chunk_);
        used_samples_ = 0;
        unused_samples_ = chunk_size_ / unit_size_;
    }

    sample_count_ += samples;
}

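// Return a copy of the requested sample range. Ownership of the returned
// buffer passes to the caller, which must release it with delete[].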
uint8_t* Segment::get_raw_samples(uint64_t start, uint64_t count) const
{
    assert(start < sample_count_);
    assert(start + count <= sample_count_);
    assert(count > 0);

    lock_guard<recursive_mutex> lock(mutex_);

    uint8_t* dest = new uint8_t[count * unit_size_];
    uint8_t* dest_ptr = dest;

    uint64_t chunk_num = (start * unit_size_) / chunk_size_;
    uint64_t chunk_offs = (start * unit_size_) % chunk_size_;

    while (count > 0) {
        const uint8_t* chunk = data_chunks_[chunk_num];

        uint64_t copy_size = min(count * unit_size_,
            chunk_size_ - chunk_offs);

        memcpy(dest_ptr, chunk + chunk_offs, copy_size);

        dest_ptr += copy_size;
        count -= (copy_size / unit_size_);

        chunk_num++;
        chunk_offs = 0;
    }

    return dest;
}

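// Start a raw iteration over the sample data, beginning at sample 'start'.
// Every call must be balanced by end_raw_sample_iteration(); while any
// iterator is live, free_unused_memory() defers its work so that chunk
// pointers held by iterators stay valid.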
SegmentRawDataIterator* Segment::begin_raw_sample_iteration(uint64_t start)
{
    SegmentRawDataIterator* it = new SegmentRawDataIterator;

    assert(start < sample_count_);

    iterator_count_++;

    it->sample_index = start;
    it->chunk_num = (start * unit_size_) / chunk_size_;
    it->chunk_offs = (start * unit_size_) % chunk_size_;
    it->chunk = data_chunks_[it->chunk_num];
    it->value = it->chunk + it->chunk_offs;

    return it;
}

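// Advance the iterator by 'increase' samples, hopping to the next chunk when
// the offset runs past the end of the current one. Note: the chunk hop below
// assumes a single step never advances past more than one chunk boundary.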
void Segment::continue_raw_sample_iteration(SegmentRawDataIterator* it, uint64_t increase)
{
    lock_guard<recursive_mutex> lock(mutex_);

    if (it->sample_index > sample_count_) {
        // Fail gracefully if we are asked to deliver data we don't have
        return;
    } else {
        it->sample_index += increase;
        it->chunk_offs += (increase * unit_size_);
    }

    if (it->chunk_offs > (chunk_size_ - 1)) {
        it->chunk_num++;
        it->chunk_offs -= chunk_size_;
        it->chunk = data_chunks_[it->chunk_num];
    }

    it->value = it->chunk + it->chunk_offs;
}

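// Release an iterator obtained from begin_raw_sample_iteration(). When the
// last live iterator is gone, any deferred memory optimization is carried out.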
void Segment::end_raw_sample_iteration(SegmentRawDataIterator* it)
{
    delete it;

    iterator_count_--;

    if ((iterator_count_ == 0) && mem_optimization_requested_) {
        mem_optimization_requested_ = false;
        free_unused_memory();
    }
}

} // namespace data
} // namespace pv
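
/*
 * Usage sketch (illustration only, not part of the original file; the
 * samplerate and the int16_t sample format below are made-up values):
 *
 *   pv::data::Segment segment(1000000, sizeof(int16_t));
 *
 *   int16_t samples[256] = {};
 *   segment.append_samples(samples, 256);
 *
 *   // Copy the first 16 samples out; the caller owns the buffer.
 *   uint8_t* data = segment.get_raw_samples(0, 16);
 *   delete[] data;
 *
 *   // Raw iteration must be bracketed by begin/end calls.
 *   pv::data::SegmentRawDataIterator* it = segment.begin_raw_sample_iteration(0);
 *   segment.continue_raw_sample_iteration(it, 1);
 *   segment.end_raw_sample_iteration(it);
 */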