Revert "Fix broken channel icon in chrome://help on CrOS" and try again
[chromium-blink-merge.git] / base / trace_event / trace_buffer.cc
bloba2e4f141ef431941ea0147b3a5e015197f8496f0
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/trace_buffer.h"

#include "base/memory/scoped_vector.h"
#include "base/trace_event/trace_event_impl.h"

namespace base {
namespace trace_event {

namespace {
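
// A ring buffer of chunks: chunk indices are kept in a circular queue of
// recyclable slots. GetChunk() hands out the index at the head (recycling the
// oldest chunk once all max_chunks_ slots are in use) and ReturnChunk() pushes
// the index back at the tail, so iteration via NextChunk() visits chunks from
// oldest to newest.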
class TraceBufferRingBuffer : public TraceBuffer {
 public:
  TraceBufferRingBuffer(size_t max_chunks)
      : max_chunks_(max_chunks),
        recyclable_chunks_queue_(new size_t[queue_capacity()]),
        queue_head_(0),
        queue_tail_(max_chunks),
        current_iteration_index_(0),
        current_chunk_seq_(1) {
    chunks_.reserve(max_chunks);
    for (size_t i = 0; i < max_chunks; ++i)
      recyclable_chunks_queue_[i] = i;
  }
  scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
    // Because the number of threads is much smaller than the number of chunks,
    // the queue should never be empty.
    DCHECK(!QueueIsEmpty());

    *index = recyclable_chunks_queue_[queue_head_];
    queue_head_ = NextQueueIndex(queue_head_);
    current_iteration_index_ = queue_head_;

    if (*index >= chunks_.size())
      chunks_.resize(*index + 1);

    TraceBufferChunk* chunk = chunks_[*index];
    chunks_[*index] = NULL;  // Put NULL in the slot of an in-flight chunk.
    if (chunk)
      chunk->Reset(current_chunk_seq_++);
    else
      chunk = new TraceBufferChunk(current_chunk_seq_++);

    return scoped_ptr<TraceBufferChunk>(chunk);
  }
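
  // While a chunk is checked out via GetChunk(), its slot in |chunks_| holds
  // NULL; the caller owns the chunk until it is handed back via ReturnChunk().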
  void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
    // When this method is called, the queue cannot be full, because the queue
    // has room for all chunks, including the one being returned.
    DCHECK(!QueueIsFull());
    DCHECK(chunk);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    chunks_[index] = chunk.release();
    recyclable_chunks_queue_[queue_tail_] = index;
    queue_tail_ = NextQueueIndex(queue_tail_);
  }
  bool IsFull() const override { return false; }

  size_t Size() const override {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize;
  }

  size_t Capacity() const override {
    return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize;
  }
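
  // A TraceEventHandle names an event by (chunk_index, chunk_seq,
  // event_index). Comparing the handle's sequence number against the chunk's
  // current seq() rejects stale handles whose chunk has since been recycled.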
  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
    if (handle.chunk_index >= chunks_.size())
      return NULL;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index];
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return NULL;
    return chunk->GetEventAt(handle.event_index);
  }
  const TraceBufferChunk* NextChunk() override {
    if (chunks_.empty())
      return NULL;

    while (current_iteration_index_ != queue_tail_) {
      size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
      current_iteration_index_ = NextQueueIndex(current_iteration_index_);
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      DCHECK(chunks_[chunk_index]);
      return chunks_[chunk_index];
    }
    return NULL;
  }
  scoped_ptr<TraceBuffer> CloneForIteration() const override {
    scoped_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer());
    for (size_t queue_index = queue_head_; queue_index != queue_tail_;
         queue_index = NextQueueIndex(queue_index)) {
      size_t chunk_index = recyclable_chunks_queue_[queue_index];
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      TraceBufferChunk* chunk = chunks_[chunk_index];
      cloned_buffer->chunks_.push_back(chunk ? chunk->Clone().release() : NULL);
    }
    return cloned_buffer.Pass();
  }
  void EstimateTraceMemoryOverhead(
      TraceEventMemoryOverhead* overhead) override {
    overhead->Add("TraceBufferRingBuffer", sizeof(*this));
    for (size_t queue_index = queue_head_; queue_index != queue_tail_;
         queue_index = NextQueueIndex(queue_index)) {
      size_t chunk_index = recyclable_chunks_queue_[queue_index];
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      chunks_[chunk_index]->EstimateTraceMemoryOverhead(overhead);
    }
  }
 private:
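  // A read-only snapshot produced by CloneForIteration(). Only NextChunk() is
  // meaningful; every mutating or sizing method is stubbed out.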
  class ClonedTraceBuffer : public TraceBuffer {
   public:
    ClonedTraceBuffer() : current_iteration_index_(0) {}

    // The only implemented method.
    const TraceBufferChunk* NextChunk() override {
      return current_iteration_index_ < chunks_.size()
                 ? chunks_[current_iteration_index_++]
                 : NULL;
    }

    scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
      NOTIMPLEMENTED();
      return scoped_ptr<TraceBufferChunk>();
    }
    void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk>) override {
      NOTIMPLEMENTED();
    }
    bool IsFull() const override { return false; }
    size_t Size() const override { return 0; }
    size_t Capacity() const override { return 0; }
    TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
      return NULL;
    }
    scoped_ptr<TraceBuffer> CloneForIteration() const override {
      NOTIMPLEMENTED();
      return scoped_ptr<TraceBuffer>();
    }
    void EstimateTraceMemoryOverhead(
        TraceEventMemoryOverhead* overhead) override {
      NOTIMPLEMENTED();
    }

    size_t current_iteration_index_;
    ScopedVector<TraceBufferChunk> chunks_;
  };
  bool QueueIsEmpty() const { return queue_head_ == queue_tail_; }

  size_t QueueSize() const {
    return queue_tail_ > queue_head_
               ? queue_tail_ - queue_head_
               : queue_tail_ + queue_capacity() - queue_head_;
  }

  bool QueueIsFull() const { return QueueSize() == queue_capacity() - 1; }
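
  // The queue reserves one unused slot so that the full and empty states are
  // distinguishable: head == tail means empty, while QueueSize() ==
  // queue_capacity() - 1 means full. For example, with max_chunks_ == 4 the
  // queue allocates 5 slots and is full once it holds 4 chunk indices.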
  size_t queue_capacity() const {
    // One extra space to help distinguish the full state from the empty state.
    return max_chunks_ + 1;
  }

  size_t NextQueueIndex(size_t index) const {
    index++;
    if (index >= queue_capacity())
      index = 0;
    return index;
  }
  size_t max_chunks_;
  ScopedVector<TraceBufferChunk> chunks_;

  scoped_ptr<size_t[]> recyclable_chunks_queue_;
  size_t queue_head_;
  size_t queue_tail_;

  size_t current_iteration_index_;
  uint32 current_chunk_seq_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
};
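
// A grow-only buffer of chunks: slots are appended until max_chunks_ is
// reached and, unlike the ring buffer above, are never recycled. Once
// chunks_.size() reaches max_chunks_, IsFull() reports true, although
// GetChunk() can still append (see the comment inside it).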
class TraceBufferVector : public TraceBuffer {
 public:
  TraceBufferVector(size_t max_chunks)
      : in_flight_chunk_count_(0),
        current_iteration_index_(0),
        max_chunks_(max_chunks) {
    chunks_.reserve(max_chunks_);
  }
  scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
    // This function may be called when adding normal events or indirectly from
    // AddMetadataEventsWhileLocked(). We cannot DCHECK(!IsFull()) because we
    // have to add the metadata events and flush thread-local buffers even if
    // the buffer is full.
    *index = chunks_.size();
    chunks_.push_back(NULL);  // Put NULL in the slot of an in-flight chunk.
    ++in_flight_chunk_count_;
    // + 1 because a zero chunk_seq is not allowed.
    return scoped_ptr<TraceBufferChunk>(
        new TraceBufferChunk(static_cast<uint32>(*index) + 1));
  }
  void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
    DCHECK_GT(in_flight_chunk_count_, 0u);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    --in_flight_chunk_count_;
    chunks_[index] = chunk.release();
  }
  bool IsFull() const override { return chunks_.size() >= max_chunks_; }

  size_t Size() const override {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize;
  }

  size_t Capacity() const override {
    return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize;
  }
  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
    if (handle.chunk_index >= chunks_.size())
      return NULL;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index];
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return NULL;
    return chunk->GetEventAt(handle.event_index);
  }
  const TraceBufferChunk* NextChunk() override {
    while (current_iteration_index_ < chunks_.size()) {
      // Skip in-flight chunks.
      const TraceBufferChunk* chunk = chunks_[current_iteration_index_++];
      if (chunk)
        return chunk;
    }
    return NULL;
  }
  scoped_ptr<TraceBuffer> CloneForIteration() const override {
    NOTIMPLEMENTED();
    return scoped_ptr<TraceBuffer>();
  }
  void EstimateTraceMemoryOverhead(
      TraceEventMemoryOverhead* overhead) override {
    const size_t chunks_ptr_vector_allocated_size =
        sizeof(*this) + max_chunks_ * sizeof(decltype(chunks_)::value_type);
    const size_t chunks_ptr_vector_resident_size =
        sizeof(*this) + chunks_.size() * sizeof(decltype(chunks_)::value_type);
    overhead->Add("TraceBufferVector", chunks_ptr_vector_allocated_size,
                  chunks_ptr_vector_resident_size);
    for (size_t i = 0; i < chunks_.size(); ++i) {
      TraceBufferChunk* chunk = chunks_[i];
      // Skip the in-flight (nullptr) chunks. They will be accounted for by the
      // per-thread-local dumpers; see ThreadLocalEventBuffer::OnMemoryDump.
      if (chunk)
        chunk->EstimateTraceMemoryOverhead(overhead);
    }
  }
 private:
  size_t in_flight_chunk_count_;
  size_t current_iteration_index_;
  size_t max_chunks_;
  ScopedVector<TraceBufferChunk> chunks_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
};

}  // namespace
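
// TraceBufferChunk is a fixed-capacity array of TraceEvents
// (kTraceBufferChunkSize entries). next_free_ indexes the next unused slot,
// and seq_ identifies the chunk's current incarnation so that stale
// TraceEventHandles can be detected after the chunk is recycled.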
TraceBufferChunk::TraceBufferChunk(uint32 seq) : next_free_(0), seq_(seq) {}

TraceBufferChunk::~TraceBufferChunk() {}
void TraceBufferChunk::Reset(uint32 new_seq) {
  for (size_t i = 0; i < next_free_; ++i)
    chunk_[i].Reset();
  next_free_ = 0;
  seq_ = new_seq;
  cached_overhead_estimate_.reset();
}
TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
  DCHECK(!IsFull());
  *event_index = next_free_++;
  return &chunk_[*event_index];
}
scoped_ptr<TraceBufferChunk> TraceBufferChunk::Clone() const {
  scoped_ptr<TraceBufferChunk> cloned_chunk(new TraceBufferChunk(seq_));
  cloned_chunk->next_free_ = next_free_;
  for (size_t i = 0; i < next_free_; ++i)
    cloned_chunk->chunk_[i].CopyFrom(chunk_[i]);
  return cloned_chunk.Pass();
}
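
// Overhead estimation is cached per chunk: events already measured are kept in
// |cached_overhead_estimate_| and only events added since the last call are
// measured, so repeated dumps of a full chunk stay cheap.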
void TraceBufferChunk::EstimateTraceMemoryOverhead(
    TraceEventMemoryOverhead* overhead) {
  if (!cached_overhead_estimate_) {
    cached_overhead_estimate_.reset(new TraceEventMemoryOverhead);

    // When estimating the size of TraceBufferChunk, exclude the array of trace
    // events, as they are computed individually below.
    cached_overhead_estimate_->Add("TraceBufferChunk",
                                   sizeof(*this) - sizeof(chunk_));
  }

  const size_t num_cached_estimated_events =
      cached_overhead_estimate_->GetCount("TraceEvent");
  DCHECK_LE(num_cached_estimated_events, size());

  if (IsFull() && num_cached_estimated_events == size()) {
    overhead->Update(*cached_overhead_estimate_);
    return;
  }

  for (size_t i = num_cached_estimated_events; i < size(); ++i)
    chunk_[i].EstimateTraceMemoryOverhead(cached_overhead_estimate_.get());

  if (IsFull()) {
    cached_overhead_estimate_->AddSelf();
  } else {
    // The unused TraceEvents in |chunk_| are not cached. They will keep
    // changing as new TraceEvents are added to this chunk, so they are
    // computed on the fly.
    const size_t num_unused_trace_events = capacity() - size();
    overhead->Add("TraceEvent (unused)",
                  num_unused_trace_events * sizeof(TraceEvent));
  }

  overhead->Update(*cached_overhead_estimate_);
}
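
// TraceResultBuffer frames trace fragments as one JSON array: Start() emits
// "[", AddFragment() prepends a comma before every fragment after the first,
// and Finish() emits the closing "]". For example, the call sequence
//   Start(); AddFragment("{}"); AddFragment("{}"); Finish();
// produces the output "[{},{}]".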
TraceResultBuffer::OutputCallback
TraceResultBuffer::SimpleOutput::GetCallback() {
  return Bind(&SimpleOutput::Append, Unretained(this));
}

void TraceResultBuffer::SimpleOutput::Append(
    const std::string& json_trace_output) {
  json_output += json_trace_output;
}
TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {}

TraceResultBuffer::~TraceResultBuffer() {}

void TraceResultBuffer::SetOutputCallback(
    const OutputCallback& json_chunk_callback) {
  output_callback_ = json_chunk_callback;
}

void TraceResultBuffer::Start() {
  append_comma_ = false;
  output_callback_.Run("[");
}

void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
  if (append_comma_)
    output_callback_.Run(",");
  append_comma_ = true;
  output_callback_.Run(trace_fragment);
}

void TraceResultBuffer::Finish() {
  output_callback_.Run("]");
}

TraceBuffer* TraceBuffer::CreateTraceBufferRingBuffer(size_t max_chunks) {
  return new TraceBufferRingBuffer(max_chunks);
}

TraceBuffer* TraceBuffer::CreateTraceBufferVectorOfSize(size_t max_chunks) {
  return new TraceBufferVector(max_chunks);
}
}  // namespace trace_event
}  // namespace base