// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/trace_buffer.h"

#include "base/trace_event/trace_event_impl.h"

namespace trace_event {

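// Ring-buffer strategy: chunk slots are recycled through a circular queue of
// slot indices. GetChunk() takes the index at the queue head and ReturnChunk()
// re-appends it at the tail, so the oldest returned chunk is the next one to
// be reset and reused once every slot has been handed out.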
class TraceBufferRingBuffer : public TraceBuffer {
 public:
  TraceBufferRingBuffer(size_t max_chunks)
      : max_chunks_(max_chunks),
        recyclable_chunks_queue_(new size_t[queue_capacity()]),
        queue_head_(0),
        queue_tail_(max_chunks),
        current_iteration_index_(0),
        current_chunk_seq_(1) {
    chunks_.reserve(max_chunks);
    for (size_t i = 0; i < max_chunks; ++i)
      recyclable_chunks_queue_[i] = i;
  }

  scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
    // Because the number of threads is much less than the number of chunks,
    // the queue should never be empty.
    DCHECK(!QueueIsEmpty());

    *index = recyclable_chunks_queue_[queue_head_];
    queue_head_ = NextQueueIndex(queue_head_);
    current_iteration_index_ = queue_head_;

    if (*index >= chunks_.size())
      chunks_.resize(*index + 1);

    TraceBufferChunk* chunk = chunks_[*index];
    chunks_[*index] = NULL;  // Put NULL in the slot of an in-flight chunk.
    if (chunk)
      chunk->Reset(current_chunk_seq_++);
    else
      chunk = new TraceBufferChunk(current_chunk_seq_++);

    return scoped_ptr<TraceBufferChunk>(chunk);
  }

  void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
    // When this method is called, the queue should not be full because it
    // can contain all chunks including the one to be returned.
    DCHECK(!QueueIsFull());
    DCHECK(chunk);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    chunks_[index] = chunk.release();
    recyclable_chunks_queue_[queue_tail_] = index;
    queue_tail_ = NextQueueIndex(queue_tail_);
  }

  bool IsFull() const override { return false; }

  size_t Size() const override {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize;
  }

  size_t Capacity() const override {
    return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize;
  }

  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
    if (handle.chunk_index >= chunks_.size())
      return NULL;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index];
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return NULL;
    return chunk->GetEventAt(handle.event_index);
  }

  const TraceBufferChunk* NextChunk() override {
    if (chunks_.empty())
      return NULL;

    while (current_iteration_index_ != queue_tail_) {
      size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
      current_iteration_index_ = NextQueueIndex(current_iteration_index_);
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      DCHECK(chunks_[chunk_index]);
      return chunks_[chunk_index];
    }
    return NULL;
  }

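  // Deep-copies the chunks currently in the recyclable queue so the snapshot
  // can be iterated independently of this buffer; only NextChunk() is
  // implemented on the resulting clone.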
  scoped_ptr<TraceBuffer> CloneForIteration() const override {
    scoped_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer());
    for (size_t queue_index = queue_head_; queue_index != queue_tail_;
         queue_index = NextQueueIndex(queue_index)) {
      size_t chunk_index = recyclable_chunks_queue_[queue_index];
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      TraceBufferChunk* chunk = chunks_[chunk_index];
      cloned_buffer->chunks_.push_back(chunk ? chunk->Clone().release() : NULL);
    }
    return cloned_buffer.Pass();
  }

  void EstimateTraceMemoryOverhead(
      TraceEventMemoryOverhead* overhead) override {
    overhead->Add("TraceBufferRingBuffer", sizeof(*this));
    for (size_t queue_index = queue_head_; queue_index != queue_tail_;
         queue_index = NextQueueIndex(queue_index)) {
      size_t chunk_index = recyclable_chunks_queue_[queue_index];
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      chunks_[chunk_index]->EstimateTraceMemoryOverhead(overhead);
    }
  }

 private:
  class ClonedTraceBuffer : public TraceBuffer {
   public:
    ClonedTraceBuffer() : current_iteration_index_(0) {}

    // The only implemented method.
    const TraceBufferChunk* NextChunk() override {
      return current_iteration_index_ < chunks_.size()
                 ? chunks_[current_iteration_index_++]
                 : NULL;
    }

    scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
      NOTIMPLEMENTED();
      return scoped_ptr<TraceBufferChunk>();
    }
    void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk>) override {
      NOTIMPLEMENTED();
    }
    bool IsFull() const override { return false; }
    size_t Size() const override { return 0; }
    size_t Capacity() const override { return 0; }
    TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
      return NULL;
    }
    scoped_ptr<TraceBuffer> CloneForIteration() const override {
      NOTIMPLEMENTED();
      return scoped_ptr<TraceBuffer>();
    }
    void EstimateTraceMemoryOverhead(
        TraceEventMemoryOverhead* overhead) override {
      NOTIMPLEMENTED();
    }

    size_t current_iteration_index_;
    ScopedVector<TraceBufferChunk> chunks_;
  };

  bool QueueIsEmpty() const { return queue_head_ == queue_tail_; }

  size_t QueueSize() const {
    return queue_tail_ > queue_head_
               ? queue_tail_ - queue_head_
               : queue_tail_ + queue_capacity() - queue_head_;
  }

  bool QueueIsFull() const { return QueueSize() == queue_capacity() - 1; }

  size_t queue_capacity() const {
    // One extra space to help distinguish full state and empty state.
    return max_chunks_ + 1;
  }

  size_t NextQueueIndex(size_t index) const {
    index++;
    if (index >= queue_capacity())
      index = 0;
    return index;
  }

  size_t max_chunks_;
  ScopedVector<TraceBufferChunk> chunks_;

  scoped_ptr<size_t[]> recyclable_chunks_queue_;
  size_t queue_head_;
  size_t queue_tail_;

  size_t current_iteration_index_;
  uint32 current_chunk_seq_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
};

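// Vector strategy: chunk slots are appended until max_chunks_ is reached and
// are never recycled, so IsFull() eventually returns true and, unlike the
// ring buffer, existing events are never overwritten.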
class TraceBufferVector : public TraceBuffer {
 public:
  TraceBufferVector(size_t max_chunks)
      : in_flight_chunk_count_(0),
        current_iteration_index_(0),
        max_chunks_(max_chunks) {
    chunks_.reserve(max_chunks_);
  }

  scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
    // This function may be called when adding normal events or indirectly from
    // AddMetadataEventsWhileLocked(). We cannot DCHECK(!IsFull()) because we
    // have to add the metadata events and flush thread-local buffers even if
    // the buffer is full.
    *index = chunks_.size();
    chunks_.push_back(NULL);  // Put NULL in the slot of an in-flight chunk.
    ++in_flight_chunk_count_;
    // + 1 because zero chunk_seq is not allowed.
    return scoped_ptr<TraceBufferChunk>(
        new TraceBufferChunk(static_cast<uint32>(*index) + 1));
  }

  void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
    DCHECK_GT(in_flight_chunk_count_, 0u);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    --in_flight_chunk_count_;
    chunks_[index] = chunk.release();
  }

  bool IsFull() const override { return chunks_.size() >= max_chunks_; }

  size_t Size() const override {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize;
  }

  size_t Capacity() const override {
    return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize;
  }

  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
    if (handle.chunk_index >= chunks_.size())
      return NULL;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index];
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return NULL;
    return chunk->GetEventAt(handle.event_index);
  }

  const TraceBufferChunk* NextChunk() override {
    while (current_iteration_index_ < chunks_.size()) {
      // Skip in-flight chunks.
      const TraceBufferChunk* chunk = chunks_[current_iteration_index_++];
      if (chunk)
        return chunk;
    }
    return NULL;
  }

  scoped_ptr<TraceBuffer> CloneForIteration() const override {
    NOTIMPLEMENTED();
    return scoped_ptr<TraceBuffer>();
  }

  void EstimateTraceMemoryOverhead(
      TraceEventMemoryOverhead* overhead) override {
    const size_t chunks_ptr_vector_allocated_size =
        sizeof(*this) + max_chunks_ * sizeof(decltype(chunks_)::value_type);
    const size_t chunks_ptr_vector_resident_size =
        sizeof(*this) + chunks_.size() * sizeof(decltype(chunks_)::value_type);
    overhead->Add("TraceBufferVector", chunks_ptr_vector_allocated_size,
                  chunks_ptr_vector_resident_size);
    for (size_t i = 0; i < chunks_.size(); ++i) {
      TraceBufferChunk* chunk = chunks_[i];
      // Skip the in-flight (nullptr) chunks. They will be accounted by the
      // per-thread-local dumpers, see ThreadLocalEventBuffer::OnMemoryDump.
      if (chunk)
        chunk->EstimateTraceMemoryOverhead(overhead);
    }
  }

 private:
  size_t in_flight_chunk_count_;
  size_t current_iteration_index_;
  size_t max_chunks_;
  ScopedVector<TraceBufferChunk> chunks_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
};

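// A TraceBufferChunk holds a fixed-size array of TraceEvents and carries a
// sequence number; GetEventByHandle() compares that sequence number against
// the handle to reject events whose chunk has since been recycled.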
TraceBufferChunk::TraceBufferChunk(uint32 seq) : next_free_(0), seq_(seq) {}

TraceBufferChunk::~TraceBufferChunk() {}

void TraceBufferChunk::Reset(uint32 new_seq) {
  for (size_t i = 0; i < next_free_; ++i)
    chunk_[i].Reset();
  next_free_ = 0;
  seq_ = new_seq;
  cached_overhead_estimate_.reset();
}

TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
  DCHECK(!IsFull());
  *event_index = next_free_++;
  return &chunk_[*event_index];
}

scoped_ptr<TraceBufferChunk> TraceBufferChunk::Clone() const {
  scoped_ptr<TraceBufferChunk> cloned_chunk(new TraceBufferChunk(seq_));
  cloned_chunk->next_free_ = next_free_;
  for (size_t i = 0; i < next_free_; ++i)
    cloned_chunk->chunk_[i].CopyFrom(chunk_[i]);
  return cloned_chunk.Pass();
}

void TraceBufferChunk::EstimateTraceMemoryOverhead(
    TraceEventMemoryOverhead* overhead) {
  if (!cached_overhead_estimate_) {
    cached_overhead_estimate_.reset(new TraceEventMemoryOverhead);

    // When estimating the size of TraceBufferChunk, exclude the array of trace
    // events, as they are computed individually below.
    cached_overhead_estimate_->Add("TraceBufferChunk",
                                   sizeof(*this) - sizeof(chunk_));
  }

  const size_t num_cached_estimated_events =
      cached_overhead_estimate_->GetCount("TraceEvent");
  DCHECK_LE(num_cached_estimated_events, size());

  if (IsFull() && num_cached_estimated_events == size()) {
    overhead->Update(*cached_overhead_estimate_);
    return;
  }

  for (size_t i = num_cached_estimated_events; i < size(); ++i)
    chunk_[i].EstimateTraceMemoryOverhead(cached_overhead_estimate_.get());

  if (IsFull()) {
    cached_overhead_estimate_->AddSelf();
  } else {
    // The unused TraceEvents in |chunks_| are not cached. They will keep
    // changing as new TraceEvents are added to this chunk, so they are
    // computed on the fly.
    const size_t num_unused_trace_events = capacity() - size();
    overhead->Add("TraceEvent (unused)",
                  num_unused_trace_events * sizeof(TraceEvent));
  }

  overhead->Update(*cached_overhead_estimate_);
}

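// TraceResultBuffer stitches JSON trace fragments into one JSON array:
// Start() emits "[", AddFragment() emits a comma-separated fragment, and
// Finish() emits the closing "]".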
TraceResultBuffer::OutputCallback
TraceResultBuffer::SimpleOutput::GetCallback() {
  return Bind(&SimpleOutput::Append, Unretained(this));
}

void TraceResultBuffer::SimpleOutput::Append(
    const std::string& json_trace_output) {
  json_output += json_trace_output;
}

TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {}

TraceResultBuffer::~TraceResultBuffer() {}

void TraceResultBuffer::SetOutputCallback(
    const OutputCallback& json_chunk_callback) {
  output_callback_ = json_chunk_callback;
}

void TraceResultBuffer::Start() {
  append_comma_ = false;
  output_callback_.Run("[");
}

void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
  if (append_comma_)
    output_callback_.Run(",");
  append_comma_ = true;
  output_callback_.Run(trace_fragment);
}

void TraceResultBuffer::Finish() {
  output_callback_.Run("]");
}

TraceBuffer* TraceBuffer::CreateTraceBufferRingBuffer(size_t max_chunks) {
  return new TraceBufferRingBuffer(max_chunks);
}

TraceBuffer* TraceBuffer::CreateTraceBufferVectorOfSize(size_t max_chunks) {
  return new TraceBufferVector(max_chunks);
}

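// Illustrative sketch of the chunk checkout/return protocol implied by the
// interface above (hypothetical caller, not an excerpt from the tracing
// machinery):
//
//   scoped_ptr<TraceBuffer> buffer(
//       TraceBuffer::CreateTraceBufferRingBuffer(max_chunks));
//   size_t chunk_index;
//   scoped_ptr<TraceBufferChunk> chunk = buffer->GetChunk(&chunk_index);
//   size_t event_index;
//   TraceEvent* event = chunk->AddTraceEvent(&event_index);
//   // ... populate |event|, then hand the chunk back for later iteration ...
//   buffer->ReturnChunk(chunk_index, chunk.Pass());
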
}  // namespace trace_event