// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains the implementation of the command buffer helper class.

#include "gpu/command_buffer/client/cmd_buffer_helper.h"

#include <algorithm>

#include "base/logging.h"
#include "base/strings/stringprintf.h"
#include "base/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "base/trace_event/memory_allocator_dump.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/process_memory_dump.h"
#include "gpu/command_buffer/common/buffer.h"
#include "gpu/command_buffer/common/command_buffer.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/trace_event.h"

namespace gpu {

CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer)
    : command_buffer_(command_buffer),
      ring_buffer_id_(-1),
      ring_buffer_size_(0),
      entries_(nullptr),
      total_entry_count_(0),
      immediate_entry_count_(0),
      token_(0),
      put_(0),
      last_put_sent_(0),
#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
      commands_issued_(0),
#endif
      usable_(true),
      context_lost_(false),
      flush_automatically_(true),
      flush_generation_(0) {
  // In certain cases, ThreadTaskRunnerHandle isn't set (Android Webview).
  // Don't register a dump provider in these cases.
  // TODO(ericrk): Get this working in Android Webview. crbug.com/517156
  if (base::ThreadTaskRunnerHandle::IsSet()) {
    base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
        this, base::ThreadTaskRunnerHandle::Get());
  }
}

void CommandBufferHelper::SetAutomaticFlushes(bool enabled) {
  flush_automatically_ = enabled;
  CalcImmediateEntries(0);
}

bool CommandBufferHelper::IsContextLost() {
  if (!context_lost_) {
    context_lost_ = error::IsError(command_buffer()->GetLastError());
  }
  return context_lost_;
}

void CommandBufferHelper::CalcImmediateEntries(int waiting_count) {
  DCHECK_GE(waiting_count, 0);

  // Check if usable & allocated.
  if (!usable() || !HaveRingBuffer()) {
    immediate_entry_count_ = 0;
    return;
  }

  // Get maximum safe contiguous entries.
  const int32 curr_get = get_offset();
  if (curr_get > put_) {
    immediate_entry_count_ = curr_get - put_ - 1;
  } else {
    immediate_entry_count_ =
        total_entry_count_ - put_ - (curr_get == 0 ? 1 : 0);
  }

  // Limit entry count to force early flushing.
  if (flush_automatically_) {
    int32 limit =
        total_entry_count_ /
        ((curr_get == last_put_sent_) ? kAutoFlushSmall : kAutoFlushBig);

    int32 pending =
        (put_ + total_entry_count_ - last_put_sent_) % total_entry_count_;

    if (pending > 0 && pending >= limit) {
      // Time to force flush.
      immediate_entry_count_ = 0;
    } else {
      // Limit remaining entries, but not lower than waiting_count entries to
      // prevent deadlock when command size is greater than the flush limit.
      limit -= pending;
      limit = limit < waiting_count ? waiting_count : limit;
      immediate_entry_count_ =
          immediate_entry_count_ > limit ? limit : immediate_entry_count_;
    }
  }
}

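// Worked example for the contiguous-entry calculation above (hypothetical
// numbers): with total_entry_count_ = 1024, get = 300 and put_ = 100, the
// largest safe contiguous run is 300 - 100 - 1 = 199 entries; the -1 keeps
// put_ from catching up to get, which would be indistinguishable from an
// empty buffer. With get = 100 and put_ = 300, the run to the buffer end is
// 1024 - 300 = 724 entries; the extra slot only needs reserving when
// get == 0, since put_ lands on 0 after wrapping.
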
bool CommandBufferHelper::AllocateRingBuffer() {
  if (!usable()) {
    return false;
  }

  if (HaveRingBuffer()) {
    return true;
  }

  int32 id = -1;
  scoped_refptr<Buffer> buffer =
      command_buffer_->CreateTransferBuffer(ring_buffer_size_, &id);
  if (id < 0) {
    ClearUsable();
    DCHECK(error::IsError(command_buffer()->GetLastError()));
    return false;
  }

  ring_buffer_ = buffer;
  ring_buffer_id_ = id;
  command_buffer_->SetGetBuffer(id);
  entries_ = static_cast<CommandBufferEntry*>(ring_buffer_->memory());
  total_entry_count_ = ring_buffer_size_ / sizeof(CommandBufferEntry);
  // Call to SetGetBuffer(id) above resets get and put offsets to 0.
  // No need to query it through IPC.
  put_ = 0;
  CalcImmediateEntries(0);
  return true;
}

void CommandBufferHelper::FreeResources() {
  if (HaveRingBuffer()) {
    command_buffer_->DestroyTransferBuffer(ring_buffer_id_);
    ring_buffer_id_ = -1;
    CalcImmediateEntries(0);
    entries_ = nullptr;
    ring_buffer_ = nullptr;
  }
}

void CommandBufferHelper::FreeRingBuffer() {
  CHECK((put_ == get_offset()) ||
        error::IsError(command_buffer_->GetLastState().error));
  FreeResources();
}

bool CommandBufferHelper::Initialize(int32 ring_buffer_size) {
  ring_buffer_size_ = ring_buffer_size;
  return AllocateRingBuffer();
}

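// Example usage (hypothetical client code; the buffer size is arbitrary):
//
//   CommandBufferHelper helper(command_buffer);
//   if (!helper.Initialize(1024 * 1024))  // ring buffer size in bytes
//     return false;  // transfer buffer allocation failed
//
// After a successful Initialize(), commands are written through
// GetCmdSpace<>() and submitted with Flush(), OrderingBarrier() or Finish().
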
CommandBufferHelper::~CommandBufferHelper() {
  base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
      this);
  FreeResources();
}

bool CommandBufferHelper::WaitForGetOffsetInRange(int32 start, int32 end) {
  if (!usable()) {
    return false;
  }
  command_buffer_->WaitForGetOffsetInRange(start, end);
  return command_buffer_->GetLastError() == gpu::error::kNoError;
}

void CommandBufferHelper::Flush() {
  // Wrap put_ before flush.
  if (put_ == total_entry_count_)
    put_ = 0;

  if (usable()) {
    last_flush_time_ = base::TimeTicks::Now();
    last_put_sent_ = put_;
    command_buffer_->Flush(put_);
    ++flush_generation_;
    CalcImmediateEntries(0);
  }
}

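// Note on submission granularity (a rough summary of the methods here):
// Flush() makes everything up to put_ visible to the service and returns
// without waiting; OrderingBarrier() below only guarantees ordering with
// respect to other command buffers, and the actual flush to the service may
// be deferred; Finish() further below is a Flush() plus a wait until the
// service has consumed the whole buffer.
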
void CommandBufferHelper::OrderingBarrier() {
  // Wrap put_ before setting the barrier.
  if (put_ == total_entry_count_)
    put_ = 0;

  if (usable()) {
    command_buffer_->OrderingBarrier(put_);
    ++flush_generation_;
    CalcImmediateEntries(0);
  }
}

#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
void CommandBufferHelper::PeriodicFlushCheck() {
  base::TimeTicks current_time = base::TimeTicks::Now();
  if (current_time - last_flush_time_ >
      base::TimeDelta::FromMicroseconds(kPeriodicFlushDelayInMicroseconds)) {
    Flush();
  }
}
#endif

// Calls Flush() and then waits until the buffer is empty. Breaks early if the
// error is set.
bool CommandBufferHelper::Finish() {
  TRACE_EVENT0("gpu", "CommandBufferHelper::Finish");
  if (!usable()) {
    return false;
  }
  // If there is no work just exit.
  if (put_ == get_offset()) {
    return true;
  }
  DCHECK(HaveRingBuffer() ||
         error::IsError(command_buffer_->GetLastState().error));
  Flush();
  if (!WaitForGetOffsetInRange(put_, put_))
    return false;
  DCHECK_EQ(get_offset(), put_);

  CalcImmediateEntries(0);

  return true;
}

// Inserts a new token into the command stream. It uses an increasing value
// scheme so that we don't lose tokens (a token has passed if the current token
// value is higher than that token). Calls Finish() if the token value wraps,
// which will be rare.
int32 CommandBufferHelper::InsertToken() {
  AllocateRingBuffer();
  if (!usable()) {
    return token_;
  }
  DCHECK(HaveRingBuffer());
  // Increment token as 31-bit integer. Negative values are used to signal an
  // error.
  token_ = (token_ + 1) & 0x7FFFFFFF;
  cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
  if (cmd) {
    cmd->Init(token_);
    if (token_ == 0) {
      TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)");
      // we wrapped
      Finish();
      DCHECK_EQ(token_, last_token_read());
    }
  }
  return token_;
}

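// Example of the round-trip this enables (hypothetical client code):
//
//   int32 token = helper.InsertToken();  // fence after the commands above
//   ...                                  // issue more work
//   helper.WaitForToken(token);          // block until the service read past it
//
// Because tokens only increase (modulo the 31-bit wrap handled above),
// last_token_read() >= token is enough to prove the fence has passed.
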
// Waits until the current token value is greater than or equal to the value
// passed in as an argument.
void CommandBufferHelper::WaitForToken(int32 token) {
  if (!usable() || !HaveRingBuffer()) {
    return;
  }
  // Return immediately if the corresponding InsertToken failed.
  if (token < 0)
    return;
  if (token > token_) return;  // we wrapped
  if (last_token_read() >= token)
    return;
  Flush();
  command_buffer_->WaitForTokenInRange(token, token_);
}

// Waits for available entries, basically waiting until get >= put + count + 1.
// It actually waits for contiguous entries, so it may need to wrap the buffer
// around, adding noops. Thus this function may change the value of put_. The
// function will return early if an error occurs, in which case the requested
// space may not be available.
void CommandBufferHelper::WaitForAvailableEntries(int32 count) {
  AllocateRingBuffer();
  if (!usable()) {
    return;
  }
  DCHECK(HaveRingBuffer());
  DCHECK(count < total_entry_count_);
  if (put_ + count > total_entry_count_) {
    // There's not enough room between the current put and the end of the
    // buffer, so we need to wrap. We will add noops all the way to the end,
    // but we need to make sure get wraps first, actually that get is 1 or
    // more (since put will wrap to 0 after we add the noops).
    DCHECK_LE(1, put_);
    int32 curr_get = get_offset();
    if (curr_get > put_ || curr_get == 0) {
      TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries");
      Flush();
      if (!WaitForGetOffsetInRange(1, put_))
        return;
      curr_get = get_offset();
      DCHECK_LE(curr_get, put_);
      DCHECK_NE(0, curr_get);
    }
    // Insert Noops to fill out the buffer.
    int32 num_entries = total_entry_count_ - put_;
    while (num_entries > 0) {
      int32 num_to_skip = std::min(CommandHeader::kMaxSize, num_entries);
      cmd::Noop::Set(&entries_[put_], num_to_skip);
      put_ += num_to_skip;
      num_entries -= num_to_skip;
    }
    put_ = 0;
  }

  // Try to get 'count' entries without flushing.
  CalcImmediateEntries(count);
  if (immediate_entry_count_ < count) {
    // Try again with a shallow Flush().
    Flush();
    CalcImmediateEntries(count);
    if (immediate_entry_count_ < count) {
      // Buffer is full. Need to wait for entries.
      TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries1");
      if (!WaitForGetOffsetInRange(put_ + count + 1, put_))
        return;
      CalcImmediateEntries(count);
      DCHECK_GE(immediate_entry_count_, count);
    }
  }
}

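// Worked example of the wrap path above (hypothetical numbers): with
// total_entry_count_ = 1024, put_ = 1000 and count = 100, put_ + count
// overruns the buffer end, so the last 24 entries are filled with noops,
// put_ wraps to 0, and the code then waits for 101 contiguous entries:
// count plus the one slot that keeps put from overtaking get.
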
int32 CommandBufferHelper::GetTotalFreeEntriesNoWaiting() const {
  int32 current_get_offset = get_offset();
  if (current_get_offset > put_) {
    return current_get_offset - put_ - 1;
  } else {
    return current_get_offset + total_entry_count_ - put_ -
           (current_get_offset == 0 ? 1 : 0);
  }
}

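// Reports the ring buffer to the memory tracing infrastructure. If the ring
// buffer id were, say, 42, this would emit an allocator dump named
// "gpu/command_buffer_memory/buffer_42" carrying the buffer's total and free
// sizes in bytes, plus an ownership edge to a cross-process GUID so the
// shared memory is attributed to exactly one process in memory-infra dumps.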
bool CommandBufferHelper::OnMemoryDump(
    const base::trace_event::MemoryDumpArgs& args,
    base::trace_event::ProcessMemoryDump* pmd) {
  if (!HaveRingBuffer())
    return true;

  const uint64 tracing_process_id =
      base::trace_event::MemoryDumpManager::GetInstance()
          ->GetTracingProcessId();

  base::trace_event::MemoryAllocatorDump* dump =
      pmd->CreateAllocatorDump(base::StringPrintf(
          "gpu/command_buffer_memory/buffer_%d", ring_buffer_id_));
  dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
                  base::trace_event::MemoryAllocatorDump::kUnitsBytes,
                  ring_buffer_size_);
  dump->AddScalar("free_size",
                  base::trace_event::MemoryAllocatorDump::kUnitsBytes,
                  GetTotalFreeEntriesNoWaiting() * sizeof(CommandBufferEntry));
  auto guid = GetBufferGUIDForTracing(tracing_process_id, ring_buffer_id_);
  const int kImportance = 2;
  pmd->CreateSharedGlobalAllocatorDump(guid);
  pmd->AddOwnershipEdge(dump->guid(), guid, kImportance);

  return true;
}

}  // namespace gpu