// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains the implementation of the command buffer helper class.

#include "gpu/command_buffer/client/cmd_buffer_helper.h"

#include <algorithm>
#include "base/logging.h"
#include "base/strings/stringprintf.h"
#include "base/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "base/trace_event/memory_allocator_dump.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/process_memory_dump.h"
#include "gpu/command_buffer/common/buffer.h"
#include "gpu/command_buffer/common/command_buffer.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/trace_event.h"

namespace gpu {
CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer)
    : command_buffer_(command_buffer),
      ring_buffer_id_(-1),
      ring_buffer_size_(0),
      entries_(NULL),
      total_entry_count_(0),
      immediate_entry_count_(0),
      token_(0),
      put_(0),
      last_put_sent_(0),
#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
      commands_issued_(0),
#endif
      usable_(true),
      context_lost_(false),
      flush_automatically_(true),
      flush_generation_(0) {
  // In certain cases, ThreadTaskRunnerHandle isn't set (Android WebView).
  // Don't register a dump provider in these cases.
  // TODO(ericrk): Get this working in Android WebView. crbug.com/517156
  if (base::ThreadTaskRunnerHandle::IsSet()) {
    base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
        this, base::ThreadTaskRunnerHandle::Get());
  }
}
void CommandBufferHelper::SetAutomaticFlushes(bool enabled) {
  flush_automatically_ = enabled;
  CalcImmediateEntries(0);
}
bool CommandBufferHelper::IsContextLost() {
  if (!context_lost_) {
    context_lost_ = error::IsError(command_buffer()->GetLastError());
  }
  return context_lost_;
}
void CommandBufferHelper::CalcImmediateEntries(int waiting_count) {
  DCHECK_GE(waiting_count, 0);

  // Check if usable & allocated.
  if (!usable() || !HaveRingBuffer()) {
    immediate_entry_count_ = 0;
    return;
  }

  // Get maximum safe contiguous entries.
  const int32 curr_get = get_offset();
  if (curr_get > put_) {
    immediate_entry_count_ = curr_get - put_ - 1;
  } else {
    immediate_entry_count_ =
        total_entry_count_ - put_ - (curr_get == 0 ? 1 : 0);
  }

  // Limit entry count to force early flushing.
  if (flush_automatically_) {
    int32 limit =
        total_entry_count_ /
        ((curr_get == last_put_sent_) ? kAutoFlushSmall : kAutoFlushBig);

    int32 pending =
        (put_ + total_entry_count_ - last_put_sent_) % total_entry_count_;

    if (pending > 0 && pending >= limit) {
      // Time to force flush.
      immediate_entry_count_ = 0;
    } else {
      // Limit remaining entries, but not lower than waiting_count entries to
      // prevent deadlock when command size is greater than the flush limit.
      limit -= pending;
      limit = limit < waiting_count ? waiting_count : limit;
      immediate_entry_count_ =
          immediate_entry_count_ > limit ? limit : immediate_entry_count_;
    }
  }
}
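// A worked example of the contiguous-entry arithmetic above (hypothetical
// numbers, not from the source): with total_entry_count_ = 1024, put_ = 1000
// and get_offset() = 100, get is behind put, so the contiguous span runs from
// put_ to the end of the buffer: 1024 - 1000 - 0 = 24 entries. If get were at
// 0, one entry would be reserved (the "- 1" term) so that put_ can never
// catch up to get, which would make a full buffer indistinguishable from an
// empty one.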
bool CommandBufferHelper::AllocateRingBuffer() {
  if (!usable()) {
    return false;
  }

  if (HaveRingBuffer()) {
    return true;
  }

  int32 id = -1;
  scoped_refptr<Buffer> buffer =
      command_buffer_->CreateTransferBuffer(ring_buffer_size_, &id);
  if (id < 0) {
    ClearUsable();
    DCHECK(error::IsError(command_buffer()->GetLastError()));
    return false;
  }

  ring_buffer_ = buffer;
  ring_buffer_id_ = id;
  command_buffer_->SetGetBuffer(id);
  entries_ = static_cast<CommandBufferEntry*>(ring_buffer_->memory());
  total_entry_count_ = ring_buffer_size_ / sizeof(CommandBufferEntry);
  // The call to SetGetBuffer(id) above resets the get and put offsets to 0,
  // so there is no need to query them through IPC.
  put_ = 0;
  CalcImmediateEntries(0);
  return true;
}
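// Sizing note (hypothetical numbers, not from the source): entries are
// fixed-size CommandBufferEntry slots, so a 1 MB ring buffer with 4-byte
// entries would yield 1048576 / 4 = 262144 entries in total_entry_count_.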
void CommandBufferHelper::FreeResources() {
  if (HaveRingBuffer()) {
    command_buffer_->DestroyTransferBuffer(ring_buffer_id_);
    ring_buffer_id_ = -1;
    CalcImmediateEntries(0);
    entries_ = nullptr;
    ring_buffer_ = nullptr;
  }
}
void CommandBufferHelper::FreeRingBuffer() {
  CHECK((put_ == get_offset()) ||
        error::IsError(command_buffer_->GetLastState().error));
  FreeResources();
}
bool CommandBufferHelper::Initialize(int32 ring_buffer_size) {
  ring_buffer_size_ = ring_buffer_size;
  return AllocateRingBuffer();
}
CommandBufferHelper::~CommandBufferHelper() {
  base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
      this);
  FreeResources();
}
bool CommandBufferHelper::WaitForGetOffsetInRange(int32 start, int32 end) {
  if (!usable()) {
    return false;
  }
  command_buffer_->WaitForGetOffsetInRange(start, end);
  return command_buffer_->GetLastError() == gpu::error::kNoError;
}
void CommandBufferHelper::Flush() {
  // Wrap put_ before flushing.
  if (put_ == total_entry_count_)
    put_ = 0;

  if (usable()) {
    last_flush_time_ = base::TimeTicks::Now();
    last_put_sent_ = put_;
    command_buffer_->Flush(put_);
    ++flush_generation_;
    CalcImmediateEntries(0);
  }
}
void CommandBufferHelper::OrderingBarrier() {
  // Wrap put_ before setting the barrier.
  if (put_ == total_entry_count_)
    put_ = 0;

  if (usable()) {
    command_buffer_->OrderingBarrier(put_);
    ++flush_generation_;
    CalcImmediateEntries(0);
  }
}
#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
void CommandBufferHelper::PeriodicFlushCheck() {
  base::TimeTicks current_time = base::TimeTicks::Now();
  if (current_time - last_flush_time_ >
      base::TimeDelta::FromMicroseconds(kPeriodicFlushDelayInMicroseconds)) {
    Flush();
  }
}
#endif
// Calls Flush() and then waits until the buffer is empty. Breaks out early
// if an error is set.
bool CommandBufferHelper::Finish() {
  TRACE_EVENT0("gpu", "CommandBufferHelper::Finish");
  if (!usable()) {
    return false;
  }
  // If there is no work, just exit.
  if (put_ == get_offset()) {
    return true;
  }
  DCHECK(HaveRingBuffer() ||
         error::IsError(command_buffer_->GetLastState().error));
  Flush();
  if (!WaitForGetOffsetInRange(put_, put_))
    return false;
  DCHECK_EQ(get_offset(), put_);

  CalcImmediateEntries(0);

  return true;
}
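// Note on Flush() vs. Finish(): Flush() only submits the pending commands and
// returns immediately, while Finish() blocks until the service has consumed
// everything (get catches up to put). A hypothetical client that must observe
// the results of all issued commands, e.g. before reading data back, would
// call Finish() rather than Flush().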
// Inserts a new token into the command stream. It uses an increasing value
// scheme so that we don't lose tokens (a token has passed if the current token
// value is higher than that token). Calls Finish() if the token value wraps,
// which will be rare.
int32 CommandBufferHelper::InsertToken() {
  AllocateRingBuffer();
  if (!usable()) {
    return token_;
  }
  DCHECK(HaveRingBuffer());
  // Increment the token as a 31-bit integer. Negative values are used to
  // signal an error.
  token_ = (token_ + 1) & 0x7FFFFFFF;
  cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
  if (cmd) {
    cmd->Init(token_);
    if (token_ == 0) {
      TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)");
      // The token wrapped.
      Finish();
      DCHECK_EQ(token_, last_token_read());
    }
  }
  return token_;
}
// Waits until the current token value is greater than or equal to the value
// passed in as an argument.
void CommandBufferHelper::WaitForToken(int32 token) {
  if (!usable() || !HaveRingBuffer()) {
    return;
  }
  // Return immediately if the corresponding InsertToken failed.
  if (token < 0)
    return;
  if (token > token_)
    return;  // The token wrapped, so it has already passed.
  if (last_token_read() >= token)
    return;
  Flush();
  command_buffer_->WaitForTokenInRange(token, token_);
}
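// Usage sketch (hypothetical client code, not part of this file): commands
// are issued, a token is inserted, and the client later blocks until the
// service has consumed everything up to that token:
//
//   int32 token = helper->InsertToken();
//   ... issue more commands ...
//   helper->WaitForToken(token);  // Returns once the token has passed.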
// Waits for available entries, basically waiting until get >= put + count + 1.
// It actually waits for contiguous entries, so it may need to wrap the buffer
// around, adding noops. Thus this function may change the value of put_. The
// function will return early if an error occurs, in which case the requested
// space may not be available.
void CommandBufferHelper::WaitForAvailableEntries(int32 count) {
  AllocateRingBuffer();
  if (!usable()) {
    return;
  }
  DCHECK(HaveRingBuffer());
  DCHECK(count < total_entry_count_);
  if (put_ + count > total_entry_count_) {
    // There's not enough room between the current put and the end of the
    // buffer, so we need to wrap. We will add noops all the way to the end,
    // but we need to make sure get wraps first -- specifically, that get is
    // 1 or more (since put will wrap to 0 after we add the noops).
    DCHECK_LE(1, put_);
    int32 curr_get = get_offset();
    if (curr_get > put_ || curr_get == 0) {
      TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries");
      Flush();
      if (!WaitForGetOffsetInRange(1, put_))
        return;
      curr_get = get_offset();
      DCHECK_LE(curr_get, put_);
      DCHECK_NE(0, curr_get);
    }

    // Insert noops to fill out the rest of the buffer.
    int32 num_entries = total_entry_count_ - put_;
    while (num_entries > 0) {
      int32 num_to_skip = std::min(CommandHeader::kMaxSize, num_entries);
      cmd::Noop::Set(&entries_[put_], num_to_skip);
      put_ += num_to_skip;
      num_entries -= num_to_skip;
    }
    put_ = 0;
  }

  // Try to get 'count' entries without flushing.
  CalcImmediateEntries(count);
  if (immediate_entry_count_ < count) {
    // Try again with a shallow Flush().
    Flush();
    CalcImmediateEntries(count);
    if (immediate_entry_count_ < count) {
      // Buffer is full. Need to wait for entries.
      TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries1");
      if (!WaitForGetOffsetInRange(put_ + count + 1, put_))
        return;
      CalcImmediateEntries(count);
      DCHECK_GE(immediate_entry_count_, count);
    }
  }
}
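// Worked example of the wrap above (hypothetical numbers, not from the
// source): with total_entry_count_ = 1024, put_ = 1000 and count = 50,
// 1000 + 50 > 1024, so the request cannot be satisfied contiguously. The
// remaining 24 entries are filled with noop commands, put_ wraps to 0, and
// the normal wait path then runs against the full request.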
// Returns the total number of free entries, without waiting. Unlike the
// contiguous computation in CalcImmediateEntries(), this also counts the
// space that wraps around past the end of the buffer.
int32 CommandBufferHelper::GetTotalFreeEntriesNoWaiting() const {
  int32 current_get_offset = get_offset();
  if (current_get_offset > put_) {
    return current_get_offset - put_ - 1;
  } else {
    return current_get_offset + total_entry_count_ - put_ -
           (current_get_offset == 0 ? 1 : 0);
  }
}
bool CommandBufferHelper::OnMemoryDump(
    const base::trace_event::MemoryDumpArgs& args,
    base::trace_event::ProcessMemoryDump* pmd) {
  if (!HaveRingBuffer())
    return true;

  const uint64 tracing_process_id =
      base::trace_event::MemoryDumpManager::GetInstance()
          ->GetTracingProcessId();

  base::trace_event::MemoryAllocatorDump* dump =
      pmd->CreateAllocatorDump(base::StringPrintf(
          "gpu/command_buffer_memory/buffer_%d", ring_buffer_id_));
  dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
                  base::trace_event::MemoryAllocatorDump::kUnitsBytes,
                  ring_buffer_size_);
  dump->AddScalar("free_size",
                  base::trace_event::MemoryAllocatorDump::kUnitsBytes,
                  GetTotalFreeEntriesNoWaiting() * sizeof(CommandBufferEntry));
  auto guid = GetBufferGUIDForTracing(tracing_process_id, ring_buffer_id_);
  const int kImportance = 2;
  pmd->CreateSharedGlobalAllocatorDump(guid);
  pmd->AddOwnershipEdge(dump->guid(), guid, kImportance);

  return true;
}

}  // namespace gpu