// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains the implementation of the command buffer helper class.

#include "gpu/command_buffer/client/cmd_buffer_helper.h"

#include <algorithm>
#include "base/logging.h"
#include "base/time/time.h"
#include "gpu/command_buffer/common/buffer.h"
#include "gpu/command_buffer/common/command_buffer.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/trace_event.h"

namespace gpu {

CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer)
    : command_buffer_(command_buffer),
      ring_buffer_id_(-1),
      ring_buffer_size_(0),
      entries_(NULL),
      total_entry_count_(0),
      immediate_entry_count_(0),
      token_(0),
      put_(0),
      last_put_sent_(0),
#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
      commands_issued_(0),
#endif
      usable_(true),
      context_lost_(false),
      flush_automatically_(true),
      flush_generation_(0) {
}

void CommandBufferHelper::SetAutomaticFlushes(bool enabled) {
  flush_automatically_ = enabled;
  CalcImmediateEntries(0);
}

bool CommandBufferHelper::IsContextLost() {
  if (!context_lost_) {
    context_lost_ = error::IsError(command_buffer()->GetLastError());
  }
  return context_lost_;
}

void CommandBufferHelper::CalcImmediateEntries(int waiting_count) {
  DCHECK_GE(waiting_count, 0);

  // Check if usable & allocated.
  if (!usable() || !HaveRingBuffer()) {
    immediate_entry_count_ = 0;
    return;
  }

  // Get maximum safe contiguous entries.
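  // Note: one entry is always left unused so that put_ == get_offset() can
  // unambiguously mean "empty"; that is what the -1 adjustments below account
  // for.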
  const int32 curr_get = get_offset();
  if (curr_get > put_) {
    immediate_entry_count_ = curr_get - put_ - 1;
  } else {
    immediate_entry_count_ =
        total_entry_count_ - put_ - (curr_get == 0 ? 1 : 0);
  }

  // Limit entry count to force early flushing.
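  // The flush threshold is total_entry_count_ / kAutoFlushSmall when the
  // consumer has already caught up with the last flush
  // (curr_get == last_put_sent_), and total_entry_count_ / kAutoFlushBig
  // otherwise. Once the entries issued since last_put_sent_ reach that
  // threshold, immediate_entry_count_ is forced to 0 so that the next request
  // for space triggers a flush (see WaitForAvailableEntries()).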
  if (flush_automatically_) {
    int32 limit =
        total_entry_count_ /
        ((curr_get == last_put_sent_) ? kAutoFlushSmall : kAutoFlushBig);

    int32 pending =
        (put_ + total_entry_count_ - last_put_sent_) % total_entry_count_;

    if (pending > 0 && pending >= limit) {
      // Time to force flush.
      immediate_entry_count_ = 0;
    } else {
      // Limit remaining entries, but not lower than waiting_count entries to
      // prevent deadlock when command size is greater than the flush limit.
      limit -= pending;
      limit = limit < waiting_count ? waiting_count : limit;
      immediate_entry_count_ =
          immediate_entry_count_ > limit ? limit : immediate_entry_count_;
    }
  }
}

bool CommandBufferHelper::AllocateRingBuffer() {
  if (!usable()) {
    return false;
  }

  if (HaveRingBuffer()) {
    return true;
  }

  int32 id = -1;
  scoped_refptr<Buffer> buffer =
      command_buffer_->CreateTransferBuffer(ring_buffer_size_, &id);
  if (id < 0) {
    ClearUsable();
    DCHECK(error::IsError(command_buffer()->GetLastError()));
    return false;
  }

  ring_buffer_ = buffer;
  ring_buffer_id_ = id;
  command_buffer_->SetGetBuffer(id);
  entries_ = static_cast<CommandBufferEntry*>(ring_buffer_->memory());
  total_entry_count_ = ring_buffer_size_ / sizeof(CommandBufferEntry);
  // The call to SetGetBuffer(id) above resets the get and put offsets to 0,
  // so there is no need to query them through IPC.
  put_ = 0;
  CalcImmediateEntries(0);
  return true;
}

void CommandBufferHelper::FreeResources() {
  if (HaveRingBuffer()) {
    command_buffer_->DestroyTransferBuffer(ring_buffer_id_);
    ring_buffer_id_ = -1;
    CalcImmediateEntries(0);
    entries_ = nullptr;
    ring_buffer_ = nullptr;
  }
}
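
// Freeing the ring buffer is only safe once the service has consumed
// everything that was submitted (put_ == get_offset()) or the context is
// already in an error state; the CHECK below enforces this.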
void CommandBufferHelper::FreeRingBuffer() {
  CHECK((put_ == get_offset()) ||
        error::IsError(command_buffer_->GetLastState().error));
  FreeResources();
}

bool CommandBufferHelper::Initialize(int32 ring_buffer_size) {
  ring_buffer_size_ = ring_buffer_size;
  return AllocateRingBuffer();
}

CommandBufferHelper::~CommandBufferHelper() {
  FreeResources();
}

bool CommandBufferHelper::WaitForGetOffsetInRange(int32 start, int32 end) {
  if (!usable()) {
    return false;
  }
  command_buffer_->WaitForGetOffsetInRange(start, end);
  return command_buffer_->GetLastError() == gpu::error::kNoError;
}

void CommandBufferHelper::Flush() {
  // Wrap put_ before flush.
  if (put_ == total_entry_count_)
    put_ = 0;

  if (usable()) {
    last_flush_time_ = base::TimeTicks::Now();
    last_put_sent_ = put_;
    command_buffer_->Flush(put_);
    ++flush_generation_;
    CalcImmediateEntries(0);
  }
}
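
// Unlike Flush(), OrderingBarrier() does not update last_flush_time_ or
// last_put_sent_, so it does not reset the auto-flush / periodic-flush
// bookkeeping; it only passes the current put_ to
// CommandBuffer::OrderingBarrier().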
void CommandBufferHelper::OrderingBarrier() {
  // Wrap put_ before setting the barrier.
  if (put_ == total_entry_count_)
    put_ = 0;

  if (usable()) {
    command_buffer_->OrderingBarrier(put_);
    ++flush_generation_;
    CalcImmediateEntries(0);
  }
}

#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
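// Flushes if more than kPeriodicFlushDelayInMicroseconds have elapsed since
// the last flush, so that commands issued at a low rate are still submitted
// in a timely fashion.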
void CommandBufferHelper::PeriodicFlushCheck() {
  base::TimeTicks current_time = base::TimeTicks::Now();
  if (current_time - last_flush_time_ >
      base::TimeDelta::FromMicroseconds(kPeriodicFlushDelayInMicroseconds)) {
    Flush();
  }
}
#endif

// Calls Flush() and then waits until the buffer is empty. Breaks early if an
// error is set.
bool CommandBufferHelper::Finish() {
  TRACE_EVENT0("gpu", "CommandBufferHelper::Finish");
  if (!usable()) {
    return false;
  }
  // If there is no work just exit.
  if (put_ == get_offset()) {
    return true;
  }
  DCHECK(HaveRingBuffer() ||
         error::IsError(command_buffer_->GetLastState().error));
  Flush();
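  // Waiting for get to reach put_ means every entry submitted so far has been
  // consumed by the service.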
  if (!WaitForGetOffsetInRange(put_, put_))
    return false;
  DCHECK_EQ(get_offset(), put_);

  CalcImmediateEntries(0);

  return true;
}

// Inserts a new token into the command stream. It uses an increasing value
// scheme so that we don't lose tokens (a token has passed if the current token
// value is higher than that token). Calls Finish() if the token value wraps,
// which will be rare.
int32 CommandBufferHelper::InsertToken() {
  AllocateRingBuffer();
  if (!usable()) {
    return token_;
  }
  DCHECK(HaveRingBuffer());
  // Increment token as 31-bit integer. Negative values are used to signal an
  // error.
  token_ = (token_ + 1) & 0x7FFFFFFF;
  cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
  if (cmd) {
    cmd->Init(token_);
    if (token_ == 0) {
      TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)");
      // The token wrapped to 0; Finish() guarantees every previously inserted
      // token has been read before values are reused.
      Finish();
      DCHECK_EQ(token_, last_token_read());
    }
  }
  return token_;
}

// Waits until the current token value is greater than or equal to the value
// passed in as an argument.
void CommandBufferHelper::WaitForToken(int32 token) {
  if (!usable() || !HaveRingBuffer()) {
    return;
  }
  // Return immediately if the corresponding InsertToken() failed.
  if (token < 0)
    return;
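  // If the requested token is above the last one inserted, the token counter
  // wrapped after it was issued; InsertToken() calls Finish() on wrap, so the
  // wait is already satisfied.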
  if (token > token_) return;  // we wrapped
  if (last_token_read() >= token)
    return;
  Flush();
  command_buffer_->WaitForTokenInRange(token, token_);
}

// Waits for available entries, basically waiting until get >= put + count + 1.
// It actually waits for contiguous entries, so it may need to wrap the buffer
// around, adding noops. Thus this function may change the value of put_. The
// function will return early if an error occurs, in which case the requested
// space may not be available.
void CommandBufferHelper::WaitForAvailableEntries(int32 count) {
  AllocateRingBuffer();
  if (!usable()) {
    return;
  }
  DCHECK(HaveRingBuffer());
  DCHECK(count < total_entry_count_);
  if (put_ + count > total_entry_count_) {
    // There's not enough room between the current put and the end of the
    // buffer, so we need to wrap. We will add noops all the way to the end,
    // but we need to make sure get wraps first, i.e. that get is 1 or more
    // (since put will wrap to 0 after we add the noops).
    DCHECK_LE(1, put_);
    int32 curr_get = get_offset();
    if (curr_get > put_ || curr_get == 0) {
      TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries");
      Flush();
      if (!WaitForGetOffsetInRange(1, put_))
        return;
      curr_get = get_offset();
      DCHECK_LE(curr_get, put_);
      DCHECK_NE(0, curr_get);
    }
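    // A single Noop encodes its size in its command header, so it can skip at
    // most CommandHeader::kMaxSize entries; larger gaps are filled with
    // several of them.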
    // Insert Noops to fill out the buffer.
    int32 num_entries = total_entry_count_ - put_;
    while (num_entries > 0) {
      int32 num_to_skip = std::min(CommandHeader::kMaxSize, num_entries);
      cmd::Noop::Set(&entries_[put_], num_to_skip);
      put_ += num_to_skip;
      num_entries -= num_to_skip;
    }
    put_ = 0;
  }

  // Try to get 'count' entries without flushing.
  CalcImmediateEntries(count);
  if (immediate_entry_count_ < count) {
    // Try again with a shallow Flush().
    Flush();
    CalcImmediateEntries(count);
    if (immediate_entry_count_ < count) {
      // Buffer is full. Need to wait for entries.
      TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries1");
      if (!WaitForGetOffsetInRange(put_ + count + 1, put_))
        return;
      CalcImmediateEntries(count);
      DCHECK_GE(immediate_entry_count_, count);
    }
  }
}

}  // namespace gpu