1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 // This file contains the implementation of the command buffer helper class.
7 #include "gpu/command_buffer/client/cmd_buffer_helper.h"
9 #include "base/logging.h"
10 #include "base/time/time.h"
11 #include "gpu/command_buffer/common/command_buffer.h"
12 #include "gpu/command_buffer/common/trace_event.h"
16 CommandBufferHelper::CommandBufferHelper(CommandBuffer
* command_buffer
)
17 : command_buffer_(command_buffer
),
21 total_entry_count_(0),
22 immediate_entry_count_(0),
26 #if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
31 flush_automatically_(true),
32 flush_generation_(0) {
35 void CommandBufferHelper::SetAutomaticFlushes(bool enabled
) {
36 flush_automatically_
= enabled
;
37 CalcImmediateEntries(0);
40 bool CommandBufferHelper::IsContextLost() {
42 context_lost_
= error::IsError(command_buffer()->GetLastError());
47 void CommandBufferHelper::CalcImmediateEntries(int waiting_count
) {
48 DCHECK_GE(waiting_count
, 0);
50 // Check if usable & allocated.
51 if (!usable() || !HaveRingBuffer()) {
52 immediate_entry_count_
= 0;
56 // Get maximum safe contiguous entries.
57 const int32 curr_get
= get_offset();
58 if (curr_get
> put_
) {
59 immediate_entry_count_
= curr_get
- put_
- 1;
61 immediate_entry_count_
=
62 total_entry_count_
- put_
- (curr_get
== 0 ? 1 : 0);
65 // Limit entry count to force early flushing.
66 if (flush_automatically_
) {
69 ((curr_get
== last_put_sent_
) ? kAutoFlushSmall
: kAutoFlushBig
);
72 (put_
+ total_entry_count_
- last_put_sent_
) % total_entry_count_
;
74 if (pending
> 0 && pending
>= limit
) {
75 // Time to force flush.
76 immediate_entry_count_
= 0;
78 // Limit remaining entries, but not lower than waiting_count entries to
79 // prevent deadlock when command size is greater than the flush limit.
81 limit
= limit
< waiting_count
? waiting_count
: limit
;
82 immediate_entry_count_
=
83 immediate_entry_count_
> limit
? limit
: immediate_entry_count_
;
88 bool CommandBufferHelper::AllocateRingBuffer() {
93 if (HaveRingBuffer()) {
98 scoped_refptr
<Buffer
> buffer
=
99 command_buffer_
->CreateTransferBuffer(ring_buffer_size_
, &id
);
105 ring_buffer_
= buffer
;
106 ring_buffer_id_
= id
;
107 command_buffer_
->SetGetBuffer(id
);
108 entries_
= static_cast<CommandBufferEntry
*>(ring_buffer_
->memory());
109 total_entry_count_
= ring_buffer_size_
/ sizeof(CommandBufferEntry
);
110 // Call to SetGetBuffer(id) above resets get and put offsets to 0.
111 // No need to query it through IPC.
113 CalcImmediateEntries(0);
117 void CommandBufferHelper::FreeResources() {
118 if (HaveRingBuffer()) {
119 command_buffer_
->DestroyTransferBuffer(ring_buffer_id_
);
120 ring_buffer_id_
= -1;
121 CalcImmediateEntries(0);
125 void CommandBufferHelper::FreeRingBuffer() {
126 CHECK((put_
== get_offset()) ||
127 error::IsError(command_buffer_
->GetLastState().error
));
131 bool CommandBufferHelper::Initialize(int32 ring_buffer_size
) {
132 ring_buffer_size_
= ring_buffer_size
;
133 return AllocateRingBuffer();
136 CommandBufferHelper::~CommandBufferHelper() {
140 bool CommandBufferHelper::WaitForGetOffsetInRange(int32 start
, int32 end
) {
144 command_buffer_
->WaitForGetOffsetInRange(start
, end
);
145 return command_buffer_
->GetLastError() == gpu::error::kNoError
;
148 void CommandBufferHelper::Flush() {
149 // Wrap put_ before flush.
150 if (put_
== total_entry_count_
)
153 if (usable() && last_put_sent_
!= put_
) {
154 last_flush_time_
= base::TimeTicks::Now();
155 last_put_sent_
= put_
;
156 command_buffer_
->Flush(put_
);
158 CalcImmediateEntries(0);
#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
// Flushes if more than kPeriodicFlushDelayInMicroseconds have elapsed since
// the last flush, so long-running command streams still reach the service.
void CommandBufferHelper::PeriodicFlushCheck() {
  base::TimeTicks current_time = base::TimeTicks::Now();
  if (current_time - last_flush_time_ >
      base::TimeDelta::FromMicroseconds(kPeriodicFlushDelayInMicroseconds)) {
    Flush();
  }
}
#endif
172 // Calls Flush() and then waits until the buffer is empty. Break early if the
174 bool CommandBufferHelper::Finish() {
175 TRACE_EVENT0("gpu", "CommandBufferHelper::Finish");
179 // If there is no work just exit.
180 if (put_
== get_offset()) {
183 DCHECK(HaveRingBuffer());
185 if (!WaitForGetOffsetInRange(put_
, put_
))
187 DCHECK_EQ(get_offset(), put_
);
189 CalcImmediateEntries(0);
194 // Inserts a new token into the command stream. It uses an increasing value
195 // scheme so that we don't lose tokens (a token has passed if the current token
196 // value is higher than that token). Calls Finish() if the token value wraps,
197 // which will be rare.
198 int32
CommandBufferHelper::InsertToken() {
199 AllocateRingBuffer();
203 DCHECK(HaveRingBuffer());
204 // Increment token as 31-bit integer. Negative values are used to signal an
206 token_
= (token_
+ 1) & 0x7FFFFFFF;
207 cmd::SetToken
* cmd
= GetCmdSpace
<cmd::SetToken
>();
211 TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)");
214 DCHECK_EQ(token_
, last_token_read());
220 // Waits until the current token value is greater or equal to the value passed
222 void CommandBufferHelper::WaitForToken(int32 token
) {
223 if (!usable() || !HaveRingBuffer()) {
226 // Return immediately if corresponding InsertToken failed.
229 if (token
> token_
) return; // we wrapped
230 if (last_token_read() >= token
)
233 command_buffer_
->WaitForTokenInRange(token
, token_
);
236 // Waits for available entries, basically waiting until get >= put + count + 1.
237 // It actually waits for contiguous entries, so it may need to wrap the buffer
238 // around, adding a noops. Thus this function may change the value of put_. The
239 // function will return early if an error occurs, in which case the available
240 // space may not be available.
241 void CommandBufferHelper::WaitForAvailableEntries(int32 count
) {
242 AllocateRingBuffer();
246 DCHECK(HaveRingBuffer());
247 DCHECK(count
< total_entry_count_
);
248 if (put_
+ count
> total_entry_count_
) {
249 // There's not enough room between the current put and the end of the
250 // buffer, so we need to wrap. We will add noops all the way to the end,
251 // but we need to make sure get wraps first, actually that get is 1 or
252 // more (since put will wrap to 0 after we add the noops).
254 int32 curr_get
= get_offset();
255 if (curr_get
> put_
|| curr_get
== 0) {
256 TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries");
258 if (!WaitForGetOffsetInRange(1, put_
))
260 curr_get
= get_offset();
261 DCHECK_LE(curr_get
, put_
);
262 DCHECK_NE(0, curr_get
);
264 // Insert Noops to fill out the buffer.
265 int32 num_entries
= total_entry_count_
- put_
;
266 while (num_entries
> 0) {
267 int32 num_to_skip
= std::min(CommandHeader::kMaxSize
, num_entries
);
268 cmd::Noop::Set(&entries_
[put_
], num_to_skip
);
270 num_entries
-= num_to_skip
;
275 // Try to get 'count' entries without flushing.
276 CalcImmediateEntries(count
);
277 if (immediate_entry_count_
< count
) {
278 // Try again with a shallow Flush().
280 CalcImmediateEntries(count
);
281 if (immediate_entry_count_
< count
) {
282 // Buffer is full. Need to wait for entries.
283 TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries1");
284 if (!WaitForGetOffsetInRange(put_
+ count
+ 1, put_
))
286 CalcImmediateEntries(count
);
287 DCHECK_GE(immediate_entry_count_
, count
);