1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 // This file contains the implementation of the command buffer helper class.
7 #include "gpu/command_buffer/client/cmd_buffer_helper.h"
10 #include "base/logging.h"
11 #include "base/time/time.h"
12 #include "gpu/command_buffer/common/buffer.h"
13 #include "gpu/command_buffer/common/command_buffer.h"
14 #include "gpu/command_buffer/common/constants.h"
15 #include "gpu/command_buffer/common/trace_event.h"
// Constructs a helper bound to |command_buffer|. No ring buffer exists yet;
// it is created later via Initialize()/AllocateRingBuffer().
// NOTE(review): several member initializers appear to be missing from this
// extraction (the embedded original-line numbering skips 21-23, 26-28 and
// 30-33), so this initializer list is incomplete as shown.
19 CommandBufferHelper::CommandBufferHelper(CommandBuffer
* command_buffer
)
// Non-owning pointer to the CommandBuffer this helper drives.
20 : command_buffer_(command_buffer
),
// Zero entries until a ring buffer is allocated.
24 total_entry_count_(0),
25 immediate_entry_count_(0),
// Periodic-flush bookkeeping only exists in builds with this define set.
29 #if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
// Automatic flushing defaults to on; flush_generation_ counts flushes.
34 flush_automatically_(true),
35 flush_generation_(0) {
38 void CommandBufferHelper::SetAutomaticFlushes(bool enabled
) {
39 flush_automatically_
= enabled
;
40 CalcImmediateEntries(0);
// Returns whether the context has been lost, caching the command buffer's
// latest error state.
// NOTE(review): the original-line numbering skips 44 and 46-48 here, so the
// guard and return logic around this assignment are not visible in this
// extraction.
43 bool CommandBufferHelper::IsContextLost() {
// Latch the error state queried from the command buffer.
45 context_lost_
= error::IsError(command_buffer()->GetLastError());
// Recomputes immediate_entry_count_: how many command entries may be written
// before the caller must flush or wait. |waiting_count| is the minimum number
// of entries the caller needs; it acts as a floor so the forced-flush limit
// cannot deadlock a command larger than the flush threshold.
// NOTE(review): this extraction drops several original lines (52, 56-58, 63,
// 66-67, 70-71, 73-74, 76, 80, 83 and 87-90), so early returns, else
// branches, closing braces and the declarations of |limit| and |pending| are
// not visible here.
50 void CommandBufferHelper::CalcImmediateEntries(int waiting_count
) {
51 DCHECK_GE(waiting_count
, 0);
53 // Check if usable & allocated.
// With no usable ring buffer, nothing can be written at all.
54 if (!usable() || !HaveRingBuffer()) {
55 immediate_entry_count_
= 0;
59 // Get maximum safe contiguous entries.
60 const int32 curr_get
= get_offset();
// When get is ahead of put, writable space is the gap minus one entry
// (put must never fully catch up to get).
61 if (curr_get
> put_
) {
62 immediate_entry_count_
= curr_get
- put_
- 1;
// Otherwise only the run from put to the buffer end is contiguous; reserve
// one entry when get sits at offset 0 so put cannot wrap onto it.
64 immediate_entry_count_
=
65 total_entry_count_
- put_
- (curr_get
== 0 ? 1 : 0);
68 // Limit entry count to force early flushing.
69 if (flush_automatically_
) {
// Use the small threshold when get has caught up with last_put_sent_
// (nothing outstanding since the last flush), the big one otherwise.
72 ((curr_get
== last_put_sent_
) ? kAutoFlushSmall
: kAutoFlushBig
);
// Entries written since the last flush, accounting for buffer wrap.
75 (put_
+ total_entry_count_
- last_put_sent_
) % total_entry_count_
;
// Enough has accumulated: force the caller to flush before writing more.
77 if (pending
> 0 && pending
>= limit
) {
78 // Time to force flush.
79 immediate_entry_count_
= 0;
81 // Limit remaining entries, but not lower than waiting_count entries to
82 // prevent deadlock when command size is greater than the flush limit.
84 limit
= limit
< waiting_count
? waiting_count
: limit
;
// Clamp the computed contiguous space to the (possibly raised) limit.
85 immediate_entry_count_
=
86 immediate_entry_count_
> limit
? limit
: immediate_entry_count_
;
// Creates the shared-memory transfer buffer that backs the command ring,
// hands it to the CommandBuffer as the get buffer, and derives entries_ and
// total_entry_count_ from it.
// NOTE(review): original lines 92-95, 97-100, 103-104, 106-108, 116 and
// 118-120 are not visible in this extraction — including the declaration of
// |id| and the success/failure return paths.
91 bool CommandBufferHelper::AllocateRingBuffer() {
// Already allocated: nothing more to do.
96 if (HaveRingBuffer()) {
101 scoped_refptr
<Buffer
> buffer
=
102 command_buffer_
->CreateTransferBuffer(ring_buffer_size_
, &id
);
// An allocation failure implies the command buffer latched an error.
105 DCHECK(error::IsError(command_buffer()->GetLastError()));
// Keep a reference to the buffer and remember its id for later teardown.
109 ring_buffer_
= buffer
;
110 ring_buffer_id_
= id
;
// Tell the service which buffer commands will be read from.
111 command_buffer_
->SetGetBuffer(id
);
// View the shared memory as an array of command entries.
112 entries_
= static_cast<CommandBufferEntry
*>(ring_buffer_
->memory());
113 total_entry_count_
= ring_buffer_size_
/ sizeof(CommandBufferEntry
);
114 // Call to SetGetBuffer(id) above resets get and put offsets to 0.
115 // No need to query it through IPC.
117 CalcImmediateEntries(0);
// Releases the transfer buffer backing the ring (if any) and resets the
// cached state so HaveRingBuffer() becomes false.
// NOTE(review): original lines 126 and 128-129 are not visible in this
// extraction (presumably the closing braces around/after the if).
121 void CommandBufferHelper::FreeResources() {
122 if (HaveRingBuffer()) {
// Return the shared memory to the command buffer.
123 command_buffer_
->DestroyTransferBuffer(ring_buffer_id_
);
// -1 marks "no buffer allocated".
124 ring_buffer_id_
= -1;
125 CalcImmediateEntries(0);
// Drop our reference to the buffer memory.
127 ring_buffer_
= nullptr;
// Frees the ring buffer. The CHECK enforces the precondition: either the
// service has consumed everything written so far (put_ == get), or the
// context is already in error, in which case pending commands no longer
// matter.
// NOTE(review): original lines 134-136 are not visible in this extraction,
// so the statement that actually releases the buffer does not appear here.
131 void CommandBufferHelper::FreeRingBuffer() {
132 CHECK((put_
== get_offset()) ||
133 error::IsError(command_buffer_
->GetLastState().error
));
137 bool CommandBufferHelper::Initialize(int32 ring_buffer_size
) {
138 ring_buffer_size_
= ring_buffer_size
;
139 return AllocateRingBuffer();
// Destructor.
// NOTE(review): the body (original lines 143-145) is not visible in this
// extraction; presumably it releases the ring buffer — confirm against the
// full source.
142 CommandBufferHelper::~CommandBufferHelper() {
// Blocks until the service's get offset lies within [start, end], then
// reports whether the command buffer is still error-free.
// NOTE(review): original lines 147-149 and 152-153 are not visible in this
// extraction.
146 bool CommandBufferHelper::WaitForGetOffsetInRange(int32 start
, int32 end
) {
// Synchronous call into the command buffer; returns once get is in range
// (or the buffer enters an error state).
150 command_buffer_
->WaitForGetOffsetInRange(start
, end
);
// Success means no error was latched while waiting.
151 return command_buffer_
->GetLastError() == gpu::error::kNoError
;
// Sends all commands buffered so far (up to put_) to the service.
// NOTE(review): original lines 157-159, 163 and 165-166 are not visible in
// this extraction; in particular the statement executed when put_ has
// reached total_entry_count_ (the wrap itself) is missing here.
154 void CommandBufferHelper::Flush() {
155 // Wrap put_ before flush.
156 if (put_
== total_entry_count_
)
// Record when and up to where the last flush happened, for the periodic
// and automatic flush bookkeeping.
160 last_flush_time_
= base::TimeTicks::Now();
161 last_put_sent_
= put_
;
// Hand the new put offset to the service.
162 command_buffer_
->Flush(put_
);
// Available-entry count depends on last_put_sent_, so recompute it.
164 CalcImmediateEntries(0);
// Inserts an ordering barrier at the current put_ so the commands issued so
// far are ordered relative to other work, without performing a full flush.
// NOTE(review): original lines 171-173 and 175 are not visible in this
// extraction — including the statement guarded by the wrap check.
168 void CommandBufferHelper::OrderingBarrier() {
169 // Wrap put_ before setting the barrier.
170 if (put_
== total_entry_count_
)
// Publish the barrier at the current put offset.
174 command_buffer_
->OrderingBarrier(put_
);
176 CalcImmediateEntries(0);
// Compiled in only when periodic flush checking is enabled: flushes when more
// than kPeriodicFlushDelayInMicroseconds has elapsed since the last flush.
// NOTE(review): original lines 185-189 (the branch body — presumably the
// flush — and the function close) are not visible in this extraction.
180 #if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
181 void CommandBufferHelper::PeriodicFlushCheck() {
182 base::TimeTicks current_time
= base::TimeTicks::Now();
// Compare elapsed time against the configured flush interval.
183 if (current_time
- last_flush_time_
>
184 base::TimeDelta::FromMicroseconds(kPeriodicFlushDelayInMicroseconds
)) {
190 // Calls Flush() and then waits until the buffer is empty. Break early if the
// NOTE(review): original lines 191, 194-196, 199-200, 202, 204, 206 and
// 208-210 are not visible in this extraction — including the Flush() call,
// the early-exit body and the function's return statements.
192 bool CommandBufferHelper::Finish() {
193 TRACE_EVENT0("gpu", "CommandBufferHelper::Finish");
197 // If there is no work just exit.
198 if (put_
== get_offset()) {
201 DCHECK(HaveRingBuffer());
// Get reaching put_ means the service has consumed everything submitted.
203 if (!WaitForGetOffsetInRange(put_
, put_
))
205 DCHECK_EQ(get_offset(), put_
);
207 CalcImmediateEntries(0);
212 // Inserts a new token into the command stream. It uses an increasing value
213 // scheme so that we don't lose tokens (a token has passed if the current token
214 // value is higher than that token). Calls Finish() if the token value wraps,
215 // which will be rare.
// NOTE(review): original lines 218-220, 223, 226-228, 230-231 and 233-236
// are not visible in this extraction — including the wrap handling around
// the TRACE_EVENT0 below and the return of the new token value.
216 int32
CommandBufferHelper::InsertToken() {
// Make sure a ring buffer exists before reserving command space.
217 AllocateRingBuffer();
221 DCHECK(HaveRingBuffer());
222 // Increment token as 31-bit integer. Negative values are used to signal an
// Mask to 31 bits so the value stays non-negative.
224 token_
= (token_
+ 1) & 0x7FFFFFFF;
// Reserve ring-buffer space for a SetToken command.
225 cmd::SetToken
* cmd
= GetCmdSpace
<cmd::SetToken
>();
229 TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)");
// After a wrap the stream is drained, so the last token the service read
// must equal the freshly assigned token_.
232 DCHECK_EQ(token_
, last_token_read());
238 // Waits until the current token value is greater or equal to the value passed
// NOTE(review): original lines 239, 242-243, 245-246, 249-250 and 252-253
// are not visible in this extraction (early-return bodies and the function
// close).
240 void CommandBufferHelper::WaitForToken(int32 token
) {
// Nothing to wait on without a usable ring buffer.
241 if (!usable() || !HaveRingBuffer()) {
244 // Return immediately if corresponding InsertToken failed.
// A requested token greater than the last one issued means the counter
// wrapped; the wait is vacuously satisfied.
247 if (token
> token_
) return; // we wrapped
// Already passed: the service has read a token at least this large.
248 if (last_token_read() >= token
)
// Block until the service's token falls within [token, token_].
251 command_buffer_
->WaitForTokenInRange(token
, token_
);
254 // Waits for available entries, basically waiting until get >= put + count + 1.
255 // It actually waits for contiguous entries, so it may need to wrap the buffer
256 // around, adding a noops. Thus this function may change the value of put_. The
257 // function will return early if an error occurs, in which case the available
258 // space may not be available.
// NOTE(review): original lines 261-263, 271, 275, 277, 281, 287, 289-292,
// 297 and 303 are not visible in this extraction, and the function continues
// past original line 305 beyond the end of this chunk — the early returns,
// the put_ advance inside the noop loop, the put_ wrap to 0 and the shallow
// Flush() call are among the missing statements.
259 void CommandBufferHelper::WaitForAvailableEntries(int32 count
) {
// Make sure a ring buffer exists before computing space in it.
260 AllocateRingBuffer();
264 DCHECK(HaveRingBuffer());
// A single request must be smaller than the whole ring.
265 DCHECK(count
< total_entry_count_
);
// Not enough contiguous room before the end of the buffer: pad with noops
// to the end and wrap put_ back to 0.
266 if (put_
+ count
> total_entry_count_
) {
267 // There's not enough room between the current put and the end of the
268 // buffer, so we need to wrap. We will add noops all the way to the end,
269 // but we need to make sure get wraps first, actually that get is 1 or
270 // more (since put will wrap to 0 after we add the noops).
272 int32 curr_get
= get_offset();
// Get must be strictly between 0 and put_ before we may wrap; otherwise
// wait for the service to make progress into that range.
273 if (curr_get
> put_
|| curr_get
== 0) {
274 TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries");
276 if (!WaitForGetOffsetInRange(1, put_
))
// Re-read get after the wait and re-verify the wrap precondition.
278 curr_get
= get_offset();
279 DCHECK_LE(curr_get
, put_
);
280 DCHECK_NE(0, curr_get
);
282 // Insert Noops to fill out the buffer.
283 int32 num_entries
= total_entry_count_
- put_
;
// Each noop command can skip at most kMaxSize entries, so emit as many
// noops as needed to consume the remainder of the buffer.
284 while (num_entries
> 0) {
285 int32 num_to_skip
= std::min(CommandHeader::kMaxSize
, num_entries
);
286 cmd::Noop::Set(&entries_
[put_
], num_to_skip
);
288 num_entries
-= num_to_skip
;
293 // Try to get 'count' entries without flushing.
294 CalcImmediateEntries(count
);
// Not enough room yet: escalate — first a flush, then a blocking wait.
295 if (immediate_entry_count_
< count
) {
296 // Try again with a shallow Flush().
298 CalcImmediateEntries(count
);
299 if (immediate_entry_count_
< count
) {
300 // Buffer is full. Need to wait for entries.
301 TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries1");
// Wait until get passes put_ + count so the space becomes contiguous.
302 if (!WaitForGetOffsetInRange(put_
+ count
+ 1, put_
))
304 CalcImmediateEntries(count
);
// After a successful wait the requested space must be available.
305 DCHECK_GE(immediate_entry_count_
, count
);