// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains the implementation of the command buffer helper class.

#include "gpu/command_buffer/client/cmd_buffer_helper.h"

#include <time.h>  // for clock() and CLOCKS_PER_SEC

#include <algorithm>  // for std::min

#include "base/logging.h"
#include "gpu/command_buffer/common/command_buffer.h"
#include "gpu/command_buffer/common/trace_event.h"
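
// A minimal usage sketch (illustrative only; |command_buffer| stands in for
// any concrete CommandBuffer implementation and |kNumEntries| for the number
// of entries a command needs):
//
//   gpu::CommandBufferHelper helper(command_buffer);
//   helper.Initialize(1024 * 1024);  // 1 MB ring buffer.
//   gpu::CommandBufferEntry* space = helper.GetSpace(kNumEntries);
//   // ... write kNumEntries worth of command data into |space| ...
//   int32 token = helper.InsertToken();
//   helper.Flush();
//   helper.WaitForToken(token);  // Blocks until the reader passes the token.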

namespace gpu {

// Number of commands issued between checks of the automatic-flush timer.
const int kCommandsPerFlushCheck = 100;

#if !defined(OS_ANDROID)
// Time (in seconds) after which an automatic flush is issued:
// 1 / (5 * 60) of a second, i.e. roughly 3.3 ms.
const double kFlushDelay = 1.0 / (5.0 * 60.0);
#endif

CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer)
    : command_buffer_(command_buffer),
      ring_buffer_id_(-1),
      ring_buffer_size_(0),
      entries_(NULL),
      total_entry_count_(0),
      token_(0),
      put_(0),
      last_put_sent_(0),
      commands_issued_(0),
      usable_(true),
      context_lost_(false),
      flush_automatically_(true),
      last_flush_time_(0) {
}

void CommandBufferHelper::SetAutomaticFlushes(bool enabled) {
  flush_automatically_ = enabled;
}

bool CommandBufferHelper::IsContextLost() {
  if (!context_lost_) {
    context_lost_ = error::IsError(command_buffer()->GetLastError());
  }
  return context_lost_;
}

bool CommandBufferHelper::AllocateRingBuffer() {
  if (!usable()) {
    return false;
  }

  if (HaveRingBuffer()) {
    return true;
  }

  int32 id = -1;
  Buffer buffer = command_buffer_->CreateTransferBuffer(ring_buffer_size_,
                                                        &id);
  if (id < 0) {
    ClearUsable();
    return false;
  }

  ring_buffer_ = buffer;
  ring_buffer_id_ = id;
  command_buffer_->SetGetBuffer(id);

  // TODO(gman): Do we really need to call GetState here? We know get & put = 0
  // Also do we need to check state.num_entries?
  CommandBuffer::State state = command_buffer_->GetState();
  entries_ = static_cast<CommandBufferEntry*>(ring_buffer_.ptr);
  int32 num_ring_buffer_entries =
      ring_buffer_size_ / sizeof(CommandBufferEntry);
  if (num_ring_buffer_entries > state.num_entries) {
    ClearUsable();
    return false;
  }

  total_entry_count_ = num_ring_buffer_entries;
  put_ = state.put_offset;
  return true;
}

void CommandBufferHelper::FreeResources() {
  if (HaveRingBuffer()) {
    command_buffer_->DestroyTransferBuffer(ring_buffer_id_);
    ring_buffer_id_ = -1;
  }
}

void CommandBufferHelper::FreeRingBuffer() {
  // The buffer must be empty (or the context lost) before it can be freed.
  CHECK((put_ == get_offset()) ||
        error::IsError(command_buffer_->GetLastState().error));
  FreeResources();
}

bool CommandBufferHelper::Initialize(int32 ring_buffer_size) {
  ring_buffer_size_ = ring_buffer_size;
  return AllocateRingBuffer();
}

CommandBufferHelper::~CommandBufferHelper() {
  FreeResources();
}

bool CommandBufferHelper::FlushSync() {
  if (!usable()) {
    return false;
  }
  last_flush_time_ = clock();
  last_put_sent_ = put_;
  CommandBuffer::State state = command_buffer_->FlushSync(put_, get_offset());
  return state.error == error::kNoError;
}

void CommandBufferHelper::Flush() {
  if (usable() && last_put_sent_ != put_) {
    last_flush_time_ = clock();
    last_put_sent_ = put_;
    command_buffer_->Flush(put_);
  }
}

// Calls Flush() and then waits until the buffer is empty. Breaks out early if
// an error occurs.
bool CommandBufferHelper::Finish() {
  TRACE_EVENT0("gpu", "CommandBufferHelper::Finish");
  if (!usable()) {
    return false;
  }
  // If there is no work just exit.
  if (put_ == get_offset()) {
    return true;
  }
  DCHECK(HaveRingBuffer());
  do {
    // Do not loop forever if the flush fails, meaning the command buffer
    // reader has shut down.
    if (!FlushSync())
      return false;
  } while (put_ != get_offset());

  return true;
}

// Inserts a new token into the command stream. It uses an increasing value
// scheme so that we don't lose tokens (a token has passed if the current token
// value is higher than that token). Calls Finish() if the token value wraps,
// which will be rare.
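//
// For example, once token_ has reached 0x7FFFFFFF the next insert computes
// (0x7FFFFFFF + 1) & 0x7FFFFFFF == 0: the value wraps to zero, and Finish()
// drains the buffer so that no outstanding token is mistaken for a future one.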
int32 CommandBufferHelper::InsertToken() {
  AllocateRingBuffer();
  if (!usable()) {
    return token_;
  }
  DCHECK(HaveRingBuffer());
  // Increment token as a 31-bit integer. Negative values are used to signal
  // an error.
  token_ = (token_ + 1) & 0x7FFFFFFF;
  cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
  if (cmd) {
    cmd->Init(token_);
    if (token_ == 0) {
      // The token wrapped; drain the buffer so all older tokens are retired.
      TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)");
      Finish();
      DCHECK_EQ(token_, last_token_read());
    }
  }
  return token_;
}

// Waits until the current token value is greater than or equal to the value
// passed in as an argument.
void CommandBufferHelper::WaitForToken(int32 token) {
  if (!usable() || !HaveRingBuffer()) {
    return;
  }
  // Return immediately if the corresponding InsertToken failed.
  if (token < 0)
    return;
  if (token > token_) return;  // we wrapped
  while (last_token_read() < token) {
    if (get_offset() == put_) {
      LOG(FATAL) << "Empty command buffer while waiting on a token.";
    }
    // Do not loop forever if the flush fails, meaning the command buffer
    // reader has shut down.
    if (!FlushSync())
      return;
  }
}

// Waits for available entries, basically waiting until get >= put + count + 1.
// It actually waits for contiguous entries, so it may need to wrap the buffer
// around, adding noops. Thus this function may change the value of put_. The
// function will return early if an error occurs, in which case the requested
// space may not be available.
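//
// For example (illustrative numbers only): with total_entry_count_ == 1024,
// put_ == 1000 and count == 50, there is not enough contiguous room before
// the end of the buffer, so the last 24 entries are filled with noops, put_
// wraps to 0, and the wait proceeds against the space at the start.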
void CommandBufferHelper::WaitForAvailableEntries(int32 count) {
  AllocateRingBuffer();
  if (!usable()) {
    return;
  }
  DCHECK(HaveRingBuffer());
  DCHECK(count < total_entry_count_);
  if (put_ + count > total_entry_count_) {
    // There's not enough room between the current put and the end of the
    // buffer, so we need to wrap. We will add noops all the way to the end,
    // but we first need to make sure get wraps, i.e. that get is 1 or more
    // (since put will wrap to 0 after we add the noops).
    if (get_offset() > put_ || get_offset() == 0) {
      TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries");
      while (get_offset() > put_ || get_offset() == 0) {
        // Do not loop forever if the flush fails, meaning the command buffer
        // reader has shut down.
        if (!FlushSync())
          return;
      }
    }
    // Insert noops to fill out the rest of the buffer.
    int32 num_entries = total_entry_count_ - put_;
    while (num_entries > 0) {
      int32 num_to_skip = std::min(CommandHeader::kMaxSize, num_entries);
      cmd::Noop::Set(&entries_[put_], num_to_skip);
      put_ += num_to_skip;
      num_entries -= num_to_skip;
    }
    put_ = 0;
  }
  if (AvailableEntries() < count) {
    TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries1");
    while (AvailableEntries() < count) {
      // Do not loop forever if the flush fails, meaning the command buffer
      // reader has shut down.
      if (!FlushSync())
        return;
    }
  }
  // Force a flush if the buffer is getting half full, or even earlier if the
  // reader is known to be idle.
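  //
  // For example (illustrative numbers only): with total_entry_count_ == 1024,
  // last_put_sent_ == 100 and put_ == 700, pending == 600. If the reader is
  // still working (get != last_put_sent_), limit == 1024 / 2 == 512, so
  // 600 > 512 triggers a flush; if the reader is idle, limit == 1024 / 16 ==
  // 64 and the flush happens much earlier.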
  int32 pending =
      (put_ + total_entry_count_ - last_put_sent_) % total_entry_count_;
  int32 limit = total_entry_count_ /
      ((get_offset() == last_put_sent_) ? 16 : 2);
  if (pending > limit) {
    Flush();
  } else if (flush_automatically_ &&
             (commands_issued_ % kCommandsPerFlushCheck == 0)) {
#if !defined(OS_ANDROID)
    // Allow this command buffer to be pre-empted by another if a "reasonable"
    // amount of work has been done. On high-end machines, this reduces the
    // latency of GPU commands. However, on Android, this can cause the
    // kernel to thrash between generating GPU commands and executing them.
    clock_t current_time = clock();
    if (current_time - last_flush_time_ > kFlushDelay * CLOCKS_PER_SEC)
      Flush();
#endif
  }
}

CommandBufferEntry* CommandBufferHelper::GetSpace(uint32 entries) {
  AllocateRingBuffer();
  if (!usable()) {
    return NULL;
  }
  DCHECK(HaveRingBuffer());
  ++commands_issued_;
  WaitForAvailableEntries(entries);
  CommandBufferEntry* space = &entries_[put_];
  put_ += entries;
  DCHECK_LE(put_, total_entry_count_);
  if (put_ == total_entry_count_) {
    put_ = 0;
  }
  return space;
}

}  // namespace gpu