// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains the implementation of the command buffer helper class.

#include "gpu/command_buffer/client/cmd_buffer_helper.h"
#include "gpu/command_buffer/common/command_buffer.h"
#include "gpu/command_buffer/common/trace_event.h"

namespace gpu {

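// Tuning constants for the automatic-flush heuristic below: a flush is
// considered every kCommandsPerFlushCheck commands, and kFlushDelay (1/300 of
// a second) is the minimum interval between such periodic flushes.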
const int kCommandsPerFlushCheck = 100;
const double kFlushDelay = 1.0 / (5.0 * 60.0);

CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer)
    : command_buffer_(command_buffer),
      ring_buffer_id_(-1),
      ring_buffer_size_(0),
      entries_(NULL),
      total_entry_count_(0),
      token_(0),
      put_(0),
      last_put_sent_(0),
      commands_issued_(0),
      usable_(true),
      context_lost_(false),
      flush_automatically_(true),
      last_flush_time_(0) {
}

void CommandBufferHelper::SetAutomaticFlushes(bool enabled) {
  flush_automatically_ = enabled;
}

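// Context loss is sticky: once an error has been observed on the command
// buffer, IsContextLost() continues to return true.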
bool CommandBufferHelper::IsContextLost() {
  if (!context_lost_) {
    context_lost_ = error::IsError(command_buffer()->GetLastError());
  }
  return context_lost_;
}

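// The ring buffer is allocated lazily: InsertToken(), WaitForAvailableEntries()
// and GetSpace() all call this first, and it is a no-op once a buffer exists.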
bool CommandBufferHelper::AllocateRingBuffer() {
  if (!usable()) {
    return false;
  }

  if (HaveRingBuffer()) {
    return true;
  }

  int32 id = -1;
  Buffer buffer = command_buffer_->CreateTransferBuffer(ring_buffer_size_,
                                                        &id);
  if (id < 0) {
    ClearUsable();
    return false;
  }

  ring_buffer_ = buffer;
  ring_buffer_id_ = id;
  command_buffer_->SetGetBuffer(id);

  // TODO(gman): Do we really need to call GetState here? We know get & put = 0
  // Also do we need to check state.num_entries?
  CommandBuffer::State state = command_buffer_->GetState();
  entries_ = static_cast<CommandBufferEntry*>(ring_buffer_.ptr);
  int32 num_ring_buffer_entries =
      ring_buffer_size_ / sizeof(CommandBufferEntry);
  if (num_ring_buffer_entries > state.num_entries) {
    ClearUsable();
    return false;
  }

  total_entry_count_ = num_ring_buffer_entries;
  put_ = state.put_offset;
  return true;
}

void CommandBufferHelper::FreeResources() {
  if (HaveRingBuffer()) {
    command_buffer_->DestroyTransferBuffer(ring_buffer_id_);
    ring_buffer_id_ = -1;
  }
}

void CommandBufferHelper::FreeRingBuffer() {
  GPU_CHECK((put_ == get_offset()) ||
      error::IsError(command_buffer_->GetLastState().error));
  FreeResources();
}

bool CommandBufferHelper::Initialize(int32 ring_buffer_size) {
  ring_buffer_size_ = ring_buffer_size;
  return AllocateRingBuffer();
}

CommandBufferHelper::~CommandBufferHelper() {
  FreeResources();
}

bool CommandBufferHelper::FlushSync() {
  if (!usable()) {
    return false;
  }
  last_flush_time_ = clock();
  last_put_sent_ = put_;
  CommandBuffer::State state = command_buffer_->FlushSync(put_, get_offset());
  return state.error == error::kNoError;
}

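// Unlike FlushSync() above, Flush() only hands the current put pointer to the
// service; it does not wait for the reader to make progress.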
void CommandBufferHelper::Flush() {
  if (usable() && last_put_sent_ != put_) {
    last_flush_time_ = clock();
    last_put_sent_ = put_;
    command_buffer_->Flush(put_);
  }
}

// Calls Flush() and then waits until the buffer is empty. Break early if the
// error is set.
bool CommandBufferHelper::Finish() {
  TRACE_EVENT0("gpu", "CommandBufferHelper::Finish");
  if (!usable()) {
    return false;
  }
  // If there is no work just exit.
  if (put_ == get_offset()) {
    return true;
  }
  GPU_DCHECK(HaveRingBuffer());
  do {
    // Do not loop forever if the flush fails, meaning the command buffer reader
    // has shutdown.
    if (!FlushSync())
      return false;
  } while (put_ != get_offset());

  return true;
}

// Inserts a new token into the command stream. It uses an increasing value
// scheme so that we don't lose tokens (a token has passed if the current token
// value is higher than that token). Calls Finish() if the token value wraps,
// which will be rare.
int32 CommandBufferHelper::InsertToken() {
  AllocateRingBuffer();
  if (!usable()) {
    return token_;
  }
  GPU_DCHECK(HaveRingBuffer());
  // Increment token as 31-bit integer. Negative values are used to signal an
  // error.
  token_ = (token_ + 1) & 0x7FFFFFFF;
  cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
  if (cmd) {
    cmd->Init(token_);
    if (token_ == 0) {
      TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)");
      // We wrapped around to 0: drain the buffer so no old token can be
      // mistaken for a new one.
      Finish();
      GPU_DCHECK_EQ(token_, last_token_read());
    }
  }
  return token_;
}

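// A typical client-side pairing of InsertToken() and WaitForToken() (a sketch,
// not code from this file): insert a token after issuing commands that
// reference a shared resource, then wait on it before reusing that resource:
//   int32 token = helper->InsertToken();
//   // ... the service consumes the commands ...
//   helper->WaitForToken(token);  // The resource is now safe to reuse.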
// Waits until the current token value is greater or equal to the value passed
// in argument.
void CommandBufferHelper::WaitForToken(int32 token) {
  if (!usable() || !HaveRingBuffer()) {
    return;
  }
  // Return immediately if corresponding InsertToken failed.
  if (token < 0)
    return;
  if (token > token_) return;  // we wrapped
  while (last_token_read() < token) {
    if (get_offset() == put_) {
      GPU_LOG(FATAL) << "Empty command buffer while waiting on a token.";
      return;
    }
    // Do not loop forever if the flush fails, meaning the command buffer reader
    // has shutdown.
    if (!FlushSync())
      return;
  }
}

// Waits for available entries, basically waiting until get >= put + count + 1.
// It actually waits for contiguous entries, so it may need to wrap the buffer
// around, adding noops. Thus this function may change the value of put_. The
// function will return early if an error occurs, in which case the requested
// space may not be available.
void CommandBufferHelper::WaitForAvailableEntries(int32 count) {
  AllocateRingBuffer();
  if (!usable()) {
    return;
  }
  GPU_DCHECK(HaveRingBuffer());
  GPU_DCHECK(count < total_entry_count_);
  if (put_ + count > total_entry_count_) {
    // There's not enough room between the current put and the end of the
    // buffer, so we need to wrap. We will add noops all the way to the end,
    // but we need to make sure get wraps first, actually that get is 1 or
    // more (since put will wrap to 0 after we add the noops).
    GPU_DCHECK_LE(1, put_);
    if (get_offset() > put_ || get_offset() == 0) {
      TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries");
      while (get_offset() > put_ || get_offset() == 0) {
        // Do not loop forever if the flush fails, meaning the command buffer
        // reader has shutdown.
        if (!FlushSync())
          return;
      }
    }
    // Insert Noops to fill out the buffer.
    int32 num_entries = total_entry_count_ - put_;
    while (num_entries > 0) {
      int32 num_to_skip = std::min(CommandHeader::kMaxSize, num_entries);
      cmd::Noop::Set(&entries_[put_], num_to_skip);
      put_ += num_to_skip;
      num_entries -= num_to_skip;
    }
    put_ = 0;
  }
  if (AvailableEntries() < count) {
    TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries1");
    while (AvailableEntries() < count) {
      // Do not loop forever if the flush fails, meaning the command buffer
      // reader has shutdown.
      if (!FlushSync())
        return;
    }
  }
  // Force a flush if the buffer is getting half full, or even earlier if the
  // reader is known to be idle.
  int32 pending =
      (put_ + total_entry_count_ - last_put_sent_) % total_entry_count_;
  int32 limit = total_entry_count_ /
      ((get_offset() == last_put_sent_) ? 16 : 2);
  if (pending > limit) {
    Flush();
  } else if (flush_automatically_ &&
             (commands_issued_ % kCommandsPerFlushCheck == 0)) {
#if !defined(OS_ANDROID)
    // Allow this command buffer to be pre-empted by another if a "reasonable"
    // amount of work has been done. On highend machines, this reduces the
    // latency of GPU commands. However, on Android, this can cause the
    // kernel to thrash between generating GPU commands and executing them.
    clock_t current_time = clock();
    if (current_time - last_flush_time_ > kFlushDelay * CLOCKS_PER_SEC)
      Flush();
#endif
  }
}

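// Returns a pointer to `entries` contiguous command buffer entries at the
// current put offset, waiting for space (and wrapping put_) as needed; returns
// NULL if the helper has become unusable.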
CommandBufferEntry* CommandBufferHelper::GetSpace(uint32 entries) {
  AllocateRingBuffer();
  if (!usable()) {
    return NULL;
  }
  GPU_DCHECK(HaveRingBuffer());
  ++commands_issued_;
  WaitForAvailableEntries(entries);
  CommandBufferEntry* space = &entries_[put_];
  put_ += entries;
  GPU_DCHECK_LE(put_, total_entry_count_);
  if (put_ == total_entry_count_) {
    put_ = 0;
  }
  return space;
}

}  // namespace gpu