gpu/command_buffer/client/cmd_buffer_helper.cc
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains the implementation of the command buffer helper class.

#include "gpu/command_buffer/client/cmd_buffer_helper.h"
#include "gpu/command_buffer/common/command_buffer.h"
#include "gpu/command_buffer/common/trace_event.h"
namespace gpu {

namespace {
const int kCommandsPerFlushCheck = 100;
const double kFlushDelay = 1.0 / (5.0 * 60.0);
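// Descriptive note (added): these constants throttle the automatic flush in
// WaitForAvailableEntries() below -- the elapsed-time check only runs once
// every kCommandsPerFlushCheck commands, and a flush is issued once more than
// kFlushDelay seconds (1/300 of a second here) have passed since the last one.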
}  // namespace

CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer)
    : command_buffer_(command_buffer),
      ring_buffer_id_(-1),
      ring_buffer_size_(0),
      entries_(NULL),
      total_entry_count_(0),
      token_(0),
      put_(0),
      last_put_sent_(0),
      commands_issued_(0),
      usable_(true),
      context_lost_(false),
      flush_automatically_(true),
      last_flush_time_(0) {
}
void CommandBufferHelper::SetAutomaticFlushes(bool enabled) {
  flush_automatically_ = enabled;
}
bool CommandBufferHelper::IsContextLost() {
  if (!context_lost_) {
    context_lost_ = error::IsError(command_buffer()->GetLastError());
  }
  return context_lost_;
}
bool CommandBufferHelper::AllocateRingBuffer() {
  if (!usable()) {
    return false;
  }

  if (HaveRingBuffer()) {
    return true;
  }

  int32 id = -1;
  Buffer buffer = command_buffer_->CreateTransferBuffer(ring_buffer_size_, &id);
  if (id < 0) {
    ClearUsable();
    return false;
  }

  ring_buffer_ = buffer;
  ring_buffer_id_ = id;
  command_buffer_->SetGetBuffer(id);

  // TODO(gman): Do we really need to call GetState here? We know get & put = 0
  // Also do we need to check state.num_entries?
  CommandBuffer::State state = command_buffer_->GetState();
  entries_ = static_cast<CommandBufferEntry*>(ring_buffer_.ptr);
  int32 num_ring_buffer_entries =
      ring_buffer_size_ / sizeof(CommandBufferEntry);
  if (num_ring_buffer_entries > state.num_entries) {
    ClearUsable();
    return false;
  }

  total_entry_count_ = num_ring_buffer_entries;
  put_ = state.put_offset;
  return true;
}
void CommandBufferHelper::FreeResources() {
  if (HaveRingBuffer()) {
    command_buffer_->DestroyTransferBuffer(ring_buffer_id_);
    ring_buffer_id_ = -1;
  }
}

void CommandBufferHelper::FreeRingBuffer() {
  GPU_CHECK((put_ == get_offset()) ||
            error::IsError(command_buffer_->GetLastState().error));
  FreeResources();
}
bool CommandBufferHelper::Initialize(int32 ring_buffer_size) {
  ring_buffer_size_ = ring_buffer_size;
  return AllocateRingBuffer();
}
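// Typical usage (illustrative sketch, not part of the original file): a client
// constructs the helper around its CommandBuffer and calls Initialize() with
// the desired ring buffer size in bytes, for example:
//   CommandBufferHelper helper(command_buffer);
//   if (!helper.Initialize(1024 * 1024)) {
//     // Handle ring buffer allocation failure.
//   }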
CommandBufferHelper::~CommandBufferHelper() {
  FreeResources();
}
bool CommandBufferHelper::FlushSync() {
  if (!usable()) {
    return false;
  }
  last_flush_time_ = clock();
  last_put_sent_ = put_;
  CommandBuffer::State state = command_buffer_->FlushSync(put_, get_offset());
  return state.error == error::kNoError;
}
void CommandBufferHelper::Flush() {
  if (usable() && last_put_sent_ != put_) {
    last_flush_time_ = clock();
    last_put_sent_ = put_;
    command_buffer_->Flush(put_);
  }
}
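// Descriptive note (added): Flush() only hands the current put pointer to the
// service, while FlushSync() above also returns an updated
// CommandBuffer::State. That is why the busy-wait loops below call FlushSync()
// on each iteration: it lets them observe the reader's progress.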
// Flushes the command stream and then waits until the buffer is empty (the
// reader has caught up). Breaks early if the error is set.
bool CommandBufferHelper::Finish() {
  TRACE_EVENT0("gpu", "CommandBufferHelper::Finish");
  if (!usable()) {
    return false;
  }
  // If there is no work just exit.
  if (put_ == get_offset()) {
    return true;
  }
  GPU_DCHECK(HaveRingBuffer());
  do {
    // Do not loop forever if the flush fails, meaning the command buffer
    // reader has shut down.
    if (!FlushSync())
      return false;
  } while (put_ != get_offset());

  return true;
}
// Inserts a new token into the command stream. It uses an increasing value
// scheme so that we don't lose tokens (a token has passed if the current token
// value is higher than that token). Calls Finish() if the token value wraps,
// which will be rare.
int32 CommandBufferHelper::InsertToken() {
  AllocateRingBuffer();
  if (!usable()) {
    return token_;
  }
  GPU_DCHECK(HaveRingBuffer());
  // Increment token as 31-bit integer. Negative values are used to signal an
  // error.
  token_ = (token_ + 1) & 0x7FFFFFFF;
  cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
  if (cmd) {
    cmd->Init(token_);
    if (token_ == 0) {
      TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)");
      // we wrapped
      Finish();
      GPU_DCHECK_EQ(token_, last_token_read());
    }
  }
  return token_;
}
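// Illustrative usage of InsertToken()/WaitForToken() (a sketch, not part of
// the original file): a caller that hands memory to the service typically
// inserts a token after the commands that consume it and waits on that token
// before reusing the memory:
//   int32 token = helper->InsertToken();
//   ...                           // issue more commands, do other work
//   helper->WaitForToken(token);  // now safe to touch the memory again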
// Waits until the current token value is greater than or equal to the value
// passed as argument.
void CommandBufferHelper::WaitForToken(int32 token) {
  if (!usable() || !HaveRingBuffer()) {
    return;
  }
  // Return immediately if the corresponding InsertToken failed.
  if (token < 0)
    return;
  if (token > token_) return;  // we wrapped
  while (last_token_read() < token) {
    if (get_offset() == put_) {
      GPU_LOG(FATAL) << "Empty command buffer while waiting on a token.";
      return;
    }
    // Do not loop forever if the flush fails, meaning the command buffer
    // reader has shut down.
    if (!FlushSync())
      return;
  }
}
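// A note on the (token > token_) early return in WaitForToken() above: token
// values only move forward, so a stored token larger than the current token_
// can only predate a wrap, and InsertToken() calls Finish() at the wrap point,
// which guarantees that such a token has already been read by the service.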
// Waits for available entries, basically waiting until get >= put + count + 1.
// It actually waits for contiguous entries, so it may need to wrap the buffer
// around, adding noops. Thus this function may change the value of put_. The
// function will return early if an error occurs, in which case the requested
// space may not be available.
void CommandBufferHelper::WaitForAvailableEntries(int32 count) {
  AllocateRingBuffer();
  if (!usable()) {
    return;
  }
  GPU_DCHECK(HaveRingBuffer());
  GPU_DCHECK(count < total_entry_count_);
  if (put_ + count > total_entry_count_) {
    // There's not enough room between the current put and the end of the
    // buffer, so we need to wrap. We will add noops all the way to the end,
    // but we first need to make sure get has wrapped, i.e. that get is 1 or
    // more (since put will wrap to 0 after we add the noops).
    GPU_DCHECK_LE(1, put_);
    if (get_offset() > put_ || get_offset() == 0) {
      TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries");
      while (get_offset() > put_ || get_offset() == 0) {
        // Do not loop forever if the flush fails, meaning the command buffer
        // reader has shut down.
        if (!FlushSync())
          return;
      }
    }
    // Insert noops to fill out the buffer.
    int32 num_entries = total_entry_count_ - put_;
    while (num_entries > 0) {
      int32 num_to_skip = std::min(CommandHeader::kMaxSize, num_entries);
      cmd::Noop::Set(&entries_[put_], num_to_skip);
      put_ += num_to_skip;
      num_entries -= num_to_skip;
    }
    put_ = 0;
  }
  if (AvailableEntries() < count) {
    TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries1");
    while (AvailableEntries() < count) {
      // Do not loop forever if the flush fails, meaning the command buffer
      // reader has shut down.
      if (!FlushSync())
        return;
    }
  }
  // Force a flush if the buffer is getting half full, or even earlier if the
  // reader is known to be idle.
  int32 pending =
      (put_ + total_entry_count_ - last_put_sent_) % total_entry_count_;
  int32 limit = total_entry_count_ /
      ((get_offset() == last_put_sent_) ? 16 : 2);
  if (pending > limit) {
    Flush();
  } else if (flush_automatically_ &&
             (commands_issued_ % kCommandsPerFlushCheck == 0)) {
#if !defined(OS_ANDROID)
    // Allow this command buffer to be pre-empted by another if a "reasonable"
    // amount of work has been done. On high-end machines, this reduces the
    // latency of GPU commands. However, on Android, this can cause the
    // kernel to thrash between generating GPU commands and executing them.
    clock_t current_time = clock();
    if (current_time - last_flush_time_ > kFlushDelay * CLOCKS_PER_SEC)
      Flush();
#endif
  }
}
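// Worked example for WaitForAvailableEntries() above (illustrative numbers,
// not from the original file): with total_entry_count_ = 1024, put_ = 1000 and
// count = 50, there is no contiguous room before the end of the buffer, so the
// function first waits (if needed) until get is between 1 and put_, fills
// entries 1000..1023 with noops, wraps put_ to 0, and then waits until at
// least 50 contiguous entries are free at the start of the buffer.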
CommandBufferEntry* CommandBufferHelper::GetSpace(uint32 entries) {
  AllocateRingBuffer();
  if (!usable()) {
    return NULL;
  }
  GPU_DCHECK(HaveRingBuffer());
  ++commands_issued_;
  WaitForAvailableEntries(entries);
  CommandBufferEntry* space = &entries_[put_];
  put_ += entries;
  GPU_DCHECK_LE(put_, total_entry_count_);
  if (put_ == total_entry_count_) {
    put_ = 0;
  }
  return space;
}
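// Illustrative note (not part of the original file): callers normally do not
// use GetSpace() directly; they go through the templated GetCmdSpace<T>()
// helper seen in InsertToken() above, presumably declared in the helper's
// header, which sizes the request for the command type and casts the returned
// entries, e.g.:
//   cmd::SetToken* cmd = helper->GetCmdSpace<cmd::SetToken>();
//   if (cmd)
//     cmd->Init(token);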
}  // namespace gpu