// NOTE(review): web-view extraction residue, preserved as comments — the
// commit subject below is unrelated to this header; verify provenance.
// Fix crash on app list start page keyboard navigation with <4 apps.
// [chromium-blink-merge.git] / gpu / command_buffer / client / cmd_buffer_helper.h
// blob 3f7fba78269885dbebdb4bfb39c42c53b1cc7622
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 // This file contains the command buffer helper class.
7 #ifndef GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_
8 #define GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_
10 #include <string.h>
11 #include <time.h>
13 #include "base/time/time.h"
14 #include "gpu/command_buffer/common/cmd_buffer_common.h"
15 #include "gpu/command_buffer/common/command_buffer.h"
16 #include "gpu/command_buffer/common/constants.h"
17 #include "gpu/gpu_export.h"
19 namespace gpu {
21 #if !defined(OS_ANDROID)
22 #define CMD_HELPER_PERIODIC_FLUSH_CHECK
23 const int kCommandsPerFlushCheck = 100;
24 const int kPeriodicFlushDelayInMicroseconds =
25 base::Time::kMicrosecondsPerSecond / (5 * 60);
26 #endif
28 const int kAutoFlushSmall = 16; // 1/16 of the buffer
29 const int kAutoFlushBig = 2; // 1/2 of the buffer
31 // Command buffer helper class. This class simplifies ring buffer management:
32 // it will allocate the buffer, give it to the buffer interface, and let the
33 // user add commands to it, while taking care of the synchronization (put and
34 // get). It also provides a way to ensure commands have been executed, through
35 // the token mechanism:
37 // helper.AddCommand(...);
38 // helper.AddCommand(...);
39 // int32 token = helper.InsertToken();
40 // helper.AddCommand(...);
41 // helper.AddCommand(...);
42 // [...]
44 // helper.WaitForToken(token); // this doesn't return until the first two
45 // // commands have been executed.
46 class GPU_EXPORT CommandBufferHelper {
47 public:
48 explicit CommandBufferHelper(CommandBuffer* command_buffer);
49 virtual ~CommandBufferHelper();
51 // Initializes the CommandBufferHelper.
52 // Parameters:
53 // ring_buffer_size: The size of the ring buffer portion of the command
54 // buffer.
55 bool Initialize(int32 ring_buffer_size);
57 // Sets whether the command buffer should automatically flush periodically
58 // to try to increase performance. Defaults to true.
59 void SetAutomaticFlushes(bool enabled);
61 // True if the context is lost.
62 bool IsContextLost();
64 // Asynchronously flushes the commands, setting the put pointer to let the
65 // buffer interface know that new commands have been added. After a flush
66 // returns, the command buffer service is aware of all pending commands.
67 void Flush();
69 // Ensures that commands up to the put pointer will be processed in the
70 // command buffer service before any future commands on other command buffers
71 // sharing a channel.
72 void OrderingBarrier();
74 // Waits until all the commands have been executed. Returns whether it
75 // was successful. The function will fail if the command buffer service has
76 // disconnected.
77 bool Finish();
79 // Waits until a given number of available entries are available.
80 // Parameters:
81 // count: number of entries needed. This value must be at most
82 // the size of the buffer minus one.
83 void WaitForAvailableEntries(int32 count);
85 // Inserts a new token into the command buffer. This token either has a value
86 // different from previously inserted tokens, or ensures that previously
87 // inserted tokens with that value have already passed through the command
88 // stream.
89 // Returns:
90 // the value of the new token or -1 if the command buffer reader has
91 // shutdown.
92 int32 InsertToken();
94 // Returns true if the token has passed.
95 // Parameters:
96 // the value of the token to check whether it has passed
97 bool HasTokenPassed(int32 token) const {
98 if (token > token_)
99 return true; // we wrapped
100 return last_token_read() >= token;
103 // Waits until the token of a particular value has passed through the command
104 // stream (i.e. commands inserted before that token have been executed).
105 // NOTE: This will call Flush if it needs to block.
106 // Parameters:
107 // the value of the token to wait for.
108 void WaitForToken(int32 token);
110 // Called prior to each command being issued. Waits for a certain amount of
111 // space to be available. Returns address of space.
112 void* GetSpace(int32 entries) {
113 #if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
114 // Allow this command buffer to be pre-empted by another if a "reasonable"
115 // amount of work has been done. On highend machines, this reduces the
116 // latency of GPU commands. However, on Android, this can cause the
117 // kernel to thrash between generating GPU commands and executing them.
118 ++commands_issued_;
119 if (flush_automatically_ &&
120 (commands_issued_ % kCommandsPerFlushCheck == 0)) {
121 PeriodicFlushCheck();
123 #endif
125 // Test for immediate entries.
126 if (entries > immediate_entry_count_) {
127 WaitForAvailableEntries(entries);
128 if (entries > immediate_entry_count_)
129 return NULL;
132 DCHECK_LE(entries, immediate_entry_count_);
134 // Allocate space and advance put_.
135 CommandBufferEntry* space = &entries_[put_];
136 put_ += entries;
137 immediate_entry_count_ -= entries;
139 DCHECK_LE(put_, total_entry_count_);
140 return space;
143 template <typename T>
144 void ForceNullCheck(T* data) {
145 #if defined(COMPILER_MSVC) && defined(ARCH_CPU_64_BITS) && !defined(__clang__)
146 // 64-bit MSVC's alias analysis was determining that the command buffer
147 // entry couldn't be NULL, so it optimized out the NULL check.
148 // Dereferencing the same datatype through a volatile pointer seems to
149 // prevent that from happening. http://crbug.com/361936
150 // TODO(jbauman): Remove once we're on VC2015, http://crbug.com/412902
151 if (data)
152 static_cast<volatile T*>(data)->header;
153 #endif
156 // Typed version of GetSpace. Gets enough room for the given type and returns
157 // a reference to it.
158 template <typename T>
159 T* GetCmdSpace() {
160 static_assert(T::kArgFlags == cmd::kFixed,
161 "T::kArgFlags should equal cmd::kFixed");
162 int32 space_needed = ComputeNumEntries(sizeof(T));
163 T* data = static_cast<T*>(GetSpace(space_needed));
164 ForceNullCheck(data);
165 return data;
168 // Typed version of GetSpace for immediate commands.
169 template <typename T>
170 T* GetImmediateCmdSpace(size_t data_space) {
171 static_assert(T::kArgFlags == cmd::kAtLeastN,
172 "T::kArgFlags should equal cmd::kAtLeastN");
173 int32 space_needed = ComputeNumEntries(sizeof(T) + data_space);
174 T* data = static_cast<T*>(GetSpace(space_needed));
175 ForceNullCheck(data);
176 return data;
179 // Typed version of GetSpace for immediate commands.
180 template <typename T>
181 T* GetImmediateCmdSpaceTotalSize(size_t total_space) {
182 static_assert(T::kArgFlags == cmd::kAtLeastN,
183 "T::kArgFlags should equal cmd::kAtLeastN");
184 int32 space_needed = ComputeNumEntries(total_space);
185 T* data = static_cast<T*>(GetSpace(space_needed));
186 ForceNullCheck(data);
187 return data;
190 int32 last_token_read() const {
191 return command_buffer_->GetLastToken();
194 int32 get_offset() const {
195 return command_buffer_->GetLastState().get_offset;
198 // Common Commands
199 void Noop(uint32 skip_count) {
200 cmd::Noop* cmd = GetImmediateCmdSpace<cmd::Noop>(
201 (skip_count - 1) * sizeof(CommandBufferEntry));
202 if (cmd) {
203 cmd->Init(skip_count);
207 void SetToken(uint32 token) {
208 cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
209 if (cmd) {
210 cmd->Init(token);
214 void SetBucketSize(uint32 bucket_id, uint32 size) {
215 cmd::SetBucketSize* cmd = GetCmdSpace<cmd::SetBucketSize>();
216 if (cmd) {
217 cmd->Init(bucket_id, size);
221 void SetBucketData(uint32 bucket_id,
222 uint32 offset,
223 uint32 size,
224 uint32 shared_memory_id,
225 uint32 shared_memory_offset) {
226 cmd::SetBucketData* cmd = GetCmdSpace<cmd::SetBucketData>();
227 if (cmd) {
228 cmd->Init(bucket_id,
229 offset,
230 size,
231 shared_memory_id,
232 shared_memory_offset);
236 void SetBucketDataImmediate(
237 uint32 bucket_id, uint32 offset, const void* data, uint32 size) {
238 cmd::SetBucketDataImmediate* cmd =
239 GetImmediateCmdSpace<cmd::SetBucketDataImmediate>(size);
240 if (cmd) {
241 cmd->Init(bucket_id, offset, size);
242 memcpy(ImmediateDataAddress(cmd), data, size);
246 void GetBucketStart(uint32 bucket_id,
247 uint32 result_memory_id,
248 uint32 result_memory_offset,
249 uint32 data_memory_size,
250 uint32 data_memory_id,
251 uint32 data_memory_offset) {
252 cmd::GetBucketStart* cmd = GetCmdSpace<cmd::GetBucketStart>();
253 if (cmd) {
254 cmd->Init(bucket_id,
255 result_memory_id,
256 result_memory_offset,
257 data_memory_size,
258 data_memory_id,
259 data_memory_offset);
263 void GetBucketData(uint32 bucket_id,
264 uint32 offset,
265 uint32 size,
266 uint32 shared_memory_id,
267 uint32 shared_memory_offset) {
268 cmd::GetBucketData* cmd = GetCmdSpace<cmd::GetBucketData>();
269 if (cmd) {
270 cmd->Init(bucket_id,
271 offset,
272 size,
273 shared_memory_id,
274 shared_memory_offset);
278 CommandBuffer* command_buffer() const {
279 return command_buffer_;
282 scoped_refptr<Buffer> get_ring_buffer() const { return ring_buffer_; }
284 uint32 flush_generation() const { return flush_generation_; }
286 void FreeRingBuffer();
288 bool HaveRingBuffer() const {
289 return ring_buffer_id_ != -1;
292 bool usable () const {
293 return usable_;
296 void ClearUsable() {
297 usable_ = false;
298 CalcImmediateEntries(0);
301 private:
302 // Returns the number of available entries (they may not be contiguous).
303 int32 AvailableEntries() {
304 return (get_offset() - put_ - 1 + total_entry_count_) % total_entry_count_;
307 void CalcImmediateEntries(int waiting_count);
308 bool AllocateRingBuffer();
309 void FreeResources();
311 // Waits for the get offset to be in a specific range, inclusive. Returns
312 // false if there was an error.
313 bool WaitForGetOffsetInRange(int32 start, int32 end);
315 #if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
316 // Calls Flush if automatic flush conditions are met.
317 void PeriodicFlushCheck();
318 #endif
320 CommandBuffer* command_buffer_;
321 int32 ring_buffer_id_;
322 int32 ring_buffer_size_;
323 scoped_refptr<gpu::Buffer> ring_buffer_;
324 CommandBufferEntry* entries_;
325 int32 total_entry_count_; // the total number of entries
326 int32 immediate_entry_count_;
327 int32 token_;
328 int32 put_;
329 int32 last_put_sent_;
330 int32 last_barrier_put_sent_;
332 #if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
333 int commands_issued_;
334 #endif
336 bool usable_;
337 bool context_lost_;
338 bool flush_automatically_;
340 base::TimeTicks last_flush_time_;
342 // Incremented every time the helper flushes the command buffer.
343 // Can be used to track when prior commands have been flushed.
344 uint32 flush_generation_;
346 friend class CommandBufferHelperTest;
347 DISALLOW_COPY_AND_ASSIGN(CommandBufferHelper);
350 } // namespace gpu
352 #endif // GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_