// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains the command buffer helper class.

#ifndef GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_
#define GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_

#include <string.h>

#include "base/time/time.h"
#include "gpu/command_buffer/common/cmd_buffer_common.h"
#include "gpu/command_buffer/common/command_buffer.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/gpu_export.h"

namespace gpu {

#if !defined(OS_ANDROID)
#define CMD_HELPER_PERIODIC_FLUSH_CHECK
const int kCommandsPerFlushCheck = 100;
// 1/300 of a second, i.e. ~3333 microseconds.
const int kPeriodicFlushDelayInMicroseconds =
    base::Time::kMicrosecondsPerSecond / (5 * 60);
#endif

const int kAutoFlushSmall = 16;  // 1/16 of the buffer
const int kAutoFlushBig = 2;     // 1/2 of the buffer

// Command buffer helper class. This class simplifies ring buffer management:
// it will allocate the buffer, give it to the buffer interface, and let the
// user add commands to it, while taking care of the synchronization (put and
// get). It also provides a way to ensure commands have been executed, through
// the token mechanism:
//
//   helper.AddCommand(...);
//   helper.AddCommand(...);
//   int32 token = helper.InsertToken();
//   helper.AddCommand(...);
//   helper.AddCommand(...);
//
//   helper.WaitForToken(token);  // this doesn't return until the first two
//                                // commands have been executed.
class GPU_EXPORT CommandBufferHelper {
 public:
  explicit CommandBufferHelper(CommandBuffer* command_buffer);
  virtual ~CommandBufferHelper();

  // Initializes the CommandBufferHelper.
  // Parameters:
  //   ring_buffer_size: The size of the ring buffer portion of the command
  //       buffer.
  bool Initialize(int32 ring_buffer_size);

  // Sets whether the command buffer should automatically flush periodically
  // to try to increase performance. Defaults to true.
  void SetAutomaticFlushes(bool enabled);

  // True if the context is lost.
  bool IsContextLost();

  // Asynchronously flushes the commands, setting the put pointer to let the
  // buffer interface know that new commands have been added. After a flush
  // returns, the command buffer service is aware of all pending commands.
  void Flush();

  // Waits until all the commands have been executed. Returns whether it
  // was successful. The function will fail if the command buffer service has
  // disconnected.
  bool Finish();

  // Waits until a given number of entries are available.
  // Parameters:
  //   count: number of entries needed. This value must be at most
  //     the size of the buffer minus one.
  void WaitForAvailableEntries(int32 count);

  // Inserts a new token into the command buffer. This token either has a value
  // different from previously inserted tokens, or ensures that previously
  // inserted tokens with that value have already passed through the command
  // stream.
  // Returns:
  //   the value of the new token or -1 if the command buffer reader has
  //   shutdown.
  int32 InsertToken();

  // Returns true if the token has passed.
  // Parameters:
  //   the value of the token to check whether it has passed
  bool HasTokenPassed(int32 token) const {
    if (token > token_)
      return true;  // we wrapped
    return last_token_read() >= token;
  }

  // Waits until the token of a particular value has passed through the command
  // stream (i.e. commands inserted before that token have been executed).
  // NOTE: This will call Flush if it needs to block.
  // Parameters:
  //   the value of the token to wait for.
  void WaitForToken(int32 token);

  // Called prior to each command being issued. Waits for a certain amount of
  // space to be available. Returns address of space.
  void* GetSpace(int32 entries) {
#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
    // Allow this command buffer to be pre-empted by another if a "reasonable"
    // amount of work has been done. On high-end machines, this reduces the
    // latency of GPU commands. However, on Android, this can cause the
    // kernel to thrash between generating GPU commands and executing them.
    ++commands_issued_;
    if (flush_automatically_ &&
        (commands_issued_ % kCommandsPerFlushCheck == 0)) {
      PeriodicFlushCheck();
    }
#endif

    // Test for immediate entries.
    if (entries > immediate_entry_count_) {
      WaitForAvailableEntries(entries);
      if (entries > immediate_entry_count_)
        return NULL;
    }

    DCHECK_LE(entries, immediate_entry_count_);

    // Allocate space and advance put_.
    CommandBufferEntry* space = &entries_[put_];
    put_ += entries;
    immediate_entry_count_ -= entries;

    DCHECK_LE(put_, total_entry_count_);
    return space;
  }

  template <typename T>
  void ForceNullCheck(T* data) {
#if defined(OS_WIN) && defined(ARCH_CPU_64_BITS)
    // 64-bit MSVC's alias analysis was determining that the command buffer
    // entry couldn't be NULL, so it optimized out the NULL check.
    // Dereferencing the same datatype through a volatile pointer seems to
    // prevent that from happening. http://crbug.com/361936
    if (data)
      static_cast<volatile T*>(data)->header;
#endif
  }

  // Typed version of GetSpace. Gets enough room for the given type and returns
  // a reference to it.
  template <typename T>
  T* GetCmdSpace() {
    COMPILE_ASSERT(T::kArgFlags == cmd::kFixed, Cmd_kArgFlags_not_kFixed);
    int32 space_needed = ComputeNumEntries(sizeof(T));
    T* data = static_cast<T*>(GetSpace(space_needed));
    ForceNullCheck(data);
    return data;
  }

  // Typed version of GetSpace for immediate commands.
  template <typename T>
  T* GetImmediateCmdSpace(size_t data_space) {
    COMPILE_ASSERT(T::kArgFlags == cmd::kAtLeastN, Cmd_kArgFlags_not_kAtLeastN);
    int32 space_needed = ComputeNumEntries(sizeof(T) + data_space);
    T* data = static_cast<T*>(GetSpace(space_needed));
    ForceNullCheck(data);
    return data;
  }

  // Typed version of GetSpace for immediate commands, taking the total size
  // of the command (header included) rather than the size of the trailing
  // payload alone.
  template <typename T>
  T* GetImmediateCmdSpaceTotalSize(size_t total_space) {
    COMPILE_ASSERT(T::kArgFlags == cmd::kAtLeastN, Cmd_kArgFlags_not_kAtLeastN);
    int32 space_needed = ComputeNumEntries(total_space);
    T* data = static_cast<T*>(GetSpace(space_needed));
    ForceNullCheck(data);
    return data;
  }

  int32 last_token_read() const {
    return command_buffer_->GetLastToken();
  }

  int32 get_offset() const {
    return command_buffer_->GetLastState().get_offset;
  }

  // Common Commands
  void Noop(uint32 skip_count) {
    cmd::Noop* cmd = GetImmediateCmdSpace<cmd::Noop>(
        (skip_count - 1) * sizeof(CommandBufferEntry));
    if (cmd) {
      cmd->Init(skip_count);
    }
  }

  void SetToken(uint32 token) {
    cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
    if (cmd) {
      cmd->Init(token);
    }
  }

  void SetBucketSize(uint32 bucket_id, uint32 size) {
    cmd::SetBucketSize* cmd = GetCmdSpace<cmd::SetBucketSize>();
    if (cmd) {
      cmd->Init(bucket_id, size);
    }
  }

  void SetBucketData(uint32 bucket_id,
                     uint32 offset,
                     uint32 size,
                     uint32 shared_memory_id,
                     uint32 shared_memory_offset) {
    cmd::SetBucketData* cmd = GetCmdSpace<cmd::SetBucketData>();
    if (cmd) {
      cmd->Init(bucket_id,
                offset,
                size,
                shared_memory_id,
                shared_memory_offset);
    }
  }

  void SetBucketDataImmediate(
      uint32 bucket_id, uint32 offset, const void* data, uint32 size) {
    cmd::SetBucketDataImmediate* cmd =
        GetImmediateCmdSpace<cmd::SetBucketDataImmediate>(size);
    if (cmd) {
      cmd->Init(bucket_id, offset, size);
      memcpy(ImmediateDataAddress(cmd), data, size);
    }
  }

  void GetBucketStart(uint32 bucket_id,
                      uint32 result_memory_id,
                      uint32 result_memory_offset,
                      uint32 data_memory_size,
                      uint32 data_memory_id,
                      uint32 data_memory_offset) {
    cmd::GetBucketStart* cmd = GetCmdSpace<cmd::GetBucketStart>();
    if (cmd) {
      cmd->Init(bucket_id,
                result_memory_id,
                result_memory_offset,
                data_memory_size,
                data_memory_id,
                data_memory_offset);
    }
  }

  void GetBucketData(uint32 bucket_id,
                     uint32 offset,
                     uint32 size,
                     uint32 shared_memory_id,
                     uint32 shared_memory_offset) {
    cmd::GetBucketData* cmd = GetCmdSpace<cmd::GetBucketData>();
    if (cmd) {
      cmd->Init(bucket_id,
                offset,
                size,
                shared_memory_id,
                shared_memory_offset);
    }
  }

  CommandBuffer* command_buffer() const {
    return command_buffer_;
  }

  scoped_refptr<Buffer> get_ring_buffer() const { return ring_buffer_; }

  uint32 flush_generation() const { return flush_generation_; }

  void FreeRingBuffer();

  bool HaveRingBuffer() const {
    return ring_buffer_id_ != -1;
  }

  bool usable() const {
    return usable_;
  }

  void ClearUsable() {
    usable_ = false;
    CalcImmediateEntries(0);
  }

 private:
  // Returns the number of available entries (they may not be contiguous).
  int32 AvailableEntries() {
    return (get_offset() - put_ - 1 + total_entry_count_) % total_entry_count_;
  }

  void CalcImmediateEntries(int waiting_count);
  bool AllocateRingBuffer();
  void FreeResources();

  // Waits for the get offset to be in a specific range, inclusive. Returns
  // false if there was an error.
  bool WaitForGetOffsetInRange(int32 start, int32 end);

#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
  // Calls Flush if automatic flush conditions are met.
  void PeriodicFlushCheck();
#endif

  CommandBuffer* command_buffer_;
  int32 ring_buffer_id_;
  int32 ring_buffer_size_;
  scoped_refptr<gpu::Buffer> ring_buffer_;
  CommandBufferEntry* entries_;
  int32 total_entry_count_;  // the total number of entries
  int32 immediate_entry_count_;
  int32 token_;
  int32 put_;
  int32 last_put_sent_;

#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
  int commands_issued_;
#endif

  bool usable_;
  bool context_lost_;
  bool flush_automatically_;

  base::TimeTicks last_flush_time_;

  // Incremented every time the helper flushes the command buffer.
  // Can be used to track when prior commands have been flushed.
  uint32 flush_generation_;

  friend class CommandBufferHelperTest;
  DISALLOW_COPY_AND_ASSIGN(CommandBufferHelper);
};

}  // namespace gpu

#endif  // GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_