// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains the command buffer helper class.

#ifndef GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_
#define GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_

#include <string.h>
#include <time.h>

#include "gpu/command_buffer/common/cmd_buffer_common.h"
#include "gpu/command_buffer/common/command_buffer.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/gpu_export.h"

namespace gpu {

#if !defined(OS_ANDROID)
#define CMD_HELPER_PERIODIC_FLUSH_CHECK
const int kCommandsPerFlushCheck = 100;
const float kPeriodicFlushDelay = 1.0f / (5.0f * 60.0f);
#endif

const int kAutoFlushSmall = 16;  // 1/16 of the buffer
const int kAutoFlushBig = 2;     // 1/2 of the buffer

// Command buffer helper class. This class simplifies ring buffer management:
// it will allocate the buffer, give it to the buffer interface, and let the
// user add commands to it, while taking care of the synchronization (put and
// get). It also provides a way to ensure commands have been executed, through
// the token mechanism:
//
//   helper.AddCommand(...);
//   helper.AddCommand(...);
//   int32 token = helper.InsertToken();
//   helper.AddCommand(...);
//   helper.AddCommand(...);
//
//   helper.WaitForToken(token);  // this doesn't return until the first two
//                                // commands have been executed.
class GPU_EXPORT CommandBufferHelper {
 public:
  explicit CommandBufferHelper(CommandBuffer* command_buffer);
  virtual ~CommandBufferHelper();

  // Initializes the CommandBufferHelper.
  // Parameters:
  //   ring_buffer_size: The size of the ring buffer portion of the command
  //       buffer.
  bool Initialize(int32 ring_buffer_size);

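  // Example use of Initialize() (illustrative sketch, not part of this API's
  // contract), assuming an already-created CommandBuffer* command_buffer and
  // a caller-chosen ring buffer size:
  //
  //   CommandBufferHelper helper(command_buffer);
  //   if (!helper.Initialize(1024 * 1024))  // e.g. a 1 MB ring buffer
  //     return false;  // ring buffer allocation failed
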
  // Sets whether the command buffer should automatically flush periodically
  // to try to increase performance. Defaults to true.
  void SetAutomaticFlushes(bool enabled);

  // True if the context is lost.
  bool IsContextLost();

  // Asynchronously flushes the commands, setting the put pointer to let the
  // buffer interface know that new commands have been added. After a flush
  // returns, the command buffer service is aware of all pending commands.
  void Flush();

  // Waits until all the commands have been executed. Returns whether it
  // was successful. The function will fail if the command buffer service has
  // disconnected.
  bool Finish();

  // Waits until a given number of entries are available.
  // Parameters:
  //   count: number of entries needed. This value must be at most
  //     the size of the buffer minus one.
  void WaitForAvailableEntries(int32 count);

  // Inserts a new token into the command buffer. This token either has a value
  // different from previously inserted tokens, or ensures that previously
  // inserted tokens with that value have already passed through the command
  // stream.
  // Returns:
  //   the value of the new token, or -1 if the command buffer reader has
  //   shut down.
  int32 InsertToken();

  // Returns true if the token has passed.
  // Parameters:
  //   token: the value of the token to check whether it has passed.
  bool HasTokenPassed(int32 token) const {
    if (token > token_)
      return true;  // we wrapped
    return last_token_read() >= token;
  }

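  // Example (illustrative): token values are recycled, so a queried token
  // numerically greater than the helper's current token_ can only mean the
  // counter has wrapped, in which case the older token is treated as already
  // passed. Typical non-blocking use:
  //
  //   int32 token = helper.InsertToken();
  //   // ... issue more commands ...
  //   if (!helper.HasTokenPassed(token))
  //     helper.WaitForToken(token);  // flushes and blocks as needed
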
  // Waits until the token of a particular value has passed through the command
  // stream (i.e. commands inserted before that token have been executed).
  // NOTE: This will call Flush if it needs to block.
  // Parameters:
  //   token: the value of the token to wait for.
  void WaitForToken(int32 token);

  // Called prior to each command being issued. Waits for a certain amount of
  // space to be available. Returns address of space.
  void* GetSpace(int32 entries) {
#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
    // Allow this command buffer to be pre-empted by another if a "reasonable"
    // amount of work has been done. On high-end machines, this reduces the
    // latency of GPU commands. However, on Android, this can cause the
    // kernel to thrash between generating GPU commands and executing them.
    ++commands_issued_;
    if (flush_automatically_ &&
        (commands_issued_ % kCommandsPerFlushCheck == 0)) {
      PeriodicFlushCheck();
    }
#endif

    // Test for immediate entries.
    if (entries > immediate_entry_count_) {
      WaitForAvailableEntries(entries);
      if (entries > immediate_entry_count_)
        return NULL;
    }

    DCHECK_LE(entries, immediate_entry_count_);

    // Allocate space and advance put_.
    CommandBufferEntry* space = &entries_[put_];
    put_ += entries;
    immediate_entry_count_ -= entries;

    DCHECK_LE(put_, total_entry_count_);
    return space;
  }

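  // Example use of GetSpace() (illustrative): commands normally go through
  // the typed wrappers below, but an untyped allocation looks like this,
  // assuming the caller knows how many entries the command occupies:
  //
  //   void* space = helper.GetSpace(num_entries);
  //   if (space) {
  //     // write num_entries * sizeof(CommandBufferEntry) bytes of command
  //     // data into the returned address
  //   }
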
  template <typename T>
  void ForceNullCheck(T* data) {
#if defined(OS_WIN) && defined(ARCH_CPU_64_BITS)
    // 64-bit MSVC's alias analysis was determining that the command buffer
    // entry couldn't be NULL, so it optimized out the NULL check.
    // Dereferencing the same datatype through a volatile pointer seems to
    // prevent that from happening. http://crbug.com/361936
    if (data)
      static_cast<volatile T*>(data)->header;
#endif
  }

  // Typed version of GetSpace. Gets enough room for the given type and returns
  // a reference to it.
  template <typename T>
  T* GetCmdSpace() {
    COMPILE_ASSERT(T::kArgFlags == cmd::kFixed, Cmd_kArgFlags_not_kFixed);
    int32 space_needed = ComputeNumEntries(sizeof(T));
    T* data = static_cast<T*>(GetSpace(space_needed));
    ForceNullCheck(data);
    return data;
  }

  // Typed version of GetSpace for immediate commands.
  template <typename T>
  T* GetImmediateCmdSpace(size_t data_space) {
    COMPILE_ASSERT(T::kArgFlags == cmd::kAtLeastN, Cmd_kArgFlags_not_kAtLeastN);
    int32 space_needed = ComputeNumEntries(sizeof(T) + data_space);
    T* data = static_cast<T*>(GetSpace(space_needed));
    ForceNullCheck(data);
    return data;
  }

  // Typed version of GetSpace for immediate commands, taking the total space
  // (header plus payload) rather than just the payload size.
  template <typename T>
  T* GetImmediateCmdSpaceTotalSize(size_t total_space) {
    COMPILE_ASSERT(T::kArgFlags == cmd::kAtLeastN, Cmd_kArgFlags_not_kAtLeastN);
    int32 space_needed = ComputeNumEntries(total_space);
    T* data = static_cast<T*>(GetSpace(space_needed));
    ForceNullCheck(data);
    return data;
  }

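  // Example (illustrative): the wrapper to use is dictated by T::kArgFlags.
  // A fixed-size command goes through GetCmdSpace<T>(); a command followed by
  // a variable-size payload of N bytes goes through GetImmediateCmdSpace<T>(N),
  // which reserves sizeof(T) + N bytes. Assuming a valid bucket_id and a
  // 16-byte payload:
  //
  //   cmd::SetBucketDataImmediate* cmd =
  //       GetImmediateCmdSpace<cmd::SetBucketDataImmediate>(16);
  //   if (cmd) {
  //     cmd->Init(bucket_id, 0, 16);
  //     memcpy(ImmediateDataAddress(cmd), payload, 16);
  //   }
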
  int32 last_token_read() const {
    return command_buffer_->GetLastToken();
  }

  int32 get_offset() const {
    return command_buffer_->GetLastState().get_offset;
  }

  // Common Commands
  void Noop(uint32 skip_count) {
    cmd::Noop* cmd = GetImmediateCmdSpace<cmd::Noop>(
        (skip_count - 1) * sizeof(CommandBufferEntry));
    if (cmd) {
      cmd->Init(skip_count);
    }
  }

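  // Note on Noop() above (illustrative): skip_count counts entries including
  // the one-entry cmd::Noop header itself, so the immediate payload is
  // (skip_count - 1) entries and skip_count must be at least 1.
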
  void SetToken(uint32 token) {
    cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
    if (cmd) {
      cmd->Init(token);
    }
  }

  void SetBucketSize(uint32 bucket_id, uint32 size) {
    cmd::SetBucketSize* cmd = GetCmdSpace<cmd::SetBucketSize>();
    if (cmd) {
      cmd->Init(bucket_id, size);
    }
  }

  void SetBucketData(uint32 bucket_id,
                     uint32 offset,
                     uint32 size,
                     uint32 shared_memory_id,
                     uint32 shared_memory_offset) {
    cmd::SetBucketData* cmd = GetCmdSpace<cmd::SetBucketData>();
    if (cmd) {
      cmd->Init(bucket_id,
                offset,
                size,
                shared_memory_id,
                shared_memory_offset);
    }
  }

  void SetBucketDataImmediate(
      uint32 bucket_id, uint32 offset, const void* data, uint32 size) {
    cmd::SetBucketDataImmediate* cmd =
        GetImmediateCmdSpace<cmd::SetBucketDataImmediate>(size);
    if (cmd) {
      cmd->Init(bucket_id, offset, size);
      memcpy(ImmediateDataAddress(cmd), data, size);
    }
  }

  void GetBucketStart(uint32 bucket_id,
                      uint32 result_memory_id,
                      uint32 result_memory_offset,
                      uint32 data_memory_size,
                      uint32 data_memory_id,
                      uint32 data_memory_offset) {
    cmd::GetBucketStart* cmd = GetCmdSpace<cmd::GetBucketStart>();
    if (cmd) {
      cmd->Init(bucket_id,
                result_memory_id,
                result_memory_offset,
                data_memory_size,
                data_memory_id,
                data_memory_offset);
    }
  }

  void GetBucketData(uint32 bucket_id,
                     uint32 offset,
                     uint32 size,
                     uint32 shared_memory_id,
                     uint32 shared_memory_offset) {
    cmd::GetBucketData* cmd = GetCmdSpace<cmd::GetBucketData>();
    if (cmd) {
      cmd->Init(bucket_id,
                offset,
                size,
                shared_memory_id,
                shared_memory_offset);
    }
  }

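  // Example (illustrative): a typical bucket round-trip copies data to the
  // service through shared memory, then fences on a token before the shared
  // memory is reused. Assuming shm_id/shm_offset describe an already-mapped
  // transfer buffer:
  //
  //   helper.SetBucketSize(bucket_id, size);
  //   helper.SetBucketData(bucket_id, 0, size, shm_id, shm_offset);
  //   int32 token = helper.InsertToken();
  //   helper.WaitForToken(token);  // safe to reuse shm after this returns
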
  CommandBuffer* command_buffer() const {
    return command_buffer_;
  }

  scoped_refptr<Buffer> get_ring_buffer() const { return ring_buffer_; }

  uint32 flush_generation() const { return flush_generation_; }

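  // Example (illustrative): flush_generation() lets a caller detect whether
  // a flush has happened since a given point:
  //
  //   uint32 generation = helper.flush_generation();
  //   // ... later ...
  //   if (helper.flush_generation() != generation) {
  //     // at least one flush has occurred since the snapshot
  //   }
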
  void FreeRingBuffer();

  bool HaveRingBuffer() const {
    return ring_buffer_id_ != -1;
  }

  bool usable() const {
    return usable_;
  }

  void ClearUsable() {
    usable_ = false;
    CalcImmediateEntries(0);
  }

 private:
  // Returns the number of available entries (they may not be contiguous).
  int32 AvailableEntries() {
    return (get_offset() - put_ - 1 + total_entry_count_) % total_entry_count_;
  }

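  // Worked example (illustrative): with total_entry_count_ == 1024,
  // put_ == 1000 and get_offset() == 10, this yields
  // (10 - 1000 - 1 + 1024) % 1024 == 33 free entries. One entry is always
  // left unused so that put_ == get can only mean "empty", never "full".
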
  void CalcImmediateEntries(int waiting_count);
  bool AllocateRingBuffer();
  void FreeResources();

  // Waits for the get offset to be in a specific range, inclusive. Returns
  // false if there was an error.
  bool WaitForGetOffsetInRange(int32 start, int32 end);

#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
  // Calls Flush if automatic flush conditions are met.
  void PeriodicFlushCheck();
#endif

  CommandBuffer* command_buffer_;
  int32 ring_buffer_id_;
  int32 ring_buffer_size_;
  scoped_refptr<gpu::Buffer> ring_buffer_;
  CommandBufferEntry* entries_;
  int32 total_entry_count_;  // the total number of entries
  int32 immediate_entry_count_;
  int32 token_;
  int32 put_;
  int32 last_put_sent_;

#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
  int commands_issued_;
#endif

  bool usable_;
  bool flush_automatically_;

  // Using C runtime instead of base because this file cannot depend on base.
  clock_t last_flush_time_;

  // Incremented every time the helper flushes the command buffer.
  // Can be used to track when prior commands have been flushed.
  uint32 flush_generation_;

  friend class CommandBufferHelperTest;
  DISALLOW_COPY_AND_ASSIGN(CommandBufferHelper);
};

}  // namespace gpu

#endif  // GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_