// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains the command buffer helper class.

#ifndef GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_
#define GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_

#include "base/basictypes.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "gpu/command_buffer/common/cmd_buffer_common.h"
#include "gpu/command_buffer/common/command_buffer.h"
#include "gpu/gpu_export.h"

namespace gpu {

#if !defined(OS_ANDROID)
#define CMD_HELPER_PERIODIC_FLUSH_CHECK
const int kCommandsPerFlushCheck = 100;
const int kPeriodicFlushDelayInMicroseconds =
    base::Time::kMicrosecondsPerSecond / (5 * 60);
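// For reference, the division above works out to 1,000,000 / 300 = 3333
// microseconds, i.e. the periodic flush check flushes at most roughly once
// every 3.3 ms.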
#endif

const int kAutoFlushSmall = 16;  // 1/16 of the buffer
const int kAutoFlushBig = 2;     // 1/2 of the buffer

// Command buffer helper class. This class simplifies ring buffer management:
// it will allocate the buffer, give it to the buffer interface, and let the
// user add commands to it, while taking care of the synchronization (put and
// get). It also provides a way to ensure commands have been executed, through
// the token mechanism:
//
//   helper.AddCommand(...);
//   helper.AddCommand(...);
//   int32 token = helper.InsertToken();
//   helper.AddCommand(...);
//   helper.AddCommand(...);
//
//   helper.WaitForToken(token);  // this doesn't return until the first two
//                                // commands have been executed.
class GPU_EXPORT CommandBufferHelper {
 public:
  explicit CommandBufferHelper(CommandBuffer* command_buffer);
  virtual ~CommandBufferHelper();

  // Initializes the CommandBufferHelper.
  // Parameters:
  //   ring_buffer_size: The size of the ring buffer portion of the command
  //       buffer.
  bool Initialize(int32 ring_buffer_size);

  // Sets whether the command buffer should automatically flush periodically
  // to try to increase performance. Defaults to true.
  void SetAutomaticFlushes(bool enabled);

  // True if the context is lost.
  bool IsContextLost();

  // Asynchronously flushes the commands, setting the put pointer to let the
  // buffer interface know that new commands have been added. After a flush
  // returns, the command buffer service is aware of all pending commands.
  void Flush();

  // Ensures that commands up to the put pointer will be processed in the
  // command buffer service before any future commands on other command
  // buffers.
  void OrderingBarrier();

  // Waits until all the commands have been executed. Returns whether it
  // was successful. The function will fail if the command buffer service has
  // disconnected.
  bool Finish();

  // Waits until the given number of entries is available.
  // Parameters:
  //   count: number of entries needed. This value must be at most
  //       the size of the buffer minus one.
  void WaitForAvailableEntries(int32 count);

  // Inserts a new token into the command buffer. This token either has a value
  // different from previously inserted tokens, or ensures that previously
  // inserted tokens with that value have already passed through the command
  // stream.
  // Returns:
  //   the value of the new token or -1 if the command buffer reader has
  //   shutdown.
  int32 InsertToken();

  // Returns true if the token has passed.
  // Parameters:
  //   the value of the token to check whether it has passed
  bool HasTokenPassed(int32 token) const {
    if (token > token_)
      return true;  // we wrapped
    return last_token_read() >= token;
  }

  // Waits until the token of a particular value has passed through the command
  // stream (i.e. commands inserted before that token have been executed).
  // NOTE: This will call Flush if it needs to block.
  // Parameters:
  //   the value of the token to wait for.
  void WaitForToken(int32 token);
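
  // Illustrative usage pattern (the names below are placeholders, not part of
  // this header): commands that read client memory are followed by a token,
  // and that memory is only reused once the token has passed.
  //
  //   // ... issue commands that read from |shared_mem| ...
  //   int32 token = helper.InsertToken();
  //   // ... later, before overwriting |shared_mem| ...
  //   if (!helper.HasTokenPassed(token))
  //     helper.WaitForToken(token);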

  // Called prior to each command being issued. Waits for a certain amount of
  // space to be available. Returns address of space.
  void* GetSpace(int32 entries) {
#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
    // Allow this command buffer to be pre-empted by another if a "reasonable"
    // amount of work has been done. On highend machines, this reduces the
    // latency of GPU commands. However, on Android, this can cause the
    // kernel to thrash between generating GPU commands and executing them.
    ++commands_issued_;
    if (flush_automatically_ &&
        (commands_issued_ % kCommandsPerFlushCheck == 0)) {
      PeriodicFlushCheck();
    }
#endif

    // Test for immediate entries.
    if (entries > immediate_entry_count_) {
      WaitForAvailableEntries(entries);
      if (entries > immediate_entry_count_)
        return NULL;
    }

    DCHECK_LE(entries, immediate_entry_count_);

    // Allocate space and advance put_.
    CommandBufferEntry* space = &entries_[put_];
    put_ += entries;
    immediate_entry_count_ -= entries;

    DCHECK_LE(put_, total_entry_count_);
    return space;
  }

  template <typename T>
  void ForceNullCheck(T* data) {
#if defined(COMPILER_MSVC) && defined(ARCH_CPU_64_BITS) && !defined(__clang__)
    // 64-bit MSVC's alias analysis was determining that the command buffer
    // entry couldn't be NULL, so it optimized out the NULL check.
    // Dereferencing the same datatype through a volatile pointer seems to
    // prevent that from happening. http://crbug.com/361936
    // TODO(jbauman): Remove once we're on VC2015, http://crbug.com/412902
    if (data)
      static_cast<volatile T*>(data)->header;
#endif
  }

  // Typed version of GetSpace. Gets enough room for the given type and returns
  // a reference to it.
  template <typename T>
  T* GetCmdSpace() {
    static_assert(T::kArgFlags == cmd::kFixed,
                  "T::kArgFlags should equal cmd::kFixed");
    int32 space_needed = ComputeNumEntries(sizeof(T));
    T* data = static_cast<T*>(GetSpace(space_needed));
    ForceNullCheck(data);
    return data;
  }

  // Typed version of GetSpace for immediate commands.
  template <typename T>
  T* GetImmediateCmdSpace(size_t data_space) {
    static_assert(T::kArgFlags == cmd::kAtLeastN,
                  "T::kArgFlags should equal cmd::kAtLeastN");
    int32 space_needed = ComputeNumEntries(sizeof(T) + data_space);
    T* data = static_cast<T*>(GetSpace(space_needed));
    ForceNullCheck(data);
    return data;
  }

  // Typed version of GetSpace for immediate commands, taking the total size
  // (including the command itself) rather than the extra data size.
  template <typename T>
  T* GetImmediateCmdSpaceTotalSize(size_t total_space) {
    static_assert(T::kArgFlags == cmd::kAtLeastN,
                  "T::kArgFlags should equal cmd::kAtLeastN");
    int32 space_needed = ComputeNumEntries(total_space);
    T* data = static_cast<T*>(GetSpace(space_needed));
    ForceNullCheck(data);
    return data;
  }

  int32 last_token_read() const {
    return command_buffer_->GetLastToken();
  }

  int32 get_offset() const {
    return command_buffer_->GetLastState().get_offset;
  }

  void Noop(uint32 skip_count) {
    cmd::Noop* cmd = GetImmediateCmdSpace<cmd::Noop>(
        (skip_count - 1) * sizeof(CommandBufferEntry));
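    // Note: skip_count counts the cmd::Noop entry itself, which is why only
    // (skip_count - 1) additional entries of immediate data are requested
    // above.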
    if (cmd) {
      cmd->Init(skip_count);
    }
  }

  void SetToken(uint32 token) {
    cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
    if (cmd) {
      cmd->Init(token);
    }
  }

  void SetBucketSize(uint32 bucket_id, uint32 size) {
    cmd::SetBucketSize* cmd = GetCmdSpace<cmd::SetBucketSize>();
    if (cmd) {
      cmd->Init(bucket_id, size);
    }
  }

  void SetBucketData(uint32 bucket_id,
                     uint32 offset,
                     uint32 size,
                     uint32 shared_memory_id,
                     uint32 shared_memory_offset) {
    cmd::SetBucketData* cmd = GetCmdSpace<cmd::SetBucketData>();
    if (cmd) {
      cmd->Init(bucket_id,
                offset,
                size,
                shared_memory_id,
                shared_memory_offset);
    }
  }

  void SetBucketDataImmediate(
      uint32 bucket_id, uint32 offset, const void* data, uint32 size) {
    cmd::SetBucketDataImmediate* cmd =
        GetImmediateCmdSpace<cmd::SetBucketDataImmediate>(size);
    if (cmd) {
      cmd->Init(bucket_id, offset, size);
      memcpy(ImmediateDataAddress(cmd), data, size);
    }
  }

  void GetBucketStart(uint32 bucket_id,
                      uint32 result_memory_id,
                      uint32 result_memory_offset,
                      uint32 data_memory_size,
                      uint32 data_memory_id,
                      uint32 data_memory_offset) {
    cmd::GetBucketStart* cmd = GetCmdSpace<cmd::GetBucketStart>();
    if (cmd) {
      cmd->Init(bucket_id,
                result_memory_id,
                result_memory_offset,
                data_memory_size,
                data_memory_id,
                data_memory_offset);
    }
  }

  void GetBucketData(uint32 bucket_id,
                     uint32 offset,
                     uint32 size,
                     uint32 shared_memory_id,
                     uint32 shared_memory_offset) {
    cmd::GetBucketData* cmd = GetCmdSpace<cmd::GetBucketData>();
    if (cmd) {
      cmd->Init(bucket_id,
                offset,
                size,
                shared_memory_id,
                shared_memory_offset);
    }
  }

  CommandBuffer* command_buffer() const {
    return command_buffer_;
  }

  scoped_refptr<Buffer> get_ring_buffer() const { return ring_buffer_; }

  uint32 flush_generation() const { return flush_generation_; }

  void FreeRingBuffer();

  bool HaveRingBuffer() const {
    return ring_buffer_id_ != -1;
  }

  bool usable() const {
    return usable_;
  }

  void ClearUsable() {
    usable_ = false;
    context_lost_ = true;
    CalcImmediateEntries(0);
  }

 private:
  // Returns the number of available entries (they may not be contiguous).
  int32 AvailableEntries() {
    return (get_offset() - put_ - 1 + total_entry_count_) % total_entry_count_;
  }
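
  // For example, with total_entry_count_ = 1024, put_ = 1000 and
  // get_offset() = 10, the expression above yields (10 - 1000 - 1 + 1024) %
  // 1024 = 33 free entries: 24 at the end of the ring buffer plus 9 at the
  // start, always leaving one entry unused so put_ never catches up to get.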

  void CalcImmediateEntries(int waiting_count);
  bool AllocateRingBuffer();
  void FreeResources();

  // Waits for the get offset to be in a specific range, inclusive. Returns
  // false if there was an error.
  bool WaitForGetOffsetInRange(int32 start, int32 end);

#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
  // Calls Flush if automatic flush conditions are met.
  void PeriodicFlushCheck();
#endif

  CommandBuffer* command_buffer_;
  int32 ring_buffer_id_;
  int32 ring_buffer_size_;
  scoped_refptr<gpu::Buffer> ring_buffer_;
  CommandBufferEntry* entries_;
  int32 total_entry_count_;  // the total number of entries
  int32 immediate_entry_count_;
  int32 token_;
  int32 put_;
  int32 last_put_sent_;
  int32 last_barrier_put_sent_;

#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
  int commands_issued_;
#endif

  bool usable_;
  bool context_lost_;
  bool flush_automatically_;

  base::TimeTicks last_flush_time_;

  // Incremented every time the helper flushes the command buffer.
  // Can be used to track when prior commands have been flushed.
  uint32 flush_generation_;

  friend class CommandBufferHelperTest;
  DISALLOW_COPY_AND_ASSIGN(CommandBufferHelper);
};

}  // namespace gpu

#endif  // GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_