// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains the command buffer helper class.

#ifndef GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_
#define GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_

#include <string.h>
#include <time.h>

#include "gpu/command_buffer/common/cmd_buffer_common.h"
#include "gpu/command_buffer/common/command_buffer.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/logging.h"
#include "gpu/gpu_export.h"

namespace gpu {

// Command buffer helper class. This class simplifies ring buffer management:
// it will allocate the buffer, give it to the buffer interface, and let the
// user add commands to it, while taking care of the synchronization (put and
// get). It also provides a way to ensure commands have been executed, through
// the token mechanism:
//
// helper.AddCommand(...);
// helper.AddCommand(...);
// int32 token = helper.InsertToken();
// helper.AddCommand(...);
// helper.AddCommand(...);
//
// helper.WaitForToken(token);  // this doesn't return until the first two
//                              // commands have been executed.
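//
// Synchronization model (informal sketch, added for clarity): the helper is
// the producer, writing commands into the ring buffer at the put pointer; the
// command buffer service is the consumer, executing commands and advancing
// the get pointer. The helper only blocks when the ring buffer is full (see
// WaitForAvailableEntries) or when explicitly asked to wait on a token.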
class GPU_EXPORT CommandBufferHelper {
 public:
  explicit CommandBufferHelper(CommandBuffer* command_buffer);
  virtual ~CommandBufferHelper();

  // Initializes the CommandBufferHelper.
  // Parameters:
  //   ring_buffer_size: The size of the ring buffer portion of the command
  //       buffer.
  bool Initialize(int32 ring_buffer_size);

  // Sets whether the command buffer should automatically flush periodically
  // to try to increase performance. Defaults to true.
  void SetAutomaticFlushes(bool enabled);
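  // (Informal note, added for clarity: with automatic flushes enabled the
  // helper may issue a flush on its own, based on how many commands have been
  // issued and how long it has been since the last flush, so the service can
  // start executing before the client flushes explicitly.)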

  // True if the context is lost.
  bool IsContextLost();

  // Asynchronously flushes the commands, setting the put pointer to let the
  // buffer interface know that new commands have been added. After a flush
  // returns, the command buffer service is aware of all pending commands.
  void Flush();

  // Flushes the commands, setting the put pointer to let the buffer interface
  // know that new commands have been added. After a flush returns, the command
  // buffer service is aware of all pending commands and it is guaranteed to
  // have made some progress in processing them. Returns whether the flush was
  // successful. The flush will fail if the command buffer service has
  // disconnected.
  bool FlushSync();

  // Waits until all the commands have been executed. Returns whether it
  // was successful. The function will fail if the command buffer service has
  // disconnected.
  bool Finish();

  // Waits until the given number of entries is available.
  // Parameters:
  //   count: number of entries needed. This value must be at most
  //       the size of the buffer minus one.
  void WaitForAvailableEntries(int32 count);

  // Inserts a new token into the command buffer. This token either has a value
  // different from previously inserted tokens, or ensures that previously
  // inserted tokens with that value have already passed through the command
  // stream.
  // Returns:
  //   the value of the new token, or -1 if the command buffer reader has
  //   shutdown.
  int32 InsertToken();

  // Waits until the token of a particular value has passed through the command
  // stream (i.e. commands inserted before that token have been executed).
  // NOTE: This will call Flush if it needs to block.
  // Parameters:
  //   token: the value of the token to wait for.
  void WaitForToken(int32 token);
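
  // Illustrative sketch (added; anything beyond the methods declared here is
  // hypothetical): a client that must not reuse a piece of shared memory until
  // the service has consumed it can do
  //
  //   int32 token = helper.InsertToken();
  //   ...
  //   if (helper.last_token_read() < token)  // simplified; tokens can wrap.
  //     helper.WaitForToken(token);
  //   // Commands issued before InsertToken() have now been executed.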

  // Called prior to each command being issued. Waits for a certain amount of
  // space to be available. Returns address of space.
  CommandBufferEntry* GetSpace(uint32 entries);

  // Typed version of GetSpace. Gets enough room for the given type and returns
  // a reference to it.
  template <typename T>
  T* GetCmdSpace() {
    COMPILE_ASSERT(T::kArgFlags == cmd::kFixed, Cmd_kArgFlags_not_kFixed);
    uint32 space_needed = ComputeNumEntries(sizeof(T));
    void* data = GetSpace(space_needed);
    return reinterpret_cast<T*>(data);
  }

  // Typed version of GetSpace for immediate commands.
  template <typename T>
  T* GetImmediateCmdSpace(size_t data_space) {
    COMPILE_ASSERT(T::kArgFlags == cmd::kAtLeastN, Cmd_kArgFlags_not_kAtLeastN);
    uint32 space_needed = ComputeNumEntries(sizeof(T) + data_space);
    void* data = GetSpace(space_needed);
    return reinterpret_cast<T*>(data);
  }

  // Typed version of GetSpace for immediate commands, where total_space
  // already includes the size of the command itself.
  template <typename T>
  T* GetImmediateCmdSpaceTotalSize(size_t total_space) {
    COMPILE_ASSERT(T::kArgFlags == cmd::kAtLeastN, Cmd_kArgFlags_not_kAtLeastN);
    uint32 space_needed = ComputeNumEntries(total_space);
    void* data = GetSpace(space_needed);
    return reinterpret_cast<T*>(data);
  }

  int32 last_token_read() const {
    return command_buffer_->GetLastToken();
  }

  int32 get_offset() const {
    return command_buffer_->GetLastState().get_offset;
  }
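
  // Note on Noop (added for clarity): skip_count is the total number of
  // entries the command occupies, so the immediate payload below is
  // skip_count - 1 entries; the command header itself takes the remaining one.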
  void Noop(uint32 skip_count) {
    cmd::Noop* cmd = GetImmediateCmdSpace<cmd::Noop>(
        (skip_count - 1) * sizeof(CommandBufferEntry));
    if (cmd) {
      cmd->Init(skip_count);
    }
  }

  void SetToken(uint32 token) {
    cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
    if (cmd) {
      cmd->Init(token);
    }
  }
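
  // The bucket commands below operate on service-side "buckets": chunks of
  // memory held by the service and identified by bucket_id. They are used to
  // move data whose size is not known in advance or does not fit in a single
  // transfer-buffer round trip. SetBucketData copies from client shared
  // memory, while SetBucketDataImmediate copies the data inline into the
  // command buffer itself. (Descriptive note added for clarity.)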
  void SetBucketSize(uint32 bucket_id, uint32 size) {
    cmd::SetBucketSize* cmd = GetCmdSpace<cmd::SetBucketSize>();
    if (cmd) {
      cmd->Init(bucket_id, size);
    }
  }

  void SetBucketData(uint32 bucket_id,
                     uint32 offset,
                     uint32 size,
                     uint32 shared_memory_id,
                     uint32 shared_memory_offset) {
    cmd::SetBucketData* cmd = GetCmdSpace<cmd::SetBucketData>();
    if (cmd) {
      cmd->Init(bucket_id,
                offset,
                size,
                shared_memory_id,
                shared_memory_offset);
    }
  }

  void SetBucketDataImmediate(
      uint32 bucket_id, uint32 offset, const void* data, uint32 size) {
    cmd::SetBucketDataImmediate* cmd =
        GetImmediateCmdSpace<cmd::SetBucketDataImmediate>(size);
    if (cmd) {
      cmd->Init(bucket_id, offset, size);
      memcpy(ImmediateDataAddress(cmd), data, size);
    }
  }

  void GetBucketStart(uint32 bucket_id,
                      uint32 result_memory_id,
                      uint32 result_memory_offset,
                      uint32 data_memory_size,
                      uint32 data_memory_id,
                      uint32 data_memory_offset) {
    cmd::GetBucketStart* cmd = GetCmdSpace<cmd::GetBucketStart>();
    if (cmd) {
      cmd->Init(bucket_id,
                result_memory_id,
                result_memory_offset,
                data_memory_size,
                data_memory_id,
                data_memory_offset);
    }
  }

  void GetBucketData(uint32 bucket_id,
                     uint32 offset,
                     uint32 size,
                     uint32 shared_memory_id,
                     uint32 shared_memory_offset) {
    cmd::GetBucketData* cmd = GetCmdSpace<cmd::GetBucketData>();
    if (cmd) {
      cmd->Init(bucket_id,
                offset,
                size,
                shared_memory_id,
                shared_memory_offset);
    }
  }

  CommandBuffer* command_buffer() const {
    return command_buffer_;
  }

  Buffer get_ring_buffer() const {
    return ring_buffer_;
  }

  void FreeRingBuffer();

  bool HaveRingBuffer() const {
    return ring_buffer_id_ != -1;
  }

  bool usable() const {
    return usable_;
  }

 private:
  // Waits until get changes, updating the value of get_.
  void WaitForGetChange();
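
  // Ring buffer bookkeeping (explanatory note added for clarity): put_ is
  // where the helper writes the next command and get_offset() is where the
  // service reads the next one. One entry is always left unused so that
  // put == get unambiguously means "empty", which is why the arithmetic below
  // subtracts 1.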
  // Returns the number of available entries (they may not be contiguous).
  int32 AvailableEntries() {
    return (get_offset() - put_ - 1 + total_entry_count_) % total_entry_count_;
  }

  bool AllocateRingBuffer();
  void FreeResources();

  CommandBuffer* command_buffer_;
  int32 ring_buffer_id_;
  int32 ring_buffer_size_;
  Buffer ring_buffer_;
  CommandBufferEntry* entries_;
  int32 total_entry_count_;  // the total number of entries
  int32 put_;
  int32 last_put_sent_;
  int commands_issued_;
  bool usable_;
  bool flush_automatically_;

  // Using C runtime instead of base because this file cannot depend on base.
  clock_t last_flush_time_;

  friend class CommandBufferHelperTest;
  DISALLOW_COPY_AND_ASSIGN(CommandBufferHelper);
};

}  // namespace gpu

#endif  // GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_