// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains the command buffer helper class.

#ifndef GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_
#define GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_

#include <string.h>

#include "base/basictypes.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/time/time.h"
#include "base/trace_event/memory_dump_provider.h"
#include "build/build_config.h"
#include "gpu/command_buffer/common/cmd_buffer_common.h"
#include "gpu/command_buffer/common/command_buffer.h"
#include "gpu/gpu_export.h"

namespace gpu {

class Buffer;

#if !defined(OS_ANDROID)
#define CMD_HELPER_PERIODIC_FLUSH_CHECK
const int kCommandsPerFlushCheck = 100;
const int kPeriodicFlushDelayInMicroseconds =
    base::Time::kMicrosecondsPerSecond / (5 * 60);
#endif

const int kAutoFlushSmall = 16;  // 1/16 of the buffer
const int kAutoFlushBig = 2;     // 1/2 of the buffer
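// (Note: the exact auto-flush policy lives in CalcImmediateEntries() in the
// .cc file; a reasonable reading of the ratios above is that the amount of
// pending, unflushed work is capped near total_entry_count_ / kAutoFlushSmall
// or total_entry_count_ / kAutoFlushBig before a flush is forced.)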
// Command buffer helper class. This class simplifies ring buffer management:
// it will allocate the buffer, give it to the buffer interface, and let the
// user add commands to it, while taking care of the synchronization (put and
// get). It also provides a way to ensure commands have been executed, through
// the token mechanism:
//
//   helper.AddCommand(...);
//   helper.AddCommand(...);
//   int32 token = helper.InsertToken();
//   helper.AddCommand(...);
//   helper.AddCommand(...);
//
//   [...]
//
//   helper.WaitForToken(token);  // This doesn't return until the first two
//                                // commands have been executed.
class GPU_EXPORT CommandBufferHelper
    : public base::trace_event::MemoryDumpProvider {
 public:
  explicit CommandBufferHelper(CommandBuffer* command_buffer);
  ~CommandBufferHelper() override;
  // Initializes the CommandBufferHelper.
  // Parameters:
  //   ring_buffer_size: The size of the ring buffer portion of the command
  //       buffer.
  bool Initialize(int32 ring_buffer_size);

  // Sets whether the command buffer should automatically flush periodically
  // to try to increase performance. Defaults to true.
  void SetAutomaticFlushes(bool enabled);

  // True if the context is lost.
  bool IsContextLost();

  // Asynchronously flushes the commands, setting the put pointer to let the
  // buffer interface know that new commands have been added. After a flush
  // returns, the command buffer service is aware of all pending commands.
  void Flush();

  // Ensures that commands up to the put pointer will be processed in the
  // command buffer service before any future commands on other command
  // buffers sharing a channel.
  void OrderingBarrier();
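  // For example (a sketch; helper_a and helper_b are assumed to wrap two
  // command buffers sharing one channel, and AddCommand is the same
  // placeholder used in the class comment above):
  //
  //   helper_a.AddCommand(...);
  //   helper_a.OrderingBarrier();
  //   helper_b.AddCommand(...);  // processed after helper_a's commands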
  // Waits until all the commands have been executed. Returns whether it
  // was successful. The function will fail if the command buffer service has
  // disconnected.
  bool Finish();

  // Waits until the given number of entries is available.
  // Parameters:
  //   count: number of entries needed. This value must be at most
  //       the size of the buffer minus one.
  void WaitForAvailableEntries(int32 count);

  // Inserts a new token into the command buffer. This token either has a
  // value different from previously inserted tokens, or ensures that
  // previously inserted tokens with that value have already passed through
  // the command stream.
  // Returns:
  //   the value of the new token, or -1 if the command buffer reader has
  //   shut down.
  int32 InsertToken();

  // Returns true if the token has passed.
  // Parameters:
  //   token: the value of the token to check.
  bool HasTokenPassed(int32 token) const {
    if (token > token_)
      return true;  // we wrapped
    return last_token_read() >= token;
  }
  // Waits until the token of a particular value has passed through the
  // command stream (i.e. commands inserted before that token have been
  // executed).
  // NOTE: This will call Flush if it needs to block.
  // Parameters:
  //   token: the value of the token to wait for.
  void WaitForToken(int32 token);
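  // Token usage sketch (FreeMyResource() is a hypothetical caller-side
  // function, not part of this class):
  //
  //   int32 token = helper.InsertToken();
  //   [...]
  //   if (helper.HasTokenPassed(token))
  //     FreeMyResource();            // commands before the token are done
  //   else
  //     helper.WaitForToken(token);  // block (flushing if needed) first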
  // Called prior to each command being issued. Waits for a certain amount of
  // space to be available. Returns address of space.
  void* GetSpace(int32 entries) {
#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
    // Allow this command buffer to be pre-empted by another if a "reasonable"
    // amount of work has been done. On high-end machines, this reduces the
    // latency of GPU commands. However, on Android, this can cause the
    // kernel to thrash between generating GPU commands and executing them.
    ++commands_issued_;
    if (flush_automatically_ &&
        (commands_issued_ % kCommandsPerFlushCheck == 0)) {
      PeriodicFlushCheck();
    }
#endif

    // Test for immediate entries.
    if (entries > immediate_entry_count_) {
      WaitForAvailableEntries(entries);
      if (entries > immediate_entry_count_)
        return NULL;
    }

    DCHECK_LE(entries, immediate_entry_count_);

    // Allocate space and advance put_.
    CommandBufferEntry* space = &entries_[put_];
    put_ += entries;
    immediate_entry_count_ -= entries;

    DCHECK_LE(put_, total_entry_count_);
    return space;
  }
  template <typename T>
  void ForceNullCheck(T* data) {
#if defined(COMPILER_MSVC) && defined(ARCH_CPU_64_BITS) && !defined(__clang__)
    // 64-bit MSVC's alias analysis was determining that the command buffer
    // entry couldn't be NULL, so it optimized out the NULL check.
    // Dereferencing the same datatype through a volatile pointer seems to
    // prevent that from happening. http://crbug.com/361936
    // TODO(jbauman): Remove once we're on VC2015, http://crbug.com/412902
    if (data)
      static_cast<volatile T*>(data)->header;
#endif
  }
  // Typed version of GetSpace. Gets enough room for the given type and
  // returns a reference to it.
  template <typename T>
  T* GetCmdSpace() {
    static_assert(T::kArgFlags == cmd::kFixed,
                  "T::kArgFlags should equal cmd::kFixed");
    int32 space_needed = ComputeNumEntries(sizeof(T));
    T* data = static_cast<T*>(GetSpace(space_needed));
    ForceNullCheck(data);
    return data;
  }
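  // Example (a sketch mirroring SetToken() below; cmd::SetToken is a
  // fixed-size command from cmd_buffer_common.h):
  //
  //   cmd::SetToken* c = GetCmdSpace<cmd::SetToken>();
  //   if (c)  // NULL when space could not be obtained
  //     c->Init(token);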
  // Typed version of GetSpace for immediate commands.
  template <typename T>
  T* GetImmediateCmdSpace(size_t data_space) {
    static_assert(T::kArgFlags == cmd::kAtLeastN,
                  "T::kArgFlags should equal cmd::kAtLeastN");
    int32 space_needed = ComputeNumEntries(sizeof(T) + data_space);
    T* data = static_cast<T*>(GetSpace(space_needed));
    ForceNullCheck(data);
    return data;
  }
  // Typed version of GetSpace for immediate commands.
  template <typename T>
  T* GetImmediateCmdSpaceTotalSize(size_t total_space) {
    static_assert(T::kArgFlags == cmd::kAtLeastN,
                  "T::kArgFlags should equal cmd::kAtLeastN");
    int32 space_needed = ComputeNumEntries(total_space);
    T* data = static_cast<T*>(GetSpace(space_needed));
    ForceNullCheck(data);
    return data;
  }
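  // (The two immediate-command variants differ only in what the size
  // argument covers: GetImmediateCmdSpace() adds sizeof(T) for the fixed
  // part of the command to data_space, while GetImmediateCmdSpaceTotalSize()
  // expects total_space to include it already.)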
  int32 last_token_read() const {
    return command_buffer_->GetLastToken();
  }

  int32 get_offset() const {
    return command_buffer_->GetLastState().get_offset;
  }
  // Common Commands
  void Noop(uint32 skip_count) {
    cmd::Noop* cmd = GetImmediateCmdSpace<cmd::Noop>(
        (skip_count - 1) * sizeof(CommandBufferEntry));
    if (cmd) {
      cmd->Init(skip_count);
    }
  }

  void SetToken(uint32 token) {
    cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
    if (cmd) {
      cmd->Init(token);
    }
  }

  void SetBucketSize(uint32 bucket_id, uint32 size) {
    cmd::SetBucketSize* cmd = GetCmdSpace<cmd::SetBucketSize>();
    if (cmd) {
      cmd->Init(bucket_id, size);
    }
  }
  void SetBucketData(uint32 bucket_id,
                     uint32 offset,
                     uint32 size,
                     uint32 shared_memory_id,
                     uint32 shared_memory_offset) {
    cmd::SetBucketData* cmd = GetCmdSpace<cmd::SetBucketData>();
    if (cmd) {
      cmd->Init(bucket_id,
                offset,
                size,
                shared_memory_id,
                shared_memory_offset);
    }
  }

  void SetBucketDataImmediate(
      uint32 bucket_id, uint32 offset, const void* data, uint32 size) {
    cmd::SetBucketDataImmediate* cmd =
        GetImmediateCmdSpace<cmd::SetBucketDataImmediate>(size);
    if (cmd) {
      cmd->Init(bucket_id, offset, size);
      memcpy(ImmediateDataAddress(cmd), data, size);
    }
  }
  void GetBucketStart(uint32 bucket_id,
                      uint32 result_memory_id,
                      uint32 result_memory_offset,
                      uint32 data_memory_size,
                      uint32 data_memory_id,
                      uint32 data_memory_offset) {
    cmd::GetBucketStart* cmd = GetCmdSpace<cmd::GetBucketStart>();
    if (cmd) {
      cmd->Init(bucket_id,
                result_memory_id,
                result_memory_offset,
                data_memory_size,
                data_memory_id,
                data_memory_offset);
    }
  }

  void GetBucketData(uint32 bucket_id,
                     uint32 offset,
                     uint32 size,
                     uint32 shared_memory_id,
                     uint32 shared_memory_offset) {
    cmd::GetBucketData* cmd = GetCmdSpace<cmd::GetBucketData>();
    if (cmd) {
      cmd->Init(bucket_id,
                offset,
                size,
                shared_memory_id,
                shared_memory_offset);
    }
  }
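  // Bucket upload sketch (an assumption about typical use, not an API
  // contract: a bucket lets a caller move a blob larger than one shared
  // memory allocation in chunks):
  //
  //   SetBucketSize(id, total_size);
  //   // For each chunk: copy it into shared memory, then:
  //   SetBucketData(id, offset, chunk_size, shm_id, shm_offset);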
  CommandBuffer* command_buffer() const {
    return command_buffer_;
  }

  scoped_refptr<Buffer> get_ring_buffer() const { return ring_buffer_; }

  uint32 flush_generation() const { return flush_generation_; }

  void FreeRingBuffer();

  bool HaveRingBuffer() const {
    return ring_buffer_id_ != -1;
  }

  bool usable() const {
    return usable_;
  }

  void ClearUsable() {
    usable_ = false;
    context_lost_ = true;
    CalcImmediateEntries(0);
  }

  // Overridden from base::trace_event::MemoryDumpProvider:
  bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
                    base::trace_event::ProcessMemoryDump* pmd) override;
 private:
  // Returns the number of available entries (they may not be contiguous).
  int32 AvailableEntries() {
    return (get_offset() - put_ - 1 + total_entry_count_) % total_entry_count_;
  }
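  // (Worked example: with total_entry_count_ = 1024, put_ = 10 and
  // get_offset() = 5, this yields (5 - 10 - 1 + 1024) % 1024 = 1018 free
  // entries; the "- 1" keeps put_ from catching up to get, which would be
  // indistinguishable from an empty buffer.)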
  void CalcImmediateEntries(int waiting_count);
  bool AllocateRingBuffer();
  void FreeResources();

  // Waits for the get offset to be in a specific range, inclusive. Returns
  // false if there was an error.
  bool WaitForGetOffsetInRange(int32 start, int32 end);

#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
  // Calls Flush if automatic flush conditions are met.
  void PeriodicFlushCheck();
#endif
  int32 GetTotalFreeEntriesNoWaiting() const;

  CommandBuffer* command_buffer_;
  int32 ring_buffer_id_;
  int32 ring_buffer_size_;
  scoped_refptr<gpu::Buffer> ring_buffer_;
  CommandBufferEntry* entries_;
  int32 total_entry_count_;  // the total number of entries
  int32 immediate_entry_count_;
  int32 token_;
  int32 put_;
  int32 last_put_sent_;
  int32 last_barrier_put_sent_;

#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
  int commands_issued_;
#endif

  bool usable_;
  bool context_lost_;
  bool flush_automatically_;

  base::TimeTicks last_flush_time_;

  // Incremented every time the helper flushes the command buffer.
  // Can be used to track when prior commands have been flushed.
  uint32 flush_generation_;

  friend class CommandBufferHelperTest;
  DISALLOW_COPY_AND_ASSIGN(CommandBufferHelper);
};

}  // namespace gpu

#endif  // GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_