We started redesigning the GpuMemoryBuffer interface to handle multiple buffers [0].
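To make "multiple buffers" concrete, here is a minimal sketch of the kind of shape such an interface could take, for example one underlying buffer per plane of a multi-planar format. Every name in the sketch is hypothetical and chosen only for illustration; the actual interface lives in ui/gfx/gpu_memory_buffer.h and the redesign itself is described in [0].

#include <stddef.h>
#include <stdint.h>

// Illustrative sketch only: hypothetical names, not the actual interface in
// ui/gfx/gpu_memory_buffer.h. A GpuMemoryBuffer backed by several underlying
// buffers (e.g. one per plane of a YUV frame) exposes them by index.
class MultiBufferGpuMemoryBuffer {
 public:
  virtual ~MultiBufferGpuMemoryBuffer() {}

  // Maps every underlying buffer into the caller's address space, writing one
  // base pointer per buffer into |data|. Returns false on failure.
  virtual bool Map(void** data) = 0;

  // Unmaps all buffers previously mapped with Map().
  virtual void Unmap() = 0;

  // Number of underlying buffers backing this allocation.
  virtual size_t GetNumberOfBuffers() const = 0;

  // Row stride in bytes of the buffer at |buffer_index|.
  virtual uint32_t GetStride(size_t buffer_index) const = 0;
};

The current gpu_command_buffer_stub.h, which receives GpuMemoryBufferHandles via OnCreateImage(), is included below for reference.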
content/common/gpu/gpu_command_buffer_stub.h (chromium-blink-merge.git):
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CONTENT_COMMON_GPU_GPU_COMMAND_BUFFER_STUB_H_
#define CONTENT_COMMON_GPU_GPU_COMMAND_BUFFER_STUB_H_

#include <deque>
#include <string>
#include <vector>

#include "base/memory/scoped_vector.h"
#include "base/memory/weak_ptr.h"
#include "base/observer_list.h"
#include "content/common/content_export.h"
#include "content/common/gpu/gpu_memory_manager.h"
#include "content/common/gpu/gpu_memory_manager_client.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/gpu_memory_allocation.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "ipc/ipc_listener.h"
#include "ipc/ipc_sender.h"
#include "media/base/video_decoder_config.h"
#include "ui/events/latency_info.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gfx/gpu_memory_buffer.h"
#include "ui/gfx/native_widget_types.h"
#include "ui/gl/gl_surface.h"
#include "ui/gl/gpu_preference.h"
#include "url/gurl.h"
namespace gpu {
struct Mailbox;
class ValueStateMap;
namespace gles2 {
class MailboxManager;
class SubscriptionRefSet;
}  // namespace gles2
}  // namespace gpu

namespace content {

class GpuChannel;
class GpuVideoDecodeAccelerator;
class GpuVideoEncodeAccelerator;
class GpuWatchdog;
struct WaitForCommandState;
class GpuCommandBufferStub
    : public GpuMemoryManagerClient,
      public IPC::Listener,
      public IPC::Sender,
      public base::SupportsWeakPtr<GpuCommandBufferStub> {
 public:
  class DestructionObserver {
   public:
    // Called in Destroy(), before the context/surface are released.
    virtual void OnWillDestroyStub() = 0;

   protected:
    virtual ~DestructionObserver() {}
  };

  typedef base::Callback<void(const std::vector<ui::LatencyInfo>&)>
      LatencyInfoCallback;

  GpuCommandBufferStub(
      GpuChannel* channel,
      GpuCommandBufferStub* share_group,
      const gfx::GLSurfaceHandle& handle,
      gpu::gles2::MailboxManager* mailbox_manager,
      gpu::gles2::SubscriptionRefSet* subscription_ref_set,
      gpu::ValueStateMap* pending_valuebuffer_state,
      const gfx::Size& size,
      const gpu::gles2::DisallowedFeatures& disallowed_features,
      const std::vector<int32>& attribs,
      gfx::GpuPreference gpu_preference,
      bool use_virtualized_gl_context,
      int32 route_id,
      int32 surface_id,
      GpuWatchdog* watchdog,
      bool software,
      const GURL& active_url);

  ~GpuCommandBufferStub() override;

  // IPC::Listener implementation:
  bool OnMessageReceived(const IPC::Message& message) override;

  // IPC::Sender implementation:
  bool Send(IPC::Message* msg) override;

  // GpuMemoryManagerClient implementation:
  gfx::Size GetSurfaceSize() const override;
  gpu::gles2::MemoryTracker* GetMemoryTracker() const override;
  void SetMemoryAllocation(const gpu::MemoryAllocation& allocation) override;
  void SuggestHaveFrontBuffer(bool suggest_have_frontbuffer) override;
  bool GetTotalGpuMemory(uint64* bytes) override;

  // Whether this command buffer can currently handle IPC messages.
  bool IsScheduled();

  // If the command buffer is pre-empted and cannot process commands.
  bool IsPreempted() const {
    return scheduler_.get() && scheduler_->IsPreempted();
  }

  // Whether there are commands in the buffer that haven't been processed.
  bool HasUnprocessedCommands();

  gpu::gles2::GLES2Decoder* decoder() const { return decoder_.get(); }
  gpu::GpuScheduler* scheduler() const { return scheduler_.get(); }
  GpuChannel* channel() const { return channel_; }

  // Identifies the target surface.
  int32 surface_id() const { return surface_id_; }

  // Identifies the various GpuCommandBufferStubs in the GPU process belonging
  // to the same renderer process.
  int32 route_id() const { return route_id_; }

  gfx::GpuPreference gpu_preference() { return gpu_preference_; }

  int32 GetRequestedAttribute(int attr) const;

  // Sends a message to the console.
  void SendConsoleMessage(int32 id, const std::string& message);

  void SendCachedShader(const std::string& key, const std::string& shader);

  gfx::GLSurface* surface() const { return surface_.get(); }

  void AddDestructionObserver(DestructionObserver* observer);
  void RemoveDestructionObserver(DestructionObserver* observer);

  // Associates a sync point with this stub. When the stub is destroyed, it
  // will retire all sync points that haven't been previously retired.
  void AddSyncPoint(uint32 sync_point);

  void SetPreemptByFlag(scoped_refptr<gpu::PreemptionFlag> flag);

  void SetLatencyInfoCallback(const LatencyInfoCallback& callback);

  void MarkContextLost();

  uint64 GetMemoryUsage() const;

  void SendSwapBuffersCompleted(
      const std::vector<ui::LatencyInfo>& latency_info);
  void SendUpdateVSyncParameters(base::TimeTicks timebase,
                                 base::TimeDelta interval);
 private:
  GpuMemoryManager* GetMemoryManager() const;
  bool MakeCurrent();
  void Destroy();

  // Cleans up and sends reply if OnInitialize failed.
  void OnInitializeFailed(IPC::Message* reply_message);

  // Message handlers:
  void OnInitialize(base::SharedMemoryHandle shared_state_shm,
                    IPC::Message* reply_message);
  void OnSetGetBuffer(int32 shm_id, IPC::Message* reply_message);
  void OnProduceFrontBuffer(const gpu::Mailbox& mailbox);
  void OnGetState(IPC::Message* reply_message);
  void OnWaitForTokenInRange(int32 start,
                             int32 end,
                             IPC::Message* reply_message);
  void OnWaitForGetOffsetInRange(int32 start,
                                 int32 end,
                                 IPC::Message* reply_message);
  void OnAsyncFlush(int32 put_offset, uint32 flush_count,
                    const std::vector<ui::LatencyInfo>& latency_info);
  void OnRescheduled();
  void OnRegisterTransferBuffer(int32 id,
                                base::SharedMemoryHandle transfer_buffer,
                                uint32 size);
  void OnDestroyTransferBuffer(int32 id);
  void OnGetTransferBuffer(int32 id, IPC::Message* reply_message);

  void OnCreateVideoDecoder(media::VideoCodecProfile profile,
                            int32 route_id,
                            IPC::Message* reply_message);
  void OnCreateVideoEncoder(media::VideoFrame::Format input_format,
                            const gfx::Size& input_visible_size,
                            media::VideoCodecProfile output_profile,
                            uint32 initial_bitrate,
                            int32 route_id,
                            IPC::Message* reply_message);

  void OnSetSurfaceVisible(bool visible);

  void OnEnsureBackbuffer();

  void OnRetireSyncPoint(uint32 sync_point);
  bool OnWaitSyncPoint(uint32 sync_point);
  void OnSyncPointRetired();
  void OnSignalSyncPoint(uint32 sync_point, uint32 id);
  void OnSignalSyncPointAck(uint32 id);
  void OnSignalQuery(uint32 query, uint32 id);

  void OnSetClientHasMemoryAllocationChangedCallback(bool has_callback);

  void OnCreateImage(int32 id,
                     gfx::GpuMemoryBufferHandle handle,
                     gfx::Size size,
                     gfx::GpuMemoryBuffer::Format format,
                     uint32 internalformat);
  void OnDestroyImage(int32 id);

  void OnCommandProcessed();
  void OnParseError();
  void OnCreateStreamTexture(
      uint32 texture_id, int32 stream_id, bool* succeeded);

  void ReportState();

  // Wrapper for GpuScheduler::PutChanged that sets the crash report URL.
  void PutChanged();

  // Poll the command buffer to execute work.
  void PollWork();

  // Whether this command buffer needs to be polled again in the future.
  bool HasMoreWork();

  void ScheduleDelayedWork(int64 delay);

  bool CheckContextLost();
  void CheckCompleteWaits();
  // The lifetime of objects of this class is managed by a GpuChannel. The
  // GpuChannels destroy all the GpuCommandBufferStubs that they own when they
  // are destroyed. So a raw pointer is safe.
  GpuChannel* channel_;

  // The group of contexts that share namespaces with this context.
  scoped_refptr<gpu::gles2::ContextGroup> context_group_;

  gfx::GLSurfaceHandle handle_;
  gfx::Size initial_size_;
  gpu::gles2::DisallowedFeatures disallowed_features_;
  std::vector<int32> requested_attribs_;
  gfx::GpuPreference gpu_preference_;
  bool use_virtualized_gl_context_;
  int32 route_id_;
  int32 surface_id_;
  bool software_;
  uint32 last_flush_count_;

  scoped_ptr<gpu::CommandBufferService> command_buffer_;
  scoped_ptr<gpu::gles2::GLES2Decoder> decoder_;
  scoped_ptr<gpu::GpuScheduler> scheduler_;
  scoped_refptr<gfx::GLSurface> surface_;

  scoped_ptr<GpuMemoryManagerClientState> memory_manager_client_state_;
  // The last memory allocation received from the GpuMemoryManager (used to
  // elide redundant work).
  bool last_memory_allocation_valid_;
  gpu::MemoryAllocation last_memory_allocation_;

  GpuWatchdog* watchdog_;

  ObserverList<DestructionObserver> destruction_observers_;

  // A queue of sync points associated with this stub.
  std::deque<uint32> sync_points_;
  int sync_point_wait_count_;

  bool delayed_work_scheduled_;
  uint64 previous_messages_processed_;
  base::TimeTicks last_idle_time_;

  scoped_refptr<gpu::PreemptionFlag> preemption_flag_;

  LatencyInfoCallback latency_info_callback_;

  GURL active_url_;
  size_t active_url_hash_;

  size_t total_gpu_memory_;
  scoped_ptr<WaitForCommandState> wait_for_token_;
  scoped_ptr<WaitForCommandState> wait_for_get_offset_;

  DISALLOW_COPY_AND_ASSIGN(GpuCommandBufferStub);
};

}  // namespace content

#endif  // CONTENT_COMMON_GPU_GPU_COMMAND_BUFFER_STUB_H_