// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CONTENT_COMMON_GPU_GPU_CHANNEL_H_
#define CONTENT_COMMON_GPU_GPU_CHANNEL_H_

#include <string>

#include "base/containers/hash_tables.h"
#include "base/containers/scoped_ptr_hash_map.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/process/process.h"
#include "base/trace_event/memory_dump_provider.h"
#include "build/build_config.h"
#include "content/common/content_export.h"
#include "content/common/gpu/gpu_command_buffer_stub.h"
#include "content/common/gpu/gpu_memory_manager.h"
#include "content/common/gpu/gpu_result_codes.h"
#include "content/common/gpu/gpu_stream_priority.h"
#include "content/common/message_router.h"
#include "gpu/command_buffer/service/valuebuffer_manager.h"
#include "ipc/ipc_sync_channel.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gfx/native_widget_types.h"
#include "ui/gl/gl_share_group.h"
#include "ui/gl/gpu_preference.h"

struct GPUCreateCommandBufferConfig;

namespace base {
class WaitableEvent;
}

namespace gpu {
class PreemptionFlag;
class SyncPointManager;
union ValueState;
class ValueStateMap;
namespace gles2 {
class SubscriptionRefSet;
}
}

namespace IPC {
class AttachmentBroker;
class MessageFilter;
}

namespace content {
class GpuChannelManager;
class GpuChannelMessageFilter;
class GpuChannelMessageQueue;
class GpuJpegDecodeAccelerator;
class GpuWatchdog;

// Encapsulates an IPC channel between the GPU process and one renderer
// process. On the renderer side there's a corresponding GpuChannelHost.
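//
// A rough usage sketch (hypothetical caller code, shown only for orientation;
// in the real code GpuChannelManager creates and owns GpuChannel):
//
//   scoped_ptr<GpuChannel> channel(new GpuChannel(
//       channel_manager, watchdog, share_group, mailbox_manager,
//       task_runner, io_task_runner, client_id, client_tracing_id,
//       false /* software */, false /* allow_future_sync_points */,
//       false /* allow_real_time_streams */));
//   IPC::ChannelHandle handle =
//       channel->Init(shutdown_event, attachment_broker);
//   // |handle| is returned to the renderer, which communicates with this
//   // channel through its GpuChannelHost.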
class CONTENT_EXPORT GpuChannel
    : public IPC::Listener,
      public IPC::Sender,
      public gpu::gles2::SubscriptionRefSet::Observer {
 public:
  // Takes ownership of the renderer process handle.
  GpuChannel(GpuChannelManager* gpu_channel_manager,
             GpuWatchdog* watchdog,
             gfx::GLShareGroup* share_group,
             gpu::gles2::MailboxManager* mailbox_manager,
             base::SingleThreadTaskRunner* task_runner,
             base::SingleThreadTaskRunner* io_task_runner,
             int client_id,
             uint64_t client_tracing_id,
             bool software,
             bool allow_future_sync_points,
             bool allow_real_time_streams);
  ~GpuChannel() override;

  // Initializes the IPC channel. Caller takes ownership of the client FD in
  // the returned handle and is responsible for closing it.
  virtual IPC::ChannelHandle Init(base::WaitableEvent* shutdown_event,
                                  IPC::AttachmentBroker* attachment_broker);
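
  // A hedged sketch of the FD-ownership contract above (hypothetical caller;
  // the POSIX-only |socket.fd| field of IPC::ChannelHandle is assumed here):
  //   IPC::ChannelHandle handle = channel->Init(shutdown_event, broker);
  //   // ... normally |handle| is forwarded to the client process ...
  //   // If it is never forwarded, the caller must close the FD itself:
  //   if (!sent_to_client)
  //     close(handle.socket.fd);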

  // Get the GpuChannelManager that owns this channel.
  GpuChannelManager* gpu_channel_manager() const {
    return gpu_channel_manager_;
  }

  const std::string& channel_id() const { return channel_id_; }

  virtual base::ProcessId GetClientPID() const;

  int client_id() const { return client_id_; }

  uint64_t client_tracing_id() const { return client_tracing_id_; }

  scoped_refptr<base::SingleThreadTaskRunner> io_task_runner() const {
    return io_task_runner_;
  }

  // IPC::Listener implementation:
  bool OnMessageReceived(const IPC::Message& msg) override;
  void OnChannelError() override;

  // IPC::Sender implementation:
  bool Send(IPC::Message* msg) override;

  // Requeue the message that is currently being processed to the beginning of
  // the queue. Used when the processing of a message gets aborted because of
  // unscheduling conditions.
  void RequeueMessage();

  // SubscriptionRefSet::Observer implementation
  void OnAddSubscription(unsigned int target) override;
  void OnRemoveSubscription(unsigned int target) override;

  // This is called when a command buffer transitions from the unscheduled
  // state to the scheduled state, which potentially means the channel
  // transitions from the unscheduled to the scheduled state. When this
  // occurs, deferred IPC messages are handled.
  void OnScheduled();

  // This is called when a command buffer transitions between scheduled and
  // descheduled states. When any stub is descheduled, we stop preempting
  // other channels.
  void StubSchedulingChanged(bool scheduled);

  CreateCommandBufferResult CreateViewCommandBuffer(
      const gfx::GLSurfaceHandle& window,
      int32 surface_id,
      const GPUCreateCommandBufferConfig& init_params,
      int32 route_id);

  gfx::GLShareGroup* share_group() const { return share_group_.get(); }

  GpuCommandBufferStub* LookupCommandBuffer(int32 route_id);

  void LoseAllContexts();
  void MarkAllContextsLost();

  // Called to add a listener for a particular message routing ID.
  // Returns true on success.
  bool AddRoute(int32 route_id, IPC::Listener* listener);

  // Called to remove a listener for a particular message routing ID.
  void RemoveRoute(int32 route_id);
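
  // A hypothetical routing sketch (illustrative only): whoever creates a
  // command buffer stub for |route_id| registers it so that IPCs addressed
  // to that routing ID reach it, and unregisters it when the stub goes away:
  //   channel->AddRoute(route_id, stub);
  //   ...
  //   channel->RemoveRoute(route_id);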

  gpu::PreemptionFlag* GetPreemptionFlag();

  // If |preemption_flag->IsSet()|, any stub on this channel
  // should stop issuing GL commands. Setting this to NULL stops deferral.
  void SetPreemptByFlag(
      scoped_refptr<gpu::PreemptionFlag> preemption_flag);
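
  // A minimal sketch (hypothetical usage; assumes |other_channel| is another
  // GpuChannel whose pending work should take priority over this one):
  //   channel->SetPreemptByFlag(other_channel->GetPreemptionFlag());
  //   ...
  //   channel->SetPreemptByFlag(nullptr);  // Stop deferring.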

  void CacheShader(const std::string& key, const std::string& shader);

  void AddFilter(IPC::MessageFilter* filter);
  void RemoveFilter(IPC::MessageFilter* filter);

  uint64 GetMemoryUsage();

  scoped_refptr<gfx::GLImage> CreateImageForGpuMemoryBuffer(
      const gfx::GpuMemoryBufferHandle& handle,
      const gfx::Size& size,
      gfx::BufferFormat format,
      uint32 internalformat);

  bool allow_future_sync_points() const { return allow_future_sync_points_; }

  void HandleUpdateValueState(unsigned int target,
                              const gpu::ValueState& state);

  // Visible for testing.
  const gpu::ValueStateMap* pending_valuebuffer_state() const {
    return pending_valuebuffer_state_.get();
  }

  // Visible for testing.
  GpuChannelMessageFilter* filter() const { return filter_.get(); }

  uint32_t GetCurrentOrderNum() const { return current_order_num_; }
  uint32_t GetProcessedOrderNum() const { return processed_order_num_; }
  uint32_t GetUnprocessedOrderNum() const;

 protected:
  // The message filter on the io thread.
  scoped_refptr<GpuChannelMessageFilter> filter_;

  // Map of routing id to command buffer stub.
  base::ScopedPtrHashMap<int32, scoped_ptr<GpuCommandBufferStub>> stubs_;

 private:
  class StreamState {
   public:
    StreamState(int32 id, GpuStreamPriority priority);
    ~StreamState();

    int32 id() const { return id_; }
    GpuStreamPriority priority() const { return priority_; }

    void AddRoute(int32 route_id);
    void RemoveRoute(int32 route_id);
    bool HasRoute(int32 route_id) const;
    bool HasRoutes() const;

   private:
    int32 id_;
    GpuStreamPriority priority_;
    base::hash_set<int32> routes_;
  };

  friend class GpuChannelMessageFilter;
  friend class GpuChannelMessageQueue;

  void OnDestroy();

  bool OnControlMessageReceived(const IPC::Message& msg);

  void HandleMessage();

  // Message handlers.
  void OnCreateOffscreenCommandBuffer(
      const gfx::Size& size,
      const GPUCreateCommandBufferConfig& init_params,
      int32 route_id,
      bool* succeeded);
  void OnDestroyCommandBuffer(int32 route_id);
  void OnCreateJpegDecoder(int32 route_id, IPC::Message* reply_msg);

  // Update the processed order number and defer preemption.
  void MessageProcessed(uint32_t order_number);

  // The lifetime of objects of this class is managed by a GpuChannelManager.
  // The GpuChannelManager destroys all the GpuChannels that it owns when it
  // is destroyed, so a raw pointer is safe.
  GpuChannelManager* gpu_channel_manager_;

  scoped_ptr<IPC::SyncChannel> channel_;

  // Uniquely identifies the channel within this GPU process.
  std::string channel_id_;

  // Used to implement message routing functionality to CommandBuffer objects.
  MessageRouter router_;

  // Whether the processing of IPCs on this channel is stalled and we should
  // preempt other GpuChannels.
  scoped_refptr<gpu::PreemptionFlag> preempting_flag_;

  // If non-NULL, all stubs on this channel should stop processing GL
  // commands (via their GpuScheduler) when preempted_flag_->IsSet().
  scoped_refptr<gpu::PreemptionFlag> preempted_flag_;

  scoped_refptr<GpuChannelMessageQueue> message_queue_;

  // The id of the client who is on the other side of the channel.
  int client_id_;

  // The tracing ID used for memory allocations associated with this client.
  uint64_t client_tracing_id_;

  // The task runners for the main thread and the io thread.
  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
  scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;

  // The share group that all contexts associated with a particular renderer
  // process use.
  scoped_refptr<gfx::GLShareGroup> share_group_;

  scoped_refptr<gpu::gles2::MailboxManager> mailbox_manager_;

  scoped_refptr<gpu::gles2::SubscriptionRefSet> subscription_ref_set_;

  scoped_refptr<gpu::ValueStateMap> pending_valuebuffer_state_;

  scoped_ptr<GpuJpegDecodeAccelerator> jpeg_decoder_;

  gpu::gles2::DisallowedFeatures disallowed_features_;
  GpuWatchdog* watchdog_;
  bool software_;

  // Current IPC order number being processed.
  uint32_t current_order_num_;

  // Last finished IPC order number.
  uint32_t processed_order_num_;

  size_t num_stubs_descheduled_;

  // Map of stream id to stream state.
  base::hash_map<int32, StreamState> streams_;

  bool allow_future_sync_points_;
  bool allow_real_time_streams_;

  // Member variables should appear before the WeakPtrFactory, to ensure
  // that any WeakPtrs to GpuChannel are invalidated before its member
  // variables' destructors are executed, rendering them invalid.
  base::WeakPtrFactory<GpuChannel> weak_factory_;

  DISALLOW_COPY_AND_ASSIGN(GpuChannel);
};

// This filter does three things:
// - it counts and timestamps each message forwarded to the channel
//   so that we can preempt other channels if a message takes too long to
//   process. To guarantee fairness, we must wait a minimum amount of time
//   before preempting and we limit the amount of time that we can preempt in
//   one shot (see constants above).
// - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
//   thread, generating the sync point ID and responding immediately, and then
//   posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
//   into the channel's queue.
// - it generates mailbox names for clients of the GPU process on the IO
//   thread.
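//
// A rough sketch of the preemption state machine, inferred from the states
// declared below (the precise transition conditions live in the Transition*
// methods and are not spelled out in this header):
//   IDLE       -> WAITING      messages are pending for this channel
//   WAITING    -> CHECKING     after kPreemptWaitTimeMs has elapsed
//   CHECKING   -> PREEMPTING   a pending message has been waiting longer
//                              than kPreemptWaitTimeMs
//   PREEMPTING -> IDLE         the preemption budget is used up or the
//                              queue has caught up
//   any state  -> WOULD_PREEMPT_DESCHEDULED   while some stub is descheduled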
class GpuChannelMessageFilter : public IPC::MessageFilter {
 public:
  GpuChannelMessageFilter(
      scoped_refptr<GpuChannelMessageQueue> message_queue,
      gpu::SyncPointManager* sync_point_manager,
      scoped_refptr<base::SingleThreadTaskRunner> task_runner,
      bool future_sync_points);

  // IPC::MessageFilter implementation.
  void OnFilterAdded(IPC::Sender* sender) override;
  void OnFilterRemoved() override;
  void OnChannelConnected(int32 peer_pid) override;
  void OnChannelError() override;
  void OnChannelClosing() override;
  bool OnMessageReceived(const IPC::Message& message) override;

  void AddChannelFilter(scoped_refptr<IPC::MessageFilter> filter);
  void RemoveChannelFilter(scoped_refptr<IPC::MessageFilter> filter);

  void OnMessageProcessed();

  void SetPreemptingFlagAndSchedulingState(gpu::PreemptionFlag* preempting_flag,
                                           bool a_stub_is_descheduled);

  void UpdateStubSchedulingState(bool a_stub_is_descheduled);

  bool Send(IPC::Message* message);

 protected:
  ~GpuChannelMessageFilter() override;

 private:
  enum PreemptionState {
    // Either there's no other channel to preempt, there are no messages
    // pending processing, or we just finished preempting and have to wait
    // before preempting again.
    IDLE,
    // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
    WAITING,
    // We can preempt whenever any IPC processing takes more than
    // kPreemptWaitTimeMs.
    CHECKING,
    // We are currently preempting (i.e. no stub is descheduled).
    PREEMPTING,
    // We would like to preempt, but some stub is descheduled.
    WOULD_PREEMPT_DESCHEDULED,
  };

  void UpdatePreemptionState();

  void TransitionToIdleIfCaughtUp();
  void TransitionToIdle();
  void TransitionToWaiting();
  void TransitionToChecking();
  void TransitionToPreempting();
  void TransitionToWouldPreemptDescheduled();

  PreemptionState preemption_state_;

  // Maximum amount of time that we can spend in PREEMPTING.
  // It is reset when we transition to IDLE.
  base::TimeDelta max_preemption_time_;

  // The message_queue_ is used to handle messages on the main thread.
  scoped_refptr<GpuChannelMessageQueue> message_queue_;
  IPC::Sender* sender_;
  base::ProcessId peer_pid_;
  gpu::SyncPointManager* sync_point_manager_;
  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
  scoped_refptr<gpu::PreemptionFlag> preempting_flag_;
  std::vector<scoped_refptr<IPC::MessageFilter>> channel_filters_;

  // This timer is created and destroyed on the IO thread.
  scoped_ptr<base::OneShotTimer<GpuChannelMessageFilter>> timer_;

  bool a_stub_is_descheduled_;

  // True if this channel can create future sync points.
  bool future_sync_points_;

  // This number is only ever incremented/read on the IO thread.
  static uint32_t global_order_counter_;
};

}  // namespace content

#endif  // CONTENT_COMMON_GPU_GPU_CHANNEL_H_