// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CONTENT_COMMON_GPU_GPU_CHANNEL_H_
#define CONTENT_COMMON_GPU_GPU_CHANNEL_H_

#include <deque>
#include <string>
#include <vector>

#include "base/containers/hash_tables.h"
#include "base/containers/scoped_ptr_hash_map.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/process/process.h"
#include "base/trace_event/memory_dump_provider.h"
#include "build/build_config.h"
#include "content/common/content_export.h"
#include "content/common/gpu/gpu_command_buffer_stub.h"
#include "content/common/gpu/gpu_memory_manager.h"
#include "content/common/gpu/gpu_result_codes.h"
#include "content/common/gpu/gpu_stream_priority.h"
#include "content/common/message_router.h"
#include "gpu/command_buffer/service/valuebuffer_manager.h"
#include "ipc/ipc_sync_channel.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gfx/native_widget_types.h"
#include "ui/gl/gl_share_group.h"
#include "ui/gl/gpu_preference.h"

struct GPUCreateCommandBufferConfig;

namespace base {
class WaitableEvent;
}  // namespace base

namespace gpu {
class PreemptionFlag;
class SyncPointManager;
namespace gles2 {
class SubscriptionRefSet;
}  // namespace gles2
}  // namespace gpu

namespace content {

class GpuChannelManager;
class GpuChannelMessageFilter;
class GpuChannelMessageQueue;
class GpuJpegDecodeAccelerator;
class GpuWatchdog;

// Encapsulates an IPC channel between the GPU process and one renderer
// process. On the renderer side there's a corresponding GpuChannelHost.
class CONTENT_EXPORT GpuChannel
    : public IPC::Listener,
      public IPC::Sender,
      public gpu::gles2::SubscriptionRefSet::Observer {
 public:
  // Takes ownership of the renderer process handle.
  GpuChannel(GpuChannelManager* gpu_channel_manager,
             GpuWatchdog* watchdog,
             gfx::GLShareGroup* share_group,
             gpu::gles2::MailboxManager* mailbox_manager,
             base::SingleThreadTaskRunner* task_runner,
             base::SingleThreadTaskRunner* io_task_runner,
             int client_id,
             uint64_t client_tracing_id,
             bool allow_future_sync_points,
             bool allow_real_time_streams);
  ~GpuChannel() override;

  // Initializes the IPC channel. Caller takes ownership of the client FD in
  // the returned handle and is responsible for closing it.
  virtual IPC::ChannelHandle Init(base::WaitableEvent* shutdown_event);

  // Get the GpuChannelManager that owns this channel.
  GpuChannelManager* gpu_channel_manager() const {
    return gpu_channel_manager_;
  }

  const std::string& channel_id() const { return channel_id_; }

  virtual base::ProcessId GetClientPID() const;

  int client_id() const { return client_id_; }

  uint64_t client_tracing_id() const { return client_tracing_id_; }

  scoped_refptr<base::SingleThreadTaskRunner> io_task_runner() const {
    return io_task_runner_;
  }

  // IPC::Listener implementation:
  bool OnMessageReceived(const IPC::Message& msg) override;
  void OnChannelError() override;

  // IPC::Sender implementation:
  bool Send(IPC::Message* msg) override;

  // SubscriptionRefSet::Observer implementation:
  void OnAddSubscription(unsigned int target) override;
  void OnRemoveSubscription(unsigned int target) override;

  // This is called when a command buffer transitions between scheduled and
  // descheduled states. When any stub is descheduled, we stop preempting
  // other channels.
  void OnStubSchedulingChanged(GpuCommandBufferStub* stub, bool scheduled);

  CreateCommandBufferResult CreateViewCommandBuffer(
      const gfx::GLSurfaceHandle& window,
      const GPUCreateCommandBufferConfig& init_params);

  gfx::GLShareGroup* share_group() const { return share_group_.get(); }

  GpuCommandBufferStub* LookupCommandBuffer(int32 route_id);

  void LoseAllContexts();
  void MarkAllContextsLost();

  // Called to add a listener for a particular message routing ID.
  // Returns true if succeeded.
  bool AddRoute(int32 route_id, IPC::Listener* listener);

  // Called to remove a listener for a particular message routing ID.
  void RemoveRoute(int32 route_id);

  gpu::PreemptionFlag* GetPreemptionFlag();

  // If |preemption_flag->IsSet()|, any stub on this channel
  // should stop issuing GL commands. Setting this to NULL stops deferral.
  void SetPreemptByFlag(scoped_refptr<gpu::PreemptionFlag> preemption_flag);

  void CacheShader(const std::string& key, const std::string& shader);

  void AddFilter(IPC::MessageFilter* filter);
  void RemoveFilter(IPC::MessageFilter* filter);

  uint64 GetMemoryUsage();

  scoped_refptr<gfx::GLImage> CreateImageForGpuMemoryBuffer(
      const gfx::GpuMemoryBufferHandle& handle,
      const gfx::Size& size,
      gfx::BufferFormat format,
      uint32 internalformat);

  bool allow_future_sync_points() const { return allow_future_sync_points_; }

  void HandleUpdateValueState(unsigned int target,
                              const gpu::ValueState& state);

  // Visible for testing.
  const gpu::ValueStateMap* pending_valuebuffer_state() const {
    return pending_valuebuffer_state_.get();
  }

  // Visible for testing.
  GpuChannelMessageFilter* filter() const { return filter_.get(); }

  // Returns the global order number of the IPC message that started processing
  // last.
  uint32_t current_order_num() const { return current_order_num_; }

  // Returns the global order number for the last processed IPC message.
  uint32_t GetProcessedOrderNum() const;

  // Returns the global order number for the last unprocessed IPC message.
  uint32_t GetUnprocessedOrderNum() const;
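
  // Handles the next message from |message_queue_| on the main thread.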
  void HandleMessage();

 protected:
  // The message filter on the io thread.
  scoped_refptr<GpuChannelMessageFilter> filter_;

  // Map of routing id to command buffer stub.
  base::ScopedPtrHashMap<int32, scoped_ptr<GpuCommandBufferStub>> stubs_;

 private:
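  // Tracks the routes (command buffer stubs) assigned to a single GPU stream
  // and that stream's scheduling priority.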
  class StreamState {
   public:
    StreamState(int32 id, GpuStreamPriority priority);

    int32 id() const { return id_; }
    GpuStreamPriority priority() const { return priority_; }

    void AddRoute(int32 route_id);
    void RemoveRoute(int32 route_id);
    bool HasRoute(int32 route_id) const;
    bool HasRoutes() const;

   private:
    int32 id_;
    GpuStreamPriority priority_;
    base::hash_set<int32> routes_;
  };
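
  // Handles control messages, i.e. IPC messages addressed to the channel
  // itself rather than to a routed listener.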
  bool OnControlMessageReceived(const IPC::Message& msg);
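
  // Schedules a HandleMessage() call on the main-thread task runner.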
  void ScheduleHandleMessage();
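
  // Message handlers for control messages.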
  void OnCreateOffscreenCommandBuffer(
      const gfx::Size& size,
      const GPUCreateCommandBufferConfig& init_params);
  void OnDestroyCommandBuffer(int32 route_id);
  void OnCreateJpegDecoder(int32 route_id, IPC::Message* reply_msg);

  // The lifetime of objects of this class is managed by a GpuChannelManager.
  // The GpuChannelManager destroys all the GpuChannels that it owns when it
  // is destroyed. So a raw pointer is safe.
  GpuChannelManager* gpu_channel_manager_;

  scoped_ptr<IPC::SyncChannel> channel_;

  // Uniquely identifies the channel within this GPU process.
  std::string channel_id_;

  // Used to implement message routing functionality to CommandBuffer objects.
  MessageRouter router_;

  // Whether the processing of IPCs on this channel is stalled and we should
  // preempt other GpuChannels.
  scoped_refptr<gpu::PreemptionFlag> preempting_flag_;

  // If non-NULL, all stubs on this channel should stop processing GL
  // commands (via their GpuScheduler) when preempted_flag_->IsSet().
  scoped_refptr<gpu::PreemptionFlag> preempted_flag_;

  scoped_refptr<GpuChannelMessageQueue> message_queue_;

  // The id of the client who is on the other side of the channel.
  int client_id_;

  // The tracing ID used for memory allocations associated with this client.
  uint64_t client_tracing_id_;

  // The task runners for the main thread and the io thread.
  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
  scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;

  // The share group that all contexts associated with a particular renderer
  // process use.
  scoped_refptr<gfx::GLShareGroup> share_group_;

  scoped_refptr<gpu::gles2::MailboxManager> mailbox_manager_;

  scoped_refptr<gpu::gles2::SubscriptionRefSet> subscription_ref_set_;

  scoped_refptr<gpu::ValueStateMap> pending_valuebuffer_state_;

  scoped_ptr<GpuJpegDecodeAccelerator> jpeg_decoder_;

  gpu::gles2::DisallowedFeatures disallowed_features_;
  GpuWatchdog* watchdog_;

  size_t num_stubs_descheduled_;

  // Map of stream id to stream state.
  base::hash_map<int32, StreamState> streams_;

  uint32_t current_order_num_;

  bool allow_future_sync_points_;
  bool allow_real_time_streams_;

  // Member variables should appear before the WeakPtrFactory, to ensure
  // that any WeakPtrs to GpuChannel are invalidated before its member
  // variables' destructors are executed, rendering them invalid.
  base::WeakPtrFactory<GpuChannel> weak_factory_;

  DISALLOW_COPY_AND_ASSIGN(GpuChannel);
};

// This filter does three things:
// - it counts and timestamps each message forwarded to the channel
//   so that we can preempt other channels if a message takes too long to
//   process. To guarantee fairness, we must wait a minimum amount of time
//   before preempting and we limit the amount of time that we can preempt in
//   one shot (see constants above).
// - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
//   thread, generating the sync point ID and responding immediately, and then
//   posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
//   into the channel's queue.
// - it generates mailbox names for clients of the GPU process on the IO
//   thread.
class GpuChannelMessageFilter : public IPC::MessageFilter {
 public:
  GpuChannelMessageFilter(GpuChannelMessageQueue* message_queue,
                          gpu::SyncPointManager* sync_point_manager,
                          base::SingleThreadTaskRunner* task_runner,
                          bool future_sync_points);

  // IPC::MessageFilter implementation.
  void OnFilterAdded(IPC::Sender* sender) override;
  void OnFilterRemoved() override;
  void OnChannelConnected(int32 peer_pid) override;
  void OnChannelError() override;
  void OnChannelClosing() override;
  bool OnMessageReceived(const IPC::Message& message) override;

  void AddChannelFilter(scoped_refptr<IPC::MessageFilter> filter);
  void RemoveChannelFilter(scoped_refptr<IPC::MessageFilter> filter);
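
  // Called after the channel has processed a message, so the preemption
  // state can be updated.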
  void OnMessageProcessed();

  void SetPreemptingFlagAndSchedulingState(gpu::PreemptionFlag* preempting_flag,
                                           bool a_stub_is_descheduled);

  void UpdateStubSchedulingState(bool a_stub_is_descheduled);

  bool Send(IPC::Message* message);

 protected:
  ~GpuChannelMessageFilter() override;

 private:
  enum PreemptionState {
    // Either there's no other channel to preempt, there are no messages
    // pending processing, or we just finished preempting and have to wait
    // before preempting again.
    IDLE,
    // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
    WAITING,
    // We can preempt whenever any IPC processing takes more than
    // kPreemptWaitTimeMs.
    CHECKING,
    // We are currently preempting (i.e. no stub is descheduled).
    PREEMPTING,
    // We would like to preempt, but some stub is descheduled.
    WOULD_PREEMPT_DESCHEDULED,
  };

  void UpdatePreemptionState();
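
  // Helpers that move the preemption state machine between the states listed
  // above.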
  void TransitionToIdleIfCaughtUp();
  void TransitionToIdle();
  void TransitionToWaiting();
  void TransitionToChecking();
  void TransitionToPreempting();
  void TransitionToWouldPreemptDescheduled();

  PreemptionState preemption_state_;

  // Maximum amount of time that we can spend in PREEMPTING.
  // It is reset when we transition to IDLE.
  base::TimeDelta max_preemption_time_;

  // The message_queue_ is used to handle messages on the main thread.
  scoped_refptr<GpuChannelMessageQueue> message_queue_;
  IPC::Sender* sender_;
  base::ProcessId peer_pid_;
  gpu::SyncPointManager* sync_point_manager_;
  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
  scoped_refptr<gpu::PreemptionFlag> preempting_flag_;

  std::vector<scoped_refptr<IPC::MessageFilter>> channel_filters_;

  // This timer is created and destroyed on the IO thread.
  scoped_ptr<base::OneShotTimer<GpuChannelMessageFilter>> timer_;

  bool a_stub_is_descheduled_;

  // True if this channel can create future sync points.
  bool future_sync_points_;

  // This number is only ever incremented/read on the IO thread.
  static uint32_t global_order_counter_;
};
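
// A single IPC message queued on a GpuChannelMessageQueue, along with its
// global order number and arrival time.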
struct GpuChannelMessage {
  uint32_t order_number;
  base::TimeTicks time_received;
  IPC::Message message;

  // TODO(dyen): Temporary sync point data, remove once new sync point lands.
  bool retire_sync_point;

  GpuChannelMessage(uint32_t order_num, const IPC::Message& msg)
      : order_number(order_num),
        time_received(base::TimeTicks::Now()),
        message(msg),
        retire_sync_point(false) {}

 private:
  DISALLOW_COPY_AND_ASSIGN(GpuChannelMessage);
};
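
// A thread-safe queue of incoming IPC messages for a GpuChannel. Messages are
// pushed on the IO thread and handled in global order on the main thread.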
class GpuChannelMessageQueue
    : public base::RefCountedThreadSafe<GpuChannelMessageQueue> {
 public:
  static scoped_refptr<GpuChannelMessageQueue> Create(
      const base::WeakPtr<GpuChannel>& gpu_channel,
      base::SingleThreadTaskRunner* task_runner);

  // Returns the global order number for the last unprocessed IPC message.
  uint32_t GetUnprocessedOrderNum() const;

  // Returns the global order number for the last processed IPC message.
  uint32_t processed_order_num() const { return processed_order_num_; }

  bool HasQueuedMessages() const;

  base::TimeTicks GetNextMessageTimeTick() const;

  GpuChannelMessage* GetNextMessage() const;

  // Should be called after a message returned by GetNextMessage is processed.
  // Returns true if there are more messages on the queue.
  bool MessageProcessed(uint32_t order_number);

  void PushBackMessage(uint32_t order_number, const IPC::Message& message);

  bool GenerateSyncPointMessage(gpu::SyncPointManager* sync_point_manager,
                                uint32_t order_number,
                                const IPC::Message& message,
                                bool retire_sync_point,
                                uint32_t* sync_point_number);
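
  // Deletes any queued messages and disables the queue so that no further
  // messages are enqueued.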
  void DeleteAndDisableMessages(GpuChannelManager* gpu_channel_manager);

 private:
  friend class base::RefCountedThreadSafe<GpuChannelMessageQueue>;

  GpuChannelMessageQueue(const base::WeakPtr<GpuChannel>& gpu_channel,
                         base::SingleThreadTaskRunner* task_runner);
  ~GpuChannelMessageQueue();

  void ScheduleHandleMessage();

  void PushMessageHelper(scoped_ptr<GpuChannelMessage> msg);

  bool HasQueuedMessagesHelper() const;

  bool enabled_;

  // Highest IPC order number seen, set when queued on the IO thread.
  uint32_t unprocessed_order_num_;
  // Both deques own the messages.
  std::deque<GpuChannelMessage*> channel_messages_;
  std::deque<GpuChannelMessage*> out_of_order_messages_;

  // This lock protects enabled_, unprocessed_order_num_, and both deques.
  mutable base::Lock channel_messages_lock_;

  // Last finished IPC order number. Not protected by a lock as it's only
  // accessed on the main thread.
  uint32_t processed_order_num_;

  base::WeakPtr<GpuChannel> gpu_channel_;
  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;

  DISALLOW_COPY_AND_ASSIGN(GpuChannelMessageQueue);
};

}  // namespace content

#endif  // CONTENT_COMMON_GPU_GPU_CHANNEL_H_