// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CONTENT_COMMON_GPU_GPU_CHANNEL_H_
#define CONTENT_COMMON_GPU_GPU_CHANNEL_H_

#include <deque>
#include <string>
#include <vector>

#include "base/containers/hash_tables.h"
#include "base/containers/scoped_ptr_hash_map.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/process/process.h"
#include "base/synchronization/lock.h"
#include "base/timer/timer.h"
#include "base/trace_event/memory_dump_provider.h"
#include "build/build_config.h"
#include "content/common/content_export.h"
#include "content/common/gpu/gpu_command_buffer_stub.h"
#include "content/common/gpu/gpu_memory_manager.h"
#include "content/common/gpu/gpu_result_codes.h"
#include "content/common/gpu/gpu_stream_priority.h"
#include "content/common/message_router.h"
#include "gpu/command_buffer/service/valuebuffer_manager.h"
#include "ipc/ipc_sync_channel.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gfx/native_widget_types.h"
#include "ui/gl/gl_share_group.h"
#include "ui/gl/gpu_preference.h"
struct GPUCreateCommandBufferConfig;

namespace base {
class WaitableEvent;
}  // namespace base

namespace gpu {
class PreemptionFlag;
class SyncPointManager;
union ValueState;
class ValueStateMap;
namespace gles2 {
class SubscriptionRefSet;
}  // namespace gles2
}  // namespace gpu

namespace IPC {
class MessageFilter;
}  // namespace IPC

namespace content {
class GpuChannelManager;
class GpuChannelMessageFilter;
class GpuChannelMessageQueue;
class GpuJpegDecodeAccelerator;
class GpuWatchdog;
// Encapsulates an IPC channel between the GPU process and one renderer
// process. On the renderer side there's a corresponding GpuChannelHost.
class CONTENT_EXPORT GpuChannel
    : public IPC::Listener,
      public IPC::Sender,
      public gpu::gles2::SubscriptionRefSet::Observer {
 public:
  // Takes ownership of the renderer process handle.
  GpuChannel(GpuChannelManager* gpu_channel_manager,
             GpuWatchdog* watchdog,
             gfx::GLShareGroup* share_group,
             gpu::gles2::MailboxManager* mailbox_manager,
             base::SingleThreadTaskRunner* task_runner,
             base::SingleThreadTaskRunner* io_task_runner,
             int client_id,
             uint64_t client_tracing_id,
             bool software,
             bool allow_future_sync_points,
             bool allow_real_time_streams);
  ~GpuChannel() override;

  // Initializes the IPC channel. Caller takes ownership of the client FD in
  // the returned handle and is responsible for closing it.
  virtual IPC::ChannelHandle Init(base::WaitableEvent* shutdown_event);
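  // A minimal usage sketch, illustrative only (|channel| and |shutdown_event|
  // are placeholders; in practice the owning GpuChannelManager makes this
  // call). The returned handle is what the renderer's GpuChannelHost
  // connects to:
  //
  //   IPC::ChannelHandle handle = channel->Init(shutdown_event);
  //   // Send |handle| to the renderer; this side still owns the client FD
  //   // inside |handle| and must close it when done with it.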
  // Get the GpuChannelManager that owns this channel.
  GpuChannelManager* gpu_channel_manager() const {
    return gpu_channel_manager_;
  }

  const std::string& channel_id() const { return channel_id_; }

  virtual base::ProcessId GetClientPID() const;

  int client_id() const { return client_id_; }

  uint64_t client_tracing_id() const { return client_tracing_id_; }

  scoped_refptr<base::SingleThreadTaskRunner> io_task_runner() const {
    return io_task_runner_;
  }

  // IPC::Listener implementation:
  bool OnMessageReceived(const IPC::Message& msg) override;
  void OnChannelError() override;

  // IPC::Sender implementation:
  bool Send(IPC::Message* msg) override;

  // SubscriptionRefSet::Observer implementation:
  void OnAddSubscription(unsigned int target) override;
  void OnRemoveSubscription(unsigned int target) override;

  // This is called when a command buffer transitions between scheduled and
  // descheduled states. When any stub is descheduled, we stop preempting
  // other channels.
  void OnStubSchedulingChanged(GpuCommandBufferStub* stub, bool scheduled);
  CreateCommandBufferResult CreateViewCommandBuffer(
      const gfx::GLSurfaceHandle& window,
      int32 surface_id,
      const GPUCreateCommandBufferConfig& init_params,
      int32 route_id);

  gfx::GLShareGroup* share_group() const { return share_group_.get(); }

  GpuCommandBufferStub* LookupCommandBuffer(int32 route_id);

  void LoseAllContexts();
  void MarkAllContextsLost();

  // Called to add a listener for a particular message routing ID.
  // Returns true on success.
  bool AddRoute(int32 route_id, IPC::Listener* listener);

  // Called to remove a listener for a particular message routing ID.
  void RemoveRoute(int32 route_id);

  gpu::PreemptionFlag* GetPreemptionFlag();

  // If |preemption_flag->IsSet()|, any stub on this channel should stop
  // issuing GL commands. Setting this to NULL stops deferral.
  void SetPreemptByFlag(scoped_refptr<gpu::PreemptionFlag> preemption_flag);
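  // Illustrative sketch only, not the actual GpuScheduler code: a stub that
  // honors the flag would defer its work roughly like this.
  //
  //   if (preempted_flag_ && preempted_flag_->IsSet())
  //     return;  // Stop issuing GL commands until the flag is reset.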
  void CacheShader(const std::string& key, const std::string& shader);

  void AddFilter(IPC::MessageFilter* filter);
  void RemoveFilter(IPC::MessageFilter* filter);

  uint64 GetMemoryUsage();

  scoped_refptr<gfx::GLImage> CreateImageForGpuMemoryBuffer(
      const gfx::GpuMemoryBufferHandle& handle,
      const gfx::Size& size,
      gfx::BufferFormat format,
      uint32 internalformat);

  bool allow_future_sync_points() const { return allow_future_sync_points_; }

  void HandleUpdateValueState(unsigned int target,
                              const gpu::ValueState& state);

  // Visible for testing.
  const gpu::ValueStateMap* pending_valuebuffer_state() const {
    return pending_valuebuffer_state_.get();
  }

  // Visible for testing.
  GpuChannelMessageFilter* filter() const { return filter_.get(); }

  // Returns the global order number of the IPC message that started processing
  // last.
  uint32_t current_order_num() const { return current_order_num_; }

  // Returns the global order number for the last processed IPC message.
  uint32_t GetProcessedOrderNum() const;

  // Returns the global order number for the last unprocessed IPC message.
  uint32_t GetUnprocessedOrderNum() const;
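  // Note (an inferred invariant, not asserted anywhere in this header): a
  // message is queued before it starts and starts before it finishes, so one
  // would expect GetProcessedOrderNum() <= current_order_num() <=
  // GetUnprocessedOrderNum().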
  void HandleMessage();

  // Some messages, such as WaitForGetOffsetInRange and WaitForTokenInRange,
  // are processed as soon as possible because the client is blocked until
  // they are completed.
  void HandleOutOfOrderMessage(const IPC::Message& msg);

 protected:
  // The message filter on the io thread.
  scoped_refptr<GpuChannelMessageFilter> filter_;

  // Map of routing id to command buffer stub.
  base::ScopedPtrHashMap<int32, scoped_ptr<GpuCommandBufferStub>> stubs_;

 private:
  class StreamState {
   public:
    StreamState(int32 id, GpuStreamPriority priority);
    ~StreamState();

    int32 id() const { return id_; }
    GpuStreamPriority priority() const { return priority_; }

    void AddRoute(int32 route_id);
    void RemoveRoute(int32 route_id);
    bool HasRoute(int32 route_id) const;
    bool HasRoutes() const;

   private:
    int32 id_;
    GpuStreamPriority priority_;
    base::hash_set<int32> routes_;
  };
  void OnDestroy();

  bool OnControlMessageReceived(const IPC::Message& msg);

  void ScheduleHandleMessage();

  // Message handlers.
  void OnCreateOffscreenCommandBuffer(
      const gfx::Size& size,
      const GPUCreateCommandBufferConfig& init_params,
      int32 route_id,
      bool* succeeded);
  void OnDestroyCommandBuffer(int32 route_id);
  void OnCreateJpegDecoder(int32 route_id, IPC::Message* reply_msg);

  // The lifetime of objects of this class is managed by a GpuChannelManager.
  // The GpuChannelManager destroys all the GpuChannels that it owns when it
  // is destroyed, so a raw pointer is safe.
  GpuChannelManager* gpu_channel_manager_;
  scoped_ptr<IPC::SyncChannel> channel_;

  // Uniquely identifies the channel within this GPU process.
  std::string channel_id_;

  // Used to implement message routing functionality to CommandBuffer objects.
  MessageRouter router_;

  // Whether the processing of IPCs on this channel is stalled and we should
  // preempt other GpuChannels.
  scoped_refptr<gpu::PreemptionFlag> preempting_flag_;

  // If non-NULL, all stubs on this channel should stop processing GL
  // commands (via their GpuScheduler) when preempted_flag_->IsSet().
  scoped_refptr<gpu::PreemptionFlag> preempted_flag_;

  scoped_refptr<GpuChannelMessageQueue> message_queue_;

  // The id of the client who is on the other side of the channel.
  int client_id_;

  // The tracing ID used for memory allocations associated with this client.
  uint64_t client_tracing_id_;

  // The task runners for the main thread and the io thread.
  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
  scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;

  // The share group that all contexts associated with a particular renderer
  // process use.
  scoped_refptr<gfx::GLShareGroup> share_group_;

  scoped_refptr<gpu::gles2::MailboxManager> mailbox_manager_;

  scoped_refptr<gpu::gles2::SubscriptionRefSet> subscription_ref_set_;

  scoped_refptr<gpu::ValueStateMap> pending_valuebuffer_state_;
  scoped_ptr<GpuJpegDecodeAccelerator> jpeg_decoder_;

  gpu::gles2::DisallowedFeatures disallowed_features_;
  GpuWatchdog* watchdog_;
  bool software_;

  size_t num_stubs_descheduled_;

  // Map of stream id to stream state.
  base::hash_map<int32, StreamState> streams_;

  uint32_t current_order_num_;

  bool allow_future_sync_points_;
  bool allow_real_time_streams_;

  // Member variables should appear before the WeakPtrFactory, to ensure
  // that any WeakPtrs to GpuChannel are invalidated before its member
  // variables' destructors are executed, rendering those WeakPtrs invalid.
  base::WeakPtrFactory<GpuChannel> weak_factory_;

  DISALLOW_COPY_AND_ASSIGN(GpuChannel);
};
// This filter does three things:
// - it counts and timestamps each message forwarded to the channel
//   so that we can preempt other channels if a message takes too long to
//   process. To guarantee fairness, we must wait a minimum amount of time
//   before preempting and we limit the amount of time that we can preempt in
//   one shot (see the preemption constants in gpu_channel.cc).
// - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
//   thread, generating the sync point ID and responding immediately, and then
//   posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
//   into the channel's queue.
// - it generates mailbox names for clients of the GPU process on the IO
//   thread.
class GpuChannelMessageFilter : public IPC::MessageFilter {
 public:
  GpuChannelMessageFilter(const base::WeakPtr<GpuChannel>& gpu_channel,
                          GpuChannelMessageQueue* message_queue,
                          gpu::SyncPointManager* sync_point_manager,
                          base::SingleThreadTaskRunner* task_runner,
                          bool future_sync_points);

  // IPC::MessageFilter implementation.
  void OnFilterAdded(IPC::Sender* sender) override;
  void OnFilterRemoved() override;
  void OnChannelConnected(int32 peer_pid) override;
  void OnChannelError() override;
  void OnChannelClosing() override;
  bool OnMessageReceived(const IPC::Message& message) override;

  void AddChannelFilter(scoped_refptr<IPC::MessageFilter> filter);
  void RemoveChannelFilter(scoped_refptr<IPC::MessageFilter> filter);

  void OnMessageProcessed();

  void SetPreemptingFlagAndSchedulingState(gpu::PreemptionFlag* preempting_flag,
                                           bool a_stub_is_descheduled);

  void UpdateStubSchedulingState(bool a_stub_is_descheduled);

  bool Send(IPC::Message* message);

 protected:
  ~GpuChannelMessageFilter() override;
 private:
  enum PreemptionState {
    // Either there's no other channel to preempt, there are no messages
    // pending processing, or we just finished preempting and have to wait
    // before preempting again.
    IDLE,
    // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
    WAITING,
    // We can preempt whenever any IPC processing takes more than
    // kPreemptWaitTimeMs.
    CHECKING,
    // We are currently preempting (i.e. no stub is descheduled).
    PREEMPTING,
    // We would like to preempt, but some stub is descheduled.
    WOULD_PREEMPT_DESCHEDULED,
  };
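  // Rough transition sketch, inferred from the state comments above and the
  // TransitionTo*() helpers below; treat it as illustrative, not normative:
  //
  //   IDLE -> WAITING             messages pending and a channel to preempt
  //   WAITING -> CHECKING         after kPreemptWaitTimeMs has elapsed
  //   CHECKING -> PREEMPTING      an IPC has been in flight too long
  //   PREEMPTING -> IDLE          budget (max_preemption_time_) spent or
  //                               the queue has caught up
  //   PREEMPTING <-> WOULD_PREEMPT_DESCHEDULED   a stub is (de)scheduled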
  void UpdatePreemptionState();

  void TransitionToIdleIfCaughtUp();
  void TransitionToIdle();
  void TransitionToWaiting();
  void TransitionToChecking();
  void TransitionToPreempting();
  void TransitionToWouldPreemptDescheduled();

  PreemptionState preemption_state_;

  // Maximum amount of time that we can spend in PREEMPTING.
  // It is reset when we transition to IDLE.
  base::TimeDelta max_preemption_time_;

  base::WeakPtr<GpuChannel> gpu_channel_;
  // The message_queue_ is used to handle messages on the main thread.
  scoped_refptr<GpuChannelMessageQueue> message_queue_;
  IPC::Sender* sender_;
  base::ProcessId peer_pid_;
  gpu::SyncPointManager* sync_point_manager_;
  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
  scoped_refptr<gpu::PreemptionFlag> preempting_flag_;
  std::vector<scoped_refptr<IPC::MessageFilter>> channel_filters_;

  // This timer is created and destroyed on the IO thread.
  scoped_ptr<base::OneShotTimer<GpuChannelMessageFilter>> timer_;

  bool a_stub_is_descheduled_;

  // True if this channel can create future sync points.
  bool future_sync_points_;
};
struct GpuChannelMessage {
  uint32_t order_number;
  base::TimeTicks time_received;
  IPC::Message message;

  // TODO(dyen): Temporary sync point data, remove once new sync point lands.
  bool retire_sync_point;
  uint32 sync_point;

  GpuChannelMessage(const IPC::Message& msg)
      : order_number(0),
        time_received(base::TimeTicks()),
        message(msg),
        retire_sync_point(false),
        sync_point(0) {}

 private:
  DISALLOW_COPY_AND_ASSIGN(GpuChannelMessage);
};
class GpuChannelMessageQueue
    : public base::RefCountedThreadSafe<GpuChannelMessageQueue> {
 public:
  static scoped_refptr<GpuChannelMessageQueue> Create(
      const base::WeakPtr<GpuChannel>& gpu_channel,
      base::SingleThreadTaskRunner* task_runner);

  // Returns the global order number for the last unprocessed IPC message.
  uint32_t GetUnprocessedOrderNum() const;

  // Returns the global order number for the last processed IPC message.
  uint32_t processed_order_num() const { return processed_order_num_; }
  bool HasQueuedMessages() const;

  base::TimeTicks GetNextMessageTimeTick() const;

  GpuChannelMessage* GetNextMessage() const;

  // Should be called after a message returned by GetNextMessage is processed.
  // Returns true if there are more messages on the queue.
  bool MessageProcessed();
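  // Rough sketch of the intended main-thread pattern (|queue| is a
  // placeholder; the expected driver is GpuChannel::HandleMessage):
  //
  //   GpuChannelMessage* m = queue->GetNextMessage();
  //   // ... dispatch m->message to the routed handler ...
  //   if (queue->MessageProcessed()) {
  //     // More messages remain; handle them in a later task.
  //   }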
  void PushBackMessage(const IPC::Message& message);

  bool GenerateSyncPointMessage(gpu::SyncPointManager* sync_point_manager,
                                const IPC::Message& message,
                                bool retire_sync_point,
                                uint32_t* sync_point_number);

  void DeleteAndDisableMessages(GpuChannelManager* gpu_channel_manager);

 private:
  friend class base::RefCountedThreadSafe<GpuChannelMessageQueue>;

  GpuChannelMessageQueue(const base::WeakPtr<GpuChannel>& gpu_channel,
                         base::SingleThreadTaskRunner* task_runner);
  ~GpuChannelMessageQueue();

  void ScheduleHandleMessage();

  void PushMessageHelper(scoped_ptr<GpuChannelMessage> msg);

  // This number is only ever incremented/read on the IO thread.
  static uint32_t global_order_counter_;

  bool enabled_;

  // Highest IPC order number seen, set when queued on the IO thread.
  uint32_t unprocessed_order_num_;
  // The deque owns the messages it contains.
  std::deque<GpuChannelMessage*> channel_messages_;

  // This lock protects enabled_, unprocessed_order_num_, and
  // channel_messages_.
  mutable base::Lock channel_messages_lock_;

  // Last finished IPC order number. Not protected by a lock as it's only
  // accessed on the main thread.
  uint32_t processed_order_num_;

  base::WeakPtr<GpuChannel> gpu_channel_;
  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;

  DISALLOW_COPY_AND_ASSIGN(GpuChannelMessageQueue);
};
}  // namespace content

#endif  // CONTENT_COMMON_GPU_GPU_CHANNEL_H_