// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/client/gpu_channel_host.h"

#include <algorithm>

#include "base/bind.h"
#include "base/message_loop/message_loop.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/posix/eintr_wrapper.h"
#include "base/threading/thread_restrictions.h"
#include "base/trace_event/trace_event.h"
#include "content/common/gpu/client/command_buffer_proxy_impl.h"
#include "content/common/gpu/gpu_messages.h"
#include "ipc/ipc_sync_message_filter.h"
#include "url/gurl.h"

#if defined(OS_WIN)
#include "content/public/common/sandbox_init.h"
#endif

using base::AutoLock;
using base::MessageLoopProxy;

namespace content {

GpuListenerInfo::GpuListenerInfo() {}

GpuListenerInfo::~GpuListenerInfo() {}

ProxyFlushInfo::ProxyFlushInfo()
    : flush_pending(false),
      route_id(MSG_ROUTING_NONE),
      put_offset(0),
      flush_count(0) {
}

ProxyFlushInfo::~ProxyFlushInfo() {
}

// static
scoped_refptr<GpuChannelHost> GpuChannelHost::Create(
    GpuChannelHostFactory* factory,
    const gpu::GPUInfo& gpu_info,
    const IPC::ChannelHandle& channel_handle,
    base::WaitableEvent* shutdown_event,
    gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager) {
  DCHECK(factory->IsMainThread());
  scoped_refptr<GpuChannelHost> host =
      new GpuChannelHost(factory, gpu_info, gpu_memory_buffer_manager);
  host->Connect(channel_handle, shutdown_event);
  return host;
}

GpuChannelHost::GpuChannelHost(
    GpuChannelHostFactory* factory,
    const gpu::GPUInfo& gpu_info,
    gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager)
    : factory_(factory),
      gpu_info_(gpu_info),
      gpu_memory_buffer_manager_(gpu_memory_buffer_manager) {
  next_transfer_buffer_id_.GetNext();
  next_image_id_.GetNext();
  next_route_id_.GetNext();
}

void GpuChannelHost::Connect(const IPC::ChannelHandle& channel_handle,
                             base::WaitableEvent* shutdown_event) {
  DCHECK(factory_->IsMainThread());
  // Open a channel to the GPU process. We pass NULL as the main listener here
  // since we need to filter everything to route it to the right thread.
  scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
  channel_ = IPC::SyncChannel::Create(channel_handle,
                                      IPC::Channel::MODE_CLIENT,
                                      NULL,
                                      io_loop.get(),
                                      true,
                                      shutdown_event);

  sync_filter_ = new IPC::SyncMessageFilter(shutdown_event);

  channel_->AddFilter(sync_filter_.get());

  channel_filter_ = new MessageFilter();

  // Install the filter last, because we intercept all leftover
  // messages.
  channel_->AddFilter(channel_filter_.get());
}
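
// Filter ordering matters here: IPC filters are offered messages in the order
// they were added, so sync_filter_ gets first chance to match replies to
// pending synchronous sends on the IO thread, while channel_filter_,
// installed last, picks up every leftover message and re-routes it to the
// appropriate listener's thread.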

bool GpuChannelHost::Send(IPC::Message* msg) {
  // Callee takes ownership of message, regardless of whether Send is
  // successful. See IPC::Sender.
  scoped_ptr<IPC::Message> message(msg);
  // The GPU process never sends synchronous IPCs so clear the unblock flag to
  // preserve order.
  message->set_unblock(false);

  // Currently we need to choose between two different mechanisms for sending.
  // On the main thread we use the regular channel Send() method, on another
  // thread we use SyncMessageFilter. We also have to be careful interpreting
  // IsMainThread() since it might return false during shutdown even though we
  // are actually calling from the main thread (the message is then discarded).
  //
  // TODO: Can we just always use sync_filter_ since we set up the channel
  // without a main listener?
  if (factory_->IsMainThread()) {
    // channel_ is only modified on the main thread, so we don't need to take a
    // lock here.
    if (!channel_) {
      DVLOG(1) << "GpuChannelHost::Send failed: Channel already destroyed";
      return false;
    }
    // http://crbug.com/125264
    base::ThreadRestrictions::ScopedAllowWait allow_wait;
    bool result = channel_->Send(message.release());
    if (!result)
      DVLOG(1) << "GpuChannelHost::Send failed: Channel::Send failed";
    return result;
  }

  bool result = sync_filter_->Send(message.release());
  return result;
}
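
// Usage sketch (hypothetical helper, not part of this file): Send() may be
// called from any thread, and the message is consumed even when it fails.
//
//   bool DestroyRouteFromAnyThread(GpuChannelHost* host, int32 route_id) {
//     // Ownership of the message passes to Send() unconditionally.
//     return host->Send(new GpuChannelMsg_DestroyCommandBuffer(route_id));
//   }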

void GpuChannelHost::OrderingBarrier(
    int route_id,
    int32 put_offset,
    unsigned int flush_count,
    const std::vector<ui::LatencyInfo>& latency_info,
    bool put_offset_changed,
    bool do_flush) {
  AutoLock lock(context_lock_);
  if (flush_info_.flush_pending && flush_info_.route_id != route_id)
    InternalFlush();

  if (put_offset_changed) {
    flush_info_.flush_pending = true;
    flush_info_.route_id = route_id;
    flush_info_.put_offset = put_offset;
    flush_info_.flush_count = flush_count;
    flush_info_.latency_info.insert(flush_info_.latency_info.end(),
                                    latency_info.begin(), latency_info.end());

    if (do_flush)
      InternalFlush();
  }
}
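
// Illustrative behavior: repeated OrderingBarrier() calls for the same route
// with do_flush == false coalesce into one pending flush that records only
// the latest put_offset and flush_count while accumulating latency info; the
// pending flush is sent when do_flush is true or when a barrier arrives for a
// different route.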

void GpuChannelHost::InternalFlush() {
  DCHECK(flush_info_.flush_pending);
  Send(new GpuCommandBufferMsg_AsyncFlush(
      flush_info_.route_id, flush_info_.put_offset, flush_info_.flush_count,
      flush_info_.latency_info));
  flush_info_.latency_info.clear();
  flush_info_.flush_pending = false;
}

CommandBufferProxyImpl* GpuChannelHost::CreateViewCommandBuffer(
    int32 surface_id,
    CommandBufferProxyImpl* share_group,
    const std::vector<int32>& attribs,
    const GURL& active_url,
    gfx::GpuPreference gpu_preference) {
  TRACE_EVENT1("gpu",
               "GpuChannelHost::CreateViewCommandBuffer",
               "surface_id",
               surface_id);

  GPUCreateCommandBufferConfig init_params;
  init_params.share_group_id =
      share_group ? share_group->GetRouteID() : MSG_ROUTING_NONE;
  init_params.attribs = attribs;
  init_params.active_url = active_url;
  init_params.gpu_preference = gpu_preference;
  int32 route_id = GenerateRouteID();
  CreateCommandBufferResult result = factory_->CreateViewCommandBuffer(
      surface_id, init_params, route_id);
  if (result != CREATE_COMMAND_BUFFER_SUCCEEDED) {
    LOG(ERROR) << "GpuChannelHost::CreateViewCommandBuffer failed.";

    if (result == CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST) {
      // The GPU channel needs to be considered lost. The caller will
      // then set up a new connection, and the GPU channel and any
      // view command buffers will all be associated with the same GPU
      // process.
      DCHECK(MessageLoopProxy::current().get());

      scoped_refptr<base::MessageLoopProxy> io_loop =
          factory_->GetIOLoopProxy();
      io_loop->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelHost::MessageFilter::OnChannelError,
                     channel_filter_.get()));
    }

    return NULL;
  }

  CommandBufferProxyImpl* command_buffer =
      new CommandBufferProxyImpl(this, route_id);
  AddRoute(route_id, command_buffer->AsWeakPtr());

  AutoLock lock(context_lock_);
  proxies_[route_id] = command_buffer;
  return command_buffer;
}

CommandBufferProxyImpl* GpuChannelHost::CreateOffscreenCommandBuffer(
    const gfx::Size& size,
    CommandBufferProxyImpl* share_group,
    const std::vector<int32>& attribs,
    const GURL& active_url,
    gfx::GpuPreference gpu_preference) {
  TRACE_EVENT0("gpu", "GpuChannelHost::CreateOffscreenCommandBuffer");

  GPUCreateCommandBufferConfig init_params;
  init_params.share_group_id =
      share_group ? share_group->GetRouteID() : MSG_ROUTING_NONE;
  init_params.attribs = attribs;
  init_params.active_url = active_url;
  init_params.gpu_preference = gpu_preference;
  int32 route_id = GenerateRouteID();
  bool succeeded = false;
  if (!Send(new GpuChannelMsg_CreateOffscreenCommandBuffer(
          size, init_params, route_id, &succeeded))) {
    LOG(ERROR) << "Failed to send GpuChannelMsg_CreateOffscreenCommandBuffer.";
    return NULL;
  }

  if (!succeeded) {
    LOG(ERROR)
        << "GpuChannelMsg_CreateOffscreenCommandBuffer returned failure.";
    return NULL;
  }

  CommandBufferProxyImpl* command_buffer =
      new CommandBufferProxyImpl(this, route_id);
  AddRoute(route_id, command_buffer->AsWeakPtr());

  AutoLock lock(context_lock_);
  proxies_[route_id] = command_buffer;
  return command_buffer;
}
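
// Usage sketch (hypothetical caller): an offscreen buffer is released through
// the host so the destroy IPC and route cleanup happen together.
//
//   CommandBufferProxyImpl* buffer = host->CreateOffscreenCommandBuffer(
//       gfx::Size(1, 1), NULL, std::vector<int32>(), GURL(),
//       gfx::PreferIntegratedGpu);
//   if (buffer)
//     host->DestroyCommandBuffer(buffer);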

scoped_ptr<media::VideoDecodeAccelerator> GpuChannelHost::CreateVideoDecoder(
    int command_buffer_route_id) {
  TRACE_EVENT0("gpu", "GpuChannelHost::CreateVideoDecoder");
  AutoLock lock(context_lock_);
  ProxyMap::iterator it = proxies_.find(command_buffer_route_id);
  DCHECK(it != proxies_.end());
  return it->second->CreateVideoDecoder();
}

scoped_ptr<media::VideoEncodeAccelerator> GpuChannelHost::CreateVideoEncoder(
    int command_buffer_route_id) {
  TRACE_EVENT0("gpu", "GpuChannelHost::CreateVideoEncoder");
  AutoLock lock(context_lock_);
  ProxyMap::iterator it = proxies_.find(command_buffer_route_id);
  DCHECK(it != proxies_.end());
  return it->second->CreateVideoEncoder();
}

void GpuChannelHost::DestroyCommandBuffer(
    CommandBufferProxyImpl* command_buffer) {
  TRACE_EVENT0("gpu", "GpuChannelHost::DestroyCommandBuffer");

  int route_id = command_buffer->GetRouteID();
  Send(new GpuChannelMsg_DestroyCommandBuffer(route_id));
  RemoveRoute(route_id);

  AutoLock lock(context_lock_);
  proxies_.erase(route_id);
  if (flush_info_.flush_pending && flush_info_.route_id == route_id)
    flush_info_.flush_pending = false;

  delete command_buffer;
}

void GpuChannelHost::DestroyChannel() {
  DCHECK(factory_->IsMainThread());
  AutoLock lock(context_lock_);
  channel_.reset();
}

void GpuChannelHost::AddRoute(
    int route_id, base::WeakPtr<IPC::Listener> listener) {
  DCHECK(MessageLoopProxy::current().get());

  scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
  io_loop->PostTask(FROM_HERE,
                    base::Bind(&GpuChannelHost::MessageFilter::AddRoute,
                               channel_filter_.get(), route_id, listener,
                               MessageLoopProxy::current()));
}

void GpuChannelHost::RemoveRoute(int route_id) {
  scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
  io_loop->PostTask(FROM_HERE,
                    base::Bind(&GpuChannelHost::MessageFilter::RemoveRoute,
                               channel_filter_.get(), route_id));
}

base::SharedMemoryHandle GpuChannelHost::ShareToGpuProcess(
    base::SharedMemoryHandle source_handle) {
  if (IsLost())
    return base::SharedMemory::NULLHandle();

#if defined(OS_WIN)
  // Windows needs to explicitly duplicate the handle out to another process.
  base::SharedMemoryHandle target_handle;
  base::ProcessId peer_pid;
  {
    AutoLock lock(context_lock_);
    if (!channel_)
      return base::SharedMemory::NULLHandle();
    peer_pid = channel_->GetPeerPID();
  }
  if (!BrokerDuplicateHandle(source_handle,
                             peer_pid,
                             &target_handle,
                             FILE_GENERIC_READ | FILE_GENERIC_WRITE,
                             0)) {
    return base::SharedMemory::NULLHandle();
  }

  return target_handle;
#else
  int duped_handle = HANDLE_EINTR(dup(source_handle.fd));
  if (duped_handle < 0)
    return base::SharedMemory::NULLHandle();

  return base::FileDescriptor(duped_handle, true);
#endif  // defined(OS_WIN)
}
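
// On POSIX the base::FileDescriptor is constructed with auto_close == true,
// so the dup()'d descriptor is owned by the IPC layer and closed once the
// handle has been transmitted; the caller keeps ownership of source_handle.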

int32 GpuChannelHost::ReserveTransferBufferId() {
  return next_transfer_buffer_id_.GetNext();
}

gfx::GpuMemoryBufferHandle GpuChannelHost::ShareGpuMemoryBufferToGpuProcess(
    const gfx::GpuMemoryBufferHandle& source_handle,
    bool* requires_sync_point) {
  switch (source_handle.type) {
    case gfx::SHARED_MEMORY_BUFFER: {
      gfx::GpuMemoryBufferHandle handle;
      handle.type = gfx::SHARED_MEMORY_BUFFER;
      handle.handle = ShareToGpuProcess(source_handle.handle);
      *requires_sync_point = false;
      return handle;
    }
    case gfx::IO_SURFACE_BUFFER:
    case gfx::SURFACE_TEXTURE_BUFFER:
    case gfx::OZONE_NATIVE_BUFFER:
      *requires_sync_point = true;
      return source_handle;
    default:
      NOTREACHED();
      return gfx::GpuMemoryBufferHandle();
  }
}
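
// The sync point requirement tracks handle ownership: a shared memory handle
// is duplicated above, giving the GPU process its own reference, whereas
// IOSurface, SurfaceTexture and Ozone handles are shared by reference, so the
// consumer must order its access against the producer with a sync point.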

int32 GpuChannelHost::ReserveImageId() {
  return next_image_id_.GetNext();
}

int32 GpuChannelHost::GenerateRouteID() {
  return next_route_id_.GetNext();
}

GpuChannelHost::~GpuChannelHost() {
#if DCHECK_IS_ON()
  AutoLock lock(context_lock_);
  DCHECK(!channel_)
      << "GpuChannelHost::DestroyChannel must be called before destruction.";
#endif
}

GpuChannelHost::MessageFilter::MessageFilter()
    : lost_(false) {
}

GpuChannelHost::MessageFilter::~MessageFilter() {}

void GpuChannelHost::MessageFilter::AddRoute(
    int route_id,
    base::WeakPtr<IPC::Listener> listener,
    scoped_refptr<MessageLoopProxy> loop) {
  DCHECK(listeners_.find(route_id) == listeners_.end());
  GpuListenerInfo info;
  info.listener = listener;
  info.loop = loop;
  listeners_[route_id] = info;
}

void GpuChannelHost::MessageFilter::RemoveRoute(int route_id) {
  ListenerMap::iterator it = listeners_.find(route_id);
  if (it != listeners_.end())
    listeners_.erase(it);
}

bool GpuChannelHost::MessageFilter::OnMessageReceived(
    const IPC::Message& message) {
  // Never handle sync message replies or we will deadlock here.
  if (message.is_reply())
    return false;

  ListenerMap::iterator it = listeners_.find(message.routing_id());
  if (it == listeners_.end())
    return false;

  const GpuListenerInfo& info = it->second;
  info.loop->PostTask(
      FROM_HERE,
      base::Bind(base::IgnoreResult(&IPC::Listener::OnMessageReceived),
                 info.listener,
                 message));
  return true;
}
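
// Dispatch model: this filter runs on the IO thread and never invokes the
// listener directly; it re-posts the message to the MessageLoopProxy captured
// in AddRoute(), and the bound WeakPtr turns delivery into a no-op if the
// listener has already been destroyed on its own thread.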

void GpuChannelHost::MessageFilter::OnChannelError() {
  // Set the lost state before signalling the proxies. That way, if they
  // themselves post a task to recreate the context, they will not try to
  // re-use this channel host.
  {
    AutoLock lock(lock_);
    lost_ = true;
  }

  // Inform all the proxies that an error has occurred. This will be reported
  // via OpenGL as a lost context.
  for (ListenerMap::iterator it = listeners_.begin();
       it != listeners_.end();
       ++it) {
    const GpuListenerInfo& info = it->second;
    info.loop->PostTask(
        FROM_HERE,
        base::Bind(&IPC::Listener::OnChannelError, info.listener));
  }

  listeners_.clear();
}

bool GpuChannelHost::MessageFilter::IsLost() const {
  AutoLock lock(lock_);
  return lost_;
}

}  // namespace content