1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
#include "content/common/gpu/client/gpu_channel_host.h"

#include "base/bind.h"
#include "base/location.h"
#include "base/posix/eintr_wrapper.h"
#include "base/single_thread_task_runner.h"
#include "base/thread_task_runner_handle.h"
#include "base/threading/thread_restrictions.h"
#include "base/trace_event/trace_event.h"
#include "content/common/gpu/client/command_buffer_proxy_impl.h"
#include "content/common/gpu/client/gpu_jpeg_decode_accelerator_host.h"
#include "content/common/gpu/gpu_messages.h"
#include "ipc/ipc_sync_message_filter.h"
#include "url/gurl.h"

#if defined(OS_WIN) || defined(OS_MACOSX)
#include "content/public/common/sandbox_init.h"
#endif

using base::AutoLock;

namespace content {
30 GpuListenerInfo::GpuListenerInfo() {}
32 GpuListenerInfo::~GpuListenerInfo() {}
34 ProxyFlushInfo::ProxyFlushInfo()
35 : flush_pending(false),
36 route_id(MSG_ROUTING_NONE
),
41 ProxyFlushInfo::~ProxyFlushInfo() {
45 scoped_refptr
<GpuChannelHost
> GpuChannelHost::Create(
46 GpuChannelHostFactory
* factory
,
47 const gpu::GPUInfo
& gpu_info
,
48 const IPC::ChannelHandle
& channel_handle
,
49 base::WaitableEvent
* shutdown_event
,
50 gpu::GpuMemoryBufferManager
* gpu_memory_buffer_manager
) {
51 DCHECK(factory
->IsMainThread());
52 scoped_refptr
<GpuChannelHost
> host
=
53 new GpuChannelHost(factory
, gpu_info
, gpu_memory_buffer_manager
);
54 host
->Connect(channel_handle
, shutdown_event
);
58 GpuChannelHost::GpuChannelHost(
59 GpuChannelHostFactory
* factory
,
60 const gpu::GPUInfo
& gpu_info
,
61 gpu::GpuMemoryBufferManager
* gpu_memory_buffer_manager
)
64 gpu_memory_buffer_manager_(gpu_memory_buffer_manager
) {
65 next_transfer_buffer_id_
.GetNext();
66 next_image_id_
.GetNext();
67 next_route_id_
.GetNext();
70 void GpuChannelHost::Connect(const IPC::ChannelHandle
& channel_handle
,
71 base::WaitableEvent
* shutdown_event
) {
72 DCHECK(factory_
->IsMainThread());
73 // Open a channel to the GPU process. We pass NULL as the main listener here
74 // since we need to filter everything to route it to the right thread.
75 scoped_refptr
<base::SingleThreadTaskRunner
> io_task_runner
=
76 factory_
->GetIOThreadTaskRunner();
77 channel_
= IPC::SyncChannel::Create(
78 channel_handle
, IPC::Channel::MODE_CLIENT
, NULL
, io_task_runner
.get(),
79 true, shutdown_event
, factory_
->GetAttachmentBroker());
81 sync_filter_
= new IPC::SyncMessageFilter(shutdown_event
);
83 channel_
->AddFilter(sync_filter_
.get());
85 channel_filter_
= new MessageFilter();
87 // Install the filter last, because we intercept all leftover
89 channel_
->AddFilter(channel_filter_
.get());
92 bool GpuChannelHost::Send(IPC::Message
* msg
) {
93 // Callee takes ownership of message, regardless of whether Send is
94 // successful. See IPC::Sender.
95 scoped_ptr
<IPC::Message
> message(msg
);
96 // The GPU process never sends synchronous IPCs so clear the unblock flag to
98 message
->set_unblock(false);
100 // Currently we need to choose between two different mechanisms for sending.
101 // On the main thread we use the regular channel Send() method, on another
102 // thread we use SyncMessageFilter. We also have to be careful interpreting
103 // IsMainThread() since it might return false during shutdown,
104 // impl we are actually calling from the main thread (discard message then).
106 // TODO: Can we just always use sync_filter_ since we setup the channel
107 // without a main listener?
108 if (factory_
->IsMainThread()) {
109 // channel_ is only modified on the main thread, so we don't need to take a
112 DVLOG(1) << "GpuChannelHost::Send failed: Channel already destroyed";
115 // http://crbug.com/125264
116 base::ThreadRestrictions::ScopedAllowWait allow_wait
;
117 bool result
= channel_
->Send(message
.release());
119 DVLOG(1) << "GpuChannelHost::Send failed: Channel::Send failed";
123 bool result
= sync_filter_
->Send(message
.release());
127 void GpuChannelHost::OrderingBarrier(
130 unsigned int flush_count
,
131 const std::vector
<ui::LatencyInfo
>& latency_info
,
132 bool put_offset_changed
,
134 AutoLock
lock(context_lock_
);
135 if (flush_info_
.flush_pending
&& flush_info_
.route_id
!= route_id
)
138 if (put_offset_changed
) {
139 flush_info_
.flush_pending
= true;
140 flush_info_
.route_id
= route_id
;
141 flush_info_
.put_offset
= put_offset
;
142 flush_info_
.flush_count
= flush_count
;
143 flush_info_
.latency_info
.insert(flush_info_
.latency_info
.end(),
144 latency_info
.begin(), latency_info
.end());
151 void GpuChannelHost::InternalFlush() {
152 DCHECK(flush_info_
.flush_pending
);
153 Send(new GpuCommandBufferMsg_AsyncFlush(
154 flush_info_
.route_id
, flush_info_
.put_offset
, flush_info_
.flush_count
,
155 flush_info_
.latency_info
));
156 flush_info_
.latency_info
.clear();
157 flush_info_
.flush_pending
= false;
160 scoped_ptr
<CommandBufferProxyImpl
> GpuChannelHost::CreateViewCommandBuffer(
162 CommandBufferProxyImpl
* share_group
,
163 const std::vector
<int32
>& attribs
,
164 const GURL
& active_url
,
165 gfx::GpuPreference gpu_preference
) {
167 "GpuChannelHost::CreateViewCommandBuffer",
171 GPUCreateCommandBufferConfig init_params
;
172 init_params
.share_group_id
=
173 share_group
? share_group
->route_id() : MSG_ROUTING_NONE
;
174 init_params
.attribs
= attribs
;
175 init_params
.active_url
= active_url
;
176 init_params
.gpu_preference
= gpu_preference
;
177 int32 route_id
= GenerateRouteID();
178 CreateCommandBufferResult result
= factory_
->CreateViewCommandBuffer(
179 surface_id
, init_params
, route_id
);
180 if (result
!= CREATE_COMMAND_BUFFER_SUCCEEDED
) {
181 LOG(ERROR
) << "GpuChannelHost::CreateViewCommandBuffer failed.";
183 if (result
== CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST
) {
184 // The GPU channel needs to be considered lost. The caller will
185 // then set up a new connection, and the GPU channel and any
186 // view command buffers will all be associated with the same GPU
188 scoped_refptr
<base::SingleThreadTaskRunner
> io_task_runner
=
189 factory_
->GetIOThreadTaskRunner();
190 io_task_runner
->PostTask(
191 FROM_HERE
, base::Bind(&GpuChannelHost::MessageFilter::OnChannelError
,
192 channel_filter_
.get()));
198 scoped_ptr
<CommandBufferProxyImpl
> command_buffer
=
199 make_scoped_ptr(new CommandBufferProxyImpl(this, route_id
));
200 AddRoute(route_id
, command_buffer
->AsWeakPtr());
202 return command_buffer
.Pass();
205 scoped_ptr
<CommandBufferProxyImpl
> GpuChannelHost::CreateOffscreenCommandBuffer(
206 const gfx::Size
& size
,
207 CommandBufferProxyImpl
* share_group
,
208 const std::vector
<int32
>& attribs
,
209 const GURL
& active_url
,
210 gfx::GpuPreference gpu_preference
) {
211 TRACE_EVENT0("gpu", "GpuChannelHost::CreateOffscreenCommandBuffer");
213 GPUCreateCommandBufferConfig init_params
;
214 init_params
.share_group_id
=
215 share_group
? share_group
->route_id() : MSG_ROUTING_NONE
;
216 init_params
.attribs
= attribs
;
217 init_params
.active_url
= active_url
;
218 init_params
.gpu_preference
= gpu_preference
;
219 int32 route_id
= GenerateRouteID();
220 bool succeeded
= false;
221 if (!Send(new GpuChannelMsg_CreateOffscreenCommandBuffer(
222 size
, init_params
, route_id
, &succeeded
))) {
223 LOG(ERROR
) << "Failed to send GpuChannelMsg_CreateOffscreenCommandBuffer.";
229 << "GpuChannelMsg_CreateOffscreenCommandBuffer returned failure.";
233 scoped_ptr
<CommandBufferProxyImpl
> command_buffer
=
234 make_scoped_ptr(new CommandBufferProxyImpl(this, route_id
));
235 AddRoute(route_id
, command_buffer
->AsWeakPtr());
237 return command_buffer
.Pass();
240 scoped_ptr
<media::JpegDecodeAccelerator
> GpuChannelHost::CreateJpegDecoder(
241 media::JpegDecodeAccelerator::Client
* client
) {
242 TRACE_EVENT0("gpu", "GpuChannelHost::CreateJpegDecoder");
244 scoped_refptr
<base::SingleThreadTaskRunner
> io_task_runner
=
245 factory_
->GetIOThreadTaskRunner();
246 int32 route_id
= GenerateRouteID();
247 scoped_ptr
<GpuJpegDecodeAcceleratorHost
> decoder(
248 new GpuJpegDecodeAcceleratorHost(this, route_id
, io_task_runner
));
249 if (!decoder
->Initialize(client
)) {
253 // The reply message of jpeg decoder should run on IO thread.
254 io_task_runner
->PostTask(FROM_HERE
,
255 base::Bind(&GpuChannelHost::MessageFilter::AddRoute
,
256 channel_filter_
.get(), route_id
,
257 decoder
->GetReceiver(), io_task_runner
));
259 return decoder
.Pass();
262 void GpuChannelHost::DestroyCommandBuffer(
263 CommandBufferProxyImpl
* command_buffer
) {
264 TRACE_EVENT0("gpu", "GpuChannelHost::DestroyCommandBuffer");
266 int route_id
= command_buffer
->route_id();
267 Send(new GpuChannelMsg_DestroyCommandBuffer(route_id
));
268 RemoveRoute(route_id
);
270 if (flush_info_
.flush_pending
&& flush_info_
.route_id
== route_id
)
271 flush_info_
.flush_pending
= false;
274 void GpuChannelHost::DestroyChannel() {
275 DCHECK(factory_
->IsMainThread());
276 AutoLock
lock(context_lock_
);
280 void GpuChannelHost::AddRoute(
281 int route_id
, base::WeakPtr
<IPC::Listener
> listener
) {
282 scoped_refptr
<base::SingleThreadTaskRunner
> io_task_runner
=
283 factory_
->GetIOThreadTaskRunner();
284 io_task_runner
->PostTask(FROM_HERE
,
285 base::Bind(&GpuChannelHost::MessageFilter::AddRoute
,
286 channel_filter_
.get(), route_id
, listener
,
287 base::ThreadTaskRunnerHandle::Get()));
290 void GpuChannelHost::RemoveRoute(int route_id
) {
291 scoped_refptr
<base::SingleThreadTaskRunner
> io_task_runner
=
292 factory_
->GetIOThreadTaskRunner();
293 io_task_runner
->PostTask(
294 FROM_HERE
, base::Bind(&GpuChannelHost::MessageFilter::RemoveRoute
,
295 channel_filter_
.get(), route_id
));
298 base::SharedMemoryHandle
GpuChannelHost::ShareToGpuProcess(
299 base::SharedMemoryHandle source_handle
) {
301 return base::SharedMemory::NULLHandle();
303 #if defined(OS_WIN) || defined(OS_MACOSX)
304 // Windows and Mac need to explicitly duplicate the handle out to another
306 base::SharedMemoryHandle target_handle
;
307 base::ProcessId peer_pid
;
309 AutoLock
lock(context_lock_
);
311 return base::SharedMemory::NULLHandle();
312 peer_pid
= channel_
->GetPeerPID();
316 BrokerDuplicateHandle(source_handle
, peer_pid
, &target_handle
,
317 FILE_GENERIC_READ
| FILE_GENERIC_WRITE
, 0);
318 #elif defined(OS_MACOSX)
319 bool success
= BrokerDuplicateSharedMemoryHandle(source_handle
, peer_pid
,
323 return base::SharedMemory::NULLHandle();
325 return target_handle
;
327 return base::SharedMemory::DuplicateHandle(source_handle
);
328 #endif // defined(OS_WIN) || defined(OS_MACOSX)
331 int32
GpuChannelHost::ReserveTransferBufferId() {
332 return next_transfer_buffer_id_
.GetNext();
335 gfx::GpuMemoryBufferHandle
GpuChannelHost::ShareGpuMemoryBufferToGpuProcess(
336 const gfx::GpuMemoryBufferHandle
& source_handle
,
337 bool* requires_sync_point
) {
338 switch (source_handle
.type
) {
339 case gfx::SHARED_MEMORY_BUFFER
: {
340 gfx::GpuMemoryBufferHandle handle
;
341 handle
.type
= gfx::SHARED_MEMORY_BUFFER
;
342 handle
.handle
= ShareToGpuProcess(source_handle
.handle
);
343 *requires_sync_point
= false;
346 case gfx::IO_SURFACE_BUFFER
:
347 case gfx::SURFACE_TEXTURE_BUFFER
:
348 case gfx::OZONE_NATIVE_PIXMAP
:
349 *requires_sync_point
= true;
350 return source_handle
;
353 return gfx::GpuMemoryBufferHandle();
357 int32
GpuChannelHost::ReserveImageId() {
358 return next_image_id_
.GetNext();
361 int32
GpuChannelHost::GenerateRouteID() {
362 return next_route_id_
.GetNext();
365 GpuChannelHost::~GpuChannelHost() {
367 AutoLock
lock(context_lock_
);
369 << "GpuChannelHost::DestroyChannel must be called before destruction.";
373 GpuChannelHost::MessageFilter::MessageFilter()
377 GpuChannelHost::MessageFilter::~MessageFilter() {}
379 void GpuChannelHost::MessageFilter::AddRoute(
381 base::WeakPtr
<IPC::Listener
> listener
,
382 scoped_refptr
<base::SingleThreadTaskRunner
> task_runner
) {
383 DCHECK(listeners_
.find(route_id
) == listeners_
.end());
385 GpuListenerInfo info
;
386 info
.listener
= listener
;
387 info
.task_runner
= task_runner
;
388 listeners_
[route_id
] = info
;
391 void GpuChannelHost::MessageFilter::RemoveRoute(int route_id
) {
392 ListenerMap::iterator it
= listeners_
.find(route_id
);
393 if (it
!= listeners_
.end())
394 listeners_
.erase(it
);
397 bool GpuChannelHost::MessageFilter::OnMessageReceived(
398 const IPC::Message
& message
) {
399 // Never handle sync message replies or we will deadlock here.
400 if (message
.is_reply())
403 ListenerMap::iterator it
= listeners_
.find(message
.routing_id());
404 if (it
== listeners_
.end())
407 const GpuListenerInfo
& info
= it
->second
;
408 info
.task_runner
->PostTask(
410 base::Bind(base::IgnoreResult(&IPC::Listener::OnMessageReceived
),
411 info
.listener
, message
));
415 void GpuChannelHost::MessageFilter::OnChannelError() {
416 // Set the lost state before signalling the proxies. That way, if they
417 // themselves post a task to recreate the context, they will not try to re-use
418 // this channel host.
420 AutoLock
lock(lock_
);
424 // Inform all the proxies that an error has occurred. This will be reported
425 // via OpenGL as a lost context.
426 for (ListenerMap::iterator it
= listeners_
.begin();
427 it
!= listeners_
.end();
429 const GpuListenerInfo
& info
= it
->second
;
430 info
.task_runner
->PostTask(
431 FROM_HERE
, base::Bind(&IPC::Listener::OnChannelError
, info
.listener
));
437 bool GpuChannelHost::MessageFilter::IsLost() const {
438 AutoLock
lock(lock_
);
442 } // namespace content