// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/client/gpu_channel_host.h"

#include <algorithm>

#include "base/bind.h"
#include "base/location.h"
#include "base/posix/eintr_wrapper.h"
#include "base/single_thread_task_runner.h"
#include "base/thread_task_runner_handle.h"
#include "base/threading/thread_restrictions.h"
#include "base/trace_event/trace_event.h"
#include "content/common/gpu/client/command_buffer_proxy_impl.h"
#include "content/common/gpu/client/gpu_jpeg_decode_accelerator_host.h"
#include "content/common/gpu/gpu_messages.h"
#include "ipc/ipc_sync_message_filter.h"
#include "url/gurl.h"

#if defined(OS_WIN) || defined(OS_MACOSX)
#include "content/public/common/sandbox_init.h"
#endif

using base::AutoLock;

namespace content {

GpuListenerInfo::GpuListenerInfo() {}

GpuListenerInfo::~GpuListenerInfo() {}

ProxyFlushInfo::ProxyFlushInfo()
    : flush_pending(false),
      route_id(MSG_ROUTING_NONE),
      put_offset(0),
      flush_count(0) {
}

ProxyFlushInfo::~ProxyFlushInfo() {
}

// static
scoped_refptr<GpuChannelHost> GpuChannelHost::Create(
    GpuChannelHostFactory* factory,
    const gpu::GPUInfo& gpu_info,
    const IPC::ChannelHandle& channel_handle,
    base::WaitableEvent* shutdown_event,
    gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager) {
  DCHECK(factory->IsMainThread());
  scoped_refptr<GpuChannelHost> host =
      new GpuChannelHost(factory, gpu_info, gpu_memory_buffer_manager);
  host->Connect(channel_handle, shutdown_event);
  return host;
}

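// Illustrative usage only, not code from this file; the factory, handle and
// attribute variables below are hypothetical placeholders:
//
//   scoped_refptr<GpuChannelHost> host = GpuChannelHost::Create(
//       my_factory, gpu_info, channel_handle, shutdown_event,
//       gpu_memory_buffer_manager);
//   if (!host->IsLost())
//     host->CreateOffscreenCommandBuffer(gfx::Size(1, 1), nullptr, attribs,
//                                        GURL(), gfx::PreferIntegratedGpu);
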
GpuChannelHost::GpuChannelHost(
    GpuChannelHostFactory* factory,
    const gpu::GPUInfo& gpu_info,
    gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager)
    : factory_(factory),
      gpu_info_(gpu_info),
      gpu_memory_buffer_manager_(gpu_memory_buffer_manager) {
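  // Advance each ID sequence once so the first value handed out is 1,
  // leaving 0 unused (presumably reserved as an "invalid ID" sentinel).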
  next_transfer_buffer_id_.GetNext();
  next_image_id_.GetNext();
  next_route_id_.GetNext();
}

void GpuChannelHost::Connect(const IPC::ChannelHandle& channel_handle,
                             base::WaitableEvent* shutdown_event) {
  DCHECK(factory_->IsMainThread());
  // Open a channel to the GPU process. We pass NULL as the main listener here
  // since we need to filter everything to route it to the right thread.
  scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
      factory_->GetIOThreadTaskRunner();
  channel_ = IPC::SyncChannel::Create(
      channel_handle, IPC::Channel::MODE_CLIENT, NULL, io_task_runner.get(),
      true, shutdown_event, factory_->GetAttachmentBroker());

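  // The sync filter is what allows threads other than the main thread to
  // send messages on this channel (see Send() below).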
  sync_filter_ = new IPC::SyncMessageFilter(shutdown_event);

  channel_->AddFilter(sync_filter_.get());

  channel_filter_ = new MessageFilter();

  // Install the filter last, because we intercept all leftover
  // messages.
  channel_->AddFilter(channel_filter_.get());
}

bool GpuChannelHost::Send(IPC::Message* msg) {
  // Callee takes ownership of message, regardless of whether Send is
  // successful. See IPC::Sender.
  scoped_ptr<IPC::Message> message(msg);
  // The GPU process never sends synchronous IPCs so clear the unblock flag to
  // preserve order.
  message->set_unblock(false);

  // Currently we need to choose between two different mechanisms for sending.
  // On the main thread we use the regular channel Send() method, on another
  // thread we use SyncMessageFilter. We also have to be careful interpreting
  // IsMainThread() since it might return false during shutdown even though we
  // are actually calling from the main thread (in which case we discard the
  // message).

  // TODO: Can we just always use sync_filter_ since we set up the channel
  // without a main listener?
  if (factory_->IsMainThread()) {
    // channel_ is only modified on the main thread, so we don't need to take a
    // lock here.
    if (!channel_) {
      DVLOG(1) << "GpuChannelHost::Send failed: Channel already destroyed";
      return false;
    }
    // http://crbug.com/125264
    base::ThreadRestrictions::ScopedAllowWait allow_wait;
    bool result = channel_->Send(message.release());
    if (!result)
      DVLOG(1) << "GpuChannelHost::Send failed: Channel::Send failed";
    return result;
  }

  bool result = sync_filter_->Send(message.release());
  return result;
}

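// OrderingBarrier() coalesces flushes: successive barriers for the same route
// only record the latest put offset, and the batched AsyncFlush is sent either
// when do_flush is set or when a barrier arrives for a different route.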
void GpuChannelHost::OrderingBarrier(
    int route_id,
    int32 put_offset,
    unsigned int flush_count,
    const std::vector<ui::LatencyInfo>& latency_info,
    bool put_offset_changed,
    bool do_flush) {
  AutoLock lock(context_lock_);
  if (flush_info_.flush_pending && flush_info_.route_id != route_id)
    InternalFlush();

  if (put_offset_changed) {
    flush_info_.flush_pending = true;
    flush_info_.route_id = route_id;
    flush_info_.put_offset = put_offset;
    flush_info_.flush_count = flush_count;
    flush_info_.latency_info.insert(flush_info_.latency_info.end(),
                                    latency_info.begin(), latency_info.end());

    if (do_flush)
      InternalFlush();
  }
}

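// Called with context_lock_ held (both call sites are in OrderingBarrier()).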
void GpuChannelHost::InternalFlush() {
  DCHECK(flush_info_.flush_pending);
  Send(new GpuCommandBufferMsg_AsyncFlush(
      flush_info_.route_id, flush_info_.put_offset, flush_info_.flush_count,
      flush_info_.latency_info));
  flush_info_.latency_info.clear();
  flush_info_.flush_pending = false;
}

CommandBufferProxyImpl* GpuChannelHost::CreateViewCommandBuffer(
    int32 surface_id,
    CommandBufferProxyImpl* share_group,
    const std::vector<int32>& attribs,
    const GURL& active_url,
    gfx::GpuPreference gpu_preference) {
  TRACE_EVENT1("gpu",
               "GpuChannelHost::CreateViewCommandBuffer",
               "surface_id",
               surface_id);

  GPUCreateCommandBufferConfig init_params;
  init_params.share_group_id =
      share_group ? share_group->GetRouteID() : MSG_ROUTING_NONE;
  init_params.attribs = attribs;
  init_params.active_url = active_url;
  init_params.gpu_preference = gpu_preference;
  int32 route_id = GenerateRouteID();
  CreateCommandBufferResult result = factory_->CreateViewCommandBuffer(
      surface_id, init_params, route_id);
  if (result != CREATE_COMMAND_BUFFER_SUCCEEDED) {
    LOG(ERROR) << "GpuChannelHost::CreateViewCommandBuffer failed.";

    if (result == CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST) {
      // The GPU channel needs to be considered lost. The caller will
      // then set up a new connection, and the GPU channel and any
      // view command buffers will all be associated with the same GPU
      // process.
      scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
          factory_->GetIOThreadTaskRunner();
      io_task_runner->PostTask(
          FROM_HERE, base::Bind(&GpuChannelHost::MessageFilter::OnChannelError,
                                channel_filter_.get()));
    }

    return NULL;
  }

  CommandBufferProxyImpl* command_buffer =
      new CommandBufferProxyImpl(this, route_id);
  AddRoute(route_id, command_buffer->AsWeakPtr());

  AutoLock lock(context_lock_);
  proxies_[route_id] = command_buffer;
  return command_buffer;
}

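// Unlike the view variant above, which goes through the factory because a
// visible surface is involved, the offscreen variant below asks the GPU
// process for a command buffer directly over the channel.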
CommandBufferProxyImpl* GpuChannelHost::CreateOffscreenCommandBuffer(
    const gfx::Size& size,
    CommandBufferProxyImpl* share_group,
    const std::vector<int32>& attribs,
    const GURL& active_url,
    gfx::GpuPreference gpu_preference) {
  TRACE_EVENT0("gpu", "GpuChannelHost::CreateOffscreenCommandBuffer");

  GPUCreateCommandBufferConfig init_params;
  init_params.share_group_id =
      share_group ? share_group->GetRouteID() : MSG_ROUTING_NONE;
  init_params.attribs = attribs;
  init_params.active_url = active_url;
  init_params.gpu_preference = gpu_preference;
  int32 route_id = GenerateRouteID();
  bool succeeded = false;
  if (!Send(new GpuChannelMsg_CreateOffscreenCommandBuffer(
          size, init_params, route_id, &succeeded))) {
    LOG(ERROR) << "Failed to send GpuChannelMsg_CreateOffscreenCommandBuffer.";
    return NULL;
  }

  if (!succeeded) {
    LOG(ERROR)
        << "GpuChannelMsg_CreateOffscreenCommandBuffer returned failure.";
    return NULL;
  }

  CommandBufferProxyImpl* command_buffer =
      new CommandBufferProxyImpl(this, route_id);
  AddRoute(route_id, command_buffer->AsWeakPtr());

  AutoLock lock(context_lock_);
  proxies_[route_id] = command_buffer;
  return command_buffer;
}

scoped_ptr<media::VideoDecodeAccelerator> GpuChannelHost::CreateVideoDecoder(
    int command_buffer_route_id) {
  TRACE_EVENT0("gpu", "GpuChannelHost::CreateVideoDecoder");
  AutoLock lock(context_lock_);
  ProxyMap::iterator it = proxies_.find(command_buffer_route_id);
  DCHECK(it != proxies_.end());
  return it->second->CreateVideoDecoder();
}

scoped_ptr<media::VideoEncodeAccelerator> GpuChannelHost::CreateVideoEncoder(
    int command_buffer_route_id) {
  TRACE_EVENT0("gpu", "GpuChannelHost::CreateVideoEncoder");
  AutoLock lock(context_lock_);
  ProxyMap::iterator it = proxies_.find(command_buffer_route_id);
  DCHECK(it != proxies_.end());
  return it->second->CreateVideoEncoder();
}

scoped_ptr<media::JpegDecodeAccelerator> GpuChannelHost::CreateJpegDecoder(
    media::JpegDecodeAccelerator::Client* client) {
  TRACE_EVENT0("gpu", "GpuChannelHost::CreateJpegDecoder");

  scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
      factory_->GetIOThreadTaskRunner();
  int32 route_id = GenerateRouteID();
  scoped_ptr<GpuJpegDecodeAcceleratorHost> decoder(
      new GpuJpegDecodeAcceleratorHost(this, route_id, io_task_runner));
  if (!decoder->Initialize(client)) {
    return nullptr;
  }

  // The JPEG decoder's reply messages should be handled on the IO thread, so
  // route them to its receiver there rather than to the current thread.
  io_task_runner->PostTask(FROM_HERE,
                           base::Bind(&GpuChannelHost::MessageFilter::AddRoute,
                                      channel_filter_.get(), route_id,
                                      decoder->GetReceiver(), io_task_runner));

  return decoder.Pass();
}

void GpuChannelHost::DestroyCommandBuffer(
    CommandBufferProxyImpl* command_buffer) {
  TRACE_EVENT0("gpu", "GpuChannelHost::DestroyCommandBuffer");

  int route_id = command_buffer->GetRouteID();
  Send(new GpuChannelMsg_DestroyCommandBuffer(route_id));
  RemoveRoute(route_id);

  AutoLock lock(context_lock_);
  proxies_.erase(route_id);
  if (flush_info_.flush_pending && flush_info_.route_id == route_id)
    flush_info_.flush_pending = false;

  delete command_buffer;
}

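// Must be called on the main thread before the last reference goes away; the
// destructor DCHECKs that the channel has been destroyed.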
void GpuChannelHost::DestroyChannel() {
  DCHECK(factory_->IsMainThread());
  AutoLock lock(context_lock_);
  channel_.reset();
}

void GpuChannelHost::AddRoute(
    int route_id, base::WeakPtr<IPC::Listener> listener) {
  scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
      factory_->GetIOThreadTaskRunner();
  io_task_runner->PostTask(FROM_HERE,
                           base::Bind(&GpuChannelHost::MessageFilter::AddRoute,
                                      channel_filter_.get(), route_id, listener,
                                      base::ThreadTaskRunnerHandle::Get()));
}

void GpuChannelHost::RemoveRoute(int route_id) {
  scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
      factory_->GetIOThreadTaskRunner();
  io_task_runner->PostTask(
      FROM_HERE, base::Bind(&GpuChannelHost::MessageFilter::RemoveRoute,
                            channel_filter_.get(), route_id));
}

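// Shared memory handles cannot simply be passed across processes on every
// platform: on Windows and Mac the handle must be duplicated into the GPU
// process via the broker, while elsewhere a local duplicate suffices.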
base::SharedMemoryHandle GpuChannelHost::ShareToGpuProcess(
    base::SharedMemoryHandle source_handle) {
  if (IsLost())
    return base::SharedMemory::NULLHandle();

#if defined(OS_WIN) || defined(OS_MACOSX)
  // Windows and Mac need to explicitly duplicate the handle out to another
  // process.
  base::SharedMemoryHandle target_handle;
  base::ProcessId peer_pid;
  {
    AutoLock lock(context_lock_);
    if (!channel_)
      return base::SharedMemory::NULLHandle();
    peer_pid = channel_->GetPeerPID();
  }
#if defined(OS_WIN)
  bool success =
      BrokerDuplicateHandle(source_handle, peer_pid, &target_handle,
                            FILE_GENERIC_READ | FILE_GENERIC_WRITE, 0);
#elif defined(OS_MACOSX)
  bool success = BrokerDuplicateSharedMemoryHandle(source_handle, peer_pid,
                                                   &target_handle);
#endif
  if (!success)
    return base::SharedMemory::NULLHandle();

  return target_handle;
#else
  return base::SharedMemory::DuplicateHandle(source_handle);
#endif  // defined(OS_WIN) || defined(OS_MACOSX)
}

int32 GpuChannelHost::ReserveTransferBufferId() {
  return next_transfer_buffer_id_.GetNext();
}

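// Shared-memory-backed GPU memory buffers need their handle duplicated into
// the GPU process (see ShareToGpuProcess() above); native buffer types are
// passed through as-is but require a sync point for ordering.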
gfx::GpuMemoryBufferHandle GpuChannelHost::ShareGpuMemoryBufferToGpuProcess(
    const gfx::GpuMemoryBufferHandle& source_handle,
    bool* requires_sync_point) {
  switch (source_handle.type) {
    case gfx::SHARED_MEMORY_BUFFER: {
      gfx::GpuMemoryBufferHandle handle;
      handle.type = gfx::SHARED_MEMORY_BUFFER;
      handle.handle = ShareToGpuProcess(source_handle.handle);
      *requires_sync_point = false;
      return handle;
    }
    case gfx::IO_SURFACE_BUFFER:
    case gfx::SURFACE_TEXTURE_BUFFER:
    case gfx::OZONE_NATIVE_PIXMAP:
      *requires_sync_point = true;
      return source_handle;
    default:
      NOTREACHED();
      return gfx::GpuMemoryBufferHandle();
  }
}

int32 GpuChannelHost::ReserveImageId() {
  return next_image_id_.GetNext();
}

int32 GpuChannelHost::GenerateRouteID() {
  return next_route_id_.GetNext();
}

GpuChannelHost::~GpuChannelHost() {
#if DCHECK_IS_ON()
  AutoLock lock(context_lock_);
  DCHECK(!channel_)
      << "GpuChannelHost::DestroyChannel must be called before destruction.";
#endif
}

GpuChannelHost::MessageFilter::MessageFilter()
    : lost_(false) {
}

GpuChannelHost::MessageFilter::~MessageFilter() {}

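// The filter methods below all run on the IO thread, so listeners_ needs no
// locking; only lost_ is read from other threads and is guarded by lock_.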
void GpuChannelHost::MessageFilter::AddRoute(
    int route_id,
    base::WeakPtr<IPC::Listener> listener,
    scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
  DCHECK(listeners_.find(route_id) == listeners_.end());
  DCHECK(task_runner);
  GpuListenerInfo info;
  info.listener = listener;
  info.task_runner = task_runner;
  listeners_[route_id] = info;
}

void GpuChannelHost::MessageFilter::RemoveRoute(int route_id) {
  ListenerMap::iterator it = listeners_.find(route_id);
  if (it != listeners_.end())
    listeners_.erase(it);
}

bool GpuChannelHost::MessageFilter::OnMessageReceived(
    const IPC::Message& message) {
  // Never handle sync message replies or we will deadlock here.
  if (message.is_reply())
    return false;

  ListenerMap::iterator it = listeners_.find(message.routing_id());
  if (it == listeners_.end())
    return false;

  const GpuListenerInfo& info = it->second;
  info.task_runner->PostTask(
      FROM_HERE,
      base::Bind(base::IgnoreResult(&IPC::Listener::OnMessageReceived),
                 info.listener, message));
  return true;
}

void GpuChannelHost::MessageFilter::OnChannelError() {
  // Set the lost state before signalling the proxies. That way, if they
  // themselves post a task to recreate the context, they will not try to
  // re-use this channel host.
  {
    AutoLock lock(lock_);
    lost_ = true;
  }

  // Inform all the proxies that an error has occurred. This will be reported
  // via OpenGL as a lost context.
  for (ListenerMap::iterator it = listeners_.begin();
       it != listeners_.end();
       it++) {
    const GpuListenerInfo& info = it->second;
    info.task_runner->PostTask(
        FROM_HERE, base::Bind(&IPC::Listener::OnChannelError, info.listener));
  }

  listeners_.clear();
}

bool GpuChannelHost::MessageFilter::IsLost() const {
  AutoLock lock(lock_);
  return lost_;
}

}  // namespace content