// Source: content/common/gpu/client/gpu_channel_host.cc
// (chromium-blink-merge, blob 6312dee080ca01f9483b4b3e64b28574166ccb85)
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "content/common/gpu/client/gpu_channel_host.h"
7 #include <algorithm>
9 #include "base/bind.h"
10 #include "base/debug/trace_event.h"
11 #include "base/message_loop/message_loop.h"
12 #include "base/message_loop/message_loop_proxy.h"
13 #include "base/posix/eintr_wrapper.h"
14 #include "base/threading/thread_restrictions.h"
15 #include "content/common/gpu/client/command_buffer_proxy_impl.h"
16 #include "content/common/gpu/gpu_messages.h"
17 #include "ipc/ipc_sync_message_filter.h"
18 #include "url/gurl.h"
20 #if defined(OS_WIN)
21 #include "content/public/common/sandbox_init.h"
22 #endif
24 using base::AutoLock;
25 using base::MessageLoopProxy;
27 namespace content {
29 GpuListenerInfo::GpuListenerInfo() {}
31 GpuListenerInfo::~GpuListenerInfo() {}
33 // static
34 scoped_refptr<GpuChannelHost> GpuChannelHost::Create(
35 GpuChannelHostFactory* factory,
36 const gpu::GPUInfo& gpu_info,
37 const IPC::ChannelHandle& channel_handle,
38 base::WaitableEvent* shutdown_event) {
39 DCHECK(factory->IsMainThread());
40 scoped_refptr<GpuChannelHost> host = new GpuChannelHost(factory, gpu_info);
41 host->Connect(channel_handle, shutdown_event);
42 return host;
45 // static
46 bool GpuChannelHost::IsValidGpuMemoryBuffer(
47 gfx::GpuMemoryBufferHandle handle) {
48 switch (handle.type) {
49 case gfx::SHARED_MEMORY_BUFFER:
50 #if defined(OS_MACOSX)
51 case gfx::IO_SURFACE_BUFFER:
52 #endif
53 #if defined(OS_ANDROID)
54 case gfx::SURFACE_TEXTURE_BUFFER:
55 #endif
56 return true;
57 default:
58 return false;
GpuChannelHost::GpuChannelHost(GpuChannelHostFactory* factory,
                               const gpu::GPUInfo& gpu_info)
    : factory_(factory),
      gpu_info_(gpu_info) {
  // Burn the first value of each id sequence — presumably reserving 0 as an
  // invalid/sentinel id for transfer buffers, gpu memory buffers and routes.
  // TODO(review): confirm consumers treat 0 as invalid.
  next_transfer_buffer_id_.GetNext();
  next_gpu_memory_buffer_id_.GetNext();
  next_route_id_.GetNext();
}
71 void GpuChannelHost::Connect(const IPC::ChannelHandle& channel_handle,
72 base::WaitableEvent* shutdown_event) {
73 // Open a channel to the GPU process. We pass NULL as the main listener here
74 // since we need to filter everything to route it to the right thread.
75 scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
76 channel_ = IPC::SyncChannel::Create(channel_handle,
77 IPC::Channel::MODE_CLIENT,
78 NULL,
79 io_loop.get(),
80 true,
81 shutdown_event);
83 sync_filter_ = new IPC::SyncMessageFilter(shutdown_event);
85 channel_->AddFilter(sync_filter_.get());
87 channel_filter_ = new MessageFilter();
89 // Install the filter last, because we intercept all leftover
90 // messages.
91 channel_->AddFilter(channel_filter_.get());
94 bool GpuChannelHost::Send(IPC::Message* msg) {
95 // Callee takes ownership of message, regardless of whether Send is
96 // successful. See IPC::Sender.
97 scoped_ptr<IPC::Message> message(msg);
98 // The GPU process never sends synchronous IPCs so clear the unblock flag to
99 // preserve order.
100 message->set_unblock(false);
102 // Currently we need to choose between two different mechanisms for sending.
103 // On the main thread we use the regular channel Send() method, on another
104 // thread we use SyncMessageFilter. We also have to be careful interpreting
105 // IsMainThread() since it might return false during shutdown,
106 // impl we are actually calling from the main thread (discard message then).
108 // TODO: Can we just always use sync_filter_ since we setup the channel
109 // without a main listener?
110 if (factory_->IsMainThread()) {
111 // http://crbug.com/125264
112 base::ThreadRestrictions::ScopedAllowWait allow_wait;
113 bool result = channel_->Send(message.release());
114 if (!result)
115 DVLOG(1) << "GpuChannelHost::Send failed: Channel::Send failed";
116 return result;
117 } else if (base::MessageLoop::current()) {
118 bool result = sync_filter_->Send(message.release());
119 if (!result)
120 DVLOG(1) << "GpuChannelHost::Send failed: SyncMessageFilter::Send failed";
121 return result;
124 return false;
127 CommandBufferProxyImpl* GpuChannelHost::CreateViewCommandBuffer(
128 int32 surface_id,
129 CommandBufferProxyImpl* share_group,
130 const std::vector<int32>& attribs,
131 const GURL& active_url,
132 gfx::GpuPreference gpu_preference) {
133 TRACE_EVENT1("gpu",
134 "GpuChannelHost::CreateViewCommandBuffer",
135 "surface_id",
136 surface_id);
138 GPUCreateCommandBufferConfig init_params;
139 init_params.share_group_id =
140 share_group ? share_group->GetRouteID() : MSG_ROUTING_NONE;
141 init_params.attribs = attribs;
142 init_params.active_url = active_url;
143 init_params.gpu_preference = gpu_preference;
144 int32 route_id = GenerateRouteID();
145 CreateCommandBufferResult result = factory_->CreateViewCommandBuffer(
146 surface_id, init_params, route_id);
147 if (result != CREATE_COMMAND_BUFFER_SUCCEEDED) {
148 LOG(ERROR) << "GpuChannelHost::CreateViewCommandBuffer failed.";
150 if (result == CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST) {
151 // The GPU channel needs to be considered lost. The caller will
152 // then set up a new connection, and the GPU channel and any
153 // view command buffers will all be associated with the same GPU
154 // process.
155 DCHECK(MessageLoopProxy::current().get());
157 scoped_refptr<base::MessageLoopProxy> io_loop =
158 factory_->GetIOLoopProxy();
159 io_loop->PostTask(
160 FROM_HERE,
161 base::Bind(&GpuChannelHost::MessageFilter::OnChannelError,
162 channel_filter_.get()));
165 return NULL;
168 CommandBufferProxyImpl* command_buffer =
169 new CommandBufferProxyImpl(this, route_id);
170 AddRoute(route_id, command_buffer->AsWeakPtr());
172 AutoLock lock(context_lock_);
173 proxies_[route_id] = command_buffer;
174 return command_buffer;
177 CommandBufferProxyImpl* GpuChannelHost::CreateOffscreenCommandBuffer(
178 const gfx::Size& size,
179 CommandBufferProxyImpl* share_group,
180 const std::vector<int32>& attribs,
181 const GURL& active_url,
182 gfx::GpuPreference gpu_preference) {
183 TRACE_EVENT0("gpu", "GpuChannelHost::CreateOffscreenCommandBuffer");
185 GPUCreateCommandBufferConfig init_params;
186 init_params.share_group_id =
187 share_group ? share_group->GetRouteID() : MSG_ROUTING_NONE;
188 init_params.attribs = attribs;
189 init_params.active_url = active_url;
190 init_params.gpu_preference = gpu_preference;
191 int32 route_id = GenerateRouteID();
192 bool succeeded = false;
193 if (!Send(new GpuChannelMsg_CreateOffscreenCommandBuffer(size,
194 init_params,
195 route_id,
196 &succeeded))) {
197 LOG(ERROR) << "Failed to send GpuChannelMsg_CreateOffscreenCommandBuffer.";
198 return NULL;
201 if (!succeeded) {
202 LOG(ERROR)
203 << "GpuChannelMsg_CreateOffscreenCommandBuffer returned failure.";
204 return NULL;
207 CommandBufferProxyImpl* command_buffer =
208 new CommandBufferProxyImpl(this, route_id);
209 AddRoute(route_id, command_buffer->AsWeakPtr());
211 AutoLock lock(context_lock_);
212 proxies_[route_id] = command_buffer;
213 return command_buffer;
216 scoped_ptr<media::VideoDecodeAccelerator> GpuChannelHost::CreateVideoDecoder(
217 int command_buffer_route_id) {
218 TRACE_EVENT0("gpu", "GpuChannelHost::CreateVideoDecoder");
219 AutoLock lock(context_lock_);
220 ProxyMap::iterator it = proxies_.find(command_buffer_route_id);
221 DCHECK(it != proxies_.end());
222 return it->second->CreateVideoDecoder();
225 scoped_ptr<media::VideoEncodeAccelerator> GpuChannelHost::CreateVideoEncoder(
226 int command_buffer_route_id) {
227 TRACE_EVENT0("gpu", "GpuChannelHost::CreateVideoEncoder");
228 AutoLock lock(context_lock_);
229 ProxyMap::iterator it = proxies_.find(command_buffer_route_id);
230 DCHECK(it != proxies_.end());
231 return it->second->CreateVideoEncoder();
234 void GpuChannelHost::DestroyCommandBuffer(
235 CommandBufferProxyImpl* command_buffer) {
236 TRACE_EVENT0("gpu", "GpuChannelHost::DestroyCommandBuffer");
238 int route_id = command_buffer->GetRouteID();
239 Send(new GpuChannelMsg_DestroyCommandBuffer(route_id));
240 RemoveRoute(route_id);
242 AutoLock lock(context_lock_);
243 proxies_.erase(route_id);
244 delete command_buffer;
247 void GpuChannelHost::AddRoute(
248 int route_id, base::WeakPtr<IPC::Listener> listener) {
249 DCHECK(MessageLoopProxy::current().get());
251 scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
252 io_loop->PostTask(FROM_HERE,
253 base::Bind(&GpuChannelHost::MessageFilter::AddRoute,
254 channel_filter_.get(), route_id, listener,
255 MessageLoopProxy::current()));
258 void GpuChannelHost::RemoveRoute(int route_id) {
259 scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
260 io_loop->PostTask(FROM_HERE,
261 base::Bind(&GpuChannelHost::MessageFilter::RemoveRoute,
262 channel_filter_.get(), route_id));
base::SharedMemoryHandle GpuChannelHost::ShareToGpuProcess(
    base::SharedMemoryHandle source_handle) {
  // Returns a duplicate of |source_handle| valid in the GPU process, or a
  // null handle if the channel is lost or duplication fails. |source_handle|
  // itself is not consumed.
  if (IsLost())
    return base::SharedMemory::NULLHandle();

#if defined(OS_WIN)
  // Windows needs to explicitly duplicate the handle out to another process.
  base::SharedMemoryHandle target_handle;
  if (!BrokerDuplicateHandle(source_handle,
                             channel_->GetPeerPID(),
                             &target_handle,
                             FILE_GENERIC_READ | FILE_GENERIC_WRITE,
                             0)) {
    return base::SharedMemory::NULLHandle();
  }

  return target_handle;
#else
  // On POSIX, dup() the fd; HANDLE_EINTR retries if interrupted by a signal.
  int duped_handle = HANDLE_EINTR(dup(source_handle.fd));
  if (duped_handle < 0)
    return base::SharedMemory::NULLHandle();

  // |true| = auto-close, so the duplicated fd is owned by the returned handle.
  return base::FileDescriptor(duped_handle, true);
#endif
}
291 int32 GpuChannelHost::ReserveTransferBufferId() {
292 return next_transfer_buffer_id_.GetNext();
gfx::GpuMemoryBufferHandle GpuChannelHost::ShareGpuMemoryBufferToGpuProcess(
    gfx::GpuMemoryBufferHandle source_handle) {
  // Produces a handle usable by the GPU process. Only shared memory needs an
  // explicit cross-process duplicate; the other supported types are returned
  // unmodified.
  switch (source_handle.type) {
    case gfx::SHARED_MEMORY_BUFFER: {
      gfx::GpuMemoryBufferHandle handle;
      handle.type = gfx::SHARED_MEMORY_BUFFER;
      handle.handle = ShareToGpuProcess(source_handle.handle);
      return handle;
    }
#if defined(OS_MACOSX)
    case gfx::IO_SURFACE_BUFFER:
      return source_handle;
#endif
#if defined(OS_ANDROID)
    case gfx::SURFACE_TEXTURE_BUFFER:
      return source_handle;
#endif
    default:
      // Callers are expected to pass only types IsValidGpuMemoryBuffer()
      // accepts; anything else is a programming error.
      NOTREACHED();
      return gfx::GpuMemoryBufferHandle();
  }
}
318 int32 GpuChannelHost::ReserveGpuMemoryBufferId() {
319 return next_gpu_memory_buffer_id_.GetNext();
322 int32 GpuChannelHost::GenerateRouteID() {
323 return next_route_id_.GetNext();
GpuChannelHost::~GpuChannelHost() {
  // channel_ must be destroyed on the main thread. If the final reference was
  // released on some other thread, release ownership of the channel and hand
  // it to the main loop for deferred deletion; otherwise it is destroyed
  // normally with the rest of the members.
  if (!factory_->IsMainThread())
    factory_->GetMainLoop()->DeleteSoon(FROM_HERE, channel_.release());
}
GpuChannelHost::MessageFilter::MessageFilter()
    // The channel starts out healthy; lost_ flips to true on channel error.
    : lost_(false) {
}

GpuChannelHost::MessageFilter::~MessageFilter() {}
339 void GpuChannelHost::MessageFilter::AddRoute(
340 int route_id,
341 base::WeakPtr<IPC::Listener> listener,
342 scoped_refptr<MessageLoopProxy> loop) {
343 DCHECK(listeners_.find(route_id) == listeners_.end());
344 GpuListenerInfo info;
345 info.listener = listener;
346 info.loop = loop;
347 listeners_[route_id] = info;
350 void GpuChannelHost::MessageFilter::RemoveRoute(int route_id) {
351 ListenerMap::iterator it = listeners_.find(route_id);
352 if (it != listeners_.end())
353 listeners_.erase(it);
356 bool GpuChannelHost::MessageFilter::OnMessageReceived(
357 const IPC::Message& message) {
358 // Never handle sync message replies or we will deadlock here.
359 if (message.is_reply())
360 return false;
362 ListenerMap::iterator it = listeners_.find(message.routing_id());
363 if (it == listeners_.end())
364 return false;
366 const GpuListenerInfo& info = it->second;
367 info.loop->PostTask(
368 FROM_HERE,
369 base::Bind(
370 base::IgnoreResult(&IPC::Listener::OnMessageReceived),
371 info.listener,
372 message));
373 return true;
376 void GpuChannelHost::MessageFilter::OnChannelError() {
377 // Set the lost state before signalling the proxies. That way, if they
378 // themselves post a task to recreate the context, they will not try to re-use
379 // this channel host.
381 AutoLock lock(lock_);
382 lost_ = true;
385 // Inform all the proxies that an error has occurred. This will be reported
386 // via OpenGL as a lost context.
387 for (ListenerMap::iterator it = listeners_.begin();
388 it != listeners_.end();
389 it++) {
390 const GpuListenerInfo& info = it->second;
391 info.loop->PostTask(
392 FROM_HERE,
393 base::Bind(&IPC::Listener::OnChannelError, info.listener));
396 listeners_.clear();
399 bool GpuChannelHost::MessageFilter::IsLost() const {
400 AutoLock lock(lock_);
401 return lost_;
404 } // namespace content