gpu: Add memory tracing of GPU transfer buffers.
content/common/gpu/client/gpu_channel_host.cc

// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/client/gpu_channel_host.h"

#include <algorithm>

#include "base/atomic_sequence_num.h"
#include "base/bind.h"
#include "base/location.h"
#include "base/posix/eintr_wrapper.h"
#include "base/single_thread_task_runner.h"
#include "base/thread_task_runner_handle.h"
#include "base/threading/thread_restrictions.h"
#include "base/trace_event/trace_event.h"
#include "content/common/gpu/client/command_buffer_proxy_impl.h"
#include "content/common/gpu/client/gpu_jpeg_decode_accelerator_host.h"
#include "content/common/gpu/gpu_messages.h"
#include "ipc/ipc_sync_message_filter.h"
#include "url/gurl.h"

#if defined(OS_WIN) || defined(OS_MACOSX)
#include "content/public/common/sandbox_init.h"
#endif

using base::AutoLock;

namespace content {
namespace {

// Global atomic to generate unique transfer buffer IDs.
base::StaticAtomicSequenceNumber g_next_transfer_buffer_id;

}  // namespace

GpuChannelHost::StreamFlushInfo::StreamFlushInfo()
    : flush_pending(false),
      route_id(MSG_ROUTING_NONE),
      put_offset(0),
      flush_count(0) {}

GpuChannelHost::StreamFlushInfo::~StreamFlushInfo() {}

// static
scoped_refptr<GpuChannelHost> GpuChannelHost::Create(
    GpuChannelHostFactory* factory,
    const gpu::GPUInfo& gpu_info,
    const IPC::ChannelHandle& channel_handle,
    base::WaitableEvent* shutdown_event,
    gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager) {
  DCHECK(factory->IsMainThread());
  scoped_refptr<GpuChannelHost> host =
      new GpuChannelHost(factory, gpu_info, gpu_memory_buffer_manager);
  host->Connect(channel_handle, shutdown_event);
  return host;
}

GpuChannelHost::GpuChannelHost(
    GpuChannelHostFactory* factory,
    const gpu::GPUInfo& gpu_info,
    gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager)
    : factory_(factory),
      gpu_info_(gpu_info),
      gpu_memory_buffer_manager_(gpu_memory_buffer_manager) {
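  // Consume the first value (0) of each ID sequence so that generated IDs
  // start at 1; 0 appears to be reserved as an invalid value.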
  next_image_id_.GetNext();
  next_route_id_.GetNext();
  next_stream_id_.GetNext();
}

void GpuChannelHost::Connect(const IPC::ChannelHandle& channel_handle,
                             base::WaitableEvent* shutdown_event) {
  DCHECK(factory_->IsMainThread());
  // Open a channel to the GPU process. We pass NULL as the main listener here
  // since we need to filter everything to route it to the right thread.
  scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
      factory_->GetIOThreadTaskRunner();
  channel_ = IPC::SyncChannel::Create(
      channel_handle, IPC::Channel::MODE_CLIENT, NULL, io_task_runner.get(),
      true, shutdown_event, factory_->GetAttachmentBroker());

  sync_filter_ = channel_->CreateSyncMessageFilter();

  channel_filter_ = new MessageFilter();

  // Install the filter last, because we intercept all leftover
  // messages.
  channel_->AddFilter(channel_filter_.get());
}

bool GpuChannelHost::Send(IPC::Message* msg) {
  // Callee takes ownership of message, regardless of whether Send is
  // successful. See IPC::Sender.
  scoped_ptr<IPC::Message> message(msg);
  // The GPU process never sends synchronous IPCs so clear the unblock flag to
  // preserve order.
  message->set_unblock(false);

  // Currently we need to choose between two different mechanisms for sending.
  // On the main thread we use the regular channel Send() method, on another
  // thread we use SyncMessageFilter. We also have to be careful interpreting
  // IsMainThread() since it might return false during shutdown even though we
  // are actually calling from the main thread (in that case the message is
  // discarded).
  //
  // TODO: Can we just always use sync_filter_ since we set up the channel
  // without a main listener?
  if (factory_->IsMainThread()) {
    // channel_ is only modified on the main thread, so we don't need to take a
    // lock here.
    if (!channel_) {
      DVLOG(1) << "GpuChannelHost::Send failed: Channel already destroyed";
      return false;
    }
    // http://crbug.com/125264
    base::ThreadRestrictions::ScopedAllowWait allow_wait;
    bool result = channel_->Send(message.release());
    if (!result)
      DVLOG(1) << "GpuChannelHost::Send failed: Channel::Send failed";
    return result;
  }

  bool result = sync_filter_->Send(message.release());
  return result;
}

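// Records the latest put offset for |stream_id| and defers the actual
// GpuCommandBufferMsg_AsyncFlush IPC until either |do_flush| is set or a
// different route on the same stream needs ordering, coalescing consecutive
// flushes from the same command buffer into a single message.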
void GpuChannelHost::OrderingBarrier(
    int32 route_id,
    int32 stream_id,
    int32 put_offset,
    uint32 flush_count,
    const std::vector<ui::LatencyInfo>& latency_info,
    bool put_offset_changed,
    bool do_flush) {
  AutoLock lock(context_lock_);
  StreamFlushInfo& flush_info = stream_flush_info_[stream_id];
  if (flush_info.flush_pending && flush_info.route_id != route_id)
    InternalFlush(stream_id);

  if (put_offset_changed) {
    flush_info.flush_pending = true;
    flush_info.route_id = route_id;
    flush_info.put_offset = put_offset;
    flush_info.flush_count = flush_count;
    flush_info.latency_info.insert(flush_info.latency_info.end(),
                                   latency_info.begin(), latency_info.end());

    if (do_flush)
      InternalFlush(stream_id);
  }
}

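// Sends the pending flush for |stream_id| to the GPU process. The caller must
// hold context_lock_.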
void GpuChannelHost::InternalFlush(int32 stream_id) {
  context_lock_.AssertAcquired();
  StreamFlushInfo& flush_info = stream_flush_info_[stream_id];
  DCHECK(flush_info.flush_pending);
  Send(new GpuCommandBufferMsg_AsyncFlush(
      flush_info.route_id, flush_info.put_offset, flush_info.flush_count,
      flush_info.latency_info));
  flush_info.latency_info.clear();
  flush_info.flush_pending = false;
}

scoped_ptr<CommandBufferProxyImpl> GpuChannelHost::CreateViewCommandBuffer(
    int32 surface_id,
    CommandBufferProxyImpl* share_group,
    int32 stream_id,
    const std::vector<int32>& attribs,
    const GURL& active_url,
    gfx::GpuPreference gpu_preference) {
  DCHECK(!share_group || (stream_id == share_group->stream_id()));
  TRACE_EVENT1("gpu",
               "GpuChannelHost::CreateViewCommandBuffer",
               "surface_id",
               surface_id);

  GPUCreateCommandBufferConfig init_params;
  init_params.share_group_id =
      share_group ? share_group->route_id() : MSG_ROUTING_NONE;
  init_params.stream_id = stream_id;
  init_params.attribs = attribs;
  init_params.active_url = active_url;
  init_params.gpu_preference = gpu_preference;

  int32 route_id = GenerateRouteID();

  CreateCommandBufferResult result = factory_->CreateViewCommandBuffer(
      surface_id, init_params, route_id);
  if (result != CREATE_COMMAND_BUFFER_SUCCEEDED) {
    LOG(ERROR) << "GpuChannelHost::CreateViewCommandBuffer failed.";

    if (result == CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST) {
      // The GPU channel needs to be considered lost. The caller will
      // then set up a new connection, and the GPU channel and any
      // view command buffers will all be associated with the same GPU
      // process.
      scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
          factory_->GetIOThreadTaskRunner();
      io_task_runner->PostTask(
          FROM_HERE, base::Bind(&GpuChannelHost::MessageFilter::OnChannelError,
                                channel_filter_.get()));
    }

    return NULL;
  }

  scoped_ptr<CommandBufferProxyImpl> command_buffer =
      make_scoped_ptr(new CommandBufferProxyImpl(this, route_id, stream_id));
  AddRoute(route_id, command_buffer->AsWeakPtr());

  return command_buffer.Pass();
}

scoped_ptr<CommandBufferProxyImpl> GpuChannelHost::CreateOffscreenCommandBuffer(
    const gfx::Size& size,
    CommandBufferProxyImpl* share_group,
    int32 stream_id,
    const std::vector<int32>& attribs,
    const GURL& active_url,
    gfx::GpuPreference gpu_preference) {
  DCHECK(!share_group || (stream_id == share_group->stream_id()));
  TRACE_EVENT0("gpu", "GpuChannelHost::CreateOffscreenCommandBuffer");

  GPUCreateCommandBufferConfig init_params;
  init_params.share_group_id =
      share_group ? share_group->route_id() : MSG_ROUTING_NONE;
  init_params.stream_id = stream_id;
  init_params.attribs = attribs;
  init_params.active_url = active_url;
  init_params.gpu_preference = gpu_preference;

  int32 route_id = GenerateRouteID();

  bool succeeded = false;
  if (!Send(new GpuChannelMsg_CreateOffscreenCommandBuffer(
          size, init_params, route_id, &succeeded))) {
    LOG(ERROR) << "Failed to send GpuChannelMsg_CreateOffscreenCommandBuffer.";
    return NULL;
  }

  if (!succeeded) {
    LOG(ERROR)
        << "GpuChannelMsg_CreateOffscreenCommandBuffer returned failure.";
    return NULL;
  }

  scoped_ptr<CommandBufferProxyImpl> command_buffer =
      make_scoped_ptr(new CommandBufferProxyImpl(this, route_id, stream_id));
  AddRoute(route_id, command_buffer->AsWeakPtr());

  return command_buffer.Pass();
}

scoped_ptr<media::JpegDecodeAccelerator> GpuChannelHost::CreateJpegDecoder(
    media::JpegDecodeAccelerator::Client* client) {
  TRACE_EVENT0("gpu", "GpuChannelHost::CreateJpegDecoder");

  scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
      factory_->GetIOThreadTaskRunner();
  int32 route_id = GenerateRouteID();
  scoped_ptr<GpuJpegDecodeAcceleratorHost> decoder(
      new GpuJpegDecodeAcceleratorHost(this, route_id, io_task_runner));
  if (!decoder->Initialize(client)) {
    return nullptr;
  }

  // Reply messages from the JPEG decoder must be handled on the IO thread, so
  // register the route with the IO task runner rather than the current one.
  io_task_runner->PostTask(FROM_HERE,
                           base::Bind(&GpuChannelHost::MessageFilter::AddRoute,
                                      channel_filter_.get(), route_id,
                                      decoder->GetReceiver(), io_task_runner));

  return decoder.Pass();
}

void GpuChannelHost::DestroyCommandBuffer(
    CommandBufferProxyImpl* command_buffer) {
  TRACE_EVENT0("gpu", "GpuChannelHost::DestroyCommandBuffer");

  int32 route_id = command_buffer->route_id();
  int32 stream_id = command_buffer->stream_id();
  Send(new GpuChannelMsg_DestroyCommandBuffer(route_id));
  RemoveRoute(route_id);

  // stream_flush_info_ is accessed under context_lock_ everywhere else
  // (OrderingBarrier, InternalFlush), so take the lock here as well.
  AutoLock lock(context_lock_);
  if (stream_flush_info_[stream_id].route_id == route_id)
    stream_flush_info_.erase(stream_id);
}

void GpuChannelHost::DestroyChannel() {
  DCHECK(factory_->IsMainThread());
  AutoLock lock(context_lock_);
  channel_.reset();
}

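// Route registration is forwarded to the MessageFilter on the IO thread.
// Messages for |route_id| will be dispatched to |listener| on the task runner
// of the thread that called AddRoute().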
void GpuChannelHost::AddRoute(
    int route_id, base::WeakPtr<IPC::Listener> listener) {
  scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
      factory_->GetIOThreadTaskRunner();
  io_task_runner->PostTask(FROM_HERE,
                           base::Bind(&GpuChannelHost::MessageFilter::AddRoute,
                                      channel_filter_.get(), route_id, listener,
                                      base::ThreadTaskRunnerHandle::Get()));
}

void GpuChannelHost::RemoveRoute(int route_id) {
  scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
      factory_->GetIOThreadTaskRunner();
  io_task_runner->PostTask(
      FROM_HERE, base::Bind(&GpuChannelHost::MessageFilter::RemoveRoute,
                            channel_filter_.get(), route_id));
}

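// Returns a handle to |source_handle|'s shared memory that the GPU process
// can use, or a null handle if the channel is lost.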
base::SharedMemoryHandle GpuChannelHost::ShareToGpuProcess(
    base::SharedMemoryHandle source_handle) {
  if (IsLost())
    return base::SharedMemory::NULLHandle();

#if defined(OS_WIN) || defined(OS_MACOSX)
  // Windows and Mac need to explicitly duplicate the handle out to another
  // process.
  base::SharedMemoryHandle target_handle;
  base::ProcessId peer_pid;
  {
    AutoLock lock(context_lock_);
    if (!channel_)
      return base::SharedMemory::NULLHandle();
    peer_pid = channel_->GetPeerPID();
  }
#if defined(OS_WIN)
  bool success =
      BrokerDuplicateHandle(source_handle, peer_pid, &target_handle,
                            FILE_GENERIC_READ | FILE_GENERIC_WRITE, 0);
#elif defined(OS_MACOSX)
  bool success = BrokerDuplicateSharedMemoryHandle(source_handle, peer_pid,
                                                   &target_handle);
#endif
  if (!success)
    return base::SharedMemory::NULLHandle();

  return target_handle;
#else
  return base::SharedMemory::DuplicateHandle(source_handle);
#endif  // defined(OS_WIN) || defined(OS_MACOSX)
}

int32 GpuChannelHost::ReserveTransferBufferId() {
  // 0 is a reserved value.
  return g_next_transfer_buffer_id.GetNext() + 1;
}

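// Prepares |source_handle| for sending to the GPU process. Shared memory
// handles must be duplicated for the target process; native buffer types
// (IOSurface, SurfaceTexture, Ozone native pixmap) are passed through as-is
// but are flagged as requiring a sync point for ordering.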
gfx::GpuMemoryBufferHandle GpuChannelHost::ShareGpuMemoryBufferToGpuProcess(
    const gfx::GpuMemoryBufferHandle& source_handle,
    bool* requires_sync_point) {
  switch (source_handle.type) {
    case gfx::SHARED_MEMORY_BUFFER: {
      gfx::GpuMemoryBufferHandle handle;
      handle.type = gfx::SHARED_MEMORY_BUFFER;
      handle.handle = ShareToGpuProcess(source_handle.handle);
      *requires_sync_point = false;
      return handle;
    }
    case gfx::IO_SURFACE_BUFFER:
    case gfx::SURFACE_TEXTURE_BUFFER:
    case gfx::OZONE_NATIVE_PIXMAP:
      *requires_sync_point = true;
      return source_handle;
    default:
      NOTREACHED();
      return gfx::GpuMemoryBufferHandle();
  }
}

int32 GpuChannelHost::ReserveImageId() {
  return next_image_id_.GetNext();
}

int32 GpuChannelHost::GenerateRouteID() {
  return next_route_id_.GetNext();
}

int32 GpuChannelHost::GenerateStreamID() {
  return next_stream_id_.GetNext();
}

GpuChannelHost::~GpuChannelHost() {
#if DCHECK_IS_ON()
  AutoLock lock(context_lock_);
  DCHECK(!channel_)
      << "GpuChannelHost::DestroyChannel must be called before destruction.";
#endif
}

GpuChannelHost::MessageFilter::ListenerInfo::ListenerInfo() {}

GpuChannelHost::MessageFilter::ListenerInfo::~ListenerInfo() {}

GpuChannelHost::MessageFilter::MessageFilter()
    : lost_(false) {
}

GpuChannelHost::MessageFilter::~MessageFilter() {}

void GpuChannelHost::MessageFilter::AddRoute(
    int32 route_id,
    base::WeakPtr<IPC::Listener> listener,
    scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
  DCHECK(listeners_.find(route_id) == listeners_.end());
  DCHECK(task_runner);
  ListenerInfo info;
  info.listener = listener;
  info.task_runner = task_runner;
  listeners_[route_id] = info;
}

void GpuChannelHost::MessageFilter::RemoveRoute(int32 route_id) {
  listeners_.erase(route_id);
}

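// Dispatches an incoming message to the listener registered for its routing
// ID by posting it to that listener's task runner. Returns true only if the
// message was handed off to a listener.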
bool GpuChannelHost::MessageFilter::OnMessageReceived(
    const IPC::Message& message) {
  // Never handle sync message replies or we will deadlock here.
  if (message.is_reply())
    return false;

  auto it = listeners_.find(message.routing_id());
  if (it == listeners_.end())
    return false;

  const ListenerInfo& info = it->second;
  info.task_runner->PostTask(
      FROM_HERE,
      base::Bind(base::IgnoreResult(&IPC::Listener::OnMessageReceived),
                 info.listener, message));
  return true;
}

void GpuChannelHost::MessageFilter::OnChannelError() {
  // Set the lost state before signalling the proxies. That way, if they
  // themselves post a task to recreate the context, they will not try to
  // re-use this channel host.
  {
    AutoLock lock(lock_);
    lost_ = true;
  }

  // Inform all the proxies that an error has occurred. This will be reported
  // via OpenGL as a lost context.
  for (const auto& kv : listeners_) {
    const ListenerInfo& info = kv.second;
    info.task_runner->PostTask(
        FROM_HERE, base::Bind(&IPC::Listener::OnChannelError, info.listener));
  }

  listeners_.clear();
}

bool GpuChannelHost::MessageFilter::IsLost() const {
  AutoLock lock(lock_);
  return lost_;
}

}  // namespace content