// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/client/command_buffer_proxy_impl.h"

#include <vector>

#include "base/callback.h"
#include "base/logging.h"
#include "base/memory/shared_memory.h"
#include "base/stl_util.h"
#include "base/trace_event/trace_event.h"
#include "content/common/child_process_messages.h"
#include "content/common/gpu/client/gpu_channel_host.h"
#include "content/common/gpu/client/gpu_video_decode_accelerator_host.h"
#include "content/common/gpu/client/gpu_video_encode_accelerator_host.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/view_messages.h"
#include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
#include "gpu/command_buffer/common/cmd_buffer_common.h"
#include "gpu/command_buffer/common/command_buffer_shared.h"
#include "gpu/command_buffer/common/gpu_memory_allocation.h"
#include "gpu/command_buffer/service/image_factory.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gl/gl_bindings.h"

namespace content {

namespace {

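// Packs the channel id into the high 32 bits and the route id into the low
// 32 bits, so the resulting id is unique across channels within the process.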
uint64_t CommandBufferProxyID(int channel_id, int32 route_id) {
  return (static_cast<uint64_t>(channel_id) << 32) | route_id;
}

}  // namespace

CommandBufferProxyImpl::CommandBufferProxyImpl(GpuChannelHost* channel,
                                               int32 route_id,
                                               int32 stream_id)
    : lock_(nullptr),
      channel_(channel),
      command_buffer_id_(CommandBufferProxyID(channel->channel_id(), route_id)),
      route_id_(route_id),
      stream_id_(stream_id),
      flush_count_(0),
      last_put_offset_(-1),
      last_barrier_put_offset_(-1),
      next_signal_id_(0) {
  DCHECK(channel);
}

CommandBufferProxyImpl::~CommandBufferProxyImpl() {
  FOR_EACH_OBSERVER(DeletionObserver,
                    deletion_observers_,
                    OnWillDeleteImpl());
  if (channel_) {
    channel_->DestroyCommandBuffer(this);
    channel_ = nullptr;
  }
}

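// If the client has installed a lock via SetLock(), take it before handling
// incoming messages (and channel errors, below) so that IPC dispatch does not
// race with the API entry points that call CheckLock().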
bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) {
  scoped_ptr<base::AutoLock> lock;
  if (lock_)
    lock.reset(new base::AutoLock(*lock_));
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(CommandBufferProxyImpl, message)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Destroyed, OnDestroyed);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ConsoleMsg, OnConsoleMessage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetMemoryAllocation,
                        OnSetMemoryAllocation);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPointAck,
                        OnSignalSyncPointAck);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SwapBuffersCompleted,
                        OnSwapBuffersCompleted);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_UpdateVSyncParameters,
                        OnUpdateVSyncParameters);
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  DCHECK(handled);
  return handled;
}
void CommandBufferProxyImpl::OnChannelError() {
  scoped_ptr<base::AutoLock> lock;
  if (lock_)
    lock.reset(new base::AutoLock(*lock_));

  gpu::error::ContextLostReason context_lost_reason =
      gpu::error::kGpuChannelLost;
  if (shared_state_shm_ && shared_state_shm_->memory()) {
    TryUpdateState();
    // The GPU process might have intentionally been crashed
    // (exit_on_context_lost), so try to find out the original reason.
    if (last_state_.error == gpu::error::kLostContext)
      context_lost_reason = last_state_.context_lost_reason;
  }
  OnDestroyed(context_lost_reason, gpu::error::kLostContext);
}

void CommandBufferProxyImpl::OnDestroyed(gpu::error::ContextLostReason reason,
                                         gpu::error::Error error) {
  CheckLock();
  // Prevent any further messages from being sent.
  if (channel_) {
    channel_->DestroyCommandBuffer(this);
    channel_ = nullptr;
  }

  // When the client sees that the context is lost, they should delete this
  // CommandBufferProxyImpl and create a new one.
  last_state_.error = error;
  last_state_.context_lost_reason = reason;

  if (!context_lost_callback_.is_null()) {
    context_lost_callback_.Run();
    // Avoid calling the error callback more than once.
    context_lost_callback_.Reset();
  }
}
void CommandBufferProxyImpl::OnConsoleMessage(
    const GPUCommandBufferConsoleMessage& message) {
  if (!console_message_callback_.is_null()) {
    console_message_callback_.Run(message.message, message.id);
  }
}

void CommandBufferProxyImpl::SetMemoryAllocationChangedCallback(
    const MemoryAllocationChangedCallback& callback) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return;

  memory_allocation_changed_callback_ = callback;
  Send(new GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback(
      route_id_, !memory_allocation_changed_callback_.is_null()));
}

void CommandBufferProxyImpl::AddDeletionObserver(DeletionObserver* observer) {
  CheckLock();
  deletion_observers_.AddObserver(observer);
}

void CommandBufferProxyImpl::RemoveDeletionObserver(
    DeletionObserver* observer) {
  CheckLock();
  deletion_observers_.RemoveObserver(observer);
}

void CommandBufferProxyImpl::OnSetMemoryAllocation(
    const gpu::MemoryAllocation& allocation) {
  if (!memory_allocation_changed_callback_.is_null())
    memory_allocation_changed_callback_.Run(allocation);
}

void CommandBufferProxyImpl::OnSignalSyncPointAck(uint32 id) {
  SignalTaskMap::iterator it = signal_tasks_.find(id);
  DCHECK(it != signal_tasks_.end());
  base::Closure callback = it->second;
  signal_tasks_.erase(it);
  callback.Run();
}

void CommandBufferProxyImpl::SetContextLostCallback(
    const base::Closure& callback) {
  CheckLock();
  context_lost_callback_ = callback;
}
bool CommandBufferProxyImpl::Initialize() {
  TRACE_EVENT0("gpu", "CommandBufferProxyImpl::Initialize");
  shared_state_shm_.reset(channel_->factory()->AllocateSharedMemory(
      sizeof(*shared_state())).release());
  if (!shared_state_shm_)
    return false;

  if (!shared_state_shm_->Map(sizeof(*shared_state())))
    return false;

  shared_state()->Initialize();

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the Initialize IPC below.
  base::SharedMemoryHandle handle =
      channel_->ShareToGpuProcess(shared_state_shm_->handle());
  if (!base::SharedMemory::IsHandleValid(handle))
    return false;

  bool result = false;
  if (!Send(new GpuCommandBufferMsg_Initialize(
          route_id_, handle, &result, &capabilities_))) {
    LOG(ERROR) << "Could not send GpuCommandBufferMsg_Initialize.";
    return false;
  }

  if (!result) {
    LOG(ERROR) << "Failed to initialize command buffer service.";
    return false;
  }

  capabilities_.image = true;

  return true;
}
gpu::CommandBuffer::State CommandBufferProxyImpl::GetLastState() {
  return last_state_;
}

int32 CommandBufferProxyImpl::GetLastToken() {
  TryUpdateState();
  return last_state_.token;
}

void CommandBufferProxyImpl::Flush(int32 put_offset) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return;

  TRACE_EVENT1("gpu",
               "CommandBufferProxyImpl::Flush",
               "put_offset",
               put_offset);

  bool put_offset_changed = last_put_offset_ != put_offset;
  last_put_offset_ = put_offset;
  last_barrier_put_offset_ = put_offset;

  if (channel_) {
    channel_->OrderingBarrier(route_id_, stream_id_, put_offset, ++flush_count_,
                              latency_info_, put_offset_changed, true);
  }

  if (put_offset_changed)
    latency_info_.clear();
}

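// OrderingBarrier() differs from Flush() in two ways visible here: it only
// records last_barrier_put_offset_ (Flush() also records last_put_offset_),
// and it passes false rather than true as the final argument to
// GpuChannelHost::OrderingBarrier(), which appears to defer the actual flush
// of the channel.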
void CommandBufferProxyImpl::OrderingBarrier(int32 put_offset) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  TRACE_EVENT1("gpu", "CommandBufferProxyImpl::OrderingBarrier", "put_offset",
               put_offset);

  bool put_offset_changed = last_barrier_put_offset_ != put_offset;
  last_barrier_put_offset_ = put_offset;

  if (channel_) {
    channel_->OrderingBarrier(route_id_, stream_id_, put_offset, ++flush_count_,
                              latency_info_, put_offset_changed, false);
  }

  if (put_offset_changed)
    latency_info_.clear();
}

void CommandBufferProxyImpl::SetLatencyInfo(
    const std::vector<ui::LatencyInfo>& latency_info) {
  CheckLock();
  for (size_t i = 0; i < latency_info.size(); i++)
    latency_info_.push_back(latency_info[i]);
}

void CommandBufferProxyImpl::SetSwapBuffersCompletionCallback(
    const SwapBuffersCompletionCallback& callback) {
  CheckLock();
  swap_buffers_completion_callback_ = callback;
}

void CommandBufferProxyImpl::SetUpdateVSyncParametersCallback(
    const UpdateVSyncParametersCallback& callback) {
  CheckLock();
  update_vsync_parameters_completion_callback_ = callback;
}
void CommandBufferProxyImpl::WaitForTokenInRange(int32 start, int32 end) {
  CheckLock();
  TRACE_EVENT2("gpu",
               "CommandBufferProxyImpl::WaitForToken",
               "start",
               start,
               "end",
               end);
  TryUpdateState();
  if (!InRange(start, end, last_state_.token) &&
      last_state_.error == gpu::error::kNoError) {
    gpu::CommandBuffer::State state;
    if (Send(new GpuCommandBufferMsg_WaitForTokenInRange(
            route_id_, start, end, &state)))
      OnUpdateState(state);
  }
  DCHECK(InRange(start, end, last_state_.token) ||
         last_state_.error != gpu::error::kNoError);
}

void CommandBufferProxyImpl::WaitForGetOffsetInRange(int32 start, int32 end) {
  CheckLock();
  TRACE_EVENT2("gpu",
               "CommandBufferProxyImpl::WaitForGetOffset",
               "start",
               start,
               "end",
               end);
  TryUpdateState();
  if (!InRange(start, end, last_state_.get_offset) &&
      last_state_.error == gpu::error::kNoError) {
    gpu::CommandBuffer::State state;
    if (Send(new GpuCommandBufferMsg_WaitForGetOffsetInRange(
            route_id_, start, end, &state)))
      OnUpdateState(state);
  }
  DCHECK(InRange(start, end, last_state_.get_offset) ||
         last_state_.error != gpu::error::kNoError);
}

void CommandBufferProxyImpl::SetGetBuffer(int32 shm_id) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_, shm_id));
  last_put_offset_ = -1;
}

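// Transfer buffer creation below proceeds in four steps: reserve an id on the
// channel, allocate and map shared memory in this process, duplicate the
// handle for the GPU process, and register it with the service via
// GpuCommandBufferMsg_RegisterTransferBuffer before wrapping the memory in a
// gpu::Buffer.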
scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer(
    size_t size,
    int32* id) {
  CheckLock();
  *id = -1;

  if (last_state_.error != gpu::error::kNoError)
    return NULL;

  int32 new_id = channel_->ReserveTransferBufferId();

  scoped_ptr<base::SharedMemory> shared_memory(
      channel_->factory()->AllocateSharedMemory(size));
  if (!shared_memory) {
    if (last_state_.error == gpu::error::kNoError)
      last_state_.error = gpu::error::kOutOfBounds;
    return NULL;
  }

  DCHECK(!shared_memory->memory());
  if (!shared_memory->Map(size)) {
    if (last_state_.error == gpu::error::kNoError)
      last_state_.error = gpu::error::kOutOfBounds;
    return NULL;
  }

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the RegisterTransferBuffer IPC below.
  base::SharedMemoryHandle handle =
      channel_->ShareToGpuProcess(shared_memory->handle());
  if (!base::SharedMemory::IsHandleValid(handle)) {
    if (last_state_.error == gpu::error::kNoError)
      last_state_.error = gpu::error::kLostContext;
    return NULL;
  }

  if (!Send(new GpuCommandBufferMsg_RegisterTransferBuffer(route_id_,
                                                           new_id,
                                                           handle,
                                                           size))) {
    return NULL;
  }

  *id = new_id;
  scoped_refptr<gpu::Buffer> buffer(
      gpu::MakeBufferFromSharedMemory(shared_memory.Pass(), size));
  return buffer;
}
void CommandBufferProxyImpl::DestroyTransferBuffer(int32 id) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id));
}

gpu::Capabilities CommandBufferProxyImpl::GetCapabilities() {
  return capabilities_;
}
int32_t CommandBufferProxyImpl::CreateImage(ClientBuffer buffer,
                                            size_t width,
                                            size_t height,
                                            unsigned internalformat) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return -1;

  int32 new_id = channel_->ReserveImageId();

  gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager =
      channel_->gpu_memory_buffer_manager();
  gfx::GpuMemoryBuffer* gpu_memory_buffer =
      gpu_memory_buffer_manager->GpuMemoryBufferFromClientBuffer(buffer);
  DCHECK(gpu_memory_buffer);

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the CreateImage IPC below.
  bool requires_sync_point = false;
  gfx::GpuMemoryBufferHandle handle =
      channel_->ShareGpuMemoryBufferToGpuProcess(gpu_memory_buffer->GetHandle(),
                                                 &requires_sync_point);

  DCHECK(gpu::ImageFactory::IsGpuMemoryBufferFormatSupported(
      gpu_memory_buffer->GetFormat(), capabilities_));
  DCHECK(gpu::ImageFactory::IsImageSizeValidForGpuMemoryBufferFormat(
      gfx::Size(width, height), gpu_memory_buffer->GetFormat()));
  DCHECK(gpu::ImageFactory::IsImageFormatCompatibleWithGpuMemoryBufferFormat(
      internalformat, gpu_memory_buffer->GetFormat()));
  if (!Send(new GpuCommandBufferMsg_CreateImage(route_id_,
                                                new_id,
                                                handle,
                                                gfx::Size(width, height),
                                                gpu_memory_buffer->GetFormat(),
                                                internalformat))) {
    return -1;
  }

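  // Sharing the GpuMemoryBuffer handle to the GPU process may require
  // synchronization; when it does, attach a sync point so the buffer is not
  // reclaimed before the service is done with the handle.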
  if (requires_sync_point) {
    gpu_memory_buffer_manager->SetDestructionSyncPoint(gpu_memory_buffer,
                                                       InsertSyncPoint());
  }

  return new_id;
}

void CommandBufferProxyImpl::DestroyImage(int32 id) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_DestroyImage(route_id_, id));
}
int32_t CommandBufferProxyImpl::CreateGpuMemoryBufferImage(
    size_t width,
    size_t height,
    unsigned internalformat,
    unsigned usage) {
  CheckLock();
  scoped_ptr<gfx::GpuMemoryBuffer> buffer(
      channel_->gpu_memory_buffer_manager()->AllocateGpuMemoryBuffer(
          gfx::Size(width, height),
          gpu::ImageFactory::DefaultBufferFormatForImageFormat(internalformat),
          gpu::ImageFactory::ImageUsageToGpuMemoryBufferUsage(usage)));
  if (!buffer)
    return -1;

  return CreateImage(buffer->AsClientBuffer(), width, height, internalformat);
}

uint32 CommandBufferProxyImpl::CreateStreamTexture(uint32 texture_id) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return 0;

  int32 stream_id = channel_->GenerateRouteID();
  bool succeeded = false;
  Send(new GpuCommandBufferMsg_CreateStreamTexture(
      route_id_, texture_id, stream_id, &succeeded));
  if (!succeeded) {
    DLOG(ERROR) << "GpuCommandBufferMsg_CreateStreamTexture returned failure";
    return 0;
  }
  return stream_id;
}
void CommandBufferProxyImpl::SetLock(base::Lock* lock) {
  lock_ = lock;
}

bool CommandBufferProxyImpl::IsGpuChannelLost() {
  return !channel_ || channel_->IsLost();
}

gpu::CommandBufferNamespace CommandBufferProxyImpl::GetNamespaceID() const {
  return gpu::CommandBufferNamespace::GPU_IO;
}

uint64_t CommandBufferProxyImpl::GetCommandBufferID() const {
  return command_buffer_id_;
}

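// InsertSyncPoint() and InsertFutureSyncPoint() send the same IPC; judging
// from the two wrappers and RetireSyncPoint() below, the boolean argument
// selects between a sync point the service retires immediately (true) and a
// "future" sync point the client must retire explicitly (false).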
uint32 CommandBufferProxyImpl::InsertSyncPoint() {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return 0;

  uint32 sync_point = 0;
  Send(new GpuCommandBufferMsg_InsertSyncPoint(route_id_, true, &sync_point));
  return sync_point;
}

uint32_t CommandBufferProxyImpl::InsertFutureSyncPoint() {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return 0;

  uint32 sync_point = 0;
  Send(new GpuCommandBufferMsg_InsertSyncPoint(route_id_, false, &sync_point));
  return sync_point;
}

void CommandBufferProxyImpl::RetireSyncPoint(uint32_t sync_point) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_RetireSyncPoint(route_id_, sync_point));
}
void CommandBufferProxyImpl::SignalSyncPoint(uint32 sync_point,
                                             const base::Closure& callback) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return;

  uint32 signal_id = next_signal_id_++;
  if (!Send(new GpuCommandBufferMsg_SignalSyncPoint(route_id_,
                                                    sync_point,
                                                    signal_id))) {
    return;
  }

  signal_tasks_.insert(std::make_pair(signal_id, callback));
}
void CommandBufferProxyImpl::SignalQuery(uint32 query,
                                         const base::Closure& callback) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return;

  // Signal identifiers are hidden, so nobody outside of this class will see
  // them. (And thus, they cannot save them.) The IDs themselves only last
  // until the callback is invoked, which will happen as soon as the GPU
  // catches up with the command buffer.
  // A malicious caller trying to create a collision by making next_signal_id
  // wrap would have to make calls at an astounding rate (300B/s), and even if
  // they could do that, all they would do is prevent some callbacks from
  // getting called, leading to stalled threads and/or memory leaks.
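  // (Signal ids are 32-bit, so an id could only collide with a still-pending
  // callback after roughly 2^32, about 4.3 billion, signal requests were
  // issued while that callback remained outstanding.)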
  uint32 signal_id = next_signal_id_++;
  if (!Send(new GpuCommandBufferMsg_SignalQuery(route_id_,
                                                query,
                                                signal_id))) {
    return;
  }

  signal_tasks_.insert(std::make_pair(signal_id, callback));
}

void CommandBufferProxyImpl::SetSurfaceVisible(bool visible) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_SetSurfaceVisible(route_id_, visible));
}
bool CommandBufferProxyImpl::ProduceFrontBuffer(const gpu::Mailbox& mailbox) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return false;

  return Send(new GpuCommandBufferMsg_ProduceFrontBuffer(route_id_, mailbox));
}

scoped_ptr<media::VideoDecodeAccelerator>
CommandBufferProxyImpl::CreateVideoDecoder() {
  if (!channel_)
    return scoped_ptr<media::VideoDecodeAccelerator>();
  return scoped_ptr<media::VideoDecodeAccelerator>(
      new GpuVideoDecodeAcceleratorHost(channel_, this));
}

scoped_ptr<media::VideoEncodeAccelerator>
CommandBufferProxyImpl::CreateVideoEncoder() {
  if (!channel_)
    return scoped_ptr<media::VideoEncodeAccelerator>();
  return scoped_ptr<media::VideoEncodeAccelerator>(
      new GpuVideoEncodeAcceleratorHost(channel_, this));
}

gpu::error::Error CommandBufferProxyImpl::GetLastError() {
  return last_state_.error;
}
bool CommandBufferProxyImpl::Send(IPC::Message* msg) {
  // Caller should not intentionally send a message if the context is lost.
  DCHECK(last_state_.error == gpu::error::kNoError);

  if (channel_) {
    if (channel_->Send(msg)) {
      return true;
    } else {
      // Flag the command buffer as lost. Defer deleting the channel until
      // OnChannelError is called after returning to the message loop in case
      // it is referenced elsewhere.
      DVLOG(1) << "CommandBufferProxyImpl::Send failed. Losing context.";
      last_state_.error = gpu::error::kLostContext;
      return false;
    }
  }

  // Callee takes ownership of message, regardless of whether Send is
  // successful. See IPC::Sender.
  delete msg;
  return false;
}
void CommandBufferProxyImpl::OnUpdateState(
    const gpu::CommandBuffer::State& state) {
  // Handle wraparound. It works as long as we don't have more than 2B state
  // updates in flight across which reordering occurs.
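  // For example, if last_state_.generation is 0xFFFFFFF0 and a newer state
  // arrives with generation 0x00000010, the unsigned difference is 0x20,
  // which is below 0x80000000, so the update is applied across the wrap;
  // a stale state arriving out of order yields a difference of 0x80000000
  // or more and is ignored.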
  if (state.generation - last_state_.generation < 0x80000000U)
    last_state_ = state;
}

void CommandBufferProxyImpl::SetOnConsoleMessageCallback(
    const GpuConsoleMessageCallback& callback) {
  CheckLock();
  console_message_callback_ = callback;
}

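// The service side publishes command buffer state through the shared memory
// mapped in Initialize(); TryUpdateState() refreshes last_state_ from it as
// long as no error has been recorded locally.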
void CommandBufferProxyImpl::TryUpdateState() {
  if (last_state_.error == gpu::error::kNoError)
    shared_state()->Read(&last_state_);
}

gpu::CommandBufferSharedState* CommandBufferProxyImpl::shared_state() const {
  return reinterpret_cast<gpu::CommandBufferSharedState*>(
      shared_state_shm_->memory());
}

void CommandBufferProxyImpl::OnSwapBuffersCompleted(
    const std::vector<ui::LatencyInfo>& latency_info,
    gfx::SwapResult result) {
  if (!swap_buffers_completion_callback_.is_null()) {
    if (!ui::LatencyInfo::Verify(
            latency_info, "CommandBufferProxyImpl::OnSwapBuffersCompleted")) {
      swap_buffers_completion_callback_.Run(std::vector<ui::LatencyInfo>(),
                                            result);
      return;
    }
    swap_buffers_completion_callback_.Run(latency_info, result);
  }
}

void CommandBufferProxyImpl::OnUpdateVSyncParameters(base::TimeTicks timebase,
                                                     base::TimeDelta interval) {
  if (!update_vsync_parameters_completion_callback_.is_null())
    update_vsync_parameters_completion_callback_.Run(timebase, interval);
}

}  // namespace content