// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/client/command_buffer_proxy_impl.h"

9 #include "base/callback.h"
10 #include "base/logging.h"
11 #include "base/memory/shared_memory.h"
12 #include "base/stl_util.h"
13 #include "base/trace_event/trace_event.h"
14 #include "content/common/child_process_messages.h"
15 #include "content/common/gpu/client/gpu_channel_host.h"
16 #include "content/common/gpu/client/gpu_video_decode_accelerator_host.h"
17 #include "content/common/gpu/client/gpu_video_encode_accelerator_host.h"
18 #include "content/common/gpu/gpu_messages.h"
19 #include "content/common/view_messages.h"
20 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
21 #include "gpu/command_buffer/common/cmd_buffer_common.h"
22 #include "gpu/command_buffer/common/command_buffer_shared.h"
23 #include "gpu/command_buffer/common/gpu_memory_allocation.h"
24 #include "gpu/command_buffer/service/image_factory.h"
25 #include "ui/gfx/geometry/size.h"
26 #include "ui/gl/gl_bindings.h"
uint64_t CommandBufferProxyID(int channel_id, int32 route_id) {
  return (static_cast<uint64_t>(channel_id) << 32) | route_id;
}

CommandBufferProxyImpl::CommandBufferProxyImpl(GpuChannelHost* channel,
                                               int32 route_id,
                                               int32 stream_id)
    : lock_(nullptr),
      channel_(channel),
      command_buffer_id_(CommandBufferProxyID(channel->channel_id(), route_id)),
      route_id_(route_id),
      stream_id_(stream_id),
      flush_count_(0),
      last_put_offset_(-1),
      last_barrier_put_offset_(-1),
      next_signal_id_(0) {
}

CommandBufferProxyImpl::~CommandBufferProxyImpl() {
  FOR_EACH_OBSERVER(DeletionObserver, deletion_observers_, OnWillDeleteImpl());
  if (channel_)
    channel_->DestroyCommandBuffer(this);
}

bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) {
  scoped_ptr<base::AutoLock> lock;
  if (lock_)
    lock.reset(new base::AutoLock(*lock_));
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(CommandBufferProxyImpl, message)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Destroyed, OnDestroyed);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ConsoleMsg, OnConsoleMessage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetMemoryAllocation,
                        OnSetMemoryAllocation);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPointAck,
                        OnSignalSyncPointAck);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SwapBuffersCompleted,
                        OnSwapBuffersCompleted);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_UpdateVSyncParameters,
                        OnUpdateVSyncParameters);
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  return handled;
}

void CommandBufferProxyImpl::OnChannelError() {
  scoped_ptr<base::AutoLock> lock;
  if (lock_)
    lock.reset(new base::AutoLock(*lock_));

  gpu::error::ContextLostReason context_lost_reason =
      gpu::error::kGpuChannelLost;
  if (shared_state_shm_ && shared_state_shm_->memory()) {
    TryUpdateState();
    // The GPU process might have intentionally been crashed
    // (exit_on_context_lost), so try to find out the original reason.
    if (last_state_.error == gpu::error::kLostContext)
      context_lost_reason = last_state_.context_lost_reason;
  }
  OnDestroyed(context_lost_reason, gpu::error::kLostContext);
}

void CommandBufferProxyImpl::OnDestroyed(gpu::error::ContextLostReason reason,
                                         gpu::error::Error error) {
  // Prevent any further messages from being sent.
  if (channel_) {
    channel_->DestroyCommandBuffer(this);
    channel_ = nullptr;
  }

  // When the client sees that the context is lost, it should delete this
  // CommandBufferProxyImpl and create a new one.
  last_state_.error = error;
  last_state_.context_lost_reason = reason;

  if (!context_lost_callback_.is_null()) {
    context_lost_callback_.Run();
    // Avoid calling the error callback more than once.
    context_lost_callback_.Reset();
  }
}

void CommandBufferProxyImpl::OnConsoleMessage(
    const GPUCommandBufferConsoleMessage& message) {
  if (!console_message_callback_.is_null()) {
    console_message_callback_.Run(message.message, message.id);
  }
}

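// Registering (or clearing) the callback below also tells the GPU process
// whether a client-side listener exists, via
// GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback.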
void CommandBufferProxyImpl::SetMemoryAllocationChangedCallback(
    const MemoryAllocationChangedCallback& callback) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  memory_allocation_changed_callback_ = callback;
  Send(new GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback(
      route_id_, !memory_allocation_changed_callback_.is_null()));
}

void CommandBufferProxyImpl::AddDeletionObserver(DeletionObserver* observer) {
  deletion_observers_.AddObserver(observer);
}

void CommandBufferProxyImpl::RemoveDeletionObserver(
    DeletionObserver* observer) {
  deletion_observers_.RemoveObserver(observer);
}

void CommandBufferProxyImpl::OnSetMemoryAllocation(
    const gpu::MemoryAllocation& allocation) {
  if (!memory_allocation_changed_callback_.is_null())
    memory_allocation_changed_callback_.Run(allocation);
}

void CommandBufferProxyImpl::OnSignalSyncPointAck(uint32 id) {
  SignalTaskMap::iterator it = signal_tasks_.find(id);
  DCHECK(it != signal_tasks_.end());
  base::Closure callback = it->second;
  signal_tasks_.erase(it);
  callback.Run();
}

void CommandBufferProxyImpl::SetContextLostCallback(
    const base::Closure& callback) {
  context_lost_callback_ = callback;
}

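// Allocates and maps the shared-state memory used to mirror the command
// buffer state, duplicates its handle for the GPU process, and sends the
// Initialize IPC, which also returns the service-side capabilities.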
bool CommandBufferProxyImpl::Initialize() {
  TRACE_EVENT0("gpu", "CommandBufferProxyImpl::Initialize");
  shared_state_shm_.reset(channel_->factory()->AllocateSharedMemory(
      sizeof(*shared_state())).release());
  if (!shared_state_shm_)
    return false;

  if (!shared_state_shm_->Map(sizeof(*shared_state())))
    return false;

  shared_state()->Initialize();

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the Initialize IPC below.
  base::SharedMemoryHandle handle =
      channel_->ShareToGpuProcess(shared_state_shm_->handle());
  if (!base::SharedMemory::IsHandleValid(handle))
    return false;

  bool result = false;
  if (!Send(new GpuCommandBufferMsg_Initialize(
          route_id_, handle, &result, &capabilities_))) {
    LOG(ERROR) << "Could not send GpuCommandBufferMsg_Initialize.";
    return false;
  }

  if (!result) {
    LOG(ERROR) << "Failed to initialize command buffer service.";
    return false;
  }

  capabilities_.image = true;

  return true;
}

gpu::CommandBuffer::State CommandBufferProxyImpl::GetLastState() {
  return last_state_;
}

int32 CommandBufferProxyImpl::GetLastToken() {
  TryUpdateState();
  return last_state_.token;
}

void CommandBufferProxyImpl::Flush(int32 put_offset) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  TRACE_EVENT1("gpu",
               "CommandBufferProxyImpl::Flush",
               "put_offset",
               put_offset);

  bool put_offset_changed = last_put_offset_ != put_offset;
  last_put_offset_ = put_offset;
  last_barrier_put_offset_ = put_offset;

  if (channel_) {
    channel_->OrderingBarrier(route_id_, stream_id_, put_offset, ++flush_count_,
                              latency_info_, put_offset_changed, true);
  }

  if (put_offset_changed)
    latency_info_.clear();
}

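// OrderingBarrier() mirrors Flush() except that it passes false as the last
// argument to GpuChannelHost::OrderingBarrier(); judging from the parallel
// call in Flush() above, this records the new put offset for ordering
// purposes without forcing an immediate flush.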
void CommandBufferProxyImpl::OrderingBarrier(int32 put_offset) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  TRACE_EVENT1("gpu", "CommandBufferProxyImpl::OrderingBarrier", "put_offset",
               put_offset);

  bool put_offset_changed = last_barrier_put_offset_ != put_offset;
  last_barrier_put_offset_ = put_offset;

  if (channel_) {
    channel_->OrderingBarrier(route_id_, stream_id_, put_offset, ++flush_count_,
                              latency_info_, put_offset_changed, false);
  }

  if (put_offset_changed)
    latency_info_.clear();
}

void CommandBufferProxyImpl::SetLatencyInfo(
    const std::vector<ui::LatencyInfo>& latency_info) {
  for (size_t i = 0; i < latency_info.size(); i++)
    latency_info_.push_back(latency_info[i]);
}

void CommandBufferProxyImpl::SetSwapBuffersCompletionCallback(
    const SwapBuffersCompletionCallback& callback) {
  swap_buffers_completion_callback_ = callback;
}

void CommandBufferProxyImpl::SetUpdateVSyncParametersCallback(
    const UpdateVSyncParametersCallback& callback) {
  update_vsync_parameters_completion_callback_ = callback;
}

void CommandBufferProxyImpl::WaitForTokenInRange(int32 start, int32 end) {
  TRACE_EVENT2("gpu",
               "CommandBufferProxyImpl::WaitForToken",
               "start",
               start,
               "end",
               end);
  TryUpdateState();
  if (!InRange(start, end, last_state_.token) &&
      last_state_.error == gpu::error::kNoError) {
    gpu::CommandBuffer::State state;
    if (Send(new GpuCommandBufferMsg_WaitForTokenInRange(
            route_id_, start, end, &state)))
      OnUpdateState(state);
  }
  DCHECK(InRange(start, end, last_state_.token) ||
         last_state_.error != gpu::error::kNoError);
}

void CommandBufferProxyImpl::WaitForGetOffsetInRange(int32 start, int32 end) {
  TRACE_EVENT2("gpu",
               "CommandBufferProxyImpl::WaitForGetOffset",
               "start",
               start,
               "end",
               end);
  TryUpdateState();
  if (!InRange(start, end, last_state_.get_offset) &&
      last_state_.error == gpu::error::kNoError) {
    gpu::CommandBuffer::State state;
    if (Send(new GpuCommandBufferMsg_WaitForGetOffsetInRange(
            route_id_, start, end, &state)))
      OnUpdateState(state);
  }
  DCHECK(InRange(start, end, last_state_.get_offset) ||
         last_state_.error != gpu::error::kNoError);
}

void CommandBufferProxyImpl::SetGetBuffer(int32 shm_id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_, shm_id));
  last_put_offset_ = -1;
}

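// Reserves a transfer buffer ID on the channel, allocates and maps shared
// memory, duplicates the handle for the GPU process, registers it via the
// RegisterTransferBuffer IPC, and wraps the mapping in a gpu::Buffer.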
scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer(
    size_t size,
    int32* id) {
  *id = -1;

  if (last_state_.error != gpu::error::kNoError)
    return NULL;

  int32 new_id = channel_->ReserveTransferBufferId();

  scoped_ptr<base::SharedMemory> shared_memory(
      channel_->factory()->AllocateSharedMemory(size));
  if (!shared_memory) {
    if (last_state_.error == gpu::error::kNoError)
      last_state_.error = gpu::error::kOutOfBounds;
    return NULL;
  }

  DCHECK(!shared_memory->memory());
  if (!shared_memory->Map(size)) {
    if (last_state_.error == gpu::error::kNoError)
      last_state_.error = gpu::error::kOutOfBounds;
    return NULL;
  }

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the RegisterTransferBuffer IPC below.
  base::SharedMemoryHandle handle =
      channel_->ShareToGpuProcess(shared_memory->handle());
  if (!base::SharedMemory::IsHandleValid(handle)) {
    if (last_state_.error == gpu::error::kNoError)
      last_state_.error = gpu::error::kLostContext;
    return NULL;
  }

  if (!Send(new GpuCommandBufferMsg_RegisterTransferBuffer(route_id_,
                                                           new_id,
                                                           handle,
                                                           size))) {
    return NULL;
  }

  scoped_refptr<gpu::Buffer> buffer(
      gpu::MakeBufferFromSharedMemory(shared_memory.Pass(), size));
  *id = new_id;
  return buffer;
}

void CommandBufferProxyImpl::DestroyTransferBuffer(int32 id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id));
}

gpu::Capabilities CommandBufferProxyImpl::GetCapabilities() {
  return capabilities_;
}

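// Reserves an image ID, shares the client's GpuMemoryBuffer handle with the
// GPU process and sends the CreateImage IPC. If sharing the handle requires a
// sync point, a destruction sync point is attached so the buffer outlives any
// GPU-side use.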
int32_t CommandBufferProxyImpl::CreateImage(ClientBuffer buffer,
                                            size_t width,
                                            size_t height,
                                            unsigned internalformat) {
  if (last_state_.error != gpu::error::kNoError)
    return -1;

  int32 new_id = channel_->ReserveImageId();

  gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager =
      channel_->gpu_memory_buffer_manager();
  gfx::GpuMemoryBuffer* gpu_memory_buffer =
      gpu_memory_buffer_manager->GpuMemoryBufferFromClientBuffer(buffer);
  DCHECK(gpu_memory_buffer);

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the CreateImage IPC below.
  bool requires_sync_point = false;
  gfx::GpuMemoryBufferHandle handle =
      channel_->ShareGpuMemoryBufferToGpuProcess(gpu_memory_buffer->GetHandle(),
                                                 &requires_sync_point);

  DCHECK(gpu::ImageFactory::IsGpuMemoryBufferFormatSupported(
      gpu_memory_buffer->GetFormat(), capabilities_));
  DCHECK(gpu::ImageFactory::IsImageSizeValidForGpuMemoryBufferFormat(
      gfx::Size(width, height), gpu_memory_buffer->GetFormat()));
  DCHECK(gpu::ImageFactory::IsImageFormatCompatibleWithGpuMemoryBufferFormat(
      internalformat, gpu_memory_buffer->GetFormat()));
  if (!Send(new GpuCommandBufferMsg_CreateImage(route_id_,
                                                new_id,
                                                handle,
                                                gfx::Size(width, height),
                                                gpu_memory_buffer->GetFormat(),
                                                internalformat))) {
    return -1;
  }

  if (requires_sync_point) {
    gpu_memory_buffer_manager->SetDestructionSyncPoint(gpu_memory_buffer,
                                                       InsertSyncPoint());
  }

  return new_id;
}

void CommandBufferProxyImpl::DestroyImage(int32 id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_DestroyImage(route_id_, id));
}

int32_t CommandBufferProxyImpl::CreateGpuMemoryBufferImage(
    size_t width,
    size_t height,
    unsigned internalformat,
    unsigned usage) {
  scoped_ptr<gfx::GpuMemoryBuffer> buffer(
      channel_->gpu_memory_buffer_manager()->AllocateGpuMemoryBuffer(
          gfx::Size(width, height),
          gpu::ImageFactory::DefaultBufferFormatForImageFormat(internalformat),
          gpu::ImageFactory::ImageUsageToGpuMemoryBufferUsage(usage)));
  if (!buffer)
    return -1;

  return CreateImage(buffer->AsClientBuffer(), width, height, internalformat);
}

uint32 CommandBufferProxyImpl::CreateStreamTexture(uint32 texture_id) {
  if (last_state_.error != gpu::error::kNoError)
    return 0;

  int32 stream_id = channel_->GenerateRouteID();
  bool succeeded = false;
  Send(new GpuCommandBufferMsg_CreateStreamTexture(
      route_id_, texture_id, stream_id, &succeeded));
  if (!succeeded) {
    DLOG(ERROR) << "GpuCommandBufferMsg_CreateStreamTexture returned failure";
    return 0;
  }
  return stream_id;
}

void CommandBufferProxyImpl::SetLock(base::Lock* lock) {
  lock_ = lock;
}

bool CommandBufferProxyImpl::IsGpuChannelLost() {
  return !channel_ || channel_->IsLost();
}

gpu::CommandBufferNamespace CommandBufferProxyImpl::GetNamespaceID() const {
  return gpu::CommandBufferNamespace::GPU_IO;
}

uint64_t CommandBufferProxyImpl::GetCommandBufferID() const {
  return command_buffer_id_;
}

uint32 CommandBufferProxyImpl::InsertSyncPoint() {
  if (last_state_.error != gpu::error::kNoError)
    return 0;

  uint32 sync_point = 0;
  Send(new GpuCommandBufferMsg_InsertSyncPoint(route_id_, true, &sync_point));
  return sync_point;
}

uint32_t CommandBufferProxyImpl::InsertFutureSyncPoint() {
  if (last_state_.error != gpu::error::kNoError)
    return 0;

  uint32 sync_point = 0;
  Send(new GpuCommandBufferMsg_InsertSyncPoint(route_id_, false, &sync_point));
  return sync_point;
}

void CommandBufferProxyImpl::RetireSyncPoint(uint32_t sync_point) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_RetireSyncPoint(route_id_, sync_point));
}

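// SignalSyncPoint() and SignalQuery() register |callback| in signal_tasks_
// under a locally generated signal ID; OnSignalSyncPointAck() looks the ID up
// and runs the callback once the GPU process acknowledges the signal.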
void CommandBufferProxyImpl::SignalSyncPoint(uint32 sync_point,
                                             const base::Closure& callback) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  uint32 signal_id = next_signal_id_++;
  if (!Send(new GpuCommandBufferMsg_SignalSyncPoint(route_id_,
                                                    sync_point,
                                                    signal_id))) {
    return;
  }

  signal_tasks_.insert(std::make_pair(signal_id, callback));
}

void CommandBufferProxyImpl::SignalQuery(uint32 query,
                                         const base::Closure& callback) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  // Signal identifiers are hidden, so nobody outside of this class will see
  // them. (And thus, they cannot save them.) The IDs themselves only last
  // until the callback is invoked, which will happen as soon as the GPU
  // catches up with the command buffer.
  // A malicious caller trying to create a collision by making next_signal_id
  // wrap would have to make calls at an astounding rate (300B/s) and even if
  // they could do that, all they would do is to prevent some callbacks from
  // getting called, leading to stalled threads and/or memory leaks.
  uint32 signal_id = next_signal_id_++;
  if (!Send(new GpuCommandBufferMsg_SignalQuery(route_id_,
                                                query,
                                                signal_id))) {
    return;
  }

  signal_tasks_.insert(std::make_pair(signal_id, callback));
}

void CommandBufferProxyImpl::SetSurfaceVisible(bool visible) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_SetSurfaceVisible(route_id_, visible));
}

bool CommandBufferProxyImpl::ProduceFrontBuffer(const gpu::Mailbox& mailbox) {
  if (last_state_.error != gpu::error::kNoError)
    return false;

  return Send(new GpuCommandBufferMsg_ProduceFrontBuffer(route_id_, mailbox));
}

scoped_ptr<media::VideoDecodeAccelerator>
CommandBufferProxyImpl::CreateVideoDecoder() {
  if (!channel_)
    return scoped_ptr<media::VideoDecodeAccelerator>();
  return scoped_ptr<media::VideoDecodeAccelerator>(
      new GpuVideoDecodeAcceleratorHost(channel_, this));
}

scoped_ptr<media::VideoEncodeAccelerator>
CommandBufferProxyImpl::CreateVideoEncoder() {
  if (!channel_)
    return scoped_ptr<media::VideoEncodeAccelerator>();
  return scoped_ptr<media::VideoEncodeAccelerator>(
      new GpuVideoEncodeAcceleratorHost(channel_, this));
}

gpu::error::Error CommandBufferProxyImpl::GetLastError() {
  return last_state_.error;
}

bool CommandBufferProxyImpl::Send(IPC::Message* msg) {
  // Caller should not intentionally send a message if the context is lost.
  DCHECK(last_state_.error == gpu::error::kNoError);

  if (channel_) {
    if (channel_->Send(msg)) {
      return true;
    }
    // Flag the command buffer as lost. Defer deleting the channel until
    // OnChannelError is called after returning to the message loop in case
    // it is referenced elsewhere.
    DVLOG(1) << "CommandBufferProxyImpl::Send failed. Losing context.";
    last_state_.error = gpu::error::kLostContext;
    return false;
  }

  // Callee takes ownership of message, regardless of whether Send is
  // successful. See IPC::Sender.
  delete msg;
  return false;
}

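// Note on the generation check below: the subtraction is performed on
// unsigned values, so a difference below 0x80000000 means |state| is at least
// as new as |last_state_| even after the generation counter wraps around.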
void CommandBufferProxyImpl::OnUpdateState(
    const gpu::CommandBuffer::State& state) {
  // Handle wraparound. It works as long as we don't have more than 2B state
  // updates in flight across which reordering occurs.
  if (state.generation - last_state_.generation < 0x80000000U)
    last_state_ = state;
}

void CommandBufferProxyImpl::SetOnConsoleMessageCallback(
    const GpuConsoleMessageCallback& callback) {
  console_message_callback_ = callback;
}

void CommandBufferProxyImpl::TryUpdateState() {
  if (last_state_.error == gpu::error::kNoError)
    shared_state()->Read(&last_state_);
}

gpu::CommandBufferSharedState* CommandBufferProxyImpl::shared_state() const {
  return reinterpret_cast<gpu::CommandBufferSharedState*>(
      shared_state_shm_->memory());
}

void CommandBufferProxyImpl::OnSwapBuffersCompleted(
    const std::vector<ui::LatencyInfo>& latency_info,
    gfx::SwapResult result) {
  if (!swap_buffers_completion_callback_.is_null()) {
    if (!ui::LatencyInfo::Verify(
            latency_info, "CommandBufferProxyImpl::OnSwapBuffersCompleted")) {
      swap_buffers_completion_callback_.Run(std::vector<ui::LatencyInfo>(),
                                            result);
      return;
    }
    swap_buffers_completion_callback_.Run(latency_info, result);
  }
}

void CommandBufferProxyImpl::OnUpdateVSyncParameters(base::TimeTicks timebase,
                                                     base::TimeDelta interval) {
  if (!update_vsync_parameters_completion_callback_.is_null())
    update_vsync_parameters_completion_callback_.Run(timebase, interval);
}

}  // namespace content