1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "content/common/gpu/client/command_buffer_proxy_impl.h"
9 #include "base/callback.h"
10 #include "base/logging.h"
11 #include "base/memory/shared_memory.h"
12 #include "base/stl_util.h"
13 #include "base/trace_event/trace_event.h"
14 #include "content/common/child_process_messages.h"
15 #include "content/common/gpu/client/gpu_channel_host.h"
16 #include "content/common/gpu/client/gpu_video_decode_accelerator_host.h"
17 #include "content/common/gpu/client/gpu_video_encode_accelerator_host.h"
18 #include "content/common/gpu/gpu_messages.h"
19 #include "content/common/view_messages.h"
20 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
21 #include "gpu/command_buffer/common/cmd_buffer_common.h"
22 #include "gpu/command_buffer/common/command_buffer_shared.h"
23 #include "gpu/command_buffer/common/gpu_memory_allocation.h"
24 #include "gpu/command_buffer/service/image_factory.h"
25 #include "ui/gfx/geometry/size.h"
26 #include "ui/gl/gl_bindings.h"
// Constructor: binds this proxy to the GpuChannelHost carrying its IPC
// traffic.
// NOTE(review): this extraction is truncated — the parameter list tail and
// most of the member-initializer list are missing; only the
// last_barrier_put_offset_(-1) initializer survived. Confirm against the
// full file before editing; do not treat the fragment below as complete.
30 CommandBufferProxyImpl::CommandBufferProxyImpl(GpuChannelHost
* channel
,
37 last_barrier_put_offset_(-1),
41 CommandBufferProxyImpl::~CommandBufferProxyImpl() {
42 FOR_EACH_OBSERVER(DeletionObserver
,
47 bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message
& message
) {
48 scoped_ptr
<base::AutoLock
> lock
;
50 lock
.reset(new base::AutoLock(*lock_
));
52 IPC_BEGIN_MESSAGE_MAP(CommandBufferProxyImpl
, message
)
53 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Destroyed
, OnDestroyed
);
54 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ConsoleMsg
, OnConsoleMessage
);
55 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetMemoryAllocation
,
56 OnSetMemoryAllocation
);
57 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPointAck
,
58 OnSignalSyncPointAck
);
59 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SwapBuffersCompleted
,
60 OnSwapBuffersCompleted
);
61 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_UpdateVSyncParameters
,
62 OnUpdateVSyncParameters
);
63 IPC_MESSAGE_UNHANDLED(handled
= false)
70 void CommandBufferProxyImpl::OnChannelError() {
71 scoped_ptr
<base::AutoLock
> lock
;
73 lock
.reset(new base::AutoLock(*lock_
));
74 OnDestroyed(gpu::error::kUnknown
);
77 void CommandBufferProxyImpl::OnDestroyed(gpu::error::ContextLostReason reason
) {
79 // Prevent any further messages from being sent.
82 // When the client sees that the context is lost, they should delete this
83 // CommandBufferProxyImpl and create a new one.
84 last_state_
.error
= gpu::error::kLostContext
;
85 last_state_
.context_lost_reason
= reason
;
87 if (!channel_error_callback_
.is_null()) {
88 channel_error_callback_
.Run();
89 // Avoid calling the error callback more than once.
90 channel_error_callback_
.Reset();
94 void CommandBufferProxyImpl::OnConsoleMessage(
95 const GPUCommandBufferConsoleMessage
& message
) {
96 if (!console_message_callback_
.is_null()) {
97 console_message_callback_
.Run(message
.message
, message
.id
);
101 void CommandBufferProxyImpl::SetMemoryAllocationChangedCallback(
102 const MemoryAllocationChangedCallback
& callback
) {
104 if (last_state_
.error
!= gpu::error::kNoError
)
107 memory_allocation_changed_callback_
= callback
;
108 Send(new GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback(
109 route_id_
, !memory_allocation_changed_callback_
.is_null()));
112 void CommandBufferProxyImpl::AddDeletionObserver(DeletionObserver
* observer
) {
114 deletion_observers_
.AddObserver(observer
);
117 void CommandBufferProxyImpl::RemoveDeletionObserver(
118 DeletionObserver
* observer
) {
120 deletion_observers_
.RemoveObserver(observer
);
123 void CommandBufferProxyImpl::OnSetMemoryAllocation(
124 const gpu::MemoryAllocation
& allocation
) {
125 if (!memory_allocation_changed_callback_
.is_null())
126 memory_allocation_changed_callback_
.Run(allocation
);
129 void CommandBufferProxyImpl::OnSignalSyncPointAck(uint32 id
) {
130 SignalTaskMap::iterator it
= signal_tasks_
.find(id
);
131 DCHECK(it
!= signal_tasks_
.end());
132 base::Closure callback
= it
->second
;
133 signal_tasks_
.erase(it
);
137 void CommandBufferProxyImpl::SetChannelErrorCallback(
138 const base::Closure
& callback
) {
140 channel_error_callback_
= callback
;
143 bool CommandBufferProxyImpl::Initialize() {
144 TRACE_EVENT0("gpu", "CommandBufferProxyImpl::Initialize");
145 shared_state_shm_
.reset(channel_
->factory()->AllocateSharedMemory(
146 sizeof(*shared_state())).release());
147 if (!shared_state_shm_
)
150 if (!shared_state_shm_
->Map(sizeof(*shared_state())))
153 shared_state()->Initialize();
155 // This handle is owned by the GPU process and must be passed to it or it
156 // will leak. In otherwords, do not early out on error between here and the
157 // sending of the Initialize IPC below.
158 base::SharedMemoryHandle handle
=
159 channel_
->ShareToGpuProcess(shared_state_shm_
->handle());
160 if (!base::SharedMemory::IsHandleValid(handle
))
164 if (!Send(new GpuCommandBufferMsg_Initialize(
165 route_id_
, handle
, &result
, &capabilities_
))) {
166 LOG(ERROR
) << "Could not send GpuCommandBufferMsg_Initialize.";
171 LOG(ERROR
) << "Failed to initialize command buffer service.";
175 capabilities_
.image
= true;
180 gpu::CommandBuffer::State
CommandBufferProxyImpl::GetLastState() {
184 int32
CommandBufferProxyImpl::GetLastToken() {
186 return last_state_
.token
;
189 void CommandBufferProxyImpl::Flush(int32 put_offset
) {
191 if (last_state_
.error
!= gpu::error::kNoError
)
195 "CommandBufferProxyImpl::Flush",
199 bool put_offset_changed
= last_put_offset_
!= put_offset
;
200 last_put_offset_
= put_offset
;
201 last_barrier_put_offset_
= put_offset
;
204 channel_
->OrderingBarrier(route_id_
, put_offset
, ++flush_count_
,
205 latency_info_
, put_offset_changed
, true);
208 if (put_offset_changed
)
209 latency_info_
.clear();
212 void CommandBufferProxyImpl::OrderingBarrier(int32 put_offset
) {
213 if (last_state_
.error
!= gpu::error::kNoError
)
216 TRACE_EVENT1("gpu", "CommandBufferProxyImpl::OrderingBarrier", "put_offset",
219 bool put_offset_changed
= last_barrier_put_offset_
!= put_offset
;
220 last_barrier_put_offset_
= put_offset
;
223 channel_
->OrderingBarrier(route_id_
, put_offset
, ++flush_count_
,
224 latency_info_
, put_offset_changed
, false);
227 if (put_offset_changed
)
228 latency_info_
.clear();
231 void CommandBufferProxyImpl::SetLatencyInfo(
232 const std::vector
<ui::LatencyInfo
>& latency_info
) {
234 for (size_t i
= 0; i
< latency_info
.size(); i
++)
235 latency_info_
.push_back(latency_info
[i
]);
238 void CommandBufferProxyImpl::SetSwapBuffersCompletionCallback(
239 const SwapBuffersCompletionCallback
& callback
) {
241 swap_buffers_completion_callback_
= callback
;
244 void CommandBufferProxyImpl::SetUpdateVSyncParametersCallback(
245 const UpdateVSyncParametersCallback
& callback
) {
247 update_vsync_parameters_completion_callback_
= callback
;
250 void CommandBufferProxyImpl::WaitForTokenInRange(int32 start
, int32 end
) {
253 "CommandBufferProxyImpl::WaitForToken",
259 if (!InRange(start
, end
, last_state_
.token
) &&
260 last_state_
.error
== gpu::error::kNoError
) {
261 gpu::CommandBuffer::State state
;
262 if (Send(new GpuCommandBufferMsg_WaitForTokenInRange(
263 route_id_
, start
, end
, &state
)))
264 OnUpdateState(state
);
266 DCHECK(InRange(start
, end
, last_state_
.token
) ||
267 last_state_
.error
!= gpu::error::kNoError
);
270 void CommandBufferProxyImpl::WaitForGetOffsetInRange(int32 start
, int32 end
) {
273 "CommandBufferProxyImpl::WaitForGetOffset",
279 if (!InRange(start
, end
, last_state_
.get_offset
) &&
280 last_state_
.error
== gpu::error::kNoError
) {
281 gpu::CommandBuffer::State state
;
282 if (Send(new GpuCommandBufferMsg_WaitForGetOffsetInRange(
283 route_id_
, start
, end
, &state
)))
284 OnUpdateState(state
);
286 DCHECK(InRange(start
, end
, last_state_
.get_offset
) ||
287 last_state_
.error
!= gpu::error::kNoError
);
290 void CommandBufferProxyImpl::SetGetBuffer(int32 shm_id
) {
292 if (last_state_
.error
!= gpu::error::kNoError
)
295 Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_
, shm_id
));
296 last_put_offset_
= -1;
299 scoped_refptr
<gpu::Buffer
> CommandBufferProxyImpl::CreateTransferBuffer(
305 if (last_state_
.error
!= gpu::error::kNoError
)
308 int32 new_id
= channel_
->ReserveTransferBufferId();
310 scoped_ptr
<base::SharedMemory
> shared_memory(
311 channel_
->factory()->AllocateSharedMemory(size
));
315 DCHECK(!shared_memory
->memory());
316 if (!shared_memory
->Map(size
))
319 // This handle is owned by the GPU process and must be passed to it or it
320 // will leak. In otherwords, do not early out on error between here and the
321 // sending of the RegisterTransferBuffer IPC below.
322 base::SharedMemoryHandle handle
=
323 channel_
->ShareToGpuProcess(shared_memory
->handle());
324 if (!base::SharedMemory::IsHandleValid(handle
))
327 if (!Send(new GpuCommandBufferMsg_RegisterTransferBuffer(route_id_
,
335 scoped_refptr
<gpu::Buffer
> buffer(
336 gpu::MakeBufferFromSharedMemory(shared_memory
.Pass(), size
));
340 void CommandBufferProxyImpl::DestroyTransferBuffer(int32 id
) {
342 if (last_state_
.error
!= gpu::error::kNoError
)
345 Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_
, id
));
348 gpu::Capabilities
CommandBufferProxyImpl::GetCapabilities() {
349 return capabilities_
;
352 int32_t CommandBufferProxyImpl::CreateImage(ClientBuffer buffer
,
355 unsigned internalformat
) {
357 if (last_state_
.error
!= gpu::error::kNoError
)
360 int32 new_id
= channel_
->ReserveImageId();
362 gpu::GpuMemoryBufferManager
* gpu_memory_buffer_manager
=
363 channel_
->gpu_memory_buffer_manager();
364 gfx::GpuMemoryBuffer
* gpu_memory_buffer
=
365 gpu_memory_buffer_manager
->GpuMemoryBufferFromClientBuffer(buffer
);
366 DCHECK(gpu_memory_buffer
);
368 // This handle is owned by the GPU process and must be passed to it or it
369 // will leak. In otherwords, do not early out on error between here and the
370 // sending of the CreateImage IPC below.
371 bool requires_sync_point
= false;
372 gfx::GpuMemoryBufferHandle handle
=
373 channel_
->ShareGpuMemoryBufferToGpuProcess(gpu_memory_buffer
->GetHandle(),
374 &requires_sync_point
);
376 DCHECK(gpu::ImageFactory::IsGpuMemoryBufferFormatSupported(
377 gpu_memory_buffer
->GetFormat(), capabilities_
));
378 DCHECK(gpu::ImageFactory::IsImageSizeValidForGpuMemoryBufferFormat(
379 gfx::Size(width
, height
), gpu_memory_buffer
->GetFormat()));
380 DCHECK(gpu::ImageFactory::IsImageFormatCompatibleWithGpuMemoryBufferFormat(
381 internalformat
, gpu_memory_buffer
->GetFormat()));
382 if (!Send(new GpuCommandBufferMsg_CreateImage(route_id_
,
385 gfx::Size(width
, height
),
386 gpu_memory_buffer
->GetFormat(),
391 if (requires_sync_point
) {
392 gpu_memory_buffer_manager
->SetDestructionSyncPoint(gpu_memory_buffer
,
399 void CommandBufferProxyImpl::DestroyImage(int32 id
) {
401 if (last_state_
.error
!= gpu::error::kNoError
)
404 Send(new GpuCommandBufferMsg_DestroyImage(route_id_
, id
));
407 int32_t CommandBufferProxyImpl::CreateGpuMemoryBufferImage(
410 unsigned internalformat
,
413 scoped_ptr
<gfx::GpuMemoryBuffer
> buffer(
414 channel_
->gpu_memory_buffer_manager()->AllocateGpuMemoryBuffer(
415 gfx::Size(width
, height
),
416 gpu::ImageFactory::ImageFormatToGpuMemoryBufferFormat(internalformat
),
417 gpu::ImageFactory::ImageUsageToGpuMemoryBufferUsage(usage
)));
421 return CreateImage(buffer
->AsClientBuffer(), width
, height
, internalformat
);
424 int CommandBufferProxyImpl::GetRouteID() const {
428 uint32
CommandBufferProxyImpl::CreateStreamTexture(uint32 texture_id
) {
430 if (last_state_
.error
!= gpu::error::kNoError
)
433 int32 stream_id
= channel_
->GenerateRouteID();
434 bool succeeded
= false;
435 Send(new GpuCommandBufferMsg_CreateStreamTexture(
436 route_id_
, texture_id
, stream_id
, &succeeded
));
438 DLOG(ERROR
) << "GpuCommandBufferMsg_CreateStreamTexture returned failure";
444 void CommandBufferProxyImpl::SetLock(base::Lock
* lock
) {
448 uint32
CommandBufferProxyImpl::InsertSyncPoint() {
450 if (last_state_
.error
!= gpu::error::kNoError
)
453 uint32 sync_point
= 0;
454 Send(new GpuCommandBufferMsg_InsertSyncPoint(route_id_
, true, &sync_point
));
458 uint32_t CommandBufferProxyImpl::InsertFutureSyncPoint() {
460 if (last_state_
.error
!= gpu::error::kNoError
)
463 uint32 sync_point
= 0;
464 Send(new GpuCommandBufferMsg_InsertSyncPoint(route_id_
, false, &sync_point
));
468 void CommandBufferProxyImpl::RetireSyncPoint(uint32_t sync_point
) {
470 if (last_state_
.error
!= gpu::error::kNoError
)
473 Send(new GpuCommandBufferMsg_RetireSyncPoint(route_id_
, sync_point
));
476 void CommandBufferProxyImpl::SignalSyncPoint(uint32 sync_point
,
477 const base::Closure
& callback
) {
479 if (last_state_
.error
!= gpu::error::kNoError
)
482 uint32 signal_id
= next_signal_id_
++;
483 if (!Send(new GpuCommandBufferMsg_SignalSyncPoint(route_id_
,
489 signal_tasks_
.insert(std::make_pair(signal_id
, callback
));
492 void CommandBufferProxyImpl::SignalQuery(uint32 query
,
493 const base::Closure
& callback
) {
495 if (last_state_
.error
!= gpu::error::kNoError
)
498 // Signal identifiers are hidden, so nobody outside of this class will see
499 // them. (And thus, they cannot save them.) The IDs themselves only last
500 // until the callback is invoked, which will happen as soon as the GPU
501 // catches upwith the command buffer.
502 // A malicious caller trying to create a collision by making next_signal_id
503 // would have to make calls at an astounding rate (300B/s) and even if they
504 // could do that, all they would do is to prevent some callbacks from getting
505 // called, leading to stalled threads and/or memory leaks.
506 uint32 signal_id
= next_signal_id_
++;
507 if (!Send(new GpuCommandBufferMsg_SignalQuery(route_id_
,
513 signal_tasks_
.insert(std::make_pair(signal_id
, callback
));
516 void CommandBufferProxyImpl::SetSurfaceVisible(bool visible
) {
518 if (last_state_
.error
!= gpu::error::kNoError
)
521 Send(new GpuCommandBufferMsg_SetSurfaceVisible(route_id_
, visible
));
524 bool CommandBufferProxyImpl::ProduceFrontBuffer(const gpu::Mailbox
& mailbox
) {
526 if (last_state_
.error
!= gpu::error::kNoError
)
529 return Send(new GpuCommandBufferMsg_ProduceFrontBuffer(route_id_
, mailbox
));
532 scoped_ptr
<media::VideoDecodeAccelerator
>
533 CommandBufferProxyImpl::CreateVideoDecoder() {
535 return scoped_ptr
<media::VideoDecodeAccelerator
>();
536 return scoped_ptr
<media::VideoDecodeAccelerator
>(
537 new GpuVideoDecodeAcceleratorHost(channel_
, this));
540 scoped_ptr
<media::VideoEncodeAccelerator
>
541 CommandBufferProxyImpl::CreateVideoEncoder() {
543 return scoped_ptr
<media::VideoEncodeAccelerator
>();
544 return scoped_ptr
<media::VideoEncodeAccelerator
>(
545 new GpuVideoEncodeAcceleratorHost(channel_
, this));
548 gpu::error::Error
CommandBufferProxyImpl::GetLastError() {
549 return last_state_
.error
;
552 bool CommandBufferProxyImpl::Send(IPC::Message
* msg
) {
553 // Caller should not intentionally send a message if the context is lost.
554 DCHECK(last_state_
.error
== gpu::error::kNoError
);
557 if (channel_
->Send(msg
)) {
560 // Flag the command buffer as lost. Defer deleting the channel until
561 // OnChannelError is called after returning to the message loop in case
562 // it is referenced elsewhere.
563 DVLOG(1) << "CommandBufferProxyImpl::Send failed. Losing context.";
564 last_state_
.error
= gpu::error::kLostContext
;
569 // Callee takes ownership of message, regardless of whether Send is
570 // successful. See IPC::Sender.
575 void CommandBufferProxyImpl::OnUpdateState(
576 const gpu::CommandBuffer::State
& state
) {
577 // Handle wraparound. It works as long as we don't have more than 2B state
578 // updates in flight across which reordering occurs.
579 if (state
.generation
- last_state_
.generation
< 0x80000000U
)
583 void CommandBufferProxyImpl::SetOnConsoleMessageCallback(
584 const GpuConsoleMessageCallback
& callback
) {
586 console_message_callback_
= callback
;
589 void CommandBufferProxyImpl::TryUpdateState() {
590 if (last_state_
.error
== gpu::error::kNoError
)
591 shared_state()->Read(&last_state_
);
594 gpu::CommandBufferSharedState
* CommandBufferProxyImpl::shared_state() const {
595 return reinterpret_cast<gpu::CommandBufferSharedState
*>(
596 shared_state_shm_
->memory());
599 void CommandBufferProxyImpl::OnSwapBuffersCompleted(
600 const std::vector
<ui::LatencyInfo
>& latency_info
) {
601 if (!swap_buffers_completion_callback_
.is_null()) {
602 if (!ui::LatencyInfo::Verify(
603 latency_info
, "CommandBufferProxyImpl::OnSwapBuffersCompleted")) {
604 swap_buffers_completion_callback_
.Run(std::vector
<ui::LatencyInfo
>());
607 swap_buffers_completion_callback_
.Run(latency_info
);
611 void CommandBufferProxyImpl::OnUpdateVSyncParameters(base::TimeTicks timebase
,
612 base::TimeDelta interval
) {
613 if (!update_vsync_parameters_completion_callback_
.is_null())
614 update_vsync_parameters_completion_callback_
.Run(timebase
, interval
);
617 } // namespace content