// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/client/command_buffer_proxy_impl.h"

#include "base/callback.h"
#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "base/memory/shared_memory.h"
#include "base/stl_util.h"
#include "content/common/child_process_messages.h"
#include "content/common/gpu/client/gpu_channel_host.h"
#include "content/common/gpu/client/gpu_video_decode_accelerator_host.h"
#include "content/common/gpu/client/gpu_video_encode_accelerator_host.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/view_messages.h"
#include "gpu/command_buffer/common/cmd_buffer_common.h"
#include "gpu/command_buffer/common/command_buffer_shared.h"
#include "gpu/command_buffer/common/gpu_memory_allocation.h"
#include "ui/gfx/size.h"

namespace content {

CommandBufferProxyImpl::CommandBufferProxyImpl(
    GpuChannelHost* channel,
    int route_id)
    : channel_(channel),
      route_id_(route_id),
      flush_count_(0),
      last_put_offset_(-1),
      next_signal_id_(0) {
}

CommandBufferProxyImpl::~CommandBufferProxyImpl() {
  FOR_EACH_OBSERVER(DeletionObserver,
                    deletion_observers_,
                    OnWillDeleteImpl());
}

bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(CommandBufferProxyImpl, message)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Destroyed, OnDestroyed);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_EchoAck, OnEchoAck);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ConsoleMsg, OnConsoleMessage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetMemoryAllocation,
                        OnSetMemoryAllocation);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPointAck,
                        OnSignalSyncPointAck);
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  DCHECK(handled);
  return handled;
}

void CommandBufferProxyImpl::OnChannelError() {
  OnDestroyed(gpu::error::kUnknown);
}

void CommandBufferProxyImpl::OnDestroyed(gpu::error::ContextLostReason reason) {
  // Prevent any further messages from being sent.
  channel_ = NULL;

  // When the client sees that the context is lost, they should delete this
  // CommandBufferProxyImpl and create a new one.
  last_state_.error = gpu::error::kLostContext;
  last_state_.context_lost_reason = reason;

  if (!channel_error_callback_.is_null()) {
    channel_error_callback_.Run();
    // Avoid calling the error callback more than once.
    channel_error_callback_.Reset();
  }
}

void CommandBufferProxyImpl::OnEchoAck() {
  DCHECK(!echo_tasks_.empty());
  base::Closure callback = echo_tasks_.front();
  echo_tasks_.pop();
  callback.Run();
}

void CommandBufferProxyImpl::OnConsoleMessage(
    const GPUCommandBufferConsoleMessage& message) {
  if (!console_message_callback_.is_null()) {
    console_message_callback_.Run(message.message, message.id);
  }
}

void CommandBufferProxyImpl::SetMemoryAllocationChangedCallback(
    const MemoryAllocationChangedCallback& callback) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  memory_allocation_changed_callback_ = callback;
  Send(new GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback(
      route_id_, !memory_allocation_changed_callback_.is_null()));
}

void CommandBufferProxyImpl::AddDeletionObserver(DeletionObserver* observer) {
  deletion_observers_.AddObserver(observer);
}

void CommandBufferProxyImpl::RemoveDeletionObserver(
    DeletionObserver* observer) {
  deletion_observers_.RemoveObserver(observer);
}

void CommandBufferProxyImpl::OnSetMemoryAllocation(
    const gpu::MemoryAllocation& allocation) {
  if (!memory_allocation_changed_callback_.is_null())
    memory_allocation_changed_callback_.Run(allocation);
}

void CommandBufferProxyImpl::OnSignalSyncPointAck(uint32 id) {
  SignalTaskMap::iterator it = signal_tasks_.find(id);
  DCHECK(it != signal_tasks_.end());
  base::Closure callback = it->second;
  signal_tasks_.erase(it);
  callback.Run();
}

void CommandBufferProxyImpl::SetChannelErrorCallback(
    const base::Closure& callback) {
  channel_error_callback_ = callback;
}

bool CommandBufferProxyImpl::Initialize() {
  TRACE_EVENT0("gpu", "CommandBufferProxyImpl::Initialize");
  shared_state_shm_.reset(channel_->factory()->AllocateSharedMemory(
      sizeof(*shared_state())).release());
  if (!shared_state_shm_)
    return false;

  if (!shared_state_shm_->Map(sizeof(*shared_state())))
    return false;

  shared_state()->Initialize();

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the Initialize IPC below.
  base::SharedMemoryHandle handle =
      channel_->ShareToGpuProcess(shared_state_shm_->handle());
  if (!base::SharedMemory::IsHandleValid(handle))
    return false;

  bool result = false;
  if (!Send(new GpuCommandBufferMsg_Initialize(
      route_id_, handle, &result, &capabilities_))) {
    LOG(ERROR) << "Could not send GpuCommandBufferMsg_Initialize.";
    return false;
  }

  if (!result) {
    LOG(ERROR) << "Failed to initialize command buffer service.";
    return false;
  }

  capabilities_.map_image = true;

  return true;
}

gpu::CommandBuffer::State CommandBufferProxyImpl::GetLastState() {
  return last_state_;
}

int32 CommandBufferProxyImpl::GetLastToken() {
  TryUpdateState();
  return last_state_.token;
}

void CommandBufferProxyImpl::Flush(int32 put_offset) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  TRACE_EVENT1("gpu",
               "CommandBufferProxyImpl::Flush",
               "put_offset",
               put_offset);

  if (last_put_offset_ == put_offset)
    return;

  last_put_offset_ = put_offset;

  Send(new GpuCommandBufferMsg_AsyncFlush(route_id_,
                                          put_offset,
                                          ++flush_count_));
}

void CommandBufferProxyImpl::SetLatencyInfo(
    const std::vector<ui::LatencyInfo>& latency_info) {
  if (last_state_.error != gpu::error::kNoError)
    return;
  Send(new GpuCommandBufferMsg_SetLatencyInfo(route_id_, latency_info));
}

void CommandBufferProxyImpl::WaitForTokenInRange(int32 start, int32 end) {
  TRACE_EVENT2("gpu",
               "CommandBufferProxyImpl::WaitForToken",
               "start",
               start,
               "end",
               end);
  TryUpdateState();
  if (!InRange(start, end, last_state_.token) &&
      last_state_.error == gpu::error::kNoError) {
    gpu::CommandBuffer::State state;
    if (Send(new GpuCommandBufferMsg_WaitForTokenInRange(
            route_id_, start, end, &state)))
      OnUpdateState(state);
  }
  DCHECK(InRange(start, end, last_state_.token) ||
         last_state_.error != gpu::error::kNoError);
}

void CommandBufferProxyImpl::WaitForGetOffsetInRange(int32 start, int32 end) {
  TRACE_EVENT2("gpu",
               "CommandBufferProxyImpl::WaitForGetOffset",
               "start",
               start,
               "end",
               end);
  TryUpdateState();
  if (!InRange(start, end, last_state_.get_offset) &&
      last_state_.error == gpu::error::kNoError) {
    gpu::CommandBuffer::State state;
    if (Send(new GpuCommandBufferMsg_WaitForGetOffsetInRange(
            route_id_, start, end, &state)))
      OnUpdateState(state);
  }
  DCHECK(InRange(start, end, last_state_.get_offset) ||
         last_state_.error != gpu::error::kNoError);
}

void CommandBufferProxyImpl::SetGetBuffer(int32 shm_id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_, shm_id));
  last_put_offset_ = -1;
}

scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer(
    size_t size,
    int32* id) {
  *id = -1;

  if (last_state_.error != gpu::error::kNoError)
    return NULL;

  int32 new_id = channel_->ReserveTransferBufferId();

  scoped_ptr<base::SharedMemory> shared_memory(
      channel_->factory()->AllocateSharedMemory(size));
  if (!shared_memory)
    return NULL;

  DCHECK(!shared_memory->memory());
  if (!shared_memory->Map(size))
    return NULL;

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the RegisterTransferBuffer IPC below.
  base::SharedMemoryHandle handle =
      channel_->ShareToGpuProcess(shared_memory->handle());
  if (!base::SharedMemory::IsHandleValid(handle))
    return NULL;

  if (!Send(new GpuCommandBufferMsg_RegisterTransferBuffer(route_id_,
                                                           new_id,
                                                           handle,
                                                           size))) {
    return NULL;
  }

  *id = new_id;
  scoped_refptr<gpu::Buffer> buffer(
      gpu::MakeBufferFromSharedMemory(shared_memory.Pass(), size));
  return buffer;
}

void CommandBufferProxyImpl::DestroyTransferBuffer(int32 id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id));
}

gpu::Capabilities CommandBufferProxyImpl::GetCapabilities() {
  return capabilities_;
}

gfx::GpuMemoryBuffer* CommandBufferProxyImpl::CreateGpuMemoryBuffer(
    size_t width,
    size_t height,
    unsigned internalformat,
    unsigned usage,
    int32* id) {
  *id = -1;

  if (last_state_.error != gpu::error::kNoError)
    return NULL;

  int32 new_id = channel_->ReserveGpuMemoryBufferId();
  DCHECK(gpu_memory_buffers_.find(new_id) == gpu_memory_buffers_.end());

  scoped_ptr<gfx::GpuMemoryBuffer> gpu_memory_buffer(
      channel_->factory()->AllocateGpuMemoryBuffer(
          width, height, internalformat, usage));
  if (!gpu_memory_buffer)
    return NULL;

  DCHECK(GpuChannelHost::IsValidGpuMemoryBuffer(
      gpu_memory_buffer->GetHandle()));

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the RegisterGpuMemoryBuffer IPC below.
  gfx::GpuMemoryBufferHandle handle =
      channel_->ShareGpuMemoryBufferToGpuProcess(
          gpu_memory_buffer->GetHandle());

  if (!Send(new GpuCommandBufferMsg_RegisterGpuMemoryBuffer(
          route_id_,
          new_id,
          handle,
          width,
          height,
          internalformat))) {
    return NULL;
  }

  *id = new_id;
  gpu_memory_buffers_[new_id] = gpu_memory_buffer.release();
  return gpu_memory_buffers_[new_id];
}

void CommandBufferProxyImpl::DestroyGpuMemoryBuffer(int32 id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  // Remove the gpu memory buffer from the client side cache.
  GpuMemoryBufferMap::iterator it = gpu_memory_buffers_.find(id);
  if (it != gpu_memory_buffers_.end()) {
    delete it->second;
    gpu_memory_buffers_.erase(it);
  }

  Send(new GpuCommandBufferMsg_DestroyGpuMemoryBuffer(route_id_, id));
}

int CommandBufferProxyImpl::GetRouteID() const {
  return route_id_;
}

void CommandBufferProxyImpl::Echo(const base::Closure& callback) {
  if (last_state_.error != gpu::error::kNoError) {
    return;
  }

  if (!Send(new GpuCommandBufferMsg_Echo(
          route_id_, GpuCommandBufferMsg_EchoAck(route_id_)))) {
    return;
  }

  echo_tasks_.push(callback);
}

uint32 CommandBufferProxyImpl::CreateStreamTexture(uint32 texture_id) {
  if (last_state_.error != gpu::error::kNoError)
    return 0;

  int32 stream_id = channel_->GenerateRouteID();
  bool succeeded = false;
  Send(new GpuCommandBufferMsg_CreateStreamTexture(
      route_id_, texture_id, stream_id, &succeeded));
  if (!succeeded) {
    DLOG(ERROR) << "GpuCommandBufferMsg_CreateStreamTexture returned failure";
    return 0;
  }
  return stream_id;
}

uint32 CommandBufferProxyImpl::InsertSyncPoint() {
  if (last_state_.error != gpu::error::kNoError)
    return 0;

  uint32 sync_point = 0;
  Send(new GpuCommandBufferMsg_InsertSyncPoint(route_id_, true, &sync_point));
  return sync_point;
}

uint32_t CommandBufferProxyImpl::InsertFutureSyncPoint() {
  if (last_state_.error != gpu::error::kNoError)
    return 0;

  uint32 sync_point = 0;
  Send(new GpuCommandBufferMsg_InsertSyncPoint(route_id_, false, &sync_point));
  return sync_point;
}

void CommandBufferProxyImpl::RetireSyncPoint(uint32_t sync_point) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_RetireSyncPoint(route_id_, sync_point));
}

void CommandBufferProxyImpl::SignalSyncPoint(uint32 sync_point,
                                             const base::Closure& callback) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  uint32 signal_id = next_signal_id_++;
  if (!Send(new GpuCommandBufferMsg_SignalSyncPoint(route_id_,
                                                    sync_point,
                                                    signal_id))) {
    return;
  }

  signal_tasks_.insert(std::make_pair(signal_id, callback));
}

void CommandBufferProxyImpl::SignalQuery(uint32 query,
                                         const base::Closure& callback) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  // Signal identifiers are hidden, so nobody outside of this class will see
  // them. (And thus, they cannot save them.) The IDs themselves only last
  // until the callback is invoked, which will happen as soon as the GPU
  // catches up with the command buffer.
  // A malicious caller trying to create a collision by making next_signal_id
  // wrap would have to make calls at an astounding rate (300B/s) and even if
  // they could do that, all they would do is to prevent some callbacks from
  // getting called, leading to stalled threads and/or memory leaks.
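  // Note that SignalSyncPoint() and SignalQuery() draw their ids from the
  // same monotonically increasing 32-bit next_signal_id_ counter and share
  // signal_tasks_, so a collision would additionally require an old callback
  // with the same id to still be pending when the counter wraps.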
  uint32 signal_id = next_signal_id_++;
  if (!Send(new GpuCommandBufferMsg_SignalQuery(route_id_,
                                                query,
                                                signal_id))) {
    return;
  }

  signal_tasks_.insert(std::make_pair(signal_id, callback));
}

void CommandBufferProxyImpl::SetSurfaceVisible(bool visible) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_SetSurfaceVisible(route_id_, visible));
}

bool CommandBufferProxyImpl::ProduceFrontBuffer(const gpu::Mailbox& mailbox) {
  if (last_state_.error != gpu::error::kNoError)
    return false;

  return Send(new GpuCommandBufferMsg_ProduceFrontBuffer(route_id_, mailbox));
}

scoped_ptr<media::VideoDecodeAccelerator>
CommandBufferProxyImpl::CreateVideoDecoder() {
  if (!channel_)
    return scoped_ptr<media::VideoDecodeAccelerator>();
  return scoped_ptr<media::VideoDecodeAccelerator>(
      new GpuVideoDecodeAcceleratorHost(channel_, this));
}

scoped_ptr<media::VideoEncodeAccelerator>
CommandBufferProxyImpl::CreateVideoEncoder() {
  if (!channel_)
    return scoped_ptr<media::VideoEncodeAccelerator>();
  return scoped_ptr<media::VideoEncodeAccelerator>(
      new GpuVideoEncodeAcceleratorHost(channel_, this));
}

gpu::error::Error CommandBufferProxyImpl::GetLastError() {
  return last_state_.error;
}

bool CommandBufferProxyImpl::Send(IPC::Message* msg) {
  // Caller should not intentionally send a message if the context is lost.
  DCHECK(last_state_.error == gpu::error::kNoError);

  if (channel_) {
    if (channel_->Send(msg)) {
      return true;
    } else {
      // Flag the command buffer as lost. Defer deleting the channel until
      // OnChannelError is called after returning to the message loop in case
      // it is referenced elsewhere.
      DVLOG(1) << "CommandBufferProxyImpl::Send failed. Losing context.";
      last_state_.error = gpu::error::kLostContext;
      return false;
    }
  }

  // Callee takes ownership of message, regardless of whether Send is
  // successful. See IPC::Sender.
  delete msg;
  return false;
}

void CommandBufferProxyImpl::OnUpdateState(
    const gpu::CommandBuffer::State& state) {
  // Handle wraparound. It works as long as we don't have more than 2B state
  // updates in flight across which reordering occurs.
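  // For example, if last_state_.generation is 0xFFFFFFFE and an update with
  // generation 0x00000001 arrives, the unsigned difference is 3 and the
  // update is accepted; a stale update with generation 0xFFFFFFF0 arriving
  // afterwards yields a difference of roughly 0xFFFFFFEF and is dropped.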
  if (state.generation - last_state_.generation < 0x80000000U)
    last_state_ = state;
}

void CommandBufferProxyImpl::SetOnConsoleMessageCallback(
    const GpuConsoleMessageCallback& callback) {
  console_message_callback_ = callback;
}

void CommandBufferProxyImpl::TryUpdateState() {
  if (last_state_.error == gpu::error::kNoError)
    shared_state()->Read(&last_state_);
}

gpu::CommandBufferSharedState* CommandBufferProxyImpl::shared_state() const {
  return reinterpret_cast<gpu::CommandBufferSharedState*>(
      shared_state_shm_->memory());
}

}  // namespace content