// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/client/command_buffer_proxy_impl.h"

#include "base/callback.h"
#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "base/memory/shared_memory.h"
#include "base/stl_util.h"
#include "content/common/child_process_messages.h"
#include "content/common/gpu/client/gpu_channel_host.h"
#include "content/common/gpu/client/gpu_video_decode_accelerator_host.h"
#include "content/common/gpu/client/gpu_video_encode_accelerator_host.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/view_messages.h"
#include "gpu/command_buffer/common/cmd_buffer_common.h"
#include "gpu/command_buffer/common/command_buffer_shared.h"
#include "gpu/command_buffer/common/gpu_memory_allocation.h"
#include "ui/gfx/size.h"
namespace content {

CommandBufferProxyImpl::CommandBufferProxyImpl(GpuChannelHost* channel,
                                               int route_id)
    : channel_(channel),
      route_id_(route_id),
      flush_count_(0),
      last_put_offset_(-1),
      next_signal_id_(0) {
}

CommandBufferProxyImpl::~CommandBufferProxyImpl() {
  FOR_EACH_OBSERVER(DeletionObserver,
                    deletion_observers_,
                    OnWillDeleteImpl());
}

bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(CommandBufferProxyImpl, message)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Destroyed, OnDestroyed);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_EchoAck, OnEchoAck);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ConsoleMsg, OnConsoleMessage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetMemoryAllocation,
                        OnSetMemoryAllocation);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPointAck,
                        OnSignalSyncPointAck);
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  return handled;
}

void CommandBufferProxyImpl::OnChannelError() {
  OnDestroyed(gpu::error::kUnknown);
}

void CommandBufferProxyImpl::OnDestroyed(gpu::error::ContextLostReason reason) {
  // Prevent any further messages from being sent.
  channel_ = NULL;

  // When the client sees that the context is lost, they should delete this
  // CommandBufferProxyImpl and create a new one.
  last_state_.error = gpu::error::kLostContext;
  last_state_.context_lost_reason = reason;

  if (!channel_error_callback_.is_null()) {
    channel_error_callback_.Run();
    // Avoid calling the error callback more than once.
    channel_error_callback_.Reset();
  }
}

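// A minimal caller-side sketch of the recovery pattern described above
// (|proxy| and RecreateProxy() are hypothetical caller-side names, not part
// of this file): once a client observes a lost context it should drop this
// proxy and build a new one rather than keep issuing commands on it, e.g.
//
//   if (proxy->GetLastError() != gpu::error::kNoError) {
//     proxy.reset();              // discard the lost CommandBufferProxyImpl
//     proxy = RecreateProxy();    // assumed to create a fresh command buffer
//   }
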
void CommandBufferProxyImpl::OnEchoAck() {
  DCHECK(!echo_tasks_.empty());
  base::Closure callback = echo_tasks_.front();
  echo_tasks_.pop();
  callback.Run();
}

void CommandBufferProxyImpl::OnConsoleMessage(
    const GPUCommandBufferConsoleMessage& message) {
  if (!console_message_callback_.is_null()) {
    console_message_callback_.Run(message.message, message.id);
  }
}

void CommandBufferProxyImpl::SetMemoryAllocationChangedCallback(
    const MemoryAllocationChangedCallback& callback) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  memory_allocation_changed_callback_ = callback;
  Send(new GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback(
      route_id_, !memory_allocation_changed_callback_.is_null()));
}

void CommandBufferProxyImpl::AddDeletionObserver(DeletionObserver* observer) {
  deletion_observers_.AddObserver(observer);
}

void CommandBufferProxyImpl::RemoveDeletionObserver(
    DeletionObserver* observer) {
  deletion_observers_.RemoveObserver(observer);
}

void CommandBufferProxyImpl::OnSetMemoryAllocation(
    const gpu::MemoryAllocation& allocation) {
  if (!memory_allocation_changed_callback_.is_null())
    memory_allocation_changed_callback_.Run(allocation);
}

void CommandBufferProxyImpl::OnSignalSyncPointAck(uint32 id) {
  SignalTaskMap::iterator it = signal_tasks_.find(id);
  DCHECK(it != signal_tasks_.end());
  base::Closure callback = it->second;
  signal_tasks_.erase(it);
  callback.Run();
}

void CommandBufferProxyImpl::SetChannelErrorCallback(
    const base::Closure& callback) {
  channel_error_callback_ = callback;
}

bool CommandBufferProxyImpl::Initialize() {
  TRACE_EVENT0("gpu", "CommandBufferProxyImpl::Initialize");
  shared_state_shm_.reset(channel_->factory()->AllocateSharedMemory(
      sizeof(*shared_state())).release());
  if (!shared_state_shm_)
    return false;

  if (!shared_state_shm_->Map(sizeof(*shared_state())))
    return false;

  shared_state()->Initialize();

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the Initialize IPC below.
  base::SharedMemoryHandle handle =
      channel_->ShareToGpuProcess(shared_state_shm_->handle());
  if (!base::SharedMemory::IsHandleValid(handle))
    return false;

  bool result = false;
  if (!Send(new GpuCommandBufferMsg_Initialize(
          route_id_, handle, &result, &capabilities_))) {
    LOG(ERROR) << "Could not send GpuCommandBufferMsg_Initialize.";
    return false;
  }

  if (!result) {
    LOG(ERROR) << "Failed to initialize command buffer service.";
    return false;
  }

  capabilities_.map_image = true;

  return true;
}

gpu::CommandBuffer::State CommandBufferProxyImpl::GetLastState() {
  return last_state_;
}

int32 CommandBufferProxyImpl::GetLastToken() {
  TryUpdateState();
  return last_state_.token;
}

void CommandBufferProxyImpl::Flush(int32 put_offset) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  TRACE_EVENT1("gpu",
               "CommandBufferProxyImpl::Flush",
               "put_offset",
               put_offset);

  if (last_put_offset_ == put_offset)
    return;

  last_put_offset_ = put_offset;

  Send(new GpuCommandBufferMsg_AsyncFlush(route_id_,
                                          put_offset,
                                          ++flush_count_));
}

void CommandBufferProxyImpl::SetLatencyInfo(
    const std::vector<ui::LatencyInfo>& latency_info) {
  if (last_state_.error != gpu::error::kNoError ||
      latency_info.empty())
    return;
  Send(new GpuCommandBufferMsg_SetLatencyInfo(route_id_, latency_info));
}

void CommandBufferProxyImpl::WaitForTokenInRange(int32 start, int32 end) {
  TRACE_EVENT2("gpu",
               "CommandBufferProxyImpl::WaitForToken",
               "start",
               start,
               "end",
               end);
  TryUpdateState();
  if (!InRange(start, end, last_state_.token) &&
      last_state_.error == gpu::error::kNoError) {
    gpu::CommandBuffer::State state;
    if (Send(new GpuCommandBufferMsg_WaitForTokenInRange(
            route_id_, start, end, &state)))
      OnUpdateState(state);
  }
  DCHECK(InRange(start, end, last_state_.token) ||
         last_state_.error != gpu::error::kNoError);
}

void CommandBufferProxyImpl::WaitForGetOffsetInRange(int32 start, int32 end) {
  TRACE_EVENT2("gpu",
               "CommandBufferProxyImpl::WaitForGetOffset",
               "start",
               start,
               "end",
               end);
  TryUpdateState();
  if (!InRange(start, end, last_state_.get_offset) &&
      last_state_.error == gpu::error::kNoError) {
    gpu::CommandBuffer::State state;
    if (Send(new GpuCommandBufferMsg_WaitForGetOffsetInRange(
            route_id_, start, end, &state)))
      OnUpdateState(state);
  }
  DCHECK(InRange(start, end, last_state_.get_offset) ||
         last_state_.error != gpu::error::kNoError);
}

void CommandBufferProxyImpl::SetGetBuffer(int32 shm_id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_, shm_id));
  last_put_offset_ = -1;
}

scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer(
    size_t size,
    int32* id) {
  *id = -1;
  if (last_state_.error != gpu::error::kNoError)
    return NULL;

  int32 new_id = channel_->ReserveTransferBufferId();

  scoped_ptr<base::SharedMemory> shared_memory(
      channel_->factory()->AllocateSharedMemory(size));
  if (!shared_memory)
    return NULL;

  DCHECK(!shared_memory->memory());
  if (!shared_memory->Map(size))
    return NULL;

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the RegisterTransferBuffer IPC below.
  base::SharedMemoryHandle handle =
      channel_->ShareToGpuProcess(shared_memory->handle());
  if (!base::SharedMemory::IsHandleValid(handle))
    return NULL;

  if (!Send(new GpuCommandBufferMsg_RegisterTransferBuffer(route_id_,
                                                           new_id,
                                                           handle,
                                                           size))) {
    return NULL;
  }

  *id = new_id;
  scoped_refptr<gpu::Buffer> buffer(
      gpu::MakeBufferFromSharedMemory(shared_memory.Pass(), size));
  return buffer;
}

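// Typical caller-side flow (illustrative, not part of this file): the id
// returned through the out-parameter is what identifies the buffer over IPC
// later, for example when making it the command ring buffer. |proxy| and
// kRingBufferSize are hypothetical caller-side names:
//
//   int32 shm_id = -1;
//   scoped_refptr<gpu::Buffer> ring =
//       proxy->CreateTransferBuffer(kRingBufferSize, &shm_id);
//   if (ring)
//     proxy->SetGetBuffer(shm_id);
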
void CommandBufferProxyImpl::DestroyTransferBuffer(int32 id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id));
}

gpu::Capabilities CommandBufferProxyImpl::GetCapabilities() {
  return capabilities_;
}

gfx::GpuMemoryBuffer* CommandBufferProxyImpl::CreateGpuMemoryBuffer(
    size_t width,
    size_t height,
    unsigned internalformat,
    unsigned usage,
    int32* id) {
  *id = -1;
  if (last_state_.error != gpu::error::kNoError)
    return NULL;

  scoped_ptr<gfx::GpuMemoryBuffer> buffer(
      channel_->factory()->AllocateGpuMemoryBuffer(
          width, height, internalformat, usage));
  if (!buffer)
    return NULL;

  DCHECK(GpuChannelHost::IsValidGpuMemoryBuffer(buffer->GetHandle()));

  int32 new_id = channel_->ReserveGpuMemoryBufferId();

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the RegisterGpuMemoryBuffer IPC below.
  gfx::GpuMemoryBufferHandle handle =
      channel_->ShareGpuMemoryBufferToGpuProcess(buffer->GetHandle());

  if (!Send(new GpuCommandBufferMsg_RegisterGpuMemoryBuffer(
          route_id_, new_id, handle, width, height, internalformat))) {
    return NULL;
  }

  *id = new_id;
  DCHECK(gpu_memory_buffers_.find(new_id) == gpu_memory_buffers_.end());
  return gpu_memory_buffers_.add(new_id, buffer.Pass()).first->second;
}

void CommandBufferProxyImpl::DestroyGpuMemoryBuffer(int32 id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_UnregisterGpuMemoryBuffer(route_id_, id));

  // Remove the gpu memory buffer from the client side cache.
  DCHECK(gpu_memory_buffers_.find(id) != gpu_memory_buffers_.end());
  channel_->factory()->DeleteGpuMemoryBuffer(gpu_memory_buffers_.take(id));
}

int CommandBufferProxyImpl::GetRouteID() const {
  return route_id_;
}

void CommandBufferProxyImpl::Echo(const base::Closure& callback) {
  if (last_state_.error != gpu::error::kNoError) {
    return;
  }

  if (!Send(new GpuCommandBufferMsg_Echo(
          route_id_, GpuCommandBufferMsg_EchoAck(route_id_)))) {
    return;
  }

  echo_tasks_.push(callback);
}

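// Echo() acts as a round-trip fence: the queued closure runs from OnEchoAck()
// once the GPU process has bounced the message back, which implies it has
// already received the messages sent ahead of it on this channel. A
// caller-side sketch (OnGpuCaughtUp is a hypothetical callback, not part of
// this file):
//
//   proxy->Echo(base::Bind(&OnGpuCaughtUp));
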
uint32 CommandBufferProxyImpl::CreateStreamTexture(uint32 texture_id) {
  if (last_state_.error != gpu::error::kNoError)
    return 0;

  int32 stream_id = channel_->GenerateRouteID();
  bool succeeded = false;
  Send(new GpuCommandBufferMsg_CreateStreamTexture(
      route_id_, texture_id, stream_id, &succeeded));
  if (!succeeded) {
    DLOG(ERROR) << "GpuCommandBufferMsg_CreateStreamTexture returned failure";
    return 0;
  }
  return stream_id;
}

uint32 CommandBufferProxyImpl::InsertSyncPoint() {
  if (last_state_.error != gpu::error::kNoError)
    return 0;

  uint32 sync_point = 0;
  Send(new GpuCommandBufferMsg_InsertSyncPoint(route_id_, true, &sync_point));
  return sync_point;
}

uint32_t CommandBufferProxyImpl::InsertFutureSyncPoint() {
  if (last_state_.error != gpu::error::kNoError)
    return 0;

  uint32 sync_point = 0;
  Send(new GpuCommandBufferMsg_InsertSyncPoint(route_id_, false, &sync_point));
  return sync_point;
}

void CommandBufferProxyImpl::RetireSyncPoint(uint32_t sync_point) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_RetireSyncPoint(route_id_, sync_point));
}

void CommandBufferProxyImpl::SignalSyncPoint(uint32 sync_point,
                                             const base::Closure& callback) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  uint32 signal_id = next_signal_id_++;
  if (!Send(new GpuCommandBufferMsg_SignalSyncPoint(route_id_,
                                                    sync_point,
                                                    signal_id))) {
    return;
  }

  signal_tasks_.insert(std::make_pair(signal_id, callback));
}

void CommandBufferProxyImpl::SignalQuery(uint32 query,
                                         const base::Closure& callback) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  // Signal identifiers are hidden, so nobody outside of this class will see
  // them. (And thus, they cannot save them.) The IDs themselves only last
  // until the callback is invoked, which will happen as soon as the GPU
  // catches up with the command buffer.
  // A malicious caller trying to create a collision by making next_signal_id
  // wrap would have to make calls at an astounding rate (300B/s), and even if
  // they could do that, all they would do is prevent some callbacks from
  // getting called, leading to stalled threads and/or memory leaks.
  uint32 signal_id = next_signal_id_++;
  if (!Send(new GpuCommandBufferMsg_SignalQuery(route_id_,
                                                query,
                                                signal_id))) {
    return;
  }

  signal_tasks_.insert(std::make_pair(signal_id, callback));
}

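// Typical use (illustrative only): a caller pairs a GL query it has already
// issued with a closure and lets this proxy run the closure once the GPU
// process reports the query as available. |query_id| and OnQueryAvailable
// are hypothetical caller-side names:
//
//   proxy->SignalQuery(query_id, base::Bind(&OnQueryAvailable));
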
void CommandBufferProxyImpl::SetSurfaceVisible(bool visible) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_SetSurfaceVisible(route_id_, visible));
}

bool CommandBufferProxyImpl::ProduceFrontBuffer(const gpu::Mailbox& mailbox) {
  if (last_state_.error != gpu::error::kNoError)
    return false;

  return Send(new GpuCommandBufferMsg_ProduceFrontBuffer(route_id_, mailbox));
}

scoped_ptr<media::VideoDecodeAccelerator>
CommandBufferProxyImpl::CreateVideoDecoder() {
  if (!channel_)
    return scoped_ptr<media::VideoDecodeAccelerator>();
  return scoped_ptr<media::VideoDecodeAccelerator>(
      new GpuVideoDecodeAcceleratorHost(channel_, this));
}

scoped_ptr<media::VideoEncodeAccelerator>
CommandBufferProxyImpl::CreateVideoEncoder() {
  if (!channel_)
    return scoped_ptr<media::VideoEncodeAccelerator>();
  return scoped_ptr<media::VideoEncodeAccelerator>(
      new GpuVideoEncodeAcceleratorHost(channel_, this));
}

gpu::error::Error CommandBufferProxyImpl::GetLastError() {
  return last_state_.error;
}

bool CommandBufferProxyImpl::Send(IPC::Message* msg) {
  // Caller should not intentionally send a message if the context is lost.
  DCHECK(last_state_.error == gpu::error::kNoError);

  if (channel_) {
    if (channel_->Send(msg)) {
      return true;
    } else {
      // Flag the command buffer as lost. Defer deleting the channel until
      // OnChannelError is called after returning to the message loop in case
      // it is referenced elsewhere.
      DVLOG(1) << "CommandBufferProxyImpl::Send failed. Losing context.";
      last_state_.error = gpu::error::kLostContext;
      return false;
    }
  }

  // Callee takes ownership of message, regardless of whether Send is
  // successful. See IPC::Sender.
  delete msg;
  return false;
}

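// Because IPC::Sender::Send() always assumes ownership of the message, every
// path through this helper either hands |msg| to the channel or deletes it,
// so callers throughout this file can use the fire-and-forget form, e.g.:
//
//   Send(new GpuCommandBufferMsg_SetSurfaceVisible(route_id_, visible));
//
// without tracking the allocation themselves.
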
void CommandBufferProxyImpl::OnUpdateState(
    const gpu::CommandBuffer::State& state) {
  // Handle wraparound. It works as long as we don't have more than 2B state
  // updates in flight across which reordering occurs.
  if (state.generation - last_state_.generation < 0x80000000U)
    last_state_ = state;
}

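// Worked example of the wraparound test above: the subtraction is effectively
// evaluated modulo 2^32 (the comparison against 0x80000000U makes the result
// unsigned), so if state.generation is 0x00000002 and last_state_.generation
// is 0xFFFFFFFF the difference is 3 (< 0x80000000) and the incoming state is
// treated as newer and applied; with the operands swapped the difference is
// 0xFFFFFFFD (>= 0x80000000) and the stale update is ignored.
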
void CommandBufferProxyImpl::SetOnConsoleMessageCallback(
    const GpuConsoleMessageCallback& callback) {
  console_message_callback_ = callback;
}

void CommandBufferProxyImpl::TryUpdateState() {
  if (last_state_.error == gpu::error::kNoError)
    shared_state()->Read(&last_state_);
}

gpu::CommandBufferSharedState* CommandBufferProxyImpl::shared_state() const {
  return reinterpret_cast<gpu::CommandBufferSharedState*>(
      shared_state_shm_->memory());
}

}  // namespace content