1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "content/common/gpu/client/command_buffer_proxy_impl.h"
7 #include "base/callback.h"
8 #include "base/debug/trace_event.h"
9 #include "base/logging.h"
10 #include "base/memory/shared_memory.h"
11 #include "base/stl_util.h"
12 #include "content/common/child_process_messages.h"
13 #include "content/common/gpu/client/gl_surface_capturer_host.h"
14 #include "content/common/gpu/client/gpu_channel_host.h"
15 #include "content/common/gpu/client/gpu_video_decode_accelerator_host.h"
16 #include "content/common/gpu/gpu_memory_allocation.h"
17 #include "content/common/gpu/gpu_messages.h"
18 #include "content/common/view_messages.h"
19 #include "gpu/command_buffer/common/cmd_buffer_common.h"
20 #include "gpu/command_buffer/common/command_buffer_shared.h"
21 #include "ui/gfx/size.h"
// Constructor: binds this command-buffer proxy to its owning GpuChannelHost.
// NOTE(review): extraction gap — the remainder of the parameter list, the
// member initializer list, and the body are missing from this view; the
// visible fragment is left untouched rather than guessed at.
25 CommandBufferProxyImpl::CommandBufferProxyImpl(
26 GpuChannelHost
* channel
,
35 CommandBufferProxyImpl::~CommandBufferProxyImpl() {
36 FOR_EACH_OBSERVER(DeletionObserver
,
40 // Delete all the locally cached shared memory objects, closing the handle
42 for (TransferBufferMap::iterator it
= transfer_buffers_
.begin();
43 it
!= transfer_buffers_
.end();
45 delete it
->second
.shared_memory
;
46 it
->second
.shared_memory
= NULL
;
50 bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message
& message
) {
52 IPC_BEGIN_MESSAGE_MAP(CommandBufferProxyImpl
, message
)
53 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Destroyed
, OnDestroyed
);
54 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_EchoAck
, OnEchoAck
);
55 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ConsoleMsg
, OnConsoleMessage
);
56 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetMemoryAllocation
,
57 OnSetMemoryAllocation
);
58 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPointAck
,
59 OnSignalSyncPointAck
);
60 IPC_MESSAGE_UNHANDLED(handled
= false)
67 void CommandBufferProxyImpl::OnChannelError() {
68 OnDestroyed(gpu::error::kUnknown
);
71 void CommandBufferProxyImpl::OnDestroyed(gpu::error::ContextLostReason reason
) {
72 // Prevent any further messages from being sent.
75 // When the client sees that the context is lost, they should delete this
76 // CommandBufferProxyImpl and create a new one.
77 last_state_
.error
= gpu::error::kLostContext
;
78 last_state_
.context_lost_reason
= reason
;
80 if (!channel_error_callback_
.is_null()) {
81 channel_error_callback_
.Run();
82 // Avoid calling the error callback more than once.
83 channel_error_callback_
.Reset();
87 void CommandBufferProxyImpl::OnEchoAck() {
88 DCHECK(!echo_tasks_
.empty());
89 base::Closure callback
= echo_tasks_
.front();
94 void CommandBufferProxyImpl::OnConsoleMessage(
95 const GPUCommandBufferConsoleMessage
& message
) {
96 if (!console_message_callback_
.is_null()) {
97 console_message_callback_
.Run(message
.message
, message
.id
);
101 void CommandBufferProxyImpl::SetMemoryAllocationChangedCallback(
102 const base::Callback
<void(const GpuMemoryAllocationForRenderer
&)>&
104 if (last_state_
.error
!= gpu::error::kNoError
)
107 memory_allocation_changed_callback_
= callback
;
108 Send(new GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback(
109 route_id_
, !memory_allocation_changed_callback_
.is_null()));
112 void CommandBufferProxyImpl::AddDeletionObserver(DeletionObserver
* observer
) {
113 deletion_observers_
.AddObserver(observer
);
116 void CommandBufferProxyImpl::RemoveDeletionObserver(
117 DeletionObserver
* observer
) {
118 deletion_observers_
.RemoveObserver(observer
);
121 void CommandBufferProxyImpl::OnSetMemoryAllocation(
122 const GpuMemoryAllocationForRenderer
& allocation
) {
123 if (!memory_allocation_changed_callback_
.is_null())
124 memory_allocation_changed_callback_
.Run(allocation
);
127 void CommandBufferProxyImpl::OnSignalSyncPointAck(uint32 id
) {
128 SignalTaskMap::iterator it
= signal_tasks_
.find(id
);
129 DCHECK(it
!= signal_tasks_
.end());
130 base::Closure callback
= it
->second
;
131 signal_tasks_
.erase(it
);
135 void CommandBufferProxyImpl::SetChannelErrorCallback(
136 const base::Closure
& callback
) {
137 channel_error_callback_
= callback
;
140 bool CommandBufferProxyImpl::Initialize() {
141 shared_state_shm_
.reset(channel_
->factory()->AllocateSharedMemory(
142 sizeof(*shared_state())).release());
143 if (!shared_state_shm_
)
146 if (!shared_state_shm_
->Map(sizeof(*shared_state())))
149 shared_state()->Initialize();
151 // This handle is owned by the GPU process and must be passed to it or it
152 // will leak. In otherwords, do not early out on error between here and the
153 // sending of the Initialize IPC below.
154 base::SharedMemoryHandle handle
=
155 channel_
->ShareToGpuProcess(shared_state_shm_
->handle());
156 if (!base::SharedMemory::IsHandleValid(handle
))
160 if (!Send(new GpuCommandBufferMsg_Initialize(route_id_
, handle
, &result
))) {
161 LOG(ERROR
) << "Could not send GpuCommandBufferMsg_Initialize.";
166 LOG(ERROR
) << "Failed to initialize command buffer service.";
173 gpu::CommandBuffer::State
CommandBufferProxyImpl::GetState() {
174 // Send will flag state with lost context if IPC fails.
175 if (last_state_
.error
== gpu::error::kNoError
) {
176 gpu::CommandBuffer::State state
;
177 if (Send(new GpuCommandBufferMsg_GetState(route_id_
, &state
)))
178 OnUpdateState(state
);
185 gpu::CommandBuffer::State
CommandBufferProxyImpl::GetLastState() {
189 int32
CommandBufferProxyImpl::GetLastToken() {
191 return last_state_
.token
;
194 void CommandBufferProxyImpl::Flush(int32 put_offset
) {
195 if (last_state_
.error
!= gpu::error::kNoError
)
199 "CommandBufferProxyImpl::Flush",
203 if (last_put_offset_
== put_offset
)
206 last_put_offset_
= put_offset
;
208 Send(new GpuCommandBufferMsg_AsyncFlush(route_id_
,
213 void CommandBufferProxyImpl::SetLatencyInfo(
214 const ui::LatencyInfo
& latency_info
) {
215 if (last_state_
.error
!= gpu::error::kNoError
)
217 Send(new GpuCommandBufferMsg_SetLatencyInfo(route_id_
, latency_info
));
220 gpu::CommandBuffer::State
CommandBufferProxyImpl::FlushSync(
222 int32 last_known_get
) {
223 TRACE_EVENT1("gpu", "CommandBufferProxyImpl::FlushSync", "put_offset",
227 if (last_known_get
== last_state_
.get_offset
) {
228 // Send will flag state with lost context if IPC fails.
229 if (last_state_
.error
== gpu::error::kNoError
) {
230 gpu::CommandBuffer::State state
;
231 if (Send(new GpuCommandBufferMsg_GetStateFast(route_id_
,
233 OnUpdateState(state
);
241 void CommandBufferProxyImpl::SetGetBuffer(int32 shm_id
) {
242 if (last_state_
.error
!= gpu::error::kNoError
)
245 Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_
, shm_id
));
246 last_put_offset_
= -1;
249 void CommandBufferProxyImpl::SetGetOffset(int32 get_offset
) {
250 // Not implemented in proxy.
254 gpu::Buffer
CommandBufferProxyImpl::CreateTransferBuffer(size_t size
,
258 if (last_state_
.error
!= gpu::error::kNoError
)
259 return gpu::Buffer();
261 int32 new_id
= channel_
->ReserveTransferBufferId();
262 DCHECK(transfer_buffers_
.find(new_id
) == transfer_buffers_
.end());
264 scoped_ptr
<base::SharedMemory
> shared_memory(
265 channel_
->factory()->AllocateSharedMemory(size
));
267 return gpu::Buffer();
269 DCHECK(!shared_memory
->memory());
270 if (!shared_memory
->Map(size
))
271 return gpu::Buffer();
273 // This handle is owned by the GPU process and must be passed to it or it
274 // will leak. In otherwords, do not early out on error between here and the
275 // sending of the RegisterTransferBuffer IPC below.
276 base::SharedMemoryHandle handle
=
277 channel_
->ShareToGpuProcess(shared_memory
->handle());
278 if (!base::SharedMemory::IsHandleValid(handle
))
279 return gpu::Buffer();
281 if (!Send(new GpuCommandBufferMsg_RegisterTransferBuffer(route_id_
,
285 return gpu::Buffer();
290 buffer
.ptr
= shared_memory
->memory();
292 buffer
.shared_memory
= shared_memory
.release();
293 transfer_buffers_
[new_id
] = buffer
;
298 void CommandBufferProxyImpl::DestroyTransferBuffer(int32 id
) {
299 if (last_state_
.error
!= gpu::error::kNoError
)
302 // Remove the transfer buffer from the client side cache.
303 TransferBufferMap::iterator it
= transfer_buffers_
.find(id
);
304 if (it
!= transfer_buffers_
.end()) {
305 delete it
->second
.shared_memory
;
306 transfer_buffers_
.erase(it
);
309 Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_
, id
));
312 gpu::Buffer
CommandBufferProxyImpl::GetTransferBuffer(int32 id
) {
313 if (last_state_
.error
!= gpu::error::kNoError
)
314 return gpu::Buffer();
316 // Check local cache to see if there is already a client side shared memory
317 // object for this id.
318 TransferBufferMap::iterator it
= transfer_buffers_
.find(id
);
319 if (it
!= transfer_buffers_
.end()) {
323 // Assuming we are in the renderer process, the service is responsible for
324 // duplicating the handle. This might not be true for NaCl.
325 base::SharedMemoryHandle handle
= base::SharedMemoryHandle();
327 if (!Send(new GpuCommandBufferMsg_GetTransferBuffer(route_id_
,
331 return gpu::Buffer();
334 // Cache the transfer buffer shared memory object client side.
335 scoped_ptr
<base::SharedMemory
> shared_memory(
336 new base::SharedMemory(handle
, false));
338 // Map the shared memory on demand.
339 if (!shared_memory
->memory()) {
340 if (!shared_memory
->Map(size
))
341 return gpu::Buffer();
345 buffer
.ptr
= shared_memory
->memory();
347 buffer
.shared_memory
= shared_memory
.release();
348 transfer_buffers_
[id
] = buffer
;
353 void CommandBufferProxyImpl::SetToken(int32 token
) {
354 // Not implemented in proxy.
358 void CommandBufferProxyImpl::SetParseError(
359 gpu::error::Error error
) {
360 // Not implemented in proxy.
364 void CommandBufferProxyImpl::SetContextLostReason(
365 gpu::error::ContextLostReason reason
) {
366 // Not implemented in proxy.
370 int CommandBufferProxyImpl::GetRouteID() const {
374 bool CommandBufferProxyImpl::Echo(const base::Closure
& callback
) {
375 if (last_state_
.error
!= gpu::error::kNoError
) {
379 if (!Send(new GpuCommandBufferMsg_Echo(route_id_
,
380 GpuCommandBufferMsg_EchoAck(route_id_
)))) {
384 echo_tasks_
.push(callback
);
389 bool CommandBufferProxyImpl::SetSurfaceVisible(bool visible
) {
390 if (last_state_
.error
!= gpu::error::kNoError
)
393 return Send(new GpuCommandBufferMsg_SetSurfaceVisible(route_id_
, visible
));
396 bool CommandBufferProxyImpl::DiscardBackbuffer() {
397 if (last_state_
.error
!= gpu::error::kNoError
)
400 return Send(new GpuCommandBufferMsg_DiscardBackbuffer(route_id_
));
403 bool CommandBufferProxyImpl::EnsureBackbuffer() {
404 if (last_state_
.error
!= gpu::error::kNoError
)
407 return Send(new GpuCommandBufferMsg_EnsureBackbuffer(route_id_
));
410 uint32
CommandBufferProxyImpl::InsertSyncPoint() {
411 if (last_state_
.error
!= gpu::error::kNoError
)
414 uint32 sync_point
= 0;
415 Send(new GpuCommandBufferMsg_InsertSyncPoint(route_id_
, &sync_point
));
419 bool CommandBufferProxyImpl::SignalSyncPoint(uint32 sync_point
,
420 const base::Closure
& callback
) {
421 if (last_state_
.error
!= gpu::error::kNoError
) {
425 uint32 signal_id
= next_signal_id_
++;
426 if (!Send(new GpuCommandBufferMsg_SignalSyncPoint(route_id_
,
432 signal_tasks_
.insert(std::make_pair(signal_id
, callback
));
437 bool CommandBufferProxyImpl::SignalQuery(unsigned query
,
438 const base::Closure
& callback
) {
439 if (last_state_
.error
!= gpu::error::kNoError
) {
443 // Signal identifiers are hidden, so nobody outside of this class will see
444 // them. (And thus, they cannot save them.) The IDs themselves only last
445 // until the callback is invoked, which will happen as soon as the GPU
446 // catches upwith the command buffer.
447 // A malicious caller trying to create a collision by making next_signal_id
448 // would have to make calls at an astounding rate (300B/s) and even if they
449 // could do that, all they would do is to prevent some callbacks from getting
450 // called, leading to stalled threads and/or memory leaks.
451 uint32 signal_id
= next_signal_id_
++;
452 if (!Send(new GpuCommandBufferMsg_SignalQuery(route_id_
,
458 signal_tasks_
.insert(std::make_pair(signal_id
, callback
));
464 bool CommandBufferProxyImpl::GenerateMailboxNames(
466 std::vector
<gpu::Mailbox
>* names
) {
467 if (last_state_
.error
!= gpu::error::kNoError
)
470 return channel_
->GenerateMailboxNames(num
, names
);
473 bool CommandBufferProxyImpl::ProduceFrontBuffer(const gpu::Mailbox
& mailbox
) {
474 if (last_state_
.error
!= gpu::error::kNoError
)
477 return Send(new GpuCommandBufferMsg_ProduceFrontBuffer(route_id_
, mailbox
));
480 scoped_ptr
<media::VideoDecodeAccelerator
>
481 CommandBufferProxyImpl::CreateVideoDecoder(
482 media::VideoCodecProfile profile
,
483 media::VideoDecodeAccelerator::Client
* client
) {
484 int decoder_route_id
;
485 scoped_ptr
<media::VideoDecodeAccelerator
> vda
;
486 if (!Send(new GpuCommandBufferMsg_CreateVideoDecoder(route_id_
, profile
,
487 &decoder_route_id
))) {
488 LOG(ERROR
) << "Send(GpuCommandBufferMsg_CreateVideoDecoder) failed";
492 if (decoder_route_id
< 0) {
493 DLOG(ERROR
) << "Failed to Initialize GPU decoder on profile: " << profile
;
497 GpuVideoDecodeAcceleratorHost
* decoder_host
=
498 new GpuVideoDecodeAcceleratorHost(channel_
, decoder_route_id
, client
,
500 vda
.reset(decoder_host
);
504 scoped_ptr
<SurfaceCapturer
> CommandBufferProxyImpl::CreateSurfaceCapturer(
505 SurfaceCapturer::Client
* client
) {
506 int capturer_route_id
;
507 scoped_ptr
<SurfaceCapturer
> capturer
;
508 if (!Send(new GpuCommandBufferMsg_CreateSurfaceCapturer(
509 route_id_
, &capturer_route_id
))) {
510 LOG(ERROR
) << "Send(GpuCommandBufferMsg_CreateSurfaceCapturer) failed";
511 return capturer
.Pass();
514 if (capturer_route_id
< 0) {
515 DLOG(ERROR
) << "Failed create surface capturer";
516 return capturer
.Pass();
519 capturer
.reset(new GLSurfaceCapturerHost(capturer_route_id
, client
, this));
520 return capturer
.Pass();
523 gpu::error::Error
CommandBufferProxyImpl::GetLastError() {
524 return last_state_
.error
;
527 bool CommandBufferProxyImpl::Send(IPC::Message
* msg
) {
528 // Caller should not intentionally send a message if the context is lost.
529 DCHECK(last_state_
.error
== gpu::error::kNoError
);
532 if (channel_
->Send(msg
)) {
535 // Flag the command buffer as lost. Defer deleting the channel until
536 // OnChannelError is called after returning to the message loop in case
537 // it is referenced elsewhere.
538 last_state_
.error
= gpu::error::kLostContext
;
543 // Callee takes ownership of message, regardless of whether Send is
544 // successful. See IPC::Sender.
549 void CommandBufferProxyImpl::OnUpdateState(
550 const gpu::CommandBuffer::State
& state
) {
551 // Handle wraparound. It works as long as we don't have more than 2B state
552 // updates in flight across which reordering occurs.
553 if (state
.generation
- last_state_
.generation
< 0x80000000U
)
557 void CommandBufferProxyImpl::SetOnConsoleMessageCallback(
558 const GpuConsoleMessageCallback
& callback
) {
559 console_message_callback_
= callback
;
562 void CommandBufferProxyImpl::TryUpdateState() {
563 if (last_state_
.error
== gpu::error::kNoError
)
564 shared_state()->Read(&last_state_
);
567 void CommandBufferProxyImpl::SendManagedMemoryStats(
568 const GpuManagedMemoryStats
& stats
) {
569 if (last_state_
.error
!= gpu::error::kNoError
)
572 Send(new GpuCommandBufferMsg_SendClientManagedMemoryStats(route_id_
,
576 } // namespace content