Respond with QuotaExceededError when IndexedDB has no disk space on open.
[chromium-blink-merge.git] / content / common / gpu / client / command_buffer_proxy_impl.cc
blob90271647069457afad3c098c24216c47adf4121d
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "content/common/gpu/client/command_buffer_proxy_impl.h"
7 #include "base/callback.h"
8 #include "base/debug/trace_event.h"
9 #include "base/logging.h"
10 #include "base/memory/shared_memory.h"
11 #include "base/stl_util.h"
12 #include "content/common/child_process_messages.h"
13 #include "content/common/gpu/client/gl_surface_capturer_host.h"
14 #include "content/common/gpu/client/gpu_channel_host.h"
15 #include "content/common/gpu/client/gpu_video_decode_accelerator_host.h"
16 #include "content/common/gpu/gpu_memory_allocation.h"
17 #include "content/common/gpu/gpu_messages.h"
18 #include "content/common/view_messages.h"
19 #include "gpu/command_buffer/common/cmd_buffer_common.h"
20 #include "gpu/command_buffer/common/command_buffer_shared.h"
21 #include "ui/gfx/size.h"
23 namespace content {
// Thin renderer-side proxy for a command buffer living in the GPU process,
// addressed by |route_id| on |channel|. |channel| is not owned and must
// outlive this proxy (it is nulled out on channel error, see OnDestroyed()).
CommandBufferProxyImpl::CommandBufferProxyImpl(
    GpuChannelHost* channel,
    int route_id)
    : channel_(channel),
      route_id_(route_id),
      flush_count_(0),
      last_put_offset_(-1),  // -1 so the first Flush() is never skipped.
      next_signal_id_(0) {
}
35 CommandBufferProxyImpl::~CommandBufferProxyImpl() {
36 FOR_EACH_OBSERVER(DeletionObserver,
37 deletion_observers_,
38 OnWillDeleteImpl());
40 // Delete all the locally cached shared memory objects, closing the handle
41 // in this process.
42 for (TransferBufferMap::iterator it = transfer_buffers_.begin();
43 it != transfer_buffers_.end();
44 ++it) {
45 delete it->second.shared_memory;
46 it->second.shared_memory = NULL;
// Dispatches incoming IPC from the GPU process to the handlers below.
// Returns true when the message was recognized; a DCHECK fires on unknown
// messages since this proxy should only be routed its own message types.
bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(CommandBufferProxyImpl, message)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Destroyed, OnDestroyed);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_EchoAck, OnEchoAck);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ConsoleMsg, OnConsoleMessage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetMemoryAllocation,
                        OnSetMemoryAllocation);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPointAck,
                        OnSignalSyncPointAck);
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  DCHECK(handled);
  return handled;
}
67 void CommandBufferProxyImpl::OnChannelError() {
68 OnDestroyed(gpu::error::kUnknown);
71 void CommandBufferProxyImpl::OnDestroyed(gpu::error::ContextLostReason reason) {
72 // Prevent any further messages from being sent.
73 channel_ = NULL;
75 // When the client sees that the context is lost, they should delete this
76 // CommandBufferProxyImpl and create a new one.
77 last_state_.error = gpu::error::kLostContext;
78 last_state_.context_lost_reason = reason;
80 if (!channel_error_callback_.is_null()) {
81 channel_error_callback_.Run();
82 // Avoid calling the error callback more than once.
83 channel_error_callback_.Reset();
87 void CommandBufferProxyImpl::OnEchoAck() {
88 DCHECK(!echo_tasks_.empty());
89 base::Closure callback = echo_tasks_.front();
90 echo_tasks_.pop();
91 callback.Run();
94 void CommandBufferProxyImpl::OnConsoleMessage(
95 const GPUCommandBufferConsoleMessage& message) {
96 if (!console_message_callback_.is_null()) {
97 console_message_callback_.Run(message.message, message.id);
101 void CommandBufferProxyImpl::SetMemoryAllocationChangedCallback(
102 const base::Callback<void(const GpuMemoryAllocationForRenderer&)>&
103 callback) {
104 if (last_state_.error != gpu::error::kNoError)
105 return;
107 memory_allocation_changed_callback_ = callback;
108 Send(new GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback(
109 route_id_, !memory_allocation_changed_callback_.is_null()));
112 void CommandBufferProxyImpl::AddDeletionObserver(DeletionObserver* observer) {
113 deletion_observers_.AddObserver(observer);
116 void CommandBufferProxyImpl::RemoveDeletionObserver(
117 DeletionObserver* observer) {
118 deletion_observers_.RemoveObserver(observer);
121 void CommandBufferProxyImpl::OnSetMemoryAllocation(
122 const GpuMemoryAllocationForRenderer& allocation) {
123 if (!memory_allocation_changed_callback_.is_null())
124 memory_allocation_changed_callback_.Run(allocation);
127 void CommandBufferProxyImpl::OnSignalSyncPointAck(uint32 id) {
128 SignalTaskMap::iterator it = signal_tasks_.find(id);
129 DCHECK(it != signal_tasks_.end());
130 base::Closure callback = it->second;
131 signal_tasks_.erase(it);
132 callback.Run();
// Registers the callback run (at most once) when the channel dies or the
// context is otherwise destroyed; see OnDestroyed().
void CommandBufferProxyImpl::SetChannelErrorCallback(
    const base::Closure& callback) {
  channel_error_callback_ = callback;
}
140 bool CommandBufferProxyImpl::Initialize() {
141 shared_state_shm_.reset(channel_->factory()->AllocateSharedMemory(
142 sizeof(*shared_state())).release());
143 if (!shared_state_shm_)
144 return false;
146 if (!shared_state_shm_->Map(sizeof(*shared_state())))
147 return false;
149 shared_state()->Initialize();
151 // This handle is owned by the GPU process and must be passed to it or it
152 // will leak. In otherwords, do not early out on error between here and the
153 // sending of the Initialize IPC below.
154 base::SharedMemoryHandle handle =
155 channel_->ShareToGpuProcess(shared_state_shm_->handle());
156 if (!base::SharedMemory::IsHandleValid(handle))
157 return false;
159 bool result;
160 if (!Send(new GpuCommandBufferMsg_Initialize(route_id_, handle, &result))) {
161 LOG(ERROR) << "Could not send GpuCommandBufferMsg_Initialize.";
162 return false;
165 if (!result) {
166 LOG(ERROR) << "Failed to initialize command buffer service.";
167 return false;
170 return true;
// Synchronously fetches the authoritative state from the GPU process, then
// folds in anything newer from shared memory before returning.
gpu::CommandBuffer::State CommandBufferProxyImpl::GetState() {
  // Send will flag state with lost context if IPC fails.
  if (last_state_.error == gpu::error::kNoError) {
    gpu::CommandBuffer::State state;
    if (Send(new GpuCommandBufferMsg_GetState(route_id_, &state)))
      OnUpdateState(state);  // Generation-checked merge; see OnUpdateState().
  }

  TryUpdateState();
  return last_state_;
}
// Returns the most recently observed state without any IPC or shared-memory
// read; may be stale.
gpu::CommandBuffer::State CommandBufferProxyImpl::GetLastState() {
  return last_state_;
}
// Returns the latest token, refreshing from shared memory first (cheap, no
// IPC).
int32 CommandBufferProxyImpl::GetLastToken() {
  TryUpdateState();
  return last_state_.token;
}
194 void CommandBufferProxyImpl::Flush(int32 put_offset) {
195 if (last_state_.error != gpu::error::kNoError)
196 return;
198 TRACE_EVENT1("gpu",
199 "CommandBufferProxyImpl::Flush",
200 "put_offset",
201 put_offset);
203 if (last_put_offset_ == put_offset)
204 return;
206 last_put_offset_ = put_offset;
208 Send(new GpuCommandBufferMsg_AsyncFlush(route_id_,
209 put_offset,
210 ++flush_count_));
213 void CommandBufferProxyImpl::SetLatencyInfo(
214 const ui::LatencyInfo& latency_info) {
215 if (last_state_.error != gpu::error::kNoError)
216 return;
217 Send(new GpuCommandBufferMsg_SetLatencyInfo(route_id_, latency_info));
// Flushes up to |put_offset| and, if the get offset has not advanced past
// |last_known_get| (i.e. the caller is about to stall), synchronously asks
// the GPU process for fresh state via the fast path.
gpu::CommandBuffer::State CommandBufferProxyImpl::FlushSync(
    int32 put_offset,
    int32 last_known_get) {
  TRACE_EVENT1("gpu", "CommandBufferProxyImpl::FlushSync", "put_offset",
               put_offset);
  Flush(put_offset);
  TryUpdateState();
  // Only block on the GPU process when shared memory shows no progress.
  if (last_known_get == last_state_.get_offset) {
    // Send will flag state with lost context if IPC fails.
    if (last_state_.error == gpu::error::kNoError) {
      gpu::CommandBuffer::State state;
      if (Send(new GpuCommandBufferMsg_GetStateFast(route_id_,
                                                    &state)))
        OnUpdateState(state);
    }
    TryUpdateState();
  }

  return last_state_;
}
241 void CommandBufferProxyImpl::SetGetBuffer(int32 shm_id) {
242 if (last_state_.error != gpu::error::kNoError)
243 return;
245 Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_, shm_id));
246 last_put_offset_ = -1;
// The get offset is service-controlled; clients must never set it through
// the proxy.
void CommandBufferProxyImpl::SetGetOffset(int32 get_offset) {
  // Not implemented in proxy.
  NOTREACHED();
}
// Allocates a shared-memory transfer buffer of |size| bytes, registers it
// with the GPU process, and caches it locally. On success *|id| receives the
// new buffer id; on any failure *|id| stays -1 and an empty Buffer is
// returned. The returned Buffer's shared_memory is owned by this proxy's
// cache (freed in the destructor / DestroyTransferBuffer).
gpu::Buffer CommandBufferProxyImpl::CreateTransferBuffer(size_t size,
                                                         int32* id) {
  *id = -1;

  if (last_state_.error != gpu::error::kNoError)
    return gpu::Buffer();

  int32 new_id = channel_->ReserveTransferBufferId();
  DCHECK(transfer_buffers_.find(new_id) == transfer_buffers_.end());

  scoped_ptr<base::SharedMemory> shared_memory(
      channel_->factory()->AllocateSharedMemory(size));
  if (!shared_memory)
    return gpu::Buffer();

  DCHECK(!shared_memory->memory());
  if (!shared_memory->Map(size))
    return gpu::Buffer();

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the RegisterTransferBuffer IPC below.
  base::SharedMemoryHandle handle =
      channel_->ShareToGpuProcess(shared_memory->handle());
  if (!base::SharedMemory::IsHandleValid(handle))
    return gpu::Buffer();

  if (!Send(new GpuCommandBufferMsg_RegisterTransferBuffer(route_id_,
                                                           new_id,
                                                           handle,
                                                           size))) {
    return gpu::Buffer();
  }

  *id = new_id;
  gpu::Buffer buffer;
  buffer.ptr = shared_memory->memory();
  buffer.size = size;
  // Ownership of the SharedMemory moves into the cache entry.
  buffer.shared_memory = shared_memory.release();
  transfer_buffers_[new_id] = buffer;

  return buffer;
}
298 void CommandBufferProxyImpl::DestroyTransferBuffer(int32 id) {
299 if (last_state_.error != gpu::error::kNoError)
300 return;
302 // Remove the transfer buffer from the client side cache.
303 TransferBufferMap::iterator it = transfer_buffers_.find(id);
304 if (it != transfer_buffers_.end()) {
305 delete it->second.shared_memory;
306 transfer_buffers_.erase(it);
309 Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id));
312 gpu::Buffer CommandBufferProxyImpl::GetTransferBuffer(int32 id) {
313 if (last_state_.error != gpu::error::kNoError)
314 return gpu::Buffer();
316 // Check local cache to see if there is already a client side shared memory
317 // object for this id.
318 TransferBufferMap::iterator it = transfer_buffers_.find(id);
319 if (it != transfer_buffers_.end()) {
320 return it->second;
323 // Assuming we are in the renderer process, the service is responsible for
324 // duplicating the handle. This might not be true for NaCl.
325 base::SharedMemoryHandle handle = base::SharedMemoryHandle();
326 uint32 size;
327 if (!Send(new GpuCommandBufferMsg_GetTransferBuffer(route_id_,
329 &handle,
330 &size))) {
331 return gpu::Buffer();
334 // Cache the transfer buffer shared memory object client side.
335 scoped_ptr<base::SharedMemory> shared_memory(
336 new base::SharedMemory(handle, false));
338 // Map the shared memory on demand.
339 if (!shared_memory->memory()) {
340 if (!shared_memory->Map(size))
341 return gpu::Buffer();
344 gpu::Buffer buffer;
345 buffer.ptr = shared_memory->memory();
346 buffer.size = size;
347 buffer.shared_memory = shared_memory.release();
348 transfer_buffers_[id] = buffer;
350 return buffer;
// Tokens are written by the service; clients must never set them through
// the proxy.
void CommandBufferProxyImpl::SetToken(int32 token) {
  // Not implemented in proxy.
  NOTREACHED();
}
// Parse errors originate in the service; the proxy only observes them via
// state updates.
void CommandBufferProxyImpl::SetParseError(
    gpu::error::Error error) {
  // Not implemented in proxy.
  NOTREACHED();
}
// The context-lost reason is set by the service (see OnDestroyed()); it
// cannot be set from the client side.
void CommandBufferProxyImpl::SetContextLostReason(
    gpu::error::ContextLostReason reason) {
  // Not implemented in proxy.
  NOTREACHED();
}
// IPC route id of this command buffer on the GPU channel.
int CommandBufferProxyImpl::GetRouteID() const {
  return route_id_;
}
374 bool CommandBufferProxyImpl::Echo(const base::Closure& callback) {
375 if (last_state_.error != gpu::error::kNoError) {
376 return false;
379 if (!Send(new GpuCommandBufferMsg_Echo(route_id_,
380 GpuCommandBufferMsg_EchoAck(route_id_)))) {
381 return false;
384 echo_tasks_.push(callback);
386 return true;
389 bool CommandBufferProxyImpl::SetSurfaceVisible(bool visible) {
390 if (last_state_.error != gpu::error::kNoError)
391 return false;
393 return Send(new GpuCommandBufferMsg_SetSurfaceVisible(route_id_, visible));
396 bool CommandBufferProxyImpl::DiscardBackbuffer() {
397 if (last_state_.error != gpu::error::kNoError)
398 return false;
400 return Send(new GpuCommandBufferMsg_DiscardBackbuffer(route_id_));
403 bool CommandBufferProxyImpl::EnsureBackbuffer() {
404 if (last_state_.error != gpu::error::kNoError)
405 return false;
407 return Send(new GpuCommandBufferMsg_EnsureBackbuffer(route_id_));
410 uint32 CommandBufferProxyImpl::InsertSyncPoint() {
411 if (last_state_.error != gpu::error::kNoError)
412 return 0;
414 uint32 sync_point = 0;
415 Send(new GpuCommandBufferMsg_InsertSyncPoint(route_id_, &sync_point));
416 return sync_point;
419 bool CommandBufferProxyImpl::SignalSyncPoint(uint32 sync_point,
420 const base::Closure& callback) {
421 if (last_state_.error != gpu::error::kNoError) {
422 return false;
425 uint32 signal_id = next_signal_id_++;
426 if (!Send(new GpuCommandBufferMsg_SignalSyncPoint(route_id_,
427 sync_point,
428 signal_id))) {
429 return false;
432 signal_tasks_.insert(std::make_pair(signal_id, callback));
434 return true;
// Asks the GPU process to ack (via OnSignalSyncPointAck) once |query|
// completes; |callback| runs on that ack. Returns false if the request
// could not be sent.
bool CommandBufferProxyImpl::SignalQuery(unsigned query,
                                         const base::Closure& callback) {
  if (last_state_.error != gpu::error::kNoError) {
    return false;
  }

  // Signal identifiers are hidden, so nobody outside of this class will see
  // them. (And thus, they cannot save them.) The IDs themselves only last
  // until the callback is invoked, which will happen as soon as the GPU
  // catches up with the command buffer.
  // A malicious caller trying to create a collision by making next_signal_id
  // would have to make calls at an astounding rate (300B/s) and even if they
  // could do that, all they would do is to prevent some callbacks from getting
  // called, leading to stalled threads and/or memory leaks.
  uint32 signal_id = next_signal_id_++;
  if (!Send(new GpuCommandBufferMsg_SignalQuery(route_id_,
                                                query,
                                                signal_id))) {
    return false;
  }

  signal_tasks_.insert(std::make_pair(signal_id, callback));

  return true;
}
464 bool CommandBufferProxyImpl::GenerateMailboxNames(
465 unsigned num,
466 std::vector<gpu::Mailbox>* names) {
467 if (last_state_.error != gpu::error::kNoError)
468 return false;
470 return channel_->GenerateMailboxNames(num, names);
473 bool CommandBufferProxyImpl::ProduceFrontBuffer(const gpu::Mailbox& mailbox) {
474 if (last_state_.error != gpu::error::kNoError)
475 return false;
477 return Send(new GpuCommandBufferMsg_ProduceFrontBuffer(route_id_, mailbox));
480 scoped_ptr<media::VideoDecodeAccelerator>
481 CommandBufferProxyImpl::CreateVideoDecoder(
482 media::VideoCodecProfile profile,
483 media::VideoDecodeAccelerator::Client* client) {
484 int decoder_route_id;
485 scoped_ptr<media::VideoDecodeAccelerator> vda;
486 if (!Send(new GpuCommandBufferMsg_CreateVideoDecoder(route_id_, profile,
487 &decoder_route_id))) {
488 LOG(ERROR) << "Send(GpuCommandBufferMsg_CreateVideoDecoder) failed";
489 return vda.Pass();
492 if (decoder_route_id < 0) {
493 DLOG(ERROR) << "Failed to Initialize GPU decoder on profile: " << profile;
494 return vda.Pass();
497 GpuVideoDecodeAcceleratorHost* decoder_host =
498 new GpuVideoDecodeAcceleratorHost(channel_, decoder_route_id, client,
499 this);
500 vda.reset(decoder_host);
501 return vda.Pass();
504 scoped_ptr<SurfaceCapturer> CommandBufferProxyImpl::CreateSurfaceCapturer(
505 SurfaceCapturer::Client* client) {
506 int capturer_route_id;
507 scoped_ptr<SurfaceCapturer> capturer;
508 if (!Send(new GpuCommandBufferMsg_CreateSurfaceCapturer(
509 route_id_, &capturer_route_id))) {
510 LOG(ERROR) << "Send(GpuCommandBufferMsg_CreateSurfaceCapturer) failed";
511 return capturer.Pass();
514 if (capturer_route_id < 0) {
515 DLOG(ERROR) << "Failed create surface capturer";
516 return capturer.Pass();
519 capturer.reset(new GLSurfaceCapturerHost(capturer_route_id, client, this));
520 return capturer.Pass();
// Last observed error; not refreshed from shared memory (see GetLastState).
gpu::error::Error CommandBufferProxyImpl::GetLastError() {
  return last_state_.error;
}
// Sends |msg| over the GPU channel, taking ownership of it in all cases
// (matching the IPC::Sender contract). Marks the context lost on send
// failure so subsequent calls early-out.
bool CommandBufferProxyImpl::Send(IPC::Message* msg) {
  // Caller should not intentionally send a message if the context is lost.
  DCHECK(last_state_.error == gpu::error::kNoError);

  if (channel_) {
    if (channel_->Send(msg)) {
      return true;
    } else {
      // Flag the command buffer as lost. Defer deleting the channel until
      // OnChannelError is called after returning to the message loop in case
      // it is referenced elsewhere.
      last_state_.error = gpu::error::kLostContext;
      return false;
    }
  }

  // Callee takes ownership of message, regardless of whether Send is
  // successful. See IPC::Sender.
  delete msg;
  return false;
}
// Accepts |state| only if it is newer than what we already have, using
// unsigned subtraction so generation-counter wraparound compares correctly.
void CommandBufferProxyImpl::OnUpdateState(
    const gpu::CommandBuffer::State& state) {
  // Handle wraparound. It works as long as we don't have more than 2B state
  // updates in flight across which reordering occurs.
  // Unsigned difference < 2^31 means |state| is the same age or newer.
  if (state.generation - last_state_.generation < 0x80000000U)
    last_state_ = state;
}
557 void CommandBufferProxyImpl::SetOnConsoleMessageCallback(
558 const GpuConsoleMessageCallback& callback) {
559 console_message_callback_ = callback;
562 void CommandBufferProxyImpl::TryUpdateState() {
563 if (last_state_.error == gpu::error::kNoError)
564 shared_state()->Read(&last_state_);
567 void CommandBufferProxyImpl::SendManagedMemoryStats(
568 const GpuManagedMemoryStats& stats) {
569 if (last_state_.error != gpu::error::kNoError)
570 return;
572 Send(new GpuCommandBufferMsg_SendClientManagedMemoryStats(route_id_,
573 stats));
576 } // namespace content