// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/client/command_buffer_proxy_impl.h"

#include <vector>

#include "base/callback.h"
#include "base/logging.h"
#include "base/memory/shared_memory.h"
#include "base/stl_util.h"
#include "base/trace_event/trace_event.h"
#include "content/common/child_process_messages.h"
#include "content/common/gpu/client/gpu_channel_host.h"
#include "content/common/gpu/client/gpu_video_decode_accelerator_host.h"
#include "content/common/gpu/client/gpu_video_encode_accelerator_host.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/view_messages.h"
#include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
#include "gpu/command_buffer/common/cmd_buffer_common.h"
#include "gpu/command_buffer/common/command_buffer_shared.h"
#include "gpu/command_buffer/common/gpu_memory_allocation.h"
#include "gpu/command_buffer/service/image_factory.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gl/gl_bindings.h"

namespace content {

CommandBufferProxyImpl::CommandBufferProxyImpl(GpuChannelHost* channel,
                                               int route_id)
    : lock_(nullptr),
      channel_(channel),
      route_id_(route_id),
      flush_count_(0),
      last_put_offset_(-1),
      last_barrier_put_offset_(-1),
      next_signal_id_(0) {
}

CommandBufferProxyImpl::~CommandBufferProxyImpl() {
  FOR_EACH_OBSERVER(DeletionObserver,
                    deletion_observers_,
                    OnWillDeleteImpl());
}

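// Dispatches IPC messages from the GPU channel. If a client lock has been
// installed via SetLock(), it is taken here so that message handlers run
// under the same lock as direct calls into this proxy.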
bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) {
  scoped_ptr<base::AutoLock> lock;
  if (lock_)
    lock.reset(new base::AutoLock(*lock_));
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(CommandBufferProxyImpl, message)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Destroyed, OnDestroyed);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ConsoleMsg, OnConsoleMessage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetMemoryAllocation,
                        OnSetMemoryAllocation);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPointAck,
                        OnSignalSyncPointAck);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SwapBuffersCompleted,
                        OnSwapBuffersCompleted);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_UpdateVSyncParameters,
                        OnUpdateVSyncParameters);
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  DCHECK(handled);
  return handled;
}

void CommandBufferProxyImpl::OnChannelError() {
  scoped_ptr<base::AutoLock> lock;
  if (lock_)
    lock.reset(new base::AutoLock(*lock_));

  gpu::error::ContextLostReason context_lost_reason =
      gpu::error::kGpuChannelLost;
  if (shared_state_shm_ && shared_state_shm_->memory()) {
    TryUpdateState();
    // The GPU process might have intentionally been crashed
    // (exit_on_context_lost), so try to find out the original reason.
    if (last_state_.error == gpu::error::kLostContext)
      context_lost_reason = last_state_.context_lost_reason;
  }
  OnDestroyed(context_lost_reason, gpu::error::kLostContext);
}

void CommandBufferProxyImpl::OnDestroyed(gpu::error::ContextLostReason reason,
                                         gpu::error::Error error) {
  CheckLock();
  // Prevent any further messages from being sent.
  channel_ = NULL;

  // When the client sees that the context is lost, they should delete this
  // CommandBufferProxyImpl and create a new one.
  last_state_.error = error;
  last_state_.context_lost_reason = reason;

  if (!context_lost_callback_.is_null()) {
    context_lost_callback_.Run();
    // Avoid calling the error callback more than once.
    context_lost_callback_.Reset();
  }
}

void CommandBufferProxyImpl::OnConsoleMessage(
    const GPUCommandBufferConsoleMessage& message) {
  if (!console_message_callback_.is_null()) {
    console_message_callback_.Run(message.message, message.id);
  }
}

void CommandBufferProxyImpl::SetMemoryAllocationChangedCallback(
    const MemoryAllocationChangedCallback& callback) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return;

  memory_allocation_changed_callback_ = callback;
  Send(new GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback(
      route_id_, !memory_allocation_changed_callback_.is_null()));
}

void CommandBufferProxyImpl::AddDeletionObserver(DeletionObserver* observer) {
  CheckLock();
  deletion_observers_.AddObserver(observer);
}

void CommandBufferProxyImpl::RemoveDeletionObserver(
    DeletionObserver* observer) {
  CheckLock();
  deletion_observers_.RemoveObserver(observer);
}

void CommandBufferProxyImpl::OnSetMemoryAllocation(
    const gpu::MemoryAllocation& allocation) {
  if (!memory_allocation_changed_callback_.is_null())
    memory_allocation_changed_callback_.Run(allocation);
}

void CommandBufferProxyImpl::OnSignalSyncPointAck(uint32 id) {
  SignalTaskMap::iterator it = signal_tasks_.find(id);
  DCHECK(it != signal_tasks_.end());
  base::Closure callback = it->second;
  signal_tasks_.erase(it);
  callback.Run();
}

void CommandBufferProxyImpl::SetContextLostCallback(
    const base::Closure& callback) {
  CheckLock();
  context_lost_callback_ = callback;
}

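// Allocates and maps the shared state memory used by TryUpdateState(), shares
// the handle with the GPU process, and performs the Initialize IPC handshake.
// Returns false on any failure.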
bool CommandBufferProxyImpl::Initialize() {
  TRACE_EVENT0("gpu", "CommandBufferProxyImpl::Initialize");
  shared_state_shm_.reset(channel_->factory()->AllocateSharedMemory(
      sizeof(*shared_state())).release());
  if (!shared_state_shm_)
    return false;

  if (!shared_state_shm_->Map(sizeof(*shared_state())))
    return false;

  shared_state()->Initialize();

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the Initialize IPC below.
  base::SharedMemoryHandle handle =
      channel_->ShareToGpuProcess(shared_state_shm_->handle());
  if (!base::SharedMemory::IsHandleValid(handle))
    return false;

  bool result = false;
  if (!Send(new GpuCommandBufferMsg_Initialize(
          route_id_, handle, &result, &capabilities_))) {
    LOG(ERROR) << "Could not send GpuCommandBufferMsg_Initialize.";
    return false;
  }

  if (!result) {
    LOG(ERROR) << "Failed to initialize command buffer service.";
    return false;
  }

  capabilities_.image = true;

  return true;
}

gpu::CommandBuffer::State CommandBufferProxyImpl::GetLastState() {
  return last_state_;
}

int32 CommandBufferProxyImpl::GetLastToken() {
  TryUpdateState();
  return last_state_.token;
}

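// Flush and OrderingBarrier (below) both advance the put offset through
// GpuChannelHost::OrderingBarrier; Flush passes true as the final argument
// while OrderingBarrier passes false, and only Flush updates last_put_offset_.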
void CommandBufferProxyImpl::Flush(int32 put_offset) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return;

  TRACE_EVENT1("gpu",
               "CommandBufferProxyImpl::Flush",
               "put_offset",
               put_offset);

  bool put_offset_changed = last_put_offset_ != put_offset;
  last_put_offset_ = put_offset;
  last_barrier_put_offset_ = put_offset;

  if (channel_) {
    channel_->OrderingBarrier(route_id_, put_offset, ++flush_count_,
                              latency_info_, put_offset_changed, true);
  }

  if (put_offset_changed)
    latency_info_.clear();
}

void CommandBufferProxyImpl::OrderingBarrier(int32 put_offset) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  TRACE_EVENT1("gpu", "CommandBufferProxyImpl::OrderingBarrier", "put_offset",
               put_offset);

  bool put_offset_changed = last_barrier_put_offset_ != put_offset;
  last_barrier_put_offset_ = put_offset;

  if (channel_) {
    channel_->OrderingBarrier(route_id_, put_offset, ++flush_count_,
                              latency_info_, put_offset_changed, false);
  }

  if (put_offset_changed)
    latency_info_.clear();
}

void CommandBufferProxyImpl::SetLatencyInfo(
    const std::vector<ui::LatencyInfo>& latency_info) {
  CheckLock();
  for (size_t i = 0; i < latency_info.size(); i++)
    latency_info_.push_back(latency_info[i]);
}

void CommandBufferProxyImpl::SetSwapBuffersCompletionCallback(
    const SwapBuffersCompletionCallback& callback) {
  CheckLock();
  swap_buffers_completion_callback_ = callback;
}

void CommandBufferProxyImpl::SetUpdateVSyncParametersCallback(
    const UpdateVSyncParametersCallback& callback) {
  CheckLock();
  update_vsync_parameters_completion_callback_ = callback;
}

void CommandBufferProxyImpl::WaitForTokenInRange(int32 start, int32 end) {
  CheckLock();
  TRACE_EVENT2("gpu",
               "CommandBufferProxyImpl::WaitForToken",
               "start",
               start,
               "end",
               end);
  TryUpdateState();
  if (!InRange(start, end, last_state_.token) &&
      last_state_.error == gpu::error::kNoError) {
    gpu::CommandBuffer::State state;
    if (Send(new GpuCommandBufferMsg_WaitForTokenInRange(
            route_id_, start, end, &state)))
      OnUpdateState(state);
  }
  DCHECK(InRange(start, end, last_state_.token) ||
         last_state_.error != gpu::error::kNoError);
}

void CommandBufferProxyImpl::WaitForGetOffsetInRange(int32 start, int32 end) {
  CheckLock();
  TRACE_EVENT2("gpu",
               "CommandBufferProxyImpl::WaitForGetOffset",
               "start",
               start,
               "end",
               end);
  TryUpdateState();
  if (!InRange(start, end, last_state_.get_offset) &&
      last_state_.error == gpu::error::kNoError) {
    gpu::CommandBuffer::State state;
    if (Send(new GpuCommandBufferMsg_WaitForGetOffsetInRange(
            route_id_, start, end, &state)))
      OnUpdateState(state);
  }
  DCHECK(InRange(start, end, last_state_.get_offset) ||
         last_state_.error != gpu::error::kNoError);
}

void CommandBufferProxyImpl::SetGetBuffer(int32 shm_id) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_, shm_id));
  last_put_offset_ = -1;
}

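// Allocates shared memory for a transfer buffer, maps it locally, and
// registers it with the GPU process. On success *id receives the new buffer
// id; on failure *id stays -1 and NULL is returned.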
scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer(
    size_t size,
    int32* id) {
  CheckLock();
  *id = -1;

  if (last_state_.error != gpu::error::kNoError)
    return NULL;

  int32 new_id = channel_->ReserveTransferBufferId();

  scoped_ptr<base::SharedMemory> shared_memory(
      channel_->factory()->AllocateSharedMemory(size));
  if (!shared_memory)
    return NULL;

  DCHECK(!shared_memory->memory());
  if (!shared_memory->Map(size))
    return NULL;

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the RegisterTransferBuffer IPC below.
  base::SharedMemoryHandle handle =
      channel_->ShareToGpuProcess(shared_memory->handle());
  if (!base::SharedMemory::IsHandleValid(handle))
    return NULL;

  if (!Send(new GpuCommandBufferMsg_RegisterTransferBuffer(route_id_,
                                                           new_id,
                                                           handle,
                                                           size))) {
    return NULL;
  }

  *id = new_id;
  scoped_refptr<gpu::Buffer> buffer(
      gpu::MakeBufferFromSharedMemory(shared_memory.Pass(), size));
  return buffer;
}

void CommandBufferProxyImpl::DestroyTransferBuffer(int32 id) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id));
}

gpu::Capabilities CommandBufferProxyImpl::GetCapabilities() {
  return capabilities_;
}

int32_t CommandBufferProxyImpl::CreateImage(ClientBuffer buffer,
                                            size_t width,
                                            size_t height,
                                            unsigned internalformat) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return -1;

  int32 new_id = channel_->ReserveImageId();

  gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager =
      channel_->gpu_memory_buffer_manager();
  gfx::GpuMemoryBuffer* gpu_memory_buffer =
      gpu_memory_buffer_manager->GpuMemoryBufferFromClientBuffer(buffer);
  DCHECK(gpu_memory_buffer);

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the CreateImage IPC below.
  bool requires_sync_point = false;
  gfx::GpuMemoryBufferHandle handle =
      channel_->ShareGpuMemoryBufferToGpuProcess(gpu_memory_buffer->GetHandle(),
                                                 &requires_sync_point);

  DCHECK(gpu::ImageFactory::IsGpuMemoryBufferFormatSupported(
      gpu_memory_buffer->GetFormat(), capabilities_));
  DCHECK(gpu::ImageFactory::IsImageSizeValidForGpuMemoryBufferFormat(
      gfx::Size(width, height), gpu_memory_buffer->GetFormat()));
  DCHECK(gpu::ImageFactory::IsImageFormatCompatibleWithGpuMemoryBufferFormat(
      internalformat, gpu_memory_buffer->GetFormat()));
  if (!Send(new GpuCommandBufferMsg_CreateImage(route_id_,
                                                new_id,
                                                handle,
                                                gfx::Size(width, height),
                                                gpu_memory_buffer->GetFormat(),
                                                internalformat))) {
    return -1;
  }

  if (requires_sync_point) {
    gpu_memory_buffer_manager->SetDestructionSyncPoint(gpu_memory_buffer,
                                                       InsertSyncPoint());
  }

  return new_id;
}

void CommandBufferProxyImpl::DestroyImage(int32 id) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_DestroyImage(route_id_, id));
}

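// Convenience wrapper around CreateImage(): allocates a GpuMemoryBuffer of
// the requested size, format and usage through the channel's
// GpuMemoryBufferManager and creates an image backed by it.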
int32_t CommandBufferProxyImpl::CreateGpuMemoryBufferImage(
    size_t width,
    size_t height,
    unsigned internalformat,
    unsigned usage) {
  CheckLock();
  scoped_ptr<gfx::GpuMemoryBuffer> buffer(
      channel_->gpu_memory_buffer_manager()->AllocateGpuMemoryBuffer(
          gfx::Size(width, height),
          gpu::ImageFactory::ImageFormatToGpuMemoryBufferFormat(internalformat),
          gpu::ImageFactory::ImageUsageToGpuMemoryBufferUsage(usage)));
  if (!buffer)
    return -1;

  return CreateImage(buffer->AsClientBuffer(), width, height, internalformat);
}

int CommandBufferProxyImpl::GetRouteID() const {
  return route_id_;
}

uint32 CommandBufferProxyImpl::CreateStreamTexture(uint32 texture_id) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return 0;

  int32 stream_id = channel_->GenerateRouteID();
  bool succeeded = false;
  Send(new GpuCommandBufferMsg_CreateStreamTexture(
      route_id_, texture_id, stream_id, &succeeded));
  if (!succeeded) {
    DLOG(ERROR) << "GpuCommandBufferMsg_CreateStreamTexture returned failure";
    return 0;
  }
  return stream_id;
}

void CommandBufferProxyImpl::SetLock(base::Lock* lock) {
  lock_ = lock;
}

bool CommandBufferProxyImpl::IsGpuChannelLost() {
  return !channel_ || channel_->IsLost();
}

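// InsertSyncPoint and InsertFutureSyncPoint differ only in the boolean sent
// with GpuCommandBufferMsg_InsertSyncPoint (true and false respectively); a
// sync point inserted with false can later be retired explicitly via
// RetireSyncPoint() below.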
uint32 CommandBufferProxyImpl::InsertSyncPoint() {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return 0;

  uint32 sync_point = 0;
  Send(new GpuCommandBufferMsg_InsertSyncPoint(route_id_, true, &sync_point));
  return sync_point;
}

uint32_t CommandBufferProxyImpl::InsertFutureSyncPoint() {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return 0;

  uint32 sync_point = 0;
  Send(new GpuCommandBufferMsg_InsertSyncPoint(route_id_, false, &sync_point));
  return sync_point;
}

void CommandBufferProxyImpl::RetireSyncPoint(uint32_t sync_point) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_RetireSyncPoint(route_id_, sync_point));
}

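// Registers |callback| to run once the GPU process reaches |sync_point|. The
// callback is stored in signal_tasks_ keyed by a locally generated signal id
// and is invoked from OnSignalSyncPointAck().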
void CommandBufferProxyImpl::SignalSyncPoint(uint32 sync_point,
                                             const base::Closure& callback) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return;

  uint32 signal_id = next_signal_id_++;
  if (!Send(new GpuCommandBufferMsg_SignalSyncPoint(route_id_,
                                                    sync_point,
                                                    signal_id))) {
    return;
  }

  signal_tasks_.insert(std::make_pair(signal_id, callback));
}

void CommandBufferProxyImpl::SignalQuery(uint32 query,
                                         const base::Closure& callback) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return;

  // Signal identifiers are hidden, so nobody outside of this class will see
  // them. (And thus, they cannot save them.) The IDs themselves only last
  // until the callback is invoked, which will happen as soon as the GPU
  // catches up with the command buffer.
  // A malicious caller trying to create a collision by making next_signal_id
  // wrap would have to make calls at an astounding rate (300B/s), and even if
  // they could do that, all they would do is prevent some callbacks from
  // getting called, leading to stalled threads and/or memory leaks.
  uint32 signal_id = next_signal_id_++;
  if (!Send(new GpuCommandBufferMsg_SignalQuery(route_id_,
                                                query,
                                                signal_id))) {
    return;
  }

  signal_tasks_.insert(std::make_pair(signal_id, callback));
}

void CommandBufferProxyImpl::SetSurfaceVisible(bool visible) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_SetSurfaceVisible(route_id_, visible));
}

bool CommandBufferProxyImpl::ProduceFrontBuffer(const gpu::Mailbox& mailbox) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return false;

  return Send(new GpuCommandBufferMsg_ProduceFrontBuffer(route_id_, mailbox));
}

scoped_ptr<media::VideoDecodeAccelerator>
CommandBufferProxyImpl::CreateVideoDecoder() {
  if (!channel_)
    return scoped_ptr<media::VideoDecodeAccelerator>();
  return scoped_ptr<media::VideoDecodeAccelerator>(
      new GpuVideoDecodeAcceleratorHost(channel_, this));
}

scoped_ptr<media::VideoEncodeAccelerator>
CommandBufferProxyImpl::CreateVideoEncoder() {
  if (!channel_)
    return scoped_ptr<media::VideoEncodeAccelerator>();
  return scoped_ptr<media::VideoEncodeAccelerator>(
      new GpuVideoEncodeAcceleratorHost(channel_, this));
}

gpu::error::Error CommandBufferProxyImpl::GetLastError() {
  return last_state_.error;
}

bool CommandBufferProxyImpl::Send(IPC::Message* msg) {
  // Caller should not intentionally send a message if the context is lost.
  DCHECK(last_state_.error == gpu::error::kNoError);

  if (channel_) {
    if (channel_->Send(msg)) {
      return true;
    } else {
      // Flag the command buffer as lost. Defer deleting the channel until
      // OnChannelError is called after returning to the message loop in case
      // it is referenced elsewhere.
      DVLOG(1) << "CommandBufferProxyImpl::Send failed. Losing context.";
      last_state_.error = gpu::error::kLostContext;
      return false;
    }
  }

  // Callee takes ownership of message, regardless of whether Send is
  // successful. See IPC::Sender.
  delete msg;
  return false;
}

void CommandBufferProxyImpl::OnUpdateState(
    const gpu::CommandBuffer::State& state) {
  // Handle wraparound. It works as long as we don't have more than 2B state
  // updates in flight across which reordering occurs.
  if (state.generation - last_state_.generation < 0x80000000U)
    last_state_ = state;
}

void CommandBufferProxyImpl::SetOnConsoleMessageCallback(
    const GpuConsoleMessageCallback& callback) {
  CheckLock();
  console_message_callback_ = callback;
}

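// Refreshes last_state_ from the shared-memory state block, but only while no
// error has been recorded: once the context is lost, the error state is
// treated as sticky and is not overwritten by a stale snapshot.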
void CommandBufferProxyImpl::TryUpdateState() {
  if (last_state_.error == gpu::error::kNoError)
    shared_state()->Read(&last_state_);
}

gpu::CommandBufferSharedState* CommandBufferProxyImpl::shared_state() const {
  return reinterpret_cast<gpu::CommandBufferSharedState*>(
      shared_state_shm_->memory());
}

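// Forwards the swap acknowledgement to the client. If the LatencyInfo fails
// verification, an empty vector is passed to the callback instead.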
void CommandBufferProxyImpl::OnSwapBuffersCompleted(
    const std::vector<ui::LatencyInfo>& latency_info,
    gfx::SwapResult result) {
  if (!swap_buffers_completion_callback_.is_null()) {
    if (!ui::LatencyInfo::Verify(
            latency_info, "CommandBufferProxyImpl::OnSwapBuffersCompleted")) {
      swap_buffers_completion_callback_.Run(std::vector<ui::LatencyInfo>(),
                                            result);
      return;
    }
    swap_buffers_completion_callback_.Run(latency_info, result);
  }
}

void CommandBufferProxyImpl::OnUpdateVSyncParameters(base::TimeTicks timebase,
                                                     base::TimeDelta interval) {
  if (!update_vsync_parameters_completion_callback_.is_null())
    update_vsync_parameters_completion_callback_.Run(timebase, interval);
}

}  // namespace content