// content/common/gpu/client/command_buffer_proxy_impl.cc
// Extracted from chromium-blink-merge.git,
// blob 7b7961f500923be427d8929ead36cd6b4d34f6a7.
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "content/common/gpu/client/command_buffer_proxy_impl.h"
7 #include "base/callback.h"
8 #include "base/debug/trace_event.h"
9 #include "base/logging.h"
10 #include "base/memory/shared_memory.h"
11 #include "base/stl_util.h"
12 #include "content/common/child_process_messages.h"
13 #include "content/common/gpu/client/gpu_channel_host.h"
14 #include "content/common/gpu/client/gpu_video_decode_accelerator_host.h"
15 #include "content/common/gpu/client/gpu_video_encode_accelerator_host.h"
16 #include "content/common/gpu/gpu_messages.h"
17 #include "content/common/view_messages.h"
18 #include "gpu/command_buffer/common/cmd_buffer_common.h"
19 #include "gpu/command_buffer/common/command_buffer_shared.h"
20 #include "gpu/command_buffer/common/gpu_memory_allocation.h"
21 #include "ui/gfx/size.h"
23 namespace content {
25 CommandBufferProxyImpl::CommandBufferProxyImpl(
26 GpuChannelHost* channel,
27 int route_id)
28 : channel_(channel),
29 route_id_(route_id),
30 flush_count_(0),
31 last_put_offset_(-1),
32 next_signal_id_(0) {
35 CommandBufferProxyImpl::~CommandBufferProxyImpl() {
36 FOR_EACH_OBSERVER(DeletionObserver,
37 deletion_observers_,
38 OnWillDeleteImpl());
41 bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) {
42 bool handled = true;
43 IPC_BEGIN_MESSAGE_MAP(CommandBufferProxyImpl, message)
44 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Destroyed, OnDestroyed);
45 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_EchoAck, OnEchoAck);
46 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ConsoleMsg, OnConsoleMessage);
47 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetMemoryAllocation,
48 OnSetMemoryAllocation);
49 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPointAck,
50 OnSignalSyncPointAck);
51 IPC_MESSAGE_UNHANDLED(handled = false)
52 IPC_END_MESSAGE_MAP()
54 DCHECK(handled);
55 return handled;
58 void CommandBufferProxyImpl::OnChannelError() {
59 OnDestroyed(gpu::error::kUnknown);
62 void CommandBufferProxyImpl::OnDestroyed(gpu::error::ContextLostReason reason) {
63 // Prevent any further messages from being sent.
64 channel_ = NULL;
66 // When the client sees that the context is lost, they should delete this
67 // CommandBufferProxyImpl and create a new one.
68 last_state_.error = gpu::error::kLostContext;
69 last_state_.context_lost_reason = reason;
71 if (!channel_error_callback_.is_null()) {
72 channel_error_callback_.Run();
73 // Avoid calling the error callback more than once.
74 channel_error_callback_.Reset();
78 void CommandBufferProxyImpl::OnEchoAck() {
79 DCHECK(!echo_tasks_.empty());
80 base::Closure callback = echo_tasks_.front();
81 echo_tasks_.pop();
82 callback.Run();
85 void CommandBufferProxyImpl::OnConsoleMessage(
86 const GPUCommandBufferConsoleMessage& message) {
87 if (!console_message_callback_.is_null()) {
88 console_message_callback_.Run(message.message, message.id);
92 void CommandBufferProxyImpl::SetMemoryAllocationChangedCallback(
93 const MemoryAllocationChangedCallback& callback) {
94 if (last_state_.error != gpu::error::kNoError)
95 return;
97 memory_allocation_changed_callback_ = callback;
98 Send(new GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback(
99 route_id_, !memory_allocation_changed_callback_.is_null()));
102 void CommandBufferProxyImpl::AddDeletionObserver(DeletionObserver* observer) {
103 deletion_observers_.AddObserver(observer);
106 void CommandBufferProxyImpl::RemoveDeletionObserver(
107 DeletionObserver* observer) {
108 deletion_observers_.RemoveObserver(observer);
111 void CommandBufferProxyImpl::OnSetMemoryAllocation(
112 const gpu::MemoryAllocation& allocation) {
113 if (!memory_allocation_changed_callback_.is_null())
114 memory_allocation_changed_callback_.Run(allocation);
117 void CommandBufferProxyImpl::OnSignalSyncPointAck(uint32 id) {
118 SignalTaskMap::iterator it = signal_tasks_.find(id);
119 DCHECK(it != signal_tasks_.end());
120 base::Closure callback = it->second;
121 signal_tasks_.erase(it);
122 callback.Run();
125 void CommandBufferProxyImpl::SetChannelErrorCallback(
126 const base::Closure& callback) {
127 channel_error_callback_ = callback;
130 bool CommandBufferProxyImpl::Initialize() {
131 TRACE_EVENT0("gpu", "CommandBufferProxyImpl::Initialize");
132 shared_state_shm_.reset(channel_->factory()->AllocateSharedMemory(
133 sizeof(*shared_state())).release());
134 if (!shared_state_shm_)
135 return false;
137 if (!shared_state_shm_->Map(sizeof(*shared_state())))
138 return false;
140 shared_state()->Initialize();
142 // This handle is owned by the GPU process and must be passed to it or it
143 // will leak. In otherwords, do not early out on error between here and the
144 // sending of the Initialize IPC below.
145 base::SharedMemoryHandle handle =
146 channel_->ShareToGpuProcess(shared_state_shm_->handle());
147 if (!base::SharedMemory::IsHandleValid(handle))
148 return false;
150 bool result = false;
151 if (!Send(new GpuCommandBufferMsg_Initialize(
152 route_id_, handle, &result, &capabilities_))) {
153 LOG(ERROR) << "Could not send GpuCommandBufferMsg_Initialize.";
154 return false;
157 if (!result) {
158 LOG(ERROR) << "Failed to initialize command buffer service.";
159 return false;
162 capabilities_.map_image = true;
164 return true;
167 gpu::CommandBuffer::State CommandBufferProxyImpl::GetLastState() {
168 return last_state_;
171 int32 CommandBufferProxyImpl::GetLastToken() {
172 TryUpdateState();
173 return last_state_.token;
176 void CommandBufferProxyImpl::Flush(int32 put_offset) {
177 if (last_state_.error != gpu::error::kNoError)
178 return;
180 TRACE_EVENT1("gpu",
181 "CommandBufferProxyImpl::Flush",
182 "put_offset",
183 put_offset);
185 if (last_put_offset_ == put_offset)
186 return;
188 last_put_offset_ = put_offset;
190 Send(new GpuCommandBufferMsg_AsyncFlush(route_id_,
191 put_offset,
192 ++flush_count_));
195 void CommandBufferProxyImpl::SetLatencyInfo(
196 const std::vector<ui::LatencyInfo>& latency_info) {
197 if (last_state_.error != gpu::error::kNoError)
198 return;
199 Send(new GpuCommandBufferMsg_SetLatencyInfo(route_id_, latency_info));
202 void CommandBufferProxyImpl::WaitForTokenInRange(int32 start, int32 end) {
203 TRACE_EVENT2("gpu",
204 "CommandBufferProxyImpl::WaitForToken",
205 "start",
206 start,
207 "end",
208 end);
209 TryUpdateState();
210 if (!InRange(start, end, last_state_.token) &&
211 last_state_.error == gpu::error::kNoError) {
212 gpu::CommandBuffer::State state;
213 if (Send(new GpuCommandBufferMsg_WaitForTokenInRange(
214 route_id_, start, end, &state)))
215 OnUpdateState(state);
217 DCHECK(InRange(start, end, last_state_.token) ||
218 last_state_.error != gpu::error::kNoError);
221 void CommandBufferProxyImpl::WaitForGetOffsetInRange(int32 start, int32 end) {
222 TRACE_EVENT2("gpu",
223 "CommandBufferProxyImpl::WaitForGetOffset",
224 "start",
225 start,
226 "end",
227 end);
228 TryUpdateState();
229 if (!InRange(start, end, last_state_.get_offset) &&
230 last_state_.error == gpu::error::kNoError) {
231 gpu::CommandBuffer::State state;
232 if (Send(new GpuCommandBufferMsg_WaitForGetOffsetInRange(
233 route_id_, start, end, &state)))
234 OnUpdateState(state);
236 DCHECK(InRange(start, end, last_state_.get_offset) ||
237 last_state_.error != gpu::error::kNoError);
240 void CommandBufferProxyImpl::SetGetBuffer(int32 shm_id) {
241 if (last_state_.error != gpu::error::kNoError)
242 return;
244 Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_, shm_id));
245 last_put_offset_ = -1;
248 scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer(
249 size_t size,
250 int32* id) {
251 *id = -1;
253 if (last_state_.error != gpu::error::kNoError)
254 return NULL;
256 int32 new_id = channel_->ReserveTransferBufferId();
258 scoped_ptr<base::SharedMemory> shared_memory(
259 channel_->factory()->AllocateSharedMemory(size));
260 if (!shared_memory)
261 return NULL;
263 DCHECK(!shared_memory->memory());
264 if (!shared_memory->Map(size))
265 return NULL;
267 // This handle is owned by the GPU process and must be passed to it or it
268 // will leak. In otherwords, do not early out on error between here and the
269 // sending of the RegisterTransferBuffer IPC below.
270 base::SharedMemoryHandle handle =
271 channel_->ShareToGpuProcess(shared_memory->handle());
272 if (!base::SharedMemory::IsHandleValid(handle))
273 return NULL;
275 if (!Send(new GpuCommandBufferMsg_RegisterTransferBuffer(route_id_,
276 new_id,
277 handle,
278 size))) {
279 return NULL;
282 *id = new_id;
283 scoped_refptr<gpu::Buffer> buffer(
284 gpu::MakeBufferFromSharedMemory(shared_memory.Pass(), size));
285 return buffer;
288 void CommandBufferProxyImpl::DestroyTransferBuffer(int32 id) {
289 if (last_state_.error != gpu::error::kNoError)
290 return;
292 Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id));
295 gpu::Capabilities CommandBufferProxyImpl::GetCapabilities() {
296 return capabilities_;
299 gfx::GpuMemoryBuffer* CommandBufferProxyImpl::CreateGpuMemoryBuffer(
300 size_t width,
301 size_t height,
302 unsigned internalformat,
303 unsigned usage,
304 int32* id) {
305 *id = -1;
307 if (last_state_.error != gpu::error::kNoError)
308 return NULL;
310 int32 new_id = channel_->ReserveGpuMemoryBufferId();
311 DCHECK(gpu_memory_buffers_.find(new_id) == gpu_memory_buffers_.end());
313 scoped_ptr<gfx::GpuMemoryBuffer> gpu_memory_buffer(
314 channel_->factory()->AllocateGpuMemoryBuffer(
315 width, height, internalformat, usage));
316 if (!gpu_memory_buffer)
317 return NULL;
319 DCHECK(GpuChannelHost::IsValidGpuMemoryBuffer(
320 gpu_memory_buffer->GetHandle()));
322 // This handle is owned by the GPU process and must be passed to it or it
323 // will leak. In otherwords, do not early out on error between here and the
324 // sending of the RegisterGpuMemoryBuffer IPC below.
325 gfx::GpuMemoryBufferHandle handle =
326 channel_->ShareGpuMemoryBufferToGpuProcess(
327 gpu_memory_buffer->GetHandle());
329 if (!Send(new GpuCommandBufferMsg_RegisterGpuMemoryBuffer(
330 route_id_,
331 new_id,
332 handle,
333 width,
334 height,
335 internalformat))) {
336 return NULL;
339 *id = new_id;
340 gpu_memory_buffers_[new_id] = gpu_memory_buffer.release();
341 return gpu_memory_buffers_[new_id];
344 void CommandBufferProxyImpl::DestroyGpuMemoryBuffer(int32 id) {
345 if (last_state_.error != gpu::error::kNoError)
346 return;
348 // Remove the gpu memory buffer from the client side cache.
349 GpuMemoryBufferMap::iterator it = gpu_memory_buffers_.find(id);
350 if (it != gpu_memory_buffers_.end()) {
351 delete it->second;
352 gpu_memory_buffers_.erase(it);
355 Send(new GpuCommandBufferMsg_DestroyGpuMemoryBuffer(route_id_, id));
358 int CommandBufferProxyImpl::GetRouteID() const {
359 return route_id_;
362 void CommandBufferProxyImpl::Echo(const base::Closure& callback) {
363 if (last_state_.error != gpu::error::kNoError) {
364 return;
367 if (!Send(new GpuCommandBufferMsg_Echo(
368 route_id_, GpuCommandBufferMsg_EchoAck(route_id_)))) {
369 return;
372 echo_tasks_.push(callback);
375 uint32 CommandBufferProxyImpl::CreateStreamTexture(uint32 texture_id) {
376 if (last_state_.error != gpu::error::kNoError)
377 return 0;
379 int32 stream_id = channel_->GenerateRouteID();
380 bool succeeded = false;
381 Send(new GpuCommandBufferMsg_CreateStreamTexture(
382 route_id_, texture_id, stream_id, &succeeded));
383 if (!succeeded) {
384 DLOG(ERROR) << "GpuCommandBufferMsg_CreateStreamTexture returned failure";
385 return 0;
387 return stream_id;
390 uint32 CommandBufferProxyImpl::InsertSyncPoint() {
391 if (last_state_.error != gpu::error::kNoError)
392 return 0;
394 uint32 sync_point = 0;
395 Send(new GpuCommandBufferMsg_InsertSyncPoint(route_id_, true, &sync_point));
396 return sync_point;
399 uint32_t CommandBufferProxyImpl::InsertFutureSyncPoint() {
400 if (last_state_.error != gpu::error::kNoError)
401 return 0;
403 uint32 sync_point = 0;
404 Send(new GpuCommandBufferMsg_InsertSyncPoint(route_id_, false, &sync_point));
405 return sync_point;
408 void CommandBufferProxyImpl::RetireSyncPoint(uint32_t sync_point) {
409 if (last_state_.error != gpu::error::kNoError)
410 return;
412 Send(new GpuCommandBufferMsg_RetireSyncPoint(route_id_, sync_point));
415 void CommandBufferProxyImpl::SignalSyncPoint(uint32 sync_point,
416 const base::Closure& callback) {
417 if (last_state_.error != gpu::error::kNoError)
418 return;
420 uint32 signal_id = next_signal_id_++;
421 if (!Send(new GpuCommandBufferMsg_SignalSyncPoint(route_id_,
422 sync_point,
423 signal_id))) {
424 return;
427 signal_tasks_.insert(std::make_pair(signal_id, callback));
430 void CommandBufferProxyImpl::SignalQuery(uint32 query,
431 const base::Closure& callback) {
432 if (last_state_.error != gpu::error::kNoError)
433 return;
435 // Signal identifiers are hidden, so nobody outside of this class will see
436 // them. (And thus, they cannot save them.) The IDs themselves only last
437 // until the callback is invoked, which will happen as soon as the GPU
438 // catches upwith the command buffer.
439 // A malicious caller trying to create a collision by making next_signal_id
440 // would have to make calls at an astounding rate (300B/s) and even if they
441 // could do that, all they would do is to prevent some callbacks from getting
442 // called, leading to stalled threads and/or memory leaks.
443 uint32 signal_id = next_signal_id_++;
444 if (!Send(new GpuCommandBufferMsg_SignalQuery(route_id_,
445 query,
446 signal_id))) {
447 return;
450 signal_tasks_.insert(std::make_pair(signal_id, callback));
453 void CommandBufferProxyImpl::SetSurfaceVisible(bool visible) {
454 if (last_state_.error != gpu::error::kNoError)
455 return;
457 Send(new GpuCommandBufferMsg_SetSurfaceVisible(route_id_, visible));
460 bool CommandBufferProxyImpl::ProduceFrontBuffer(const gpu::Mailbox& mailbox) {
461 if (last_state_.error != gpu::error::kNoError)
462 return false;
464 return Send(new GpuCommandBufferMsg_ProduceFrontBuffer(route_id_, mailbox));
467 scoped_ptr<media::VideoDecodeAccelerator>
468 CommandBufferProxyImpl::CreateVideoDecoder() {
469 if (!channel_)
470 return scoped_ptr<media::VideoDecodeAccelerator>();
471 return scoped_ptr<media::VideoDecodeAccelerator>(
472 new GpuVideoDecodeAcceleratorHost(channel_, this));
475 scoped_ptr<media::VideoEncodeAccelerator>
476 CommandBufferProxyImpl::CreateVideoEncoder() {
477 if (!channel_)
478 return scoped_ptr<media::VideoEncodeAccelerator>();
479 return scoped_ptr<media::VideoEncodeAccelerator>(
480 new GpuVideoEncodeAcceleratorHost(channel_, this));
483 gpu::error::Error CommandBufferProxyImpl::GetLastError() {
484 return last_state_.error;
487 bool CommandBufferProxyImpl::Send(IPC::Message* msg) {
488 // Caller should not intentionally send a message if the context is lost.
489 DCHECK(last_state_.error == gpu::error::kNoError);
491 if (channel_) {
492 if (channel_->Send(msg)) {
493 return true;
494 } else {
495 // Flag the command buffer as lost. Defer deleting the channel until
496 // OnChannelError is called after returning to the message loop in case
497 // it is referenced elsewhere.
498 DVLOG(1) << "CommandBufferProxyImpl::Send failed. Losing context.";
499 last_state_.error = gpu::error::kLostContext;
500 return false;
504 // Callee takes ownership of message, regardless of whether Send is
505 // successful. See IPC::Sender.
506 delete msg;
507 return false;
510 void CommandBufferProxyImpl::OnUpdateState(
511 const gpu::CommandBuffer::State& state) {
512 // Handle wraparound. It works as long as we don't have more than 2B state
513 // updates in flight across which reordering occurs.
514 if (state.generation - last_state_.generation < 0x80000000U)
515 last_state_ = state;
518 void CommandBufferProxyImpl::SetOnConsoleMessageCallback(
519 const GpuConsoleMessageCallback& callback) {
520 console_message_callback_ = callback;
523 void CommandBufferProxyImpl::TryUpdateState() {
524 if (last_state_.error == gpu::error::kNoError)
525 shared_state()->Read(&last_state_);
528 gpu::CommandBufferSharedState* CommandBufferProxyImpl::shared_state() const {
529 return reinterpret_cast<gpu::CommandBufferSharedState*>(
530 shared_state_shm_->memory());
533 } // namespace content