ppapi/proxy/ppapi_command_buffer_proxy.cc
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "ppapi/proxy/ppapi_command_buffer_proxy.h"

#include "ppapi/proxy/ppapi_messages.h"
#include "ppapi/proxy/proxy_channel.h"
#include "ppapi/shared_impl/api_id.h"
#include "ppapi/shared_impl/host_resource.h"

namespace ppapi {
namespace proxy {

PpapiCommandBufferProxy::PpapiCommandBufferProxy(
    const ppapi::HostResource& resource,
    ProxyChannel* channel)
    : resource_(resource),
      channel_(channel) {
}

PpapiCommandBufferProxy::~PpapiCommandBufferProxy() {
  // Delete all the locally cached shared memory objects, closing the handle
  // in this process.
  for (TransferBufferMap::iterator it = transfer_buffers_.begin();
       it != transfer_buffers_.end(); ++it) {
    delete it->second.shared_memory;
    it->second.shared_memory = NULL;
  }
}

void PpapiCommandBufferProxy::ReportChannelError() {
  if (!channel_error_callback_.is_null()) {
    channel_error_callback_.Run();
    channel_error_callback_.Reset();
  }
}

int PpapiCommandBufferProxy::GetRouteID() const {
  NOTIMPLEMENTED();
  return 0;
}

bool PpapiCommandBufferProxy::Echo(const base::Closure& callback) {
  return false;
}

bool PpapiCommandBufferProxy::ProduceFrontBuffer(const gpu::Mailbox& mailbox) {
  NOTIMPLEMENTED();
  return false;
}

void PpapiCommandBufferProxy::SetChannelErrorCallback(
    const base::Closure& callback) {
  channel_error_callback_ = callback;
}

bool PpapiCommandBufferProxy::Initialize() {
  return true;
}

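// Queries the current command buffer state from the host with a blocking IPC
// and caches the reply in last_state_.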
gpu::CommandBuffer::State PpapiCommandBufferProxy::GetState() {
  // Send will flag state with lost context if IPC fails.
  if (last_state_.error == gpu::error::kNoError) {
    gpu::CommandBuffer::State state;
    bool success = false;
    if (Send(new PpapiHostMsg_PPBGraphics3D_GetState(
            ppapi::API_ID_PPB_GRAPHICS_3D, resource_, &state, &success))) {
      UpdateState(state, success);
    }
  }

  return last_state_;
}

gpu::CommandBuffer::State PpapiCommandBufferProxy::GetLastState() {
  // Note: The locking command buffer wrapper does not take a global lock
  // before calling this function.
  return last_state_;
}

int32 PpapiCommandBufferProxy::GetLastToken() {
  // Note: The locking command buffer wrapper does not take a global lock
  // before calling this function.
  return last_state_.token;
}

void PpapiCommandBufferProxy::Flush(int32 put_offset) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  IPC::Message* message = new PpapiHostMsg_PPBGraphics3D_AsyncFlush(
      ppapi::API_ID_PPB_GRAPHICS_3D, resource_, put_offset);

  // Do not let a synchronous flush hold up this message. If this handler is
  // deferred until after the synchronous flush completes, it will overwrite
  // the cached last_state_ with out-of-date data.
  message->set_unblock(true);
  Send(message);
}

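// If the service has not advanced past |last_known_get|, flush synchronously
// so that fresh state is returned to the caller; otherwise an asynchronous
// flush is sufficient.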
gpu::CommandBuffer::State PpapiCommandBufferProxy::FlushSync(
    int32 put_offset,
    int32 last_known_get) {
  if (last_known_get == last_state_.get_offset) {
    // Send will flag state with lost context if IPC fails.
    if (last_state_.error == gpu::error::kNoError) {
      gpu::CommandBuffer::State state;
      bool success = false;
      if (Send(new PpapiHostMsg_PPBGraphics3D_Flush(
              ppapi::API_ID_PPB_GRAPHICS_3D, resource_, put_offset,
              last_known_get, &state, &success))) {
        UpdateState(state, success);
      }
    }
  } else {
    Flush(put_offset);
  }

  return last_state_;
}

void PpapiCommandBufferProxy::SetGetBuffer(int32 transfer_buffer_id) {
  if (last_state_.error == gpu::error::kNoError) {
    Send(new PpapiHostMsg_PPBGraphics3D_SetGetBuffer(
        ppapi::API_ID_PPB_GRAPHICS_3D, resource_, transfer_buffer_id));
  }
}

void PpapiCommandBufferProxy::SetGetOffset(int32 get_offset) {
  // Not implemented in proxy.
  NOTREACHED();
}

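// Asks the host to create a transfer buffer of |size| bytes. On success the
// new id is stored in |*id| and the mapped buffer is returned; on failure
// |*id| is -1 and an empty gpu::Buffer is returned.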
gpu::Buffer PpapiCommandBufferProxy::CreateTransferBuffer(size_t size,
                                                          int32* id) {
  *id = -1;

  if (last_state_.error != gpu::error::kNoError)
    return gpu::Buffer();

  if (!Send(new PpapiHostMsg_PPBGraphics3D_CreateTransferBuffer(
          ppapi::API_ID_PPB_GRAPHICS_3D, resource_, size, id))) {
    return gpu::Buffer();
  }

  if ((*id) <= 0)
    return gpu::Buffer();

  return GetTransferBuffer(*id);
}

void PpapiCommandBufferProxy::DestroyTransferBuffer(int32 id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  // Remove the transfer buffer from the client side cache.
  TransferBufferMap::iterator it = transfer_buffers_.find(id);

  if (it != transfer_buffers_.end()) {
    // Delete the shared memory object, closing the handle in this process.
    delete it->second.shared_memory;

    transfer_buffers_.erase(it);
  }

  Send(new PpapiHostMsg_PPBGraphics3D_DestroyTransferBuffer(
      ppapi::API_ID_PPB_GRAPHICS_3D, resource_, id));
}

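// Returns the client-side gpu::Buffer for |id|. The backing shared memory is
// fetched from the host and mapped on first use, then cached in
// transfer_buffers_ for subsequent lookups.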
gpu::Buffer PpapiCommandBufferProxy::GetTransferBuffer(int32 id) {
  if (last_state_.error != gpu::error::kNoError)
    return gpu::Buffer();

  // Check local cache to see if there is already a client side shared memory
  // object for this id.
  TransferBufferMap::iterator it = transfer_buffers_.find(id);
  if (it != transfer_buffers_.end()) {
    return it->second;
  }

  // Assuming we are in the renderer process, the service is responsible for
  // duplicating the handle. This might not be true for NaCl.
  ppapi::proxy::SerializedHandle handle(
      ppapi::proxy::SerializedHandle::SHARED_MEMORY);
  if (!Send(new PpapiHostMsg_PPBGraphics3D_GetTransferBuffer(
          ppapi::API_ID_PPB_GRAPHICS_3D, resource_, id, &handle))) {
    return gpu::Buffer();
  }
  if (!handle.is_shmem())
    return gpu::Buffer();

  // Cache the transfer buffer shared memory object client side.
  scoped_ptr<base::SharedMemory> shared_memory(
      new base::SharedMemory(handle.shmem(), false));

  // Map the shared memory on demand.
  if (!shared_memory->memory()) {
    if (!shared_memory->Map(handle.size())) {
      return gpu::Buffer();
    }
  }

  gpu::Buffer buffer;
  buffer.ptr = shared_memory->memory();
  buffer.size = handle.size();
  buffer.shared_memory = shared_memory.release();
  transfer_buffers_[id] = buffer;

  return buffer;
}

void PpapiCommandBufferProxy::SetToken(int32 token) {
  NOTREACHED();
}

void PpapiCommandBufferProxy::SetParseError(gpu::error::Error error) {
  NOTREACHED();
}

void PpapiCommandBufferProxy::SetContextLostReason(
    gpu::error::ContextLostReason reason) {
  NOTREACHED();
}

uint32 PpapiCommandBufferProxy::InsertSyncPoint() {
  uint32 sync_point = 0;
  if (last_state_.error == gpu::error::kNoError) {
    Send(new PpapiHostMsg_PPBGraphics3D_InsertSyncPoint(
        ppapi::API_ID_PPB_GRAPHICS_3D, resource_, &sync_point));
  }
  return sync_point;
}

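// Sends |msg| over the proxy channel. A failed send marks the context as
// lost, which short-circuits subsequent operations on this command buffer.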
bool PpapiCommandBufferProxy::Send(IPC::Message* msg) {
  DCHECK(last_state_.error == gpu::error::kNoError);

  if (channel_->Send(msg))
    return true;

  last_state_.error = gpu::error::kLostContext;
  return false;
}

void PpapiCommandBufferProxy::UpdateState(
    const gpu::CommandBuffer::State& state,
    bool success) {
  // Handle wraparound. It works as long as we don't have more than 2B state
  // updates in flight across which reordering occurs.
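  // Illustrative example (assuming unsigned 32-bit generation counters): if
  // last_state_.generation is 0xFFFFFFFFu and a newer state arrives with
  // generation 0x00000001u, the unsigned difference is 2 (< 0x80000000U), so
  // the update is accepted despite the wraparound. A stale state with
  // generation 0xFFFFFFFEu arriving after last_state_.generation has advanced
  // to 0x00000001u yields a difference of 0xFFFFFFFDu (>= 0x80000000U), so it
  // is ignored.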
  if (success) {
    if (state.generation - last_state_.generation < 0x80000000U) {
      last_state_ = state;
    }
  } else {
    last_state_.error = gpu::error::kLostContext;
    ++last_state_.generation;
  }
}

258 } // namespace proxy
259 } // namespace ppapi