1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "ppapi/proxy/ppapi_command_buffer_proxy.h"
7 #include "base/numerics/safe_conversions.h"
8 #include "ppapi/proxy/ppapi_messages.h"
9 #include "ppapi/shared_impl/api_id.h"
10 #include "ppapi/shared_impl/host_resource.h"
11 #include "ppapi/shared_impl/proxy_lock.h"
// Constructor. Caches the command-buffer id, GPU capabilities and plugin
// dispatcher, wraps and maps the host-supplied shared-state shared memory,
// and points flush_info_ at the per-instance flush bookkeeping fetched from
// the dispatcher's InstanceData for this resource's instance.
// NOTE(review): this excerpt is line-wrapped and appears to elide one member
// initializer (between capabilities_ and dispatcher_) and the closing brace;
// confirm against the full file.
16 PpapiCommandBufferProxy::PpapiCommandBufferProxy(
17 const ppapi::HostResource
& resource
,
18 PluginDispatcher
* dispatcher
,
19 const gpu::Capabilities
& capabilities
,
20 const SerializedHandle
& shared_state
,
21 uint64_t command_buffer_id
)
22 : command_buffer_id_(command_buffer_id
),
23 capabilities_(capabilities
),
25 dispatcher_(dispatcher
) {
// Take ownership of the shared-state shmem handle and map it so the cached
// command buffer state can be read directly (see shared_state()/TryUpdateState).
26 shared_state_shm_
.reset(
27 new base::SharedMemory(shared_state
.shmem(), false));
28 shared_state_shm_
->Map(shared_state
.size());
// Flush state is shared across proxies of the same plugin instance.
29 InstanceData
* data
= dispatcher
->GetInstanceData(resource
.instance());
30 flush_info_
= &data
->flush_info_
;
// Destructor. Relies on member destruction for cleanup (see comment below);
// remainder of the body, if any, is elided in this excerpt.
33 PpapiCommandBufferProxy::~PpapiCommandBufferProxy() {
34 // gpu::Buffers are no longer referenced, allowing shared memory objects to be
35 // deleted, closing the handle in this process.
// Initialize the command buffer proxy. Body is elided in this excerpt;
// presumably trivial (returns a success flag) — confirm against the full file.
38 bool PpapiCommandBufferProxy::Initialize() {
// Returns the cached command buffer state. Must be called with the Pepper
// proxy lock held (asserted below). The return statement is elided in this
// excerpt — presumably returns last_state_; confirm against the full file.
42 gpu::CommandBuffer::State
PpapiCommandBufferProxy::GetLastState() {
43 ppapi::ProxyLock::AssertAcquiredDebugOnly();
// Returns the last token seen in the cached state. Requires the Pepper proxy
// lock. NOTE(review): a statement between the assert and the return (likely a
// state refresh) appears elided in this excerpt; confirm.
47 int32
PpapiCommandBufferProxy::GetLastToken() {
48 ppapi::ProxyLock::AssertAcquiredDebugOnly();
50 return last_state_
.token
;
// Flushes commands up to |put_offset|. Bails out when the context is already
// in error, otherwise records the flush via OrderingBarrier().
// NOTE(review): the early-return body of the error check and any trailing
// statements (e.g. the actual flush dispatch) are elided in this excerpt.
53 void PpapiCommandBufferProxy::Flush(int32 put_offset
) {
54 if (last_state_
.error
!= gpu::error::kNoError
)
57 OrderingBarrier(put_offset
);
// Records a pending flush for this proxy's resource in the shared
// per-instance flush_info_. If a flush is already pending for a *different*
// resource, the (elided) branch body presumably flushes that one first before
// this proxy claims flush_info_ — confirm against the full file.
61 void PpapiCommandBufferProxy::OrderingBarrier(int32 put_offset
) {
// No-op once the context is lost/in error (early-return body elided here).
62 if (last_state_
.error
!= gpu::error::kNoError
)
65 if (flush_info_
->flush_pending
&& flush_info_
->resource
!= resource_
)
// Claim the shared flush slot for this resource and remember the put offset.
68 flush_info_
->flush_pending
= true;
69 flush_info_
->resource
= resource_
;
70 flush_info_
->put_offset
= put_offset
;
// Blocks (via sync IPC to the host) until the command buffer token is within
// [start, end], unless the cached token already is or the context is in
// error. On IPC success the cached state is refreshed via UpdateState().
// NOTE(review): the remaining IPC arguments and the definition of |success|
// are elided in this excerpt.
73 void PpapiCommandBufferProxy::WaitForTokenInRange(int32 start
, int32 end
) {
75 if (!InRange(start
, end
, last_state_
.token
) &&
76 last_state_
.error
== gpu::error::kNoError
) {
78 gpu::CommandBuffer::State state
;
79 if (Send(new PpapiHostMsg_PPBGraphics3D_WaitForTokenInRange(
80 ppapi::API_ID_PPB_GRAPHICS_3D
,
86 UpdateState(state
, success
);
// Postcondition: either the token landed in range or the context errored out.
88 DCHECK(InRange(start
, end
, last_state_
.token
) ||
89 last_state_
.error
!= gpu::error::kNoError
);
// Mirror of WaitForTokenInRange() for the get offset: sync IPC to the host
// until the get offset is within [start, end] or the context is in error,
// then refreshes the cached state.
// NOTE(review): the remaining IPC arguments and the definition of |success|
// are elided in this excerpt.
92 void PpapiCommandBufferProxy::WaitForGetOffsetInRange(int32 start
, int32 end
) {
94 if (!InRange(start
, end
, last_state_
.get_offset
) &&
95 last_state_
.error
== gpu::error::kNoError
) {
97 gpu::CommandBuffer::State state
;
98 if (Send(new PpapiHostMsg_PPBGraphics3D_WaitForGetOffsetInRange(
99 ppapi::API_ID_PPB_GRAPHICS_3D
,
105 UpdateState(state
, success
);
// Postcondition: either the get offset landed in range or the context errored.
107 DCHECK(InRange(start
, end
, last_state_
.get_offset
) ||
108 last_state_
.error
!= gpu::error::kNoError
);
// Tells the host which transfer buffer to use as the command (get) buffer.
// Only sent while the context is error-free.
111 void PpapiCommandBufferProxy::SetGetBuffer(int32 transfer_buffer_id
) {
112 if (last_state_
.error
== gpu::error::kNoError
) {
113 Send(new PpapiHostMsg_PPBGraphics3D_SetGetBuffer(
114 ppapi::API_ID_PPB_GRAPHICS_3D
, resource_
, transfer_buffer_id
));
// Asks the host to create a shared-memory transfer buffer of |size| bytes,
// receiving its id and handle; wraps the handle in base::SharedMemory, maps
// it on demand, and returns it as a gpu::Buffer. Failures set
// last_state_.error (kLostContext on IPC failure, kOutOfBounds on a bad
// id/handle or failed Map) — the corresponding early returns are elided in
// this excerpt, as are the parameter list and parts of some statements.
118 scoped_refptr
<gpu::Buffer
> PpapiCommandBufferProxy::CreateTransferBuffer(
123 if (last_state_
.error
!= gpu::error::kNoError
)
126 // Assuming we are in the renderer process, the service is responsible for
127 // duplicating the handle. This might not be true for NaCl.
128 ppapi::proxy::SerializedHandle
handle(
129 ppapi::proxy::SerializedHandle::SHARED_MEMORY
);
130 if (!Send(new PpapiHostMsg_PPBGraphics3D_CreateTransferBuffer(
131 ppapi::API_ID_PPB_GRAPHICS_3D
, resource_
,
132 base::checked_cast
<uint32_t>(size
), id
, &handle
))) {
// IPC failure: treat as a lost context (if not already errored).
133 if (last_state_
.error
== gpu::error::kNoError
)
134 last_state_
.error
= gpu::error::kLostContext
;
// Host returned an invalid id or a non-shmem handle.
138 if (*id
<= 0 || !handle
.is_shmem()) {
139 if (last_state_
.error
== gpu::error::kNoError
)
140 last_state_
.error
= gpu::error::kOutOfBounds
;
144 scoped_ptr
<base::SharedMemory
> shared_memory(
145 new base::SharedMemory(handle
.shmem(), false));
147 // Map the shared memory on demand.
148 if (!shared_memory
->memory()) {
149 if (!shared_memory
->Map(handle
.size())) {
150 if (last_state_
.error
== gpu::error::kNoError
)
151 last_state_
.error
= gpu::error::kOutOfBounds
;
157 return gpu::MakeBufferFromSharedMemory(shared_memory
.Pass(), handle
.size());
// Asks the host to destroy the transfer buffer with the given id. Skipped
// when the context is already in error (early-return body elided here).
160 void PpapiCommandBufferProxy::DestroyTransferBuffer(int32 id
) {
161 if (last_state_
.error
!= gpu::error::kNoError
)
164 Send(new PpapiHostMsg_PPBGraphics3D_DestroyTransferBuffer(
165 ppapi::API_ID_PPB_GRAPHICS_3D
, resource_
, id
));
// gpu::CommandBuffer override. Body elided in this excerpt — presumably
// unimplemented in the Pepper proxy; confirm against the full file.
168 uint32
PpapiCommandBufferProxy::CreateStreamTexture(uint32 texture_id
) {
// gpu::CommandBuffer override; the lock parameter is unnamed and the body is
// elided in this excerpt — presumably a no-op here; confirm.
173 void PpapiCommandBufferProxy::SetLock(base::Lock
*) {
// Reports whether the GPU channel has been lost. Body elided in this excerpt.
177 bool PpapiCommandBufferProxy::IsGpuChannelLost() {
// Identifies the sync-point namespace this command buffer belongs to.
182 gpu::CommandBufferNamespace
PpapiCommandBufferProxy::GetNamespaceID() const {
183 return gpu::CommandBufferNamespace::GPU_IO
;
// Returns the id cached at construction time.
186 uint64_t PpapiCommandBufferProxy::GetCommandBufferID() const {
187 return command_buffer_id_
;
// Inserts a sync point via sync IPC to the host. Returns 0 when the context
// is in error (no IPC sent). The trailing return is elided in this excerpt.
190 uint32
PpapiCommandBufferProxy::InsertSyncPoint() {
191 uint32 sync_point
= 0;
192 if (last_state_
.error
== gpu::error::kNoError
) {
193 Send(new PpapiHostMsg_PPBGraphics3D_InsertSyncPoint(
194 ppapi::API_ID_PPB_GRAPHICS_3D
, resource_
, &sync_point
));
// Same shape as InsertSyncPoint(), but requests a "future" sync point that
// the host retires later (see RetireSyncPoint below). Returns 0 on error;
// trailing return elided in this excerpt.
199 uint32
PpapiCommandBufferProxy::InsertFutureSyncPoint() {
200 uint32 sync_point
= 0;
201 if (last_state_
.error
== gpu::error::kNoError
) {
202 Send(new PpapiHostMsg_PPBGraphics3D_InsertFutureSyncPoint(
203 ppapi::API_ID_PPB_GRAPHICS_3D
, resource_
, &sync_point
));
// Retires a previously inserted future sync point. Only sent while the
// context is error-free.
208 void PpapiCommandBufferProxy::RetireSyncPoint(uint32 sync_point
) {
209 if (last_state_
.error
== gpu::error::kNoError
) {
210 Send(new PpapiHostMsg_PPBGraphics3D_RetireSyncPoint(
211 ppapi::API_ID_PPB_GRAPHICS_3D
, resource_
, sync_point
));
// Requests |callback| be run once |sync_point| passes. Body elided in this
// excerpt — cannot tell from here whether it is implemented; confirm.
215 void PpapiCommandBufferProxy::SignalSyncPoint(uint32 sync_point
,
216 const base::Closure
& callback
) {
// Requests |callback| be run once query |query| completes. Body elided in
// this excerpt — cannot tell from here whether it is implemented; confirm.
220 void PpapiCommandBufferProxy::SignalQuery(uint32 query
,
221 const base::Closure
& callback
) {
// Visibility hint from the compositor side. Body elided in this excerpt.
225 void PpapiCommandBufferProxy::SetSurfaceVisible(bool visible
) {
// Returns the capabilities snapshot cached at construction time.
229 gpu::Capabilities
PpapiCommandBufferProxy::GetCapabilities() {
230 return capabilities_
;
// Creates a GPU image from a client buffer. Parameter list is partially
// elided and the body is absent from this excerpt.
233 int32
PpapiCommandBufferProxy::CreateImage(ClientBuffer buffer
,
236 unsigned internalformat
) {
// Destroys a GPU image by id. Body elided in this excerpt.
241 void PpapiCommandBufferProxy::DestroyImage(int32 id
) {
// Creates an image backed by a GPU memory buffer. Parameter list is
// partially elided and the body is absent from this excerpt.
245 int32
PpapiCommandBufferProxy::CreateGpuMemoryBufferImage(
248 unsigned internalformat
,
// Sends |msg| to the host without releasing the Pepper proxy lock (see the
// deadlock rationale below). On send failure, marks the context lost. Must
// only be called while the context is error-free (DCHECKed). The return
// statements for both paths are elided in this excerpt.
254 bool PpapiCommandBufferProxy::Send(IPC::Message
* msg
) {
255 DCHECK(last_state_
.error
== gpu::error::kNoError
);
257 // We need to hold the Pepper proxy lock for sync IPC, because the GPU command
258 // buffer may use a sync IPC with another lock held which could lead to lock
259 // and deadlock if we dropped the proxy lock here.
260 // http://crbug.com/418651
261 if (dispatcher_
->SendAndStayLocked(msg
))
264 last_state_
.error
= gpu::error::kLostContext
;
// Merges a state snapshot received from the host into last_state_, accepting
// it only if its generation is not older than the cached one (unsigned
// subtraction handles generation-counter wraparound, per the comment below).
// NOTE(review): a second parameter (|success|, judging by the call sites) and
// the accept/reject branch bodies are elided in this excerpt; on the failure
// path the context is marked lost and the generation bumped.
268 void PpapiCommandBufferProxy::UpdateState(
269 const gpu::CommandBuffer::State
& state
,
271 // Handle wraparound. It works as long as we don't have more than 2B state
272 // updates in flight across which reordering occurs.
274 if (state
.generation
- last_state_
.generation
< 0x80000000U
) {
278 last_state_
.error
= gpu::error::kLostContext
;
279 ++last_state_
.generation
;
// Refreshes last_state_ directly from the mapped shared-state memory, but
// only while the context is error-free (error state is sticky).
283 void PpapiCommandBufferProxy::TryUpdateState() {
284 if (last_state_
.error
== gpu::error::kNoError
)
285 shared_state()->Read(&last_state_
);
// Accessor for the shared-state structure living in the mapped shared memory
// set up by the constructor. reinterpret_cast is required because
// SharedMemory::memory() returns an untyped pointer.
288 gpu::CommandBufferSharedState
* PpapiCommandBufferProxy::shared_state() const {
289 return reinterpret_cast<gpu::CommandBufferSharedState
*>(
290 shared_state_shm_
->memory());
// Sends the pending async flush recorded in flush_info_ to the host, then
// clears the pending flag and resets the recorded resource. Preconditions
// (DCHECKed): context error-free and a flush actually pending.
// NOTE(review): the statement that actually dispatches |message| is elided in
// this excerpt (between set_unblock and clearing flush_pending); confirm.
293 void PpapiCommandBufferProxy::FlushInternal() {
294 DCHECK(last_state_
.error
== gpu::error::kNoError
);
296 DCHECK(flush_info_
->flush_pending
);
298 IPC::Message
* message
= new PpapiHostMsg_PPBGraphics3D_AsyncFlush(
299 ppapi::API_ID_PPB_GRAPHICS_3D
, flush_info_
->resource
,
300 flush_info_
->put_offset
);
302 // Do not let a synchronous flush hold up this message. If this handler is
303 // deferred until after the synchronous flush completes, it will overwrite the
304 // cached last_state_ with out-of-date data.
305 message
->set_unblock(true);
// Clear the shared pending-flush slot so another proxy may claim it.
308 flush_info_
->flush_pending
= false;
309 flush_info_
->resource
.SetHostResource(0, 0);