1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "ppapi/proxy/ppapi_command_buffer_proxy.h"
7 #include "base/numerics/safe_conversions.h"
8 #include "ppapi/proxy/ppapi_messages.h"
9 #include "ppapi/shared_impl/api_id.h"
10 #include "ppapi/shared_impl/host_resource.h"
11 #include "ppapi/shared_impl/proxy_lock.h"
// Constructor: caches the GPU capabilities and the plugin dispatcher, wraps
// and maps the command-buffer shared-state shared-memory handle, and caches
// a pointer to the per-instance flush bookkeeping owned by the dispatcher's
// InstanceData for this resource's instance.
// NOTE(review): this view of the file is garbled/truncated; original tokens
// are preserved verbatim below.
16 PpapiCommandBufferProxy::PpapiCommandBufferProxy(
17 const ppapi::HostResource
& resource
,
18 PluginDispatcher
* dispatcher
,
19 const gpu::Capabilities
& capabilities
,
20 const SerializedHandle
& shared_state
)
21 : capabilities_(capabilities
),
23 dispatcher_(dispatcher
) {
// Wrap the incoming shmem handle; the second argument is presumably the
// read_only flag (false) — TODO(review): confirm against base::SharedMemory.
24 shared_state_shm_
.reset(
25 new base::SharedMemory(shared_state
.shmem(), false));
26 shared_state_shm_
->Map(shared_state
.size());
// Flush state is kept per plugin instance, so fetch it from the
// dispatcher's InstanceData keyed by this resource's instance.
27 InstanceData
* data
= dispatcher
->GetInstanceData(resource
.instance());
28 flush_info_
= &data
->flush_info_
;
// Destructor. (Remainder of the body is not visible in this view.)
31 PpapiCommandBufferProxy::~PpapiCommandBufferProxy() {
32 // gpu::Buffers are no longer referenced, allowing shared memory objects to be
33 // deleted, closing the handle in this process.
// Initializes the proxy. (Body is not visible in this view.)
36 bool PpapiCommandBufferProxy::Initialize() {
// Returns the most recently cached command-buffer state. Must be called
// with the Pepper proxy lock held (debug-asserted below).
// (Remainder of the body is not visible in this view.)
40 gpu::CommandBuffer::State
PpapiCommandBufferProxy::GetLastState() {
41 ppapi::ProxyLock::AssertAcquiredDebugOnly();
// Returns the token from the cached last_state_. Must be called with the
// Pepper proxy lock held (debug-asserted below). A line between the assert
// and the return is missing from this view.
45 int32
PpapiCommandBufferProxy::GetLastToken() {
46 ppapi::ProxyLock::AssertAcquiredDebugOnly();
48 return last_state_
.token
;
// Flushes commands up to put_offset. Does nothing when the cached state
// already records an error; otherwise records the put offset via
// OrderingBarrier(). The early-out statement and the rest of the body are
// missing from this view.
51 void PpapiCommandBufferProxy::Flush(int32 put_offset
) {
52 if (last_state_
.error
!= gpu::error::kNoError
)
55 OrderingBarrier(put_offset
);
// Records a pending (deferred) flush at put_offset without necessarily
// sending it. No-ops when the command buffer already has an error. When a
// different resource owns the pending flush, some handling occurs first —
// that branch body is missing from this view.
59 void PpapiCommandBufferProxy::OrderingBarrier(int32 put_offset
) {
60 if (last_state_
.error
!= gpu::error::kNoError
)
61 if (flush_info_
->flush_pending
&& flush_info_
->resource
!= resource_
)
// Mark this proxy's resource as the owner of the pending flush and record
// the requested put offset for the eventual AsyncFlush.
66 flush_info_
->flush_pending
= true;
67 flush_info_
->resource
= resource_
;
68 flush_info_
->put_offset
= put_offset
;
// Blocks via a synchronous IPC to the host until last_state_.token falls in
// [start, end] or the command buffer enters an error state. Several lines
// of the Send() call and its surroundings are missing from this view.
71 void PpapiCommandBufferProxy::WaitForTokenInRange(int32 start
, int32 end
) {
73 if (!InRange(start
, end
, last_state_
.token
) &&
74 last_state_
.error
== gpu::error::kNoError
) {
76 gpu::CommandBuffer::State state
;
77 if (Send(new PpapiHostMsg_PPBGraphics3D_WaitForTokenInRange(
78 ppapi::API_ID_PPB_GRAPHICS_3D
,
// Fold the state returned by the host into our cached last_state_.
84 UpdateState(state
, success
);
// Postcondition: either the token reached the range or an error occurred.
86 DCHECK(InRange(start
, end
, last_state_
.token
) ||
87 last_state_
.error
!= gpu::error::kNoError
);
// Blocks via a synchronous IPC to the host until last_state_.get_offset
// falls in [start, end] or the command buffer enters an error state.
// Mirrors WaitForTokenInRange() but for the get offset. Several lines of
// the Send() call and its surroundings are missing from this view.
90 void PpapiCommandBufferProxy::WaitForGetOffsetInRange(int32 start
, int32 end
) {
92 if (!InRange(start
, end
, last_state_
.get_offset
) &&
93 last_state_
.error
== gpu::error::kNoError
) {
95 gpu::CommandBuffer::State state
;
96 if (Send(new PpapiHostMsg_PPBGraphics3D_WaitForGetOffsetInRange(
97 ppapi::API_ID_PPB_GRAPHICS_3D
,
// Fold the state returned by the host into our cached last_state_.
103 UpdateState(state
, success
);
// Postcondition: either get_offset reached the range or an error occurred.
105 DCHECK(InRange(start
, end
, last_state_
.get_offset
) ||
106 last_state_
.error
!= gpu::error::kNoError
);
// Tells the host which transfer buffer holds the command entries. Only
// sent while the command buffer has no recorded error. (Closing braces are
// missing from this view.)
109 void PpapiCommandBufferProxy::SetGetBuffer(int32 transfer_buffer_id
) {
110 if (last_state_
.error
== gpu::error::kNoError
) {
111 Send(new PpapiHostMsg_PPBGraphics3D_SetGetBuffer(
112 ppapi::API_ID_PPB_GRAPHICS_3D
, resource_
, transfer_buffer_id
));
// Creates a transfer buffer of the requested size via a synchronous IPC,
// receiving an id and a shared-memory handle from the host, then wraps and
// (lazily) maps the memory into a gpu::Buffer. Parts of the signature,
// early-outs, and error paths are missing from this view.
116 scoped_refptr
<gpu::Buffer
> PpapiCommandBufferProxy::CreateTransferBuffer(
121 if (last_state_
.error
!= gpu::error::kNoError
)
124 // Assuming we are in the renderer process, the service is responsible for
125 // duplicating the handle. This might not be true for NaCl.
126 ppapi::proxy::SerializedHandle
handle(
127 ppapi::proxy::SerializedHandle::SHARED_MEMORY
);
128 if (!Send(new PpapiHostMsg_PPBGraphics3D_CreateTransferBuffer(
129 ppapi::API_ID_PPB_GRAPHICS_3D
, resource_
,
130 base::checked_cast
<uint32_t>(size
), id
, &handle
))) {
// Validate the host's reply: id must be positive and the handle must be
// real shared memory.
134 if (*id
<= 0 || !handle
.is_shmem())
137 scoped_ptr
<base::SharedMemory
> shared_memory(
138 new base::SharedMemory(handle
.shmem(), false));
140 // Map the shared memory on demand.
141 if (!shared_memory
->memory()) {
142 if (!shared_memory
->Map(handle
.size())) {
// Hand ownership of the mapped memory to a gpu::Buffer for the caller.
148 return gpu::MakeBufferFromSharedMemory(shared_memory
.Pass(), handle
.size());
// Asks the host to destroy the transfer buffer with the given id. Skipped
// when the command buffer already has an error. (The early-out statement
// and closing brace are missing from this view.)
151 void PpapiCommandBufferProxy::DestroyTransferBuffer(int32 id
) {
152 if (last_state_
.error
!= gpu::error::kNoError
)
155 Send(new PpapiHostMsg_PPBGraphics3D_DestroyTransferBuffer(
156 ppapi::API_ID_PPB_GRAPHICS_3D
, resource_
, id
));
// Creates a stream texture. (Body is not visible in this view.)
159 uint32
PpapiCommandBufferProxy::CreateStreamTexture(uint32 texture_id
) {
// Sets an external lock; the parameter is unnamed here, suggesting it is
// unused. (Body is not visible in this view.)
164 void PpapiCommandBufferProxy::SetLock(base::Lock
*) {
// Reports whether the GPU channel is lost. (Body is not visible in this
// view.)
168 bool PpapiCommandBufferProxy::IsGpuChannelLost() {
// Inserts a GPU sync point via a synchronous IPC and returns its id;
// sync_point stays 0 when the command buffer has an error. (The return
// statement and closing braces are missing from this view.)
173 uint32
PpapiCommandBufferProxy::InsertSyncPoint() {
174 uint32 sync_point
= 0;
175 if (last_state_
.error
== gpu::error::kNoError
) {
176 Send(new PpapiHostMsg_PPBGraphics3D_InsertSyncPoint(
177 ppapi::API_ID_PPB_GRAPHICS_3D
, resource_
, &sync_point
));
// Inserts a "future" GPU sync point via a synchronous IPC and returns its
// id; sync_point stays 0 when the command buffer has an error. Mirrors
// InsertSyncPoint(). (The return statement and closing braces are missing
// from this view.)
182 uint32
PpapiCommandBufferProxy::InsertFutureSyncPoint() {
183 uint32 sync_point
= 0;
184 if (last_state_
.error
== gpu::error::kNoError
) {
185 Send(new PpapiHostMsg_PPBGraphics3D_InsertFutureSyncPoint(
186 ppapi::API_ID_PPB_GRAPHICS_3D
, resource_
, &sync_point
));
// Retires a previously inserted sync point, but only while the command
// buffer has no recorded error. (Closing braces are missing from this
// view.)
191 void PpapiCommandBufferProxy::RetireSyncPoint(uint32 sync_point
) {
192 if (last_state_
.error
== gpu::error::kNoError
) {
193 Send(new PpapiHostMsg_PPBGraphics3D_RetireSyncPoint(
194 ppapi::API_ID_PPB_GRAPHICS_3D
, resource_
, sync_point
));
// Registers a callback for when a sync point is reached. (Body is not
// visible in this view.)
198 void PpapiCommandBufferProxy::SignalSyncPoint(uint32 sync_point
,
199 const base::Closure
& callback
) {
// Registers a callback for when a query completes. (Body is not visible in
// this view.)
203 void PpapiCommandBufferProxy::SignalQuery(uint32 query
,
204 const base::Closure
& callback
) {
// Sets surface visibility. (Body is not visible in this view.)
208 void PpapiCommandBufferProxy::SetSurfaceVisible(bool visible
) {
// Returns the capabilities snapshot cached by the constructor. (Closing
// brace is missing from this view.)
212 gpu::Capabilities
PpapiCommandBufferProxy::GetCapabilities() {
213 return capabilities_
;
// Creates an image from a client buffer. Only a signature fragment is
// visible in this view; intermediate parameters and the body are missing.
216 int32
PpapiCommandBufferProxy::CreateImage(ClientBuffer buffer
,
219 unsigned internalformat
) {
// Destroys the image with the given id. (Body is not visible in this view.)
224 void PpapiCommandBufferProxy::DestroyImage(int32 id
) {
// Creates an image backed by a GPU memory buffer. Only a signature
// fragment is visible in this view; other parameters and the body are
// missing.
228 int32
PpapiCommandBufferProxy::CreateGpuMemoryBufferImage(
231 unsigned internalformat
,
// Sends an IPC message to the host while keeping the Pepper proxy lock
// held; on failure, marks the command buffer as having lost its context.
// Must only be called while last_state_ has no error (DCHECKed). Some
// lines (including the success return) are missing from this view.
237 bool PpapiCommandBufferProxy::Send(IPC::Message
* msg
) {
238 DCHECK(last_state_
.error
== gpu::error::kNoError
);
240 // We need to hold the Pepper proxy lock for sync IPC, because the GPU command
241 // buffer may use a sync IPC with another lock held, which could lead to lock
242 // inversion and deadlock if we dropped the proxy lock here.
243 // http://crbug.com/418651
244 if (dispatcher_
->SendAndStayLocked(msg
))
// Send failed: record a lost context so callers stop issuing commands.
247 last_state_
.error
= gpu::error::kLostContext
;
// Merges a state snapshot from the host into last_state_, using the
// generation counter to reject stale (reordered) updates. Several lines —
// including the success parameter, the accepting branch body, and the
// failure branch structure — are missing from this view.
251 void PpapiCommandBufferProxy::UpdateState(
252 const gpu::CommandBuffer::State
& state
,
254 // Handle wraparound. It works as long as we don't have more than 2B state
255 // updates in flight across which reordering occurs.
// Unsigned subtraction: true iff state.generation is at or ahead of
// last_state_.generation modulo 2^32.
257 if (state
.generation
- last_state_
.generation
< 0x80000000U
) {
// Presumably the failure path: mark the context lost and bump the
// generation — TODO(review): confirm once the full body is visible.
261 last_state_
.error
= gpu::error::kLostContext
;
262 ++last_state_
.generation
;
// Refreshes last_state_ from the shared-state memory, but only while no
// error has been recorded. (Closing brace is missing from this view.)
266 void PpapiCommandBufferProxy::TryUpdateState() {
267 if (last_state_
.error
== gpu::error::kNoError
)
268 shared_state()->Read(&last_state_
);
// Returns the mapped shared-state memory reinterpreted as the
// CommandBufferSharedState structure written by the GPU side. (Closing
// brace is missing from this view.)
271 gpu::CommandBufferSharedState
* PpapiCommandBufferProxy::shared_state() const {
272 return reinterpret_cast<gpu::CommandBufferSharedState
*>(
273 shared_state_shm_
->memory());
// Sends the recorded pending flush as an asynchronous (unblocking) IPC and
// clears the pending-flush bookkeeping. Requires a pending flush and no
// recorded error (DCHECKed). The function runs past the end of this view;
// some interior lines are also missing.
276 void PpapiCommandBufferProxy::FlushInternal() {
277 DCHECK(last_state_
.error
== gpu::error::kNoError
);
279 DCHECK(flush_info_
->flush_pending
);
// Build the AsyncFlush for whichever resource owns the pending flush (it
// may differ from this proxy's resource_).
281 IPC::Message
* message
= new PpapiHostMsg_PPBGraphics3D_AsyncFlush(
282 ppapi::API_ID_PPB_GRAPHICS_3D
, flush_info_
->resource
,
283 flush_info_
->put_offset
);
285 // Do not let a synchronous flush hold up this message. If this handler is
286 // deferred until after the synchronous flush completes, it will overwrite the
287 // cached last_state_ with out-of-date data.
288 message
->set_unblock(true);
// Clear the pending-flush record; a zeroed HostResource marks "no owner".
291 flush_info_
->flush_pending
= false;
292 flush_info_
->resource
.SetHostResource(0, 0);