// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ppapi/proxy/ppapi_command_buffer_proxy.h"

#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
#include "ppapi/proxy/ppapi_messages.h"
#include "ppapi/proxy/proxy_channel.h"
#include "ppapi/shared_impl/api_id.h"
#include "ppapi/shared_impl/host_resource.h"
#include "ppapi/shared_impl/proxy_lock.h"
17 PpapiCommandBufferProxy::PpapiCommandBufferProxy(
18 const ppapi::HostResource
& resource
,
19 ProxyChannel
* channel
,
20 const gpu::Capabilities
& capabilities
,
21 const SerializedHandle
& shared_state
)
22 : capabilities_(capabilities
),
25 shared_state_shm_
.reset(
26 new base::SharedMemory(shared_state
.shmem(), false));
27 shared_state_shm_
->Map(shared_state
.size());
30 PpapiCommandBufferProxy::~PpapiCommandBufferProxy() {
31 // gpu::Buffers are no longer referenced, allowing shared memory objects to be
32 // deleted, closing the handle in this process.
35 bool PpapiCommandBufferProxy::Initialize() {
39 gpu::CommandBuffer::State
PpapiCommandBufferProxy::GetLastState() {
40 ppapi::ProxyLock::AssertAcquiredDebugOnly();
44 int32
PpapiCommandBufferProxy::GetLastToken() {
45 ppapi::ProxyLock::AssertAcquiredDebugOnly();
47 return last_state_
.token
;
50 void PpapiCommandBufferProxy::Flush(int32 put_offset
) {
51 if (last_state_
.error
!= gpu::error::kNoError
)
54 IPC::Message
* message
= new PpapiHostMsg_PPBGraphics3D_AsyncFlush(
55 ppapi::API_ID_PPB_GRAPHICS_3D
, resource_
, put_offset
);
57 // Do not let a synchronous flush hold up this message. If this handler is
58 // deferred until after the synchronous flush completes, it will overwrite the
59 // cached last_state_ with out-of-date data.
60 message
->set_unblock(true);
64 void PpapiCommandBufferProxy::OrderingBarrier(int32 put_offset
) {
68 void PpapiCommandBufferProxy::WaitForTokenInRange(int32 start
, int32 end
) {
70 if (!InRange(start
, end
, last_state_
.token
) &&
71 last_state_
.error
== gpu::error::kNoError
) {
73 gpu::CommandBuffer::State state
;
74 if (Send(new PpapiHostMsg_PPBGraphics3D_WaitForTokenInRange(
75 ppapi::API_ID_PPB_GRAPHICS_3D
,
81 UpdateState(state
, success
);
83 DCHECK(InRange(start
, end
, last_state_
.token
) ||
84 last_state_
.error
!= gpu::error::kNoError
);
87 void PpapiCommandBufferProxy::WaitForGetOffsetInRange(int32 start
, int32 end
) {
89 if (!InRange(start
, end
, last_state_
.get_offset
) &&
90 last_state_
.error
== gpu::error::kNoError
) {
92 gpu::CommandBuffer::State state
;
93 if (Send(new PpapiHostMsg_PPBGraphics3D_WaitForGetOffsetInRange(
94 ppapi::API_ID_PPB_GRAPHICS_3D
,
100 UpdateState(state
, success
);
102 DCHECK(InRange(start
, end
, last_state_
.get_offset
) ||
103 last_state_
.error
!= gpu::error::kNoError
);
106 void PpapiCommandBufferProxy::SetGetBuffer(int32 transfer_buffer_id
) {
107 if (last_state_
.error
== gpu::error::kNoError
) {
108 Send(new PpapiHostMsg_PPBGraphics3D_SetGetBuffer(
109 ppapi::API_ID_PPB_GRAPHICS_3D
, resource_
, transfer_buffer_id
));
113 scoped_refptr
<gpu::Buffer
> PpapiCommandBufferProxy::CreateTransferBuffer(
118 if (last_state_
.error
!= gpu::error::kNoError
)
121 // Assuming we are in the renderer process, the service is responsible for
122 // duplicating the handle. This might not be true for NaCl.
123 ppapi::proxy::SerializedHandle
handle(
124 ppapi::proxy::SerializedHandle::SHARED_MEMORY
);
125 if (!Send(new PpapiHostMsg_PPBGraphics3D_CreateTransferBuffer(
126 ppapi::API_ID_PPB_GRAPHICS_3D
, resource_
,
127 base::checked_cast
<uint32_t>(size
), id
, &handle
))) {
131 if (*id
<= 0 || !handle
.is_shmem())
134 scoped_ptr
<base::SharedMemory
> shared_memory(
135 new base::SharedMemory(handle
.shmem(), false));
137 // Map the shared memory on demand.
138 if (!shared_memory
->memory()) {
139 if (!shared_memory
->Map(handle
.size())) {
145 return gpu::MakeBufferFromSharedMemory(shared_memory
.Pass(), handle
.size());
148 void PpapiCommandBufferProxy::DestroyTransferBuffer(int32 id
) {
149 if (last_state_
.error
!= gpu::error::kNoError
)
152 Send(new PpapiHostMsg_PPBGraphics3D_DestroyTransferBuffer(
153 ppapi::API_ID_PPB_GRAPHICS_3D
, resource_
, id
));
156 uint32
PpapiCommandBufferProxy::CreateStreamTexture(uint32 texture_id
) {
161 void PpapiCommandBufferProxy::SetLock(base::Lock
*) {
165 uint32
PpapiCommandBufferProxy::InsertSyncPoint() {
166 uint32 sync_point
= 0;
167 if (last_state_
.error
== gpu::error::kNoError
) {
168 Send(new PpapiHostMsg_PPBGraphics3D_InsertSyncPoint(
169 ppapi::API_ID_PPB_GRAPHICS_3D
, resource_
, &sync_point
));
174 uint32
PpapiCommandBufferProxy::InsertFutureSyncPoint() {
175 uint32 sync_point
= 0;
176 if (last_state_
.error
== gpu::error::kNoError
) {
177 Send(new PpapiHostMsg_PPBGraphics3D_InsertFutureSyncPoint(
178 ppapi::API_ID_PPB_GRAPHICS_3D
, resource_
, &sync_point
));
183 void PpapiCommandBufferProxy::RetireSyncPoint(uint32 sync_point
) {
184 if (last_state_
.error
== gpu::error::kNoError
) {
185 Send(new PpapiHostMsg_PPBGraphics3D_RetireSyncPoint(
186 ppapi::API_ID_PPB_GRAPHICS_3D
, resource_
, sync_point
));
190 void PpapiCommandBufferProxy::SignalSyncPoint(uint32 sync_point
,
191 const base::Closure
& callback
) {
195 void PpapiCommandBufferProxy::SignalQuery(uint32 query
,
196 const base::Closure
& callback
) {
200 void PpapiCommandBufferProxy::SetSurfaceVisible(bool visible
) {
204 gpu::Capabilities
PpapiCommandBufferProxy::GetCapabilities() {
205 return capabilities_
;
208 int32
PpapiCommandBufferProxy::CreateImage(ClientBuffer buffer
,
211 unsigned internalformat
) {
216 void PpapiCommandBufferProxy::DestroyImage(int32 id
) {
220 int32
PpapiCommandBufferProxy::CreateGpuMemoryBufferImage(
223 unsigned internalformat
,
229 bool PpapiCommandBufferProxy::Send(IPC::Message
* msg
) {
230 DCHECK(last_state_
.error
== gpu::error::kNoError
);
232 if (channel_
->Send(msg
))
235 last_state_
.error
= gpu::error::kLostContext
;
239 void PpapiCommandBufferProxy::UpdateState(
240 const gpu::CommandBuffer::State
& state
,
242 // Handle wraparound. It works as long as we don't have more than 2B state
243 // updates in flight across which reordering occurs.
245 if (state
.generation
- last_state_
.generation
< 0x80000000U
) {
249 last_state_
.error
= gpu::error::kLostContext
;
250 ++last_state_
.generation
;
254 void PpapiCommandBufferProxy::TryUpdateState() {
255 if (last_state_
.error
== gpu::error::kNoError
)
256 shared_state()->Read(&last_state_
);
259 gpu::CommandBufferSharedState
* PpapiCommandBufferProxy::shared_state() const {
260 return reinterpret_cast<gpu::CommandBufferSharedState
*>(
261 shared_state_shm_
->memory());