// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "ppapi/proxy/ppapi_command_buffer_proxy.h"

#include "base/numerics/safe_conversions.h"
#include "ppapi/proxy/ppapi_messages.h"
#include "ppapi/proxy/proxy_channel.h"
#include "ppapi/shared_impl/api_id.h"
#include "ppapi/shared_impl/host_resource.h"
#include "ppapi/shared_impl/proxy_lock.h"

namespace ppapi {
namespace proxy {

PpapiCommandBufferProxy::PpapiCommandBufferProxy(
    const ppapi::HostResource& resource,
    ProxyChannel* channel,
    const gpu::Capabilities& capabilities,
    const SerializedHandle& shared_state)
    : capabilities_(capabilities),
      resource_(resource),
      channel_(channel) {
  shared_state_shm_.reset(
      new base::SharedMemory(shared_state.shmem(), false));
  shared_state_shm_->Map(shared_state.size());
}
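
// Note: the region mapped above is interpreted by shared_state() (at the
// bottom of this file) as a gpu::CommandBufferSharedState. TryUpdateState()
// reads it so that state queries such as GetLastToken() can usually be
// answered without a synchronous IPC to the host.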

PpapiCommandBufferProxy::~PpapiCommandBufferProxy() {
  // gpu::Buffers are no longer referenced, allowing shared memory objects to be
  // deleted, closing the handle in this process.
}

bool PpapiCommandBufferProxy::Initialize() {
  return true;
}

gpu::CommandBuffer::State PpapiCommandBufferProxy::GetLastState() {
  ppapi::ProxyLock::AssertAcquiredDebugOnly();
  return last_state_;
}

int32 PpapiCommandBufferProxy::GetLastToken() {
  ppapi::ProxyLock::AssertAcquiredDebugOnly();
  TryUpdateState();
  return last_state_.token;
}

void PpapiCommandBufferProxy::Flush(int32 put_offset) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  IPC::Message* message = new PpapiHostMsg_PPBGraphics3D_AsyncFlush(
      ppapi::API_ID_PPB_GRAPHICS_3D, resource_, put_offset);

  // Do not let a synchronous flush hold up this message. If this handler is
  // deferred until after the synchronous flush completes, it will overwrite the
  // cached last_state_ with out-of-date data.
  message->set_unblock(true);
  Send(message);
}

void PpapiCommandBufferProxy::OrderingBarrier(int32 put_offset) {
  Flush(put_offset);
}
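
// Note: this proxy keeps no separate ordering-barrier state; an ordering
// barrier is simply issued as a full Flush() of the given put offset.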

void PpapiCommandBufferProxy::WaitForTokenInRange(int32 start, int32 end) {
  TryUpdateState();
  if (!InRange(start, end, last_state_.token) &&
      last_state_.error == gpu::error::kNoError) {
    bool success = false;
    gpu::CommandBuffer::State state;
    if (Send(new PpapiHostMsg_PPBGraphics3D_WaitForTokenInRange(
            ppapi::API_ID_PPB_GRAPHICS_3D,
            resource_,
            start,
            end,
            &state,
            &success)))
      UpdateState(state, success);
  }
  DCHECK(InRange(start, end, last_state_.token) ||
         last_state_.error != gpu::error::kNoError);
}

void PpapiCommandBufferProxy::WaitForGetOffsetInRange(int32 start, int32 end) {
  TryUpdateState();
  if (!InRange(start, end, last_state_.get_offset) &&
      last_state_.error == gpu::error::kNoError) {
    bool success = false;
    gpu::CommandBuffer::State state;
    if (Send(new PpapiHostMsg_PPBGraphics3D_WaitForGetOffsetInRange(
            ppapi::API_ID_PPB_GRAPHICS_3D,
            resource_,
            start,
            end,
            &state,
            &success)))
      UpdateState(state, success);
  }
  DCHECK(InRange(start, end, last_state_.get_offset) ||
         last_state_.error != gpu::error::kNoError);
}
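
// Note: both Wait*InRange() helpers above first re-read the shared state and
// only fall back to a blocking, synchronous IPC when the target value is not
// yet in range. The reply is applied through UpdateState() only on success;
// the trailing DCHECK verifies the postcondition (value in range, or the
// context is in an error state).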

void PpapiCommandBufferProxy::SetGetBuffer(int32 transfer_buffer_id) {
  if (last_state_.error == gpu::error::kNoError) {
    Send(new PpapiHostMsg_PPBGraphics3D_SetGetBuffer(
        ppapi::API_ID_PPB_GRAPHICS_3D, resource_, transfer_buffer_id));
  }
}

scoped_refptr<gpu::Buffer> PpapiCommandBufferProxy::CreateTransferBuffer(
    size_t size,
    int32* id) {
  *id = -1;

  if (last_state_.error != gpu::error::kNoError)
    return NULL;

  // Assuming we are in the renderer process, the service is responsible for
  // duplicating the handle. This might not be true for NaCl.
  ppapi::proxy::SerializedHandle handle(
      ppapi::proxy::SerializedHandle::SHARED_MEMORY);
  if (!Send(new PpapiHostMsg_PPBGraphics3D_CreateTransferBuffer(
          ppapi::API_ID_PPB_GRAPHICS_3D, resource_,
          base::checked_cast<uint32_t>(size), id, &handle))) {
    return NULL;
  }

  if (*id <= 0 || !handle.is_shmem())
    return NULL;

  scoped_ptr<base::SharedMemory> shared_memory(
      new base::SharedMemory(handle.shmem(), false));

  // Map the shared memory on demand.
  if (!shared_memory->memory()) {
    if (!shared_memory->Map(handle.size())) {
      *id = -1;
      return NULL;
    }
  }

  return gpu::MakeBufferFromSharedMemory(shared_memory.Pass(), handle.size());
}
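
// Note: ownership of the duplicated shared memory passes into the returned
// gpu::Buffer (shared_memory.Pass()), so the handle stays open for as long as
// some gpu::Buffer references it; see the destructor comment at the top of
// this file.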

void PpapiCommandBufferProxy::DestroyTransferBuffer(int32 id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new PpapiHostMsg_PPBGraphics3D_DestroyTransferBuffer(
      ppapi::API_ID_PPB_GRAPHICS_3D, resource_, id));
}

uint32 PpapiCommandBufferProxy::CreateStreamTexture(uint32 texture_id) {
  NOTREACHED();
  return 0;
}

void PpapiCommandBufferProxy::SetLock(base::Lock*) {
  NOTIMPLEMENTED();
}

uint32 PpapiCommandBufferProxy::InsertSyncPoint() {
  uint32 sync_point = 0;
  if (last_state_.error == gpu::error::kNoError) {
    Send(new PpapiHostMsg_PPBGraphics3D_InsertSyncPoint(
        ppapi::API_ID_PPB_GRAPHICS_3D, resource_, &sync_point));
  }
  return sync_point;
}

uint32 PpapiCommandBufferProxy::InsertFutureSyncPoint() {
  uint32 sync_point = 0;
  if (last_state_.error == gpu::error::kNoError) {
    Send(new PpapiHostMsg_PPBGraphics3D_InsertFutureSyncPoint(
        ppapi::API_ID_PPB_GRAPHICS_3D, resource_, &sync_point));
  }
  return sync_point;
}

void PpapiCommandBufferProxy::RetireSyncPoint(uint32 sync_point) {
  if (last_state_.error == gpu::error::kNoError) {
    Send(new PpapiHostMsg_PPBGraphics3D_RetireSyncPoint(
        ppapi::API_ID_PPB_GRAPHICS_3D, resource_, sync_point));
  }
}

void PpapiCommandBufferProxy::SignalSyncPoint(uint32 sync_point,
                                              const base::Closure& callback) {
  NOTREACHED();
}

void PpapiCommandBufferProxy::SignalQuery(uint32 query,
                                          const base::Closure& callback) {
  NOTREACHED();
}

void PpapiCommandBufferProxy::SetSurfaceVisible(bool visible) {
  NOTREACHED();
}

gpu::Capabilities PpapiCommandBufferProxy::GetCapabilities() {
  return capabilities_;
}

int32 PpapiCommandBufferProxy::CreateImage(ClientBuffer buffer,
                                           size_t width,
                                           size_t height,
                                           unsigned internalformat) {
  NOTREACHED();
  return -1;
}

void PpapiCommandBufferProxy::DestroyImage(int32 id) {
  NOTREACHED();
}

int32 PpapiCommandBufferProxy::CreateGpuMemoryBufferImage(
    size_t width,
    size_t height,
    unsigned internalformat,
    unsigned usage) {
  NOTREACHED();
  return -1;
}

bool PpapiCommandBufferProxy::Send(IPC::Message* msg) {
  DCHECK(last_state_.error == gpu::error::kNoError);

  if (channel_->Send(msg))
    return true;

  last_state_.error = gpu::error::kLostContext;
  return false;
}
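
// Note: when the channel send fails, the cached state is marked kLostContext,
// which makes the operations above early-out on subsequent calls.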

void PpapiCommandBufferProxy::UpdateState(
    const gpu::CommandBuffer::State& state,
    bool success) {
  // Handle wraparound. It works as long as we don't have more than 2B state
  // updates in flight across which reordering occurs.
  if (success) {
    if (state.generation - last_state_.generation < 0x80000000U) {
      last_state_ = state;
    }
  } else {
    last_state_.error = gpu::error::kLostContext;
    ++last_state_.generation;
  }
}
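
// Illustrative example of the wraparound check above: with
// last_state_.generation == 0xFFFFFFFEu and state.generation == 0x00000001u,
// the unsigned difference is 3, which is < 0x80000000U, so the newer state is
// accepted across the wrap; a stale reply whose generation is behind the
// cached one yields a difference >= 0x80000000U and is dropped.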

void PpapiCommandBufferProxy::TryUpdateState() {
  if (last_state_.error == gpu::error::kNoError)
    shared_state()->Read(&last_state_);
}

gpu::CommandBufferSharedState* PpapiCommandBufferProxy::shared_state() const {
  return reinterpret_cast<gpu::CommandBufferSharedState*>(
      shared_state_shm_->memory());
}

}  // namespace proxy
}  // namespace ppapi