gpu/command_buffer/service/async_pixel_transfer_manager_share_group.cc

// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/command_buffer/service/async_pixel_transfer_manager_share_group.h"

#include <list>

#include "base/bind.h"
#include "base/lazy_instance.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/single_thread_task_runner.h"
#include "base/synchronization/cancellation_flag.h"
#include "base/synchronization/lock.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
#include "base/threading/thread_checker.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_synthetic_delay.h"
#include "gpu/command_buffer/service/async_pixel_transfer_delegate.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_surface.h"
#include "ui/gl/gpu_preference.h"
#include "ui/gl/scoped_binders.h"

namespace gpu {

namespace {

const char kAsyncTransferThreadName[] = "AsyncTransferThread";

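// Runs on the transfer thread. Tasks execute there in FIFO order, so by the
// time this runs all previously scheduled uploads have completed and the
// observer can be notified.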
void PerformNotifyCompletion(
    AsyncMemoryParams mem_params,
    scoped_refptr<AsyncPixelTransferCompletionObserver> observer) {
  TRACE_EVENT0("gpu", "PerformNotifyCompletion");
  observer->DidComplete(mem_params);
}

// TODO(backer): Factor out common thread scheduling logic from the EGL and
// ShareGroup implementations. http://crbug.com/239889
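// Thread dedicated to async texture uploads. It owns a small offscreen
// surface and a GL context created in the parent context's share group, so
// textures uploaded on this thread are visible to the parent context.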
class TransferThread : public base::Thread {
 public:
  TransferThread()
      : base::Thread(kAsyncTransferThreadName),
        initialized_(false) {
    Start();
#if defined(OS_ANDROID) || defined(OS_LINUX)
    SetPriority(base::ThreadPriority::BACKGROUND);
#endif
  }

  ~TransferThread() override {
    // The only instance of this class was declared leaky.
    NOTREACHED();
  }

  void InitializeOnMainThread(gfx::GLContext* parent_context) {
    TRACE_EVENT0("gpu", "TransferThread::InitializeOnMainThread");
    if (initialized_)
      return;

    base::WaitableEvent wait_for_init(true, false);
    task_runner()->PostTask(
        FROM_HERE,
        base::Bind(&TransferThread::InitializeOnTransferThread,
                   base::Unretained(this), base::Unretained(parent_context),
                   &wait_for_init));
    wait_for_init.Wait();
  }

  void CleanUp() override {
    surface_ = NULL;
    context_ = NULL;
  }

 private:
  bool initialized_;

  scoped_refptr<gfx::GLSurface> surface_;
  scoped_refptr<gfx::GLContext> context_;

  void InitializeOnTransferThread(gfx::GLContext* parent_context,
                                  base::WaitableEvent* caller_wait) {
    TRACE_EVENT0("gpu", "InitializeOnTransferThread");

    if (!parent_context) {
      LOG(ERROR) << "No parent context provided.";
      caller_wait->Signal();
      return;
    }

    surface_ = gfx::GLSurface::CreateOffscreenGLSurface(gfx::Size(1, 1));
    if (!surface_.get()) {
      LOG(ERROR) << "Unable to create GLSurface";
      caller_wait->Signal();
      return;
    }

    // TODO(backer): This is coded for integrated GPUs. For discrete GPUs
    // we would probably want to use a PBO texture upload for a true async
    // upload (that would hopefully be optimized as a DMA transfer by the
    // driver).
    context_ = gfx::GLContext::CreateGLContext(parent_context->share_group(),
                                               surface_.get(),
                                               gfx::PreferIntegratedGpu);
    if (!context_.get()) {
      LOG(ERROR) << "Unable to create GLContext.";
      caller_wait->Signal();
      return;
    }

    context_->MakeCurrent(surface_.get());
    initialized_ = true;
    caller_wait->Signal();
  }

  DISALLOW_COPY_AND_ASSIGN(TransferThread);
};

base::LazyInstance<TransferThread>::Leaky
    g_transfer_thread = LAZY_INSTANCE_INITIALIZER;

base::SingleThreadTaskRunner* transfer_task_runner() {
  return g_transfer_thread.Pointer()->task_runner().get();
}

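// A one-shot upload task that can be run from either thread: the main thread
// claims it via TryRun() (the texture is already bound there), while the
// transfer thread runs it via BindAndRun(), which binds the texture itself.
// task_lock_ guarantees the closure runs at most once, and task_pending_
// lets the main thread wait for it to finish.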
class PendingTask : public base::RefCountedThreadSafe<PendingTask> {
 public:
  explicit PendingTask(const base::Closure& task)
      : task_(task), task_pending_(true, false) {}

  bool TryRun() {
    // This is meant to be called on the main thread where the texture
    // is already bound.
    DCHECK(checker_.CalledOnValidThread());
    if (task_lock_.Try()) {
      // Only run once.
      if (!task_.is_null())
        task_.Run();
      task_.Reset();

      task_lock_.Release();
      task_pending_.Signal();
      return true;
    }
    return false;
  }

  void BindAndRun(GLuint texture_id) {
    // This is meant to be called on the upload thread where we don't have to
    // restore the previous texture binding.
    DCHECK(!checker_.CalledOnValidThread());
    base::AutoLock locked(task_lock_);
    if (!task_.is_null()) {
      glBindTexture(GL_TEXTURE_2D, texture_id);
      task_.Run();
      task_.Reset();
      glBindTexture(GL_TEXTURE_2D, 0);
      // Flush for synchronization between threads.
      glFlush();
      task_pending_.Signal();
    }
  }

  void Cancel() {
    base::AutoLock locked(task_lock_);
    task_.Reset();
    task_pending_.Signal();
  }

  bool TaskIsInProgress() {
    return !task_pending_.IsSignaled();
  }

  void WaitForTask() {
    task_pending_.Wait();
  }

 private:
  friend class base::RefCountedThreadSafe<PendingTask>;

  virtual ~PendingTask() {}

  base::ThreadChecker checker_;

  base::Lock task_lock_;
  base::Closure task_;
  base::WaitableEvent task_pending_;

  DISALLOW_COPY_AND_ASSIGN(PendingTask);
};

// Class which holds async pixel transfers state.
// The texture_id is accessed by either thread, but everything
// else accessed only on the main thread.
class TransferStateInternal
    : public base::RefCountedThreadSafe<TransferStateInternal> {
 public:
  TransferStateInternal(GLuint texture_id,
                        const AsyncTexImage2DParams& define_params)
      : texture_id_(texture_id), define_params_(define_params) {}

  bool TransferIsInProgress() {
    return pending_upload_task_.get() &&
           pending_upload_task_->TaskIsInProgress();
  }

  void BindTransfer() {
    TRACE_EVENT2("gpu", "BindAsyncTransfer",
                 "width", define_params_.width,
                 "height", define_params_.height);
    DCHECK(texture_id_);

    glBindTexture(GL_TEXTURE_2D, texture_id_);
    bind_callback_.Run();
  }

  void WaitForTransferCompletion() {
    TRACE_EVENT0("gpu", "WaitForTransferCompletion");
    DCHECK(pending_upload_task_.get());
    if (!pending_upload_task_->TryRun()) {
      pending_upload_task_->WaitForTask();
    }
    pending_upload_task_ = NULL;
  }

  void CancelUpload() {
    TRACE_EVENT0("gpu", "CancelUpload");
    if (pending_upload_task_.get())
      pending_upload_task_->Cancel();
    pending_upload_task_ = NULL;
  }

  void ScheduleAsyncTexImage2D(
      const AsyncTexImage2DParams tex_params,
      const AsyncMemoryParams mem_params,
      scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats,
      const base::Closure& bind_callback) {
    TRACE_EVENT_SYNTHETIC_DELAY_BEGIN("gpu.AsyncTexImage");
    pending_upload_task_ = new PendingTask(base::Bind(
        &TransferStateInternal::PerformAsyncTexImage2D,
        this,
        tex_params,
        mem_params,
        texture_upload_stats));
    transfer_task_runner()->PostTask(
        FROM_HERE, base::Bind(&PendingTask::BindAndRun, pending_upload_task_,
                              texture_id_));

    // Save the late bind callback, so we can notify the client when it is
    // bound.
    bind_callback_ = bind_callback;
  }

  void ScheduleAsyncTexSubImage2D(
      AsyncTexSubImage2DParams tex_params,
      AsyncMemoryParams mem_params,
      scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
    TRACE_EVENT_SYNTHETIC_DELAY_BEGIN("gpu.AsyncTexImage");
    pending_upload_task_ = new PendingTask(base::Bind(
        &TransferStateInternal::PerformAsyncTexSubImage2D,
        this,
        tex_params,
        mem_params,
        texture_upload_stats));
    transfer_task_runner()->PostTask(
        FROM_HERE, base::Bind(&PendingTask::BindAndRun, pending_upload_task_,
                              texture_id_));
  }

 private:
  friend class base::RefCountedThreadSafe<TransferStateInternal>;

  virtual ~TransferStateInternal() {
  }

  void PerformAsyncTexImage2D(
      AsyncTexImage2DParams tex_params,
      AsyncMemoryParams mem_params,
      scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
    TRACE_EVENT2("gpu",
                 "PerformAsyncTexImage",
                 "width",
                 tex_params.width,
                 "height",
                 tex_params.height);
    DCHECK_EQ(0, tex_params.level);

    base::TimeTicks begin_time;
    if (texture_upload_stats.get())
      begin_time = base::TimeTicks::Now();

    void* data = mem_params.GetDataAddress();

    {
      TRACE_EVENT0("gpu", "glTexImage2D");
      glTexImage2D(GL_TEXTURE_2D,
                   tex_params.level,
                   tex_params.internal_format,
                   tex_params.width,
                   tex_params.height,
                   tex_params.border,
                   tex_params.format,
                   tex_params.type,
                   data);
      TRACE_EVENT_SYNTHETIC_DELAY_END("gpu.AsyncTexImage");
    }

    if (texture_upload_stats.get()) {
      texture_upload_stats->AddUpload(base::TimeTicks::Now() - begin_time);
    }
  }

  void PerformAsyncTexSubImage2D(
      AsyncTexSubImage2DParams tex_params,
      AsyncMemoryParams mem_params,
      scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
    TRACE_EVENT2("gpu",
                 "PerformAsyncTexSubImage2D",
                 "width",
                 tex_params.width,
                 "height",
                 tex_params.height);
    DCHECK_EQ(0, tex_params.level);

    base::TimeTicks begin_time;
    if (texture_upload_stats.get())
      begin_time = base::TimeTicks::Now();

    void* data = mem_params.GetDataAddress();
    {
      TRACE_EVENT0("gpu", "glTexSubImage2D");
      glTexSubImage2D(GL_TEXTURE_2D,
                      tex_params.level,
                      tex_params.xoffset,
                      tex_params.yoffset,
                      tex_params.width,
                      tex_params.height,
                      tex_params.format,
                      tex_params.type,
                      data);
      TRACE_EVENT_SYNTHETIC_DELAY_END("gpu.AsyncTexImage");
    }

    if (texture_upload_stats.get()) {
      texture_upload_stats->AddUpload(base::TimeTicks::Now() - begin_time);
    }
  }

  scoped_refptr<PendingTask> pending_upload_task_;

  GLuint texture_id_;

  // Definition params for texture that needs binding.
  AsyncTexImage2DParams define_params_;

  // Callback to invoke when AsyncTexImage2D is complete
  // and the client can safely use the texture. This occurs
  // during BindCompletedAsyncTransfers().
  base::Closure bind_callback_;
};

}  // namespace

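// Handles async uploads for a single texture (one delegate per TextureRef).
// Upload work is scheduled on the shared transfer thread through
// TransferStateInternal; the delegate also queues itself on
// SharedState::pending_allocations so the manager can bind the texture on
// the main thread once the upload completes.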
class AsyncPixelTransferDelegateShareGroup
    : public AsyncPixelTransferDelegate,
      public base::SupportsWeakPtr<AsyncPixelTransferDelegateShareGroup> {
 public:
  AsyncPixelTransferDelegateShareGroup(
      AsyncPixelTransferManagerShareGroup::SharedState* shared_state,
      GLuint texture_id,
      const AsyncTexImage2DParams& define_params);
  ~AsyncPixelTransferDelegateShareGroup() override;

  void BindTransfer() { state_->BindTransfer(); }

  // Implement AsyncPixelTransferDelegate:
  void AsyncTexImage2D(const AsyncTexImage2DParams& tex_params,
                       const AsyncMemoryParams& mem_params,
                       const base::Closure& bind_callback) override;
  void AsyncTexSubImage2D(const AsyncTexSubImage2DParams& tex_params,
                          const AsyncMemoryParams& mem_params) override;
  bool TransferIsInProgress() override;
  void WaitForTransferCompletion() override;

 private:
  // A raw pointer is safe because the SharedState is owned by the Manager,
  // which owns this Delegate.
  AsyncPixelTransferManagerShareGroup::SharedState* shared_state_;
  scoped_refptr<TransferStateInternal> state_;

  DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferDelegateShareGroup);
};

AsyncPixelTransferDelegateShareGroup::AsyncPixelTransferDelegateShareGroup(
    AsyncPixelTransferManagerShareGroup::SharedState* shared_state,
    GLuint texture_id,
    const AsyncTexImage2DParams& define_params)
    : shared_state_(shared_state),
      state_(new TransferStateInternal(texture_id, define_params)) {}

AsyncPixelTransferDelegateShareGroup::~AsyncPixelTransferDelegateShareGroup() {
  TRACE_EVENT0("gpu", " ~AsyncPixelTransferDelegateShareGroup");
  state_->CancelUpload();
}

bool AsyncPixelTransferDelegateShareGroup::TransferIsInProgress() {
  return state_->TransferIsInProgress();
}

void AsyncPixelTransferDelegateShareGroup::WaitForTransferCompletion() {
  if (state_->TransferIsInProgress()) {
    state_->WaitForTransferCompletion();
    DCHECK(!state_->TransferIsInProgress());
  }

  // Fast track the BindTransfer, if applicable.
  for (AsyncPixelTransferManagerShareGroup::SharedState::TransferQueue::iterator
           iter = shared_state_->pending_allocations.begin();
       iter != shared_state_->pending_allocations.end();
       ++iter) {
    if (iter->get() != this)
      continue;

    shared_state_->pending_allocations.erase(iter);
    BindTransfer();
    break;
  }
}

void AsyncPixelTransferDelegateShareGroup::AsyncTexImage2D(
    const AsyncTexImage2DParams& tex_params,
    const AsyncMemoryParams& mem_params,
    const base::Closure& bind_callback) {
  DCHECK(!state_->TransferIsInProgress());
  DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target);
  DCHECK_EQ(tex_params.level, 0);

  shared_state_->pending_allocations.push_back(AsWeakPtr());
  state_->ScheduleAsyncTexImage2D(tex_params,
                                  mem_params,
                                  shared_state_->texture_upload_stats,
                                  bind_callback);
}

void AsyncPixelTransferDelegateShareGroup::AsyncTexSubImage2D(
    const AsyncTexSubImage2DParams& tex_params,
    const AsyncMemoryParams& mem_params) {
  TRACE_EVENT2("gpu", "AsyncTexSubImage2D",
               "width", tex_params.width,
               "height", tex_params.height);
  DCHECK(!state_->TransferIsInProgress());
  DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target);
  DCHECK_EQ(tex_params.level, 0);

  state_->ScheduleAsyncTexSubImage2D(
      tex_params, mem_params, shared_state_->texture_upload_stats);
}

AsyncPixelTransferManagerShareGroup::SharedState::SharedState()
    // TODO(reveman): Skip this if --enable-gpu-benchmarking is not present.
    : texture_upload_stats(new AsyncPixelTransferUploadStats) {}

AsyncPixelTransferManagerShareGroup::SharedState::~SharedState() {}

AsyncPixelTransferManagerShareGroup::AsyncPixelTransferManagerShareGroup(
    gfx::GLContext* context) {
  g_transfer_thread.Pointer()->InitializeOnMainThread(context);
}

AsyncPixelTransferManagerShareGroup::~AsyncPixelTransferManagerShareGroup() {}

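// Called on the main thread. Binds textures whose uploads have finished, in
// queue order, and runs their bind callbacks. Stops at the first delegate
// whose transfer is still in progress, since transfers complete in order.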
void AsyncPixelTransferManagerShareGroup::BindCompletedAsyncTransfers() {
  scoped_ptr<gfx::ScopedTextureBinder> texture_binder;

  while (!shared_state_.pending_allocations.empty()) {
    if (!shared_state_.pending_allocations.front().get()) {
      shared_state_.pending_allocations.pop_front();
      continue;
    }
    AsyncPixelTransferDelegateShareGroup* delegate =
        shared_state_.pending_allocations.front().get();
    // Terminate early, as all transfers finish in order, currently.
    if (delegate->TransferIsInProgress())
      break;

    if (!texture_binder)
      texture_binder.reset(new gfx::ScopedTextureBinder(GL_TEXTURE_2D, 0));

    // Used to set tex info from the gles2 cmd decoder once upload has
    // finished (it'll bind the texture and call a callback).
    delegate->BindTransfer();

    shared_state_.pending_allocations.pop_front();
  }
}

void AsyncPixelTransferManagerShareGroup::AsyncNotifyCompletion(
    const AsyncMemoryParams& mem_params,
    AsyncPixelTransferCompletionObserver* observer) {
  // Post a PerformNotifyCompletion task to the upload thread. This task
  // will run after all async transfers are complete.
  transfer_task_runner()->PostTask(
      FROM_HERE, base::Bind(&PerformNotifyCompletion, mem_params,
                            make_scoped_refptr(observer)));
}

uint32 AsyncPixelTransferManagerShareGroup::GetTextureUploadCount() {
  return shared_state_.texture_upload_stats->GetStats(NULL);
}

base::TimeDelta
AsyncPixelTransferManagerShareGroup::GetTotalTextureUploadTime() {
  base::TimeDelta total_texture_upload_time;
  shared_state_.texture_upload_stats->GetStats(&total_texture_upload_time);
  return total_texture_upload_time;
}

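// Nothing to pump from the main thread: uploads are driven entirely by the
// dedicated transfer thread.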
void AsyncPixelTransferManagerShareGroup::ProcessMorePendingTransfers() {
}

bool AsyncPixelTransferManagerShareGroup::NeedsProcessMorePendingTransfers() {
  return false;
}

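// Waiting on the most recently queued transfer is sufficient: transfers
// finish in order, so once the last one has completed, all earlier ones
// have as well.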
void AsyncPixelTransferManagerShareGroup::WaitAllAsyncTexImage2D() {
  if (shared_state_.pending_allocations.empty())
    return;

  AsyncPixelTransferDelegateShareGroup* delegate =
      shared_state_.pending_allocations.back().get();
  if (delegate)
    delegate->WaitForTransferCompletion();
}

AsyncPixelTransferDelegate*
AsyncPixelTransferManagerShareGroup::CreatePixelTransferDelegateImpl(
    gles2::TextureRef* ref,
    const AsyncTexImage2DParams& define_params) {
  return new AsyncPixelTransferDelegateShareGroup(
      &shared_state_, ref->service_id(), define_params);
}

}  // namespace gpu