// gpu/command_buffer/service/async_pixel_transfer_manager_egl.cc
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/command_buffer/service/async_pixel_transfer_manager_egl.h"

#include <list>
#include <string>

#include "base/bind.h"
#include "base/debug/trace_event.h"
#include "base/debug/trace_event_synthetic_delay.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
#include "gpu/command_buffer/service/async_pixel_transfer_delegate.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_surface_egl.h"
#include "ui/gl/scoped_binders.h"

namespace gpu {

namespace {

bool CheckErrors(const char* file, int line) {
  EGLint eglerror;
  GLenum glerror;
  bool success = true;
  while ((eglerror = eglGetError()) != EGL_SUCCESS) {
    LOG(ERROR) << "Async transfer EGL error at "
               << file << ":" << line << " " << eglerror;
    success = false;
  }
  while ((glerror = glGetError()) != GL_NO_ERROR) {
    LOG(ERROR) << "Async transfer OpenGL error at "
               << file << ":" << line << " " << glerror;
    success = false;
  }
  return success;
}
#define CHECK_GL() CheckErrors(__FILE__, __LINE__)

const char kAsyncTransferThreadName[] = "AsyncTransferThread";

// Regular glTexImage2D call.
void DoTexImage2D(const AsyncTexImage2DParams& tex_params, void* data) {
  glTexImage2D(
      GL_TEXTURE_2D, tex_params.level, tex_params.internal_format,
      tex_params.width, tex_params.height,
      tex_params.border, tex_params.format, tex_params.type, data);
}

// Regular glTexSubImage2D call.
void DoTexSubImage2D(const AsyncTexSubImage2DParams& tex_params, void* data) {
  glTexSubImage2D(
      GL_TEXTURE_2D, tex_params.level,
      tex_params.xoffset, tex_params.yoffset,
      tex_params.width, tex_params.height,
      tex_params.format, tex_params.type, data);
}

// Full glTexSubImage2D call, from glTexImage2D params.
void DoFullTexSubImage2D(const AsyncTexImage2DParams& tex_params, void* data) {
  glTexSubImage2D(
      GL_TEXTURE_2D, tex_params.level,
      0, 0, tex_params.width, tex_params.height,
      tex_params.format, tex_params.type, data);
}

void SetGlParametersForEglImageTexture() {
  // These params are needed for EGLImage creation to succeed on several
  // Android devices. I couldn't find this requirement in the EGLImage
  // extension spec, but several devices fail without it.
  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
}

void PerformNotifyCompletion(
    AsyncMemoryParams mem_params,
    scoped_refptr<AsyncPixelTransferCompletionObserver> observer) {
  TRACE_EVENT0("gpu", "PerformNotifyCompletion");
  observer->DidComplete(mem_params);
}

class TransferThread : public base::Thread {
 public:
  TransferThread() : base::Thread(kAsyncTransferThreadName) {
    Start();
#if defined(OS_ANDROID) || defined(OS_LINUX)
    SetPriority(base::kThreadPriority_Background);
#endif
  }
  virtual ~TransferThread() {
    Stop();
  }

  virtual void Init() OVERRIDE {
    gfx::GLShareGroup* share_group = NULL;
    surface_ = new gfx::PbufferGLSurfaceEGL(gfx::Size(1, 1));
    surface_->Initialize();
    context_ = gfx::GLContext::CreateGLContext(
        share_group, surface_.get(), gfx::PreferDiscreteGpu);
    bool is_current = context_->MakeCurrent(surface_.get());
    DCHECK(is_current);
  }

  virtual void CleanUp() OVERRIDE {
    surface_ = NULL;
    context_->ReleaseCurrent(surface_.get());
    context_ = NULL;
  }

 private:
  scoped_refptr<gfx::GLContext> context_;
  scoped_refptr<gfx::GLSurface> surface_;

  DISALLOW_COPY_AND_ASSIGN(TransferThread);
};

base::LazyInstance<TransferThread>
    g_transfer_thread = LAZY_INSTANCE_INITIALIZER;

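// A single lazily created upload thread (with its own 1x1 pbuffer surface
// and GL context) is shared by all transfers, so tasks posted through
// transfer_message_loop_proxy() below run one at a time, in the order they
// were posted.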
base::MessageLoopProxy* transfer_message_loop_proxy() {
  return g_transfer_thread.Pointer()->message_loop_proxy().get();
}

// Class which holds async pixel transfers state (EGLImage).
// The EGLImage is accessed by either thread, but everything
// else is accessed only on the main thread.
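// In the async-allocation path, the EGLImage is created from the upload
// thread's texture (thread_texture_id_); BindTransfer() later attaches the
// same egl_image_ to the client's texture (texture_id_) with
// glEGLImageTargetTexture2DOES, which makes the two textures EGLImage
// siblings that share the uploaded pixels.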
class TransferStateInternal
    : public base::RefCountedThreadSafe<TransferStateInternal> {
 public:
  TransferStateInternal(GLuint texture_id,
                        const AsyncTexImage2DParams& define_params,
                        bool wait_for_uploads,
                        bool wait_for_creation,
                        bool use_image_preserved)
      : texture_id_(texture_id),
        thread_texture_id_(0),
        transfer_completion_(true, true),
        egl_image_(EGL_NO_IMAGE_KHR),
        wait_for_uploads_(wait_for_uploads),
        wait_for_creation_(wait_for_creation),
        use_image_preserved_(use_image_preserved) {
    define_params_ = define_params;
  }

  bool TransferIsInProgress() {
    return !transfer_completion_.IsSignaled();
  }

  void BindTransfer() {
    TRACE_EVENT2("gpu", "BindAsyncTransfer glEGLImageTargetTexture2DOES",
                 "width", define_params_.width,
                 "height", define_params_.height);
    DCHECK(texture_id_);
    if (EGL_NO_IMAGE_KHR == egl_image_)
      return;

    glBindTexture(GL_TEXTURE_2D, texture_id_);
    glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, egl_image_);
    bind_callback_.Run();

    DCHECK(CHECK_GL());
  }

  void CreateEglImage(GLuint texture_id) {
    TRACE_EVENT0("gpu", "eglCreateImageKHR");
    DCHECK(texture_id);
    DCHECK_EQ(egl_image_, EGL_NO_IMAGE_KHR);

    EGLDisplay egl_display = eglGetCurrentDisplay();
    EGLContext egl_context = eglGetCurrentContext();
    EGLenum egl_target = EGL_GL_TEXTURE_2D_KHR;
    // For EGL_GL_TEXTURE_2D_KHR, the client buffer is the GL texture name
    // cast to EGLClientBuffer.
    EGLClientBuffer egl_buffer =
        reinterpret_cast<EGLClientBuffer>(texture_id);

    EGLint image_preserved = use_image_preserved_ ? EGL_TRUE : EGL_FALSE;
    EGLint egl_attrib_list[] = {
        EGL_GL_TEXTURE_LEVEL_KHR, 0,  // mip-level.
        EGL_IMAGE_PRESERVED_KHR, image_preserved,
        EGL_NONE
    };
    egl_image_ = eglCreateImageKHR(
        egl_display,
        egl_context,
        egl_target,
        egl_buffer,
        egl_attrib_list);

    DLOG_IF(ERROR, EGL_NO_IMAGE_KHR == egl_image_)
        << "eglCreateImageKHR failed";
  }

  void CreateEglImageOnUploadThread() {
    CreateEglImage(thread_texture_id_);
  }

  void CreateEglImageOnMainThreadIfNeeded() {
    if (egl_image_ == EGL_NO_IMAGE_KHR) {
      CreateEglImage(texture_id_);
      if (wait_for_creation_) {
        TRACE_EVENT0("gpu", "glFinish creation");
        glFinish();
      }
    }
  }

  void WaitForLastUpload() {
    // This glFinish is just a safe-guard for if uploads have some
    // GPU action that needs to occur. We could use fences and try
    // to do this less often. However, on older drivers fences are
    // not always reliable (eg. Mali-400 just blocks forever).
    if (wait_for_uploads_) {
      TRACE_EVENT0("gpu", "glFinish");
      glFinish();
    }
  }

  void MarkAsTransferIsInProgress() {
    TRACE_EVENT_SYNTHETIC_DELAY_BEGIN("gpu.AsyncTexImage");
    transfer_completion_.Reset();
  }

  void MarkAsCompleted() {
    TRACE_EVENT_SYNTHETIC_DELAY_END("gpu.AsyncTexImage");
    transfer_completion_.Signal();
  }

  void WaitForTransferCompletion() {
    TRACE_EVENT0("gpu", "WaitForTransferCompletion");
    // TODO(backer): Deschedule the channel rather than blocking the main GPU
    // thread (crbug.com/240265).
    transfer_completion_.Wait();
  }

  void PerformAsyncTexImage2D(
      AsyncTexImage2DParams tex_params,
      AsyncMemoryParams mem_params,
      scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
    TRACE_EVENT2("gpu",
                 "PerformAsyncTexImage",
                 "width",
                 tex_params.width,
                 "height",
                 tex_params.height);
    DCHECK(!thread_texture_id_);
    DCHECK_EQ(0, tex_params.level);
    if (EGL_NO_IMAGE_KHR != egl_image_) {
      MarkAsCompleted();
      return;
    }

    void* data = mem_params.GetDataAddress();

    base::TimeTicks begin_time;
    if (texture_upload_stats.get())
      begin_time = base::TimeTicks::HighResNow();

    {
      TRACE_EVENT0("gpu", "glTexImage2D no data");
      glGenTextures(1, &thread_texture_id_);
      glActiveTexture(GL_TEXTURE0);
      glBindTexture(GL_TEXTURE_2D, thread_texture_id_);

      SetGlParametersForEglImageTexture();

      // If we need to use image_preserved, we pass the data with
      // the allocation. Otherwise we use a NULL allocation to
      // try to avoid any costs associated with creating the EGLImage.
      if (use_image_preserved_)
        DoTexImage2D(tex_params, data);
      else
        DoTexImage2D(tex_params, NULL);
    }

    CreateEglImageOnUploadThread();

    {
      TRACE_EVENT0("gpu", "glTexSubImage2D with data");

      // If we didn't use image_preserved, we haven't uploaded
      // the data yet, so we do this with a full texSubImage.
      if (!use_image_preserved_)
        DoFullTexSubImage2D(tex_params, data);
    }

    WaitForLastUpload();
    MarkAsCompleted();

    DCHECK(CHECK_GL());
    if (texture_upload_stats.get()) {
      texture_upload_stats->AddUpload(base::TimeTicks::HighResNow() -
                                      begin_time);
    }
  }

  void PerformAsyncTexSubImage2D(
      AsyncTexSubImage2DParams tex_params,
      AsyncMemoryParams mem_params,
      scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
    TRACE_EVENT2("gpu",
                 "PerformAsyncTexSubImage2D",
                 "width",
                 tex_params.width,
                 "height",
                 tex_params.height);

    DCHECK_NE(EGL_NO_IMAGE_KHR, egl_image_);
    DCHECK_EQ(0, tex_params.level);

    void* data = mem_params.GetDataAddress();

    base::TimeTicks begin_time;
    if (texture_upload_stats.get())
      begin_time = base::TimeTicks::HighResNow();

    if (!thread_texture_id_) {
      TRACE_EVENT0("gpu", "glEGLImageTargetTexture2DOES");
      glGenTextures(1, &thread_texture_id_);
      glActiveTexture(GL_TEXTURE0);
      glBindTexture(GL_TEXTURE_2D, thread_texture_id_);
      glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, egl_image_);
    } else {
      glActiveTexture(GL_TEXTURE0);
      glBindTexture(GL_TEXTURE_2D, thread_texture_id_);
    }
    {
      TRACE_EVENT0("gpu", "glTexSubImage2D");
      DoTexSubImage2D(tex_params, data);
    }
    WaitForLastUpload();
    MarkAsCompleted();

    DCHECK(CHECK_GL());
    if (texture_upload_stats.get()) {
      texture_upload_stats->AddUpload(base::TimeTicks::HighResNow() -
                                      begin_time);
    }
  }

 protected:
  friend class base::RefCountedThreadSafe<TransferStateInternal>;
  friend class gpu::AsyncPixelTransferDelegateEGL;

  static void DeleteTexture(GLuint id) {
    glDeleteTextures(1, &id);
  }

  virtual ~TransferStateInternal() {
    if (egl_image_ != EGL_NO_IMAGE_KHR) {
      EGLDisplay display = eglGetCurrentDisplay();
      eglDestroyImageKHR(display, egl_image_);
    }
    if (thread_texture_id_) {
      transfer_message_loop_proxy()->PostTask(FROM_HERE,
          base::Bind(&DeleteTexture, thread_texture_id_));
    }
  }

  // The 'real' texture.
  GLuint texture_id_;

  // The EGLImage sibling on the upload thread.
  GLuint thread_texture_id_;

  // Definition params for texture that needs binding.
  AsyncTexImage2DParams define_params_;

  // Indicates that an async transfer is in progress.
  base::WaitableEvent transfer_completion_;

  // It would be nice if we could just create a new EGLImage for
  // every upload, but I found that didn't work, so this stores
  // one for the lifetime of the texture.
  EGLImageKHR egl_image_;

  // Callback to invoke when AsyncTexImage2D is complete
  // and the client can safely use the texture. This occurs
  // during BindCompletedAsyncTransfers().
  base::Closure bind_callback_;

  // Customize when we block on fences (these are work-arounds).
  bool wait_for_uploads_;
  bool wait_for_creation_;
  bool use_image_preserved_;
};

}  // namespace

// Class which handles async pixel transfers using EGLImageKHR and another
// upload thread.
class AsyncPixelTransferDelegateEGL
    : public AsyncPixelTransferDelegate,
      public base::SupportsWeakPtr<AsyncPixelTransferDelegateEGL> {
 public:
  AsyncPixelTransferDelegateEGL(
      AsyncPixelTransferManagerEGL::SharedState* shared_state,
      GLuint texture_id,
      const AsyncTexImage2DParams& define_params);
  virtual ~AsyncPixelTransferDelegateEGL();

  void BindTransfer() { state_->BindTransfer(); }

  // Implement AsyncPixelTransferDelegate:
  virtual void AsyncTexImage2D(
      const AsyncTexImage2DParams& tex_params,
      const AsyncMemoryParams& mem_params,
      const base::Closure& bind_callback) OVERRIDE;
  virtual void AsyncTexSubImage2D(
      const AsyncTexSubImage2DParams& tex_params,
      const AsyncMemoryParams& mem_params) OVERRIDE;
  virtual bool TransferIsInProgress() OVERRIDE;
  virtual void WaitForTransferCompletion() OVERRIDE;

 private:
  // Returns true if a work-around was used.
  bool WorkAroundAsyncTexImage2D(
      const AsyncTexImage2DParams& tex_params,
      const AsyncMemoryParams& mem_params,
      const base::Closure& bind_callback);
  bool WorkAroundAsyncTexSubImage2D(
      const AsyncTexSubImage2DParams& tex_params,
      const AsyncMemoryParams& mem_params);

  // A raw pointer is safe because the SharedState is owned by the Manager,
  // which owns this Delegate.
  AsyncPixelTransferManagerEGL::SharedState* shared_state_;
  scoped_refptr<TransferStateInternal> state_;

  DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferDelegateEGL);
};

AsyncPixelTransferDelegateEGL::AsyncPixelTransferDelegateEGL(
    AsyncPixelTransferManagerEGL::SharedState* shared_state,
    GLuint texture_id,
    const AsyncTexImage2DParams& define_params)
    : shared_state_(shared_state) {
  // We can't wait on uploads on imagination (it can take 200ms+).
  // In practice, they are complete when the CPU glTexSubImage2D completes.
  bool wait_for_uploads = !shared_state_->is_imagination;

  // Qualcomm runs into texture corruption problems if the same texture is
  // uploaded to with both async and normal uploads. Synchronize after EGLImage
  // creation on the main thread as a work-around.
  bool wait_for_creation = shared_state_->is_qualcomm;

  // Qualcomm has a race when using image_preserved=FALSE,
  // which can result in black textures even after the first upload.
  // Since using FALSE is mainly for performance (to avoid layout changes),
  // and Qualcomm itself doesn't seem to get any performance benefit,
  // we just use image_preserved=TRUE on Qualcomm as a work-around.
  bool use_image_preserved =
      shared_state_->is_qualcomm || shared_state_->is_imagination;

  state_ = new TransferStateInternal(texture_id,
                                     define_params,
                                     wait_for_uploads,
                                     wait_for_creation,
                                     use_image_preserved);
}

AsyncPixelTransferDelegateEGL::~AsyncPixelTransferDelegateEGL() {}

bool AsyncPixelTransferDelegateEGL::TransferIsInProgress() {
  return state_->TransferIsInProgress();
}

void AsyncPixelTransferDelegateEGL::WaitForTransferCompletion() {
  if (state_->TransferIsInProgress()) {
    // Temporarily boost the upload thread's priority while the GPU main
    // thread blocks on it, then drop it back to background afterwards.
#if defined(OS_ANDROID) || defined(OS_LINUX)
    g_transfer_thread.Pointer()->SetPriority(base::kThreadPriority_Display);
#endif

    state_->WaitForTransferCompletion();
    DCHECK(!state_->TransferIsInProgress());

#if defined(OS_ANDROID) || defined(OS_LINUX)
    g_transfer_thread.Pointer()->SetPriority(base::kThreadPriority_Background);
#endif
  }
}

void AsyncPixelTransferDelegateEGL::AsyncTexImage2D(
    const AsyncTexImage2DParams& tex_params,
    const AsyncMemoryParams& mem_params,
    const base::Closure& bind_callback) {
  if (WorkAroundAsyncTexImage2D(tex_params, mem_params, bind_callback))
    return;

  DCHECK(!state_->TransferIsInProgress());
  DCHECK_EQ(state_->egl_image_, EGL_NO_IMAGE_KHR);
  DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target);
  DCHECK_EQ(tex_params.level, 0);

  // Mark the transfer in progress and save the late bind
  // callback, so we can notify the client when it is bound.
  shared_state_->pending_allocations.push_back(AsWeakPtr());
  state_->bind_callback_ = bind_callback;

  // Mark the transfer in progress.
  state_->MarkAsTransferIsInProgress();

  // Duplicate the shared memory so there is no way we can get
  // a use-after-free of the raw pixels.
  transfer_message_loop_proxy()->PostTask(FROM_HERE,
      base::Bind(
          &TransferStateInternal::PerformAsyncTexImage2D,
          state_,
          tex_params,
          mem_params,
          shared_state_->texture_upload_stats));

  DCHECK(CHECK_GL());
}

void AsyncPixelTransferDelegateEGL::AsyncTexSubImage2D(
    const AsyncTexSubImage2DParams& tex_params,
    const AsyncMemoryParams& mem_params) {
  TRACE_EVENT2("gpu", "AsyncTexSubImage2D",
               "width", tex_params.width,
               "height", tex_params.height);
  if (WorkAroundAsyncTexSubImage2D(tex_params, mem_params))
    return;
  DCHECK(!state_->TransferIsInProgress());
  DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target);
  DCHECK_EQ(tex_params.level, 0);

  // Mark the transfer in progress.
  state_->MarkAsTransferIsInProgress();

  // If this wasn't async allocated, we don't have an EGLImage yet.
  // Create the EGLImage if it hasn't already been created.
  state_->CreateEglImageOnMainThreadIfNeeded();

  // Duplicate the shared memory so there is no way we can get
  // a use-after-free of the raw pixels.
  transfer_message_loop_proxy()->PostTask(FROM_HERE,
      base::Bind(
          &TransferStateInternal::PerformAsyncTexSubImage2D,
          state_,
          tex_params,
          mem_params,
          shared_state_->texture_upload_stats));

  DCHECK(CHECK_GL());
}

namespace {
bool IsPowerOfTwo(unsigned int x) {
  return ((x != 0) && !(x & (x - 1)));
}

bool IsMultipleOfEight(unsigned int x) {
  return (x & 7) == 0;
}

bool DimensionsSupportImgFastPath(int width, int height) {
  // Both dimensions must be multiples of eight, and not both powers of two.
  return IsMultipleOfEight(width) &&
         IsMultipleOfEight(height) &&
         !(IsPowerOfTwo(width) &&
           IsPowerOfTwo(height));
}

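// For example (illustrative only): DimensionsSupportImgFastPath(720, 1280)
// is true (both dimensions are multiples of eight and they are not both
// powers of two), while (256, 256) is false (both powers of two) and
// (100, 100) is false (not multiples of eight).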
}  // namespace

// It is very difficult to stream uploads on Imagination GPUs:
// - glTexImage2D defers a swizzle/stall until draw-time.
// - glTexSubImage2D will sleep for 16ms on a good day, and 100ms
//   or longer if OpenGL is in heavy use by another thread.
// The one combination that avoids these problems requires:
// a.) Allocations/Uploads must occur on different threads/contexts.
// b.) Texture size must be non-power-of-two.
// When using a+b, uploads will be incorrect/corrupt unless:
// c.) Texture size must be a multiple-of-eight.
//
// To achieve a.) we allocate synchronously on the main thread followed
// by uploading on the upload thread. When b/c are not true we fall back
// on purely synchronous allocation/upload on the main thread.
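//
// In code terms (summarizing the above): WorkAroundAsyncTexImage2D() always
// performs the allocation (glTexImage2D) synchronously on the main thread;
// when DimensionsSupportImgFastPath() holds (b + c), it also creates the
// EGLImage so later AsyncTexSubImage2D() calls can use the normal async
// upload path, otherwise WorkAroundAsyncTexSubImage2D() uploads
// synchronously as well.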
bool AsyncPixelTransferDelegateEGL::WorkAroundAsyncTexImage2D(
    const AsyncTexImage2DParams& tex_params,
    const AsyncMemoryParams& mem_params,
    const base::Closure& bind_callback) {
  if (!shared_state_->is_imagination)
    return false;

  // On imagination we allocate synchronously all the time, even
  // if the dimensions support fast uploads. This is for part a.)
  // above, so allocations occur on a different thread/context than uploads.
  void* data = mem_params.GetDataAddress();
  SetGlParametersForEglImageTexture();

  {
    TRACE_EVENT0("gpu", "glTexImage2D with data");
    DoTexImage2D(tex_params, data);
  }

  // The allocation has already occurred, so mark it as finished
  // and ready for binding.
  CHECK(!state_->TransferIsInProgress());

  // If the dimensions support fast async uploads, create the
  // EGLImage for future uploads. The late bind should not
  // be needed since the EGLImage was created from the main thread
  // texture, but this is required to prevent an imagination driver crash.
  if (DimensionsSupportImgFastPath(tex_params.width, tex_params.height)) {
    state_->CreateEglImageOnMainThreadIfNeeded();
    shared_state_->pending_allocations.push_back(AsWeakPtr());
    state_->bind_callback_ = bind_callback;
  }

  DCHECK(CHECK_GL());
  return true;
}

bool AsyncPixelTransferDelegateEGL::WorkAroundAsyncTexSubImage2D(
    const AsyncTexSubImage2DParams& tex_params,
    const AsyncMemoryParams& mem_params) {
  if (!shared_state_->is_imagination)
    return false;

  // If the dimensions support fast async uploads, we can use the
  // normal async upload path for uploads.
  if (DimensionsSupportImgFastPath(tex_params.width, tex_params.height))
    return false;

  // Fall back on a synchronous stub as we don't have a known fast path.
  // Also, older ICS drivers crash when we do any glTexSubImage2D on the
  // same thread. To work around this we do glTexImage2D instead. Since
  // we didn't create an EGLImage for this texture (see above), this is
  // okay, but it limits this API to full updates for now.
  DCHECK(!state_->egl_image_);
  DCHECK_EQ(tex_params.xoffset, 0);
  DCHECK_EQ(tex_params.yoffset, 0);
  DCHECK_EQ(state_->define_params_.width, tex_params.width);
  DCHECK_EQ(state_->define_params_.height, tex_params.height);
  DCHECK_EQ(state_->define_params_.level, tex_params.level);
  DCHECK_EQ(state_->define_params_.format, tex_params.format);
  DCHECK_EQ(state_->define_params_.type, tex_params.type);

  void* data = mem_params.GetDataAddress();
  base::TimeTicks begin_time;
  if (shared_state_->texture_upload_stats.get())
    begin_time = base::TimeTicks::HighResNow();
  {
    TRACE_EVENT0("gpu", "glTexSubImage2D");
    // Note we use define_params_ instead of tex_params.
    // The DCHECKs above verify this is always the same.
    DoTexImage2D(state_->define_params_, data);
  }
  if (shared_state_->texture_upload_stats.get()) {
    shared_state_->texture_upload_stats
        ->AddUpload(base::TimeTicks::HighResNow() - begin_time);
  }

  DCHECK(CHECK_GL());
  return true;
}

AsyncPixelTransferManagerEGL::SharedState::SharedState()
    // TODO(reveman): Skip this if --enable-gpu-benchmarking is not present.
    : texture_upload_stats(new AsyncPixelTransferUploadStats) {
  const char* vendor = reinterpret_cast<const char*>(glGetString(GL_VENDOR));
  if (vendor) {
    is_imagination =
        std::string(vendor).find("Imagination") != std::string::npos;
    is_qualcomm = std::string(vendor).find("Qualcomm") != std::string::npos;
  }
}

AsyncPixelTransferManagerEGL::SharedState::~SharedState() {}

AsyncPixelTransferManagerEGL::AsyncPixelTransferManagerEGL() {}

AsyncPixelTransferManagerEGL::~AsyncPixelTransferManagerEGL() {}

void AsyncPixelTransferManagerEGL::BindCompletedAsyncTransfers() {
  scoped_ptr<gfx::ScopedTextureBinder> texture_binder;

  while (!shared_state_.pending_allocations.empty()) {
    if (!shared_state_.pending_allocations.front().get()) {
      shared_state_.pending_allocations.pop_front();
      continue;
    }
    AsyncPixelTransferDelegateEGL* delegate =
        shared_state_.pending_allocations.front().get();
    // Terminate early, as all transfers finish in order, currently.
    if (delegate->TransferIsInProgress())
      break;

    if (!texture_binder)
      texture_binder.reset(new gfx::ScopedTextureBinder(GL_TEXTURE_2D, 0));

    // If the transfer is finished, bind it to the texture
    // and remove it from the pending list.
    delegate->BindTransfer();
    shared_state_.pending_allocations.pop_front();
  }
}

void AsyncPixelTransferManagerEGL::AsyncNotifyCompletion(
    const AsyncMemoryParams& mem_params,
    AsyncPixelTransferCompletionObserver* observer) {
  // Post a PerformNotifyCompletion task to the upload thread. This task
  // will run after all async transfers are complete.
  transfer_message_loop_proxy()->PostTask(
      FROM_HERE,
      base::Bind(&PerformNotifyCompletion,
                 mem_params,
                 make_scoped_refptr(observer)));
}

uint32 AsyncPixelTransferManagerEGL::GetTextureUploadCount() {
  return shared_state_.texture_upload_stats->GetStats(NULL);
}

base::TimeDelta AsyncPixelTransferManagerEGL::GetTotalTextureUploadTime() {
  base::TimeDelta total_texture_upload_time;
  shared_state_.texture_upload_stats->GetStats(&total_texture_upload_time);
  return total_texture_upload_time;
}

void AsyncPixelTransferManagerEGL::ProcessMorePendingTransfers() {
  // Nothing to do: transfers are performed eagerly on the dedicated upload
  // thread as soon as they are posted.
}

bool AsyncPixelTransferManagerEGL::NeedsProcessMorePendingTransfers() {
  return false;
}

void AsyncPixelTransferManagerEGL::WaitAllAsyncTexImage2D() {
  if (shared_state_.pending_allocations.empty())
    return;

  AsyncPixelTransferDelegateEGL* delegate =
      shared_state_.pending_allocations.back().get();
  if (delegate)
    delegate->WaitForTransferCompletion();
}

AsyncPixelTransferDelegate*
AsyncPixelTransferManagerEGL::CreatePixelTransferDelegateImpl(
    gles2::TextureRef* ref,
    const AsyncTexImage2DParams& define_params) {
  return new AsyncPixelTransferDelegateEGL(
      &shared_state_, ref->service_id(), define_params);
}

}  // namespace gpu