// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/command_buffer/service/in_process_command_buffer.h"

#include <queue>
#include <set>
#include <utility>

#include <GLES2/gl2.h>
#ifndef GL_GLEXT_PROTOTYPES
#define GL_GLEXT_PROTOTYPES 1
#endif
#include <GLES2/gl2ext.h>
#include <GLES2/gl2extchromium.h>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/weak_ptr.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/sequence_checker.h"
#include "base/synchronization/condition_variable.h"
#include "base/threading/thread.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/gl_context_virtual.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/query_manager.h"
#include "gpu/command_buffer/service/transfer_buffer_manager.h"
#include "ui/gfx/size.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image.h"
#include "ui/gl/gl_share_group.h"

#if defined(OS_ANDROID)
#include "gpu/command_buffer/service/stream_texture_manager_in_process_android.h"
#include "ui/gl/android/surface_texture.h"
#endif

namespace gpu {

namespace {

static InProcessGpuMemoryBufferFactory* g_gpu_memory_buffer_factory = NULL;

template <typename T>
static void RunTaskWithResult(base::Callback<T(void)> task,
                              T* result,
                              base::WaitableEvent* completion) {
  *result = task.Run();
  completion->Signal();
}
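
// Default Service implementation: a dedicated base::Thread ("GpuThread")
// that runs scheduled tasks on its message loop. It is ref-counted so that
// several InProcessCommandBuffer clients can share the one default thread
// (see GetDefaultService() below).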
class GpuInProcessThread
    : public base::Thread,
      public InProcessCommandBuffer::Service,
      public base::RefCountedThreadSafe<GpuInProcessThread> {
 public:
  GpuInProcessThread();

  virtual void AddRef() const OVERRIDE {
    base::RefCountedThreadSafe<GpuInProcessThread>::AddRef();
  }
  virtual void Release() const OVERRIDE {
    base::RefCountedThreadSafe<GpuInProcessThread>::Release();
  }

  virtual void ScheduleTask(const base::Closure& task) OVERRIDE;
  virtual void ScheduleIdleWork(const base::Closure& callback) OVERRIDE;
  virtual bool UseVirtualizedGLContexts() OVERRIDE { return false; }
  virtual scoped_refptr<gles2::ShaderTranslatorCache> shader_translator_cache()
      OVERRIDE;

 private:
  virtual ~GpuInProcessThread();
  friend class base::RefCountedThreadSafe<GpuInProcessThread>;

  scoped_refptr<gpu::gles2::ShaderTranslatorCache> shader_translator_cache_;
  DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread);
};

GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") {
  Start();
}

GpuInProcessThread::~GpuInProcessThread() {
  Stop();
}

void GpuInProcessThread::ScheduleTask(const base::Closure& task) {
  message_loop()->PostTask(FROM_HERE, task);
}

void GpuInProcessThread::ScheduleIdleWork(const base::Closure& callback) {
  message_loop()->PostDelayedTask(
      FROM_HERE, callback, base::TimeDelta::FromMilliseconds(5));
}

scoped_refptr<gles2::ShaderTranslatorCache>
GpuInProcessThread::shader_translator_cache() {
  if (!shader_translator_cache_.get())
    shader_translator_cache_ = new gpu::gles2::ShaderTranslatorCache;
  return shader_translator_cache_;
}

base::LazyInstance<std::set<InProcessCommandBuffer*> > default_thread_clients_ =
    LAZY_INSTANCE_INITIALIZER;
base::LazyInstance<base::Lock> default_thread_clients_lock_ =
    LAZY_INSTANCE_INITIALIZER;

class ScopedEvent {
 public:
  ScopedEvent(base::WaitableEvent* event) : event_(event) {}
  ~ScopedEvent() { event_->Signal(); }

 private:
  base::WaitableEvent* event_;
};
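
// Tracks sync points that have been generated but not yet retired. Waiters
// block on a condition variable until RetireSyncPoint() removes the point
// from the pending set and broadcasts.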
class SyncPointManager {
 public:
  SyncPointManager();
  ~SyncPointManager();

  uint32 GenerateSyncPoint();
  void RetireSyncPoint(uint32 sync_point);

  bool IsSyncPointPassed(uint32 sync_point);
  void WaitSyncPoint(uint32 sync_point);

 private:
  // This lock protects access to pending_sync_points_ and next_sync_point_ and
  // is used with the ConditionVariable to signal when a sync point is retired.
  base::Lock lock_;
  std::set<uint32> pending_sync_points_;
  uint32 next_sync_point_;
  base::ConditionVariable cond_var_;
};

SyncPointManager::SyncPointManager() : next_sync_point_(1), cond_var_(&lock_) {}

SyncPointManager::~SyncPointManager() {
  DCHECK_EQ(pending_sync_points_.size(), 0U);
}

uint32 SyncPointManager::GenerateSyncPoint() {
  base::AutoLock lock(lock_);
  uint32 sync_point = next_sync_point_++;
  DCHECK_EQ(pending_sync_points_.count(sync_point), 0U);
  pending_sync_points_.insert(sync_point);
  return sync_point;
}

void SyncPointManager::RetireSyncPoint(uint32 sync_point) {
  base::AutoLock lock(lock_);
  DCHECK(pending_sync_points_.count(sync_point));
  pending_sync_points_.erase(sync_point);
  cond_var_.Broadcast();
}

bool SyncPointManager::IsSyncPointPassed(uint32 sync_point) {
  base::AutoLock lock(lock_);
  return pending_sync_points_.count(sync_point) == 0;
}

void SyncPointManager::WaitSyncPoint(uint32 sync_point) {
  base::AutoLock lock(lock_);
  while (pending_sync_points_.count(sync_point)) {
    cond_var_.Wait();
  }
}

base::LazyInstance<SyncPointManager> g_sync_point_manager =
    LAZY_INSTANCE_INITIALIZER;

bool WaitSyncPoint(uint32 sync_point) {
  g_sync_point_manager.Get().WaitSyncPoint(sync_point);
  return true;
}

}  // anonymous namespace
InProcessCommandBuffer::Service::Service() {}

InProcessCommandBuffer::Service::~Service() {}
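
// Returns the Service shared by clients that did not supply their own: if
// another default-thread client already exists, reuse its service (and thus
// its GPU thread); otherwise spin up a new GpuInProcessThread.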
scoped_refptr<InProcessCommandBuffer::Service>
InProcessCommandBuffer::GetDefaultService() {
  base::AutoLock lock(default_thread_clients_lock_.Get());
  scoped_refptr<Service> service;
  if (!default_thread_clients_.Get().empty()) {
    InProcessCommandBuffer* other = *default_thread_clients_.Get().begin();
    service = other->service_;
    DCHECK(service.get());
  } else {
    service = new GpuInProcessThread;
  }
  return service;
}
InProcessCommandBuffer::InProcessCommandBuffer(
    const scoped_refptr<Service>& service)
    : context_lost_(false),
      idle_work_pending_(false),
      last_put_offset_(-1),
      flush_event_(false, false),
      service_(service.get() ? service : GetDefaultService()),
      gpu_thread_weak_ptr_factory_(this) {
  if (!service.get()) {
    base::AutoLock lock(default_thread_clients_lock_.Get());
    default_thread_clients_.Get().insert(this);
  }
}

InProcessCommandBuffer::~InProcessCommandBuffer() {
  Destroy();
  base::AutoLock lock(default_thread_clients_lock_.Get());
  default_thread_clients_.Get().erase(this);
}

void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) {
  CheckSequencedThread();
  DCHECK(!surface_->IsOffscreen());
  surface_->Resize(size);
}

bool InProcessCommandBuffer::MakeCurrent() {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();

  if (!context_lost_ && decoder_->MakeCurrent())
    return true;
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
  command_buffer_->SetParseError(gpu::error::kLostContext);
  return false;
}

void InProcessCommandBuffer::PumpCommands() {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();

  if (!MakeCurrent())
    return;

  gpu_scheduler_->PutChanged();
}

bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id) {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();
  command_buffer_->SetGetBuffer(transfer_buffer_id);
  return true;
}
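
// Client-thread entry point. Builds the initialization parameters, posts
// InitializeOnGpuThread() to the service's thread, and blocks on a
// WaitableEvent until it completes. If the client supplied its own GLSurface,
// the GPU thread must be the client thread (GLSurface is not thread safe),
// so a SequenceChecker is installed to enforce that.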
bool InProcessCommandBuffer::Initialize(
    scoped_refptr<gfx::GLSurface> surface,
    bool is_offscreen,
    gfx::AcceleratedWidget window,
    const gfx::Size& size,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    const base::Closure& context_lost_callback,
    InProcessCommandBuffer* share_group) {
  DCHECK(!share_group || service_ == share_group->service_);
  context_lost_callback_ = WrapCallback(context_lost_callback);

  if (surface.get()) {
    // GPU thread must be the same as client thread due to GLSurface not being
    // thread safe.
    sequence_checker_.reset(new base::SequenceChecker);
    surface_ = surface;
  }

  gpu::Capabilities capabilities;
  InitializeOnGpuThreadParams params(is_offscreen,
                                     window,
                                     size,
                                     attribs,
                                     gpu_preference,
                                     &capabilities,
                                     share_group);

  base::Callback<bool(void)> init_task =
      base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread,
                 base::Unretained(this),
                 params);

  base::WaitableEvent completion(true, false);
  bool result = false;
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion));
  completion.Wait();

  if (result) {
    capabilities_ = capabilities;
    capabilities_.map_image =
        capabilities_.map_image && g_gpu_memory_buffer_factory;
  }
  return result;
}
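
// Runs on the GPU thread. Wires up the CommandBufferService, GpuScheduler
// and GLES2Decoder, creates (or adopts) the GL surface and context, and
// reports the decoder's capabilities back through |params.capabilities|.
// Any failure tears everything down via DestroyOnGpuThread().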
bool InProcessCommandBuffer::InitializeOnGpuThread(
    const InitializeOnGpuThreadParams& params) {
  CheckSequencedThread();
  gpu_thread_weak_ptr_ = gpu_thread_weak_ptr_factory_.GetWeakPtr();

  DCHECK(params.size.width() >= 0 && params.size.height() >= 0);

  TransferBufferManager* manager = new TransferBufferManager();
  transfer_buffer_manager_.reset(manager);
  manager->Initialize();

  scoped_ptr<CommandBufferService> command_buffer(
      new CommandBufferService(transfer_buffer_manager_.get()));
  command_buffer->SetPutOffsetChangeCallback(base::Bind(
      &InProcessCommandBuffer::PumpCommands, gpu_thread_weak_ptr_));
  command_buffer->SetParseErrorCallback(base::Bind(
      &InProcessCommandBuffer::OnContextLost, gpu_thread_weak_ptr_));

  if (!command_buffer->Initialize()) {
    LOG(ERROR) << "Could not initialize command buffer.";
    DestroyOnGpuThread();
    return false;
  }

  gl_share_group_ = params.context_group
                        ? params.context_group->gl_share_group_.get()
                        : new gfx::GLShareGroup;

#if defined(OS_ANDROID)
  stream_texture_manager_.reset(new StreamTextureManagerInProcess);
#endif

  bool bind_generates_resource = false;
  decoder_.reset(gles2::GLES2Decoder::Create(
      params.context_group
          ? params.context_group->decoder_->GetContextGroup()
          : new gles2::ContextGroup(NULL,
                                    NULL,
                                    service_->shader_translator_cache(),
                                    NULL,
                                    bind_generates_resource)));

  gpu_scheduler_.reset(
      new GpuScheduler(command_buffer.get(), decoder_.get(), decoder_.get()));
  command_buffer->SetGetBufferChangeCallback(base::Bind(
      &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
  command_buffer_ = command_buffer.Pass();

  decoder_->set_engine(gpu_scheduler_.get());

  if (!surface_.get()) {
    if (params.is_offscreen)
      surface_ = gfx::GLSurface::CreateOffscreenGLSurface(params.size);
    else
      surface_ = gfx::GLSurface::CreateViewGLSurface(params.window);
  }

  if (!surface_.get()) {
    LOG(ERROR) << "Could not create GLSurface.";
    DestroyOnGpuThread();
    return false;
  }
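
  // Note: with virtualized contexts, every client on this service shares one
  // real GL context (stashed in the share group); each decoder then wraps it
  // in a GLContextVirtual, which restores that decoder's state when made
  // current. Otherwise each client gets its own real context.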
  if (service_->UseVirtualizedGLContexts()) {
    context_ = gl_share_group_->GetSharedContext();
    if (!context_.get()) {
      context_ = gfx::GLContext::CreateGLContext(
          gl_share_group_.get(), surface_.get(), params.gpu_preference);
      gl_share_group_->SetSharedContext(context_.get());
    }

    context_ = new GLContextVirtual(
        gl_share_group_.get(), context_.get(), decoder_->AsWeakPtr());
    if (context_->Initialize(surface_.get(), params.gpu_preference)) {
      VLOG(1) << "Created virtual GL context.";
    } else {
      context_ = NULL;
    }
  } else {
    context_ = gfx::GLContext::CreateGLContext(
        gl_share_group_.get(), surface_.get(), params.gpu_preference);
  }

  if (!context_.get()) {
    LOG(ERROR) << "Could not create GLContext.";
    DestroyOnGpuThread();
    return false;
  }

  if (!context_->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Could not make context current.";
    DestroyOnGpuThread();
    return false;
  }

  gles2::DisallowedFeatures disallowed_features;
  disallowed_features.gpu_memory_manager = true;
  if (!decoder_->Initialize(surface_,
                            context_,
                            params.is_offscreen,
                            params.size,
                            disallowed_features,
                            params.attribs)) {
    LOG(ERROR) << "Could not initialize decoder.";
    DestroyOnGpuThread();
    return false;
  }
  *params.capabilities = decoder_->GetCapabilities();

  if (!params.is_offscreen) {
    decoder_->SetResizeCallback(base::Bind(
        &InProcessCommandBuffer::OnResizeView, gpu_thread_weak_ptr_));
  }
  decoder_->SetWaitSyncPointCallback(base::Bind(&WaitSyncPoint));

  return true;
}

void InProcessCommandBuffer::Destroy() {
  CheckSequencedThread();

  base::WaitableEvent completion(true, false);
  bool result = false;
  base::Callback<bool(void)> destroy_task = base::Bind(
      &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion));
  completion.Wait();
}

bool InProcessCommandBuffer::DestroyOnGpuThread() {
  CheckSequencedThread();
  gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs();
  command_buffer_.reset();
  // Clean up GL resources if possible.
  bool have_context = context_.get() && context_->MakeCurrent(surface_.get());
  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }
  context_ = NULL;
  surface_ = NULL;
  gl_share_group_ = NULL;
#if defined(OS_ANDROID)
  stream_texture_manager_.reset();
#endif

  return true;
}

void InProcessCommandBuffer::CheckSequencedThread() {
  DCHECK(!sequence_checker_ ||
         sequence_checker_->CalledOnValidSequencedThread());
}

void InProcessCommandBuffer::OnContextLost() {
  CheckSequencedThread();
  if (!context_lost_callback_.is_null()) {
    context_lost_callback_.Run();
    context_lost_callback_.Reset();
  }

  context_lost_ = true;
}
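
// The unsigned subtraction below is a wraparound-safe "is not older than"
// check: it accepts state_after_last_flush_ only when its generation is at or
// ahead of last_state_'s, so a stale snapshot never overwrites newer state
// even when the 32-bit generation counter wraps.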
CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
  CheckSequencedThread();
  base::AutoLock lock(state_after_last_flush_lock_);
  if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U)
    last_state_ = state_after_last_flush_;
  return last_state_;
}

CommandBuffer::State InProcessCommandBuffer::GetLastState() {
  CheckSequencedThread();
  return last_state_;
}

int32 InProcessCommandBuffer::GetLastToken() {
  CheckSequencedThread();
  GetStateFast();
  return last_state_.token;
}

void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) {
  CheckSequencedThread();
  ScopedEvent handle_flush(&flush_event_);
  base::AutoLock lock(command_buffer_lock_);
  command_buffer_->Flush(put_offset);
  {
    // Update state before signaling the flush event.
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetLastState();
  }
  DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) ||
         (error::IsError(state_after_last_flush_.error) && context_lost_));

  // If we've processed all pending commands but still have pending queries,
  // pump idle work until the query is passed.
  if (put_offset == state_after_last_flush_.get_offset &&
      gpu_scheduler_->HasMoreWork()) {
    ScheduleIdleWorkOnGpuThread();
  }
}

void InProcessCommandBuffer::PerformIdleWork() {
  CheckSequencedThread();
  idle_work_pending_ = false;
  base::AutoLock lock(command_buffer_lock_);
  if (gpu_scheduler_->HasMoreWork()) {
    gpu_scheduler_->PerformIdleWork();
    ScheduleIdleWorkOnGpuThread();
  }
}

void InProcessCommandBuffer::ScheduleIdleWorkOnGpuThread() {
  CheckSequencedThread();
  if (idle_work_pending_)
    return;
  idle_work_pending_ = true;
  service_->ScheduleIdleWork(
      base::Bind(&InProcessCommandBuffer::PerformIdleWork,
                 gpu_thread_weak_ptr_));
}
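
// Client-side flush: deduplicates on put_offset and posts the actual flush
// to the GPU thread without blocking the caller.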
void InProcessCommandBuffer::Flush(int32 put_offset) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  if (last_put_offset_ == put_offset)
    return;

  last_put_offset_ = put_offset;
  base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
                                  gpu_thread_weak_ptr_,
                                  put_offset);
  QueueTask(task);
}
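
// The Wait* calls below block the client thread on flush_event_, which the
// ScopedEvent in FlushOnGpuThread() signals after every GPU-thread flush,
// re-checking the freshly published state each time they wake.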
void InProcessCommandBuffer::WaitForTokenInRange(int32 start, int32 end) {
  CheckSequencedThread();
  while (!InRange(start, end, GetLastToken()) &&
         last_state_.error == gpu::error::kNoError)
    flush_event_.Wait();
}

void InProcessCommandBuffer::WaitForGetOffsetInRange(int32 start, int32 end) {
  CheckSequencedThread();

  GetStateFast();
  while (!InRange(start, end, last_state_.get_offset) &&
         last_state_.error == gpu::error::kNoError) {
    flush_event_.Wait();
    GetStateFast();
  }
}

void InProcessCommandBuffer::SetGetBuffer(int32 shm_id) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  {
    base::AutoLock lock(command_buffer_lock_);
    command_buffer_->SetGetBuffer(shm_id);
    last_put_offset_ = 0;
  }
  {
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetLastState();
  }
}

scoped_refptr<Buffer> InProcessCommandBuffer::CreateTransferBuffer(size_t size,
                                                                   int32* id) {
  CheckSequencedThread();
  base::AutoLock lock(command_buffer_lock_);
  return command_buffer_->CreateTransferBuffer(size, id);
}

void InProcessCommandBuffer::DestroyTransferBuffer(int32 id) {
  CheckSequencedThread();
  base::Closure task =
      base::Bind(&InProcessCommandBuffer::DestroyTransferBufferOnGpuThread,
                 base::Unretained(this),
                 id);

  QueueTask(task);
}

void InProcessCommandBuffer::DestroyTransferBufferOnGpuThread(int32 id) {
  base::AutoLock lock(command_buffer_lock_);
  command_buffer_->DestroyTransferBuffer(id);
}

gpu::Capabilities InProcessCommandBuffer::GetCapabilities() {
  return capabilities_;
}
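
// Allocates a GpuMemoryBuffer on the client thread, then posts a task that
// wraps it in a GLImage and registers it with the decoder's ImageManager on
// the GPU thread. IDs come from a function-local static counter, so they are
// process-wide rather than per-context.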
gfx::GpuMemoryBuffer* InProcessCommandBuffer::CreateGpuMemoryBuffer(
    size_t width,
    size_t height,
    unsigned internalformat,
    unsigned usage,
    int32* id) {
  CheckSequencedThread();

  *id = -1;

  scoped_ptr<gfx::GpuMemoryBuffer> buffer =
      g_gpu_memory_buffer_factory->AllocateGpuMemoryBuffer(
          width, height, internalformat, usage);
  if (!buffer.get())
    return NULL;

  static int32 next_id = 1;
  int32 new_id = next_id++;

  base::Closure task =
      base::Bind(&InProcessCommandBuffer::RegisterGpuMemoryBufferOnGpuThread,
                 base::Unretained(this),
                 new_id,
                 buffer->GetHandle(),
                 width,
                 height,
                 internalformat);

  QueueTask(task);

  *id = new_id;
  DCHECK(gpu_memory_buffers_.find(new_id) == gpu_memory_buffers_.end());
  return gpu_memory_buffers_.add(new_id, buffer.Pass()).first->second;
}

void InProcessCommandBuffer::RegisterGpuMemoryBufferOnGpuThread(
    int32 id,
    const gfx::GpuMemoryBufferHandle& handle,
    size_t width,
    size_t height,
    unsigned internalformat) {
  scoped_refptr<gfx::GLImage> image =
      g_gpu_memory_buffer_factory->CreateImageForGpuMemoryBuffer(
          handle, gfx::Size(width, height), internalformat);
  if (!image.get())
    return;

  // Android-specific workaround.
  gles2::ContextGroup* context_group = decoder_->GetContextGroup();
  if (context_group->feature_info()->workarounds().release_image_after_use)
    image->SetReleaseAfterUse();

  if (decoder_) {
    gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
    DCHECK(image_manager);
    image_manager->AddImage(image.get(), id);
  }
}

void InProcessCommandBuffer::DestroyGpuMemoryBuffer(int32 id) {
  CheckSequencedThread();

  base::Closure task =
      base::Bind(&InProcessCommandBuffer::UnregisterGpuMemoryBufferOnGpuThread,
                 base::Unretained(this),
                 id);

  QueueTask(task);

  gpu_memory_buffers_.erase(id);
}

void InProcessCommandBuffer::UnregisterGpuMemoryBufferOnGpuThread(int32 id) {
  if (decoder_) {
    gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
    DCHECK(image_manager);
    image_manager->RemoveImage(id);
  }
}
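
// Sync points are generated eagerly on the calling thread; retirement is a
// task queued behind any already-pending GPU work, so a sync point passes
// only after everything submitted before it has been processed.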
uint32 InProcessCommandBuffer::InsertSyncPoint() {
  uint32 sync_point = g_sync_point_manager.Get().GenerateSyncPoint();
  QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread,
                       base::Unretained(this),
                       sync_point));
  return sync_point;
}

uint32 InProcessCommandBuffer::InsertFutureSyncPoint() {
  return g_sync_point_manager.Get().GenerateSyncPoint();
}

void InProcessCommandBuffer::RetireSyncPoint(uint32 sync_point) {
  QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread,
                       base::Unretained(this),
                       sync_point));
}

void InProcessCommandBuffer::RetireSyncPointOnGpuThread(uint32 sync_point) {
  gles2::MailboxManager* mailbox_manager =
      decoder_->GetContextGroup()->mailbox_manager();
  if (mailbox_manager->UsesSync()) {
    bool make_current_success = false;
    {
      base::AutoLock lock(command_buffer_lock_);
      make_current_success = MakeCurrent();
    }
    if (make_current_success)
      mailbox_manager->PushTextureUpdates();
  }
  g_sync_point_manager.Get().RetireSyncPoint(sync_point);
}

void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point,
                                             const base::Closure& callback) {
  CheckSequencedThread();
  QueueTask(base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread,
                       base::Unretained(this),
                       sync_point,
                       WrapCallback(callback)));
}
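
// If the sync point has not passed yet, this re-posts itself as idle work and
// polls until it has; the callback then runs on the thread that registered it
// (see WrapCallback below).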
void InProcessCommandBuffer::SignalSyncPointOnGpuThread(
    unsigned sync_point,
    const base::Closure& callback) {
  if (g_sync_point_manager.Get().IsSyncPointPassed(sync_point)) {
    callback.Run();
  } else {
    service_->ScheduleIdleWork(
        base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread,
                   gpu_thread_weak_ptr_,
                   sync_point,
                   callback));
  }
}

void InProcessCommandBuffer::SignalQuery(unsigned query_id,
                                         const base::Closure& callback) {
  CheckSequencedThread();
  QueueTask(base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread,
                       base::Unretained(this),
                       query_id,
                       WrapCallback(callback)));
}

void InProcessCommandBuffer::SignalQueryOnGpuThread(
    unsigned query_id,
    const base::Closure& callback) {
  gles2::QueryManager* query_manager_ = decoder_->GetQueryManager();
  DCHECK(query_manager_);

  gles2::QueryManager::Query* query = query_manager_->GetQuery(query_id);
  if (!query)
    callback.Run();
  else
    query->AddCallback(callback);
}

void InProcessCommandBuffer::SetSurfaceVisible(bool visible) {}

void InProcessCommandBuffer::Echo(const base::Closure& callback) {
  QueueTask(WrapCallback(callback));
}

uint32 InProcessCommandBuffer::CreateStreamTexture(uint32 texture_id) {
  base::WaitableEvent completion(true, false);
  uint32 stream_id = 0;
  base::Callback<uint32(void)> task =
      base::Bind(&InProcessCommandBuffer::CreateStreamTextureOnGpuThread,
                 base::Unretained(this),
                 texture_id);
  QueueTask(
      base::Bind(&RunTaskWithResult<uint32>, task, &stream_id, &completion));
  completion.Wait();
  return stream_id;
}

uint32 InProcessCommandBuffer::CreateStreamTextureOnGpuThread(
    uint32 client_texture_id) {
#if defined(OS_ANDROID)
  return stream_texture_manager_->CreateStreamTexture(
      client_texture_id, decoder_->GetContextGroup()->texture_manager());
#else
  return 0;
#endif
}

gpu::error::Error InProcessCommandBuffer::GetLastError() {
  CheckSequencedThread();
  return last_state_.error;
}

bool InProcessCommandBuffer::Initialize() {
  NOTREACHED();
  return false;
}
namespace {

void PostCallback(const scoped_refptr<base::MessageLoopProxy>& loop,
                  const base::Closure& callback) {
  if (!loop->BelongsToCurrentThread()) {
    loop->PostTask(FROM_HERE, callback);
  } else {
    callback.Run();
  }
}

void RunOnTargetThread(scoped_ptr<base::Closure> callback) {
  DCHECK(callback.get());
  callback->Run();
}

}  // anonymous namespace
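
// Wraps |callback| so it can be handed to the GPU thread and still run on the
// thread that called WrapCallback(): PostCallback posts it back to this
// thread's message loop (or runs it directly if already there), and the
// scoped_ptr handoff ensures the closure is destroyed on the target thread.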
base::Closure InProcessCommandBuffer::WrapCallback(
    const base::Closure& callback) {
  // Make sure the callback gets deleted on the target thread by passing
  // ownership.
  scoped_ptr<base::Closure> scoped_callback(new base::Closure(callback));
  base::Closure callback_on_client_thread =
      base::Bind(&RunOnTargetThread, base::Passed(&scoped_callback));
  base::Closure wrapped_callback =
      base::Bind(&PostCallback, base::MessageLoopProxy::current(),
                 callback_on_client_thread);
  return wrapped_callback;
}

#if defined(OS_ANDROID)
scoped_refptr<gfx::SurfaceTexture>
InProcessCommandBuffer::GetSurfaceTexture(uint32 stream_id) {
  DCHECK(stream_texture_manager_);
  return stream_texture_manager_->GetSurfaceTexture(stream_id);
}
#endif

// static
void InProcessCommandBuffer::SetGpuMemoryBufferFactory(
    InProcessGpuMemoryBufferFactory* factory) {
  g_gpu_memory_buffer_factory = factory;
}

}  // namespace gpu