1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "gpu/command_buffer/service/in_process_command_buffer.h"
11 #include <GLES2/gl2.h>
12 #ifndef GL_GLEXT_PROTOTYPES
13 #define GL_GLEXT_PROTOTYPES 1
15 #include <GLES2/gl2ext.h>
16 #include <GLES2/gl2extchromium.h>
18 #include "base/bind.h"
19 #include "base/bind_helpers.h"
20 #include "base/lazy_instance.h"
21 #include "base/logging.h"
22 #include "base/memory/weak_ptr.h"
23 #include "base/message_loop/message_loop_proxy.h"
24 #include "base/sequence_checker.h"
25 #include "base/synchronization/condition_variable.h"
26 #include "base/threading/thread.h"
27 #include "gpu/command_buffer/service/command_buffer_service.h"
28 #include "gpu/command_buffer/service/context_group.h"
29 #include "gpu/command_buffer/service/gl_context_virtual.h"
30 #include "gpu/command_buffer/service/gpu_scheduler.h"
31 #include "gpu/command_buffer/service/image_manager.h"
32 #include "gpu/command_buffer/service/mailbox_manager.h"
33 #include "gpu/command_buffer/service/memory_tracking.h"
34 #include "gpu/command_buffer/service/query_manager.h"
35 #include "gpu/command_buffer/service/transfer_buffer_manager.h"
36 #include "ui/gfx/size.h"
37 #include "ui/gl/gl_context.h"
38 #include "ui/gl/gl_image.h"
39 #include "ui/gl/gl_share_group.h"
41 #if defined(OS_ANDROID)
42 #include "gpu/command_buffer/service/stream_texture_manager_in_process_android.h"
43 #include "ui/gl/android/surface_texture.h"
// Process-wide GpuMemoryBuffer factory. Set through
// InProcessCommandBuffer::SetGpuMemoryBufferFactory() (defined at the end of
// this file) and read by CreateGpuMemoryBuffer()/RegisterGpuMemoryBuffer*.
50 static InProcessGpuMemoryBufferFactory
* g_gpu_memory_buffer_factory
= NULL
;
// Runs |task| and then signals |completion| so a thread blocked on the event
// can resume. NOTE(review): the template header and the rest of the parameter
// list/body fall outside this chunk — presumably the task's return value is
// written through an out-pointer before |completion| is signaled; confirm
// against the full file.
53 static void RunTaskWithResult(base::Callback
<T(void)> task
,
55 base::WaitableEvent
* completion
) {
// Default Service implementation: a dedicated, ref-counted GPU thread that
// executes scheduled tasks on its message loop.
60 class GpuInProcessThread
61 : public base::Thread
,
62 public InProcessCommandBuffer::Service
,
63 public base::RefCountedThreadSafe
<GpuInProcessThread
> {
// Disambiguate AddRef/Release for the RefCountedThreadSafe base (the Service
// base presumably also declares them — confirm in the header).
67 virtual void AddRef() const OVERRIDE
{
68 base::RefCountedThreadSafe
<GpuInProcessThread
>::AddRef();
70 virtual void Release() const OVERRIDE
{
71 base::RefCountedThreadSafe
<GpuInProcessThread
>::Release();
// Service interface: post work / delayed idle work to this thread.
74 virtual void ScheduleTask(const base::Closure
& task
) OVERRIDE
;
75 virtual void ScheduleIdleWork(const base::Closure
& callback
) OVERRIDE
;
// Each client on this thread gets a real GL context (no virtualization).
76 virtual bool UseVirtualizedGLContexts() OVERRIDE
{ return false; }
77 virtual scoped_refptr
<gles2::ShaderTranslatorCache
> shader_translator_cache()
81 virtual ~GpuInProcessThread();
82 friend class base::RefCountedThreadSafe
<GpuInProcessThread
>;
// Lazily created in shader_translator_cache(); shared by decoders on this
// thread.
84 scoped_refptr
<gpu::gles2::ShaderTranslatorCache
> shader_translator_cache_
;
85 DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread
);
// Constructor names the thread "GpuThread"; start/stop logic, if any, is
// outside this chunk.
88 GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") {
92 GpuInProcessThread::~GpuInProcessThread() {
// Post |task| to this thread's message loop for asynchronous execution.
96 void GpuInProcessThread::ScheduleTask(const base::Closure
& task
) {
97 message_loop()->PostTask(FROM_HERE
, task
);
// Idle work is delayed by 5ms so it runs only when the thread is quiet.
100 void GpuInProcessThread::ScheduleIdleWork(const base::Closure
& callback
) {
101 message_loop()->PostDelayedTask(
102 FROM_HERE
, callback
, base::TimeDelta::FromMilliseconds(5));
// Lazily construct the shared ShaderTranslatorCache on first use.
105 scoped_refptr
<gles2::ShaderTranslatorCache
>
106 GpuInProcessThread::shader_translator_cache() {
107 if (!shader_translator_cache_
.get())
108 shader_translator_cache_
= new gpu::gles2::ShaderTranslatorCache
;
109 return shader_translator_cache_
;
// Set of InProcessCommandBuffers that were created without an explicit
// Service and therefore share the default GpuInProcessThread. Guarded by
// default_thread_clients_lock_ below.
112 base::LazyInstance
<std::set
<InProcessCommandBuffer
*> > default_thread_clients_
=
113 LAZY_INSTANCE_INITIALIZER
;
114 base::LazyInstance
<base::Lock
> default_thread_clients_lock_
=
115 LAZY_INSTANCE_INITIALIZER
;
// RAII helper: signals the wrapped WaitableEvent when it goes out of scope
// (used by FlushOnGpuThread to signal flush_event_ on every exit path).
119 ScopedEvent(base::WaitableEvent
* event
) : event_(event
) {}
120 ~ScopedEvent() { event_
->Signal(); }
// Non-owning; must outlive this ScopedEvent.
123 base::WaitableEvent
* event_
;
// Tracks in-flight sync points for all in-process command buffers. Sync
// points are generated monotonically and retired on the GPU thread; waiters
// block on a condition variable until retirement.
126 class SyncPointManager
{
131 uint32
GenerateSyncPoint();
132 void RetireSyncPoint(uint32 sync_point
);
134 bool IsSyncPointPassed(uint32 sync_point
);
135 void WaitSyncPoint(uint32 sync_point
);
138 // This lock protects access to pending_sync_points_ and next_sync_point_ and
139 // is used with the ConditionVariable to signal when a sync point is retired.
141 std::set
<uint32
> pending_sync_points_
;
142 uint32 next_sync_point_
;
143 base::ConditionVariable cond_var_
;
// Sync point numbering starts at 1; cond_var_ is bound to lock_ (declared in
// lines outside this chunk).
146 SyncPointManager::SyncPointManager() : next_sync_point_(1), cond_var_(&lock_
) {}
// All sync points must be retired before destruction.
148 SyncPointManager::~SyncPointManager() {
149 DCHECK_EQ(pending_sync_points_
.size(), 0U);
// Allocate the next sync point and mark it pending. NOTE(review): uint32
// wrap-around of next_sync_point_ is not visibly handled here beyond the
// DCHECK — confirm against the full file.
152 uint32
SyncPointManager::GenerateSyncPoint() {
153 base::AutoLock
lock(lock_
);
154 uint32 sync_point
= next_sync_point_
++;
155 DCHECK_EQ(pending_sync_points_
.count(sync_point
), 0U);
156 pending_sync_points_
.insert(sync_point
);
// Remove the sync point and wake every waiter (Broadcast, not Signal, since
// multiple threads may wait on different sync points).
160 void SyncPointManager::RetireSyncPoint(uint32 sync_point
) {
161 base::AutoLock
lock(lock_
);
162 DCHECK(pending_sync_points_
.count(sync_point
));
163 pending_sync_points_
.erase(sync_point
);
164 cond_var_
.Broadcast();
// A sync point has "passed" once it is no longer in the pending set.
167 bool SyncPointManager::IsSyncPointPassed(uint32 sync_point
) {
168 base::AutoLock
lock(lock_
);
169 return pending_sync_points_
.count(sync_point
) == 0;
// Block until the sync point is retired (loop guards against spurious
// wake-ups; the Wait() call itself is in lines outside this chunk).
172 void SyncPointManager::WaitSyncPoint(uint32 sync_point
) {
173 base::AutoLock
lock(lock_
);
174 while (pending_sync_points_
.count(sync_point
)) {
// Process-wide singleton used by all in-process command buffers.
179 base::LazyInstance
<SyncPointManager
> g_sync_point_manager
=
180 LAZY_INSTANCE_INITIALIZER
;
// Free-function adapter so the decoder's WaitSyncPointCallback can be bound
// without capturing the singleton.
182 bool WaitSyncPoint(uint32 sync_point
) {
183 g_sync_point_manager
.Get().WaitSyncPoint(sync_point
);
187 } // anonymous namespace
189 InProcessCommandBuffer::Service::Service() {}
191 InProcessCommandBuffer::Service::~Service() {}
// Lazily create the MailboxManager shared by command buffers on this Service.
193 scoped_refptr
<gles2::MailboxManager
>
194 InProcessCommandBuffer::Service::mailbox_manager() {
195 if (!mailbox_manager_
.get())
196 mailbox_manager_
= new gles2::MailboxManager();
197 return mailbox_manager_
;
// Returns the Service shared by clients that did not supply their own:
// reuse the Service of any existing default-thread client, otherwise spin up
// a fresh GpuInProcessThread.
200 scoped_refptr
<InProcessCommandBuffer::Service
>
201 InProcessCommandBuffer::GetDefaultService() {
202 base::AutoLock
lock(default_thread_clients_lock_
.Get());
203 scoped_refptr
<Service
> service
;
204 if (!default_thread_clients_
.Get().empty()) {
205 InProcessCommandBuffer
* other
= *default_thread_clients_
.Get().begin();
206 service
= other
->service_
;
207 DCHECK(service
.get());
209 service
= new GpuInProcessThread
;
// Construct with an explicit Service, or fall back to the shared default
// service. Clients using the default are tracked in default_thread_clients_
// so GetDefaultService() can hand the same Service to later clients.
214 InProcessCommandBuffer::InProcessCommandBuffer(
215 const scoped_refptr
<Service
>& service
)
216 : context_lost_(false),
217 idle_work_pending_(false),
218 last_put_offset_(-1),
219 flush_event_(false, false),
220 service_(service
.get() ? service
: GetDefaultService()),
221 gpu_thread_weak_ptr_factory_(this) {
222 if (!service
.get()) {
223 base::AutoLock
lock(default_thread_clients_lock_
.Get());
224 default_thread_clients_
.Get().insert(this);
// Unregister from the default-service client set (harmless if this instance
// was never inserted — erase on an absent key is a no-op).
228 InProcessCommandBuffer::~InProcessCommandBuffer() {
230 base::AutoLock
lock(default_thread_clients_lock_
.Get());
231 default_thread_clients_
.Get().erase(this);
// GPU-thread resize handler for onscreen surfaces; bound as the decoder's
// resize callback during initialization.
234 void InProcessCommandBuffer::OnResizeView(gfx::Size size
, float scale_factor
) {
235 CheckSequencedThread();
236 DCHECK(!surface_
->IsOffscreen());
237 surface_
->Resize(size
);
// Make the decoder's context current; on failure, record the loss reason and
// poison the command buffer with kLostContext.
240 bool InProcessCommandBuffer::MakeCurrent() {
241 CheckSequencedThread();
242 command_buffer_lock_
.AssertAcquired();
244 if (!context_lost_
&& decoder_
->MakeCurrent())
246 DLOG(ERROR
) << "Context lost because MakeCurrent failed.";
247 command_buffer_
->SetContextLostReason(decoder_
->GetContextLostReason());
248 command_buffer_
->SetParseError(gpu::error::kLostContext
);
// Executes buffered commands on the GPU thread (put-offset-changed callback).
252 void InProcessCommandBuffer::PumpCommands() {
253 CheckSequencedThread();
254 command_buffer_lock_
.AssertAcquired();
259 gpu_scheduler_
->PutChanged();
// Get-buffer-changed callback; forwards the new transfer buffer id.
262 bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id
) {
263 CheckSequencedThread();
264 command_buffer_lock_
.AssertAcquired();
265 command_buffer_
->SetGetBuffer(transfer_buffer_id
);
// Client-thread entry point: packages the parameters, runs
// InitializeOnGpuThread synchronously (blocking on |completion|), and caches
// the resulting capabilities.
269 bool InProcessCommandBuffer::Initialize(
270 scoped_refptr
<gfx::GLSurface
> surface
,
272 gfx::AcceleratedWidget window
,
273 const gfx::Size
& size
,
274 const std::vector
<int32
>& attribs
,
275 gfx::GpuPreference gpu_preference
,
276 const base::Closure
& context_lost_callback
,
277 InProcessCommandBuffer
* share_group
) {
// Sharing is only valid between command buffers on the same Service.
278 DCHECK(!share_group
|| service_
.get() == share_group
->service_
.get());
279 context_lost_callback_
= WrapCallback(context_lost_callback
);
282 // GPU thread must be the same as client thread due to GLSurface not being
284 sequence_checker_
.reset(new base::SequenceChecker
);
288 gpu::Capabilities capabilities
;
289 InitializeOnGpuThreadParams
params(is_offscreen
,
297 base::Callback
<bool(void)> init_task
=
298 base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread
,
299 base::Unretained(this),
// Manual-reset, initially unsignaled event; RunTaskWithResult signals it
// once the GPU-thread initialization completes.
302 base::WaitableEvent
completion(true, false);
305 base::Bind(&RunTaskWithResult
<bool>, init_task
, &result
, &completion
));
// map_image support additionally requires a registered buffer factory.
309 capabilities_
= capabilities
;
310 capabilities_
.map_image
=
311 capabilities_
.map_image
&& g_gpu_memory_buffer_factory
;
// GPU-thread half of initialization: builds the command buffer service,
// scheduler, decoder, surface and GL context. Any failure logs, tears down
// via DestroyOnGpuThread(), and (per the visible early-exit pattern) aborts.
316 bool InProcessCommandBuffer::InitializeOnGpuThread(
317 const InitializeOnGpuThreadParams
& params
) {
318 CheckSequencedThread();
319 gpu_thread_weak_ptr_
= gpu_thread_weak_ptr_factory_
.GetWeakPtr();
321 DCHECK(params
.size
.width() >= 0 && params
.size
.height() >= 0);
// Transfer buffer bookkeeping, owned via transfer_buffer_manager_.
323 TransferBufferManager
* manager
= new TransferBufferManager();
324 transfer_buffer_manager_
.reset(manager
);
325 manager
->Initialize();
327 scoped_ptr
<CommandBufferService
> command_buffer(
328 new CommandBufferService(transfer_buffer_manager_
.get()));
// Weak-ptr binding: callbacks become no-ops once this object's factory is
// invalidated in DestroyOnGpuThread().
329 command_buffer
->SetPutOffsetChangeCallback(base::Bind(
330 &InProcessCommandBuffer::PumpCommands
, gpu_thread_weak_ptr_
));
331 command_buffer
->SetParseErrorCallback(base::Bind(
332 &InProcessCommandBuffer::OnContextLost
, gpu_thread_weak_ptr_
));
334 if (!command_buffer
->Initialize()) {
335 LOG(ERROR
) << "Could not initialize command buffer.";
336 DestroyOnGpuThread();
// Share the GL share group with the share-group peer when given, otherwise
// create a fresh one.
340 gl_share_group_
= params
.context_group
341 ? params
.context_group
->gl_share_group_
.get()
342 : new gfx::GLShareGroup
;
344 #if defined(OS_ANDROID)
345 stream_texture_manager_
.reset(new StreamTextureManagerInProcess
);
// The decoder either joins the peer's ContextGroup or gets a new group built
// from this Service's shared managers.
348 bool bind_generates_resource
= false;
349 decoder_
.reset(gles2::GLES2Decoder::Create(
351 ? params
.context_group
->decoder_
->GetContextGroup()
352 : new gles2::ContextGroup(service_
->mailbox_manager(),
354 service_
->shader_translator_cache(),
356 bind_generates_resource
)));
358 gpu_scheduler_
.reset(
359 new GpuScheduler(command_buffer
.get(), decoder_
.get(), decoder_
.get()));
360 command_buffer
->SetGetBufferChangeCallback(base::Bind(
361 &GpuScheduler::SetGetBuffer
, base::Unretained(gpu_scheduler_
.get())));
362 command_buffer_
= command_buffer
.Pass();
364 decoder_
->set_engine(gpu_scheduler_
.get());
// Create an offscreen or view surface unless one was supplied by the caller.
366 if (!surface_
.get()) {
367 if (params
.is_offscreen
)
368 surface_
= gfx::GLSurface::CreateOffscreenGLSurface(params
.size
);
370 surface_
= gfx::GLSurface::CreateViewGLSurface(params
.window
);
373 if (!surface_
.get()) {
374 LOG(ERROR
) << "Could not create GLSurface.";
375 DestroyOnGpuThread();
// Virtualized contexts: all clients share one real context per share group,
// each wrapped in a GLContextVirtual bound to this decoder.
379 if (service_
->UseVirtualizedGLContexts()) {
380 context_
= gl_share_group_
->GetSharedContext();
381 if (!context_
.get()) {
382 context_
= gfx::GLContext::CreateGLContext(
383 gl_share_group_
.get(), surface_
.get(), params
.gpu_preference
);
384 gl_share_group_
->SetSharedContext(context_
.get());
387 context_
= new GLContextVirtual(
388 gl_share_group_
.get(), context_
.get(), decoder_
->AsWeakPtr());
389 if (context_
->Initialize(surface_
.get(), params
.gpu_preference
)) {
390 VLOG(1) << "Created virtual GL context.";
// Non-virtualized path: a real context per command buffer.
395 context_
= gfx::GLContext::CreateGLContext(
396 gl_share_group_
.get(), surface_
.get(), params
.gpu_preference
);
399 if (!context_
.get()) {
400 LOG(ERROR
) << "Could not create GLContext.";
401 DestroyOnGpuThread();
405 if (!context_
->MakeCurrent(surface_
.get())) {
406 LOG(ERROR
) << "Could not make context current.";
407 DestroyOnGpuThread();
// The in-process GPU memory manager is disabled for this decoder.
411 gles2::DisallowedFeatures disallowed_features
;
412 disallowed_features
.gpu_memory_manager
= true;
413 if (!decoder_
->Initialize(surface_
,
419 LOG(ERROR
) << "Could not initialize decoder.";
420 DestroyOnGpuThread();
// Report capabilities back to the client thread through the params struct.
423 *params
.capabilities
= decoder_
->GetCapabilities();
425 if (!params
.is_offscreen
) {
426 decoder_
->SetResizeCallback(base::Bind(
427 &InProcessCommandBuffer::OnResizeView
, gpu_thread_weak_ptr_
));
429 decoder_
->SetWaitSyncPointCallback(base::Bind(&WaitSyncPoint
));
// Client-thread teardown: runs DestroyOnGpuThread synchronously, blocking on
// |completion| until the GPU-side resources are released.
434 void InProcessCommandBuffer::Destroy() {
435 CheckSequencedThread();
437 base::WaitableEvent
completion(true, false);
439 base::Callback
<bool(void)> destroy_task
= base::Bind(
440 &InProcessCommandBuffer::DestroyOnGpuThread
, base::Unretained(this));
442 base::Bind(&RunTaskWithResult
<bool>, destroy_task
, &result
, &completion
));
// GPU-thread teardown. Invalidating the weak-ptr factory first cancels any
// still-pending bound callbacks (PumpCommands, OnContextLost, ...).
446 bool InProcessCommandBuffer::DestroyOnGpuThread() {
447 CheckSequencedThread();
448 gpu_thread_weak_ptr_factory_
.InvalidateWeakPtrs();
449 command_buffer_
.reset();
450 // Clean up GL resources if possible.
451 bool have_context
= context_
.get() && context_
->MakeCurrent(surface_
.get());
453 decoder_
->Destroy(have_context
);
458 gl_share_group_
= NULL
;
459 #if defined(OS_ANDROID)
460 stream_texture_manager_
.reset();
// No-op unless sequence checking was enabled during Initialize().
466 void InProcessCommandBuffer::CheckSequencedThread() {
467 DCHECK(!sequence_checker_
||
468 sequence_checker_
->CalledOnValidSequencedThread());
// Runs the client's context-lost callback exactly once (Reset() prevents a
// second invocation) and latches context_lost_.
471 void InProcessCommandBuffer::OnContextLost() {
472 CheckSequencedThread();
473 if (!context_lost_callback_
.is_null()) {
474 context_lost_callback_
.Run();
475 context_lost_callback_
.Reset();
478 context_lost_
= true;
// Refresh last_state_ from the state captured at the most recent flush.
// The unsigned-subtraction comparison accepts only forward generation
// progress, tolerating wrap-around of the 32-bit generation counter.
481 CommandBuffer::State
InProcessCommandBuffer::GetStateFast() {
482 CheckSequencedThread();
483 base::AutoLock
lock(state_after_last_flush_lock_
);
484 if (state_after_last_flush_
.generation
- last_state_
.generation
< 0x80000000U
)
485 last_state_
= state_after_last_flush_
;
489 CommandBuffer::State
InProcessCommandBuffer::GetLastState() {
490 CheckSequencedThread();
494 int32
InProcessCommandBuffer::GetLastToken() {
495 CheckSequencedThread();
497 return last_state_
.token
;
// GPU-thread flush: executes commands up to |put_offset|, snapshots the
// resulting state, and signals flush_event_ via ScopedEvent on every exit
// path. The second AutoLock below presumably sits in a nested scope whose
// braces fall outside this chunk — confirm against the full file.
500 void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset
) {
501 CheckSequencedThread();
502 ScopedEvent
handle_flush(&flush_event_
);
503 base::AutoLock
lock(command_buffer_lock_
);
504 command_buffer_
->Flush(put_offset
);
506 // Update state before signaling the flush event.
507 base::AutoLock
lock(state_after_last_flush_lock_
);
508 state_after_last_flush_
= command_buffer_
->GetLastState();
// Error state and context_lost_ must agree.
510 DCHECK((!error::IsError(state_after_last_flush_
.error
) && !context_lost_
) ||
511 (error::IsError(state_after_last_flush_
.error
) && context_lost_
));
513 // If we've processed all pending commands but still have pending queries,
514 // pump idle work until the query is passed.
515 if (put_offset
== state_after_last_flush_
.get_offset
&&
516 gpu_scheduler_
->HasMoreWork()) {
517 ScheduleIdleWorkOnGpuThread();
// One round of scheduler idle work; reschedules itself while work remains.
521 void InProcessCommandBuffer::PerformIdleWork() {
522 CheckSequencedThread();
523 idle_work_pending_
= false;
524 base::AutoLock
lock(command_buffer_lock_
);
525 if (MakeCurrent() && gpu_scheduler_
->HasMoreWork()) {
526 gpu_scheduler_
->PerformIdleWork();
527 ScheduleIdleWorkOnGpuThread();
// Coalesces idle-work requests: at most one PerformIdleWork is in flight.
531 void InProcessCommandBuffer::ScheduleIdleWorkOnGpuThread() {
532 CheckSequencedThread();
533 if (idle_work_pending_
)
535 idle_work_pending_
= true;
536 service_
->ScheduleIdleWork(
537 base::Bind(&InProcessCommandBuffer::PerformIdleWork
,
538 gpu_thread_weak_ptr_
));
// Client-thread flush: no-op when the buffer is in error or the put offset
// has not advanced; otherwise queues FlushOnGpuThread.
541 void InProcessCommandBuffer::Flush(int32 put_offset
) {
542 CheckSequencedThread();
543 if (last_state_
.error
!= gpu::error::kNoError
)
546 if (last_put_offset_
== put_offset
)
549 last_put_offset_
= put_offset
;
550 base::Closure task
= base::Bind(&InProcessCommandBuffer::FlushOnGpuThread
,
551 gpu_thread_weak_ptr_
,
// Spin until the last token enters [start, end] or an error is recorded.
556 void InProcessCommandBuffer::WaitForTokenInRange(int32 start
, int32 end
) {
557 CheckSequencedThread();
558 while (!InRange(start
, end
, GetLastToken()) &&
559 last_state_
.error
== gpu::error::kNoError
)
// Same, but for the get offset.
563 void InProcessCommandBuffer::WaitForGetOffsetInRange(int32 start
, int32 end
) {
564 CheckSequencedThread();
567 while (!InRange(start
, end
, last_state_
.get_offset
) &&
568 last_state_
.error
== gpu::error::kNoError
) {
// Switch the active get buffer and reset flush bookkeeping. The second
// AutoLock presumably lives in a nested scope outside this chunk's view.
574 void InProcessCommandBuffer::SetGetBuffer(int32 shm_id
) {
575 CheckSequencedThread();
576 if (last_state_
.error
!= gpu::error::kNoError
)
580 base::AutoLock
lock(command_buffer_lock_
);
581 command_buffer_
->SetGetBuffer(shm_id
);
582 last_put_offset_
= 0;
585 base::AutoLock
lock(state_after_last_flush_lock_
);
586 state_after_last_flush_
= command_buffer_
->GetLastState();
// Allocate a transfer buffer under the command buffer lock; |id| is an
// out-parameter declared on a line outside this chunk.
590 scoped_refptr
<Buffer
> InProcessCommandBuffer::CreateTransferBuffer(size_t size
,
592 CheckSequencedThread();
593 base::AutoLock
lock(command_buffer_lock_
);
594 return command_buffer_
->CreateTransferBuffer(size
, id
);
// Destruction is deferred to the GPU thread via a queued task.
597 void InProcessCommandBuffer::DestroyTransferBuffer(int32 id
) {
598 CheckSequencedThread();
600 base::Bind(&InProcessCommandBuffer::DestroyTransferBufferOnGpuThread
,
601 base::Unretained(this),
607 void InProcessCommandBuffer::DestroyTransferBufferOnGpuThread(int32 id
) {
608 base::AutoLock
lock(command_buffer_lock_
);
609 command_buffer_
->DestroyTransferBuffer(id
);
// Returns the capabilities cached during Initialize().
612 gpu::Capabilities
InProcessCommandBuffer::GetCapabilities() {
613 return capabilities_
;
// Allocate a GpuMemoryBuffer through the process-wide factory, assign it a
// client-local id, and register the backing GLImage on the GPU thread.
// Returns a pointer owned by gpu_memory_buffers_.
616 gfx::GpuMemoryBuffer
* InProcessCommandBuffer::CreateGpuMemoryBuffer(
619 unsigned internalformat
,
622 CheckSequencedThread();
626 scoped_ptr
<gfx::GpuMemoryBuffer
> buffer
=
627 g_gpu_memory_buffer_factory
->AllocateGpuMemoryBuffer(
628 width
, height
, internalformat
, usage
);
// Function-local counter: ids are unique across all instances in-process.
632 static int32 next_id
= 1;
633 int32 new_id
= next_id
++;
636 base::Bind(&InProcessCommandBuffer::RegisterGpuMemoryBufferOnGpuThread
,
637 base::Unretained(this),
647 DCHECK(gpu_memory_buffers_
.find(new_id
) == gpu_memory_buffers_
.end());
648 return gpu_memory_buffers_
.add(new_id
, buffer
.Pass()).first
->second
;
// GPU thread: wrap the buffer handle in a GLImage and hand it to the
// decoder's ImageManager under |id|.
651 void InProcessCommandBuffer::RegisterGpuMemoryBufferOnGpuThread(
653 const gfx::GpuMemoryBufferHandle
& handle
,
656 unsigned internalformat
) {
657 scoped_refptr
<gfx::GLImage
> image
=
658 g_gpu_memory_buffer_factory
->CreateImageForGpuMemoryBuffer(
659 handle
, gfx::Size(width
, height
), internalformat
);
663 // For Android specific workaround.
664 gles2::ContextGroup
* context_group
= decoder_
->GetContextGroup();
665 if (context_group
->feature_info()->workarounds().release_image_after_use
)
666 image
->SetReleaseAfterUse();
669 gpu::gles2::ImageManager
* image_manager
= decoder_
->GetImageManager();
670 DCHECK(image_manager
);
671 image_manager
->AddImage(image
.get(), id
);
// Client thread: queue GPU-side unregistration, then drop local ownership.
675 void InProcessCommandBuffer::DestroyGpuMemoryBuffer(int32 id
) {
676 CheckSequencedThread();
679 base::Bind(&InProcessCommandBuffer::UnregisterGpuMemoryBufferOnGpuThread
,
680 base::Unretained(this),
685 gpu_memory_buffers_
.erase(id
);
688 void InProcessCommandBuffer::UnregisterGpuMemoryBufferOnGpuThread(int32 id
) {
690 gpu::gles2::ImageManager
* image_manager
= decoder_
->GetImageManager();
691 DCHECK(image_manager
);
692 image_manager
->RemoveImage(id
);
// Generate a sync point and queue its retirement on the GPU thread so it
// passes once previously queued work completes.
696 uint32
InProcessCommandBuffer::InsertSyncPoint() {
697 uint32 sync_point
= g_sync_point_manager
.Get().GenerateSyncPoint();
698 QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread
,
699 base::Unretained(this),
// A "future" sync point is generated now but retired later by the caller.
704 uint32
InProcessCommandBuffer::InsertFutureSyncPoint() {
705 return g_sync_point_manager
.Get().GenerateSyncPoint();
708 void InProcessCommandBuffer::RetireSyncPoint(uint32 sync_point
) {
709 QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread
,
710 base::Unretained(this),
// GPU thread: push mailbox texture updates (when the MailboxManager
// synchronizes across share groups) before retiring the sync point.
714 void InProcessCommandBuffer::RetireSyncPointOnGpuThread(uint32 sync_point
) {
715 gles2::MailboxManager
* mailbox_manager
=
716 decoder_
->GetContextGroup()->mailbox_manager();
717 if (mailbox_manager
->UsesSync()) {
718 bool make_current_success
= false;
720 base::AutoLock
lock(command_buffer_lock_
);
721 make_current_success
= MakeCurrent();
723 if (make_current_success
)
724 mailbox_manager
->PushTextureUpdates();
726 g_sync_point_manager
.Get().RetireSyncPoint(sync_point
);
// Queue |callback| to run on the client thread once |sync_point| passes.
729 void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point
,
730 const base::Closure
& callback
) {
731 CheckSequencedThread();
732 QueueTask(base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread
,
733 base::Unretained(this),
735 WrapCallback(callback
)));
// GPU thread: run the callback if the sync point has passed, otherwise poll
// again via ScheduleIdleWork.
738 void InProcessCommandBuffer::SignalSyncPointOnGpuThread(
740 const base::Closure
& callback
) {
741 if (g_sync_point_manager
.Get().IsSyncPointPassed(sync_point
)) {
744 service_
->ScheduleIdleWork(
745 base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread
,
746 gpu_thread_weak_ptr_
,
// Queue |callback| to run once the GL query |query_id| completes.
752 void InProcessCommandBuffer::SignalQuery(unsigned query_id
,
753 const base::Closure
& callback
) {
754 CheckSequencedThread();
755 QueueTask(base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread
,
756 base::Unretained(this),
758 WrapCallback(callback
)));
// GPU thread: attach the callback to the query object; the QueryManager
// invokes it when the query passes.
761 void InProcessCommandBuffer::SignalQueryOnGpuThread(
763 const base::Closure
& callback
) {
764 gles2::QueryManager
* query_manager_
= decoder_
->GetQueryManager();
765 DCHECK(query_manager_
);
767 gles2::QueryManager::Query
* query
= query_manager_
->GetQuery(query_id
);
771 query
->AddCallback(callback
);
// Surface visibility is intentionally ignored in the in-process case.
774 void InProcessCommandBuffer::SetSurfaceVisible(bool visible
) {}
// Round-trips |callback| through the GPU task queue, so it runs after all
// previously queued work.
776 void InProcessCommandBuffer::Echo(const base::Closure
& callback
) {
777 QueueTask(WrapCallback(callback
));
// Synchronously create a stream texture on the GPU thread, blocking on
// |completion| and returning the resulting stream id (0 until assigned).
780 uint32
InProcessCommandBuffer::CreateStreamTexture(uint32 texture_id
) {
781 base::WaitableEvent
completion(true, false);
782 uint32 stream_id
= 0;
783 base::Callback
<uint32(void)> task
=
784 base::Bind(&InProcessCommandBuffer::CreateStreamTextureOnGpuThread
,
785 base::Unretained(this),
788 base::Bind(&RunTaskWithResult
<uint32
>, task
, &stream_id
, &completion
));
// GPU thread: Android-only; the non-Android branch falls outside this view.
793 uint32
InProcessCommandBuffer::CreateStreamTextureOnGpuThread(
794 uint32 client_texture_id
) {
795 #if defined(OS_ANDROID)
796 return stream_texture_manager_
->CreateStreamTexture(
797 client_texture_id
, decoder_
->GetContextGroup()->texture_manager());
// Returns the error recorded in the locally cached state.
803 gpu::error::Error
InProcessCommandBuffer::GetLastError() {
804 CheckSequencedThread();
805 return last_state_
.error
;
// Parameterless CommandBuffer::Initialize override; body outside this view.
808 bool InProcessCommandBuffer::Initialize() {
// Run |callback| on |loop|'s thread: post if called from another thread
// (the same-thread branch falls outside this chunk's view).
815 void PostCallback(const scoped_refptr
<base::MessageLoopProxy
>& loop
,
816 const base::Closure
& callback
) {
817 if (!loop
->BelongsToCurrentThread()) {
818 loop
->PostTask(FROM_HERE
, callback
);
// Runs the heap-allocated closure; ownership arrived via base::Passed, so it
// is destroyed on this (target) thread.
824 void RunOnTargetThread(scoped_ptr
<base::Closure
> callback
) {
825 DCHECK(callback
.get());
829 } // anonymous namespace
// Wrap |callback| so that, when invoked from the GPU thread, it is posted
// back to (and destroyed on) the client thread that called WrapCallback.
831 base::Closure
InProcessCommandBuffer::WrapCallback(
832 const base::Closure
& callback
) {
833 // Make sure the callback gets deleted on the target thread by passing
835 scoped_ptr
<base::Closure
> scoped_callback(new base::Closure(callback
));
836 base::Closure callback_on_client_thread
=
837 base::Bind(&RunOnTargetThread
, base::Passed(&scoped_callback
));
838 base::Closure wrapped_callback
=
839 base::Bind(&PostCallback
, base::MessageLoopProxy::current(),
840 callback_on_client_thread
);
841 return wrapped_callback
;
844 #if defined(OS_ANDROID)
// Android only: look up the SurfaceTexture backing |stream_id| from the
// manager created in InitializeOnGpuThread.
845 scoped_refptr
<gfx::SurfaceTexture
>
846 InProcessCommandBuffer::GetSurfaceTexture(uint32 stream_id
) {
847 DCHECK(stream_texture_manager_
);
848 return stream_texture_manager_
->GetSurfaceTexture(stream_id
);
// Installs the process-wide GpuMemoryBuffer factory used by
// CreateGpuMemoryBuffer(); must be called before buffers are created.
853 void InProcessCommandBuffer::SetGpuMemoryBufferFactory(
854 InProcessGpuMemoryBufferFactory
* factory
) {
855 g_gpu_memory_buffer_factory
= factory
;