// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/command_buffer/service/in_process_command_buffer.h"

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/weak_ptr.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/sequence_checker.h"
#include "base/synchronization/condition_variable.h"
#include "base/threading/thread.h"
#include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
#include "gpu/command_buffer/common/value_state.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/gl_context_virtual.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/gpu_switches.h"
#include "gpu/command_buffer/service/image_factory.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/mailbox_manager_impl.h"
#include "gpu/command_buffer/service/mailbox_manager_sync.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/query_manager.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/command_buffer/service/transfer_buffer_manager.h"
#include "gpu/command_buffer/service/valuebuffer_manager.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image.h"
#include "ui/gl/gl_share_group.h"

#if defined(OS_ANDROID)
#include "gpu/command_buffer/service/stream_texture_manager_in_process_android.h"
#include "ui/gl/android/surface_texture.h"
#include "base/process/process_handle.h"
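
// Helper used to run a task on the GPU thread and hand its result back to
// the waiting client thread, signalling |completion| when done.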
template <typename T>
static void RunTaskWithResult(base::Callback<T(void)> task,
                              T* result,
                              base::WaitableEvent* completion) {
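
// Default Service implementation: a reference-counted base::Thread
// ("GpuThread") on which scheduled tasks and idle work are run.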
class GpuInProcessThread
    : public base::Thread,
      public InProcessCommandBuffer::Service,
      public base::RefCountedThreadSafe<GpuInProcessThread> {
  void AddRef() const override {
    base::RefCountedThreadSafe<GpuInProcessThread>::AddRef();
  }
  void Release() const override {
    base::RefCountedThreadSafe<GpuInProcessThread>::Release();
  }

  void ScheduleTask(const base::Closure& task) override;
  void ScheduleIdleWork(const base::Closure& callback) override;
  bool UseVirtualizedGLContexts() override { return false; }
  scoped_refptr<gles2::ShaderTranslatorCache> shader_translator_cache()
      override;

  ~GpuInProcessThread() override;
  friend class base::RefCountedThreadSafe<GpuInProcessThread>;

  scoped_refptr<gpu::gles2::ShaderTranslatorCache> shader_translator_cache_;
  DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread);
};

GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") {

GpuInProcessThread::~GpuInProcessThread() {

void GpuInProcessThread::ScheduleTask(const base::Closure& task) {
  message_loop()->PostTask(FROM_HERE, task);
}

void GpuInProcessThread::ScheduleIdleWork(const base::Closure& callback) {
  // Match delay with GpuCommandBufferStub.
  message_loop()->PostDelayedTask(
      FROM_HERE, callback, base::TimeDelta::FromMilliseconds(2));
}

scoped_refptr<gles2::ShaderTranslatorCache>
GpuInProcessThread::shader_translator_cache() {
  if (!shader_translator_cache_.get())
    shader_translator_cache_ = new gpu::gles2::ShaderTranslatorCache;
  return shader_translator_cache_;
}
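
// Holds the lazily created default GPU thread, shared by every
// InProcessCommandBuffer that is not constructed with an explicit Service.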
struct GpuInProcessThreadHolder {
  GpuInProcessThreadHolder() : gpu_thread(new GpuInProcessThread) {}
  scoped_refptr<InProcessCommandBuffer::Service> gpu_thread;
};

base::LazyInstance<GpuInProcessThreadHolder> g_default_service =
    LAZY_INSTANCE_INITIALIZER;

  explicit ScopedEvent(base::WaitableEvent* event) : event_(event) {}
  ~ScopedEvent() { event_->Signal(); }

  base::WaitableEvent* event_;

// This wrapper adds a WaitSyncPoint method, which allows waiting on a sync
// point on the service thread, implemented using a condition variable.
class SyncPointManagerWrapper {
  SyncPointManagerWrapper();

  uint32 GenerateSyncPoint();
  void RetireSyncPoint(uint32 sync_point);
  void AddSyncPointCallback(uint32 sync_point, const base::Closure& callback);
  void WaitSyncPoint(uint32 sync_point);

  void OnSyncPointRetired();

  const scoped_refptr<SyncPointManager> manager_;
  base::Lock retire_lock_;
  base::ConditionVariable retire_cond_var_;

  DISALLOW_COPY_AND_ASSIGN(SyncPointManagerWrapper);
};

SyncPointManagerWrapper::SyncPointManagerWrapper()
    : manager_(SyncPointManager::Create(true)),
      retire_cond_var_(&retire_lock_) {
}

uint32 SyncPointManagerWrapper::GenerateSyncPoint() {
  uint32 sync_point = manager_->GenerateSyncPoint();
  manager_->AddSyncPointCallback(
      sync_point, base::Bind(&SyncPointManagerWrapper::OnSyncPointRetired,
                             base::Unretained(this)));
  return sync_point;
}

void SyncPointManagerWrapper::RetireSyncPoint(uint32 sync_point) {
  manager_->RetireSyncPoint(sync_point);
}

void SyncPointManagerWrapper::AddSyncPointCallback(
    uint32 sync_point,
    const base::Closure& callback) {
  manager_->AddSyncPointCallback(sync_point, callback);
}
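
// Blocks the calling thread until |sync_point| is retired;
// OnSyncPointRetired() broadcasts on the condition variable whenever any
// sync point retires.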
void SyncPointManagerWrapper::WaitSyncPoint(uint32 sync_point) {
  base::AutoLock lock(retire_lock_);
  while (!manager_->IsSyncPointRetired(sync_point)) {
    retire_cond_var_.Wait();
  }
}

void SyncPointManagerWrapper::OnSyncPointRetired() {
  base::AutoLock lock(retire_lock_);
  retire_cond_var_.Broadcast();
}

base::LazyInstance<SyncPointManagerWrapper> g_sync_point_manager =
    LAZY_INSTANCE_INITIALIZER;
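
// Duplicates a shared memory handle so that the in-process GPU thread owns
// its own reference to the underlying memory.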
base::SharedMemoryHandle ShareToGpuThread(
    base::SharedMemoryHandle source_handle) {
#if defined(OS_WIN)
  // Windows needs to explicitly duplicate the handle to the current process.
  base::SharedMemoryHandle target_handle;
  if (!DuplicateHandle(GetCurrentProcess(),
                       source_handle,
                       GetCurrentProcess(),
                       &target_handle,
                       FILE_GENERIC_READ | FILE_GENERIC_WRITE,
                       FALSE,
                       0)) {
    return base::SharedMemory::NULLHandle();
  }

  return target_handle;
#else
  int duped_handle = HANDLE_EINTR(dup(source_handle.fd));
  if (duped_handle < 0)
    return base::SharedMemory::NULLHandle();

  return base::FileDescriptor(duped_handle, true);
#endif
}
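
// Prepares a GpuMemoryBufferHandle for use on the GPU thread. Shared memory
// handles are duplicated; native buffer types are passed through but require
// a sync point before the client may destroy the buffer.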
gfx::GpuMemoryBufferHandle ShareGpuMemoryBufferToGpuThread(
    const gfx::GpuMemoryBufferHandle& source_handle,
    bool* requires_sync_point) {
  switch (source_handle.type) {
    case gfx::SHARED_MEMORY_BUFFER: {
      gfx::GpuMemoryBufferHandle handle;
      handle.type = gfx::SHARED_MEMORY_BUFFER;
      handle.handle = ShareToGpuThread(source_handle.handle);
      *requires_sync_point = false;
      return handle;
    }
    case gfx::IO_SURFACE_BUFFER:
    case gfx::SURFACE_TEXTURE_BUFFER:
    case gfx::OZONE_NATIVE_BUFFER:
      *requires_sync_point = true;
      return source_handle;
    default:
      return gfx::GpuMemoryBufferHandle();
  }
}

}  // anonymous namespace

InProcessCommandBuffer::Service::Service() {}

InProcessCommandBuffer::Service::~Service() {}

scoped_refptr<gfx::GLShareGroup>
InProcessCommandBuffer::Service::share_group() {
  if (!share_group_.get())
    share_group_ = new gfx::GLShareGroup;
  return share_group_;
}

scoped_refptr<gles2::MailboxManager>
InProcessCommandBuffer::Service::mailbox_manager() {
  if (!mailbox_manager_.get()) {
    if (base::CommandLine::ForCurrentProcess()->HasSwitch(
            switches::kEnableThreadedTextureMailboxes)) {
      mailbox_manager_ = new gles2::MailboxManagerSync();
    } else {
      mailbox_manager_ = new gles2::MailboxManagerImpl();
    }
  }
  return mailbox_manager_;
}

scoped_refptr<gles2::SubscriptionRefSet>
InProcessCommandBuffer::Service::subscription_ref_set() {
  if (!subscription_ref_set_.get()) {
    subscription_ref_set_ = new gles2::SubscriptionRefSet();
  }
  return subscription_ref_set_;
}

scoped_refptr<ValueStateMap>
InProcessCommandBuffer::Service::pending_valuebuffer_state() {
  if (!pending_valuebuffer_state_.get()) {
    pending_valuebuffer_state_ = new ValueStateMap();
  }
  return pending_valuebuffer_state_;
}

InProcessCommandBuffer::InProcessCommandBuffer(
    const scoped_refptr<Service>& service)
    : context_lost_(false),
      idle_work_pending_(false),
      image_factory_(nullptr),
      last_put_offset_(-1),
      gpu_memory_buffer_manager_(nullptr),
      flush_event_(false, false),
      service_(service.get() ? service : g_default_service.Get().gpu_thread),
      gpu_thread_weak_ptr_factory_(this) {
  DCHECK(service_.get());
  next_image_id_.GetNext();
}

InProcessCommandBuffer::~InProcessCommandBuffer() {

void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) {
  CheckSequencedThread();
  DCHECK(!surface_->IsOffscreen());
  surface_->Resize(size);
}

bool InProcessCommandBuffer::MakeCurrent() {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();

  if (!context_lost_ && decoder_->MakeCurrent())
    return true;
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
  command_buffer_->SetParseError(gpu::error::kLostContext);
  return false;
}

void InProcessCommandBuffer::PumpCommands() {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();

  gpu_scheduler_->PutChanged();
}

bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id) {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();
  command_buffer_->SetGetBuffer(transfer_buffer_id);
  return true;
}

bool InProcessCommandBuffer::Initialize(
    scoped_refptr<gfx::GLSurface> surface,
    bool is_offscreen,
    gfx::AcceleratedWidget window,
    const gfx::Size& size,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    const base::Closure& context_lost_callback,
    InProcessCommandBuffer* share_group,
    GpuMemoryBufferManager* gpu_memory_buffer_manager,
    ImageFactory* image_factory) {
  DCHECK(!share_group || service_.get() == share_group->service_.get());
  context_lost_callback_ = WrapCallback(context_lost_callback);

  // GPU thread must be the same as client thread due to GLSurface not being
  // thread safe.
  sequence_checker_.reset(new base::SequenceChecker);

  gpu::Capabilities capabilities;
  InitializeOnGpuThreadParams params(is_offscreen,

  base::Callback<bool(void)> init_task =
      base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread,
                 base::Unretained(this),

  base::WaitableEvent completion(true, false);
  bool result = false;
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion));

  gpu_memory_buffer_manager_ = gpu_memory_buffer_manager;

  capabilities_ = capabilities;
  capabilities_.image = capabilities_.image && gpu_memory_buffer_manager_;

bool InProcessCommandBuffer::InitializeOnGpuThread(
    const InitializeOnGpuThreadParams& params) {
  CheckSequencedThread();
  gpu_thread_weak_ptr_ = gpu_thread_weak_ptr_factory_.GetWeakPtr();

  DCHECK(params.size.width() >= 0 && params.size.height() >= 0);

  TransferBufferManager* manager = new TransferBufferManager();
  transfer_buffer_manager_.reset(manager);
  manager->Initialize();

  scoped_ptr<CommandBufferService> command_buffer(
      new CommandBufferService(transfer_buffer_manager_.get()));
  command_buffer->SetPutOffsetChangeCallback(base::Bind(
      &InProcessCommandBuffer::PumpCommands, gpu_thread_weak_ptr_));
  command_buffer->SetParseErrorCallback(base::Bind(
      &InProcessCommandBuffer::OnContextLost, gpu_thread_weak_ptr_));

  if (!command_buffer->Initialize()) {
    LOG(ERROR) << "Could not initialize command buffer.";
    DestroyOnGpuThread();
    return false;
  }

  gl_share_group_ = params.context_group
                        ? params.context_group->gl_share_group_
                        : service_->share_group();

#if defined(OS_ANDROID)
  stream_texture_manager_.reset(new StreamTextureManagerInProcess);
#endif

  bool bind_generates_resource = false;
  decoder_.reset(gles2::GLES2Decoder::Create(
      params.context_group
          ? params.context_group->decoder_->GetContextGroup()
          : new gles2::ContextGroup(service_->mailbox_manager(),
                                    service_->shader_translator_cache(),
                                    service_->subscription_ref_set(),
                                    service_->pending_valuebuffer_state(),
                                    bind_generates_resource)));

  gpu_scheduler_.reset(
      new GpuScheduler(command_buffer.get(), decoder_.get(), decoder_.get()));
  command_buffer->SetGetBufferChangeCallback(base::Bind(
      &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
  command_buffer_ = command_buffer.Pass();

  decoder_->set_engine(gpu_scheduler_.get());

  if (!surface_.get()) {
    if (params.is_offscreen)
      surface_ = gfx::GLSurface::CreateOffscreenGLSurface(params.size);
    else
      surface_ = gfx::GLSurface::CreateViewGLSurface(params.window);
  }

  if (!surface_.get()) {
    LOG(ERROR) << "Could not create GLSurface.";
    DestroyOnGpuThread();
    return false;
  }

  if (service_->UseVirtualizedGLContexts() ||
      decoder_->GetContextGroup()
          ->feature_info()
          ->workarounds()
          .use_virtualized_gl_contexts) {
    context_ = gl_share_group_->GetSharedContext();
    if (!context_.get()) {
      context_ = gfx::GLContext::CreateGLContext(
          gl_share_group_.get(), surface_.get(), params.gpu_preference);
      gl_share_group_->SetSharedContext(context_.get());
    }

    context_ = new GLContextVirtual(
        gl_share_group_.get(), context_.get(), decoder_->AsWeakPtr());
    if (context_->Initialize(surface_.get(), params.gpu_preference)) {
      VLOG(1) << "Created virtual GL context.";
    } else {
      context_ = NULL;
    }
  } else {
    context_ = gfx::GLContext::CreateGLContext(
        gl_share_group_.get(), surface_.get(), params.gpu_preference);
  }

  if (!context_.get()) {
    LOG(ERROR) << "Could not create GLContext.";
    DestroyOnGpuThread();
    return false;
  }

  if (!context_->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Could not make context current.";
    DestroyOnGpuThread();
    return false;
  }

  gles2::DisallowedFeatures disallowed_features;
  disallowed_features.gpu_memory_manager = true;
  if (!decoder_->Initialize(surface_,
    LOG(ERROR) << "Could not initialize decoder.";
    DestroyOnGpuThread();

  *params.capabilities = decoder_->GetCapabilities();

  if (!params.is_offscreen) {
    decoder_->SetResizeCallback(base::Bind(
        &InProcessCommandBuffer::OnResizeView, gpu_thread_weak_ptr_));
  }
  decoder_->SetWaitSyncPointCallback(
      base::Bind(&InProcessCommandBuffer::WaitSyncPointOnGpuThread,
                 base::Unretained(this)));

  image_factory_ = params.image_factory;
  params.capabilities->image = params.capabilities->image && image_factory_;

  return true;
}

void InProcessCommandBuffer::Destroy() {
  CheckSequencedThread();

  base::WaitableEvent completion(true, false);
  bool result = false;
  base::Callback<bool(void)> destroy_task = base::Bind(
      &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion));

bool InProcessCommandBuffer::DestroyOnGpuThread() {
  CheckSequencedThread();
  gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs();
  command_buffer_.reset();
  // Clean up GL resources if possible.
  bool have_context = context_.get() && context_->MakeCurrent(surface_.get());
  decoder_->Destroy(have_context);
  gl_share_group_ = NULL;
#if defined(OS_ANDROID)
  stream_texture_manager_.reset();
#endif

  return true;
}

void InProcessCommandBuffer::CheckSequencedThread() {
  DCHECK(!sequence_checker_ ||
         sequence_checker_->CalledOnValidSequencedThread());
}

void InProcessCommandBuffer::OnContextLost() {
  CheckSequencedThread();
  if (!context_lost_callback_.is_null()) {
    context_lost_callback_.Run();
    context_lost_callback_.Reset();
  }

  context_lost_ = true;
}
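
// Picks up the state recorded by the most recent flush, but only when it is
// at least as new as |last_state_|; the unsigned subtraction keeps the
// comparison valid if the generation counter wraps around.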
CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
  CheckSequencedThread();
  base::AutoLock lock(state_after_last_flush_lock_);
  if (state_after_last_flush_.generation - last_state_.generation <
      0x80000000U)
    last_state_ = state_after_last_flush_;
  return last_state_;
}

CommandBuffer::State InProcessCommandBuffer::GetLastState() {
  CheckSequencedThread();
  return last_state_;
}

int32 InProcessCommandBuffer::GetLastToken() {
  CheckSequencedThread();
  return last_state_.token;
}

void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) {
  CheckSequencedThread();
  ScopedEvent handle_flush(&flush_event_);
  base::AutoLock lock(command_buffer_lock_);
  command_buffer_->Flush(put_offset);
  {
    // Update state before signaling the flush event.
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetLastState();
  }
  DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) ||
         (error::IsError(state_after_last_flush_.error) && context_lost_));

  // If we've processed all pending commands but still have pending queries,
  // pump idle work until the query is passed.
  if (put_offset == state_after_last_flush_.get_offset &&
      gpu_scheduler_->HasMoreWork()) {
    ScheduleIdleWorkOnGpuThread();
  }
}

void InProcessCommandBuffer::PerformIdleWork() {
  CheckSequencedThread();
  idle_work_pending_ = false;
  base::AutoLock lock(command_buffer_lock_);
  if (MakeCurrent() && gpu_scheduler_->HasMoreWork()) {
    gpu_scheduler_->PerformIdleWork();
    ScheduleIdleWorkOnGpuThread();
  }
}

void InProcessCommandBuffer::ScheduleIdleWorkOnGpuThread() {
  CheckSequencedThread();
  if (idle_work_pending_)
    return;
  idle_work_pending_ = true;
  service_->ScheduleIdleWork(
      base::Bind(&InProcessCommandBuffer::PerformIdleWork,
                 gpu_thread_weak_ptr_));
}
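
// Client-side flush: the hop to the GPU thread is skipped entirely when the
// put offset has not advanced since the previous flush.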
void InProcessCommandBuffer::Flush(int32 put_offset) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  if (last_put_offset_ == put_offset)
    return;

  last_put_offset_ = put_offset;
  base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
                                  gpu_thread_weak_ptr_,
                                  put_offset);

void InProcessCommandBuffer::OrderingBarrier(int32 put_offset) {

void InProcessCommandBuffer::WaitForTokenInRange(int32 start, int32 end) {
  CheckSequencedThread();
  while (!InRange(start, end, GetLastToken()) &&
         last_state_.error == gpu::error::kNoError)

void InProcessCommandBuffer::WaitForGetOffsetInRange(int32 start, int32 end) {
  CheckSequencedThread();

  while (!InRange(start, end, last_state_.get_offset) &&
         last_state_.error == gpu::error::kNoError) {

void InProcessCommandBuffer::SetGetBuffer(int32 shm_id) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  {
    base::AutoLock lock(command_buffer_lock_);
    command_buffer_->SetGetBuffer(shm_id);
    last_put_offset_ = 0;
  }
  {
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetLastState();
  }
}

scoped_refptr<Buffer> InProcessCommandBuffer::CreateTransferBuffer(size_t size,
                                                                   int32* id) {
  CheckSequencedThread();
  base::AutoLock lock(command_buffer_lock_);
  return command_buffer_->CreateTransferBuffer(size, id);
}

void InProcessCommandBuffer::DestroyTransferBuffer(int32 id) {
  CheckSequencedThread();
      base::Bind(&InProcessCommandBuffer::DestroyTransferBufferOnGpuThread,
                 base::Unretained(this),

void InProcessCommandBuffer::DestroyTransferBufferOnGpuThread(int32 id) {
  base::AutoLock lock(command_buffer_lock_);
  command_buffer_->DestroyTransferBuffer(id);
}

gpu::Capabilities InProcessCommandBuffer::GetCapabilities() {
  return capabilities_;
}

int32 InProcessCommandBuffer::CreateImage(ClientBuffer buffer,
                                          size_t width,
                                          size_t height,
                                          unsigned internalformat) {
  CheckSequencedThread();

  DCHECK(gpu_memory_buffer_manager_);
  gfx::GpuMemoryBuffer* gpu_memory_buffer =
      gpu_memory_buffer_manager_->GpuMemoryBufferFromClientBuffer(buffer);
  DCHECK(gpu_memory_buffer);

  int32 new_id = next_image_id_.GetNext();

  DCHECK(gpu::ImageFactory::IsImageFormatCompatibleWithGpuMemoryBufferFormat(
      internalformat, gpu_memory_buffer->GetFormat()));

  // This handle is owned by the GPU thread and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // queuing of the CreateImage task below.
  bool requires_sync_point = false;
  gfx::GpuMemoryBufferHandle handle =
      ShareGpuMemoryBufferToGpuThread(gpu_memory_buffer->GetHandle(),
                                      &requires_sync_point);

  QueueTask(base::Bind(&InProcessCommandBuffer::CreateImageOnGpuThread,
                       base::Unretained(this),
                       new_id,
                       handle,
                       gfx::Size(width, height),
                       gpu_memory_buffer->GetFormat(),
                       internalformat));

  if (requires_sync_point) {
    gpu_memory_buffer_manager_->SetDestructionSyncPoint(gpu_memory_buffer,

void InProcessCommandBuffer::CreateImageOnGpuThread(
    int32 id,
    const gfx::GpuMemoryBufferHandle& handle,
    const gfx::Size& size,
    gfx::GpuMemoryBuffer::Format format,
    uint32 internalformat) {
  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  if (image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image already exists with same ID.";
    return;
  }

  // Note: this assumes that client ID is always 0.
  const int kClientId = 0;

  DCHECK(image_factory_);
  scoped_refptr<gfx::GLImage> image =
      image_factory_->CreateImageForGpuMemoryBuffer(
          handle, size, format, internalformat, kClientId);

  image_manager->AddImage(image.get(), id);
}

void InProcessCommandBuffer::DestroyImage(int32 id) {
  CheckSequencedThread();

  QueueTask(base::Bind(&InProcessCommandBuffer::DestroyImageOnGpuThread,
                       base::Unretained(this),
                       id));
}

void InProcessCommandBuffer::DestroyImageOnGpuThread(int32 id) {
  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  if (!image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image with ID doesn't exist.";
    return;
  }

  image_manager->RemoveImage(id);
}

int32 InProcessCommandBuffer::CreateGpuMemoryBufferImage(
    size_t width,
    size_t height,
    unsigned internalformat,
    unsigned usage) {
  CheckSequencedThread();

  DCHECK(gpu_memory_buffer_manager_);
  scoped_ptr<gfx::GpuMemoryBuffer> buffer(
      gpu_memory_buffer_manager_->AllocateGpuMemoryBuffer(
          gfx::Size(width, height),
          gpu::ImageFactory::ImageFormatToGpuMemoryBufferFormat(internalformat),
          gpu::ImageFactory::ImageUsageToGpuMemoryBufferUsage(usage)));

  return CreateImage(buffer->AsClientBuffer(), width, height, internalformat);
}
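
// Sync points are generated immediately on the client thread; their
// retirement is queued as a GPU-thread task so that it happens in order with
// previously submitted work.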
uint32 InProcessCommandBuffer::InsertSyncPoint() {
  uint32 sync_point = g_sync_point_manager.Get().GenerateSyncPoint();
  QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread,
                       base::Unretained(this),
                       sync_point));
  return sync_point;
}

uint32 InProcessCommandBuffer::InsertFutureSyncPoint() {
  return g_sync_point_manager.Get().GenerateSyncPoint();
}

void InProcessCommandBuffer::RetireSyncPoint(uint32 sync_point) {
  QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread,
                       base::Unretained(this),
                       sync_point));
}

void InProcessCommandBuffer::RetireSyncPointOnGpuThread(uint32 sync_point) {
  gles2::MailboxManager* mailbox_manager =
      decoder_->GetContextGroup()->mailbox_manager();
  if (mailbox_manager->UsesSync()) {
    bool make_current_success = false;
    {
      base::AutoLock lock(command_buffer_lock_);
      make_current_success = MakeCurrent();
    }
    if (make_current_success)
      mailbox_manager->PushTextureUpdates(sync_point);
  }
  g_sync_point_manager.Get().RetireSyncPoint(sync_point);
}

void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point,
                                             const base::Closure& callback) {
  CheckSequencedThread();
  QueueTask(base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread,
                       base::Unretained(this),
                       sync_point,
                       WrapCallback(callback)));
}
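
// Installed as the decoder's wait-sync-point callback: blocks the GPU thread
// until the sync point is retired, then pulls texture updates published
// under it.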
bool InProcessCommandBuffer::WaitSyncPointOnGpuThread(unsigned sync_point) {
  g_sync_point_manager.Get().WaitSyncPoint(sync_point);
  gles2::MailboxManager* mailbox_manager =
      decoder_->GetContextGroup()->mailbox_manager();
  mailbox_manager->PullTextureUpdates(sync_point);
  return true;
}

void InProcessCommandBuffer::SignalSyncPointOnGpuThread(
    uint32 sync_point,
    const base::Closure& callback) {
  g_sync_point_manager.Get().AddSyncPointCallback(sync_point, callback);
}

void InProcessCommandBuffer::SignalQuery(unsigned query_id,
                                         const base::Closure& callback) {
  CheckSequencedThread();
  QueueTask(base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread,
                       base::Unretained(this),
                       query_id,
                       WrapCallback(callback)));
}

void InProcessCommandBuffer::SignalQueryOnGpuThread(
    unsigned query_id,
    const base::Closure& callback) {
  gles2::QueryManager* query_manager_ = decoder_->GetQueryManager();
  DCHECK(query_manager_);

  gles2::QueryManager::Query* query = query_manager_->GetQuery(query_id);
  query->AddCallback(callback);
}

void InProcessCommandBuffer::SetSurfaceVisible(bool visible) {}
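
// Creates the stream texture on the GPU thread and blocks the calling thread
// until the resulting stream id is available.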
uint32 InProcessCommandBuffer::CreateStreamTexture(uint32 texture_id) {
  base::WaitableEvent completion(true, false);
  uint32 stream_id = 0;
  base::Callback<uint32(void)> task =
      base::Bind(&InProcessCommandBuffer::CreateStreamTextureOnGpuThread,
                 base::Unretained(this),
                 texture_id);
  QueueTask(
      base::Bind(&RunTaskWithResult<uint32>, task, &stream_id, &completion));
  completion.Wait();
  return stream_id;
}

void InProcessCommandBuffer::SetLock(base::Lock*) {
}

uint32 InProcessCommandBuffer::CreateStreamTextureOnGpuThread(
    uint32 client_texture_id) {
#if defined(OS_ANDROID)
  return stream_texture_manager_->CreateStreamTexture(
      client_texture_id, decoder_->GetContextGroup()->texture_manager());
#endif
}

gpu::error::Error InProcessCommandBuffer::GetLastError() {
  CheckSequencedThread();
  return last_state_.error;
}

bool InProcessCommandBuffer::Initialize() {
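
// Helper used by WrapCallback() to deliver a wrapped callback back on the
// client's message loop when the call arrives on a different thread.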
void PostCallback(const scoped_refptr<base::MessageLoopProxy>& loop,
                  const base::Closure& callback) {
  // The loop.get() check is to support using InProcessCommandBuffer on a
  // thread without a message loop.
  if (loop.get() && !loop->BelongsToCurrentThread()) {
    loop->PostTask(FROM_HERE, callback);
  } else {
    callback.Run();
  }
}

void RunOnTargetThread(scoped_ptr<base::Closure> callback) {
  DCHECK(callback.get());
  callback->Run();
}

}  // anonymous namespace

base::Closure InProcessCommandBuffer::WrapCallback(
    const base::Closure& callback) {
  // Make sure the callback gets deleted on the target thread by passing
  // ownership.
  scoped_ptr<base::Closure> scoped_callback(new base::Closure(callback));
  base::Closure callback_on_client_thread =
      base::Bind(&RunOnTargetThread, base::Passed(&scoped_callback));
  base::Closure wrapped_callback =
      base::Bind(&PostCallback, base::MessageLoopProxy::current(),
                 callback_on_client_thread);
  return wrapped_callback;
}

#if defined(OS_ANDROID)
scoped_refptr<gfx::SurfaceTexture>
InProcessCommandBuffer::GetSurfaceTexture(uint32 stream_id) {
  DCHECK(stream_texture_manager_);
  return stream_texture_manager_->GetSurfaceTexture(stream_id);
);