1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "gpu/command_buffer/service/in_process_command_buffer.h"
11 #include "base/bind.h"
12 #include "base/bind_helpers.h"
13 #include "base/command_line.h"
14 #include "base/lazy_instance.h"
15 #include "base/location.h"
16 #include "base/logging.h"
17 #include "base/memory/weak_ptr.h"
18 #include "base/sequence_checker.h"
19 #include "base/single_thread_task_runner.h"
20 #include "base/synchronization/condition_variable.h"
21 #include "base/thread_task_runner_handle.h"
22 #include "base/threading/thread.h"
23 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
24 #include "gpu/command_buffer/common/value_state.h"
25 #include "gpu/command_buffer/service/command_buffer_service.h"
26 #include "gpu/command_buffer/service/context_group.h"
27 #include "gpu/command_buffer/service/gl_context_virtual.h"
28 #include "gpu/command_buffer/service/gpu_scheduler.h"
29 #include "gpu/command_buffer/service/gpu_switches.h"
30 #include "gpu/command_buffer/service/image_factory.h"
31 #include "gpu/command_buffer/service/image_manager.h"
32 #include "gpu/command_buffer/service/mailbox_manager_impl.h"
33 #include "gpu/command_buffer/service/mailbox_manager_sync.h"
34 #include "gpu/command_buffer/service/memory_tracking.h"
35 #include "gpu/command_buffer/service/query_manager.h"
36 #include "gpu/command_buffer/service/sync_point_manager.h"
37 #include "gpu/command_buffer/service/transfer_buffer_manager.h"
38 #include "gpu/command_buffer/service/valuebuffer_manager.h"
39 #include "ui/gfx/geometry/size.h"
40 #include "ui/gl/gl_context.h"
41 #include "ui/gl/gl_image.h"
42 #include "ui/gl/gl_image_shared_memory.h"
43 #include "ui/gl/gl_share_group.h"
45 #if defined(OS_ANDROID)
46 #include "gpu/command_buffer/service/stream_texture_manager_in_process_android.h"
47 #include "ui/gl/android/surface_texture.h"
52 #include "base/process/process_handle.h"
60 static void RunTaskWithResult(base::Callback
<T(void)> task
,
62 base::WaitableEvent
* completion
) {
67 class GpuInProcessThread
68 : public base::Thread
,
69 public InProcessCommandBuffer::Service
,
70 public base::RefCountedThreadSafe
<GpuInProcessThread
> {
74 void AddRef() const override
{
75 base::RefCountedThreadSafe
<GpuInProcessThread
>::AddRef();
77 void Release() const override
{
78 base::RefCountedThreadSafe
<GpuInProcessThread
>::Release();
81 void ScheduleTask(const base::Closure
& task
) override
;
82 void ScheduleIdleWork(const base::Closure
& callback
) override
;
83 bool UseVirtualizedGLContexts() override
{ return false; }
84 scoped_refptr
<gles2::ShaderTranslatorCache
> shader_translator_cache()
88 ~GpuInProcessThread() override
;
89 friend class base::RefCountedThreadSafe
<GpuInProcessThread
>;
91 scoped_refptr
<gpu::gles2::ShaderTranslatorCache
> shader_translator_cache_
;
92 DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread
);
95 GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") {
99 GpuInProcessThread::~GpuInProcessThread() {
103 void GpuInProcessThread::ScheduleTask(const base::Closure
& task
) {
104 task_runner()->PostTask(FROM_HERE
, task
);
107 void GpuInProcessThread::ScheduleIdleWork(const base::Closure
& callback
) {
108 // Match delay with GpuCommandBufferStub.
109 task_runner()->PostDelayedTask(FROM_HERE
, callback
,
110 base::TimeDelta::FromMilliseconds(2));
113 scoped_refptr
<gles2::ShaderTranslatorCache
>
114 GpuInProcessThread::shader_translator_cache() {
115 if (!shader_translator_cache_
.get())
116 shader_translator_cache_
= new gpu::gles2::ShaderTranslatorCache
;
117 return shader_translator_cache_
;
120 struct GpuInProcessThreadHolder
{
121 GpuInProcessThreadHolder() : gpu_thread(new GpuInProcessThread
) {}
122 scoped_refptr
<InProcessCommandBuffer::Service
> gpu_thread
;
125 base::LazyInstance
<GpuInProcessThreadHolder
> g_default_service
=
126 LAZY_INSTANCE_INITIALIZER
;
130 explicit ScopedEvent(base::WaitableEvent
* event
) : event_(event
) {}
131 ~ScopedEvent() { event_
->Signal(); }
134 base::WaitableEvent
* event_
;
137 // This wrapper adds the WaitSyncPoint which allows waiting on a sync point
138 // on the service thread, implemented using a condition variable.
139 class SyncPointManagerWrapper
{
141 SyncPointManagerWrapper();
143 uint32
GenerateSyncPoint();
144 void RetireSyncPoint(uint32 sync_point
);
145 void AddSyncPointCallback(uint32 sync_point
, const base::Closure
& callback
);
147 void WaitSyncPoint(uint32 sync_point
);
150 void OnSyncPointRetired();
152 const scoped_refptr
<SyncPointManager
> manager_
;
153 base::Lock retire_lock_
;
154 base::ConditionVariable retire_cond_var_
;
156 DISALLOW_COPY_AND_ASSIGN(SyncPointManagerWrapper
);
159 SyncPointManagerWrapper::SyncPointManagerWrapper()
160 : manager_(SyncPointManager::Create(true)),
161 retire_cond_var_(&retire_lock_
) {
164 uint32
SyncPointManagerWrapper::GenerateSyncPoint() {
165 uint32 sync_point
= manager_
->GenerateSyncPoint();
166 manager_
->AddSyncPointCallback(
167 sync_point
, base::Bind(&SyncPointManagerWrapper::OnSyncPointRetired
,
168 base::Unretained(this)));
172 void SyncPointManagerWrapper::RetireSyncPoint(uint32 sync_point
) {
173 manager_
->RetireSyncPoint(sync_point
);
176 void SyncPointManagerWrapper::AddSyncPointCallback(
178 const base::Closure
& callback
) {
179 manager_
->AddSyncPointCallback(sync_point
, callback
);
182 void SyncPointManagerWrapper::WaitSyncPoint(uint32 sync_point
) {
183 base::AutoLock
lock(retire_lock_
);
184 while (!manager_
->IsSyncPointRetired(sync_point
)) {
185 retire_cond_var_
.Wait();
189 void SyncPointManagerWrapper::OnSyncPointRetired() {
190 base::AutoLock
lock(retire_lock_
);
191 retire_cond_var_
.Broadcast();
194 base::LazyInstance
<SyncPointManagerWrapper
> g_sync_point_manager
=
195 LAZY_INSTANCE_INITIALIZER
;
197 base::SharedMemoryHandle
ShareToGpuThread(
198 base::SharedMemoryHandle source_handle
) {
200 // Windows needs to explicitly duplicate the handle to current process.
201 base::SharedMemoryHandle target_handle
;
202 if (!DuplicateHandle(GetCurrentProcess(),
206 FILE_GENERIC_READ
| FILE_GENERIC_WRITE
,
209 return base::SharedMemory::NULLHandle();
212 return target_handle
;
214 int duped_handle
= HANDLE_EINTR(dup(source_handle
.fd
));
215 if (duped_handle
< 0)
216 return base::SharedMemory::NULLHandle();
218 return base::FileDescriptor(duped_handle
, true);
222 gfx::GpuMemoryBufferHandle
ShareGpuMemoryBufferToGpuThread(
223 const gfx::GpuMemoryBufferHandle
& source_handle
,
224 bool* requires_sync_point
) {
225 switch (source_handle
.type
) {
226 case gfx::SHARED_MEMORY_BUFFER
: {
227 gfx::GpuMemoryBufferHandle handle
;
228 handle
.type
= gfx::SHARED_MEMORY_BUFFER
;
229 handle
.handle
= ShareToGpuThread(source_handle
.handle
);
230 *requires_sync_point
= false;
233 case gfx::IO_SURFACE_BUFFER
:
234 case gfx::SURFACE_TEXTURE_BUFFER
:
235 case gfx::OZONE_NATIVE_BUFFER
:
236 *requires_sync_point
= true;
237 return source_handle
;
240 return gfx::GpuMemoryBufferHandle();
}  // anonymous namespace
246 InProcessCommandBuffer::Service::Service() {}
248 InProcessCommandBuffer::Service::~Service() {}
250 scoped_refptr
<gfx::GLShareGroup
>
251 InProcessCommandBuffer::Service::share_group() {
252 if (!share_group_
.get())
253 share_group_
= new gfx::GLShareGroup
;
257 scoped_refptr
<gles2::MailboxManager
>
258 InProcessCommandBuffer::Service::mailbox_manager() {
259 if (!mailbox_manager_
.get()) {
260 if (base::CommandLine::ForCurrentProcess()->HasSwitch(
261 switches::kEnableThreadedTextureMailboxes
)) {
262 mailbox_manager_
= new gles2::MailboxManagerSync();
264 mailbox_manager_
= new gles2::MailboxManagerImpl();
267 return mailbox_manager_
;
270 scoped_refptr
<gles2::SubscriptionRefSet
>
271 InProcessCommandBuffer::Service::subscription_ref_set() {
272 if (!subscription_ref_set_
.get()) {
273 subscription_ref_set_
= new gles2::SubscriptionRefSet();
275 return subscription_ref_set_
;
278 scoped_refptr
<ValueStateMap
>
279 InProcessCommandBuffer::Service::pending_valuebuffer_state() {
280 if (!pending_valuebuffer_state_
.get()) {
281 pending_valuebuffer_state_
= new ValueStateMap();
283 return pending_valuebuffer_state_
;
286 InProcessCommandBuffer::InProcessCommandBuffer(
287 const scoped_refptr
<Service
>& service
)
288 : context_lost_(false),
289 idle_work_pending_(false),
290 image_factory_(nullptr),
291 last_put_offset_(-1),
292 gpu_memory_buffer_manager_(nullptr),
293 flush_event_(false, false),
294 service_(service
.get() ? service
: g_default_service
.Get().gpu_thread
),
295 gpu_thread_weak_ptr_factory_(this) {
296 DCHECK(service_
.get());
297 next_image_id_
.GetNext();
300 InProcessCommandBuffer::~InProcessCommandBuffer() {
304 void InProcessCommandBuffer::OnResizeView(gfx::Size size
, float scale_factor
) {
305 CheckSequencedThread();
306 DCHECK(!surface_
->IsOffscreen());
307 surface_
->Resize(size
);
310 bool InProcessCommandBuffer::MakeCurrent() {
311 CheckSequencedThread();
312 command_buffer_lock_
.AssertAcquired();
314 if (!context_lost_
&& decoder_
->MakeCurrent())
316 DLOG(ERROR
) << "Context lost because MakeCurrent failed.";
317 command_buffer_
->SetContextLostReason(decoder_
->GetContextLostReason());
318 command_buffer_
->SetParseError(gpu::error::kLostContext
);
322 void InProcessCommandBuffer::PumpCommands() {
323 CheckSequencedThread();
324 command_buffer_lock_
.AssertAcquired();
329 gpu_scheduler_
->PutChanged();
332 bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id
) {
333 CheckSequencedThread();
334 command_buffer_lock_
.AssertAcquired();
335 command_buffer_
->SetGetBuffer(transfer_buffer_id
);
339 bool InProcessCommandBuffer::Initialize(
340 scoped_refptr
<gfx::GLSurface
> surface
,
342 gfx::AcceleratedWidget window
,
343 const gfx::Size
& size
,
344 const std::vector
<int32
>& attribs
,
345 gfx::GpuPreference gpu_preference
,
346 const base::Closure
& context_lost_callback
,
347 InProcessCommandBuffer
* share_group
,
348 GpuMemoryBufferManager
* gpu_memory_buffer_manager
,
349 ImageFactory
* image_factory
) {
350 DCHECK(!share_group
|| service_
.get() == share_group
->service_
.get());
351 context_lost_callback_
= WrapCallback(context_lost_callback
);
354 // GPU thread must be the same as client thread due to GLSurface not being
356 sequence_checker_
.reset(new base::SequenceChecker
);
360 gpu::Capabilities capabilities
;
361 InitializeOnGpuThreadParams
params(is_offscreen
,
370 base::Callback
<bool(void)> init_task
=
371 base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread
,
372 base::Unretained(this),
375 base::WaitableEvent
completion(true, false);
378 base::Bind(&RunTaskWithResult
<bool>, init_task
, &result
, &completion
));
381 gpu_memory_buffer_manager_
= gpu_memory_buffer_manager
;
384 capabilities_
= capabilities
;
385 capabilities_
.image
= capabilities_
.image
&& gpu_memory_buffer_manager_
;
391 bool InProcessCommandBuffer::InitializeOnGpuThread(
392 const InitializeOnGpuThreadParams
& params
) {
393 CheckSequencedThread();
394 gpu_thread_weak_ptr_
= gpu_thread_weak_ptr_factory_
.GetWeakPtr();
396 DCHECK(params
.size
.width() >= 0 && params
.size
.height() >= 0);
398 TransferBufferManager
* manager
= new TransferBufferManager();
399 transfer_buffer_manager_
.reset(manager
);
400 manager
->Initialize();
402 scoped_ptr
<CommandBufferService
> command_buffer(
403 new CommandBufferService(transfer_buffer_manager_
.get()));
404 command_buffer
->SetPutOffsetChangeCallback(base::Bind(
405 &InProcessCommandBuffer::PumpCommands
, gpu_thread_weak_ptr_
));
406 command_buffer
->SetParseErrorCallback(base::Bind(
407 &InProcessCommandBuffer::OnContextLost
, gpu_thread_weak_ptr_
));
409 if (!command_buffer
->Initialize()) {
410 LOG(ERROR
) << "Could not initialize command buffer.";
411 DestroyOnGpuThread();
415 gl_share_group_
= params
.context_group
416 ? params
.context_group
->gl_share_group_
417 : service_
->share_group();
419 #if defined(OS_ANDROID)
420 stream_texture_manager_
.reset(new StreamTextureManagerInProcess
);
423 bool bind_generates_resource
= false;
424 decoder_
.reset(gles2::GLES2Decoder::Create(
426 ? params
.context_group
->decoder_
->GetContextGroup()
427 : new gles2::ContextGroup(service_
->mailbox_manager(),
429 service_
->shader_translator_cache(),
431 service_
->subscription_ref_set(),
432 service_
->pending_valuebuffer_state(),
433 bind_generates_resource
)));
435 gpu_scheduler_
.reset(
436 new GpuScheduler(command_buffer
.get(), decoder_
.get(), decoder_
.get()));
437 command_buffer
->SetGetBufferChangeCallback(base::Bind(
438 &GpuScheduler::SetGetBuffer
, base::Unretained(gpu_scheduler_
.get())));
439 command_buffer_
= command_buffer
.Pass();
441 decoder_
->set_engine(gpu_scheduler_
.get());
443 if (!surface_
.get()) {
444 if (params
.is_offscreen
)
445 surface_
= gfx::GLSurface::CreateOffscreenGLSurface(params
.size
);
447 surface_
= gfx::GLSurface::CreateViewGLSurface(params
.window
);
450 if (!surface_
.get()) {
451 LOG(ERROR
) << "Could not create GLSurface.";
452 DestroyOnGpuThread();
456 if (service_
->UseVirtualizedGLContexts() ||
457 decoder_
->GetContextGroup()
460 .use_virtualized_gl_contexts
) {
461 context_
= gl_share_group_
->GetSharedContext();
462 if (!context_
.get()) {
463 context_
= gfx::GLContext::CreateGLContext(
464 gl_share_group_
.get(), surface_
.get(), params
.gpu_preference
);
465 gl_share_group_
->SetSharedContext(context_
.get());
468 context_
= new GLContextVirtual(
469 gl_share_group_
.get(), context_
.get(), decoder_
->AsWeakPtr());
470 if (context_
->Initialize(surface_
.get(), params
.gpu_preference
)) {
471 VLOG(1) << "Created virtual GL context.";
476 context_
= gfx::GLContext::CreateGLContext(
477 gl_share_group_
.get(), surface_
.get(), params
.gpu_preference
);
480 if (!context_
.get()) {
481 LOG(ERROR
) << "Could not create GLContext.";
482 DestroyOnGpuThread();
486 if (!context_
->MakeCurrent(surface_
.get())) {
487 LOG(ERROR
) << "Could not make context current.";
488 DestroyOnGpuThread();
492 gles2::DisallowedFeatures disallowed_features
;
493 disallowed_features
.gpu_memory_manager
= true;
494 if (!decoder_
->Initialize(surface_
,
500 LOG(ERROR
) << "Could not initialize decoder.";
501 DestroyOnGpuThread();
504 *params
.capabilities
= decoder_
->GetCapabilities();
506 if (!params
.is_offscreen
) {
507 decoder_
->SetResizeCallback(base::Bind(
508 &InProcessCommandBuffer::OnResizeView
, gpu_thread_weak_ptr_
));
510 decoder_
->SetWaitSyncPointCallback(
511 base::Bind(&InProcessCommandBuffer::WaitSyncPointOnGpuThread
,
512 base::Unretained(this)));
514 image_factory_
= params
.image_factory
;
519 void InProcessCommandBuffer::Destroy() {
520 CheckSequencedThread();
522 base::WaitableEvent
completion(true, false);
524 base::Callback
<bool(void)> destroy_task
= base::Bind(
525 &InProcessCommandBuffer::DestroyOnGpuThread
, base::Unretained(this));
527 base::Bind(&RunTaskWithResult
<bool>, destroy_task
, &result
, &completion
));
531 bool InProcessCommandBuffer::DestroyOnGpuThread() {
532 CheckSequencedThread();
533 gpu_thread_weak_ptr_factory_
.InvalidateWeakPtrs();
534 command_buffer_
.reset();
535 // Clean up GL resources if possible.
536 bool have_context
= context_
.get() && context_
->MakeCurrent(surface_
.get());
538 decoder_
->Destroy(have_context
);
543 gl_share_group_
= NULL
;
544 #if defined(OS_ANDROID)
545 stream_texture_manager_
.reset();
551 void InProcessCommandBuffer::CheckSequencedThread() {
552 DCHECK(!sequence_checker_
||
553 sequence_checker_
->CalledOnValidSequencedThread());
556 void InProcessCommandBuffer::OnContextLost() {
557 CheckSequencedThread();
558 if (!context_lost_callback_
.is_null()) {
559 context_lost_callback_
.Run();
560 context_lost_callback_
.Reset();
563 context_lost_
= true;
566 CommandBuffer::State
InProcessCommandBuffer::GetStateFast() {
567 CheckSequencedThread();
568 base::AutoLock
lock(state_after_last_flush_lock_
);
569 if (state_after_last_flush_
.generation
- last_state_
.generation
< 0x80000000U
)
570 last_state_
= state_after_last_flush_
;
574 CommandBuffer::State
InProcessCommandBuffer::GetLastState() {
575 CheckSequencedThread();
579 int32
InProcessCommandBuffer::GetLastToken() {
580 CheckSequencedThread();
582 return last_state_
.token
;
585 void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset
) {
586 CheckSequencedThread();
587 ScopedEvent
handle_flush(&flush_event_
);
588 base::AutoLock
lock(command_buffer_lock_
);
589 command_buffer_
->Flush(put_offset
);
591 // Update state before signaling the flush event.
592 base::AutoLock
lock(state_after_last_flush_lock_
);
593 state_after_last_flush_
= command_buffer_
->GetLastState();
595 DCHECK((!error::IsError(state_after_last_flush_
.error
) && !context_lost_
) ||
596 (error::IsError(state_after_last_flush_
.error
) && context_lost_
));
598 // If we've processed all pending commands but still have pending queries,
599 // pump idle work until the query is passed.
600 if (put_offset
== state_after_last_flush_
.get_offset
&&
601 gpu_scheduler_
->HasMoreWork()) {
602 ScheduleIdleWorkOnGpuThread();
606 void InProcessCommandBuffer::PerformIdleWork() {
607 CheckSequencedThread();
608 idle_work_pending_
= false;
609 base::AutoLock
lock(command_buffer_lock_
);
610 if (MakeCurrent() && gpu_scheduler_
->HasMoreWork()) {
611 gpu_scheduler_
->PerformIdleWork();
612 ScheduleIdleWorkOnGpuThread();
616 void InProcessCommandBuffer::ScheduleIdleWorkOnGpuThread() {
617 CheckSequencedThread();
618 if (idle_work_pending_
)
620 idle_work_pending_
= true;
621 service_
->ScheduleIdleWork(
622 base::Bind(&InProcessCommandBuffer::PerformIdleWork
,
623 gpu_thread_weak_ptr_
));
626 void InProcessCommandBuffer::Flush(int32 put_offset
) {
627 CheckSequencedThread();
628 if (last_state_
.error
!= gpu::error::kNoError
)
631 if (last_put_offset_
== put_offset
)
634 last_put_offset_
= put_offset
;
635 base::Closure task
= base::Bind(&InProcessCommandBuffer::FlushOnGpuThread
,
636 gpu_thread_weak_ptr_
,
641 void InProcessCommandBuffer::OrderingBarrier(int32 put_offset
) {
645 void InProcessCommandBuffer::WaitForTokenInRange(int32 start
, int32 end
) {
646 CheckSequencedThread();
647 while (!InRange(start
, end
, GetLastToken()) &&
648 last_state_
.error
== gpu::error::kNoError
)
652 void InProcessCommandBuffer::WaitForGetOffsetInRange(int32 start
, int32 end
) {
653 CheckSequencedThread();
656 while (!InRange(start
, end
, last_state_
.get_offset
) &&
657 last_state_
.error
== gpu::error::kNoError
) {
663 void InProcessCommandBuffer::SetGetBuffer(int32 shm_id
) {
664 CheckSequencedThread();
665 if (last_state_
.error
!= gpu::error::kNoError
)
669 base::AutoLock
lock(command_buffer_lock_
);
670 command_buffer_
->SetGetBuffer(shm_id
);
671 last_put_offset_
= 0;
674 base::AutoLock
lock(state_after_last_flush_lock_
);
675 state_after_last_flush_
= command_buffer_
->GetLastState();
679 scoped_refptr
<Buffer
> InProcessCommandBuffer::CreateTransferBuffer(size_t size
,
681 CheckSequencedThread();
682 base::AutoLock
lock(command_buffer_lock_
);
683 return command_buffer_
->CreateTransferBuffer(size
, id
);
686 void InProcessCommandBuffer::DestroyTransferBuffer(int32 id
) {
687 CheckSequencedThread();
689 base::Bind(&InProcessCommandBuffer::DestroyTransferBufferOnGpuThread
,
690 base::Unretained(this),
696 void InProcessCommandBuffer::DestroyTransferBufferOnGpuThread(int32 id
) {
697 base::AutoLock
lock(command_buffer_lock_
);
698 command_buffer_
->DestroyTransferBuffer(id
);
701 gpu::Capabilities
InProcessCommandBuffer::GetCapabilities() {
702 return capabilities_
;
705 int32
InProcessCommandBuffer::CreateImage(ClientBuffer buffer
,
708 unsigned internalformat
) {
709 CheckSequencedThread();
711 DCHECK(gpu_memory_buffer_manager_
);
712 gfx::GpuMemoryBuffer
* gpu_memory_buffer
=
713 gpu_memory_buffer_manager_
->GpuMemoryBufferFromClientBuffer(buffer
);
714 DCHECK(gpu_memory_buffer
);
716 int32 new_id
= next_image_id_
.GetNext();
718 DCHECK(gpu::ImageFactory::IsGpuMemoryBufferFormatSupported(
719 gpu_memory_buffer
->GetFormat(), capabilities_
));
720 DCHECK(gpu::ImageFactory::IsImageFormatCompatibleWithGpuMemoryBufferFormat(
721 internalformat
, gpu_memory_buffer
->GetFormat()));
723 // This handle is owned by the GPU thread and must be passed to it or it
724 // will leak. In otherwords, do not early out on error between here and the
725 // queuing of the CreateImage task below.
726 bool requires_sync_point
= false;
727 gfx::GpuMemoryBufferHandle handle
=
728 ShareGpuMemoryBufferToGpuThread(gpu_memory_buffer
->GetHandle(),
729 &requires_sync_point
);
731 QueueTask(base::Bind(&InProcessCommandBuffer::CreateImageOnGpuThread
,
732 base::Unretained(this),
735 gfx::Size(width
, height
),
736 gpu_memory_buffer
->GetFormat(),
739 if (requires_sync_point
) {
740 gpu_memory_buffer_manager_
->SetDestructionSyncPoint(gpu_memory_buffer
,
747 void InProcessCommandBuffer::CreateImageOnGpuThread(
749 const gfx::GpuMemoryBufferHandle
& handle
,
750 const gfx::Size
& size
,
751 gfx::GpuMemoryBuffer::Format format
,
752 uint32 internalformat
) {
756 gpu::gles2::ImageManager
* image_manager
= decoder_
->GetImageManager();
757 DCHECK(image_manager
);
758 if (image_manager
->LookupImage(id
)) {
759 LOG(ERROR
) << "Image already exists with same ID.";
763 switch (handle
.type
) {
764 case gfx::SHARED_MEMORY_BUFFER
: {
765 scoped_refptr
<gfx::GLImageSharedMemory
> image(
766 new gfx::GLImageSharedMemory(size
, internalformat
));
767 if (!image
->Initialize(handle
, format
)) {
768 LOG(ERROR
) << "Failed to initialize image.";
772 image_manager
->AddImage(image
.get(), id
);
776 if (!image_factory_
) {
777 LOG(ERROR
) << "Image factory missing but required by buffer type.";
781 // Note: this assumes that client ID is always 0.
782 const int kClientId
= 0;
784 scoped_refptr
<gfx::GLImage
> image
=
785 image_factory_
->CreateImageForGpuMemoryBuffer(
786 handle
, size
, format
, internalformat
, kClientId
);
788 LOG(ERROR
) << "Failed to create image for buffer.";
792 image_manager
->AddImage(image
.get(), id
);
798 void InProcessCommandBuffer::DestroyImage(int32 id
) {
799 CheckSequencedThread();
801 QueueTask(base::Bind(&InProcessCommandBuffer::DestroyImageOnGpuThread
,
802 base::Unretained(this),
806 void InProcessCommandBuffer::DestroyImageOnGpuThread(int32 id
) {
810 gpu::gles2::ImageManager
* image_manager
= decoder_
->GetImageManager();
811 DCHECK(image_manager
);
812 if (!image_manager
->LookupImage(id
)) {
813 LOG(ERROR
) << "Image with ID doesn't exist.";
817 image_manager
->RemoveImage(id
);
820 int32
InProcessCommandBuffer::CreateGpuMemoryBufferImage(
823 unsigned internalformat
,
825 CheckSequencedThread();
827 DCHECK(gpu_memory_buffer_manager_
);
828 scoped_ptr
<gfx::GpuMemoryBuffer
> buffer(
829 gpu_memory_buffer_manager_
->AllocateGpuMemoryBuffer(
830 gfx::Size(width
, height
),
831 gpu::ImageFactory::ImageFormatToGpuMemoryBufferFormat(internalformat
),
832 gpu::ImageFactory::ImageUsageToGpuMemoryBufferUsage(usage
)));
836 return CreateImage(buffer
->AsClientBuffer(), width
, height
, internalformat
);
839 uint32
InProcessCommandBuffer::InsertSyncPoint() {
840 uint32 sync_point
= g_sync_point_manager
.Get().GenerateSyncPoint();
841 QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread
,
842 base::Unretained(this),
847 uint32
InProcessCommandBuffer::InsertFutureSyncPoint() {
848 return g_sync_point_manager
.Get().GenerateSyncPoint();
851 void InProcessCommandBuffer::RetireSyncPoint(uint32 sync_point
) {
852 QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread
,
853 base::Unretained(this),
857 void InProcessCommandBuffer::RetireSyncPointOnGpuThread(uint32 sync_point
) {
858 gles2::MailboxManager
* mailbox_manager
=
859 decoder_
->GetContextGroup()->mailbox_manager();
860 if (mailbox_manager
->UsesSync()) {
861 bool make_current_success
= false;
863 base::AutoLock
lock(command_buffer_lock_
);
864 make_current_success
= MakeCurrent();
866 if (make_current_success
)
867 mailbox_manager
->PushTextureUpdates(sync_point
);
869 g_sync_point_manager
.Get().RetireSyncPoint(sync_point
);
872 void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point
,
873 const base::Closure
& callback
) {
874 CheckSequencedThread();
875 QueueTask(base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread
,
876 base::Unretained(this),
878 WrapCallback(callback
)));
881 bool InProcessCommandBuffer::WaitSyncPointOnGpuThread(unsigned sync_point
) {
882 g_sync_point_manager
.Get().WaitSyncPoint(sync_point
);
883 gles2::MailboxManager
* mailbox_manager
=
884 decoder_
->GetContextGroup()->mailbox_manager();
885 mailbox_manager
->PullTextureUpdates(sync_point
);
889 void InProcessCommandBuffer::SignalSyncPointOnGpuThread(
891 const base::Closure
& callback
) {
892 g_sync_point_manager
.Get().AddSyncPointCallback(sync_point
, callback
);
895 void InProcessCommandBuffer::SignalQuery(unsigned query_id
,
896 const base::Closure
& callback
) {
897 CheckSequencedThread();
898 QueueTask(base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread
,
899 base::Unretained(this),
901 WrapCallback(callback
)));
904 void InProcessCommandBuffer::SignalQueryOnGpuThread(
906 const base::Closure
& callback
) {
907 gles2::QueryManager
* query_manager_
= decoder_
->GetQueryManager();
908 DCHECK(query_manager_
);
910 gles2::QueryManager::Query
* query
= query_manager_
->GetQuery(query_id
);
914 query
->AddCallback(callback
);
917 void InProcessCommandBuffer::SetSurfaceVisible(bool visible
) {}
919 uint32
InProcessCommandBuffer::CreateStreamTexture(uint32 texture_id
) {
920 base::WaitableEvent
completion(true, false);
921 uint32 stream_id
= 0;
922 base::Callback
<uint32(void)> task
=
923 base::Bind(&InProcessCommandBuffer::CreateStreamTextureOnGpuThread
,
924 base::Unretained(this),
927 base::Bind(&RunTaskWithResult
<uint32
>, task
, &stream_id
, &completion
));
932 void InProcessCommandBuffer::SetLock(base::Lock
*) {
935 uint32
InProcessCommandBuffer::CreateStreamTextureOnGpuThread(
936 uint32 client_texture_id
) {
937 #if defined(OS_ANDROID)
938 return stream_texture_manager_
->CreateStreamTexture(
939 client_texture_id
, decoder_
->GetContextGroup()->texture_manager());
945 gpu::error::Error
InProcessCommandBuffer::GetLastError() {
946 CheckSequencedThread();
947 return last_state_
.error
;
950 bool InProcessCommandBuffer::Initialize() {
958 const scoped_refptr
<base::SingleThreadTaskRunner
>& task_runner
,
959 const base::Closure
& callback
) {
960 // The task_runner.get() check is to support using InProcessCommandBuffer on
961 // a thread without a message loop.
962 if (task_runner
.get() && !task_runner
->BelongsToCurrentThread()) {
963 task_runner
->PostTask(FROM_HERE
, callback
);
969 void RunOnTargetThread(scoped_ptr
<base::Closure
> callback
) {
970 DCHECK(callback
.get());
974 } // anonymous namespace
976 base::Closure
InProcessCommandBuffer::WrapCallback(
977 const base::Closure
& callback
) {
978 // Make sure the callback gets deleted on the target thread by passing
980 scoped_ptr
<base::Closure
> scoped_callback(new base::Closure(callback
));
981 base::Closure callback_on_client_thread
=
982 base::Bind(&RunOnTargetThread
, base::Passed(&scoped_callback
));
983 base::Closure wrapped_callback
=
984 base::Bind(&PostCallback
, base::ThreadTaskRunnerHandle::IsSet()
985 ? base::ThreadTaskRunnerHandle::Get()
987 callback_on_client_thread
);
988 return wrapped_callback
;
991 #if defined(OS_ANDROID)
992 scoped_refptr
<gfx::SurfaceTexture
>
993 InProcessCommandBuffer::GetSurfaceTexture(uint32 stream_id
) {
994 DCHECK(stream_texture_manager_
);
995 return stream_texture_manager_
->GetSurfaceTexture(stream_id
);