1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "gpu/command_buffer/service/in_process_command_buffer.h"
11 #include "base/bind.h"
12 #include "base/bind_helpers.h"
13 #include "base/command_line.h"
14 #include "base/lazy_instance.h"
15 #include "base/location.h"
16 #include "base/logging.h"
17 #include "base/memory/weak_ptr.h"
18 #include "base/sequence_checker.h"
19 #include "base/single_thread_task_runner.h"
20 #include "base/thread_task_runner_handle.h"
21 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
22 #include "gpu/command_buffer/common/value_state.h"
23 #include "gpu/command_buffer/service/command_buffer_service.h"
24 #include "gpu/command_buffer/service/context_group.h"
25 #include "gpu/command_buffer/service/gl_context_virtual.h"
26 #include "gpu/command_buffer/service/gpu_scheduler.h"
27 #include "gpu/command_buffer/service/gpu_switches.h"
28 #include "gpu/command_buffer/service/image_factory.h"
29 #include "gpu/command_buffer/service/image_manager.h"
30 #include "gpu/command_buffer/service/mailbox_manager.h"
31 #include "gpu/command_buffer/service/memory_program_cache.h"
32 #include "gpu/command_buffer/service/memory_tracking.h"
33 #include "gpu/command_buffer/service/query_manager.h"
34 #include "gpu/command_buffer/service/sync_point_manager.h"
35 #include "gpu/command_buffer/service/transfer_buffer_manager.h"
36 #include "gpu/command_buffer/service/valuebuffer_manager.h"
37 #include "ui/gfx/geometry/size.h"
38 #include "ui/gl/gl_context.h"
39 #include "ui/gl/gl_image.h"
40 #include "ui/gl/gl_image_shared_memory.h"
41 #include "ui/gl/gl_share_group.h"
43 #if defined(OS_ANDROID)
44 #include "gpu/command_buffer/service/stream_texture_manager_in_process_android.h"
45 #include "ui/gl/android/surface_texture.h"
50 #include "base/process/process_handle.h"
// Runs |task| and signals |completion| when it has finished, so a caller
// blocked on another thread can wait for the result.
// NOTE(review): the template header, the result out-parameter, and the body
// (orig. lines 56-63) are not visible in this chunk — confirm against the
// full file.
58 static void RunTaskWithResult(base::Callback
<T(void)> task
,
60 base::WaitableEvent
* completion
) {
// Bundles the process-global SyncPointManager with the GpuInProcessThread
// that serves InProcessCommandBuffers created without an explicit Service
// (see g_default_service below).
65 struct GpuInProcessThreadHolder
{
66 GpuInProcessThreadHolder()
67 : sync_point_manager(new SyncPointManager(false)),
68 gpu_thread(new GpuInProcessThread(sync_point_manager
.get())) {}
// Declared before |gpu_thread|: the thread is handed a raw pointer to this
// manager, so the manager must be constructed first.
69 scoped_ptr
<SyncPointManager
> sync_point_manager
;
70 scoped_refptr
<InProcessCommandBuffer::Service
> gpu_thread
;
// Lazily-created default service, shared by every InProcessCommandBuffer
// constructed with a null Service (see GetInitialService()).
73 base::LazyInstance
<GpuInProcessThreadHolder
> g_default_service
=
74 LAZY_INSTANCE_INITIALIZER
;
// RAII helper that signals |event| when it goes out of scope, guaranteeing
// the waiter is released on every exit path.
// NOTE(review): the enclosing class declaration line (orig. ~76-77) is not
// visible in this chunk.
78 explicit ScopedEvent(base::WaitableEvent
* event
) : event_(event
) {}
79 ~ScopedEvent() { event_
->Signal(); }
// Not owned; must outlive this ScopedEvent.
82 base::WaitableEvent
* event_
;
// Duplicates |source_handle| so the GPU thread holds its own reference to
// the underlying shared-memory segment.
85 base::SharedMemoryHandle
ShareToGpuThread(
86 base::SharedMemoryHandle source_handle
) {
87 return base::SharedMemory::DuplicateHandle(source_handle
);
// Prepares a GpuMemoryBufferHandle for use on the GPU thread. Shared-memory
// buffers are duplicated (no sync point needed); native buffer types are
// passed through by reference and therefore require a sync point. Falls
// through to a null handle for unhandled types.
90 gfx::GpuMemoryBufferHandle
ShareGpuMemoryBufferToGpuThread(
91 const gfx::GpuMemoryBufferHandle
& source_handle
,
92 bool* requires_sync_point
) {
93 switch (source_handle
.type
) {
94 case gfx::SHARED_MEMORY_BUFFER
: {
95 gfx::GpuMemoryBufferHandle handle
;
96 handle
.type
= gfx::SHARED_MEMORY_BUFFER
;
97 handle
.handle
= ShareToGpuThread(source_handle
.handle
);
98 *requires_sync_point
= false;
// NOTE(review): the return of the duplicated |handle| (orig. lines 99-100)
// is not visible in this chunk — confirm against the full file.
// Native buffer types: shared by reference, so the consumer must honor a
// sync point before the producer reuses the buffer.
101 case gfx::IO_SURFACE_BUFFER
:
102 case gfx::SURFACE_TEXTURE_BUFFER
:
103 case gfx::OZONE_NATIVE_PIXMAP
:
104 *requires_sync_point
= true;
105 return source_handle
;
// Unhandled buffer type: return an empty (null) handle.
108 return gfx::GpuMemoryBufferHandle();
// Returns the service to use for a new InProcessCommandBuffer, falling back
// to the lazily-created process-global GPU thread when the caller supplied
// none.
// NOTE(review): the early return for a non-null |service| (orig. lines
// 114-116) is not visible in this chunk — confirm against the full file.
112 scoped_refptr
<InProcessCommandBuffer::Service
> GetInitialService(
113 const scoped_refptr
<InProcessCommandBuffer::Service
>& service
) {
117 // Call base::ThreadTaskRunnerHandle::IsSet() to ensure that it is
118 // instantiated before we create the GPU thread, otherwise shutdown order will
119 // delete the ThreadTaskRunnerHandle before the GPU thread's message loop,
120 // and when the message loop is shut down, it will recreate
121 // ThreadTaskRunnerHandle, which will re-add a new task to the AtExitManager,
122 // which causes a deadlock because it's already locked.
123 base::ThreadTaskRunnerHandle::IsSet();
124 return g_default_service
.Get().gpu_thread
;
127 } // anonymous namespace
129 InProcessCommandBuffer::Service::Service() {}
131 InProcessCommandBuffer::Service::~Service() {}
// Lazily creates and caches the GLShareGroup used by contexts created
// through this service.
// NOTE(review): the return statement and closing brace (orig. lines
// 137-138) are not visible in this chunk.
133 scoped_refptr
<gfx::GLShareGroup
>
134 InProcessCommandBuffer::Service::share_group() {
135 if (!share_group_
.get())
136 share_group_
= new gfx::GLShareGroup
;
// Lazily creates and caches the MailboxManager shared by contexts created
// through this service.
140 scoped_refptr
<gles2::MailboxManager
>
141 InProcessCommandBuffer::Service::mailbox_manager() {
142 if (!mailbox_manager_
.get()) {
143 mailbox_manager_
= gles2::MailboxManager::Create();
145 return mailbox_manager_
;
// Lazily creates and caches the SubscriptionRefSet shared by contexts
// created through this service.
148 scoped_refptr
<gles2::SubscriptionRefSet
>
149 InProcessCommandBuffer::Service::subscription_ref_set() {
150 if (!subscription_ref_set_
.get()) {
151 subscription_ref_set_
= new gles2::SubscriptionRefSet();
153 return subscription_ref_set_
;
// Lazily creates and caches the ValueStateMap holding pending valuebuffer
// state shared by contexts created through this service.
156 scoped_refptr
<ValueStateMap
>
157 InProcessCommandBuffer::Service::pending_valuebuffer_state() {
158 if (!pending_valuebuffer_state_
.get()) {
159 pending_valuebuffer_state_
= new ValueStateMap();
161 return pending_valuebuffer_state_
;
// Lazily creates the in-memory program cache — but only when the driver can
// actually return program binaries (ARB/OES_get_program_binary) and the
// cache has not been disabled on the command line. Returns null otherwise.
164 gpu::gles2::ProgramCache
* InProcessCommandBuffer::Service::program_cache() {
165 if (!program_cache_
.get() &&
166 (gfx::g_driver_gl
.ext
.b_GL_ARB_get_program_binary
||
167 gfx::g_driver_gl
.ext
.b_GL_OES_get_program_binary
) &&
168 !base::CommandLine::ForCurrentProcess()->HasSwitch(
169 switches::kDisableGpuProgramCache
)) {
170 program_cache_
.reset(new gpu::gles2::MemoryProgramCache());
172 return program_cache_
.get();
// Constructor. GetInitialService() makes a null |service| select the shared
// process-global GPU thread, so service_ is always non-null (DCHECKed).
175 InProcessCommandBuffer::InProcessCommandBuffer(
176 const scoped_refptr
<Service
>& service
)
177 : context_lost_(false),
178 idle_work_pending_(false),
179 image_factory_(nullptr),
180 last_put_offset_(-1),
181 gpu_memory_buffer_manager_(nullptr),
182 flush_event_(false, false),
183 service_(GetInitialService(service
)),
184 gpu_thread_weak_ptr_factory_(this) {
185 DCHECK(service_
.get());
// Consume the first image ID. NOTE(review): presumably this reserves 0 as
// an invalid/sentinel ID — confirm.
186 next_image_id_
.GetNext();
189 InProcessCommandBuffer::~InProcessCommandBuffer() {
// Resizes the onscreen surface (GPU thread). Must not be called for
// offscreen surfaces, per the DCHECK. |scale_factor| is unused in the code
// visible here.
193 void InProcessCommandBuffer::OnResizeView(gfx::Size size
, float scale_factor
) {
194 CheckSequencedThread();
195 DCHECK(!surface_
->IsOffscreen());
196 surface_
->Resize(size
);
// Makes the decoder's context current. On failure, records the loss reason
// and a kLostContext parse error on the command buffer. Caller must hold
// command_buffer_lock_ (asserted below).
199 bool InProcessCommandBuffer::MakeCurrent() {
200 CheckSequencedThread();
201 command_buffer_lock_
.AssertAcquired();
203 if (!context_lost_
&& decoder_
->MakeCurrent())
// NOTE(review): the success early-return between the check above and the
// error path below (orig. line 204) is not visible in this chunk.
205 DLOG(ERROR
) << "Context lost because MakeCurrent failed.";
206 command_buffer_
->SetContextLostReason(decoder_
->GetContextLostReason());
207 command_buffer_
->SetParseError(gpu::error::kLostContext
);
// Processes commands on the GPU thread by notifying the scheduler that the
// put pointer changed. Caller must hold command_buffer_lock_.
// NOTE(review): orig. lines 214-217 are not visible in this chunk.
211 void InProcessCommandBuffer::PumpCommands() {
212 CheckSequencedThread();
213 command_buffer_lock_
.AssertAcquired();
218 gpu_scheduler_
->PutChanged();
// Switches the command buffer to a new get buffer (GPU thread). Caller must
// hold command_buffer_lock_. The return statement (orig. ~225) is not
// visible in this chunk.
221 bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id
) {
222 CheckSequencedThread();
223 command_buffer_lock_
.AssertAcquired();
224 command_buffer_
->SetGetBuffer(transfer_buffer_id
);
228 bool InProcessCommandBuffer::Initialize(
229 scoped_refptr
<gfx::GLSurface
> surface
,
231 gfx::AcceleratedWidget window
,
232 const gfx::Size
& size
,
233 const std::vector
<int32
>& attribs
,
234 gfx::GpuPreference gpu_preference
,
235 const base::Closure
& context_lost_callback
,
236 InProcessCommandBuffer
* share_group
,
237 GpuMemoryBufferManager
* gpu_memory_buffer_manager
,
238 ImageFactory
* image_factory
) {
239 DCHECK(!share_group
|| service_
.get() == share_group
->service_
.get());
240 context_lost_callback_
= WrapCallback(context_lost_callback
);
243 // GPU thread must be the same as client thread due to GLSurface not being
245 sequence_checker_
.reset(new base::SequenceChecker
);
249 gpu::Capabilities capabilities
;
250 InitializeOnGpuThreadParams
params(is_offscreen
,
259 base::Callback
<bool(void)> init_task
=
260 base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread
,
261 base::Unretained(this),
264 base::WaitableEvent
completion(true, false);
267 base::Bind(&RunTaskWithResult
<bool>, init_task
, &result
, &completion
));
270 gpu_memory_buffer_manager_
= gpu_memory_buffer_manager
;
273 capabilities_
= capabilities
;
274 capabilities_
.image
= capabilities_
.image
&& gpu_memory_buffer_manager_
;
280 bool InProcessCommandBuffer::InitializeOnGpuThread(
281 const InitializeOnGpuThreadParams
& params
) {
282 CheckSequencedThread();
283 gpu_thread_weak_ptr_
= gpu_thread_weak_ptr_factory_
.GetWeakPtr();
285 DCHECK(params
.size
.width() >= 0 && params
.size
.height() >= 0);
287 TransferBufferManager
* manager
= new TransferBufferManager(nullptr);
288 transfer_buffer_manager_
= manager
;
289 manager
->Initialize();
291 scoped_ptr
<CommandBufferService
> command_buffer(
292 new CommandBufferService(transfer_buffer_manager_
.get()));
293 command_buffer
->SetPutOffsetChangeCallback(base::Bind(
294 &InProcessCommandBuffer::PumpCommands
, gpu_thread_weak_ptr_
));
295 command_buffer
->SetParseErrorCallback(base::Bind(
296 &InProcessCommandBuffer::OnContextLost
, gpu_thread_weak_ptr_
));
298 if (!command_buffer
->Initialize()) {
299 LOG(ERROR
) << "Could not initialize command buffer.";
300 DestroyOnGpuThread();
304 gl_share_group_
= params
.context_group
305 ? params
.context_group
->gl_share_group_
306 : service_
->share_group();
308 #if defined(OS_ANDROID)
309 stream_texture_manager_
.reset(new StreamTextureManagerInProcess
);
312 bool bind_generates_resource
= false;
313 decoder_
.reset(gles2::GLES2Decoder::Create(
315 ? params
.context_group
->decoder_
->GetContextGroup()
316 : new gles2::ContextGroup(service_
->mailbox_manager(), NULL
,
317 service_
->shader_translator_cache(),
318 service_
->framebuffer_completeness_cache(),
319 NULL
, service_
->subscription_ref_set(),
320 service_
->pending_valuebuffer_state(),
321 bind_generates_resource
)));
323 gpu_scheduler_
.reset(
324 new GpuScheduler(command_buffer
.get(), decoder_
.get(), decoder_
.get()));
325 command_buffer
->SetGetBufferChangeCallback(base::Bind(
326 &GpuScheduler::SetGetBuffer
, base::Unretained(gpu_scheduler_
.get())));
327 command_buffer_
= command_buffer
.Pass();
329 decoder_
->set_engine(gpu_scheduler_
.get());
331 if (!surface_
.get()) {
332 if (params
.is_offscreen
)
333 surface_
= gfx::GLSurface::CreateOffscreenGLSurface(params
.size
);
335 surface_
= gfx::GLSurface::CreateViewGLSurface(params
.window
);
338 if (!surface_
.get()) {
339 LOG(ERROR
) << "Could not create GLSurface.";
340 DestroyOnGpuThread();
344 if (service_
->UseVirtualizedGLContexts() ||
345 decoder_
->GetContextGroup()
348 .use_virtualized_gl_contexts
) {
349 context_
= gl_share_group_
->GetSharedContext();
350 if (!context_
.get()) {
351 context_
= gfx::GLContext::CreateGLContext(
352 gl_share_group_
.get(), surface_
.get(), params
.gpu_preference
);
353 gl_share_group_
->SetSharedContext(context_
.get());
356 context_
= new GLContextVirtual(
357 gl_share_group_
.get(), context_
.get(), decoder_
->AsWeakPtr());
358 if (context_
->Initialize(surface_
.get(), params
.gpu_preference
)) {
359 VLOG(1) << "Created virtual GL context.";
364 context_
= gfx::GLContext::CreateGLContext(
365 gl_share_group_
.get(), surface_
.get(), params
.gpu_preference
);
368 if (!context_
.get()) {
369 LOG(ERROR
) << "Could not create GLContext.";
370 DestroyOnGpuThread();
374 if (!context_
->MakeCurrent(surface_
.get())) {
375 LOG(ERROR
) << "Could not make context current.";
376 DestroyOnGpuThread();
380 if (!decoder_
->GetContextGroup()->has_program_cache() &&
381 !decoder_
->GetContextGroup()
384 .disable_program_cache
) {
385 decoder_
->GetContextGroup()->set_program_cache(service_
->program_cache());
388 gles2::DisallowedFeatures disallowed_features
;
389 disallowed_features
.gpu_memory_manager
= true;
390 if (!decoder_
->Initialize(surface_
,
396 LOG(ERROR
) << "Could not initialize decoder.";
397 DestroyOnGpuThread();
400 *params
.capabilities
= decoder_
->GetCapabilities();
402 if (!params
.is_offscreen
) {
403 decoder_
->SetResizeCallback(base::Bind(
404 &InProcessCommandBuffer::OnResizeView
, gpu_thread_weak_ptr_
));
406 decoder_
->SetWaitSyncPointCallback(
407 base::Bind(&InProcessCommandBuffer::WaitSyncPointOnGpuThread
,
408 base::Unretained(this)));
410 image_factory_
= params
.image_factory
;
415 void InProcessCommandBuffer::Destroy() {
416 CheckSequencedThread();
418 base::WaitableEvent
completion(true, false);
420 base::Callback
<bool(void)> destroy_task
= base::Bind(
421 &InProcessCommandBuffer::DestroyOnGpuThread
, base::Unretained(this));
423 base::Bind(&RunTaskWithResult
<bool>, destroy_task
, &result
, &completion
));
// Tears down GPU-thread state: invalidates weak pointers so queued tasks
// become no-ops, then destroys the command buffer and decoder. GL cleanup
// is attempted only if the context can still be made current.
427 bool InProcessCommandBuffer::DestroyOnGpuThread() {
428 CheckSequencedThread();
429 gpu_thread_weak_ptr_factory_
.InvalidateWeakPtrs();
430 command_buffer_
.reset();
431 // Clean up GL resources if possible.
432 bool have_context
= context_
.get() && context_
->MakeCurrent(surface_
.get());
434 decoder_
->Destroy(have_context
);
// NOTE(review): resets of other members (orig. lines 435-438) are not
// visible in this chunk.
439 gl_share_group_
= NULL
;
440 #if defined(OS_ANDROID)
441 stream_texture_manager_
.reset();
// Debug check that calls happen on the client's sequence. A null
// sequence_checker_ (thread without a message loop) disables the check.
447 void InProcessCommandBuffer::CheckSequencedThread() {
448 DCHECK(!sequence_checker_
||
449 sequence_checker_
->CalledOnValidSequencedThread());
// Marks the context lost and fires the client's context-lost callback at
// most once (the callback is Reset() after running).
452 void InProcessCommandBuffer::OnContextLost() {
453 CheckSequencedThread();
454 if (!context_lost_callback_
.is_null()) {
455 context_lost_callback_
.Run();
456 context_lost_callback_
.Reset();
459 context_lost_
= true;
// Refreshes last_state_ from the snapshot taken at the last flush, under
// the snapshot lock.
462 CommandBuffer::State
InProcessCommandBuffer::GetStateFast() {
463 CheckSequencedThread();
464 base::AutoLock
lock(state_after_last_flush_lock_
);
// Unsigned wraparound-safe generation comparison: only adopt the snapshot
// if it is not older than last_state_.
465 if (state_after_last_flush_
.generation
- last_state_
.generation
< 0x80000000U
)
466 last_state_
= state_after_last_flush_
;
470 CommandBuffer::State
InProcessCommandBuffer::GetLastState() {
471 CheckSequencedThread();
475 int32
InProcessCommandBuffer::GetLastToken() {
476 CheckSequencedThread();
478 return last_state_
.token
;
// Executes a flush on the GPU thread. ScopedEvent guarantees flush_event_
// is signaled on every exit path, after the state snapshot is updated.
481 void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset
) {
482 CheckSequencedThread();
483 ScopedEvent
handle_flush(&flush_event_
);
484 base::AutoLock
lock(command_buffer_lock_
);
485 command_buffer_
->Flush(put_offset
);
487 // Update state before signaling the flush event.
// NOTE(review): the opening brace of the inner scope for this second
// AutoLock (orig. line 486/490) is not visible in this chunk.
488 base::AutoLock
lock(state_after_last_flush_lock_
);
489 state_after_last_flush_
= command_buffer_
->GetLastState();
// Invariant: an error state and context_lost_ must agree.
491 DCHECK((!error::IsError(state_after_last_flush_
.error
) && !context_lost_
) ||
492 (error::IsError(state_after_last_flush_
.error
) && context_lost_
));
494 // If we've processed all pending commands but still have pending queries,
495 // pump idle work until the query is passed.
496 if (put_offset
== state_after_last_flush_
.get_offset
&&
497 gpu_scheduler_
->HasMoreWork()) {
498 ScheduleIdleWorkOnGpuThread();
// Runs one round of scheduler idle work (GPU thread) and re-schedules
// itself while more work remains and the context can be made current.
502 void InProcessCommandBuffer::PerformIdleWork() {
503 CheckSequencedThread();
504 idle_work_pending_
= false;
505 base::AutoLock
lock(command_buffer_lock_
);
506 if (MakeCurrent() && gpu_scheduler_
->HasMoreWork()) {
507 gpu_scheduler_
->PerformIdleWork();
508 ScheduleIdleWorkOnGpuThread();
// Posts PerformIdleWork via the service, coalescing requests through the
// idle_work_pending_ flag so only one task is in flight at a time.
// NOTE(review): the early return when idle work is already pending (orig.
// line 515) is not visible in this chunk.
512 void InProcessCommandBuffer::ScheduleIdleWorkOnGpuThread() {
513 CheckSequencedThread();
514 if (idle_work_pending_
)
516 idle_work_pending_
= true;
517 service_
->ScheduleIdleWork(
518 base::Bind(&InProcessCommandBuffer::PerformIdleWork
,
519 gpu_thread_weak_ptr_
));
// Client-side flush: no-ops when the context is in error or the put offset
// has not moved, otherwise queues FlushOnGpuThread.
// NOTE(review): the early returns (orig. lines 525-529) and the QueueTask
// tail of this function (orig. lines 533+) are not visible in this chunk.
522 void InProcessCommandBuffer::Flush(int32 put_offset
) {
523 CheckSequencedThread();
524 if (last_state_
.error
!= gpu::error::kNoError
)
527 if (last_put_offset_
== put_offset
)
530 last_put_offset_
= put_offset
;
531 base::Closure task
= base::Bind(&InProcessCommandBuffer::FlushOnGpuThread
,
532 gpu_thread_weak_ptr_
,
537 void InProcessCommandBuffer::OrderingBarrier(int32 put_offset
) {
541 void InProcessCommandBuffer::WaitForTokenInRange(int32 start
, int32 end
) {
542 CheckSequencedThread();
543 while (!InRange(start
, end
, GetLastToken()) &&
544 last_state_
.error
== gpu::error::kNoError
)
548 void InProcessCommandBuffer::WaitForGetOffsetInRange(int32 start
, int32 end
) {
549 CheckSequencedThread();
552 while (!InRange(start
, end
, last_state_
.get_offset
) &&
553 last_state_
.error
== gpu::error::kNoError
) {
559 void InProcessCommandBuffer::SetGetBuffer(int32 shm_id
) {
560 CheckSequencedThread();
561 if (last_state_
.error
!= gpu::error::kNoError
)
564 base::WaitableEvent
completion(true, false);
566 base::Bind(&InProcessCommandBuffer::SetGetBufferOnGpuThread
,
567 base::Unretained(this), shm_id
, &completion
);
572 base::AutoLock
lock(state_after_last_flush_lock_
);
573 state_after_last_flush_
= command_buffer_
->GetLastState();
577 void InProcessCommandBuffer::SetGetBufferOnGpuThread(
579 base::WaitableEvent
* completion
) {
580 base::AutoLock
lock(command_buffer_lock_
);
581 command_buffer_
->SetGetBuffer(shm_id
);
582 last_put_offset_
= 0;
583 completion
->Signal();
586 scoped_refptr
<Buffer
> InProcessCommandBuffer::CreateTransferBuffer(size_t size
,
588 CheckSequencedThread();
589 base::AutoLock
lock(command_buffer_lock_
);
590 return command_buffer_
->CreateTransferBuffer(size
, id
);
593 void InProcessCommandBuffer::DestroyTransferBuffer(int32 id
) {
594 CheckSequencedThread();
596 base::Bind(&InProcessCommandBuffer::DestroyTransferBufferOnGpuThread
,
597 base::Unretained(this),
603 void InProcessCommandBuffer::DestroyTransferBufferOnGpuThread(int32 id
) {
604 base::AutoLock
lock(command_buffer_lock_
);
605 command_buffer_
->DestroyTransferBuffer(id
);
608 gpu::Capabilities
InProcessCommandBuffer::GetCapabilities() {
609 return capabilities_
;
612 int32
InProcessCommandBuffer::CreateImage(ClientBuffer buffer
,
615 unsigned internalformat
) {
616 CheckSequencedThread();
618 DCHECK(gpu_memory_buffer_manager_
);
619 gfx::GpuMemoryBuffer
* gpu_memory_buffer
=
620 gpu_memory_buffer_manager_
->GpuMemoryBufferFromClientBuffer(buffer
);
621 DCHECK(gpu_memory_buffer
);
623 int32 new_id
= next_image_id_
.GetNext();
625 DCHECK(gpu::ImageFactory::IsGpuMemoryBufferFormatSupported(
626 gpu_memory_buffer
->GetFormat(), capabilities_
));
627 DCHECK(gpu::ImageFactory::IsImageFormatCompatibleWithGpuMemoryBufferFormat(
628 internalformat
, gpu_memory_buffer
->GetFormat()));
630 // This handle is owned by the GPU thread and must be passed to it or it
631 // will leak. In other words, do not early out on error between here and the
632 // queuing of the CreateImage task below.
633 bool requires_sync_point
= false;
634 gfx::GpuMemoryBufferHandle handle
=
635 ShareGpuMemoryBufferToGpuThread(gpu_memory_buffer
->GetHandle(),
636 &requires_sync_point
);
638 QueueTask(base::Bind(&InProcessCommandBuffer::CreateImageOnGpuThread
,
639 base::Unretained(this),
642 gfx::Size(width
, height
),
643 gpu_memory_buffer
->GetFormat(),
646 if (requires_sync_point
) {
647 gpu_memory_buffer_manager_
->SetDestructionSyncPoint(gpu_memory_buffer
,
654 void InProcessCommandBuffer::CreateImageOnGpuThread(
656 const gfx::GpuMemoryBufferHandle
& handle
,
657 const gfx::Size
& size
,
658 gfx::BufferFormat format
,
659 uint32 internalformat
) {
663 gpu::gles2::ImageManager
* image_manager
= decoder_
->GetImageManager();
664 DCHECK(image_manager
);
665 if (image_manager
->LookupImage(id
)) {
666 LOG(ERROR
) << "Image already exists with same ID.";
670 switch (handle
.type
) {
671 case gfx::SHARED_MEMORY_BUFFER
: {
672 scoped_refptr
<gfx::GLImageSharedMemory
> image(
673 new gfx::GLImageSharedMemory(size
, internalformat
));
674 if (!image
->Initialize(handle
, format
)) {
675 LOG(ERROR
) << "Failed to initialize image.";
679 image_manager
->AddImage(image
.get(), id
);
683 if (!image_factory_
) {
684 LOG(ERROR
) << "Image factory missing but required by buffer type.";
688 // Note: this assumes that client ID is always 0.
689 const int kClientId
= 0;
691 scoped_refptr
<gfx::GLImage
> image
=
692 image_factory_
->CreateImageForGpuMemoryBuffer(
693 handle
, size
, format
, internalformat
, kClientId
);
695 LOG(ERROR
) << "Failed to create image for buffer.";
699 image_manager
->AddImage(image
.get(), id
);
705 void InProcessCommandBuffer::DestroyImage(int32 id
) {
706 CheckSequencedThread();
708 QueueTask(base::Bind(&InProcessCommandBuffer::DestroyImageOnGpuThread
,
709 base::Unretained(this),
713 void InProcessCommandBuffer::DestroyImageOnGpuThread(int32 id
) {
717 gpu::gles2::ImageManager
* image_manager
= decoder_
->GetImageManager();
718 DCHECK(image_manager
);
719 if (!image_manager
->LookupImage(id
)) {
720 LOG(ERROR
) << "Image with ID doesn't exist.";
724 image_manager
->RemoveImage(id
);
727 int32
InProcessCommandBuffer::CreateGpuMemoryBufferImage(
730 unsigned internalformat
,
732 CheckSequencedThread();
734 DCHECK(gpu_memory_buffer_manager_
);
735 scoped_ptr
<gfx::GpuMemoryBuffer
> buffer(
736 gpu_memory_buffer_manager_
->AllocateGpuMemoryBuffer(
737 gfx::Size(width
, height
),
738 gpu::ImageFactory::DefaultBufferFormatForImageFormat(internalformat
),
739 gpu::ImageFactory::ImageUsageToGpuMemoryBufferUsage(usage
)));
743 return CreateImage(buffer
->AsClientBuffer(), width
, height
, internalformat
);
746 uint32
InProcessCommandBuffer::InsertSyncPoint() {
747 uint32 sync_point
= service_
->sync_point_manager()->GenerateSyncPoint();
748 QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread
,
749 base::Unretained(this),
754 uint32
InProcessCommandBuffer::InsertFutureSyncPoint() {
755 return service_
->sync_point_manager()->GenerateSyncPoint();
758 void InProcessCommandBuffer::RetireSyncPoint(uint32 sync_point
) {
759 QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread
,
760 base::Unretained(this),
764 void InProcessCommandBuffer::RetireSyncPointOnGpuThread(uint32 sync_point
) {
765 gles2::MailboxManager
* mailbox_manager
=
766 decoder_
->GetContextGroup()->mailbox_manager();
767 if (mailbox_manager
->UsesSync()) {
768 bool make_current_success
= false;
770 base::AutoLock
lock(command_buffer_lock_
);
771 make_current_success
= MakeCurrent();
773 if (make_current_success
)
774 mailbox_manager
->PushTextureUpdates(sync_point
);
776 service_
->sync_point_manager()->RetireSyncPoint(sync_point
);
779 void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point
,
780 const base::Closure
& callback
) {
781 CheckSequencedThread();
782 QueueTask(base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread
,
783 base::Unretained(this),
785 WrapCallback(callback
)));
788 bool InProcessCommandBuffer::WaitSyncPointOnGpuThread(unsigned sync_point
) {
789 service_
->sync_point_manager()->WaitSyncPoint(sync_point
);
790 gles2::MailboxManager
* mailbox_manager
=
791 decoder_
->GetContextGroup()->mailbox_manager();
792 mailbox_manager
->PullTextureUpdates(sync_point
);
796 void InProcessCommandBuffer::SignalSyncPointOnGpuThread(
798 const base::Closure
& callback
) {
799 service_
->sync_point_manager()->AddSyncPointCallback(sync_point
, callback
);
802 void InProcessCommandBuffer::SignalQuery(unsigned query_id
,
803 const base::Closure
& callback
) {
804 CheckSequencedThread();
805 QueueTask(base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread
,
806 base::Unretained(this),
808 WrapCallback(callback
)));
811 void InProcessCommandBuffer::SignalQueryOnGpuThread(
813 const base::Closure
& callback
) {
814 gles2::QueryManager
* query_manager_
= decoder_
->GetQueryManager();
815 DCHECK(query_manager_
);
817 gles2::QueryManager::Query
* query
= query_manager_
->GetQuery(query_id
);
821 query
->AddCallback(callback
);
824 void InProcessCommandBuffer::SetSurfaceVisible(bool visible
) {}
826 uint32
InProcessCommandBuffer::CreateStreamTexture(uint32 texture_id
) {
827 base::WaitableEvent
completion(true, false);
828 uint32 stream_id
= 0;
829 base::Callback
<uint32(void)> task
=
830 base::Bind(&InProcessCommandBuffer::CreateStreamTextureOnGpuThread
,
831 base::Unretained(this),
834 base::Bind(&RunTaskWithResult
<uint32
>, task
, &stream_id
, &completion
));
839 void InProcessCommandBuffer::SetLock(base::Lock
*) {
842 bool InProcessCommandBuffer::IsGpuChannelLost() {
843 // There is no such channel to lose for in-process contexts. This only
844 // makes sense for out-of-process command buffers.
848 uint32
InProcessCommandBuffer::CreateStreamTextureOnGpuThread(
849 uint32 client_texture_id
) {
850 #if defined(OS_ANDROID)
851 return stream_texture_manager_
->CreateStreamTexture(
852 client_texture_id
, decoder_
->GetContextGroup()->texture_manager());
858 gpu::error::Error
InProcessCommandBuffer::GetLastError() {
859 CheckSequencedThread();
860 return last_state_
.error
;
863 bool InProcessCommandBuffer::Initialize() {
871 const scoped_refptr
<base::SingleThreadTaskRunner
>& task_runner
,
872 const base::Closure
& callback
) {
873 // The task_runner.get() check is to support using InProcessCommandBuffer on
874 // a thread without a message loop.
875 if (task_runner
.get() && !task_runner
->BelongsToCurrentThread()) {
876 task_runner
->PostTask(FROM_HERE
, callback
);
882 void RunOnTargetThread(scoped_ptr
<base::Closure
> callback
) {
883 DCHECK(callback
.get());
887 } // anonymous namespace
889 base::Closure
InProcessCommandBuffer::WrapCallback(
890 const base::Closure
& callback
) {
891 // Make sure the callback gets deleted on the target thread by passing
893 scoped_ptr
<base::Closure
> scoped_callback(new base::Closure(callback
));
894 base::Closure callback_on_client_thread
=
895 base::Bind(&RunOnTargetThread
, base::Passed(&scoped_callback
));
896 base::Closure wrapped_callback
=
897 base::Bind(&PostCallback
, base::ThreadTaskRunnerHandle::IsSet()
898 ? base::ThreadTaskRunnerHandle::Get()
900 callback_on_client_thread
);
901 return wrapped_callback
;
904 #if defined(OS_ANDROID)
905 scoped_refptr
<gfx::SurfaceTexture
>
906 InProcessCommandBuffer::GetSurfaceTexture(uint32 stream_id
) {
907 DCHECK(stream_texture_manager_
);
908 return stream_texture_manager_
->GetSurfaceTexture(stream_id
);
// Constructor. |sync_point_manager| is not owned and must outlive this
// thread (see GpuInProcessThreadHolder's declaration order).
912 GpuInProcessThread::GpuInProcessThread(SyncPointManager
* sync_point_manager
)
913 : base::Thread("GpuThread"), sync_point_manager_(sync_point_manager
) {
917 GpuInProcessThread::~GpuInProcessThread() {
921 void GpuInProcessThread::AddRef() const {
922 base::RefCountedThreadSafe
<GpuInProcessThread
>::AddRef();
924 void GpuInProcessThread::Release() const {
925 base::RefCountedThreadSafe
<GpuInProcessThread
>::Release();
// Posts |task| to this thread's task runner for execution on the GPU
// thread.
928 void GpuInProcessThread::ScheduleTask(const base::Closure
& task
) {
929 task_runner()->PostTask(FROM_HERE
, task
);
// Posts |callback| with a short delay so idle work yields to regular tasks.
932 void GpuInProcessThread::ScheduleIdleWork(const base::Closure
& callback
) {
933 // Match delay with GpuCommandBufferStub.
934 task_runner()->PostDelayedTask(FROM_HERE
, callback
,
935 base::TimeDelta::FromMilliseconds(2));
938 bool GpuInProcessThread::UseVirtualizedGLContexts() {
// Lazily creates and caches the ShaderTranslatorCache shared by contexts
// on this GPU thread.
942 scoped_refptr
<gles2::ShaderTranslatorCache
>
943 GpuInProcessThread::shader_translator_cache() {
944 if (!shader_translator_cache_
.get())
945 shader_translator_cache_
= new gpu::gles2::ShaderTranslatorCache
;
946 return shader_translator_cache_
;
949 scoped_refptr
<gles2::FramebufferCompletenessCache
>
950 GpuInProcessThread::framebuffer_completeness_cache() {
951 if (!framebuffer_completeness_cache_
.get())
952 framebuffer_completeness_cache_
=
953 new gpu::gles2::FramebufferCompletenessCache
;
954 return framebuffer_completeness_cache_
;
// Returns the (unowned) SyncPointManager supplied at construction.
957 SyncPointManager
* GpuInProcessThread::sync_point_manager() {
958 return sync_point_manager_
;