1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "gpu/command_buffer/service/in_process_command_buffer.h"
11 #include "base/bind.h"
12 #include "base/bind_helpers.h"
13 #include "base/lazy_instance.h"
14 #include "base/location.h"
15 #include "base/logging.h"
16 #include "base/memory/weak_ptr.h"
17 #include "base/sequence_checker.h"
18 #include "base/single_thread_task_runner.h"
19 #include "base/thread_task_runner_handle.h"
20 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
21 #include "gpu/command_buffer/common/value_state.h"
22 #include "gpu/command_buffer/service/command_buffer_service.h"
23 #include "gpu/command_buffer/service/context_group.h"
24 #include "gpu/command_buffer/service/gl_context_virtual.h"
25 #include "gpu/command_buffer/service/gpu_scheduler.h"
26 #include "gpu/command_buffer/service/image_factory.h"
27 #include "gpu/command_buffer/service/image_manager.h"
28 #include "gpu/command_buffer/service/mailbox_manager.h"
29 #include "gpu/command_buffer/service/memory_tracking.h"
30 #include "gpu/command_buffer/service/query_manager.h"
31 #include "gpu/command_buffer/service/sync_point_manager.h"
32 #include "gpu/command_buffer/service/transfer_buffer_manager.h"
33 #include "gpu/command_buffer/service/valuebuffer_manager.h"
34 #include "ui/gfx/geometry/size.h"
35 #include "ui/gl/gl_context.h"
36 #include "ui/gl/gl_image.h"
37 #include "ui/gl/gl_image_shared_memory.h"
38 #include "ui/gl/gl_share_group.h"
40 #if defined(OS_ANDROID)
41 #include "gpu/command_buffer/service/stream_texture_manager_in_process_android.h"
42 #include "ui/gl/android/surface_texture.h"
47 #include "base/process/process_handle.h"
55 static void RunTaskWithResult(base::Callback
<T(void)> task
,
57 base::WaitableEvent
* completion
) {
// Bundles the process-wide default GPU service: a SyncPointManager and a
// GpuInProcessThread that is constructed with it. Lives inside a
// LazyInstance (g_default_service below) so it is created on first use.
62 struct GpuInProcessThreadHolder
{
63 GpuInProcessThreadHolder()
64 : sync_point_manager(new SyncPointManager(false)),
65 gpu_thread(new GpuInProcessThread(sync_point_manager
.get())) {}
66 scoped_ptr
<SyncPointManager
> sync_point_manager
;
67 scoped_refptr
<InProcessCommandBuffer::Service
> gpu_thread
;
// NOTE(review): the extraction appears to have dropped the struct's
// closing "};" here (original line 68) — confirm against upstream.
// Process-wide lazily-initialized default service; fetched by
// GetInitialService() when the caller does not supply its own Service.
70 base::LazyInstance
<GpuInProcessThreadHolder
> g_default_service
=
71 LAZY_INSTANCE_INITIALIZER
;
// RAII helper: signals the wrapped WaitableEvent when it goes out of scope.
// NOTE(review): the "struct ScopedEvent {" header line (original line 74)
// appears to have been dropped by the extraction — confirm against upstream.
75 explicit ScopedEvent(base::WaitableEvent
* event
) : event_(event
) {}
// Destructor signals the event so waiters on another thread wake up.
76 ~ScopedEvent() { event_
->Signal(); }
// Non-owning pointer; the event must outlive this helper.
79 base::WaitableEvent
* event_
;
// Duplicates a shared-memory handle so the GPU thread gets its own copy
// that is safe to use independently of the client's handle.
82 base::SharedMemoryHandle
ShareToGpuThread(
83 base::SharedMemoryHandle source_handle
) {
84 return base::SharedMemory::DuplicateHandle(source_handle
);
// Produces a GpuMemoryBufferHandle usable on the GPU thread. Shared-memory
// buffers get a duplicated handle; native buffer types are passed through
// but require a sync point (reported via |requires_sync_point|) before the
// client may destroy the buffer. Unknown types fall through to an empty
// handle.
87 gfx::GpuMemoryBufferHandle
ShareGpuMemoryBufferToGpuThread(
88 const gfx::GpuMemoryBufferHandle
& source_handle
,
89 bool* requires_sync_point
) {
90 switch (source_handle
.type
) {
91 case gfx::SHARED_MEMORY_BUFFER
: {
92 gfx::GpuMemoryBufferHandle handle
;
93 handle
.type
= gfx::SHARED_MEMORY_BUFFER
;
// Duplicate the shm handle so the GPU thread owns its own copy.
94 handle
.handle
= ShareToGpuThread(source_handle
.handle
);
95 *requires_sync_point
= false;
// NOTE(review): the extraction appears to have dropped "return handle;"
// and the case's closing brace here (original lines 96-97) — confirm
// against upstream.
98 case gfx::IO_SURFACE_BUFFER
:
99 case gfx::SURFACE_TEXTURE_BUFFER
:
100 case gfx::OZONE_NATIVE_PIXMAP
:
// Native buffers are shared by value but must be kept alive until the
// GPU thread is done with them, hence the sync-point requirement.
101 *requires_sync_point
= true;
102 return source_handle
;
// Fallback for unhandled types: return an empty handle.
105 return gfx::GpuMemoryBufferHandle();
// Returns the Service to use: the caller-provided |service| is expected to
// take precedence, otherwise the lazily-created process-wide default
// (g_default_service) is returned.
// NOTE(review): the extraction appears to have dropped interior lines
// (original 111-113), presumably the early return for a non-null |service| —
// confirm against upstream.
109 scoped_refptr
<InProcessCommandBuffer::Service
> GetInitialService(
110 const scoped_refptr
<InProcessCommandBuffer::Service
>& service
) {
114 // Call base::ThreadTaskRunnerHandle::IsSet() to ensure that it is
115 // instantiated before we create the GPU thread, otherwise shutdown order will
116 // delete the ThreadTaskRunnerHandle before the GPU thread's message loop,
117 // and when the message loop is shutdown, it will recreate
118 // ThreadTaskRunnerHandle, which will re-add a new task to the AtExitManager,
119 // which causes a deadlock because it's already locked.
120 base::ThreadTaskRunnerHandle::IsSet();
121 return g_default_service
.Get().gpu_thread
;
124 } // anonymous namespace
126 InProcessCommandBuffer::Service::Service() {}
128 InProcessCommandBuffer::Service::~Service() {}
// Lazily creates and returns the GL share group shared by contexts created
// through this service.
// NOTE(review): the extraction appears to have dropped the trailing
// "return share_group_;" and closing brace (original lines 134-135) —
// confirm against upstream.
130 scoped_refptr
<gfx::GLShareGroup
>
131 InProcessCommandBuffer::Service::share_group() {
132 if (!share_group_
.get())
133 share_group_
= new gfx::GLShareGroup
;
// Lazily creates and returns the mailbox manager used for sharing textures
// between contexts on this service.
137 scoped_refptr
<gles2::MailboxManager
>
138 InProcessCommandBuffer::Service::mailbox_manager() {
139 if (!mailbox_manager_
.get()) {
140 mailbox_manager_
= gles2::MailboxManager::Create();
142 return mailbox_manager_
;
// Lazily creates and returns the subscription ref set shared across
// contexts on this service.
145 scoped_refptr
<gles2::SubscriptionRefSet
>
146 InProcessCommandBuffer::Service::subscription_ref_set() {
147 if (!subscription_ref_set_
.get()) {
148 subscription_ref_set_
= new gles2::SubscriptionRefSet();
150 return subscription_ref_set_
;
// Lazily creates and returns the pending valuebuffer state map shared
// across contexts on this service.
153 scoped_refptr
<ValueStateMap
>
154 InProcessCommandBuffer::Service::pending_valuebuffer_state() {
155 if (!pending_valuebuffer_state_
.get()) {
156 pending_valuebuffer_state_
= new ValueStateMap();
158 return pending_valuebuffer_state_
;
// Constructs the command buffer against |service| (or the process-wide
// default when |service| is null — see GetInitialService). Does not create
// any GL state; that happens in Initialize().
161 InProcessCommandBuffer::InProcessCommandBuffer(
162 const scoped_refptr
<Service
>& service
)
163 : context_lost_(false),
164 idle_work_pending_(false),
165 image_factory_(nullptr),
// -1 so the first Flush() with put_offset 0 is not treated as a no-op.
166 last_put_offset_(-1),
167 gpu_memory_buffer_manager_(nullptr),
168 flush_event_(false, false),
169 service_(GetInitialService(service
)),
170 gpu_thread_weak_ptr_factory_(this) {
171 DCHECK(service_
.get());
// Consume image ID 0 so IDs handed out later start at 1.
172 next_image_id_
.GetNext();
// Destructor.
// NOTE(review): the body (original lines 176-177, presumably a Destroy()
// call) appears to have been dropped by the extraction — confirm against
// upstream.
175 InProcessCommandBuffer::~InProcessCommandBuffer() {
// Resizes the onscreen surface. Only valid for non-offscreen surfaces.
// NOTE(review): |scale_factor| is not used in the visible body — confirm
// against upstream whether that is intentional.
179 void InProcessCommandBuffer::OnResizeView(gfx::Size size
, float scale_factor
) {
180 CheckSequencedThread();
181 DCHECK(!surface_
->IsOffscreen());
182 surface_
->Resize(size
);
// Makes the decoder's context current; on failure marks the command buffer
// lost (context-lost reason + kLostContext parse error). Must be called
// with command_buffer_lock_ held.
185 bool InProcessCommandBuffer::MakeCurrent() {
186 CheckSequencedThread();
187 command_buffer_lock_
.AssertAcquired();
189 if (!context_lost_
&& decoder_
->MakeCurrent())
// NOTE(review): the extraction appears to have dropped lines here
// (original 188, 190), presumably "return true;" on the success path and
// "context_lost_ = true;" before the error handling — confirm against
// upstream.
191 DLOG(ERROR
) << "Context lost because MakeCurrent failed.";
192 command_buffer_
->SetContextLostReason(decoder_
->GetContextLostReason());
193 command_buffer_
->SetParseError(gpu::error::kLostContext
);
// Processes pending commands by notifying the scheduler that the put
// pointer changed. Must be called with command_buffer_lock_ held.
// NOTE(review): interior lines (original 200-203) appear to have been
// dropped by the extraction — confirm against upstream.
197 void InProcessCommandBuffer::PumpCommands() {
198 CheckSequencedThread();
199 command_buffer_lock_
.AssertAcquired();
204 gpu_scheduler_
->PutChanged();
// Scheduler callback: switches the command buffer to the given transfer
// buffer. Must be called with command_buffer_lock_ held.
207 bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id
) {
208 CheckSequencedThread();
209 command_buffer_lock_
.AssertAcquired();
210 command_buffer_
->SetGetBuffer(transfer_buffer_id
);
214 bool InProcessCommandBuffer::Initialize(
215 scoped_refptr
<gfx::GLSurface
> surface
,
217 gfx::AcceleratedWidget window
,
218 const gfx::Size
& size
,
219 const std::vector
<int32
>& attribs
,
220 gfx::GpuPreference gpu_preference
,
221 const base::Closure
& context_lost_callback
,
222 InProcessCommandBuffer
* share_group
,
223 GpuMemoryBufferManager
* gpu_memory_buffer_manager
,
224 ImageFactory
* image_factory
) {
225 DCHECK(!share_group
|| service_
.get() == share_group
->service_
.get());
226 context_lost_callback_
= WrapCallback(context_lost_callback
);
229 // GPU thread must be the same as client thread due to GLSurface not being
231 sequence_checker_
.reset(new base::SequenceChecker
);
235 gpu::Capabilities capabilities
;
236 InitializeOnGpuThreadParams
params(is_offscreen
,
245 base::Callback
<bool(void)> init_task
=
246 base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread
,
247 base::Unretained(this),
250 base::WaitableEvent
completion(true, false);
253 base::Bind(&RunTaskWithResult
<bool>, init_task
, &result
, &completion
));
256 gpu_memory_buffer_manager_
= gpu_memory_buffer_manager
;
259 capabilities_
= capabilities
;
260 capabilities_
.image
= capabilities_
.image
&& gpu_memory_buffer_manager_
;
266 bool InProcessCommandBuffer::InitializeOnGpuThread(
267 const InitializeOnGpuThreadParams
& params
) {
268 CheckSequencedThread();
269 gpu_thread_weak_ptr_
= gpu_thread_weak_ptr_factory_
.GetWeakPtr();
271 DCHECK(params
.size
.width() >= 0 && params
.size
.height() >= 0);
273 TransferBufferManager
* manager
= new TransferBufferManager();
274 transfer_buffer_manager_
= manager
;
275 manager
->Initialize();
277 scoped_ptr
<CommandBufferService
> command_buffer(
278 new CommandBufferService(transfer_buffer_manager_
.get()));
279 command_buffer
->SetPutOffsetChangeCallback(base::Bind(
280 &InProcessCommandBuffer::PumpCommands
, gpu_thread_weak_ptr_
));
281 command_buffer
->SetParseErrorCallback(base::Bind(
282 &InProcessCommandBuffer::OnContextLost
, gpu_thread_weak_ptr_
));
284 if (!command_buffer
->Initialize()) {
285 LOG(ERROR
) << "Could not initialize command buffer.";
286 DestroyOnGpuThread();
290 gl_share_group_
= params
.context_group
291 ? params
.context_group
->gl_share_group_
292 : service_
->share_group();
294 #if defined(OS_ANDROID)
295 stream_texture_manager_
.reset(new StreamTextureManagerInProcess
);
298 bool bind_generates_resource
= false;
299 decoder_
.reset(gles2::GLES2Decoder::Create(
301 ? params
.context_group
->decoder_
->GetContextGroup()
302 : new gles2::ContextGroup(service_
->mailbox_manager(),
304 service_
->shader_translator_cache(),
306 service_
->subscription_ref_set(),
307 service_
->pending_valuebuffer_state(),
308 bind_generates_resource
)));
310 gpu_scheduler_
.reset(
311 new GpuScheduler(command_buffer
.get(), decoder_
.get(), decoder_
.get()));
312 command_buffer
->SetGetBufferChangeCallback(base::Bind(
313 &GpuScheduler::SetGetBuffer
, base::Unretained(gpu_scheduler_
.get())));
314 command_buffer_
= command_buffer
.Pass();
316 decoder_
->set_engine(gpu_scheduler_
.get());
318 if (!surface_
.get()) {
319 if (params
.is_offscreen
)
320 surface_
= gfx::GLSurface::CreateOffscreenGLSurface(params
.size
);
322 surface_
= gfx::GLSurface::CreateViewGLSurface(params
.window
);
325 if (!surface_
.get()) {
326 LOG(ERROR
) << "Could not create GLSurface.";
327 DestroyOnGpuThread();
331 if (service_
->UseVirtualizedGLContexts() ||
332 decoder_
->GetContextGroup()
335 .use_virtualized_gl_contexts
) {
336 context_
= gl_share_group_
->GetSharedContext();
337 if (!context_
.get()) {
338 context_
= gfx::GLContext::CreateGLContext(
339 gl_share_group_
.get(), surface_
.get(), params
.gpu_preference
);
340 gl_share_group_
->SetSharedContext(context_
.get());
343 context_
= new GLContextVirtual(
344 gl_share_group_
.get(), context_
.get(), decoder_
->AsWeakPtr());
345 if (context_
->Initialize(surface_
.get(), params
.gpu_preference
)) {
346 VLOG(1) << "Created virtual GL context.";
351 context_
= gfx::GLContext::CreateGLContext(
352 gl_share_group_
.get(), surface_
.get(), params
.gpu_preference
);
355 if (!context_
.get()) {
356 LOG(ERROR
) << "Could not create GLContext.";
357 DestroyOnGpuThread();
361 if (!context_
->MakeCurrent(surface_
.get())) {
362 LOG(ERROR
) << "Could not make context current.";
363 DestroyOnGpuThread();
367 gles2::DisallowedFeatures disallowed_features
;
368 disallowed_features
.gpu_memory_manager
= true;
369 if (!decoder_
->Initialize(surface_
,
375 LOG(ERROR
) << "Could not initialize decoder.";
376 DestroyOnGpuThread();
379 *params
.capabilities
= decoder_
->GetCapabilities();
381 if (!params
.is_offscreen
) {
382 decoder_
->SetResizeCallback(base::Bind(
383 &InProcessCommandBuffer::OnResizeView
, gpu_thread_weak_ptr_
));
385 decoder_
->SetWaitSyncPointCallback(
386 base::Bind(&InProcessCommandBuffer::WaitSyncPointOnGpuThread
,
387 base::Unretained(this)));
389 image_factory_
= params
.image_factory
;
394 void InProcessCommandBuffer::Destroy() {
395 CheckSequencedThread();
397 base::WaitableEvent
completion(true, false);
399 base::Callback
<bool(void)> destroy_task
= base::Bind(
400 &InProcessCommandBuffer::DestroyOnGpuThread
, base::Unretained(this));
402 base::Bind(&RunTaskWithResult
<bool>, destroy_task
, &result
, &completion
));
406 bool InProcessCommandBuffer::DestroyOnGpuThread() {
407 CheckSequencedThread();
408 gpu_thread_weak_ptr_factory_
.InvalidateWeakPtrs();
409 command_buffer_
.reset();
410 // Clean up GL resources if possible.
411 bool have_context
= context_
.get() && context_
->MakeCurrent(surface_
.get());
413 decoder_
->Destroy(have_context
);
418 gl_share_group_
= NULL
;
419 #if defined(OS_ANDROID)
420 stream_texture_manager_
.reset();
// Debug check that the caller is on the client sequence. A null
// sequence_checker_ disables the check (client thread usage without a
// message loop).
426 void InProcessCommandBuffer::CheckSequencedThread() {
427 DCHECK(!sequence_checker_
||
428 sequence_checker_
->CalledOnValidSequencedThread());
// Records context loss and runs the client's context-lost callback at most
// once (the callback is Reset() after running).
431 void InProcessCommandBuffer::OnContextLost() {
432 CheckSequencedThread();
433 if (!context_lost_callback_
.is_null()) {
434 context_lost_callback_
.Run();
435 context_lost_callback_
.Reset();
438 context_lost_
= true;
// Refreshes last_state_ from the state captured at the last flush. The
// unsigned generation subtraction (compared against 0x80000000U) accepts
// wrap-around, so a newer snapshot is taken even after the generation
// counter overflows.
441 CommandBuffer::State
InProcessCommandBuffer::GetStateFast() {
442 CheckSequencedThread();
443 base::AutoLock
lock(state_after_last_flush_lock_
);
444 if (state_after_last_flush_
.generation
- last_state_
.generation
< 0x80000000U
)
445 last_state_
= state_after_last_flush_
;
// Returns the most recently cached command buffer state.
// NOTE(review): the extraction appears to have dropped the trailing
// "return last_state_;" and closing brace (original lines 451-452) —
// confirm against upstream.
449 CommandBuffer::State
InProcessCommandBuffer::GetLastState() {
450 CheckSequencedThread();
// Returns the token from the most recently cached state.
// NOTE(review): original line 456 (presumably a GetStateFast() call) appears
// to have been dropped by the extraction — confirm against upstream.
454 int32
InProcessCommandBuffer::GetLastToken() {
455 CheckSequencedThread();
457 return last_state_
.token
;
460 void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset
) {
461 CheckSequencedThread();
462 ScopedEvent
handle_flush(&flush_event_
);
463 base::AutoLock
lock(command_buffer_lock_
);
464 command_buffer_
->Flush(put_offset
);
466 // Update state before signaling the flush event.
467 base::AutoLock
lock(state_after_last_flush_lock_
);
468 state_after_last_flush_
= command_buffer_
->GetLastState();
470 DCHECK((!error::IsError(state_after_last_flush_
.error
) && !context_lost_
) ||
471 (error::IsError(state_after_last_flush_
.error
) && context_lost_
));
473 // If we've processed all pending commands but still have pending queries,
474 // pump idle work until the query is passed.
475 if (put_offset
== state_after_last_flush_
.get_offset
&&
476 gpu_scheduler_
->HasMoreWork()) {
477 ScheduleIdleWorkOnGpuThread();
481 void InProcessCommandBuffer::PerformIdleWork() {
482 CheckSequencedThread();
483 idle_work_pending_
= false;
484 base::AutoLock
lock(command_buffer_lock_
);
485 if (MakeCurrent() && gpu_scheduler_
->HasMoreWork()) {
486 gpu_scheduler_
->PerformIdleWork();
487 ScheduleIdleWorkOnGpuThread();
491 void InProcessCommandBuffer::ScheduleIdleWorkOnGpuThread() {
492 CheckSequencedThread();
493 if (idle_work_pending_
)
495 idle_work_pending_
= true;
496 service_
->ScheduleIdleWork(
497 base::Bind(&InProcessCommandBuffer::PerformIdleWork
,
498 gpu_thread_weak_ptr_
));
501 void InProcessCommandBuffer::Flush(int32 put_offset
) {
502 CheckSequencedThread();
503 if (last_state_
.error
!= gpu::error::kNoError
)
506 if (last_put_offset_
== put_offset
)
509 last_put_offset_
= put_offset
;
510 base::Closure task
= base::Bind(&InProcessCommandBuffer::FlushOnGpuThread
,
511 gpu_thread_weak_ptr_
,
516 void InProcessCommandBuffer::OrderingBarrier(int32 put_offset
) {
520 void InProcessCommandBuffer::WaitForTokenInRange(int32 start
, int32 end
) {
521 CheckSequencedThread();
522 while (!InRange(start
, end
, GetLastToken()) &&
523 last_state_
.error
== gpu::error::kNoError
)
527 void InProcessCommandBuffer::WaitForGetOffsetInRange(int32 start
, int32 end
) {
528 CheckSequencedThread();
531 while (!InRange(start
, end
, last_state_
.get_offset
) &&
532 last_state_
.error
== gpu::error::kNoError
) {
538 void InProcessCommandBuffer::SetGetBuffer(int32 shm_id
) {
539 CheckSequencedThread();
540 if (last_state_
.error
!= gpu::error::kNoError
)
543 base::WaitableEvent
completion(true, false);
545 base::Bind(&InProcessCommandBuffer::SetGetBufferOnGpuThread
,
546 base::Unretained(this), shm_id
, &completion
);
551 base::AutoLock
lock(state_after_last_flush_lock_
);
552 state_after_last_flush_
= command_buffer_
->GetLastState();
556 void InProcessCommandBuffer::SetGetBufferOnGpuThread(
558 base::WaitableEvent
* completion
) {
559 base::AutoLock
lock(command_buffer_lock_
);
560 command_buffer_
->SetGetBuffer(shm_id
);
561 last_put_offset_
= 0;
562 completion
->Signal();
565 scoped_refptr
<Buffer
> InProcessCommandBuffer::CreateTransferBuffer(size_t size
,
567 CheckSequencedThread();
568 base::AutoLock
lock(command_buffer_lock_
);
569 return command_buffer_
->CreateTransferBuffer(size
, id
);
572 void InProcessCommandBuffer::DestroyTransferBuffer(int32 id
) {
573 CheckSequencedThread();
575 base::Bind(&InProcessCommandBuffer::DestroyTransferBufferOnGpuThread
,
576 base::Unretained(this),
582 void InProcessCommandBuffer::DestroyTransferBufferOnGpuThread(int32 id
) {
583 base::AutoLock
lock(command_buffer_lock_
);
584 command_buffer_
->DestroyTransferBuffer(id
);
587 gpu::Capabilities
InProcessCommandBuffer::GetCapabilities() {
588 return capabilities_
;
591 int32
InProcessCommandBuffer::CreateImage(ClientBuffer buffer
,
594 unsigned internalformat
) {
595 CheckSequencedThread();
597 DCHECK(gpu_memory_buffer_manager_
);
598 gfx::GpuMemoryBuffer
* gpu_memory_buffer
=
599 gpu_memory_buffer_manager_
->GpuMemoryBufferFromClientBuffer(buffer
);
600 DCHECK(gpu_memory_buffer
);
602 int32 new_id
= next_image_id_
.GetNext();
604 DCHECK(gpu::ImageFactory::IsGpuMemoryBufferFormatSupported(
605 gpu_memory_buffer
->GetFormat(), capabilities_
));
606 DCHECK(gpu::ImageFactory::IsImageFormatCompatibleWithGpuMemoryBufferFormat(
607 internalformat
, gpu_memory_buffer
->GetFormat()));
609 // This handle is owned by the GPU thread and must be passed to it or it
610 // will leak. In otherwords, do not early out on error between here and the
611 // queuing of the CreateImage task below.
612 bool requires_sync_point
= false;
613 gfx::GpuMemoryBufferHandle handle
=
614 ShareGpuMemoryBufferToGpuThread(gpu_memory_buffer
->GetHandle(),
615 &requires_sync_point
);
617 QueueTask(base::Bind(&InProcessCommandBuffer::CreateImageOnGpuThread
,
618 base::Unretained(this),
621 gfx::Size(width
, height
),
622 gpu_memory_buffer
->GetFormat(),
625 if (requires_sync_point
) {
626 gpu_memory_buffer_manager_
->SetDestructionSyncPoint(gpu_memory_buffer
,
633 void InProcessCommandBuffer::CreateImageOnGpuThread(
635 const gfx::GpuMemoryBufferHandle
& handle
,
636 const gfx::Size
& size
,
637 gfx::GpuMemoryBuffer::Format format
,
638 uint32 internalformat
) {
642 gpu::gles2::ImageManager
* image_manager
= decoder_
->GetImageManager();
643 DCHECK(image_manager
);
644 if (image_manager
->LookupImage(id
)) {
645 LOG(ERROR
) << "Image already exists with same ID.";
649 switch (handle
.type
) {
650 case gfx::SHARED_MEMORY_BUFFER
: {
651 scoped_refptr
<gfx::GLImageSharedMemory
> image(
652 new gfx::GLImageSharedMemory(size
, internalformat
));
653 if (!image
->Initialize(handle
, format
)) {
654 LOG(ERROR
) << "Failed to initialize image.";
658 image_manager
->AddImage(image
.get(), id
);
662 if (!image_factory_
) {
663 LOG(ERROR
) << "Image factory missing but required by buffer type.";
667 // Note: this assumes that client ID is always 0.
668 const int kClientId
= 0;
670 scoped_refptr
<gfx::GLImage
> image
=
671 image_factory_
->CreateImageForGpuMemoryBuffer(
672 handle
, size
, format
, internalformat
, kClientId
);
674 LOG(ERROR
) << "Failed to create image for buffer.";
678 image_manager
->AddImage(image
.get(), id
);
684 void InProcessCommandBuffer::DestroyImage(int32 id
) {
685 CheckSequencedThread();
687 QueueTask(base::Bind(&InProcessCommandBuffer::DestroyImageOnGpuThread
,
688 base::Unretained(this),
692 void InProcessCommandBuffer::DestroyImageOnGpuThread(int32 id
) {
696 gpu::gles2::ImageManager
* image_manager
= decoder_
->GetImageManager();
697 DCHECK(image_manager
);
698 if (!image_manager
->LookupImage(id
)) {
699 LOG(ERROR
) << "Image with ID doesn't exist.";
703 image_manager
->RemoveImage(id
);
706 int32
InProcessCommandBuffer::CreateGpuMemoryBufferImage(
709 unsigned internalformat
,
711 CheckSequencedThread();
713 DCHECK(gpu_memory_buffer_manager_
);
714 scoped_ptr
<gfx::GpuMemoryBuffer
> buffer(
715 gpu_memory_buffer_manager_
->AllocateGpuMemoryBuffer(
716 gfx::Size(width
, height
),
717 gpu::ImageFactory::ImageFormatToGpuMemoryBufferFormat(internalformat
),
718 gpu::ImageFactory::ImageUsageToGpuMemoryBufferUsage(usage
)));
722 return CreateImage(buffer
->AsClientBuffer(), width
, height
, internalformat
);
725 uint32
InProcessCommandBuffer::InsertSyncPoint() {
726 uint32 sync_point
= service_
->sync_point_manager()->GenerateSyncPoint();
727 QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread
,
728 base::Unretained(this),
733 uint32
InProcessCommandBuffer::InsertFutureSyncPoint() {
734 return service_
->sync_point_manager()->GenerateSyncPoint();
737 void InProcessCommandBuffer::RetireSyncPoint(uint32 sync_point
) {
738 QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread
,
739 base::Unretained(this),
743 void InProcessCommandBuffer::RetireSyncPointOnGpuThread(uint32 sync_point
) {
744 gles2::MailboxManager
* mailbox_manager
=
745 decoder_
->GetContextGroup()->mailbox_manager();
746 if (mailbox_manager
->UsesSync()) {
747 bool make_current_success
= false;
749 base::AutoLock
lock(command_buffer_lock_
);
750 make_current_success
= MakeCurrent();
752 if (make_current_success
)
753 mailbox_manager
->PushTextureUpdates(sync_point
);
755 service_
->sync_point_manager()->RetireSyncPoint(sync_point
);
758 void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point
,
759 const base::Closure
& callback
) {
760 CheckSequencedThread();
761 QueueTask(base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread
,
762 base::Unretained(this),
764 WrapCallback(callback
)));
767 bool InProcessCommandBuffer::WaitSyncPointOnGpuThread(unsigned sync_point
) {
768 service_
->sync_point_manager()->WaitSyncPoint(sync_point
);
769 gles2::MailboxManager
* mailbox_manager
=
770 decoder_
->GetContextGroup()->mailbox_manager();
771 mailbox_manager
->PullTextureUpdates(sync_point
);
775 void InProcessCommandBuffer::SignalSyncPointOnGpuThread(
777 const base::Closure
& callback
) {
778 service_
->sync_point_manager()->AddSyncPointCallback(sync_point
, callback
);
781 void InProcessCommandBuffer::SignalQuery(unsigned query_id
,
782 const base::Closure
& callback
) {
783 CheckSequencedThread();
784 QueueTask(base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread
,
785 base::Unretained(this),
787 WrapCallback(callback
)));
790 void InProcessCommandBuffer::SignalQueryOnGpuThread(
792 const base::Closure
& callback
) {
793 gles2::QueryManager
* query_manager_
= decoder_
->GetQueryManager();
794 DCHECK(query_manager_
);
796 gles2::QueryManager::Query
* query
= query_manager_
->GetQuery(query_id
);
800 query
->AddCallback(callback
);
// Intentionally a no-op: in-process command buffers do not track surface
// visibility.
803 void InProcessCommandBuffer::SetSurfaceVisible(bool visible
) {}
805 uint32
InProcessCommandBuffer::CreateStreamTexture(uint32 texture_id
) {
806 base::WaitableEvent
completion(true, false);
807 uint32 stream_id
= 0;
808 base::Callback
<uint32(void)> task
=
809 base::Bind(&InProcessCommandBuffer::CreateStreamTextureOnGpuThread
,
810 base::Unretained(this),
813 base::Bind(&RunTaskWithResult
<uint32
>, task
, &stream_id
, &completion
));
818 void InProcessCommandBuffer::SetLock(base::Lock
*) {
821 bool InProcessCommandBuffer::IsGpuChannelLost() {
822 // There is no such channel to lose for in-process contexts. This only
823 // makes sense for out-of-process command buffers.
827 uint32
InProcessCommandBuffer::CreateStreamTextureOnGpuThread(
828 uint32 client_texture_id
) {
829 #if defined(OS_ANDROID)
830 return stream_texture_manager_
->CreateStreamTexture(
831 client_texture_id
, decoder_
->GetContextGroup()->texture_manager());
837 gpu::error::Error
InProcessCommandBuffer::GetLastError() {
838 CheckSequencedThread();
839 return last_state_
.error
;
842 bool InProcessCommandBuffer::Initialize() {
850 const scoped_refptr
<base::SingleThreadTaskRunner
>& task_runner
,
851 const base::Closure
& callback
) {
852 // The task_runner.get() check is to support using InProcessCommandBuffer on
853 // a thread without a message loop.
854 if (task_runner
.get() && !task_runner
->BelongsToCurrentThread()) {
855 task_runner
->PostTask(FROM_HERE
, callback
);
861 void RunOnTargetThread(scoped_ptr
<base::Closure
> callback
) {
862 DCHECK(callback
.get());
866 } // anonymous namespace
868 base::Closure
InProcessCommandBuffer::WrapCallback(
869 const base::Closure
& callback
) {
870 // Make sure the callback gets deleted on the target thread by passing
872 scoped_ptr
<base::Closure
> scoped_callback(new base::Closure(callback
));
873 base::Closure callback_on_client_thread
=
874 base::Bind(&RunOnTargetThread
, base::Passed(&scoped_callback
));
875 base::Closure wrapped_callback
=
876 base::Bind(&PostCallback
, base::ThreadTaskRunnerHandle::IsSet()
877 ? base::ThreadTaskRunnerHandle::Get()
879 callback_on_client_thread
);
880 return wrapped_callback
;
883 #if defined(OS_ANDROID)
884 scoped_refptr
<gfx::SurfaceTexture
>
885 InProcessCommandBuffer::GetSurfaceTexture(uint32 stream_id
) {
886 DCHECK(stream_texture_manager_
);
887 return stream_texture_manager_
->GetSurfaceTexture(stream_id
);
891 GpuInProcessThread::GpuInProcessThread(SyncPointManager
* sync_point_manager
)
892 : base::Thread("GpuThread"), sync_point_manager_(sync_point_manager
) {
896 GpuInProcessThread::~GpuInProcessThread() {
// Forwards ref-counting to the RefCountedThreadSafe base (AddRef/Release
// are declared on Service, so they must be defined explicitly here).
900 void GpuInProcessThread::AddRef() const {
901 base::RefCountedThreadSafe
<GpuInProcessThread
>::AddRef();
// Forwards ref-counting to the RefCountedThreadSafe base; deletes the
// thread when the last reference drops.
903 void GpuInProcessThread::Release() const {
904 base::RefCountedThreadSafe
<GpuInProcessThread
>::Release();
// Posts |task| to this thread's task runner for asynchronous execution.
907 void GpuInProcessThread::ScheduleTask(const base::Closure
& task
) {
908 task_runner()->PostTask(FROM_HERE
, task
);
// Posts |callback| with a 2 ms delay so idle work runs only when the thread
// is otherwise quiet.
911 void GpuInProcessThread::ScheduleIdleWork(const base::Closure
& callback
) {
912 // Match delay with GpuCommandBufferStub.
913 task_runner()->PostDelayedTask(FROM_HERE
, callback
,
914 base::TimeDelta::FromMilliseconds(2));
917 bool GpuInProcessThread::UseVirtualizedGLContexts() {
// Lazily creates and returns the shader translator cache shared by decoders
// on this thread.
921 scoped_refptr
<gles2::ShaderTranslatorCache
>
922 GpuInProcessThread::shader_translator_cache() {
923 if (!shader_translator_cache_
.get())
924 shader_translator_cache_
= new gpu::gles2::ShaderTranslatorCache
;
925 return shader_translator_cache_
;
928 SyncPointManager
* GpuInProcessThread::sync_point_manager() {
929 return sync_point_manager_
;