// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/command_buffer/service/in_process_command_buffer.h"

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/lazy_instance.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/memory/weak_ptr.h"
#include "base/sequence_checker.h"
#include "base/single_thread_task_runner.h"
#include "base/thread_task_runner_handle.h"
#include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
#include "gpu/command_buffer/common/value_state.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/gl_context_virtual.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/gpu_switches.h"
#include "gpu/command_buffer/service/image_factory.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/memory_program_cache.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/query_manager.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/command_buffer/service/transfer_buffer_manager.h"
#include "gpu/command_buffer/service/valuebuffer_manager.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image.h"
#include "ui/gl/gl_image_shared_memory.h"
#include "ui/gl/gl_share_group.h"

#if defined(OS_ANDROID)
#include "gpu/command_buffer/service/stream_texture_manager_in_process_android.h"
#include "ui/gl/android/surface_texture.h"
#endif

#include "base/process/process_handle.h"

namespace gpu {

namespace {

base::StaticAtomicSequenceNumber g_next_command_buffer_id;
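
// Runs |task| (queued to the GPU thread via QueueTask()), stores its return
// value in |result|, and signals |completion| so the waiting client thread
// can proceed.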
template <typename T>
static void RunTaskWithResult(base::Callback<T(void)> task,
                              T* result,
                              base::WaitableEvent* completion) {
  *result = task.Run();
  completion->Signal();
}
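
// Bundles the process-wide default Service (a GpuInProcessThread) with the
// SyncPointManager it uses, so both are created and destroyed together.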
struct GpuInProcessThreadHolder {
  GpuInProcessThreadHolder()
      : sync_point_manager(new SyncPointManager(false)),
        gpu_thread(new GpuInProcessThread(sync_point_manager.get())) {}
  scoped_ptr<SyncPointManager> sync_point_manager;
  scoped_refptr<InProcessCommandBuffer::Service> gpu_thread;
};

base::LazyInstance<GpuInProcessThreadHolder> g_default_service =
    LAZY_INSTANCE_INITIALIZER;

class ScopedEvent {
 public:
  explicit ScopedEvent(base::WaitableEvent* event) : event_(event) {}
  ~ScopedEvent() { event_->Signal(); }

 private:
  base::WaitableEvent* event_;
};
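
// Duplicates a shared memory handle so the GPU thread owns its own reference
// to the client's buffer.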
base::SharedMemoryHandle ShareToGpuThread(
    base::SharedMemoryHandle source_handle) {
  return base::SharedMemory::DuplicateHandle(source_handle);
}
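
// Returns a handle that is safe to use from the GPU thread. Shared memory is
// duplicated; native buffer types are passed through and flagged as requiring
// a destruction sync point.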
gfx::GpuMemoryBufferHandle ShareGpuMemoryBufferToGpuThread(
    const gfx::GpuMemoryBufferHandle& source_handle,
    bool* requires_sync_point) {
  switch (source_handle.type) {
    case gfx::SHARED_MEMORY_BUFFER: {
      gfx::GpuMemoryBufferHandle handle;
      handle.type = gfx::SHARED_MEMORY_BUFFER;
      handle.handle = ShareToGpuThread(source_handle.handle);
      *requires_sync_point = false;
      return handle;
    }
    case gfx::IO_SURFACE_BUFFER:
    case gfx::SURFACE_TEXTURE_BUFFER:
    case gfx::OZONE_NATIVE_PIXMAP:
      *requires_sync_point = true;
      return source_handle;
    default:
      NOTREACHED();
      return gfx::GpuMemoryBufferHandle();
  }
}
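
// Returns the caller-supplied Service when one is given; otherwise falls back
// to the lazily created process-wide GPU thread.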
scoped_refptr<InProcessCommandBuffer::Service> GetInitialService(
    const scoped_refptr<InProcessCommandBuffer::Service>& service) {
  if (service.get())
    return service;

  // Call base::ThreadTaskRunnerHandle::IsSet() to ensure that it is
  // instantiated before we create the GPU thread, otherwise shutdown order
  // will delete the ThreadTaskRunnerHandle before the GPU thread's message
  // loop, and when the message loop is shut down, it will recreate
  // ThreadTaskRunnerHandle, which will re-add a new task to the AtExitManager,
  // which causes a deadlock because it's already locked.
  base::ThreadTaskRunnerHandle::IsSet();
  return g_default_service.Get().gpu_thread;
}

}  // anonymous namespace

InProcessCommandBuffer::Service::Service() {}

InProcessCommandBuffer::Service::~Service() {}

scoped_refptr<gfx::GLShareGroup>
InProcessCommandBuffer::Service::share_group() {
  if (!share_group_.get())
    share_group_ = new gfx::GLShareGroup;
  return share_group_;
}

scoped_refptr<gles2::MailboxManager>
InProcessCommandBuffer::Service::mailbox_manager() {
  if (!mailbox_manager_.get()) {
    mailbox_manager_ = gles2::MailboxManager::Create();
  }
  return mailbox_manager_;
}

scoped_refptr<gles2::SubscriptionRefSet>
InProcessCommandBuffer::Service::subscription_ref_set() {
  if (!subscription_ref_set_.get()) {
    subscription_ref_set_ = new gles2::SubscriptionRefSet();
  }
  return subscription_ref_set_;
}

scoped_refptr<ValueStateMap>
InProcessCommandBuffer::Service::pending_valuebuffer_state() {
  if (!pending_valuebuffer_state_.get()) {
    pending_valuebuffer_state_ = new ValueStateMap();
  }
  return pending_valuebuffer_state_;
}
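
// Lazily creates an in-memory program cache, but only when the driver exposes
// program binaries and the cache is not disabled on the command line.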
gpu::gles2::ProgramCache* InProcessCommandBuffer::Service::program_cache() {
  if (!program_cache_.get() &&
      (gfx::g_driver_gl.ext.b_GL_ARB_get_program_binary ||
       gfx::g_driver_gl.ext.b_GL_OES_get_program_binary) &&
      !base::CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kDisableGpuProgramCache)) {
    program_cache_.reset(new gpu::gles2::MemoryProgramCache());
  }
  return program_cache_.get();
}

InProcessCommandBuffer::InProcessCommandBuffer(
    const scoped_refptr<Service>& service)
    : command_buffer_id_(g_next_command_buffer_id.GetNext()),
      context_lost_(false),
      delayed_work_pending_(false),
      image_factory_(nullptr),
      last_put_offset_(-1),
      gpu_memory_buffer_manager_(nullptr),
      flush_event_(false, false),
      service_(GetInitialService(service)),
      gpu_thread_weak_ptr_factory_(this) {
  DCHECK(service_.get());
  next_image_id_.GetNext();
}

InProcessCommandBuffer::~InProcessCommandBuffer() {
  Destroy();
}

void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) {
  CheckSequencedThread();
  DCHECK(!surface_->IsOffscreen());
  surface_->Resize(size);
}
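
// Makes the decoder's context current. On failure the context is marked lost
// and the parse error is propagated through the command buffer state.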
bool InProcessCommandBuffer::MakeCurrent() {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();

  if (!context_lost_ && decoder_->MakeCurrent())
    return true;
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
  command_buffer_->SetParseError(gpu::error::kLostContext);
  return false;
}

void InProcessCommandBuffer::PumpCommands() {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();

  if (!MakeCurrent())
    return;

  gpu_scheduler_->PutChanged();
}

bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id) {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();
  command_buffer_->SetGetBuffer(transfer_buffer_id);
  return true;
}

bool InProcessCommandBuffer::Initialize(
    scoped_refptr<gfx::GLSurface> surface,
    bool is_offscreen,
    gfx::AcceleratedWidget window,
    const gfx::Size& size,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    const base::Closure& context_lost_callback,
    InProcessCommandBuffer* share_group,
    GpuMemoryBufferManager* gpu_memory_buffer_manager,
    ImageFactory* image_factory) {
  DCHECK(!share_group || service_.get() == share_group->service_.get());
  context_lost_callback_ = WrapCallback(context_lost_callback);

  if (surface.get()) {
    // GPU thread must be the same as client thread due to GLSurface not being
    // thread safe.
    sequence_checker_.reset(new base::SequenceChecker);
    surface_ = surface;
  }

  gpu::Capabilities capabilities;
  InitializeOnGpuThreadParams params(is_offscreen,
                                     window,
                                     size,
                                     attribs,
                                     gpu_preference,
                                     &capabilities,
                                     share_group,
                                     image_factory);

  base::Callback<bool(void)> init_task =
      base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread,
                 base::Unretained(this),
                 params);

  base::WaitableEvent completion(true, false);
  bool result = false;
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion));
  completion.Wait();

  gpu_memory_buffer_manager_ = gpu_memory_buffer_manager;

  if (result) {
    capabilities_ = capabilities;
    capabilities_.image = capabilities_.image && gpu_memory_buffer_manager_;
  }

  return result;
}
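
// Builds the service-side objects on the GPU thread: the transfer buffer
// manager, CommandBufferService, decoder, scheduler, GLSurface and GLContext
// (virtualized when the service or driver workarounds require it).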
bool InProcessCommandBuffer::InitializeOnGpuThread(
    const InitializeOnGpuThreadParams& params) {
  CheckSequencedThread();
  gpu_thread_weak_ptr_ = gpu_thread_weak_ptr_factory_.GetWeakPtr();

  DCHECK(params.size.width() >= 0 && params.size.height() >= 0);

  TransferBufferManager* manager = new TransferBufferManager(nullptr);
  transfer_buffer_manager_ = manager;
  manager->Initialize();

  scoped_ptr<CommandBufferService> command_buffer(
      new CommandBufferService(transfer_buffer_manager_.get()));
  command_buffer->SetPutOffsetChangeCallback(base::Bind(
      &InProcessCommandBuffer::PumpCommands, gpu_thread_weak_ptr_));
  command_buffer->SetParseErrorCallback(base::Bind(
      &InProcessCommandBuffer::OnContextLost, gpu_thread_weak_ptr_));

  if (!command_buffer->Initialize()) {
    LOG(ERROR) << "Could not initialize command buffer.";
    DestroyOnGpuThread();
    return false;
  }

  gl_share_group_ = params.context_group
                        ? params.context_group->gl_share_group_
                        : service_->share_group();

#if defined(OS_ANDROID)
  stream_texture_manager_.reset(new StreamTextureManagerInProcess);
#endif

  bool bind_generates_resource = false;
  decoder_.reset(gles2::GLES2Decoder::Create(
      params.context_group
          ? params.context_group->decoder_->GetContextGroup()
          : new gles2::ContextGroup(service_->mailbox_manager(), NULL,
                                    service_->shader_translator_cache(),
                                    service_->framebuffer_completeness_cache(),
                                    NULL, service_->subscription_ref_set(),
                                    service_->pending_valuebuffer_state(),
                                    bind_generates_resource)));

  gpu_scheduler_.reset(
      new GpuScheduler(command_buffer.get(), decoder_.get(), decoder_.get()));
  command_buffer->SetGetBufferChangeCallback(base::Bind(
      &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
  command_buffer_ = command_buffer.Pass();

  decoder_->set_engine(gpu_scheduler_.get());

  if (!surface_.get()) {
    if (params.is_offscreen)
      surface_ = gfx::GLSurface::CreateOffscreenGLSurface(params.size);
    else
      surface_ = gfx::GLSurface::CreateViewGLSurface(params.window);
  }

  if (!surface_.get()) {
    LOG(ERROR) << "Could not create GLSurface.";
    DestroyOnGpuThread();
    return false;
  }

  if (service_->UseVirtualizedGLContexts() ||
      decoder_->GetContextGroup()
          ->feature_info()
          ->workarounds()
          .use_virtualized_gl_contexts) {
    context_ = gl_share_group_->GetSharedContext();
    if (!context_.get()) {
      context_ = gfx::GLContext::CreateGLContext(
          gl_share_group_.get(), surface_.get(), params.gpu_preference);
      gl_share_group_->SetSharedContext(context_.get());
    }

    context_ = new GLContextVirtual(
        gl_share_group_.get(), context_.get(), decoder_->AsWeakPtr());
    if (context_->Initialize(surface_.get(), params.gpu_preference)) {
      VLOG(1) << "Created virtual GL context.";
    } else {
      context_ = NULL;
    }
  } else {
    context_ = gfx::GLContext::CreateGLContext(
        gl_share_group_.get(), surface_.get(), params.gpu_preference);
  }

  if (!context_.get()) {
    LOG(ERROR) << "Could not create GLContext.";
    DestroyOnGpuThread();
    return false;
  }

  if (!context_->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Could not make context current.";
    DestroyOnGpuThread();
    return false;
  }

  if (!decoder_->GetContextGroup()->has_program_cache() &&
      !decoder_->GetContextGroup()
           ->feature_info()
           ->workarounds()
           .disable_program_cache) {
    decoder_->GetContextGroup()->set_program_cache(service_->program_cache());
  }

  gles2::DisallowedFeatures disallowed_features;
  disallowed_features.gpu_memory_manager = true;
  if (!decoder_->Initialize(surface_,
                            context_,
                            params.is_offscreen,
                            params.size,
                            disallowed_features,
                            params.attribs)) {
    LOG(ERROR) << "Could not initialize decoder.";
    DestroyOnGpuThread();
    return false;
  }

  *params.capabilities = decoder_->GetCapabilities();

  if (!params.is_offscreen) {
    decoder_->SetResizeCallback(base::Bind(
        &InProcessCommandBuffer::OnResizeView, gpu_thread_weak_ptr_));
  }
  decoder_->SetWaitSyncPointCallback(
      base::Bind(&InProcessCommandBuffer::WaitSyncPointOnGpuThread,
                 base::Unretained(this)));

  image_factory_ = params.image_factory;

  return true;
}
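
// Blocks the client thread until DestroyOnGpuThread() has finished tearing
// down the GPU-side objects.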
void InProcessCommandBuffer::Destroy() {
  CheckSequencedThread();

  base::WaitableEvent completion(true, false);
  bool result = false;
  base::Callback<bool(void)> destroy_task = base::Bind(
      &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion));
  completion.Wait();
}

bool InProcessCommandBuffer::DestroyOnGpuThread() {
  CheckSequencedThread();
  gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs();
  command_buffer_.reset();
  // Clean up GL resources if possible.
  bool have_context = context_.get() && context_->MakeCurrent(surface_.get());
  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }
  context_ = NULL;
  surface_ = NULL;
  gl_share_group_ = NULL;
#if defined(OS_ANDROID)
  stream_texture_manager_.reset();
#endif

  return true;
}

void InProcessCommandBuffer::CheckSequencedThread() {
  DCHECK(!sequence_checker_ ||
         sequence_checker_->CalledOnValidSequencedThread());
}

void InProcessCommandBuffer::OnContextLost() {
  CheckSequencedThread();
  if (!context_lost_callback_.is_null()) {
    context_lost_callback_.Run();
    context_lost_callback_.Reset();
  }

  context_lost_ = true;
}
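
// Copies the state recorded by the most recent flush into last_state_, unless
// it is older; the unsigned subtraction tolerates generation counter
// wraparound.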
CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
  CheckSequencedThread();
  base::AutoLock lock(state_after_last_flush_lock_);
  if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U)
    last_state_ = state_after_last_flush_;
  return last_state_;
}

CommandBuffer::State InProcessCommandBuffer::GetLastState() {
  CheckSequencedThread();
  return last_state_;
}

int32 InProcessCommandBuffer::GetLastToken() {
  CheckSequencedThread();
  GetStateFast();
  return last_state_.token;
}
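
// Runs on the GPU thread: executes the flush, records the resulting state for
// the client, and schedules delayed work if commands are exhausted while idle
// work or queries are still pending.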
void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) {
  CheckSequencedThread();
  ScopedEvent handle_flush(&flush_event_);
  base::AutoLock lock(command_buffer_lock_);
  command_buffer_->Flush(put_offset);
  {
    // Update state before signaling the flush event.
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetLastState();
  }
  DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) ||
         (error::IsError(state_after_last_flush_.error) && context_lost_));

  // If we've processed all pending commands but still have pending queries,
  // pump idle work until the query is passed.
  if (put_offset == state_after_last_flush_.get_offset &&
      (gpu_scheduler_->HasMoreIdleWork() ||
       gpu_scheduler_->HasPendingQueries())) {
    ScheduleDelayedWorkOnGpuThread();
  }
}

void InProcessCommandBuffer::PerformDelayedWork() {
  CheckSequencedThread();
  delayed_work_pending_ = false;
  base::AutoLock lock(command_buffer_lock_);
  if (MakeCurrent()) {
    gpu_scheduler_->PerformIdleWork();
    gpu_scheduler_->ProcessPendingQueries();
    if (gpu_scheduler_->HasMoreIdleWork() ||
        gpu_scheduler_->HasPendingQueries()) {
      ScheduleDelayedWorkOnGpuThread();
    }
  }
}

void InProcessCommandBuffer::ScheduleDelayedWorkOnGpuThread() {
  CheckSequencedThread();
  if (delayed_work_pending_)
    return;
  delayed_work_pending_ = true;
  service_->ScheduleDelayedWork(base::Bind(
      &InProcessCommandBuffer::PerformDelayedWork, gpu_thread_weak_ptr_));
}

void InProcessCommandBuffer::Flush(int32 put_offset) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  if (last_put_offset_ == put_offset)
    return;

  last_put_offset_ = put_offset;
  base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
                                  gpu_thread_weak_ptr_,
                                  put_offset);

  QueueTask(task);
}

void InProcessCommandBuffer::OrderingBarrier(int32 put_offset) {
  Flush(put_offset);
}
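
// The waits below block on flush_event_, which FlushOnGpuThread() signals via
// ScopedEvent once each flush has been processed.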
void InProcessCommandBuffer::WaitForTokenInRange(int32 start, int32 end) {
  CheckSequencedThread();
  while (!InRange(start, end, GetLastToken()) &&
         last_state_.error == gpu::error::kNoError)
    flush_event_.Wait();
}

void InProcessCommandBuffer::WaitForGetOffsetInRange(int32 start, int32 end) {
  CheckSequencedThread();

  GetStateFast();
  while (!InRange(start, end, last_state_.get_offset) &&
         last_state_.error == gpu::error::kNoError) {
    flush_event_.Wait();
    GetStateFast();
  }
}

void InProcessCommandBuffer::SetGetBuffer(int32 shm_id) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  base::WaitableEvent completion(true, false);
  base::Closure task =
      base::Bind(&InProcessCommandBuffer::SetGetBufferOnGpuThread,
                 base::Unretained(this), shm_id, &completion);
  QueueTask(task);
  completion.Wait();

  {
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetLastState();
  }
}

void InProcessCommandBuffer::SetGetBufferOnGpuThread(
    int32 shm_id,
    base::WaitableEvent* completion) {
  base::AutoLock lock(command_buffer_lock_);
  command_buffer_->SetGetBuffer(shm_id);
  last_put_offset_ = 0;
  completion->Signal();
}

scoped_refptr<Buffer> InProcessCommandBuffer::CreateTransferBuffer(size_t size,
                                                                   int32* id) {
  CheckSequencedThread();
  base::AutoLock lock(command_buffer_lock_);
  return command_buffer_->CreateTransferBuffer(size, id);
}

void InProcessCommandBuffer::DestroyTransferBuffer(int32 id) {
  CheckSequencedThread();
  base::Closure task =
      base::Bind(&InProcessCommandBuffer::DestroyTransferBufferOnGpuThread,
                 base::Unretained(this),
                 id);

  QueueTask(task);
}

void InProcessCommandBuffer::DestroyTransferBufferOnGpuThread(int32 id) {
  base::AutoLock lock(command_buffer_lock_);
  command_buffer_->DestroyTransferBuffer(id);
}

gpu::Capabilities InProcessCommandBuffer::GetCapabilities() {
  return capabilities_;
}
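
// Registers a GLImage for |buffer| on the GPU thread and returns its id. The
// duplicated handle is always handed to the GPU thread, even on error, so it
// is not leaked; native buffers additionally get a destruction sync point.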
int32 InProcessCommandBuffer::CreateImage(ClientBuffer buffer,
                                          size_t width,
                                          size_t height,
                                          unsigned internalformat) {
  CheckSequencedThread();

  DCHECK(gpu_memory_buffer_manager_);
  gfx::GpuMemoryBuffer* gpu_memory_buffer =
      gpu_memory_buffer_manager_->GpuMemoryBufferFromClientBuffer(buffer);
  DCHECK(gpu_memory_buffer);

  int32 new_id = next_image_id_.GetNext();

  DCHECK(gpu::ImageFactory::IsGpuMemoryBufferFormatSupported(
      gpu_memory_buffer->GetFormat(), capabilities_));
  DCHECK(gpu::ImageFactory::IsImageFormatCompatibleWithGpuMemoryBufferFormat(
      internalformat, gpu_memory_buffer->GetFormat()));

  // This handle is owned by the GPU thread and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // queuing of the CreateImage task below.
  bool requires_sync_point = false;
  gfx::GpuMemoryBufferHandle handle =
      ShareGpuMemoryBufferToGpuThread(gpu_memory_buffer->GetHandle(),
                                      &requires_sync_point);

  QueueTask(base::Bind(&InProcessCommandBuffer::CreateImageOnGpuThread,
                       base::Unretained(this),
                       new_id,
                       handle,
                       gfx::Size(width, height),
                       gpu_memory_buffer->GetFormat(),
                       internalformat));

  if (requires_sync_point) {
    gpu_memory_buffer_manager_->SetDestructionSyncPoint(gpu_memory_buffer,
                                                        InsertSyncPoint());
  }

  return new_id;
}

void InProcessCommandBuffer::CreateImageOnGpuThread(
    int32 id,
    const gfx::GpuMemoryBufferHandle& handle,
    const gfx::Size& size,
    gfx::BufferFormat format,
    uint32 internalformat) {
  if (!decoder_)
    return;

  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  if (image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image already exists with same ID.";
    return;
  }

  switch (handle.type) {
    case gfx::SHARED_MEMORY_BUFFER: {
      scoped_refptr<gfx::GLImageSharedMemory> image(
          new gfx::GLImageSharedMemory(size, internalformat));
      if (!image->Initialize(handle, format)) {
        LOG(ERROR) << "Failed to initialize image.";
        return;
      }

      image_manager->AddImage(image.get(), id);
      break;
    }
    default: {
      if (!image_factory_) {
        LOG(ERROR) << "Image factory missing but required by buffer type.";
        return;
      }

      // Note: this assumes that client ID is always 0.
      const int kClientId = 0;

      scoped_refptr<gfx::GLImage> image =
          image_factory_->CreateImageForGpuMemoryBuffer(
              handle, size, format, internalformat, kClientId);
      if (!image.get()) {
        LOG(ERROR) << "Failed to create image for buffer.";
        return;
      }

      image_manager->AddImage(image.get(), id);
      break;
    }
  }
}

void InProcessCommandBuffer::DestroyImage(int32 id) {
  CheckSequencedThread();

  QueueTask(base::Bind(&InProcessCommandBuffer::DestroyImageOnGpuThread,
                       base::Unretained(this),
                       id));
}

void InProcessCommandBuffer::DestroyImageOnGpuThread(int32 id) {
  if (!decoder_)
    return;

  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  if (!image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image with ID doesn't exist.";
    return;
  }

  image_manager->RemoveImage(id);
}

int32 InProcessCommandBuffer::CreateGpuMemoryBufferImage(
    size_t width,
    size_t height,
    unsigned internalformat,
    unsigned usage) {
  CheckSequencedThread();

  DCHECK(gpu_memory_buffer_manager_);
  scoped_ptr<gfx::GpuMemoryBuffer> buffer(
      gpu_memory_buffer_manager_->AllocateGpuMemoryBuffer(
          gfx::Size(width, height),
          gpu::ImageFactory::DefaultBufferFormatForImageFormat(internalformat),
          gpu::ImageFactory::ImageUsageToGpuMemoryBufferUsage(usage)));
  if (!buffer)
    return -1;

  return CreateImage(buffer->AsClientBuffer(), width, height, internalformat);
}

uint32 InProcessCommandBuffer::InsertSyncPoint() {
  uint32 sync_point = service_->sync_point_manager()->GenerateSyncPoint();
  QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread,
                       base::Unretained(this),
                       sync_point));
  return sync_point;
}

uint32 InProcessCommandBuffer::InsertFutureSyncPoint() {
  return service_->sync_point_manager()->GenerateSyncPoint();
}

void InProcessCommandBuffer::RetireSyncPoint(uint32 sync_point) {
  QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread,
                       base::Unretained(this),
                       sync_point));
}
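
// Pushes pending texture updates for sync-capable mailbox managers before the
// sync point is retired.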
void InProcessCommandBuffer::RetireSyncPointOnGpuThread(uint32 sync_point) {
  gles2::MailboxManager* mailbox_manager =
      decoder_->GetContextGroup()->mailbox_manager();
  if (mailbox_manager->UsesSync()) {
    bool make_current_success = false;
    {
      base::AutoLock lock(command_buffer_lock_);
      make_current_success = MakeCurrent();
    }
    if (make_current_success)
      mailbox_manager->PushTextureUpdates(sync_point);
  }
  service_->sync_point_manager()->RetireSyncPoint(sync_point);
}

void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point,
                                             const base::Closure& callback) {
  CheckSequencedThread();
  QueueTask(base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread,
                       base::Unretained(this),
                       sync_point,
                       WrapCallback(callback)));
}

bool InProcessCommandBuffer::WaitSyncPointOnGpuThread(unsigned sync_point) {
  service_->sync_point_manager()->WaitSyncPoint(sync_point);
  gles2::MailboxManager* mailbox_manager =
      decoder_->GetContextGroup()->mailbox_manager();
  mailbox_manager->PullTextureUpdates(sync_point);
  return true;
}

void InProcessCommandBuffer::SignalSyncPointOnGpuThread(
    unsigned sync_point,
    const base::Closure& callback) {
  service_->sync_point_manager()->AddSyncPointCallback(sync_point, callback);
}

void InProcessCommandBuffer::SignalQuery(unsigned query_id,
                                         const base::Closure& callback) {
  CheckSequencedThread();
  QueueTask(base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread,
                       base::Unretained(this),
                       query_id,
                       WrapCallback(callback)));
}

void InProcessCommandBuffer::SignalQueryOnGpuThread(
    unsigned query_id,
    const base::Closure& callback) {
  gles2::QueryManager* query_manager_ = decoder_->GetQueryManager();
  DCHECK(query_manager_);

  gles2::QueryManager::Query* query = query_manager_->GetQuery(query_id);
  if (!query)
    callback.Run();
  else
    query->AddCallback(callback);
}

void InProcessCommandBuffer::SetSurfaceVisible(bool visible) {}
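
// Creates a stream texture on the GPU thread and blocks until the resulting
// stream id is available.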
uint32 InProcessCommandBuffer::CreateStreamTexture(uint32 texture_id) {
  base::WaitableEvent completion(true, false);
  uint32 stream_id = 0;
  base::Callback<uint32(void)> task =
      base::Bind(&InProcessCommandBuffer::CreateStreamTextureOnGpuThread,
                 base::Unretained(this),
                 texture_id);
  QueueTask(
      base::Bind(&RunTaskWithResult<uint32>, task, &stream_id, &completion));
  completion.Wait();
  return stream_id;
}

void InProcessCommandBuffer::SetLock(base::Lock*) {
}

bool InProcessCommandBuffer::IsGpuChannelLost() {
  // There is no such channel to lose for in-process contexts. This only
  // makes sense for out-of-process command buffers.
  return false;
}

CommandBufferNamespace InProcessCommandBuffer::GetNamespaceID() const {
  return CommandBufferNamespace::IN_PROCESS;
}

uint64_t InProcessCommandBuffer::GetCommandBufferID() const {
  return command_buffer_id_;
}

uint32 InProcessCommandBuffer::CreateStreamTextureOnGpuThread(
    uint32 client_texture_id) {
#if defined(OS_ANDROID)
  return stream_texture_manager_->CreateStreamTexture(
      client_texture_id, decoder_->GetContextGroup()->texture_manager());
#else
  return 0;
#endif
}

gpu::error::Error InProcessCommandBuffer::GetLastError() {
  CheckSequencedThread();
  return last_state_.error;
}

bool InProcessCommandBuffer::Initialize() {
  NOTREACHED();
  return false;
}

namespace {

void PostCallback(
    const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
    const base::Closure& callback) {
  // The task_runner.get() check is to support using InProcessCommandBuffer on
  // a thread without a message loop.
  if (task_runner.get() && !task_runner->BelongsToCurrentThread()) {
    task_runner->PostTask(FROM_HERE, callback);
  } else {
    callback.Run();
  }
}

void RunOnTargetThread(scoped_ptr<base::Closure> callback) {
  DCHECK(callback.get());
  callback->Run();
}

}  // anonymous namespace
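
// Wraps |callback| so that it is posted back to, and destroyed on, the thread
// that created the wrapper (when that thread has a task runner).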
base::Closure InProcessCommandBuffer::WrapCallback(
    const base::Closure& callback) {
  // Make sure the callback gets deleted on the target thread by passing
  // ownership.
  scoped_ptr<base::Closure> scoped_callback(new base::Closure(callback));
  base::Closure callback_on_client_thread =
      base::Bind(&RunOnTargetThread, base::Passed(&scoped_callback));
  base::Closure wrapped_callback =
      base::Bind(&PostCallback, base::ThreadTaskRunnerHandle::IsSet()
                                    ? base::ThreadTaskRunnerHandle::Get()
                                    : nullptr,
                 callback_on_client_thread);
  return wrapped_callback;
}

#if defined(OS_ANDROID)
scoped_refptr<gfx::SurfaceTexture>
InProcessCommandBuffer::GetSurfaceTexture(uint32 stream_id) {
  DCHECK(stream_texture_manager_);
  return stream_texture_manager_->GetSurfaceTexture(stream_id);
}
#endif
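
// GpuInProcessThread is the default Service: a ref-counted base::Thread that
// runs scheduled tasks and delayed work for in-process command buffers.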
GpuInProcessThread::GpuInProcessThread(SyncPointManager* sync_point_manager)
    : base::Thread("GpuThread"), sync_point_manager_(sync_point_manager) {
  Start();
}

GpuInProcessThread::~GpuInProcessThread() {
  Stop();
}

void GpuInProcessThread::AddRef() const {
  base::RefCountedThreadSafe<GpuInProcessThread>::AddRef();
}

void GpuInProcessThread::Release() const {
  base::RefCountedThreadSafe<GpuInProcessThread>::Release();
}

void GpuInProcessThread::ScheduleTask(const base::Closure& task) {
  task_runner()->PostTask(FROM_HERE, task);
}

void GpuInProcessThread::ScheduleDelayedWork(const base::Closure& callback) {
  // Match delay with GpuCommandBufferStub.
  task_runner()->PostDelayedTask(FROM_HERE, callback,
                                 base::TimeDelta::FromMilliseconds(2));
}

bool GpuInProcessThread::UseVirtualizedGLContexts() {
  return false;
}

scoped_refptr<gles2::ShaderTranslatorCache>
GpuInProcessThread::shader_translator_cache() {
  if (!shader_translator_cache_.get())
    shader_translator_cache_ = new gpu::gles2::ShaderTranslatorCache;
  return shader_translator_cache_;
}

scoped_refptr<gles2::FramebufferCompletenessCache>
GpuInProcessThread::framebuffer_completeness_cache() {
  if (!framebuffer_completeness_cache_.get())
    framebuffer_completeness_cache_ =
        new gpu::gles2::FramebufferCompletenessCache;
  return framebuffer_completeness_cache_;
}

SyncPointManager* GpuInProcessThread::sync_point_manager() {
  return sync_point_manager_;
}

}  // namespace gpu