// Source: gpu/command_buffer/service/in_process_command_buffer.cc
// (chromium-blink-merge, blob c5e53541d9d45587ad24f526d7f9e2e18aeb82f2)
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "gpu/command_buffer/service/in_process_command_buffer.h"

#include <queue>
#include <set>
#include <utility>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/lazy_instance.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/memory/weak_ptr.h"
#include "base/sequence_checker.h"
#include "base/single_thread_task_runner.h"
#include "base/synchronization/condition_variable.h"
#include "base/thread_task_runner_handle.h"
#include "base/threading/thread.h"
#include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
#include "gpu/command_buffer/common/value_state.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/gl_context_virtual.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/gpu_switches.h"
#include "gpu/command_buffer/service/image_factory.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/mailbox_manager_impl.h"
#include "gpu/command_buffer/service/mailbox_manager_sync.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/query_manager.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/command_buffer/service/transfer_buffer_manager.h"
#include "gpu/command_buffer/service/valuebuffer_manager.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image.h"
#include "ui/gl/gl_image_shared_memory.h"
#include "ui/gl/gl_share_group.h"

#if defined(OS_ANDROID)
#include "gpu/command_buffer/service/stream_texture_manager_in_process_android.h"
#include "ui/gl/android/surface_texture.h"
#endif

#if defined(OS_WIN)
#include <windows.h>
#include "base/process/process_handle.h"
#endif
55 namespace gpu {
57 namespace {
59 template <typename T>
60 static void RunTaskWithResult(base::Callback<T(void)> task,
61 T* result,
62 base::WaitableEvent* completion) {
63 *result = task.Run();
64 completion->Signal();
67 class GpuInProcessThread
68 : public base::Thread,
69 public InProcessCommandBuffer::Service,
70 public base::RefCountedThreadSafe<GpuInProcessThread> {
71 public:
72 GpuInProcessThread();
74 void AddRef() const override {
75 base::RefCountedThreadSafe<GpuInProcessThread>::AddRef();
77 void Release() const override {
78 base::RefCountedThreadSafe<GpuInProcessThread>::Release();
81 void ScheduleTask(const base::Closure& task) override;
82 void ScheduleIdleWork(const base::Closure& callback) override;
83 bool UseVirtualizedGLContexts() override { return false; }
84 scoped_refptr<gles2::ShaderTranslatorCache> shader_translator_cache()
85 override;
87 private:
88 ~GpuInProcessThread() override;
89 friend class base::RefCountedThreadSafe<GpuInProcessThread>;
91 scoped_refptr<gpu::gles2::ShaderTranslatorCache> shader_translator_cache_;
92 DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread);
95 GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") {
96 Start();
99 GpuInProcessThread::~GpuInProcessThread() {
100 Stop();
103 void GpuInProcessThread::ScheduleTask(const base::Closure& task) {
104 task_runner()->PostTask(FROM_HERE, task);
107 void GpuInProcessThread::ScheduleIdleWork(const base::Closure& callback) {
108 // Match delay with GpuCommandBufferStub.
109 task_runner()->PostDelayedTask(FROM_HERE, callback,
110 base::TimeDelta::FromMilliseconds(2));
113 scoped_refptr<gles2::ShaderTranslatorCache>
114 GpuInProcessThread::shader_translator_cache() {
115 if (!shader_translator_cache_.get())
116 shader_translator_cache_ = new gpu::gles2::ShaderTranslatorCache;
117 return shader_translator_cache_;
120 struct GpuInProcessThreadHolder {
121 GpuInProcessThreadHolder() : gpu_thread(new GpuInProcessThread) {}
122 scoped_refptr<InProcessCommandBuffer::Service> gpu_thread;
125 base::LazyInstance<GpuInProcessThreadHolder> g_default_service =
126 LAZY_INSTANCE_INITIALIZER;
128 class ScopedEvent {
129 public:
130 explicit ScopedEvent(base::WaitableEvent* event) : event_(event) {}
131 ~ScopedEvent() { event_->Signal(); }
133 private:
134 base::WaitableEvent* event_;
137 // This wrapper adds the WaitSyncPoint which allows waiting on a sync point
138 // on the service thread, implemented using a condition variable.
139 class SyncPointManagerWrapper {
140 public:
141 SyncPointManagerWrapper();
143 uint32 GenerateSyncPoint();
144 void RetireSyncPoint(uint32 sync_point);
145 void AddSyncPointCallback(uint32 sync_point, const base::Closure& callback);
147 void WaitSyncPoint(uint32 sync_point);
149 private:
150 void OnSyncPointRetired();
152 const scoped_refptr<SyncPointManager> manager_;
153 base::Lock retire_lock_;
154 base::ConditionVariable retire_cond_var_;
156 DISALLOW_COPY_AND_ASSIGN(SyncPointManagerWrapper);
159 SyncPointManagerWrapper::SyncPointManagerWrapper()
160 : manager_(SyncPointManager::Create(true)),
161 retire_cond_var_(&retire_lock_) {
164 uint32 SyncPointManagerWrapper::GenerateSyncPoint() {
165 uint32 sync_point = manager_->GenerateSyncPoint();
166 manager_->AddSyncPointCallback(
167 sync_point, base::Bind(&SyncPointManagerWrapper::OnSyncPointRetired,
168 base::Unretained(this)));
169 return sync_point;
172 void SyncPointManagerWrapper::RetireSyncPoint(uint32 sync_point) {
173 manager_->RetireSyncPoint(sync_point);
176 void SyncPointManagerWrapper::AddSyncPointCallback(
177 uint32 sync_point,
178 const base::Closure& callback) {
179 manager_->AddSyncPointCallback(sync_point, callback);
182 void SyncPointManagerWrapper::WaitSyncPoint(uint32 sync_point) {
183 base::AutoLock lock(retire_lock_);
184 while (!manager_->IsSyncPointRetired(sync_point)) {
185 retire_cond_var_.Wait();
189 void SyncPointManagerWrapper::OnSyncPointRetired() {
190 base::AutoLock lock(retire_lock_);
191 retire_cond_var_.Broadcast();
194 base::LazyInstance<SyncPointManagerWrapper> g_sync_point_manager =
195 LAZY_INSTANCE_INITIALIZER;
197 base::SharedMemoryHandle ShareToGpuThread(
198 base::SharedMemoryHandle source_handle) {
199 #if defined(OS_WIN)
200 // Windows needs to explicitly duplicate the handle to current process.
201 base::SharedMemoryHandle target_handle;
202 if (!DuplicateHandle(GetCurrentProcess(),
203 source_handle,
204 GetCurrentProcess(),
205 &target_handle,
206 FILE_GENERIC_READ | FILE_GENERIC_WRITE,
207 FALSE,
208 0)) {
209 return base::SharedMemory::NULLHandle();
212 return target_handle;
213 #else
214 int duped_handle = HANDLE_EINTR(dup(source_handle.fd));
215 if (duped_handle < 0)
216 return base::SharedMemory::NULLHandle();
218 return base::FileDescriptor(duped_handle, true);
219 #endif
222 gfx::GpuMemoryBufferHandle ShareGpuMemoryBufferToGpuThread(
223 const gfx::GpuMemoryBufferHandle& source_handle,
224 bool* requires_sync_point) {
225 switch (source_handle.type) {
226 case gfx::SHARED_MEMORY_BUFFER: {
227 gfx::GpuMemoryBufferHandle handle;
228 handle.type = gfx::SHARED_MEMORY_BUFFER;
229 handle.handle = ShareToGpuThread(source_handle.handle);
230 *requires_sync_point = false;
231 return handle;
233 case gfx::IO_SURFACE_BUFFER:
234 case gfx::SURFACE_TEXTURE_BUFFER:
235 case gfx::OZONE_NATIVE_BUFFER:
236 *requires_sync_point = true;
237 return source_handle;
238 default:
239 NOTREACHED();
240 return gfx::GpuMemoryBufferHandle();
244 } // anonyous namespace
246 InProcessCommandBuffer::Service::Service() {}
248 InProcessCommandBuffer::Service::~Service() {}
250 scoped_refptr<gfx::GLShareGroup>
251 InProcessCommandBuffer::Service::share_group() {
252 if (!share_group_.get())
253 share_group_ = new gfx::GLShareGroup;
254 return share_group_;
257 scoped_refptr<gles2::MailboxManager>
258 InProcessCommandBuffer::Service::mailbox_manager() {
259 if (!mailbox_manager_.get()) {
260 if (base::CommandLine::ForCurrentProcess()->HasSwitch(
261 switches::kEnableThreadedTextureMailboxes)) {
262 mailbox_manager_ = new gles2::MailboxManagerSync();
263 } else {
264 mailbox_manager_ = new gles2::MailboxManagerImpl();
267 return mailbox_manager_;
270 scoped_refptr<gles2::SubscriptionRefSet>
271 InProcessCommandBuffer::Service::subscription_ref_set() {
272 if (!subscription_ref_set_.get()) {
273 subscription_ref_set_ = new gles2::SubscriptionRefSet();
275 return subscription_ref_set_;
278 scoped_refptr<ValueStateMap>
279 InProcessCommandBuffer::Service::pending_valuebuffer_state() {
280 if (!pending_valuebuffer_state_.get()) {
281 pending_valuebuffer_state_ = new ValueStateMap();
283 return pending_valuebuffer_state_;
286 InProcessCommandBuffer::InProcessCommandBuffer(
287 const scoped_refptr<Service>& service)
288 : context_lost_(false),
289 idle_work_pending_(false),
290 image_factory_(nullptr),
291 last_put_offset_(-1),
292 gpu_memory_buffer_manager_(nullptr),
293 flush_event_(false, false),
294 service_(service.get() ? service : g_default_service.Get().gpu_thread),
295 gpu_thread_weak_ptr_factory_(this) {
296 DCHECK(service_.get());
297 next_image_id_.GetNext();
300 InProcessCommandBuffer::~InProcessCommandBuffer() {
301 Destroy();
304 void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) {
305 CheckSequencedThread();
306 DCHECK(!surface_->IsOffscreen());
307 surface_->Resize(size);
310 bool InProcessCommandBuffer::MakeCurrent() {
311 CheckSequencedThread();
312 command_buffer_lock_.AssertAcquired();
314 if (!context_lost_ && decoder_->MakeCurrent())
315 return true;
316 DLOG(ERROR) << "Context lost because MakeCurrent failed.";
317 command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
318 command_buffer_->SetParseError(gpu::error::kLostContext);
319 return false;
322 void InProcessCommandBuffer::PumpCommands() {
323 CheckSequencedThread();
324 command_buffer_lock_.AssertAcquired();
326 if (!MakeCurrent())
327 return;
329 gpu_scheduler_->PutChanged();
332 bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id) {
333 CheckSequencedThread();
334 command_buffer_lock_.AssertAcquired();
335 command_buffer_->SetGetBuffer(transfer_buffer_id);
336 return true;
339 bool InProcessCommandBuffer::Initialize(
340 scoped_refptr<gfx::GLSurface> surface,
341 bool is_offscreen,
342 gfx::AcceleratedWidget window,
343 const gfx::Size& size,
344 const std::vector<int32>& attribs,
345 gfx::GpuPreference gpu_preference,
346 const base::Closure& context_lost_callback,
347 InProcessCommandBuffer* share_group,
348 GpuMemoryBufferManager* gpu_memory_buffer_manager,
349 ImageFactory* image_factory) {
350 DCHECK(!share_group || service_.get() == share_group->service_.get());
351 context_lost_callback_ = WrapCallback(context_lost_callback);
353 if (surface.get()) {
354 // GPU thread must be the same as client thread due to GLSurface not being
355 // thread safe.
356 sequence_checker_.reset(new base::SequenceChecker);
357 surface_ = surface;
360 gpu::Capabilities capabilities;
361 InitializeOnGpuThreadParams params(is_offscreen,
362 window,
363 size,
364 attribs,
365 gpu_preference,
366 &capabilities,
367 share_group,
368 image_factory);
370 base::Callback<bool(void)> init_task =
371 base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread,
372 base::Unretained(this),
373 params);
375 base::WaitableEvent completion(true, false);
376 bool result = false;
377 QueueTask(
378 base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion));
379 completion.Wait();
381 gpu_memory_buffer_manager_ = gpu_memory_buffer_manager;
383 if (result) {
384 capabilities_ = capabilities;
385 capabilities_.image = capabilities_.image && gpu_memory_buffer_manager_;
388 return result;
391 bool InProcessCommandBuffer::InitializeOnGpuThread(
392 const InitializeOnGpuThreadParams& params) {
393 CheckSequencedThread();
394 gpu_thread_weak_ptr_ = gpu_thread_weak_ptr_factory_.GetWeakPtr();
396 DCHECK(params.size.width() >= 0 && params.size.height() >= 0);
398 TransferBufferManager* manager = new TransferBufferManager();
399 transfer_buffer_manager_.reset(manager);
400 manager->Initialize();
402 scoped_ptr<CommandBufferService> command_buffer(
403 new CommandBufferService(transfer_buffer_manager_.get()));
404 command_buffer->SetPutOffsetChangeCallback(base::Bind(
405 &InProcessCommandBuffer::PumpCommands, gpu_thread_weak_ptr_));
406 command_buffer->SetParseErrorCallback(base::Bind(
407 &InProcessCommandBuffer::OnContextLost, gpu_thread_weak_ptr_));
409 if (!command_buffer->Initialize()) {
410 LOG(ERROR) << "Could not initialize command buffer.";
411 DestroyOnGpuThread();
412 return false;
415 gl_share_group_ = params.context_group
416 ? params.context_group->gl_share_group_
417 : service_->share_group();
419 #if defined(OS_ANDROID)
420 stream_texture_manager_.reset(new StreamTextureManagerInProcess);
421 #endif
423 bool bind_generates_resource = false;
424 decoder_.reset(gles2::GLES2Decoder::Create(
425 params.context_group
426 ? params.context_group->decoder_->GetContextGroup()
427 : new gles2::ContextGroup(service_->mailbox_manager(),
428 NULL,
429 service_->shader_translator_cache(),
430 NULL,
431 service_->subscription_ref_set(),
432 service_->pending_valuebuffer_state(),
433 bind_generates_resource)));
435 gpu_scheduler_.reset(
436 new GpuScheduler(command_buffer.get(), decoder_.get(), decoder_.get()));
437 command_buffer->SetGetBufferChangeCallback(base::Bind(
438 &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
439 command_buffer_ = command_buffer.Pass();
441 decoder_->set_engine(gpu_scheduler_.get());
443 if (!surface_.get()) {
444 if (params.is_offscreen)
445 surface_ = gfx::GLSurface::CreateOffscreenGLSurface(params.size);
446 else
447 surface_ = gfx::GLSurface::CreateViewGLSurface(params.window);
450 if (!surface_.get()) {
451 LOG(ERROR) << "Could not create GLSurface.";
452 DestroyOnGpuThread();
453 return false;
456 if (service_->UseVirtualizedGLContexts() ||
457 decoder_->GetContextGroup()
458 ->feature_info()
459 ->workarounds()
460 .use_virtualized_gl_contexts) {
461 context_ = gl_share_group_->GetSharedContext();
462 if (!context_.get()) {
463 context_ = gfx::GLContext::CreateGLContext(
464 gl_share_group_.get(), surface_.get(), params.gpu_preference);
465 gl_share_group_->SetSharedContext(context_.get());
468 context_ = new GLContextVirtual(
469 gl_share_group_.get(), context_.get(), decoder_->AsWeakPtr());
470 if (context_->Initialize(surface_.get(), params.gpu_preference)) {
471 VLOG(1) << "Created virtual GL context.";
472 } else {
473 context_ = NULL;
475 } else {
476 context_ = gfx::GLContext::CreateGLContext(
477 gl_share_group_.get(), surface_.get(), params.gpu_preference);
480 if (!context_.get()) {
481 LOG(ERROR) << "Could not create GLContext.";
482 DestroyOnGpuThread();
483 return false;
486 if (!context_->MakeCurrent(surface_.get())) {
487 LOG(ERROR) << "Could not make context current.";
488 DestroyOnGpuThread();
489 return false;
492 gles2::DisallowedFeatures disallowed_features;
493 disallowed_features.gpu_memory_manager = true;
494 if (!decoder_->Initialize(surface_,
495 context_,
496 params.is_offscreen,
497 params.size,
498 disallowed_features,
499 params.attribs)) {
500 LOG(ERROR) << "Could not initialize decoder.";
501 DestroyOnGpuThread();
502 return false;
504 *params.capabilities = decoder_->GetCapabilities();
506 if (!params.is_offscreen) {
507 decoder_->SetResizeCallback(base::Bind(
508 &InProcessCommandBuffer::OnResizeView, gpu_thread_weak_ptr_));
510 decoder_->SetWaitSyncPointCallback(
511 base::Bind(&InProcessCommandBuffer::WaitSyncPointOnGpuThread,
512 base::Unretained(this)));
514 image_factory_ = params.image_factory;
516 return true;
519 void InProcessCommandBuffer::Destroy() {
520 CheckSequencedThread();
522 base::WaitableEvent completion(true, false);
523 bool result = false;
524 base::Callback<bool(void)> destroy_task = base::Bind(
525 &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
526 QueueTask(
527 base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion));
528 completion.Wait();
531 bool InProcessCommandBuffer::DestroyOnGpuThread() {
532 CheckSequencedThread();
533 gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs();
534 command_buffer_.reset();
535 // Clean up GL resources if possible.
536 bool have_context = context_.get() && context_->MakeCurrent(surface_.get());
537 if (decoder_) {
538 decoder_->Destroy(have_context);
539 decoder_.reset();
541 context_ = NULL;
542 surface_ = NULL;
543 gl_share_group_ = NULL;
544 #if defined(OS_ANDROID)
545 stream_texture_manager_.reset();
546 #endif
548 return true;
551 void InProcessCommandBuffer::CheckSequencedThread() {
552 DCHECK(!sequence_checker_ ||
553 sequence_checker_->CalledOnValidSequencedThread());
556 void InProcessCommandBuffer::OnContextLost() {
557 CheckSequencedThread();
558 if (!context_lost_callback_.is_null()) {
559 context_lost_callback_.Run();
560 context_lost_callback_.Reset();
563 context_lost_ = true;
566 CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
567 CheckSequencedThread();
568 base::AutoLock lock(state_after_last_flush_lock_);
569 if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U)
570 last_state_ = state_after_last_flush_;
571 return last_state_;
574 CommandBuffer::State InProcessCommandBuffer::GetLastState() {
575 CheckSequencedThread();
576 return last_state_;
579 int32 InProcessCommandBuffer::GetLastToken() {
580 CheckSequencedThread();
581 GetStateFast();
582 return last_state_.token;
585 void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) {
586 CheckSequencedThread();
587 ScopedEvent handle_flush(&flush_event_);
588 base::AutoLock lock(command_buffer_lock_);
589 command_buffer_->Flush(put_offset);
591 // Update state before signaling the flush event.
592 base::AutoLock lock(state_after_last_flush_lock_);
593 state_after_last_flush_ = command_buffer_->GetLastState();
595 DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) ||
596 (error::IsError(state_after_last_flush_.error) && context_lost_));
598 // If we've processed all pending commands but still have pending queries,
599 // pump idle work until the query is passed.
600 if (put_offset == state_after_last_flush_.get_offset &&
601 gpu_scheduler_->HasMoreWork()) {
602 ScheduleIdleWorkOnGpuThread();
606 void InProcessCommandBuffer::PerformIdleWork() {
607 CheckSequencedThread();
608 idle_work_pending_ = false;
609 base::AutoLock lock(command_buffer_lock_);
610 if (MakeCurrent() && gpu_scheduler_->HasMoreWork()) {
611 gpu_scheduler_->PerformIdleWork();
612 ScheduleIdleWorkOnGpuThread();
616 void InProcessCommandBuffer::ScheduleIdleWorkOnGpuThread() {
617 CheckSequencedThread();
618 if (idle_work_pending_)
619 return;
620 idle_work_pending_ = true;
621 service_->ScheduleIdleWork(
622 base::Bind(&InProcessCommandBuffer::PerformIdleWork,
623 gpu_thread_weak_ptr_));
626 void InProcessCommandBuffer::Flush(int32 put_offset) {
627 CheckSequencedThread();
628 if (last_state_.error != gpu::error::kNoError)
629 return;
631 if (last_put_offset_ == put_offset)
632 return;
634 last_put_offset_ = put_offset;
635 base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
636 gpu_thread_weak_ptr_,
637 put_offset);
638 QueueTask(task);
641 void InProcessCommandBuffer::OrderingBarrier(int32 put_offset) {
642 Flush(put_offset);
645 void InProcessCommandBuffer::WaitForTokenInRange(int32 start, int32 end) {
646 CheckSequencedThread();
647 while (!InRange(start, end, GetLastToken()) &&
648 last_state_.error == gpu::error::kNoError)
649 flush_event_.Wait();
652 void InProcessCommandBuffer::WaitForGetOffsetInRange(int32 start, int32 end) {
653 CheckSequencedThread();
655 GetStateFast();
656 while (!InRange(start, end, last_state_.get_offset) &&
657 last_state_.error == gpu::error::kNoError) {
658 flush_event_.Wait();
659 GetStateFast();
663 void InProcessCommandBuffer::SetGetBuffer(int32 shm_id) {
664 CheckSequencedThread();
665 if (last_state_.error != gpu::error::kNoError)
666 return;
669 base::AutoLock lock(command_buffer_lock_);
670 command_buffer_->SetGetBuffer(shm_id);
671 last_put_offset_ = 0;
674 base::AutoLock lock(state_after_last_flush_lock_);
675 state_after_last_flush_ = command_buffer_->GetLastState();
679 scoped_refptr<Buffer> InProcessCommandBuffer::CreateTransferBuffer(size_t size,
680 int32* id) {
681 CheckSequencedThread();
682 base::AutoLock lock(command_buffer_lock_);
683 return command_buffer_->CreateTransferBuffer(size, id);
686 void InProcessCommandBuffer::DestroyTransferBuffer(int32 id) {
687 CheckSequencedThread();
688 base::Closure task =
689 base::Bind(&InProcessCommandBuffer::DestroyTransferBufferOnGpuThread,
690 base::Unretained(this),
691 id);
693 QueueTask(task);
696 void InProcessCommandBuffer::DestroyTransferBufferOnGpuThread(int32 id) {
697 base::AutoLock lock(command_buffer_lock_);
698 command_buffer_->DestroyTransferBuffer(id);
701 gpu::Capabilities InProcessCommandBuffer::GetCapabilities() {
702 return capabilities_;
705 int32 InProcessCommandBuffer::CreateImage(ClientBuffer buffer,
706 size_t width,
707 size_t height,
708 unsigned internalformat) {
709 CheckSequencedThread();
711 DCHECK(gpu_memory_buffer_manager_);
712 gfx::GpuMemoryBuffer* gpu_memory_buffer =
713 gpu_memory_buffer_manager_->GpuMemoryBufferFromClientBuffer(buffer);
714 DCHECK(gpu_memory_buffer);
716 int32 new_id = next_image_id_.GetNext();
718 DCHECK(gpu::ImageFactory::IsGpuMemoryBufferFormatSupported(
719 gpu_memory_buffer->GetFormat(), capabilities_));
720 DCHECK(gpu::ImageFactory::IsImageFormatCompatibleWithGpuMemoryBufferFormat(
721 internalformat, gpu_memory_buffer->GetFormat()));
723 // This handle is owned by the GPU thread and must be passed to it or it
724 // will leak. In otherwords, do not early out on error between here and the
725 // queuing of the CreateImage task below.
726 bool requires_sync_point = false;
727 gfx::GpuMemoryBufferHandle handle =
728 ShareGpuMemoryBufferToGpuThread(gpu_memory_buffer->GetHandle(),
729 &requires_sync_point);
731 QueueTask(base::Bind(&InProcessCommandBuffer::CreateImageOnGpuThread,
732 base::Unretained(this),
733 new_id,
734 handle,
735 gfx::Size(width, height),
736 gpu_memory_buffer->GetFormat(),
737 internalformat));
739 if (requires_sync_point) {
740 gpu_memory_buffer_manager_->SetDestructionSyncPoint(gpu_memory_buffer,
741 InsertSyncPoint());
744 return new_id;
747 void InProcessCommandBuffer::CreateImageOnGpuThread(
748 int32 id,
749 const gfx::GpuMemoryBufferHandle& handle,
750 const gfx::Size& size,
751 gfx::GpuMemoryBuffer::Format format,
752 uint32 internalformat) {
753 if (!decoder_)
754 return;
756 gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
757 DCHECK(image_manager);
758 if (image_manager->LookupImage(id)) {
759 LOG(ERROR) << "Image already exists with same ID.";
760 return;
763 switch (handle.type) {
764 case gfx::SHARED_MEMORY_BUFFER: {
765 scoped_refptr<gfx::GLImageSharedMemory> image(
766 new gfx::GLImageSharedMemory(size, internalformat));
767 if (!image->Initialize(handle, format)) {
768 LOG(ERROR) << "Failed to initialize image.";
769 return;
772 image_manager->AddImage(image.get(), id);
773 break;
775 default: {
776 if (!image_factory_) {
777 LOG(ERROR) << "Image factory missing but required by buffer type.";
778 return;
781 // Note: this assumes that client ID is always 0.
782 const int kClientId = 0;
784 scoped_refptr<gfx::GLImage> image =
785 image_factory_->CreateImageForGpuMemoryBuffer(
786 handle, size, format, internalformat, kClientId);
787 if (!image.get()) {
788 LOG(ERROR) << "Failed to create image for buffer.";
789 return;
792 image_manager->AddImage(image.get(), id);
793 break;
798 void InProcessCommandBuffer::DestroyImage(int32 id) {
799 CheckSequencedThread();
801 QueueTask(base::Bind(&InProcessCommandBuffer::DestroyImageOnGpuThread,
802 base::Unretained(this),
803 id));
806 void InProcessCommandBuffer::DestroyImageOnGpuThread(int32 id) {
807 if (!decoder_)
808 return;
810 gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
811 DCHECK(image_manager);
812 if (!image_manager->LookupImage(id)) {
813 LOG(ERROR) << "Image with ID doesn't exist.";
814 return;
817 image_manager->RemoveImage(id);
820 int32 InProcessCommandBuffer::CreateGpuMemoryBufferImage(
821 size_t width,
822 size_t height,
823 unsigned internalformat,
824 unsigned usage) {
825 CheckSequencedThread();
827 DCHECK(gpu_memory_buffer_manager_);
828 scoped_ptr<gfx::GpuMemoryBuffer> buffer(
829 gpu_memory_buffer_manager_->AllocateGpuMemoryBuffer(
830 gfx::Size(width, height),
831 gpu::ImageFactory::ImageFormatToGpuMemoryBufferFormat(internalformat),
832 gpu::ImageFactory::ImageUsageToGpuMemoryBufferUsage(usage)));
833 if (!buffer)
834 return -1;
836 return CreateImage(buffer->AsClientBuffer(), width, height, internalformat);
839 uint32 InProcessCommandBuffer::InsertSyncPoint() {
840 uint32 sync_point = g_sync_point_manager.Get().GenerateSyncPoint();
841 QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread,
842 base::Unretained(this),
843 sync_point));
844 return sync_point;
847 uint32 InProcessCommandBuffer::InsertFutureSyncPoint() {
848 return g_sync_point_manager.Get().GenerateSyncPoint();
851 void InProcessCommandBuffer::RetireSyncPoint(uint32 sync_point) {
852 QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread,
853 base::Unretained(this),
854 sync_point));
857 void InProcessCommandBuffer::RetireSyncPointOnGpuThread(uint32 sync_point) {
858 gles2::MailboxManager* mailbox_manager =
859 decoder_->GetContextGroup()->mailbox_manager();
860 if (mailbox_manager->UsesSync()) {
861 bool make_current_success = false;
863 base::AutoLock lock(command_buffer_lock_);
864 make_current_success = MakeCurrent();
866 if (make_current_success)
867 mailbox_manager->PushTextureUpdates(sync_point);
869 g_sync_point_manager.Get().RetireSyncPoint(sync_point);
872 void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point,
873 const base::Closure& callback) {
874 CheckSequencedThread();
875 QueueTask(base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread,
876 base::Unretained(this),
877 sync_point,
878 WrapCallback(callback)));
881 bool InProcessCommandBuffer::WaitSyncPointOnGpuThread(unsigned sync_point) {
882 g_sync_point_manager.Get().WaitSyncPoint(sync_point);
883 gles2::MailboxManager* mailbox_manager =
884 decoder_->GetContextGroup()->mailbox_manager();
885 mailbox_manager->PullTextureUpdates(sync_point);
886 return true;
889 void InProcessCommandBuffer::SignalSyncPointOnGpuThread(
890 unsigned sync_point,
891 const base::Closure& callback) {
892 g_sync_point_manager.Get().AddSyncPointCallback(sync_point, callback);
895 void InProcessCommandBuffer::SignalQuery(unsigned query_id,
896 const base::Closure& callback) {
897 CheckSequencedThread();
898 QueueTask(base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread,
899 base::Unretained(this),
900 query_id,
901 WrapCallback(callback)));
904 void InProcessCommandBuffer::SignalQueryOnGpuThread(
905 unsigned query_id,
906 const base::Closure& callback) {
907 gles2::QueryManager* query_manager_ = decoder_->GetQueryManager();
908 DCHECK(query_manager_);
910 gles2::QueryManager::Query* query = query_manager_->GetQuery(query_id);
911 if (!query)
912 callback.Run();
913 else
914 query->AddCallback(callback);
917 void InProcessCommandBuffer::SetSurfaceVisible(bool visible) {}
919 uint32 InProcessCommandBuffer::CreateStreamTexture(uint32 texture_id) {
920 base::WaitableEvent completion(true, false);
921 uint32 stream_id = 0;
922 base::Callback<uint32(void)> task =
923 base::Bind(&InProcessCommandBuffer::CreateStreamTextureOnGpuThread,
924 base::Unretained(this),
925 texture_id);
926 QueueTask(
927 base::Bind(&RunTaskWithResult<uint32>, task, &stream_id, &completion));
928 completion.Wait();
929 return stream_id;
932 void InProcessCommandBuffer::SetLock(base::Lock*) {
935 uint32 InProcessCommandBuffer::CreateStreamTextureOnGpuThread(
936 uint32 client_texture_id) {
937 #if defined(OS_ANDROID)
938 return stream_texture_manager_->CreateStreamTexture(
939 client_texture_id, decoder_->GetContextGroup()->texture_manager());
940 #else
941 return 0;
942 #endif
945 gpu::error::Error InProcessCommandBuffer::GetLastError() {
946 CheckSequencedThread();
947 return last_state_.error;
950 bool InProcessCommandBuffer::Initialize() {
951 NOTREACHED();
952 return false;
955 namespace {
957 void PostCallback(
958 const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
959 const base::Closure& callback) {
960 // The task_runner.get() check is to support using InProcessCommandBuffer on
961 // a thread without a message loop.
962 if (task_runner.get() && !task_runner->BelongsToCurrentThread()) {
963 task_runner->PostTask(FROM_HERE, callback);
964 } else {
965 callback.Run();
969 void RunOnTargetThread(scoped_ptr<base::Closure> callback) {
970 DCHECK(callback.get());
971 callback->Run();
974 } // anonymous namespace
976 base::Closure InProcessCommandBuffer::WrapCallback(
977 const base::Closure& callback) {
978 // Make sure the callback gets deleted on the target thread by passing
979 // ownership.
980 scoped_ptr<base::Closure> scoped_callback(new base::Closure(callback));
981 base::Closure callback_on_client_thread =
982 base::Bind(&RunOnTargetThread, base::Passed(&scoped_callback));
983 base::Closure wrapped_callback =
984 base::Bind(&PostCallback, base::ThreadTaskRunnerHandle::IsSet()
985 ? base::ThreadTaskRunnerHandle::Get()
986 : nullptr,
987 callback_on_client_thread);
988 return wrapped_callback;
991 #if defined(OS_ANDROID)
992 scoped_refptr<gfx::SurfaceTexture>
993 InProcessCommandBuffer::GetSurfaceTexture(uint32 stream_id) {
994 DCHECK(stream_texture_manager_);
995 return stream_texture_manager_->GetSurfaceTexture(stream_id);
997 #endif
999 } // namespace gpu