Cleanup: Update the path to gfx size headers.
[chromium-blink-merge.git] / gpu / command_buffer / service / in_process_command_buffer.cc
blobc3123457f64ff1c42bbffcef368c49d86ba3f149
1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "gpu/command_buffer/service/in_process_command_buffer.h"
7 #include <queue>
8 #include <set>
9 #include <utility>
11 #include "base/bind.h"
12 #include "base/bind_helpers.h"
13 #include "base/command_line.h"
14 #include "base/lazy_instance.h"
15 #include "base/logging.h"
16 #include "base/memory/weak_ptr.h"
17 #include "base/message_loop/message_loop_proxy.h"
18 #include "base/sequence_checker.h"
19 #include "base/synchronization/condition_variable.h"
20 #include "base/threading/thread.h"
21 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
22 #include "gpu/command_buffer/common/value_state.h"
23 #include "gpu/command_buffer/service/command_buffer_service.h"
24 #include "gpu/command_buffer/service/context_group.h"
25 #include "gpu/command_buffer/service/gl_context_virtual.h"
26 #include "gpu/command_buffer/service/gpu_scheduler.h"
27 #include "gpu/command_buffer/service/gpu_switches.h"
28 #include "gpu/command_buffer/service/image_factory.h"
29 #include "gpu/command_buffer/service/image_manager.h"
30 #include "gpu/command_buffer/service/mailbox_manager_impl.h"
31 #include "gpu/command_buffer/service/mailbox_manager_sync.h"
32 #include "gpu/command_buffer/service/memory_tracking.h"
33 #include "gpu/command_buffer/service/query_manager.h"
34 #include "gpu/command_buffer/service/transfer_buffer_manager.h"
35 #include "gpu/command_buffer/service/valuebuffer_manager.h"
36 #include "ui/gfx/geometry/size.h"
37 #include "ui/gl/gl_context.h"
38 #include "ui/gl/gl_image.h"
39 #include "ui/gl/gl_share_group.h"
41 #if defined(OS_ANDROID)
42 #include "gpu/command_buffer/service/stream_texture_manager_in_process_android.h"
43 #include "ui/gl/android/surface_texture.h"
44 #endif
46 #if defined(OS_WIN)
47 #include <windows.h>
48 #include "base/process/process_handle.h"
49 #endif
51 namespace gpu {
53 namespace {
55 template <typename T>
56 static void RunTaskWithResult(base::Callback<T(void)> task,
57 T* result,
58 base::WaitableEvent* completion) {
59 *result = task.Run();
60 completion->Signal();
// Default Service implementation: a dedicated, lazily-created "GpuThread"
// shared by all InProcessCommandBuffers that do not supply their own
// service. Ref-counted so it lives as long as any client holds it.
class GpuInProcessThread
    : public base::Thread,
      public InProcessCommandBuffer::Service,
      public base::RefCountedThreadSafe<GpuInProcessThread> {
 public:
  GpuInProcessThread();

  // Service is ref-counted through RefCountedThreadSafe; forward the
  // AddRef/Release overrides to it.
  void AddRef() const override {
    base::RefCountedThreadSafe<GpuInProcessThread>::AddRef();
  }
  void Release() const override {
    base::RefCountedThreadSafe<GpuInProcessThread>::Release();
  }

  void ScheduleTask(const base::Closure& task) override;
  void ScheduleIdleWork(const base::Closure& callback) override;
  bool UseVirtualizedGLContexts() override { return false; }
  scoped_refptr<gles2::ShaderTranslatorCache> shader_translator_cache()
      override;

 private:
  ~GpuInProcessThread() override;
  friend class base::RefCountedThreadSafe<GpuInProcessThread>;

  // Created on first use; shared by all contexts running on this thread.
  scoped_refptr<gpu::gles2::ShaderTranslatorCache> shader_translator_cache_;
  DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread);
};

GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") {
  // The thread starts immediately; it is stopped in the destructor.
  Start();
}

GpuInProcessThread::~GpuInProcessThread() {
  Stop();
}

void GpuInProcessThread::ScheduleTask(const base::Closure& task) {
  message_loop()->PostTask(FROM_HERE, task);
}

void GpuInProcessThread::ScheduleIdleWork(const base::Closure& callback) {
  // Idle work is delayed slightly so it runs only when the queue drains.
  message_loop()->PostDelayedTask(
      FROM_HERE, callback, base::TimeDelta::FromMilliseconds(5));
}

scoped_refptr<gles2::ShaderTranslatorCache>
GpuInProcessThread::shader_translator_cache() {
  // Lazily create the cache on first request.
  if (!shader_translator_cache_.get())
    shader_translator_cache_ = new gpu::gles2::ShaderTranslatorCache;
  return shader_translator_cache_;
}

// Holder so the default service can live in a LazyInstance (which requires a
// default-constructible type) while still being ref-counted.
struct GpuInProcessThreadHolder {
  GpuInProcessThreadHolder() : gpu_thread(new GpuInProcessThread) {}
  scoped_refptr<InProcessCommandBuffer::Service> gpu_thread;
};

base::LazyInstance<GpuInProcessThreadHolder> g_default_service =
    LAZY_INSTANCE_INITIALIZER;
123 class ScopedEvent {
124 public:
125 ScopedEvent(base::WaitableEvent* event) : event_(event) {}
126 ~ScopedEvent() { event_->Signal(); }
128 private:
129 base::WaitableEvent* event_;
// Process-global bookkeeping for in-process sync points: generation,
// retirement, and blocking waits. Thread-safe via |lock_|.
class SyncPointManager {
 public:
  SyncPointManager();
  ~SyncPointManager();

  uint32 GenerateSyncPoint();
  void RetireSyncPoint(uint32 sync_point);

  bool IsSyncPointPassed(uint32 sync_point);
  void WaitSyncPoint(uint32 sync_point);

 private:
  // This lock protects access to pending_sync_points_ and next_sync_point_ and
  // is used with the ConditionVariable to signal when a sync point is retired.
  base::Lock lock_;
  std::set<uint32> pending_sync_points_;
  uint32 next_sync_point_;
  base::ConditionVariable cond_var_;
};

// Sync points start at 1; 0 is never handed out.
SyncPointManager::SyncPointManager() : next_sync_point_(1), cond_var_(&lock_) {}

SyncPointManager::~SyncPointManager() {
  // All generated sync points must have been retired by now.
  DCHECK_EQ(pending_sync_points_.size(), 0U);
}

uint32 SyncPointManager::GenerateSyncPoint() {
  base::AutoLock lock(lock_);
  uint32 sync_point = next_sync_point_++;
  // NOTE(review): assumes the uint32 counter never wraps around into a
  // still-pending value; the DCHECK below would catch a collision.
  DCHECK_EQ(pending_sync_points_.count(sync_point), 0U);
  pending_sync_points_.insert(sync_point);
  return sync_point;
}

void SyncPointManager::RetireSyncPoint(uint32 sync_point) {
  base::AutoLock lock(lock_);
  DCHECK(pending_sync_points_.count(sync_point));
  pending_sync_points_.erase(sync_point);
  // Wake every waiter; each re-checks its own sync point.
  cond_var_.Broadcast();
}

bool SyncPointManager::IsSyncPointPassed(uint32 sync_point) {
  base::AutoLock lock(lock_);
  return pending_sync_points_.count(sync_point) == 0;
}

void SyncPointManager::WaitSyncPoint(uint32 sync_point) {
  base::AutoLock lock(lock_);
  // Blocks until the sync point is retired (condition-variable loop guards
  // against spurious wakeups).
  while (pending_sync_points_.count(sync_point)) {
    cond_var_.Wait();
  }
}

base::LazyInstance<SyncPointManager> g_sync_point_manager =
    LAZY_INSTANCE_INITIALIZER;
// Duplicates |source_handle| so the GPU thread gets its own reference to the
// shared memory. Returns a null handle on failure; on success the returned
// handle is owned by the caller (the GPU-thread task).
base::SharedMemoryHandle ShareToGpuThread(
    base::SharedMemoryHandle source_handle) {
#if defined(OS_WIN)
  // Windows needs to explicitly duplicate the handle to current process.
  base::SharedMemoryHandle target_handle;
  if (!DuplicateHandle(GetCurrentProcess(),
                       source_handle,
                       GetCurrentProcess(),
                       &target_handle,
                       FILE_GENERIC_READ | FILE_GENERIC_WRITE,
                       FALSE,  // Not inheritable.
                       0)) {
    return base::SharedMemory::NULLHandle();
  }

  return target_handle;
#else
  // POSIX: dup() the fd, retrying on EINTR. The |true| makes the returned
  // FileDescriptor auto-close its fd when consumed.
  int duped_handle = HANDLE_EINTR(dup(source_handle.fd));
  if (duped_handle < 0)
    return base::SharedMemory::NULLHandle();

  return base::FileDescriptor(duped_handle, true);
#endif
}
// Produces a handle the GPU thread can own for |source_handle|. Shared-memory
// buffers get a duplicated handle and need no sync point; native buffer types
// are passed through by value and require a destruction sync point so the
// producer does not release them while the GPU thread still uses them.
gfx::GpuMemoryBufferHandle ShareGpuMemoryBufferToGpuThread(
    const gfx::GpuMemoryBufferHandle& source_handle,
    bool* requires_sync_point) {
  switch (source_handle.type) {
    case gfx::SHARED_MEMORY_BUFFER: {
      gfx::GpuMemoryBufferHandle handle;
      handle.type = gfx::SHARED_MEMORY_BUFFER;
      handle.handle = ShareToGpuThread(source_handle.handle);
      *requires_sync_point = false;
      return handle;
    }
    case gfx::IO_SURFACE_BUFFER:
    case gfx::SURFACE_TEXTURE_BUFFER:
    case gfx::OZONE_NATIVE_BUFFER:
      *requires_sync_point = true;
      return source_handle;
    default:
      NOTREACHED();
      return gfx::GpuMemoryBufferHandle();
  }
}
}  // anonymous namespace
InProcessCommandBuffer::Service::Service() {}

InProcessCommandBuffer::Service::~Service() {}

// Returns the mailbox manager, creating it on first use. The threaded
// texture-mailbox switch selects the synchronizing implementation.
scoped_refptr<gles2::MailboxManager>
InProcessCommandBuffer::Service::mailbox_manager() {
  if (!mailbox_manager_.get()) {
    if (base::CommandLine::ForCurrentProcess()->HasSwitch(
            switches::kEnableThreadedTextureMailboxes)) {
      mailbox_manager_ = new gles2::MailboxManagerSync();
    } else {
      mailbox_manager_ = new gles2::MailboxManagerImpl();
    }
  }
  return mailbox_manager_;
}

// Lazily-created, shared subscription ref set.
scoped_refptr<gles2::SubscriptionRefSet>
InProcessCommandBuffer::Service::subscription_ref_set() {
  if (!subscription_ref_set_.get()) {
    subscription_ref_set_ = new gles2::SubscriptionRefSet();
  }
  return subscription_ref_set_;
}

// Lazily-created, shared pending valuebuffer state map.
scoped_refptr<ValueStateMap>
InProcessCommandBuffer::Service::pending_valuebuffer_state() {
  if (!pending_valuebuffer_state_.get()) {
    pending_valuebuffer_state_ = new ValueStateMap();
  }
  return pending_valuebuffer_state_;
}
// |service| may be null, in which case the process-wide default GPU thread
// (g_default_service) is used.
InProcessCommandBuffer::InProcessCommandBuffer(
    const scoped_refptr<Service>& service)
    : context_lost_(false),
      idle_work_pending_(false),
      image_factory_(nullptr),
      last_put_offset_(-1),
      gpu_memory_buffer_manager_(nullptr),
      flush_event_(false, false),
      service_(service.get() ? service : g_default_service.Get().gpu_thread),
      gpu_thread_weak_ptr_factory_(this) {
  DCHECK(service_.get());
  // Advance the image id counter past its initial value; the result is
  // intentionally discarded (presumably reserving the first id — TODO
  // confirm against image id users).
  next_image_id_.GetNext();
}

InProcessCommandBuffer::~InProcessCommandBuffer() {
  // Blocks until GPU-side teardown completes.
  Destroy();
}
// Decoder resize callback (GPU thread); only meaningful for onscreen
// surfaces. |scale_factor| is currently unused here.
void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) {
  CheckSequencedThread();
  DCHECK(!surface_->IsOffscreen());
  surface_->Resize(size);
}

// Makes the GL context current, or marks the command buffer lost on failure.
// Must be called with command_buffer_lock_ held.
bool InProcessCommandBuffer::MakeCurrent() {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();

  if (!context_lost_ && decoder_->MakeCurrent())
    return true;
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
  command_buffer_->SetParseError(gpu::error::kLostContext);
  return false;
}

// Processes commands up to the current put offset (GPU thread, lock held).
void InProcessCommandBuffer::PumpCommands() {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();

  if (!MakeCurrent())
    return;

  gpu_scheduler_->PutChanged();
}

// Get-buffer-changed callback from CommandBufferService (lock held).
// Always reports success.
bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id) {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();
  command_buffer_->SetGetBuffer(transfer_buffer_id);
  return true;
}
// Client-thread entry point: posts InitializeOnGpuThread to the service and
// blocks until it finishes. Returns false if GPU-side initialization failed.
// |share_group|, if set, must use the same Service as this buffer.
bool InProcessCommandBuffer::Initialize(
    scoped_refptr<gfx::GLSurface> surface,
    bool is_offscreen,
    gfx::AcceleratedWidget window,
    const gfx::Size& size,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    const base::Closure& context_lost_callback,
    InProcessCommandBuffer* share_group,
    GpuMemoryBufferManager* gpu_memory_buffer_manager,
    ImageFactory* image_factory) {
  DCHECK(!share_group || service_.get() == share_group->service_.get());
  context_lost_callback_ = WrapCallback(context_lost_callback);

  if (surface.get()) {
    // GPU thread must be the same as client thread due to GLSurface not being
    // thread safe.
    sequence_checker_.reset(new base::SequenceChecker);
    surface_ = surface;
  }

  gpu::Capabilities capabilities;
  InitializeOnGpuThreadParams params(is_offscreen,
                                     window,
                                     size,
                                     attribs,
                                     gpu_preference,
                                     &capabilities,  // Filled on GPU thread.
                                     share_group,
                                     image_factory);

  base::Callback<bool(void)> init_task =
      base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread,
                 base::Unretained(this),
                 params);

  // Synchronous hand-off: wait for the GPU thread to report success/failure.
  base::WaitableEvent completion(true, false);
  bool result = false;
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion));
  completion.Wait();

  gpu_memory_buffer_manager_ = gpu_memory_buffer_manager;

  if (result) {
    capabilities_ = capabilities;
    // Image support additionally requires a GpuMemoryBufferManager.
    capabilities_.image = capabilities_.image && gpu_memory_buffer_manager_;
  }

  return result;
}
// GPU-thread half of Initialize(): builds the command buffer service,
// decoder, scheduler, surface and context. On any failure it tears down via
// DestroyOnGpuThread() and returns false.
bool InProcessCommandBuffer::InitializeOnGpuThread(
    const InitializeOnGpuThreadParams& params) {
  CheckSequencedThread();
  gpu_thread_weak_ptr_ = gpu_thread_weak_ptr_factory_.GetWeakPtr();

  DCHECK(params.size.width() >= 0 && params.size.height() >= 0);

  TransferBufferManager* manager = new TransferBufferManager();
  transfer_buffer_manager_.reset(manager);
  manager->Initialize();

  scoped_ptr<CommandBufferService> command_buffer(
      new CommandBufferService(transfer_buffer_manager_.get()));
  // Weak pointers: callbacks are dropped once this object is destroyed on
  // the GPU thread.
  command_buffer->SetPutOffsetChangeCallback(base::Bind(
      &InProcessCommandBuffer::PumpCommands, gpu_thread_weak_ptr_));
  command_buffer->SetParseErrorCallback(base::Bind(
      &InProcessCommandBuffer::OnContextLost, gpu_thread_weak_ptr_));

  if (!command_buffer->Initialize()) {
    LOG(ERROR) << "Could not initialize command buffer.";
    DestroyOnGpuThread();
    return false;
  }

  // Share the GL share group and context group with |params.context_group|
  // when given; otherwise create fresh ones.
  gl_share_group_ = params.context_group
                        ? params.context_group->gl_share_group_.get()
                        : new gfx::GLShareGroup;

#if defined(OS_ANDROID)
  stream_texture_manager_.reset(new StreamTextureManagerInProcess);
#endif

  bool bind_generates_resource = false;
  decoder_.reset(gles2::GLES2Decoder::Create(
      params.context_group
          ? params.context_group->decoder_->GetContextGroup()
          : new gles2::ContextGroup(service_->mailbox_manager(),
                                    NULL,
                                    service_->shader_translator_cache(),
                                    NULL,
                                    service_->subscription_ref_set(),
                                    service_->pending_valuebuffer_state(),
                                    bind_generates_resource)));

  gpu_scheduler_.reset(
      new GpuScheduler(command_buffer.get(), decoder_.get(), decoder_.get()));
  command_buffer->SetGetBufferChangeCallback(base::Bind(
      &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
  command_buffer_ = command_buffer.Pass();

  decoder_->set_engine(gpu_scheduler_.get());

  // The surface may already have been supplied by the client in Initialize().
  if (!surface_.get()) {
    if (params.is_offscreen)
      surface_ = gfx::GLSurface::CreateOffscreenGLSurface(params.size);
    else
      surface_ = gfx::GLSurface::CreateViewGLSurface(params.window);
  }

  if (!surface_.get()) {
    LOG(ERROR) << "Could not create GLSurface.";
    DestroyOnGpuThread();
    return false;
  }

  if (service_->UseVirtualizedGLContexts()) {
    // One real context is shared per share group; each command buffer gets a
    // virtual context on top of it.
    context_ = gl_share_group_->GetSharedContext();
    if (!context_.get()) {
      context_ = gfx::GLContext::CreateGLContext(
          gl_share_group_.get(), surface_.get(), params.gpu_preference);
      gl_share_group_->SetSharedContext(context_.get());
    }

    context_ = new GLContextVirtual(
        gl_share_group_.get(), context_.get(), decoder_->AsWeakPtr());
    if (context_->Initialize(surface_.get(), params.gpu_preference)) {
      VLOG(1) << "Created virtual GL context.";
    } else {
      context_ = NULL;
    }
  } else {
    context_ = gfx::GLContext::CreateGLContext(
        gl_share_group_.get(), surface_.get(), params.gpu_preference);
  }

  if (!context_.get()) {
    LOG(ERROR) << "Could not create GLContext.";
    DestroyOnGpuThread();
    return false;
  }

  if (!context_->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Could not make context current.";
    DestroyOnGpuThread();
    return false;
  }

  gles2::DisallowedFeatures disallowed_features;
  disallowed_features.gpu_memory_manager = true;
  if (!decoder_->Initialize(surface_,
                            context_,
                            params.is_offscreen,
                            params.size,
                            disallowed_features,
                            params.attribs)) {
    LOG(ERROR) << "Could not initialize decoder.";
    DestroyOnGpuThread();
    return false;
  }
  *params.capabilities = decoder_->GetCapabilities();

  if (!params.is_offscreen) {
    decoder_->SetResizeCallback(base::Bind(
        &InProcessCommandBuffer::OnResizeView, gpu_thread_weak_ptr_));
  }
  decoder_->SetWaitSyncPointCallback(
      base::Bind(&InProcessCommandBuffer::WaitSyncPointOnGpuThread,
                 base::Unretained(this)));

  image_factory_ = params.image_factory;
  // Image capability additionally requires an ImageFactory.
  params.capabilities->image = params.capabilities->image && image_factory_;

  return true;
}
// Client-thread teardown: posts DestroyOnGpuThread and blocks until it runs.
// Safe to call multiple times (members are reset to null on the GPU side).
void InProcessCommandBuffer::Destroy() {
  CheckSequencedThread();

  base::WaitableEvent completion(true, false);
  bool result = false;
  base::Callback<bool(void)> destroy_task = base::Bind(
      &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion));
  completion.Wait();
}

// GPU-thread teardown. Returns true unconditionally (result is only used to
// satisfy RunTaskWithResult).
bool InProcessCommandBuffer::DestroyOnGpuThread() {
  CheckSequencedThread();
  // Invalidate weak pointers first so no queued callback touches the
  // half-destroyed object.
  gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs();
  command_buffer_.reset();
  // Clean up GL resources if possible.
  bool have_context = context_.get() && context_->MakeCurrent(surface_.get());
  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }
  context_ = NULL;
  surface_ = NULL;
  gl_share_group_ = NULL;
#if defined(OS_ANDROID)
  stream_texture_manager_.reset();
#endif

  return true;
}

// No-op unless a client-supplied surface forced single-threaded operation
// (see Initialize, which creates sequence_checker_ in that case).
void InProcessCommandBuffer::CheckSequencedThread() {
  DCHECK(!sequence_checker_ ||
         sequence_checker_->CalledOnValidSequencedThread());
}
// Parse-error callback (GPU thread): notify the client once, then mark the
// context lost so later MakeCurrent calls fail fast.
void InProcessCommandBuffer::OnContextLost() {
  CheckSequencedThread();
  if (!context_lost_callback_.is_null()) {
    context_lost_callback_.Run();
    // Reset so the client is only notified once.
    context_lost_callback_.Reset();
  }

  context_lost_ = true;
}

// Refreshes last_state_ from the GPU-thread-updated snapshot, guarding
// against stale (generation-wrapped) snapshots overwriting newer state.
CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
  CheckSequencedThread();
  base::AutoLock lock(state_after_last_flush_lock_);
  if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U)
    last_state_ = state_after_last_flush_;
  return last_state_;
}

// Returns the cached state without synchronizing with the GPU thread.
CommandBuffer::State InProcessCommandBuffer::GetLastState() {
  CheckSequencedThread();
  return last_state_;
}

int32 InProcessCommandBuffer::GetLastToken() {
  CheckSequencedThread();
  GetStateFast();
  return last_state_.token;
}
// GPU-thread flush: executes commands up to |put_offset|, publishes the
// resulting state, and signals flush_event_ (via ScopedEvent) on every exit.
void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) {
  CheckSequencedThread();
  ScopedEvent handle_flush(&flush_event_);
  base::AutoLock lock(command_buffer_lock_);
  command_buffer_->Flush(put_offset);
  {
    // Update state before signaling the flush event.
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetLastState();
  }
  // Error state and context_lost_ must agree.
  DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) ||
         (error::IsError(state_after_last_flush_.error) && context_lost_));

  // If we've processed all pending commands but still have pending queries,
  // pump idle work until the query is passed.
  if (put_offset == state_after_last_flush_.get_offset &&
      gpu_scheduler_->HasMoreWork()) {
    ScheduleIdleWorkOnGpuThread();
  }
}

// Idle-work pump (GPU thread): keeps rescheduling itself while the scheduler
// reports outstanding work.
void InProcessCommandBuffer::PerformIdleWork() {
  CheckSequencedThread();
  idle_work_pending_ = false;
  base::AutoLock lock(command_buffer_lock_);
  if (MakeCurrent() && gpu_scheduler_->HasMoreWork()) {
    gpu_scheduler_->PerformIdleWork();
    ScheduleIdleWorkOnGpuThread();
  }
}

// Schedules one PerformIdleWork, coalescing duplicates via
// idle_work_pending_.
void InProcessCommandBuffer::ScheduleIdleWorkOnGpuThread() {
  CheckSequencedThread();
  if (idle_work_pending_)
    return;
  idle_work_pending_ = true;
  service_->ScheduleIdleWork(
      base::Bind(&InProcessCommandBuffer::PerformIdleWork,
                 gpu_thread_weak_ptr_));
}
// Client-thread flush: asynchronously hands |put_offset| to the GPU thread.
// No-ops if the buffer is in error or the offset has not advanced.
void InProcessCommandBuffer::Flush(int32 put_offset) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  if (last_put_offset_ == put_offset)
    return;

  last_put_offset_ = put_offset;
  base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
                                  gpu_thread_weak_ptr_,
                                  put_offset);
  QueueTask(task);
}

// Blocks the client thread until the token enters [start, end] or an error
// occurs; flush_event_ is signaled by each GPU-thread flush.
void InProcessCommandBuffer::WaitForTokenInRange(int32 start, int32 end) {
  CheckSequencedThread();
  while (!InRange(start, end, GetLastToken()) &&
         last_state_.error == gpu::error::kNoError)
    flush_event_.Wait();
}

// Blocks the client thread until the get offset enters [start, end] or an
// error occurs.
void InProcessCommandBuffer::WaitForGetOffsetInRange(int32 start, int32 end) {
  CheckSequencedThread();

  GetStateFast();
  while (!InRange(start, end, last_state_.get_offset) &&
         last_state_.error == gpu::error::kNoError) {
    flush_event_.Wait();
    GetStateFast();
  }
}
// Switches the active get buffer and resets the put offset; then refreshes
// the published state snapshot. Called on the client thread.
void InProcessCommandBuffer::SetGetBuffer(int32 shm_id) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  {
    base::AutoLock lock(command_buffer_lock_);
    command_buffer_->SetGetBuffer(shm_id);
    last_put_offset_ = 0;
  }
  {
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetLastState();
  }
}

// Allocates a transfer buffer synchronously under the command buffer lock;
// the new buffer's id is returned through |id|.
scoped_refptr<Buffer> InProcessCommandBuffer::CreateTransferBuffer(size_t size,
                                                                   int32* id) {
  CheckSequencedThread();
  base::AutoLock lock(command_buffer_lock_);
  return command_buffer_->CreateTransferBuffer(size, id);
}

// Destruction is asynchronous: posted to the GPU thread.
void InProcessCommandBuffer::DestroyTransferBuffer(int32 id) {
  CheckSequencedThread();
  base::Closure task =
      base::Bind(&InProcessCommandBuffer::DestroyTransferBufferOnGpuThread,
                 base::Unretained(this),
                 id);

  QueueTask(task);
}

void InProcessCommandBuffer::DestroyTransferBufferOnGpuThread(int32 id) {
  base::AutoLock lock(command_buffer_lock_);
  command_buffer_->DestroyTransferBuffer(id);
}

// Capabilities are captured once during Initialize().
gpu::Capabilities InProcessCommandBuffer::GetCapabilities() {
  return capabilities_;
}
// Creates a GL image from a client buffer. Allocates a new image id on the
// client thread, shares the underlying GpuMemoryBuffer handle with the GPU
// thread, and posts the actual image creation there. Returns the new id.
int32 InProcessCommandBuffer::CreateImage(ClientBuffer buffer,
                                          size_t width,
                                          size_t height,
                                          unsigned internalformat) {
  CheckSequencedThread();

  DCHECK(gpu_memory_buffer_manager_);
  gfx::GpuMemoryBuffer* gpu_memory_buffer =
      gpu_memory_buffer_manager_->GpuMemoryBufferFromClientBuffer(buffer);
  DCHECK(gpu_memory_buffer);

  int32 new_id = next_image_id_.GetNext();

  DCHECK(gpu::ImageFactory::IsImageFormatCompatibleWithGpuMemoryBufferFormat(
      internalformat, gpu_memory_buffer->GetFormat()));

  // This handle is owned by the GPU thread and must be passed to it or it
  // will leak. In otherwords, do not early out on error between here and the
  // queuing of the CreateImage task below.
  bool requires_sync_point = false;
  gfx::GpuMemoryBufferHandle handle =
      ShareGpuMemoryBufferToGpuThread(gpu_memory_buffer->GetHandle(),
                                      &requires_sync_point);

  QueueTask(base::Bind(&InProcessCommandBuffer::CreateImageOnGpuThread,
                       base::Unretained(this),
                       new_id,
                       handle,
                       gfx::Size(width, height),
                       gpu_memory_buffer->GetFormat(),
                       internalformat));

  if (requires_sync_point) {
    // Native buffers must not be destroyed by the producer before the GPU
    // thread is done with them; tie destruction to a fresh sync point.
    gpu_memory_buffer_manager_->SetDestructionSyncPoint(gpu_memory_buffer,
                                                        InsertSyncPoint());
  }

  return new_id;
}
// GPU-thread half of CreateImage: builds the GLImage and registers it under
// |id|. Silently drops the request if the decoder is gone, the id is taken,
// or image creation fails.
void InProcessCommandBuffer::CreateImageOnGpuThread(
    int32 id,
    const gfx::GpuMemoryBufferHandle& handle,
    const gfx::Size& size,
    gfx::GpuMemoryBuffer::Format format,
    uint32 internalformat) {
  if (!decoder_)
    return;

  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  if (image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image already exists with same ID.";
    return;
  }

  // Note: this assumes that client ID is always 0.
  const int kClientId = 0;

  DCHECK(image_factory_);
  scoped_refptr<gfx::GLImage> image =
      image_factory_->CreateImageForGpuMemoryBuffer(
          handle, size, format, internalformat, kClientId);
  if (!image.get())
    return;

  // The image manager takes a reference to |image|.
  image_manager->AddImage(image.get(), id);
}

// Client-thread request to drop an image; the removal runs on the GPU
// thread.
void InProcessCommandBuffer::DestroyImage(int32 id) {
  CheckSequencedThread();

  QueueTask(base::Bind(&InProcessCommandBuffer::DestroyImageOnGpuThread,
                       base::Unretained(this),
                       id));
}

void InProcessCommandBuffer::DestroyImageOnGpuThread(int32 id) {
  if (!decoder_)
    return;

  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  if (!image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image with ID doesn't exist.";
    return;
  }

  image_manager->RemoveImage(id);
}
// Convenience wrapper: allocates a GpuMemoryBuffer of the requested size and
// format, then creates an image from it. Returns -1 if the allocation fails.
// NOTE(review): |buffer| is destroyed when this scoped_ptr goes out of
// scope; the GPU-side image presumably keeps the underlying storage alive
// via the shared handle / destruction sync point — confirm with
// GpuMemoryBufferManager semantics.
int32 InProcessCommandBuffer::CreateGpuMemoryBufferImage(
    size_t width,
    size_t height,
    unsigned internalformat,
    unsigned usage) {
  CheckSequencedThread();

  DCHECK(gpu_memory_buffer_manager_);
  scoped_ptr<gfx::GpuMemoryBuffer> buffer(
      gpu_memory_buffer_manager_->AllocateGpuMemoryBuffer(
          gfx::Size(width, height),
          gpu::ImageFactory::ImageFormatToGpuMemoryBufferFormat(internalformat),
          gpu::ImageFactory::ImageUsageToGpuMemoryBufferUsage(usage)));
  if (!buffer)
    return -1;

  return CreateImage(buffer->AsClientBuffer(), width, height, internalformat);
}
// Generates a sync point and queues its retirement behind all currently
// queued GPU-thread work, so it "passes" once that work is done.
uint32 InProcessCommandBuffer::InsertSyncPoint() {
  uint32 sync_point = g_sync_point_manager.Get().GenerateSyncPoint();
  QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread,
                       base::Unretained(this),
                       sync_point));
  return sync_point;
}

// Generates a sync point whose retirement the caller triggers later via
// RetireSyncPoint().
uint32 InProcessCommandBuffer::InsertFutureSyncPoint() {
  return g_sync_point_manager.Get().GenerateSyncPoint();
}

void InProcessCommandBuffer::RetireSyncPoint(uint32 sync_point) {
  QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread,
                       base::Unretained(this),
                       sync_point));
}

// GPU-thread retirement: for sync-backed mailboxes, push texture updates
// first (which needs a current context), then mark the point retired.
void InProcessCommandBuffer::RetireSyncPointOnGpuThread(uint32 sync_point) {
  gles2::MailboxManager* mailbox_manager =
      decoder_->GetContextGroup()->mailbox_manager();
  if (mailbox_manager->UsesSync()) {
    bool make_current_success = false;
    {
      base::AutoLock lock(command_buffer_lock_);
      make_current_success = MakeCurrent();
    }
    if (make_current_success)
      mailbox_manager->PushTextureUpdates(sync_point);
  }
  g_sync_point_manager.Get().RetireSyncPoint(sync_point);
}
// Requests |callback| (run on the calling thread, see WrapCallback) once
// |sync_point| has passed.
void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point,
                                             const base::Closure& callback) {
  CheckSequencedThread();
  QueueTask(base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread,
                       base::Unretained(this),
                       sync_point,
                       WrapCallback(callback)));
}

// Decoder wait callback (GPU thread): blocks until |sync_point| is retired,
// then pulls mailbox texture updates. Always reports success.
bool InProcessCommandBuffer::WaitSyncPointOnGpuThread(unsigned sync_point) {
  g_sync_point_manager.Get().WaitSyncPoint(sync_point);
  gles2::MailboxManager* mailbox_manager =
      decoder_->GetContextGroup()->mailbox_manager();
  mailbox_manager->PullTextureUpdates(sync_point);
  return true;
}

void InProcessCommandBuffer::SignalSyncPointOnGpuThread(
    unsigned sync_point,
    const base::Closure& callback) {
  if (g_sync_point_manager.Get().IsSyncPointPassed(sync_point)) {
    callback.Run();
  } else {
    // Not passed yet: poll again from the service's idle queue instead of
    // blocking the GPU thread.
    service_->ScheduleIdleWork(
        base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread,
                   gpu_thread_weak_ptr_,
                   sync_point,
                   callback));
  }
}
// Requests |callback| (run on the calling thread, see WrapCallback) once the
// GL query |query_id| completes.
void InProcessCommandBuffer::SignalQuery(unsigned query_id,
                                         const base::Closure& callback) {
  CheckSequencedThread();
  QueueTask(base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread,
                       base::Unretained(this),
                       query_id,
                       WrapCallback(callback)));
}
865 void InProcessCommandBuffer::SignalQueryOnGpuThread(
866 unsigned query_id,
867 const base::Closure& callback) {
868 gles2::QueryManager* query_manager_ = decoder_->GetQueryManager();
869 DCHECK(query_manager_);
871 gles2::QueryManager::Query* query = query_manager_->GetQuery(query_id);
872 if (!query)
873 callback.Run();
874 else
875 query->AddCallback(callback);
// Intentionally a no-op: visibility-based memory management is not used for
// in-process command buffers.
void InProcessCommandBuffer::SetSurfaceVisible(bool visible) {}

// Synchronously creates a stream texture on the GPU thread and returns its
// stream id (0 on non-Android platforms, see below).
uint32 InProcessCommandBuffer::CreateStreamTexture(uint32 texture_id) {
  base::WaitableEvent completion(true, false);
  uint32 stream_id = 0;
  base::Callback<uint32(void)> task =
      base::Bind(&InProcessCommandBuffer::CreateStreamTextureOnGpuThread,
                 base::Unretained(this),
                 texture_id);
  QueueTask(
      base::Bind(&RunTaskWithResult<uint32>, task, &stream_id, &completion));
  completion.Wait();
  return stream_id;
}

uint32 InProcessCommandBuffer::CreateStreamTextureOnGpuThread(
    uint32 client_texture_id) {
#if defined(OS_ANDROID)
  return stream_texture_manager_->CreateStreamTexture(
      client_texture_id, decoder_->GetContextGroup()->texture_manager());
#else
  // Stream textures are Android-only.
  return 0;
#endif
}

gpu::error::Error InProcessCommandBuffer::GetLastError() {
  CheckSequencedThread();
  return last_state_.error;
}

// The parameterless CommandBuffer::Initialize overload is not supported for
// in-process command buffers; use the full Initialize overload above.
bool InProcessCommandBuffer::Initialize() {
  NOTREACHED();
  return false;
}
namespace {

// Runs |callback| on |loop| if it is a different thread's loop; otherwise
// runs it inline on the current thread.
void PostCallback(const scoped_refptr<base::MessageLoopProxy>& loop,
                  const base::Closure& callback) {
  // The loop.get() check is to support using InProcessCommandBuffer on a thread
  // without a message loop.
  if (loop.get() && !loop->BelongsToCurrentThread()) {
    loop->PostTask(FROM_HERE, callback);
  } else {
    callback.Run();
  }
}

// Trampoline that runs a heap-allocated closure; ownership of |callback|
// ensures it is destroyed on the thread that runs it.
void RunOnTargetThread(scoped_ptr<base::Closure> callback) {
  DCHECK(callback.get());
  callback->Run();
}

}  // anonymous namespace
// Wraps |callback| so that, no matter which thread invokes the wrapper (the
// GPU thread, typically), the original callback runs — and is destroyed —
// on the thread calling WrapCallback (the client thread).
base::Closure InProcessCommandBuffer::WrapCallback(
    const base::Closure& callback) {
  // Make sure the callback gets deleted on the target thread by passing
  // ownership.
  scoped_ptr<base::Closure> scoped_callback(new base::Closure(callback));
  base::Closure callback_on_client_thread =
      base::Bind(&RunOnTargetThread, base::Passed(&scoped_callback));
  base::Closure wrapped_callback =
      base::Bind(&PostCallback, base::MessageLoopProxy::current(),
                 callback_on_client_thread);
  return wrapped_callback;
}
#if defined(OS_ANDROID)
// Looks up the SurfaceTexture previously created via CreateStreamTexture.
scoped_refptr<gfx::SurfaceTexture>
InProcessCommandBuffer::GetSurfaceTexture(uint32 stream_id) {
  DCHECK(stream_texture_manager_);
  return stream_texture_manager_->GetSurfaceTexture(stream_id);
}
#endif
954 } // namespace gpu