// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/command_buffer/service/in_process_command_buffer.h"

#include <queue>
#include <set>
#include <utility>

#include <GLES2/gl2.h>
#ifndef GL_GLEXT_PROTOTYPES
#define GL_GLEXT_PROTOTYPES 1
#endif
#include <GLES2/gl2ext.h>
#include <GLES2/gl2extchromium.h>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/weak_ptr.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/sequence_checker.h"
#include "base/synchronization/condition_variable.h"
#include "base/threading/thread.h"
#include "gpu/command_buffer/client/gpu_memory_buffer_factory.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/gl_context_virtual.h"
#include "gpu/command_buffer/service/gpu_control_service.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/transfer_buffer_manager.h"
#include "ui/gfx/size.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image.h"
#include "ui/gl/gl_share_group.h"

#if defined(OS_ANDROID)
#include "gpu/command_buffer/service/stream_texture_manager_in_process_android.h"
#include "ui/gl/android/surface_texture.h"
#endif

namespace gpu {

namespace {

static GpuMemoryBufferFactory* g_gpu_memory_buffer_factory = NULL;
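
// Runs |task| synchronously, stores its return value in |result|, and signals
// |completion| so that a caller blocked on another thread can wake up.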
template <typename T>
static void RunTaskWithResult(base::Callback<T(void)> task,
                              T* result,
                              base::WaitableEvent* completion) {
  *result = task.Run();
  completion->Signal();
}
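
// Default Service implementation: a dedicated, refcounted thread onto which
// GPU tasks and idle work are posted.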
class GpuInProcessThread
    : public base::Thread,
      public InProcessCommandBuffer::Service,
      public base::RefCountedThreadSafe<GpuInProcessThread> {
 public:
  GpuInProcessThread();

  virtual void AddRef() const OVERRIDE {
    base::RefCountedThreadSafe<GpuInProcessThread>::AddRef();
  }
  virtual void Release() const OVERRIDE {
    base::RefCountedThreadSafe<GpuInProcessThread>::Release();
  }

  virtual void ScheduleTask(const base::Closure& task) OVERRIDE;
  virtual void ScheduleIdleWork(const base::Closure& callback) OVERRIDE;
  virtual bool UseVirtualizedGLContexts() OVERRIDE { return false; }
  virtual scoped_refptr<gles2::ShaderTranslatorCache> shader_translator_cache()
      OVERRIDE;

 private:
  virtual ~GpuInProcessThread();
  friend class base::RefCountedThreadSafe<GpuInProcessThread>;

  scoped_refptr<gpu::gles2::ShaderTranslatorCache> shader_translator_cache_;
  DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread);
};

GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") {
  Start();
}

GpuInProcessThread::~GpuInProcessThread() {
  Stop();
}

void GpuInProcessThread::ScheduleTask(const base::Closure& task) {
  message_loop()->PostTask(FROM_HERE, task);
}

void GpuInProcessThread::ScheduleIdleWork(const base::Closure& callback) {
  message_loop()->PostDelayedTask(
      FROM_HERE, callback, base::TimeDelta::FromMilliseconds(5));
}

scoped_refptr<gles2::ShaderTranslatorCache>
GpuInProcessThread::shader_translator_cache() {
  if (!shader_translator_cache_.get())
    shader_translator_cache_ = new gpu::gles2::ShaderTranslatorCache;
  return shader_translator_cache_;
}

base::LazyInstance<std::set<InProcessCommandBuffer*> > default_thread_clients_ =
    LAZY_INSTANCE_INITIALIZER;
base::LazyInstance<base::Lock> default_thread_clients_lock_ =
    LAZY_INSTANCE_INITIALIZER;
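
// Signals |event_| when destroyed, including on early returns.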
class ScopedEvent {
 public:
  ScopedEvent(base::WaitableEvent* event) : event_(event) {}
  ~ScopedEvent() { event_->Signal(); }

 private:
  base::WaitableEvent* event_;
};
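
// In-process stand-in for the sync point machinery: issues monotonically
// increasing sync points and lets callers block until a given sync point is
// retired.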
class SyncPointManager {
 public:
  SyncPointManager();
  ~SyncPointManager();

  uint32 GenerateSyncPoint();
  void RetireSyncPoint(uint32 sync_point);

  bool IsSyncPointPassed(uint32 sync_point);
  void WaitSyncPoint(uint32 sync_point);

 private:
  // This lock protects access to pending_sync_points_ and next_sync_point_ and
  // is used with the ConditionVariable to signal when a sync point is retired.
  base::Lock lock_;
  std::set<uint32> pending_sync_points_;
  uint32 next_sync_point_;
  base::ConditionVariable cond_var_;
};

SyncPointManager::SyncPointManager() : next_sync_point_(1), cond_var_(&lock_) {}

SyncPointManager::~SyncPointManager() {
  DCHECK_EQ(pending_sync_points_.size(), 0U);
}

uint32 SyncPointManager::GenerateSyncPoint() {
  base::AutoLock lock(lock_);
  uint32 sync_point = next_sync_point_++;
  DCHECK_EQ(pending_sync_points_.count(sync_point), 0U);
  pending_sync_points_.insert(sync_point);
  return sync_point;
}

void SyncPointManager::RetireSyncPoint(uint32 sync_point) {
  base::AutoLock lock(lock_);
  DCHECK(pending_sync_points_.count(sync_point));
  pending_sync_points_.erase(sync_point);
  cond_var_.Broadcast();
}

bool SyncPointManager::IsSyncPointPassed(uint32 sync_point) {
  base::AutoLock lock(lock_);
  return pending_sync_points_.count(sync_point) == 0;
}

void SyncPointManager::WaitSyncPoint(uint32 sync_point) {
  base::AutoLock lock(lock_);
  while (pending_sync_points_.count(sync_point)) {
    cond_var_.Wait();
  }
}

base::LazyInstance<SyncPointManager> g_sync_point_manager =
    LAZY_INSTANCE_INITIALIZER;
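
// Blocks until |sync_point| is retired and always reports success; installed
// as the decoder's wait-sync-point callback in InitializeOnGpuThread().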
bool WaitSyncPoint(uint32 sync_point) {
  g_sync_point_manager.Get().WaitSyncPoint(sync_point);
  return true;
}

}  // anonymous namespace

InProcessCommandBuffer::Service::Service() {}

InProcessCommandBuffer::Service::~Service() {}
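
// Returns the Service used by clients that did not supply their own: the
// service shared by an existing default-thread client if there is one,
// otherwise a freshly started GpuInProcessThread.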
scoped_refptr<InProcessCommandBuffer::Service>
InProcessCommandBuffer::GetDefaultService() {
  base::AutoLock lock(default_thread_clients_lock_.Get());
  scoped_refptr<Service> service;
  if (!default_thread_clients_.Get().empty()) {
    InProcessCommandBuffer* other = *default_thread_clients_.Get().begin();
    service = other->service_;
    DCHECK(service.get());
  } else {
    service = new GpuInProcessThread;
  }
  return service;
}

InProcessCommandBuffer::InProcessCommandBuffer(
    const scoped_refptr<Service>& service)
    : context_lost_(false),
      last_put_offset_(-1),
      flush_event_(false, false),
      service_(service.get() ? service : GetDefaultService()),
      gpu_thread_weak_ptr_factory_(this) {
  if (!service) {
    base::AutoLock lock(default_thread_clients_lock_.Get());
    default_thread_clients_.Get().insert(this);
  }
}

InProcessCommandBuffer::~InProcessCommandBuffer() {
  Destroy();
  base::AutoLock lock(default_thread_clients_lock_.Get());
  default_thread_clients_.Get().erase(this);
}

void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) {
  CheckSequencedThread();
  DCHECK(!surface_->IsOffscreen());
  surface_->Resize(size);
}

bool InProcessCommandBuffer::MakeCurrent() {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();

  if (!context_lost_ && decoder_->MakeCurrent())
    return true;
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
  command_buffer_->SetParseError(gpu::error::kLostContext);
  return false;
}

void InProcessCommandBuffer::PumpCommands() {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();

  if (!MakeCurrent())
    return;

  gpu_scheduler_->PutChanged();
}

bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id) {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();
  command_buffer_->SetGetBuffer(transfer_buffer_id);
  return true;
}
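
// Client-thread entry point: posts InitializeOnGpuThread() to the service and
// blocks until it finishes, then captures the resulting capabilities.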
bool InProcessCommandBuffer::Initialize(
    scoped_refptr<gfx::GLSurface> surface,
    bool is_offscreen,
    gfx::AcceleratedWidget window,
    const gfx::Size& size,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    const base::Closure& context_lost_callback,
    InProcessCommandBuffer* share_group) {
  DCHECK(!share_group || service_ == share_group->service_);
  context_lost_callback_ = WrapCallback(context_lost_callback);

  if (surface) {
    // GPU thread must be the same as client thread due to GLSurface not being
    // thread safe.
    sequence_checker_.reset(new base::SequenceChecker);
    surface_ = surface;
  }

  gpu::Capabilities capabilities;
  InitializeOnGpuThreadParams params(is_offscreen,
                                     window,
                                     size,
                                     attribs,
                                     gpu_preference,
                                     &capabilities,
                                     share_group);

  base::Callback<bool(void)> init_task =
      base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread,
                 base::Unretained(this),
                 params);

  base::WaitableEvent completion(true, false);
  bool result = false;
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion));
  completion.Wait();

  if (result) {
    capabilities_ = capabilities;
    capabilities_.map_image =
        capabilities_.map_image && g_gpu_memory_buffer_factory;
  }

  return result;
}
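
// GPU-thread half of initialization: builds the command buffer service,
// decoder, scheduler, surface, and GL context, and reports the decoder's
// capabilities back through |params|.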
bool InProcessCommandBuffer::InitializeOnGpuThread(
    const InitializeOnGpuThreadParams& params) {
  CheckSequencedThread();
  gpu_thread_weak_ptr_ = gpu_thread_weak_ptr_factory_.GetWeakPtr();

  DCHECK(params.size.width() >= 0 && params.size.height() >= 0);

  TransferBufferManager* manager = new TransferBufferManager();
  transfer_buffer_manager_.reset(manager);
  manager->Initialize();

  scoped_ptr<CommandBufferService> command_buffer(
      new CommandBufferService(transfer_buffer_manager_.get()));
  command_buffer->SetPutOffsetChangeCallback(base::Bind(
      &InProcessCommandBuffer::PumpCommands, gpu_thread_weak_ptr_));
  command_buffer->SetParseErrorCallback(base::Bind(
      &InProcessCommandBuffer::OnContextLost, gpu_thread_weak_ptr_));

  if (!command_buffer->Initialize()) {
    LOG(ERROR) << "Could not initialize command buffer.";
    DestroyOnGpuThread();
    return false;
  }

  gl_share_group_ = params.context_group
                        ? params.context_group->gl_share_group_.get()
                        : new gfx::GLShareGroup;

#if defined(OS_ANDROID)
  stream_texture_manager_.reset(new StreamTextureManagerInProcess);
#endif

  bool bind_generates_resource = false;
  decoder_.reset(gles2::GLES2Decoder::Create(
      params.context_group
          ? params.context_group->decoder_->GetContextGroup()
          : new gles2::ContextGroup(NULL,
                                    NULL,
                                    NULL,
                                    service_->shader_translator_cache(),
                                    NULL,
                                    bind_generates_resource)));

  gpu_scheduler_.reset(
      new GpuScheduler(command_buffer.get(), decoder_.get(), decoder_.get()));
  command_buffer->SetGetBufferChangeCallback(base::Bind(
      &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
  command_buffer_ = command_buffer.Pass();

  decoder_->set_engine(gpu_scheduler_.get());

  if (!surface_) {
    if (params.is_offscreen)
      surface_ = gfx::GLSurface::CreateOffscreenGLSurface(params.size);
    else
      surface_ = gfx::GLSurface::CreateViewGLSurface(params.window);
  }

  if (!surface_.get()) {
    LOG(ERROR) << "Could not create GLSurface.";
    DestroyOnGpuThread();
    return false;
  }

  if (service_->UseVirtualizedGLContexts()) {
    context_ = gl_share_group_->GetSharedContext();
    if (!context_.get()) {
      context_ = gfx::GLContext::CreateGLContext(
          gl_share_group_.get(), surface_.get(), params.gpu_preference);
      gl_share_group_->SetSharedContext(context_.get());
    }

    context_ = new GLContextVirtual(
        gl_share_group_.get(), context_.get(), decoder_->AsWeakPtr());
    if (context_->Initialize(surface_.get(), params.gpu_preference)) {
      VLOG(1) << "Created virtual GL context.";
    } else {
      context_ = NULL;
    }
  } else {
    context_ = gfx::GLContext::CreateGLContext(
        gl_share_group_.get(), surface_.get(), params.gpu_preference);
  }

  if (!context_.get()) {
    LOG(ERROR) << "Could not create GLContext.";
    DestroyOnGpuThread();
    return false;
  }

  if (!context_->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Could not make context current.";
    DestroyOnGpuThread();
    return false;
  }

  gles2::DisallowedFeatures disallowed_features;
  disallowed_features.gpu_memory_manager = true;
  if (!decoder_->Initialize(surface_,
                            context_,
                            params.is_offscreen,
                            params.size,
                            disallowed_features,
                            params.attribs)) {
    LOG(ERROR) << "Could not initialize decoder.";
    DestroyOnGpuThread();
    return false;
  }
  *params.capabilities = decoder_->GetCapabilities();

  gpu_control_.reset(
      new GpuControlService(decoder_->GetContextGroup()->image_manager(),
                            decoder_->GetQueryManager()));

  if (!params.is_offscreen) {
    decoder_->SetResizeCallback(base::Bind(
        &InProcessCommandBuffer::OnResizeView, gpu_thread_weak_ptr_));
  }
  decoder_->SetWaitSyncPointCallback(base::Bind(&WaitSyncPoint));

  return true;
}
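
// Client-thread teardown: posts DestroyOnGpuThread() and blocks until it has
// run.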
void InProcessCommandBuffer::Destroy() {
  CheckSequencedThread();

  base::WaitableEvent completion(true, false);
  bool result = false;
  base::Callback<bool(void)> destroy_task = base::Bind(
      &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion));
  completion.Wait();
}

bool InProcessCommandBuffer::DestroyOnGpuThread() {
  CheckSequencedThread();
  gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs();
  command_buffer_.reset();
  // Clean up GL resources if possible.
  bool have_context = context_ && context_->MakeCurrent(surface_);
  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }
  context_ = NULL;
  surface_ = NULL;
  gl_share_group_ = NULL;
#if defined(OS_ANDROID)
  stream_texture_manager_.reset();
#endif

  return true;
}

void InProcessCommandBuffer::CheckSequencedThread() {
  DCHECK(!sequence_checker_ ||
         sequence_checker_->CalledOnValidSequencedThread());
}

void InProcessCommandBuffer::OnContextLost() {
  CheckSequencedThread();
  if (!context_lost_callback_.is_null()) {
    context_lost_callback_.Run();
    context_lost_callback_.Reset();
  }

  context_lost_ = true;
}

CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
  CheckSequencedThread();
  base::AutoLock lock(state_after_last_flush_lock_);
  if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U)
    last_state_ = state_after_last_flush_;
  return last_state_;
}

CommandBuffer::State InProcessCommandBuffer::GetLastState() {
  CheckSequencedThread();
  return last_state_;
}

int32 InProcessCommandBuffer::GetLastToken() {
  CheckSequencedThread();
  GetStateFast();
  return last_state_.token;
}
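
// GPU-thread side of Flush(): executes commands up to |put_offset| and
// publishes the resulting state before the flush event is signaled.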
void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) {
  CheckSequencedThread();
  ScopedEvent handle_flush(&flush_event_);
  base::AutoLock lock(command_buffer_lock_);
  command_buffer_->Flush(put_offset);
  {
    // Update state before signaling the flush event.
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetLastState();
  }
  DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) ||
         (error::IsError(state_after_last_flush_.error) && context_lost_));

  // If we've processed all pending commands but still have pending queries,
  // pump idle work until the query is passed.
  if (put_offset == state_after_last_flush_.get_offset &&
      gpu_scheduler_->HasMoreWork()) {
    service_->ScheduleIdleWork(
        base::Bind(&InProcessCommandBuffer::ScheduleMoreIdleWork,
                   gpu_thread_weak_ptr_));
  }
}

void InProcessCommandBuffer::ScheduleMoreIdleWork() {
  CheckSequencedThread();
  base::AutoLock lock(command_buffer_lock_);
  if (gpu_scheduler_->HasMoreWork()) {
    gpu_scheduler_->PerformIdleWork();
    service_->ScheduleIdleWork(
        base::Bind(&InProcessCommandBuffer::ScheduleMoreIdleWork,
                   gpu_thread_weak_ptr_));
  }
}
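
// Client-thread flush: records the new put offset and hands the real work to
// the GPU thread; no-ops if the offset is unchanged or the context is in
// error.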
void InProcessCommandBuffer::Flush(int32 put_offset) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  if (last_put_offset_ == put_offset)
    return;

  last_put_offset_ = put_offset;
  base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
                                  gpu_thread_weak_ptr_,
                                  put_offset);
  QueueTask(task);
}

void InProcessCommandBuffer::WaitForTokenInRange(int32 start, int32 end) {
  CheckSequencedThread();
  while (!InRange(start, end, GetLastToken()) &&
         last_state_.error == gpu::error::kNoError)
    flush_event_.Wait();
}

void InProcessCommandBuffer::WaitForGetOffsetInRange(int32 start, int32 end) {
  CheckSequencedThread();

  GetStateFast();
  while (!InRange(start, end, last_state_.get_offset) &&
         last_state_.error == gpu::error::kNoError) {
    flush_event_.Wait();
    GetStateFast();
  }
}

void InProcessCommandBuffer::SetGetBuffer(int32 shm_id) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  {
    base::AutoLock lock(command_buffer_lock_);
    command_buffer_->SetGetBuffer(shm_id);
    last_put_offset_ = 0;
  }
  {
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetLastState();
  }
}

scoped_refptr<Buffer> InProcessCommandBuffer::CreateTransferBuffer(size_t size,
                                                                   int32* id) {
  CheckSequencedThread();
  base::AutoLock lock(command_buffer_lock_);
  return command_buffer_->CreateTransferBuffer(size, id);
}

void InProcessCommandBuffer::DestroyTransferBuffer(int32 id) {
  CheckSequencedThread();
  base::Closure task =
      base::Bind(&InProcessCommandBuffer::DestroyTransferBufferOnGputhread,
                 base::Unretained(this),
                 id);

  QueueTask(task);
}

void InProcessCommandBuffer::DestroyTransferBufferOnGputhread(int32 id) {
  base::AutoLock lock(command_buffer_lock_);
  command_buffer_->DestroyTransferBuffer(id);
}

gpu::Capabilities InProcessCommandBuffer::GetCapabilities() {
  return capabilities_;
}
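
// Allocates a GpuMemoryBuffer through the process-wide factory, assigns it a
// client-side id, and registers it with the GPU control service on the GPU
// thread.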
gfx::GpuMemoryBuffer* InProcessCommandBuffer::CreateGpuMemoryBuffer(
    size_t width,
    size_t height,
    unsigned internalformat,
    unsigned usage,
    int32* id) {
  CheckSequencedThread();

  *id = -1;
  linked_ptr<gfx::GpuMemoryBuffer> buffer =
      make_linked_ptr(g_gpu_memory_buffer_factory->CreateGpuMemoryBuffer(
          width, height, internalformat, usage));
  if (!buffer.get())
    return NULL;

  static int32 next_id = 1;
  *id = next_id++;

  base::Closure task = base::Bind(&GpuControlService::RegisterGpuMemoryBuffer,
                                  base::Unretained(gpu_control_.get()),
                                  *id,
                                  buffer->GetHandle(),
                                  width,
                                  height,
                                  internalformat);

  QueueTask(task);

  gpu_memory_buffers_[*id] = buffer;
  return buffer.get();
}

void InProcessCommandBuffer::DestroyGpuMemoryBuffer(int32 id) {
  CheckSequencedThread();
  GpuMemoryBufferMap::iterator it = gpu_memory_buffers_.find(id);
  if (it != gpu_memory_buffers_.end())
    gpu_memory_buffers_.erase(it);
  base::Closure task = base::Bind(&GpuControlService::UnregisterGpuMemoryBuffer,
                                  base::Unretained(gpu_control_.get()),
                                  id);

  QueueTask(task);
}
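
// The sync point is generated eagerly on the client thread; it retires on the
// GPU thread once all previously queued tasks have run.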
uint32 InProcessCommandBuffer::InsertSyncPoint() {
  uint32 sync_point = g_sync_point_manager.Get().GenerateSyncPoint();
  QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread,
                       base::Unretained(this),
                       sync_point));
  return sync_point;
}

void InProcessCommandBuffer::RetireSyncPointOnGpuThread(uint32 sync_point) {
  gles2::MailboxManager* mailbox_manager =
      decoder_->GetContextGroup()->mailbox_manager();
  if (mailbox_manager->UsesSync() && MakeCurrent())
    mailbox_manager->PushTextureUpdates();
  g_sync_point_manager.Get().RetireSyncPoint(sync_point);
}

void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point,
                                             const base::Closure& callback) {
  CheckSequencedThread();
  QueueTask(base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread,
                       base::Unretained(this),
                       sync_point,
                       WrapCallback(callback)));
}

void InProcessCommandBuffer::SignalSyncPointOnGpuThread(
    unsigned sync_point,
    const base::Closure& callback) {
  if (g_sync_point_manager.Get().IsSyncPointPassed(sync_point)) {
    callback.Run();
  } else {
    service_->ScheduleIdleWork(
        base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread,
                   gpu_thread_weak_ptr_,
                   sync_point,
                   callback));
  }
}

void InProcessCommandBuffer::SignalQuery(unsigned query,
                                         const base::Closure& callback) {
  CheckSequencedThread();
  QueueTask(base::Bind(&GpuControlService::SignalQuery,
                       base::Unretained(gpu_control_.get()),
                       query,
                       WrapCallback(callback)));
}

void InProcessCommandBuffer::SetSurfaceVisible(bool visible) {}

void InProcessCommandBuffer::Echo(const base::Closure& callback) {
  QueueTask(WrapCallback(callback));
}

uint32 InProcessCommandBuffer::CreateStreamTexture(uint32 texture_id) {
  base::WaitableEvent completion(true, false);
  uint32 stream_id = 0;
  base::Callback<uint32(void)> task =
      base::Bind(&InProcessCommandBuffer::CreateStreamTextureOnGpuThread,
                 base::Unretained(this),
                 texture_id);
  QueueTask(
      base::Bind(&RunTaskWithResult<uint32>, task, &stream_id, &completion));
  completion.Wait();
  return stream_id;
}

uint32 InProcessCommandBuffer::CreateStreamTextureOnGpuThread(
    uint32 client_texture_id) {
#if defined(OS_ANDROID)
  return stream_texture_manager_->CreateStreamTexture(
      client_texture_id, decoder_->GetContextGroup()->texture_manager());
#else
  return 0;
#endif
}

gpu::error::Error InProcessCommandBuffer::GetLastError() {
  CheckSequencedThread();
  return last_state_.error;
}

bool InProcessCommandBuffer::Initialize() {
  NOTREACHED();
  return false;
}

namespace {

void PostCallback(const scoped_refptr<base::MessageLoopProxy>& loop,
                  const base::Closure& callback) {
  if (!loop->BelongsToCurrentThread()) {
    loop->PostTask(FROM_HERE, callback);
  } else {
    callback.Run();
  }
}

void RunOnTargetThread(scoped_ptr<base::Closure> callback) {
  DCHECK(callback.get());
  callback->Run();
}

}  // anonymous namespace
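
// Binds |callback| to the current thread: the returned closure may be run on
// any thread and will post |callback| back here (running it inline if already
// on this thread), so the callback is also destroyed on this thread.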
base::Closure InProcessCommandBuffer::WrapCallback(
    const base::Closure& callback) {
  // Make sure the callback gets deleted on the target thread by passing
  // ownership.
  scoped_ptr<base::Closure> scoped_callback(new base::Closure(callback));
  base::Closure callback_on_client_thread =
      base::Bind(&RunOnTargetThread, base::Passed(&scoped_callback));
  base::Closure wrapped_callback =
      base::Bind(&PostCallback, base::MessageLoopProxy::current(),
                 callback_on_client_thread);
  return wrapped_callback;
}

#if defined(OS_ANDROID)
scoped_refptr<gfx::SurfaceTexture>
InProcessCommandBuffer::GetSurfaceTexture(uint32 stream_id) {
  DCHECK(stream_texture_manager_);
  return stream_texture_manager_->GetSurfaceTexture(stream_id);
}
#endif

// static
void InProcessCommandBuffer::SetGpuMemoryBufferFactory(
    GpuMemoryBufferFactory* factory) {
  g_gpu_memory_buffer_factory = factory;
}

}  // namespace gpu