[chromium-blink-merge.git] / content/common/gpu/gpu_command_buffer_stub.cc
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/hash.h"
#include "base/json/json_writer.h"
#include "base/memory/shared_memory.h"
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_command_buffer_stub.h"
#include "content/common/gpu/gpu_memory_manager.h"
#include "content/common/gpu/gpu_memory_tracking.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/gpu_watchdog.h"
#include "content/common/gpu/image_transport_surface.h"
#include "content/common/gpu/media/gpu_video_decode_accelerator.h"
#include "content/common/gpu/media/gpu_video_encode_accelerator.h"
#include "content/public/common/content_client.h"
#include "content/public/common/content_switches.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/service/gl_context_virtual.h"
#include "gpu/command_buffer/service/gl_state_restorer_impl.h"
#include "gpu/command_buffer/service/image_factory.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/logger.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/query_manager.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/command_buffer/service/valuebuffer_manager.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_switches.h"

#if defined(OS_WIN)
#include "base/win/win_util.h"
#include "content/public/common/sandbox_init.h"
#endif

#if defined(OS_ANDROID)
#include "content/common/gpu/stream_texture_android.h"
#endif

namespace content {
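
// Records an in-flight WaitForTokenInRange / WaitForGetOffsetInRange request:
// the [start, end] range being waited on, plus the delayed reply message that
// is sent once the wait completes (see CheckCompleteWaits below).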
struct WaitForCommandState {
  WaitForCommandState(int32 start, int32 end, IPC::Message* reply)
      : start(start), end(end), reply(reply) {}

  int32 start;
  int32 end;
  scoped_ptr<IPC::Message> reply;
};

namespace {

// The GpuCommandBufferMemoryTracker class provides a bridge between the
// ContextGroup's memory type managers and the GpuMemoryManager class.
class GpuCommandBufferMemoryTracker : public gpu::gles2::MemoryTracker {
 public:
  explicit GpuCommandBufferMemoryTracker(GpuChannel* channel)
      : tracking_group_(
            channel->gpu_channel_manager()->gpu_memory_manager()->
                CreateTrackingGroup(channel->renderer_pid(), this)) {}

  void TrackMemoryAllocatedChange(
      size_t old_size,
      size_t new_size,
      gpu::gles2::MemoryTracker::Pool pool) override {
    tracking_group_->TrackMemoryAllocatedChange(old_size, new_size, pool);
  }

  bool EnsureGPUMemoryAvailable(size_t size_needed) override {
    return tracking_group_->EnsureGPUMemoryAvailable(size_needed);
  }

 private:
  ~GpuCommandBufferMemoryTracker() override {}
  scoped_ptr<GpuMemoryTrackingGroup> tracking_group_;

  DISALLOW_COPY_AND_ASSIGN(GpuCommandBufferMemoryTracker);
};

// FastSetActiveURL will shortcut the expensive call to SetActiveURL when the
// url_hash matches.
void FastSetActiveURL(const GURL& url, size_t url_hash) {
  // Leave the previously set URL in the empty case -- empty URLs are given by
  // BlinkPlatformImpl::createOffscreenGraphicsContext3D. Hopefully the
  // onscreen context URL was set previously and will show up even when a crash
  // occurs during offscreen command processing.
  if (url.is_empty())
    return;
  static size_t g_last_url_hash = 0;
  if (url_hash != g_last_url_hash) {
    g_last_url_hash = url_hash;
    GetContentClient()->SetActiveURL(url);
  }
}

// The first time polling a fence, delay some extra time to allow other
// stubs to process some work, or else the timing of the fences could
// allow a pattern of alternating fast and slow frames to occur.
const int64 kHandleMoreWorkPeriodMs = 2;
const int64 kHandleMoreWorkPeriodBusyMs = 1;

// Prevents idle work from being starved.
const int64 kMaxTimeSinceIdleMs = 10;
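
// Attaches per-channel GPU memory statistics (renderer pid, bytes in use,
// allocation limit) to "devtools.timeline" trace events as a JSON dictionary.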
class DevToolsChannelData : public base::trace_event::ConvertableToTraceFormat {
 public:
  static scoped_refptr<base::trace_event::ConvertableToTraceFormat>
  CreateForChannel(GpuChannel* channel);

  void AppendAsTraceFormat(std::string* out) const override {
    std::string tmp;
    base::JSONWriter::Write(*value_, &tmp);
    *out += tmp;
  }

 private:
  explicit DevToolsChannelData(base::Value* value) : value_(value) {}
  ~DevToolsChannelData() override {}
  scoped_ptr<base::Value> value_;
  DISALLOW_COPY_AND_ASSIGN(DevToolsChannelData);
};

scoped_refptr<base::trace_event::ConvertableToTraceFormat>
DevToolsChannelData::CreateForChannel(GpuChannel* channel) {
  scoped_ptr<base::DictionaryValue> res(new base::DictionaryValue);
  res->SetInteger("renderer_pid", channel->renderer_pid());
  res->SetDouble("used_bytes", channel->GetMemoryUsage());
  res->SetDouble("limit_bytes",
                 channel->gpu_channel_manager()
                     ->gpu_memory_manager()
                     ->GetMaximumClientAllocation());
  return new DevToolsChannelData(res.release());
}

void RunOnThread(scoped_refptr<base::SingleThreadTaskRunner> task_runner,
                 const base::Closure& callback) {
  if (task_runner->BelongsToCurrentThread()) {
    callback.Run();
  } else {
    task_runner->PostTask(FROM_HERE, callback);
  }
}

}  // namespace

GpuCommandBufferStub::GpuCommandBufferStub(
    GpuChannel* channel,
    GpuCommandBufferStub* share_group,
    const gfx::GLSurfaceHandle& handle,
    gpu::gles2::MailboxManager* mailbox_manager,
    gpu::gles2::SubscriptionRefSet* subscription_ref_set,
    gpu::ValueStateMap* pending_valuebuffer_state,
    const gfx::Size& size,
    const gpu::gles2::DisallowedFeatures& disallowed_features,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    bool use_virtualized_gl_context,
    int32 route_id,
    int32 surface_id,
    GpuWatchdog* watchdog,
    bool software,
    const GURL& active_url)
    : channel_(channel),
      handle_(handle),
      initial_size_(size),
      disallowed_features_(disallowed_features),
      requested_attribs_(attribs),
      gpu_preference_(gpu_preference),
      use_virtualized_gl_context_(use_virtualized_gl_context),
      route_id_(route_id),
      surface_id_(surface_id),
      software_(software),
      last_flush_count_(0),
      last_memory_allocation_valid_(false),
      watchdog_(watchdog),
      sync_point_wait_count_(0),
      delayed_work_scheduled_(false),
      previous_messages_processed_(0),
      active_url_(active_url),
      total_gpu_memory_(0) {
  active_url_hash_ = base::Hash(active_url.possibly_invalid_spec());
  FastSetActiveURL(active_url_, active_url_hash_);

  gpu::gles2::ContextCreationAttribHelper attrib_parser;
  attrib_parser.Parse(requested_attribs_);

  if (share_group) {
    context_group_ = share_group->context_group_;
    DCHECK(context_group_->bind_generates_resource() ==
           attrib_parser.bind_generates_resource);
  } else {
    context_group_ = new gpu::gles2::ContextGroup(
        mailbox_manager,
        new GpuCommandBufferMemoryTracker(channel),
        channel_->gpu_channel_manager()->shader_translator_cache(),
        NULL,
        subscription_ref_set,
        pending_valuebuffer_state,
        attrib_parser.bind_generates_resource);
  }

  use_virtualized_gl_context_ |=
      context_group_->feature_info()->workarounds().use_virtualized_gl_contexts;

  bool is_offscreen = surface_id_ == 0;
  if (is_offscreen && initial_size_.IsEmpty()) {
    // If we're an offscreen surface with zero width and/or height, set to a
    // non-zero size so that we have a complete framebuffer for operations like
    // glClear.
    initial_size_ = gfx::Size(1, 1);
  }
}

GpuCommandBufferStub::~GpuCommandBufferStub() {
  Destroy();

  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DestroyCommandBuffer(surface_id()));
}

GpuMemoryManager* GpuCommandBufferStub::GetMemoryManager() const {
  return channel()->gpu_channel_manager()->gpu_memory_manager();
}

bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"),
               "GPUTask",
               "data",
               DevToolsChannelData::CreateForChannel(channel()));
  FastSetActiveURL(active_url_, active_url_hash_);

  bool have_context = false;
  // Ensure the appropriate GL context is current before handling any IPC
  // messages directed at the command buffer. This ensures that the message
  // handler can assume that the context is current (not necessary for
  // RetireSyncPoint or WaitSyncPoint).
  if (decoder_.get() &&
      message.type() != GpuCommandBufferMsg_SetGetBuffer::ID &&
      message.type() != GpuCommandBufferMsg_WaitForTokenInRange::ID &&
      message.type() != GpuCommandBufferMsg_WaitForGetOffsetInRange::ID &&
      message.type() != GpuCommandBufferMsg_RegisterTransferBuffer::ID &&
      message.type() != GpuCommandBufferMsg_DestroyTransferBuffer::ID &&
      message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID &&
      message.type() != GpuCommandBufferMsg_SignalSyncPoint::ID &&
      message.type() !=
          GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback::ID) {
    if (!MakeCurrent())
      return false;
    have_context = true;
  }

  // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message
  // handlers here. This is so the reply can be delayed if the scheduler is
  // unscheduled.
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize,
                                    OnInitialize);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetGetBuffer,
                                    OnSetGetBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ProduceFrontBuffer,
                        OnProduceFrontBuffer);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForTokenInRange,
                                    OnWaitForTokenInRange);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForGetOffsetInRange,
                                    OnWaitForGetOffsetInRange);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer,
                        OnRegisterTransferBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer,
                        OnDestroyTransferBuffer);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoDecoder,
                                    OnCreateVideoDecoder)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoEncoder,
                                    OnCreateVideoEncoder)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetSurfaceVisible,
                        OnSetSurfaceVisible)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RetireSyncPoint,
                        OnRetireSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPoint,
                        OnSignalSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalQuery,
                        OnSignalQuery)
    IPC_MESSAGE_HANDLER(
        GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback,
        OnSetClientHasMemoryAllocationChangedCallback)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateImage, OnCreateImage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyImage, OnDestroyImage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateStreamTexture,
                        OnCreateStreamTexture)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  CheckCompleteWaits();

  if (have_context) {
    // Ensure that any delayed work that was created will be handled.
    ScheduleDelayedWork(kHandleMoreWorkPeriodMs);
  }

  DCHECK(handled);
  return handled;
}

bool GpuCommandBufferStub::Send(IPC::Message* message) {
  return channel_->Send(message);
}

bool GpuCommandBufferStub::IsScheduled() {
  return (!scheduler_.get() || scheduler_->IsScheduled());
}

bool GpuCommandBufferStub::HasMoreWork() {
  return scheduler_.get() && scheduler_->HasMoreWork();
}

void GpuCommandBufferStub::PollWork() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork");
  delayed_work_scheduled_ = false;
  FastSetActiveURL(active_url_, active_url_hash_);
  if (decoder_.get() && !MakeCurrent())
    return;

  if (scheduler_) {
    uint64 current_messages_processed =
        channel()->gpu_channel_manager()->MessagesProcessed();
    // We're idle when no messages were processed or scheduled.
    bool is_idle =
        (previous_messages_processed_ == current_messages_processed) &&
        !channel()->gpu_channel_manager()->HandleMessagesScheduled();
    if (!is_idle && !last_idle_time_.is_null()) {
      base::TimeDelta time_since_idle =
          base::TimeTicks::Now() - last_idle_time_;
      base::TimeDelta max_time_since_idle =
          base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs);

      // Force idle when it's been too long since last time we were idle.
      if (time_since_idle > max_time_since_idle)
        is_idle = true;
    }

    if (is_idle) {
      last_idle_time_ = base::TimeTicks::Now();
      scheduler_->PerformIdleWork();
    }
  }
  ScheduleDelayedWork(kHandleMoreWorkPeriodBusyMs);
}

bool GpuCommandBufferStub::HasUnprocessedCommands() {
  if (command_buffer_) {
    gpu::CommandBuffer::State state = command_buffer_->GetLastState();
    return command_buffer_->GetPutOffset() != state.get_offset &&
           !gpu::error::IsError(state.error);
  }
  return false;
}

void GpuCommandBufferStub::ScheduleDelayedWork(int64 delay) {
  if (!HasMoreWork()) {
    last_idle_time_ = base::TimeTicks();
    return;
  }

  if (delayed_work_scheduled_)
    return;
  delayed_work_scheduled_ = true;

  // Idle when no messages are processed between now and when
  // PollWork is called.
  previous_messages_processed_ =
      channel()->gpu_channel_manager()->MessagesProcessed();
  if (last_idle_time_.is_null())
    last_idle_time_ = base::TimeTicks::Now();

  // IsScheduled() returns true after passing all unschedule fences
  // and this is when we can start performing idle work. Idle work
  // is done synchronously so we can set delay to 0 and instead poll
  // for more work at the rate idle work is performed. This also ensures
  // that idle work is done as efficiently as possible without any
  // unnecessary delays.
  if (scheduler_.get() &&
      scheduler_->IsScheduled() &&
      scheduler_->HasMoreIdleWork()) {
    delay = 0;
  }

  base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
      FROM_HERE, base::Bind(&GpuCommandBufferStub::PollWork, AsWeakPtr()),
      base::TimeDelta::FromMilliseconds(delay));
}

bool GpuCommandBufferStub::MakeCurrent() {
  if (decoder_->MakeCurrent())
    return true;
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
  command_buffer_->SetParseError(gpu::error::kLostContext);
  CheckContextLost();
  return false;
}

void GpuCommandBufferStub::Destroy() {
  if (wait_for_token_) {
    Send(wait_for_token_->reply.release());
    wait_for_token_.reset();
  }
  if (wait_for_get_offset_) {
    Send(wait_for_get_offset_->reply.release());
    wait_for_get_offset_.reset();
  }
  if (handle_.is_null() && !active_url_.is_empty()) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(new GpuHostMsg_DidDestroyOffscreenContext(
        active_url_));
  }

  memory_manager_client_state_.reset();

  while (!sync_points_.empty())
    OnRetireSyncPoint(sync_points_.front());

  if (decoder_)
    decoder_->set_engine(NULL);

  // The scheduler has raw references to the decoder and the command buffer so
  // destroy it before those.
  scheduler_.reset();

  bool have_context = false;
  if (decoder_ && decoder_->GetGLContext()) {
    // Try to make the context current regardless of whether it was lost, so we
    // don't leak resources.
    have_context = decoder_->GetGLContext()->MakeCurrent(surface_.get());
  }
  FOR_EACH_OBSERVER(DestructionObserver,
                    destruction_observers_,
                    OnWillDestroyStub());

  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }

  command_buffer_.reset();

  // Remove this after crbug.com/248395 is sorted out.
  surface_ = NULL;
}

void GpuCommandBufferStub::OnInitializeFailed(IPC::Message* reply_message) {
  Destroy();
  GpuCommandBufferMsg_Initialize::WriteReplyParams(
      reply_message, false, gpu::Capabilities());
  Send(reply_message);
}

void GpuCommandBufferStub::OnInitialize(
    base::SharedMemoryHandle shared_state_handle,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnInitialize");
  DCHECK(!command_buffer_.get());

  scoped_ptr<base::SharedMemory> shared_state_shm(
      new base::SharedMemory(shared_state_handle, false));

  command_buffer_.reset(new gpu::CommandBufferService(
      context_group_->transfer_buffer_manager()));

  bool result = command_buffer_->Initialize();
  DCHECK(result);

  decoder_.reset(::gpu::gles2::GLES2Decoder::Create(context_group_.get()));
  scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(),
                                         decoder_.get(),
                                         decoder_.get()));
  if (preemption_flag_.get())
    scheduler_->SetPreemptByFlag(preemption_flag_);

  decoder_->set_engine(scheduler_.get());

  if (!handle_.is_null()) {
#if defined(OS_MACOSX) || defined(UI_COMPOSITOR_IMAGE_TRANSPORT)
    if (software_) {
      LOG(ERROR) << "No software support.";
      OnInitializeFailed(reply_message);
      return;
    }
#endif

    surface_ = ImageTransportSurface::CreateSurface(
        channel_->gpu_channel_manager(),
        this,
        handle_);
  } else {
    GpuChannelManager* manager = channel_->gpu_channel_manager();
    surface_ = manager->GetDefaultOffscreenSurface();
  }

  if (!surface_.get()) {
    DLOG(ERROR) << "Failed to create surface.";
    OnInitializeFailed(reply_message);
    return;
  }

  scoped_refptr<gfx::GLContext> context;
  if (use_virtualized_gl_context_ && channel_->share_group()) {
    context = channel_->share_group()->GetSharedContext();
    if (!context.get()) {
      context = gfx::GLContext::CreateGLContext(
          channel_->share_group(),
          channel_->gpu_channel_manager()->GetDefaultOffscreenSurface(),
          gpu_preference_);
      if (!context.get()) {
        DLOG(ERROR) << "Failed to create shared context for virtualization.";
        OnInitializeFailed(reply_message);
        return;
      }
      channel_->share_group()->SetSharedContext(context.get());
    }

    // This should be a non-virtual GL context.
    DCHECK(context->GetHandle());
    context = new gpu::GLContextVirtual(
        channel_->share_group(), context.get(), decoder_->AsWeakPtr());
    if (!context->Initialize(surface_.get(), gpu_preference_)) {
      // TODO(sievers): The real context created above for the default
      // offscreen surface might not be compatible with this surface.
      // Need to adjust at least GLX to be able to create the initial context
      // with a config that is compatible with onscreen and offscreen surfaces.
      context = NULL;

      DLOG(ERROR) << "Failed to initialize virtual GL context.";
      OnInitializeFailed(reply_message);
      return;
    }
  }
  if (!context.get()) {
    context = gfx::GLContext::CreateGLContext(
        channel_->share_group(), surface_.get(), gpu_preference_);
  }
  if (!context.get()) {
    DLOG(ERROR) << "Failed to create context.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Failed to make context current.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->GetGLStateRestorer()) {
    context->SetGLStateRestorer(
        new gpu::GLStateRestorerImpl(decoder_->AsWeakPtr()));
  }

  if (!context->GetTotalGpuMemory(&total_gpu_memory_))
    total_gpu_memory_ = 0;

  if (!context_group_->has_program_cache() &&
      !context_group_->feature_info()->workarounds().disable_program_cache) {
    context_group_->set_program_cache(
        channel_->gpu_channel_manager()->program_cache());
  }

  // Initialize the decoder with either the view or pbuffer GLContext.
  if (!decoder_->Initialize(surface_,
                            context,
                            !surface_id(),
                            initial_size_,
                            disallowed_features_,
                            requested_attribs_)) {
    DLOG(ERROR) << "Failed to initialize decoder.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (base::CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kEnableGPUServiceLogging)) {
    decoder_->set_log_commands(true);
  }

  decoder_->GetLogger()->SetMsgCallback(
      base::Bind(&GpuCommandBufferStub::SendConsoleMessage,
                 base::Unretained(this)));
  decoder_->SetShaderCacheCallback(
      base::Bind(&GpuCommandBufferStub::SendCachedShader,
                 base::Unretained(this)));
  decoder_->SetWaitSyncPointCallback(
      base::Bind(&GpuCommandBufferStub::OnWaitSyncPoint,
                 base::Unretained(this)));

  command_buffer_->SetPutOffsetChangeCallback(
      base::Bind(&GpuCommandBufferStub::PutChanged, base::Unretained(this)));
  command_buffer_->SetGetBufferChangeCallback(
      base::Bind(&gpu::GpuScheduler::SetGetBuffer,
                 base::Unretained(scheduler_.get())));
  command_buffer_->SetParseErrorCallback(
      base::Bind(&GpuCommandBufferStub::OnParseError, base::Unretained(this)));
  scheduler_->SetSchedulingChangedCallback(
      base::Bind(&GpuChannel::StubSchedulingChanged,
                 base::Unretained(channel_)));

  if (watchdog_) {
    scheduler_->SetCommandProcessedCallback(
        base::Bind(&GpuCommandBufferStub::OnCommandProcessed,
                   base::Unretained(this)));
  }

  const size_t kSharedStateSize = sizeof(gpu::CommandBufferSharedState);
  if (!shared_state_shm->Map(kSharedStateSize)) {
    DLOG(ERROR) << "Failed to map shared state buffer.";
    OnInitializeFailed(reply_message);
    return;
  }
  command_buffer_->SetSharedStateBuffer(gpu::MakeBackingFromSharedMemory(
      shared_state_shm.Pass(), kSharedStateSize));

  gpu::Capabilities capabilities = decoder_->GetCapabilities();
  capabilities.future_sync_points = channel_->allow_future_sync_points();

  GpuCommandBufferMsg_Initialize::WriteReplyParams(
      reply_message, true, capabilities);
  Send(reply_message);

  if (handle_.is_null() && !active_url_.is_empty()) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(new GpuHostMsg_DidCreateOffscreenContext(
        active_url_));
  }
}

void GpuCommandBufferStub::OnCreateStreamTexture(
    uint32 texture_id, int32 stream_id, bool* succeeded) {
#if defined(OS_ANDROID)
  *succeeded = StreamTexture::Create(this, texture_id, stream_id);
#else
  *succeeded = false;
#endif
}

void GpuCommandBufferStub::SetLatencyInfoCallback(
    const LatencyInfoCallback& callback) {
  latency_info_callback_ = callback;
}

int32 GpuCommandBufferStub::GetRequestedAttribute(int attr) const {
  // The requested attributes are stored as (enum, value) pairs; search for
  // the requested attribute and return the value that follows it.
  for (std::vector<int32>::const_iterator it = requested_attribs_.begin();
       it != requested_attribs_.end(); ++it) {
    if (*it++ == attr) {
      return *it;
    }
  }
  return -1;
}

void GpuCommandBufferStub::OnSetGetBuffer(int32 shm_id,
                                          IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetGetBuffer");
  if (command_buffer_)
    command_buffer_->SetGetBuffer(shm_id);
  Send(reply_message);
}

void GpuCommandBufferStub::OnProduceFrontBuffer(const gpu::Mailbox& mailbox) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnProduceFrontBuffer");
  if (!decoder_) {
    LOG(ERROR) << "Can't produce front buffer before initialization.";
    return;
  }

  decoder_->ProduceFrontBuffer(mailbox);
}

void GpuCommandBufferStub::OnParseError() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnParseError");
  DCHECK(command_buffer_.get());
  gpu::CommandBuffer::State state = command_buffer_->GetLastState();
  IPC::Message* msg = new GpuCommandBufferMsg_Destroyed(
      route_id_, state.context_lost_reason, state.error);
  msg->set_unblock(true);
  Send(msg);

  // Tell the browser about this context loss as well, so it can
  // determine whether client APIs like WebGL need to be immediately
  // blocked from automatically running.
  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DidLoseContext(
      handle_.is_null(), state.context_lost_reason, active_url_));

  CheckContextLost();
}

void GpuCommandBufferStub::OnWaitForTokenInRange(int32 start,
                                                 int32 end,
                                                 IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForTokenInRange");
  DCHECK(command_buffer_.get());
  CheckContextLost();
  if (wait_for_token_)
    LOG(ERROR) << "Got WaitForToken command while currently waiting for token.";
  wait_for_token_ =
      make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
  CheckCompleteWaits();
}

void GpuCommandBufferStub::OnWaitForGetOffsetInRange(
    int32 start,
    int32 end,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForGetOffsetInRange");
  DCHECK(command_buffer_.get());
  CheckContextLost();
  if (wait_for_get_offset_) {
    LOG(ERROR)
        << "Got WaitForGetOffset command while currently waiting for offset.";
  }
  wait_for_get_offset_ =
      make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
  CheckCompleteWaits();
}
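
// Replies to any pending WaitForTokenInRange / WaitForGetOffsetInRange
// request whose target range has been reached, or unconditionally once the
// command buffer enters an error state.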
void GpuCommandBufferStub::CheckCompleteWaits() {
  if (wait_for_token_ || wait_for_get_offset_) {
    gpu::CommandBuffer::State state = command_buffer_->GetLastState();
    if (wait_for_token_ &&
        (gpu::CommandBuffer::InRange(
             wait_for_token_->start, wait_for_token_->end, state.token) ||
         state.error != gpu::error::kNoError)) {
      ReportState();
      GpuCommandBufferMsg_WaitForTokenInRange::WriteReplyParams(
          wait_for_token_->reply.get(), state);
      Send(wait_for_token_->reply.release());
      wait_for_token_.reset();
    }
    if (wait_for_get_offset_ &&
        (gpu::CommandBuffer::InRange(wait_for_get_offset_->start,
                                     wait_for_get_offset_->end,
                                     state.get_offset) ||
         state.error != gpu::error::kNoError)) {
      ReportState();
      GpuCommandBufferMsg_WaitForGetOffsetInRange::WriteReplyParams(
          wait_for_get_offset_->reply.get(), state);
      Send(wait_for_get_offset_->reply.release());
      wait_for_get_offset_.reset();
    }
  }
}

void GpuCommandBufferStub::OnAsyncFlush(
    int32 put_offset,
    uint32 flush_count,
    const std::vector<ui::LatencyInfo>& latency_info) {
  TRACE_EVENT1(
      "gpu", "GpuCommandBufferStub::OnAsyncFlush", "put_offset", put_offset);

  if (ui::LatencyInfo::Verify(latency_info,
                              "GpuCommandBufferStub::OnAsyncFlush") &&
      !latency_info_callback_.is_null()) {
    latency_info_callback_.Run(latency_info);
  }
  DCHECK(command_buffer_.get());
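  // |flush_count| increases monotonically and may wrap around, so order
  // flushes via the unsigned difference: a gap below 0x8000000 means this
  // flush is newer than the last one; anything larger is treated as an
  // out-of-order message.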
  if (flush_count - last_flush_count_ < 0x8000000U) {
    last_flush_count_ = flush_count;
    command_buffer_->Flush(put_offset);
  } else {
    // We received this message out-of-order. This should not happen but is
    // here to catch regressions. Ignore the message.
    NOTREACHED() << "Received a Flush message out-of-order";
  }

  ReportState();
}

void GpuCommandBufferStub::OnRescheduled() {
  gpu::CommandBuffer::State pre_state = command_buffer_->GetLastState();
  command_buffer_->Flush(command_buffer_->GetPutOffset());
  gpu::CommandBuffer::State post_state = command_buffer_->GetLastState();

  if (pre_state.get_offset != post_state.get_offset)
    ReportState();
}

void GpuCommandBufferStub::OnRegisterTransferBuffer(
    int32 id,
    base::SharedMemoryHandle transfer_buffer,
    uint32 size) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterTransferBuffer");

  // Take ownership of the memory and map it into this process.
  // This validates the size.
  scoped_ptr<base::SharedMemory> shared_memory(
      new base::SharedMemory(transfer_buffer, false));
  if (!shared_memory->Map(size)) {
    DVLOG(0) << "Failed to map shared memory.";
    return;
  }

  if (command_buffer_) {
    command_buffer_->RegisterTransferBuffer(
        id, gpu::MakeBackingFromSharedMemory(shared_memory.Pass(), size));
  }
}

void GpuCommandBufferStub::OnDestroyTransferBuffer(int32 id) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyTransferBuffer");

  if (command_buffer_)
    command_buffer_->DestroyTransferBuffer(id);
}

void GpuCommandBufferStub::OnCommandProcessed() {
  if (watchdog_)
    watchdog_->CheckArmed();
}

void GpuCommandBufferStub::ReportState() { command_buffer_->UpdateState(); }

void GpuCommandBufferStub::PutChanged() {
  FastSetActiveURL(active_url_, active_url_hash_);
  scheduler_->PutChanged();
}

void GpuCommandBufferStub::OnCreateVideoDecoder(
    media::VideoCodecProfile profile,
    int32 decoder_route_id,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoDecoder");
  GpuVideoDecodeAccelerator* decoder = new GpuVideoDecodeAccelerator(
      decoder_route_id, this, channel_->io_task_runner());
  decoder->Initialize(profile, reply_message);
  // decoder is registered as a DestructionObserver of this stub and will
  // self-delete during destruction of this stub.
}

void GpuCommandBufferStub::OnCreateVideoEncoder(
    media::VideoPixelFormat input_format,
    const gfx::Size& input_visible_size,
    media::VideoCodecProfile output_profile,
    uint32 initial_bitrate,
    int32 encoder_route_id,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoEncoder");
  GpuVideoEncodeAccelerator* encoder =
      new GpuVideoEncodeAccelerator(encoder_route_id, this);
  encoder->Initialize(input_format,
                      input_visible_size,
                      output_profile,
                      initial_bitrate,
                      reply_message);
  // encoder is registered as a DestructionObserver of this stub and will
  // self-delete during destruction of this stub.
}

void GpuCommandBufferStub::OnSetSurfaceVisible(bool visible) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetSurfaceVisible");
  if (memory_manager_client_state_)
    memory_manager_client_state_->SetVisible(visible);
}

void GpuCommandBufferStub::AddSyncPoint(uint32 sync_point) {
  sync_points_.push_back(sync_point);
}
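
// Sync points must be retired in the order they were added (checked by the
// DCHECK below). Before retiring, pending texture updates are pushed so that
// contexts waiting on this sync point observe them.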
void GpuCommandBufferStub::OnRetireSyncPoint(uint32 sync_point) {
  DCHECK(!sync_points_.empty() && sync_points_.front() == sync_point);
  sync_points_.pop_front();

  gpu::gles2::MailboxManager* mailbox_manager =
      context_group_->mailbox_manager();
  if (mailbox_manager->UsesSync() && MakeCurrent())
    mailbox_manager->PushTextureUpdates(sync_point);

  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->RetireSyncPoint(sync_point);
}
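
// Called by the decoder when it reaches a wait on |sync_point|. Returns true
// if execution may continue immediately (the sync point is invalid or already
// retired); otherwise the stub is descheduled until the sync point retires.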
bool GpuCommandBufferStub::OnWaitSyncPoint(uint32 sync_point) {
  if (!sync_point)
    return true;
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  if (manager->sync_point_manager()->IsSyncPointRetired(sync_point)) {
    PullTextureUpdates(sync_point);
    return true;
  }

  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitSyncPoint", this,
                             "GpuCommandBufferStub", this);
  }
  scheduler_->SetScheduled(false);
  ++sync_point_wait_count_;
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point,
      base::Bind(&RunOnThread, base::ThreadTaskRunnerHandle::Get(),
                 base::Bind(&GpuCommandBufferStub::OnWaitSyncPointCompleted,
                            this->AsWeakPtr(), sync_point)));
  return scheduler_->IsScheduled();
}

void GpuCommandBufferStub::OnWaitSyncPointCompleted(uint32 sync_point) {
  PullTextureUpdates(sync_point);
  --sync_point_wait_count_;
  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_END1("gpu", "WaitSyncPoint", this,
                           "GpuCommandBufferStub", this);
  }
  scheduler_->SetScheduled(true);
}

void GpuCommandBufferStub::PullTextureUpdates(uint32 sync_point) {
  gpu::gles2::MailboxManager* mailbox_manager =
      context_group_->mailbox_manager();
  if (mailbox_manager->UsesSync() && MakeCurrent())
    mailbox_manager->PullTextureUpdates(sync_point);
}

void GpuCommandBufferStub::OnSignalSyncPoint(uint32 sync_point, uint32 id) {
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point,
      base::Bind(&RunOnThread, base::ThreadTaskRunnerHandle::Get(),
                 base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
                            this->AsWeakPtr(), id)));
}

void GpuCommandBufferStub::OnSignalSyncPointAck(uint32 id) {
  Send(new GpuCommandBufferMsg_SignalSyncPointAck(route_id_, id));
}

void GpuCommandBufferStub::OnSignalQuery(uint32 query_id, uint32 id) {
  if (decoder_) {
    gpu::gles2::QueryManager* query_manager = decoder_->GetQueryManager();
    if (query_manager) {
      gpu::gles2::QueryManager::Query* query =
          query_manager->GetQuery(query_id);
      if (query) {
        query->AddCallback(
            base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
                       this->AsWeakPtr(),
                       id));
        return;
      }
    }
  }
  // Something went wrong, run callback immediately.
  OnSignalSyncPointAck(id);
}

void GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback(
    bool has_callback) {
  TRACE_EVENT0(
      "gpu",
      "GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback");
  if (has_callback) {
    if (!memory_manager_client_state_) {
      memory_manager_client_state_.reset(GetMemoryManager()->CreateClientState(
          this, surface_id_ != 0, true));
    }
  } else {
    memory_manager_client_state_.reset();
  }
}

void GpuCommandBufferStub::OnCreateImage(int32 id,
                                         gfx::GpuMemoryBufferHandle handle,
                                         gfx::Size size,
                                         gfx::GpuMemoryBuffer::Format format,
                                         uint32 internalformat) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateImage");

  if (!decoder_)
    return;

  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  if (image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image already exists with same ID.";
    return;
  }

  if (!gpu::ImageFactory::IsGpuMemoryBufferFormatSupported(
          format, decoder_->GetCapabilities())) {
    LOG(ERROR) << "Format is not supported.";
    return;
  }

  if (!gpu::ImageFactory::IsImageSizeValidForGpuMemoryBufferFormat(size,
                                                                   format)) {
    LOG(ERROR) << "Invalid image size for format.";
    return;
  }

  if (!gpu::ImageFactory::IsImageFormatCompatibleWithGpuMemoryBufferFormat(
          internalformat, format)) {
    LOG(ERROR) << "Incompatible image format.";
    return;
  }

  scoped_refptr<gfx::GLImage> image = channel()->CreateImageForGpuMemoryBuffer(
      handle, size, format, internalformat);
  if (!image.get())
    return;

  image_manager->AddImage(image.get(), id);
}

void GpuCommandBufferStub::OnDestroyImage(int32 id) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyImage");

  if (!decoder_)
    return;

  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  if (!image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image with ID doesn't exist.";
    return;
  }

  image_manager->RemoveImage(id);
}

void GpuCommandBufferStub::SendConsoleMessage(
    int32 id,
    const std::string& message) {
  GPUCommandBufferConsoleMessage console_message;
  console_message.id = id;
  console_message.message = message;
  IPC::Message* msg = new GpuCommandBufferMsg_ConsoleMsg(
      route_id_, console_message);
  msg->set_unblock(true);
  Send(msg);
}

void GpuCommandBufferStub::SendCachedShader(
    const std::string& key, const std::string& shader) {
  channel_->CacheShader(key, shader);
}

void GpuCommandBufferStub::AddDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.AddObserver(observer);
}

void GpuCommandBufferStub::RemoveDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.RemoveObserver(observer);
}

void GpuCommandBufferStub::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> flag) {
  preemption_flag_ = flag;
  if (scheduler_)
    scheduler_->SetPreemptByFlag(preemption_flag_);
}

bool GpuCommandBufferStub::GetTotalGpuMemory(uint64* bytes) {
  *bytes = total_gpu_memory_;
  return !!total_gpu_memory_;
}

gfx::Size GpuCommandBufferStub::GetSurfaceSize() const {
  if (!surface_.get())
    return gfx::Size();
  return surface_->GetSize();
}

const gpu::gles2::FeatureInfo* GpuCommandBufferStub::GetFeatureInfo() const {
  return context_group_->feature_info();
}

gpu::gles2::MemoryTracker* GpuCommandBufferStub::GetMemoryTracker() const {
  return context_group_->memory_tracker();
}

void GpuCommandBufferStub::SetMemoryAllocation(
    const gpu::MemoryAllocation& allocation) {
  if (!last_memory_allocation_valid_ ||
      !allocation.Equals(last_memory_allocation_)) {
    Send(new GpuCommandBufferMsg_SetMemoryAllocation(
        route_id_, allocation));
  }

  last_memory_allocation_valid_ = true;
  last_memory_allocation_ = allocation;
}

void GpuCommandBufferStub::SuggestHaveFrontBuffer(
    bool suggest_have_frontbuffer) {
  // This can be called outside of OnMessageReceived, so the context needs
  // to be made current before calling methods on the surface.
  if (surface_.get() && MakeCurrent())
    surface_->SetFrontbufferAllocation(suggest_have_frontbuffer);
}

bool GpuCommandBufferStub::CheckContextLost() {
  DCHECK(command_buffer_);
  gpu::CommandBuffer::State state = command_buffer_->GetLastState();
  bool was_lost = state.error == gpu::error::kLostContext;

  // Work around issues with recovery by allowing a new GPU process to launch.
  if (was_lost &&
      context_group_->feature_info()->workarounds().exit_on_context_lost &&
      !base::CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kSingleProcess) &&
      !base::CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kInProcessGPU)) {
    LOG(ERROR) << "Exiting GPU process because some drivers cannot recover"
               << " from problems.";
#if defined(OS_WIN)
    base::win::SetShouldCrashOnProcessDetach(false);
#endif
    exit(0);
  }
  // Lose all other contexts if the reset was triggered by the robustness
  // extension instead of being synthetic.
  if (was_lost && decoder_ && decoder_->WasContextLostByRobustnessExtension() &&
      (gfx::GLContext::LosesAllContextsOnContextLost() ||
       use_virtualized_gl_context_))
    channel_->LoseAllContexts();
  CheckCompleteWaits();
  return was_lost;
}

void GpuCommandBufferStub::MarkContextLost() {
  if (!command_buffer_ ||
      command_buffer_->GetLastState().error == gpu::error::kLostContext)
    return;

  command_buffer_->SetContextLostReason(gpu::error::kUnknown);
  if (decoder_)
    decoder_->MarkContextLost(gpu::error::kUnknown);
  command_buffer_->SetParseError(gpu::error::kLostContext);
}

void GpuCommandBufferStub::SendSwapBuffersCompleted(
    const std::vector<ui::LatencyInfo>& latency_info,
    gfx::SwapResult result) {
  Send(new GpuCommandBufferMsg_SwapBuffersCompleted(route_id_, latency_info,
                                                    result));
}

void GpuCommandBufferStub::SendUpdateVSyncParameters(base::TimeTicks timebase,
                                                     base::TimeDelta interval) {
  Send(new GpuCommandBufferMsg_UpdateVSyncParameters(route_id_, timebase,
                                                     interval));
}

}  // namespace content