content/common/gpu/gpu_command_buffer_stub.cc
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/hash.h"
#include "base/json/json_writer.h"
#include "base/memory/shared_memory.h"
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_command_buffer_stub.h"
#include "content/common/gpu/gpu_memory_manager.h"
#include "content/common/gpu/gpu_memory_tracking.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/gpu_watchdog.h"
#include "content/common/gpu/image_transport_surface.h"
#include "content/common/gpu/media/gpu_video_decode_accelerator.h"
#include "content/common/gpu/media/gpu_video_encode_accelerator.h"
#include "content/public/common/content_client.h"
#include "content/public/common/content_switches.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/service/gl_context_virtual.h"
#include "gpu/command_buffer/service/gl_state_restorer_impl.h"
#include "gpu/command_buffer/service/image_factory.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/logger.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/query_manager.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/command_buffer/service/valuebuffer_manager.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_switches.h"

#if defined(OS_WIN)
#include "base/win/win_util.h"
#include "content/public/common/sandbox_init.h"
#endif

#if defined(OS_ANDROID)
#include "content/common/gpu/stream_texture_android.h"
#endif

namespace content {
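// Bookkeeping for a WaitForTokenInRange/WaitForGetOffsetInRange request whose
// reply is held back until the command buffer reaches the requested range
// (see CheckCompleteWaits below).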
struct WaitForCommandState {
  WaitForCommandState(int32 start, int32 end, IPC::Message* reply)
      : start(start), end(end), reply(reply) {}

  int32 start;
  int32 end;
  scoped_ptr<IPC::Message> reply;
};

namespace {

// The GpuCommandBufferMemoryTracker class provides a bridge between the
// ContextGroup's memory type managers and the GpuMemoryManager class.
class GpuCommandBufferMemoryTracker : public gpu::gles2::MemoryTracker {
 public:
  explicit GpuCommandBufferMemoryTracker(GpuChannel* channel)
      : tracking_group_(
            channel->gpu_channel_manager()
                ->gpu_memory_manager()
                ->CreateTrackingGroup(channel->renderer_pid(), this)),
        client_tracing_id_(channel->client_tracing_id()),
        client_id_(channel->client_id()) {}

  void TrackMemoryAllocatedChange(
      size_t old_size,
      size_t new_size,
      gpu::gles2::MemoryTracker::Pool pool) override {
    tracking_group_->TrackMemoryAllocatedChange(
        old_size, new_size, pool);
  }

  bool EnsureGPUMemoryAvailable(size_t size_needed) override {
    return tracking_group_->EnsureGPUMemoryAvailable(size_needed);
  }

  uint64_t ClientTracingId() const override { return client_tracing_id_; }
  int ClientId() const override { return client_id_; }

 private:
  ~GpuCommandBufferMemoryTracker() override {}

  scoped_ptr<GpuMemoryTrackingGroup> tracking_group_;
  const uint64_t client_tracing_id_;
  const int client_id_;

  DISALLOW_COPY_AND_ASSIGN(GpuCommandBufferMemoryTracker);
};

// FastSetActiveURL will shortcut the expensive call to SetActiveURL when the
// url_hash matches.
void FastSetActiveURL(const GURL& url, size_t url_hash) {
  // Leave the previously set URL in the empty case -- empty URLs are given by
  // BlinkPlatformImpl::createOffscreenGraphicsContext3D. Hopefully the
  // onscreen context URL was set previously and will show up even when a crash
  // occurs during offscreen command processing.
  if (url.is_empty())
    return;
  static size_t g_last_url_hash = 0;
  if (url_hash != g_last_url_hash) {
    g_last_url_hash = url_hash;
    GetContentClient()->SetActiveURL(url);
  }
}
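// Note: FastSetActiveURL above caches the last hash in an unsynchronized
// function-local static, so it presumably relies on all callers running on
// the same (GPU main) thread.
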
// The first time polling a fence, delay some extra time to allow other
// stubs to process some work, or else the timing of the fences could
// allow a pattern of alternating fast and slow frames to occur.
const int64 kHandleMoreWorkPeriodMs = 2;
const int64 kHandleMoreWorkPeriodBusyMs = 1;

// Prevents idle work from being starved.
const int64 kMaxTimeSinceIdleMs = 10;
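
// Wraps the channel's memory-usage numbers so they can be attached to
// devtools.timeline trace events (see the TRACE_EVENT1 in OnMessageReceived)
// and serialized to JSON on demand.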
class DevToolsChannelData : public base::trace_event::ConvertableToTraceFormat {
 public:
  static scoped_refptr<base::trace_event::ConvertableToTraceFormat>
  CreateForChannel(GpuChannel* channel);

  void AppendAsTraceFormat(std::string* out) const override {
    std::string tmp;
    base::JSONWriter::Write(*value_, &tmp);
    *out += tmp;
  }

 private:
  explicit DevToolsChannelData(base::Value* value) : value_(value) {}
  ~DevToolsChannelData() override {}
  scoped_ptr<base::Value> value_;
  DISALLOW_COPY_AND_ASSIGN(DevToolsChannelData);
};

scoped_refptr<base::trace_event::ConvertableToTraceFormat>
DevToolsChannelData::CreateForChannel(GpuChannel* channel) {
  scoped_ptr<base::DictionaryValue> res(new base::DictionaryValue);
  res->SetInteger("renderer_pid", channel->renderer_pid());
  res->SetDouble("used_bytes", channel->GetMemoryUsage());
  res->SetDouble("limit_bytes",
                 channel->gpu_channel_manager()
                     ->gpu_memory_manager()
                     ->GetMaximumClientAllocation());
  return new DevToolsChannelData(res.release());
}
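
// Runs |callback| immediately if called on |task_runner|'s thread, otherwise
// posts it to that thread. Used below to hop sync-point callbacks back onto
// the stub's own thread.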
void RunOnThread(scoped_refptr<base::SingleThreadTaskRunner> task_runner,
                 const base::Closure& callback) {
  if (task_runner->BelongsToCurrentThread()) {
    callback.Run();
  } else {
    task_runner->PostTask(FROM_HERE, callback);
  }
}

}  // namespace

GpuCommandBufferStub::GpuCommandBufferStub(
    GpuChannel* channel,
    GpuCommandBufferStub* share_group,
    const gfx::GLSurfaceHandle& handle,
    gpu::gles2::MailboxManager* mailbox_manager,
    gpu::gles2::SubscriptionRefSet* subscription_ref_set,
    gpu::ValueStateMap* pending_valuebuffer_state,
    const gfx::Size& size,
    const gpu::gles2::DisallowedFeatures& disallowed_features,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    bool use_virtualized_gl_context,
    int32 route_id,
    int32 surface_id,
    GpuWatchdog* watchdog,
    bool software,
    const GURL& active_url)
    : channel_(channel),
      handle_(handle),
      initial_size_(size),
      disallowed_features_(disallowed_features),
      requested_attribs_(attribs),
      gpu_preference_(gpu_preference),
      use_virtualized_gl_context_(use_virtualized_gl_context),
      route_id_(route_id),
      surface_id_(surface_id),
      software_(software),
      last_flush_count_(0),
      last_memory_allocation_valid_(false),
      watchdog_(watchdog),
      sync_point_wait_count_(0),
      delayed_work_scheduled_(false),
      previous_messages_processed_(0),
      active_url_(active_url),
      total_gpu_memory_(0) {
  active_url_hash_ = base::Hash(active_url.possibly_invalid_spec());
  FastSetActiveURL(active_url_, active_url_hash_);

  gpu::gles2::ContextCreationAttribHelper attrib_parser;
  attrib_parser.Parse(requested_attribs_);

  if (share_group) {
    context_group_ = share_group->context_group_;
    DCHECK(context_group_->bind_generates_resource() ==
           attrib_parser.bind_generates_resource);
  } else {
    context_group_ = new gpu::gles2::ContextGroup(
        mailbox_manager,
        new GpuCommandBufferMemoryTracker(channel),
        channel_->gpu_channel_manager()->shader_translator_cache(),
        NULL,
        subscription_ref_set,
        pending_valuebuffer_state,
        attrib_parser.bind_generates_resource);
  }

  use_virtualized_gl_context_ |=
      context_group_->feature_info()->workarounds().use_virtualized_gl_contexts;

  bool is_offscreen = surface_id_ == 0;
  if (is_offscreen && initial_size_.IsEmpty()) {
    // If we're an offscreen surface with zero width and/or height, set to a
    // non-zero size so that we have a complete framebuffer for operations like
    // glClear.
    initial_size_ = gfx::Size(1, 1);
  }
}

GpuCommandBufferStub::~GpuCommandBufferStub() {
  Destroy();

  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DestroyCommandBuffer(surface_id()));
}

GpuMemoryManager* GpuCommandBufferStub::GetMemoryManager() const {
  return channel()->gpu_channel_manager()->gpu_memory_manager();
}

bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"),
               "GPUTask",
               "data",
               DevToolsChannelData::CreateForChannel(channel()));
  FastSetActiveURL(active_url_, active_url_hash_);

  bool have_context = false;
  // Ensure the appropriate GL context is current before handling any IPC
  // messages directed at the command buffer. This ensures that the message
  // handler can assume that the context is current (not necessary for
  // RetireSyncPoint or WaitSyncPoint).
  if (decoder_.get() &&
      message.type() != GpuCommandBufferMsg_SetGetBuffer::ID &&
      message.type() != GpuCommandBufferMsg_WaitForTokenInRange::ID &&
      message.type() != GpuCommandBufferMsg_WaitForGetOffsetInRange::ID &&
      message.type() != GpuCommandBufferMsg_RegisterTransferBuffer::ID &&
      message.type() != GpuCommandBufferMsg_DestroyTransferBuffer::ID &&
      message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID &&
      message.type() != GpuCommandBufferMsg_SignalSyncPoint::ID &&
      message.type() !=
          GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback::ID) {
    if (!MakeCurrent())
      return false;
    have_context = true;
  }

  // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers
  // here. This is so the reply can be delayed if the scheduler is unscheduled.
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize,
                                    OnInitialize);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetGetBuffer,
                                    OnSetGetBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ProduceFrontBuffer,
                        OnProduceFrontBuffer);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForTokenInRange,
                                    OnWaitForTokenInRange);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForGetOffsetInRange,
                                    OnWaitForGetOffsetInRange);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer,
                        OnRegisterTransferBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer,
                        OnDestroyTransferBuffer);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoDecoder,
                                    OnCreateVideoDecoder)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoEncoder,
                                    OnCreateVideoEncoder)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetSurfaceVisible,
                        OnSetSurfaceVisible)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RetireSyncPoint,
                        OnRetireSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPoint,
                        OnSignalSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalQuery,
                        OnSignalQuery)
    IPC_MESSAGE_HANDLER(
        GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback,
        OnSetClientHasMemoryAllocationChangedCallback)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateImage, OnCreateImage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyImage, OnDestroyImage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateStreamTexture,
                        OnCreateStreamTexture)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  CheckCompleteWaits();

  if (have_context) {
    // Ensure that any delayed work that was created will be handled.
    ScheduleDelayedWork(kHandleMoreWorkPeriodMs);
  }

  DCHECK(handled);
  return handled;
}

bool GpuCommandBufferStub::Send(IPC::Message* message) {
  return channel_->Send(message);
}

bool GpuCommandBufferStub::IsScheduled() {
  return (!scheduler_.get() || scheduler_->IsScheduled());
}

bool GpuCommandBufferStub::HasMoreWork() {
  return scheduler_.get() && scheduler_->HasMoreWork();
}

void GpuCommandBufferStub::PollWork() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork");
  delayed_work_scheduled_ = false;
  FastSetActiveURL(active_url_, active_url_hash_);
  if (decoder_.get() && !MakeCurrent())
    return;

  if (scheduler_) {
    uint64 current_messages_processed =
        channel()->gpu_channel_manager()->MessagesProcessed();
    // We're idle when no messages were processed or scheduled.
    bool is_idle =
        (previous_messages_processed_ == current_messages_processed) &&
        !channel()->gpu_channel_manager()->HandleMessagesScheduled();
    if (!is_idle && !last_idle_time_.is_null()) {
      base::TimeDelta time_since_idle =
          base::TimeTicks::Now() - last_idle_time_;
      base::TimeDelta max_time_since_idle =
          base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs);

      // Force idle when it's been too long since last time we were idle.
      if (time_since_idle > max_time_since_idle)
        is_idle = true;
    }

    if (is_idle) {
      last_idle_time_ = base::TimeTicks::Now();
      scheduler_->PerformIdleWork();
    }
  }
  ScheduleDelayedWork(kHandleMoreWorkPeriodBusyMs);
}

bool GpuCommandBufferStub::HasUnprocessedCommands() {
  if (command_buffer_) {
    gpu::CommandBuffer::State state = command_buffer_->GetLastState();
    return command_buffer_->GetPutOffset() != state.get_offset &&
           !gpu::error::IsError(state.error);
  }
  return false;
}
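
// Schedules PollWork() to run after |delay| ms (coalescing with any poll that
// is already pending), and records the bookkeeping PollWork() uses to decide
// whether the channel has been idle long enough to perform idle work.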
void GpuCommandBufferStub::ScheduleDelayedWork(int64 delay) {
  if (!HasMoreWork()) {
    last_idle_time_ = base::TimeTicks();
    return;
  }

  if (delayed_work_scheduled_)
    return;
  delayed_work_scheduled_ = true;

  // Idle when no messages are processed between now and when
  // PollWork is called.
  previous_messages_processed_ =
      channel()->gpu_channel_manager()->MessagesProcessed();
  if (last_idle_time_.is_null())
    last_idle_time_ = base::TimeTicks::Now();

  // IsScheduled() returns true after passing all unschedule fences
  // and this is when we can start performing idle work. Idle work
  // is done synchronously so we can set delay to 0 and instead poll
  // for more work at the rate idle work is performed. This also ensures
  // that idle work is done as efficiently as possible without any
  // unnecessary delays.
  if (scheduler_.get() &&
      scheduler_->IsScheduled() &&
      scheduler_->HasMoreIdleWork()) {
    delay = 0;
  }

  base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
      FROM_HERE, base::Bind(&GpuCommandBufferStub::PollWork, AsWeakPtr()),
      base::TimeDelta::FromMilliseconds(delay));
}

bool GpuCommandBufferStub::MakeCurrent() {
  if (decoder_->MakeCurrent())
    return true;
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
  command_buffer_->SetParseError(gpu::error::kLostContext);
  CheckContextLost();
  return false;
}

void GpuCommandBufferStub::Destroy() {
  if (wait_for_token_) {
    Send(wait_for_token_->reply.release());
    wait_for_token_.reset();
  }
  if (wait_for_get_offset_) {
    Send(wait_for_get_offset_->reply.release());
    wait_for_get_offset_.reset();
  }
  if (handle_.is_null() && !active_url_.is_empty()) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(new GpuHostMsg_DidDestroyOffscreenContext(
        active_url_));
  }

  memory_manager_client_state_.reset();

  while (!sync_points_.empty())
    OnRetireSyncPoint(sync_points_.front());

  if (decoder_)
    decoder_->set_engine(NULL);

  // The scheduler has raw references to the decoder and the command buffer so
  // destroy it before those.
  scheduler_.reset();

  bool have_context = false;
  if (decoder_ && decoder_->GetGLContext()) {
    // Try to make the context current regardless of whether it was lost, so we
    // don't leak resources.
    have_context = decoder_->GetGLContext()->MakeCurrent(surface_.get());
  }
  FOR_EACH_OBSERVER(DestructionObserver,
                    destruction_observers_,
                    OnWillDestroyStub());

  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }

  command_buffer_.reset();

  // Remove this after crbug.com/248395 is sorted out.
  surface_ = NULL;
}

void GpuCommandBufferStub::OnInitializeFailed(IPC::Message* reply_message) {
  Destroy();
  GpuCommandBufferMsg_Initialize::WriteReplyParams(
      reply_message, false, gpu::Capabilities());
  Send(reply_message);
}

void GpuCommandBufferStub::OnInitialize(
    base::SharedMemoryHandle shared_state_handle,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnInitialize");
  DCHECK(!command_buffer_.get());

  scoped_ptr<base::SharedMemory> shared_state_shm(
      new base::SharedMemory(shared_state_handle, false));

  command_buffer_.reset(new gpu::CommandBufferService(
      context_group_->transfer_buffer_manager()));

  bool result = command_buffer_->Initialize();
  DCHECK(result);

  decoder_.reset(::gpu::gles2::GLES2Decoder::Create(context_group_.get()));
  scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(),
                                         decoder_.get(),
                                         decoder_.get()));
  if (preemption_flag_.get())
    scheduler_->SetPreemptByFlag(preemption_flag_);

  decoder_->set_engine(scheduler_.get());

  if (!handle_.is_null()) {
#if defined(OS_MACOSX) || defined(UI_COMPOSITOR_IMAGE_TRANSPORT)
    if (software_) {
      LOG(ERROR) << "No software support.";
      OnInitializeFailed(reply_message);
      return;
    }
#endif

    surface_ = ImageTransportSurface::CreateSurface(
        channel_->gpu_channel_manager(),
        this,
        handle_);
  } else {
    GpuChannelManager* manager = channel_->gpu_channel_manager();
    surface_ = manager->GetDefaultOffscreenSurface();
  }

  if (!surface_.get()) {
    DLOG(ERROR) << "Failed to create surface.";
    OnInitializeFailed(reply_message);
    return;
  }

  scoped_refptr<gfx::GLContext> context;
  if (use_virtualized_gl_context_ && channel_->share_group()) {
    context = channel_->share_group()->GetSharedContext();
    if (!context.get()) {
      context = gfx::GLContext::CreateGLContext(
          channel_->share_group(),
          channel_->gpu_channel_manager()->GetDefaultOffscreenSurface(),
          gpu_preference_);
      if (!context.get()) {
        DLOG(ERROR) << "Failed to create shared context for virtualization.";
        OnInitializeFailed(reply_message);
        return;
      }
      channel_->share_group()->SetSharedContext(context.get());
    }
    // This should be a non-virtual GL context.
    DCHECK(context->GetHandle());
    context = new gpu::GLContextVirtual(
        channel_->share_group(), context.get(), decoder_->AsWeakPtr());
    if (!context->Initialize(surface_.get(), gpu_preference_)) {
      // TODO(sievers): The real context created above for the default
      // offscreen surface might not be compatible with this surface.
      // Need to adjust at least GLX to be able to create the initial context
      // with a config that is compatible with onscreen and offscreen surfaces.
      context = NULL;

      DLOG(ERROR) << "Failed to initialize virtual GL context.";
      OnInitializeFailed(reply_message);
      return;
    }
  }
  if (!context.get()) {
    context = gfx::GLContext::CreateGLContext(
        channel_->share_group(), surface_.get(), gpu_preference_);
  }
  if (!context.get()) {
    DLOG(ERROR) << "Failed to create context.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Failed to make context current.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->GetGLStateRestorer()) {
    context->SetGLStateRestorer(
        new gpu::GLStateRestorerImpl(decoder_->AsWeakPtr()));
  }

  if (!context->GetTotalGpuMemory(&total_gpu_memory_))
    total_gpu_memory_ = 0;

  if (!context_group_->has_program_cache() &&
      !context_group_->feature_info()->workarounds().disable_program_cache) {
    context_group_->set_program_cache(
        channel_->gpu_channel_manager()->program_cache());
  }

  // Initialize the decoder with either the view or pbuffer GLContext.
  if (!decoder_->Initialize(surface_,
                            context,
                            !surface_id(),
                            initial_size_,
                            disallowed_features_,
                            requested_attribs_)) {
    DLOG(ERROR) << "Failed to initialize decoder.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (base::CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kEnableGPUServiceLogging)) {
    decoder_->set_log_commands(true);
  }

  decoder_->GetLogger()->SetMsgCallback(
      base::Bind(&GpuCommandBufferStub::SendConsoleMessage,
                 base::Unretained(this)));
  decoder_->SetShaderCacheCallback(
      base::Bind(&GpuCommandBufferStub::SendCachedShader,
                 base::Unretained(this)));
  decoder_->SetWaitSyncPointCallback(
      base::Bind(&GpuCommandBufferStub::OnWaitSyncPoint,
                 base::Unretained(this)));

  command_buffer_->SetPutOffsetChangeCallback(
      base::Bind(&GpuCommandBufferStub::PutChanged, base::Unretained(this)));
  command_buffer_->SetGetBufferChangeCallback(
      base::Bind(&gpu::GpuScheduler::SetGetBuffer,
                 base::Unretained(scheduler_.get())));
  command_buffer_->SetParseErrorCallback(
      base::Bind(&GpuCommandBufferStub::OnParseError, base::Unretained(this)));
  scheduler_->SetSchedulingChangedCallback(
      base::Bind(&GpuChannel::StubSchedulingChanged,
                 base::Unretained(channel_)));

  if (watchdog_) {
    scheduler_->SetCommandProcessedCallback(
        base::Bind(&GpuCommandBufferStub::OnCommandProcessed,
                   base::Unretained(this)));
  }

  const size_t kSharedStateSize = sizeof(gpu::CommandBufferSharedState);
  if (!shared_state_shm->Map(kSharedStateSize)) {
    DLOG(ERROR) << "Failed to map shared state buffer.";
    OnInitializeFailed(reply_message);
    return;
  }
  command_buffer_->SetSharedStateBuffer(gpu::MakeBackingFromSharedMemory(
      shared_state_shm.Pass(), kSharedStateSize));

  gpu::Capabilities capabilities = decoder_->GetCapabilities();
  capabilities.future_sync_points = channel_->allow_future_sync_points();

  GpuCommandBufferMsg_Initialize::WriteReplyParams(
      reply_message, true, capabilities);
  Send(reply_message);

  if (handle_.is_null() && !active_url_.is_empty()) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(new GpuHostMsg_DidCreateOffscreenContext(
        active_url_));
  }
}

void GpuCommandBufferStub::OnCreateStreamTexture(
    uint32 texture_id, int32 stream_id, bool* succeeded) {
#if defined(OS_ANDROID)
  *succeeded = StreamTexture::Create(this, texture_id, stream_id);
#else
  *succeeded = false;
#endif
}

void GpuCommandBufferStub::SetLatencyInfoCallback(
    const LatencyInfoCallback& callback) {
  latency_info_callback_ = callback;
}

int32 GpuCommandBufferStub::GetRequestedAttribute(int attr) const {
  // The requested attributes are stored as (enum, value) pairs; search for
  // the requested attribute and return its value.
  for (std::vector<int32>::const_iterator it = requested_attribs_.begin();
       it != requested_attribs_.end(); ++it) {
    if (*it++ == attr) {
      return *it;
    }
  }
  return -1;
}
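
// For example, given a hypothetical attribute vector {EGL_ALPHA_SIZE, 8,
// EGL_DEPTH_SIZE, 24}, GetRequestedAttribute(EGL_DEPTH_SIZE) returns 24 and
// any attribute not present returns -1. Note the loop assumes well-formed
// pairs: a key in the final slot would leave no value to read.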
void GpuCommandBufferStub::OnSetGetBuffer(int32 shm_id,
                                          IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetGetBuffer");
  if (command_buffer_)
    command_buffer_->SetGetBuffer(shm_id);
  Send(reply_message);
}

void GpuCommandBufferStub::OnProduceFrontBuffer(const gpu::Mailbox& mailbox) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnProduceFrontBuffer");
  if (!decoder_) {
    LOG(ERROR) << "Can't produce front buffer before initialization.";
    return;
  }

  decoder_->ProduceFrontBuffer(mailbox);
}

void GpuCommandBufferStub::OnParseError() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnParseError");
  DCHECK(command_buffer_.get());
  gpu::CommandBuffer::State state = command_buffer_->GetLastState();
  IPC::Message* msg = new GpuCommandBufferMsg_Destroyed(
      route_id_, state.context_lost_reason, state.error);
  msg->set_unblock(true);
  Send(msg);

  // Tell the browser about this context loss as well, so it can
  // determine whether client APIs like WebGL need to be immediately
  // blocked from automatically running.
  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DidLoseContext(
      handle_.is_null(), state.context_lost_reason, active_url_));

  CheckContextLost();
}

void GpuCommandBufferStub::OnWaitForTokenInRange(int32 start,
                                                 int32 end,
                                                 IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForTokenInRange");
  DCHECK(command_buffer_.get());
  CheckContextLost();
  if (wait_for_token_)
    LOG(ERROR) << "Got WaitForToken command while currently waiting for token.";
  wait_for_token_ =
      make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
  CheckCompleteWaits();
}

void GpuCommandBufferStub::OnWaitForGetOffsetInRange(
    int32 start,
    int32 end,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForGetOffsetInRange");
  DCHECK(command_buffer_.get());
  CheckContextLost();
  if (wait_for_get_offset_) {
    LOG(ERROR)
        << "Got WaitForGetOffset command while currently waiting for offset.";
  }
  wait_for_get_offset_ =
      make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
  CheckCompleteWaits();
}

void GpuCommandBufferStub::CheckCompleteWaits() {
  if (wait_for_token_ || wait_for_get_offset_) {
    gpu::CommandBuffer::State state = command_buffer_->GetLastState();
    if (wait_for_token_ &&
        (gpu::CommandBuffer::InRange(
             wait_for_token_->start, wait_for_token_->end, state.token) ||
         state.error != gpu::error::kNoError)) {
      ReportState();
      GpuCommandBufferMsg_WaitForTokenInRange::WriteReplyParams(
          wait_for_token_->reply.get(), state);
      Send(wait_for_token_->reply.release());
      wait_for_token_.reset();
    }
    if (wait_for_get_offset_ &&
        (gpu::CommandBuffer::InRange(wait_for_get_offset_->start,
                                     wait_for_get_offset_->end,
                                     state.get_offset) ||
         state.error != gpu::error::kNoError)) {
      ReportState();
      GpuCommandBufferMsg_WaitForGetOffsetInRange::WriteReplyParams(
          wait_for_get_offset_->reply.get(), state);
      Send(wait_for_get_offset_->reply.release());
      wait_for_get_offset_.reset();
    }
  }
}

void GpuCommandBufferStub::OnAsyncFlush(
    int32 put_offset,
    uint32 flush_count,
    const std::vector<ui::LatencyInfo>& latency_info) {
  TRACE_EVENT1(
      "gpu", "GpuCommandBufferStub::OnAsyncFlush", "put_offset", put_offset);

  if (ui::LatencyInfo::Verify(latency_info,
                              "GpuCommandBufferStub::OnAsyncFlush") &&
      !latency_info_callback_.is_null()) {
    latency_info_callback_.Run(latency_info);
  }
  DCHECK(command_buffer_.get());
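  // |flush_count| is an incrementing counter that may wrap around; the
  // unsigned subtraction below keeps the comparison valid across the wrap,
  // treating a forward jump of less than 2^27 (0x8000000) as in-order.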
  if (flush_count - last_flush_count_ < 0x8000000U) {
    last_flush_count_ = flush_count;
    command_buffer_->Flush(put_offset);
  } else {
    // We received this message out-of-order. This should not happen but is here
    // to catch regressions. Ignore the message.
    NOTREACHED() << "Received a Flush message out-of-order";
  }

  ReportState();
}

void GpuCommandBufferStub::OnRescheduled() {
  gpu::CommandBuffer::State pre_state = command_buffer_->GetLastState();
  command_buffer_->Flush(command_buffer_->GetPutOffset());
  gpu::CommandBuffer::State post_state = command_buffer_->GetLastState();

  if (pre_state.get_offset != post_state.get_offset)
    ReportState();
}

void GpuCommandBufferStub::OnRegisterTransferBuffer(
    int32 id,
    base::SharedMemoryHandle transfer_buffer,
    uint32 size) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterTransferBuffer");

  // Take ownership of the memory and map it into this process.
  // This validates the size.
  scoped_ptr<base::SharedMemory> shared_memory(
      new base::SharedMemory(transfer_buffer, false));
  if (!shared_memory->Map(size)) {
    DVLOG(0) << "Failed to map shared memory.";
    return;
  }

  if (command_buffer_) {
    command_buffer_->RegisterTransferBuffer(
        id, gpu::MakeBackingFromSharedMemory(shared_memory.Pass(), size));
  }
}

void GpuCommandBufferStub::OnDestroyTransferBuffer(int32 id) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyTransferBuffer");

  if (command_buffer_)
    command_buffer_->DestroyTransferBuffer(id);
}

void GpuCommandBufferStub::OnCommandProcessed() {
  if (watchdog_)
    watchdog_->CheckArmed();
}

void GpuCommandBufferStub::ReportState() { command_buffer_->UpdateState(); }

void GpuCommandBufferStub::PutChanged() {
  FastSetActiveURL(active_url_, active_url_hash_);
  scheduler_->PutChanged();
}

void GpuCommandBufferStub::OnCreateVideoDecoder(
    media::VideoCodecProfile profile,
    int32 decoder_route_id,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoDecoder");
  GpuVideoDecodeAccelerator* decoder = new GpuVideoDecodeAccelerator(
      decoder_route_id, this, channel_->io_task_runner());
  decoder->Initialize(profile, reply_message);
  // decoder is registered as a DestructionObserver of this stub and will
  // self-delete during destruction of this stub.
}

void GpuCommandBufferStub::OnCreateVideoEncoder(
    media::VideoPixelFormat input_format,
    const gfx::Size& input_visible_size,
    media::VideoCodecProfile output_profile,
    uint32 initial_bitrate,
    int32 encoder_route_id,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoEncoder");
  GpuVideoEncodeAccelerator* encoder =
      new GpuVideoEncodeAccelerator(encoder_route_id, this);
  encoder->Initialize(input_format,
                      input_visible_size,
                      output_profile,
                      initial_bitrate,
                      reply_message);
  // encoder is registered as a DestructionObserver of this stub and will
  // self-delete during destruction of this stub.
}

void GpuCommandBufferStub::OnSetSurfaceVisible(bool visible) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetSurfaceVisible");
  if (memory_manager_client_state_)
    memory_manager_client_state_->SetVisible(visible);
}

void GpuCommandBufferStub::AddSyncPoint(uint32 sync_point) {
  sync_points_.push_back(sync_point);
}
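
// Sync points are inserted and retired in FIFO order; OnRetireSyncPoint below
// DCHECKs that the point being retired is the oldest outstanding one.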
void GpuCommandBufferStub::OnRetireSyncPoint(uint32 sync_point) {
  DCHECK(!sync_points_.empty() && sync_points_.front() == sync_point);
  sync_points_.pop_front();

  gpu::gles2::MailboxManager* mailbox_manager =
      context_group_->mailbox_manager();
  if (mailbox_manager->UsesSync() && MakeCurrent())
    mailbox_manager->PushTextureUpdates(sync_point);

  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->RetireSyncPoint(sync_point);
}
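
// Called by the decoder when it executes a wait on |sync_point|. If the sync
// point has already been retired this returns immediately; otherwise the stub
// deschedules itself and registers OnWaitSyncPointCompleted to reschedule it
// once the sync point retires, so command processing resumes where it
// stopped.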
bool GpuCommandBufferStub::OnWaitSyncPoint(uint32 sync_point) {
  if (!sync_point)
    return true;
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  if (manager->sync_point_manager()->IsSyncPointRetired(sync_point)) {
    PullTextureUpdates(sync_point);
    return true;
  }

  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitSyncPoint", this,
                             "GpuCommandBufferStub", this);
  }
  scheduler_->SetScheduled(false);
  ++sync_point_wait_count_;
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point,
      base::Bind(&RunOnThread, base::ThreadTaskRunnerHandle::Get(),
                 base::Bind(&GpuCommandBufferStub::OnWaitSyncPointCompleted,
                            this->AsWeakPtr(), sync_point)));
  return scheduler_->IsScheduled();
}

void GpuCommandBufferStub::OnWaitSyncPointCompleted(uint32 sync_point) {
  PullTextureUpdates(sync_point);
  --sync_point_wait_count_;
  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_END1("gpu", "WaitSyncPoint", this,
                           "GpuCommandBufferStub", this);
  }
  scheduler_->SetScheduled(true);
}

void GpuCommandBufferStub::PullTextureUpdates(uint32 sync_point) {
  gpu::gles2::MailboxManager* mailbox_manager =
      context_group_->mailbox_manager();
  if (mailbox_manager->UsesSync() && MakeCurrent())
    mailbox_manager->PullTextureUpdates(sync_point);
}

void GpuCommandBufferStub::OnSignalSyncPoint(uint32 sync_point, uint32 id) {
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point,
      base::Bind(&RunOnThread, base::ThreadTaskRunnerHandle::Get(),
                 base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
                            this->AsWeakPtr(), id)));
}

void GpuCommandBufferStub::OnSignalSyncPointAck(uint32 id) {
  Send(new GpuCommandBufferMsg_SignalSyncPointAck(route_id_, id));
}

void GpuCommandBufferStub::OnSignalQuery(uint32 query_id, uint32 id) {
  if (decoder_) {
    gpu::gles2::QueryManager* query_manager = decoder_->GetQueryManager();
    if (query_manager) {
      gpu::gles2::QueryManager::Query* query =
          query_manager->GetQuery(query_id);
      if (query) {
        query->AddCallback(
            base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
                       this->AsWeakPtr(),
                       id));
        return;
      }
    }
  }
  // Something went wrong, run callback immediately.
  OnSignalSyncPointAck(id);
}

void GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback(
    bool has_callback) {
  TRACE_EVENT0(
      "gpu",
      "GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback");
  if (has_callback) {
    if (!memory_manager_client_state_) {
      memory_manager_client_state_.reset(GetMemoryManager()->CreateClientState(
          this, surface_id_ != 0, true));
    }
  } else {
    memory_manager_client_state_.reset();
  }
}
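
// Creates a GLImage for the given GpuMemoryBuffer and registers it with the
// decoder's ImageManager under |id|. The request is validated before use:
// duplicate IDs, unsupported buffer formats, sizes invalid for the format,
// and internalformat/format mismatches are all rejected.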
void GpuCommandBufferStub::OnCreateImage(int32 id,
                                         gfx::GpuMemoryBufferHandle handle,
                                         gfx::Size size,
                                         gfx::BufferFormat format,
                                         uint32 internalformat) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateImage");

  if (!decoder_)
    return;

  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  if (image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image already exists with same ID.";
    return;
  }

  if (!gpu::ImageFactory::IsGpuMemoryBufferFormatSupported(
          format, decoder_->GetCapabilities())) {
    LOG(ERROR) << "Format is not supported.";
    return;
  }

  if (!gpu::ImageFactory::IsImageSizeValidForGpuMemoryBufferFormat(size,
                                                                   format)) {
    LOG(ERROR) << "Invalid image size for format.";
    return;
  }

  if (!gpu::ImageFactory::IsImageFormatCompatibleWithGpuMemoryBufferFormat(
          internalformat, format)) {
    LOG(ERROR) << "Incompatible image format.";
    return;
  }

  scoped_refptr<gfx::GLImage> image = channel()->CreateImageForGpuMemoryBuffer(
      handle, size, format, internalformat);
  if (!image.get())
    return;

  image_manager->AddImage(image.get(), id);
}

void GpuCommandBufferStub::OnDestroyImage(int32 id) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyImage");

  if (!decoder_)
    return;

  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  if (!image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image with ID doesn't exist.";
    return;
  }

  image_manager->RemoveImage(id);
}

void GpuCommandBufferStub::SendConsoleMessage(
    int32 id,
    const std::string& message) {
  GPUCommandBufferConsoleMessage console_message;
  console_message.id = id;
  console_message.message = message;
  IPC::Message* msg = new GpuCommandBufferMsg_ConsoleMsg(
      route_id_, console_message);
  msg->set_unblock(true);
  Send(msg);
}

void GpuCommandBufferStub::SendCachedShader(
    const std::string& key, const std::string& shader) {
  channel_->CacheShader(key, shader);
}

void GpuCommandBufferStub::AddDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.AddObserver(observer);
}

void GpuCommandBufferStub::RemoveDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.RemoveObserver(observer);
}

void GpuCommandBufferStub::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> flag) {
  preemption_flag_ = flag;
  if (scheduler_)
    scheduler_->SetPreemptByFlag(preemption_flag_);
}

bool GpuCommandBufferStub::GetTotalGpuMemory(uint64* bytes) {
  *bytes = total_gpu_memory_;
  return !!total_gpu_memory_;
}

gfx::Size GpuCommandBufferStub::GetSurfaceSize() const {
  if (!surface_.get())
    return gfx::Size();
  return surface_->GetSize();
}

const gpu::gles2::FeatureInfo* GpuCommandBufferStub::GetFeatureInfo() const {
  return context_group_->feature_info();
}

gpu::gles2::MemoryTracker* GpuCommandBufferStub::GetMemoryTracker() const {
  return context_group_->memory_tracker();
}

void GpuCommandBufferStub::SetMemoryAllocation(
    const gpu::MemoryAllocation& allocation) {
  if (!last_memory_allocation_valid_ ||
      !allocation.Equals(last_memory_allocation_)) {
    Send(new GpuCommandBufferMsg_SetMemoryAllocation(
        route_id_, allocation));
  }

  last_memory_allocation_valid_ = true;
  last_memory_allocation_ = allocation;
}

void GpuCommandBufferStub::SuggestHaveFrontBuffer(
    bool suggest_have_frontbuffer) {
  // This can be called outside of OnMessageReceived, so the context needs
  // to be made current before calling methods on the surface.
  if (surface_.get() && MakeCurrent())
    surface_->SetFrontbufferAllocation(suggest_have_frontbuffer);
}

bool GpuCommandBufferStub::CheckContextLost() {
  DCHECK(command_buffer_);
  gpu::CommandBuffer::State state = command_buffer_->GetLastState();
  bool was_lost = state.error == gpu::error::kLostContext;

  // Work around issues with recovery by allowing a new GPU process to launch.
  if (was_lost &&
      context_group_->feature_info()->workarounds().exit_on_context_lost &&
      !base::CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kSingleProcess) &&
      !base::CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kInProcessGPU)) {
    LOG(ERROR) << "Exiting GPU process because some drivers cannot recover"
               << " from problems.";
#if defined(OS_WIN)
    base::win::SetShouldCrashOnProcessDetach(false);
#endif
    exit(0);
  }
  // Lose all other contexts if the reset was triggered by the robustness
  // extension instead of being synthetic.
  if (was_lost && decoder_ && decoder_->WasContextLostByRobustnessExtension() &&
      (gfx::GLContext::LosesAllContextsOnContextLost() ||
       use_virtualized_gl_context_))
    channel_->LoseAllContexts();
  CheckCompleteWaits();
  return was_lost;
}

void GpuCommandBufferStub::MarkContextLost() {
  if (!command_buffer_ ||
      command_buffer_->GetLastState().error == gpu::error::kLostContext)
    return;

  command_buffer_->SetContextLostReason(gpu::error::kUnknown);
  if (decoder_)
    decoder_->MarkContextLost(gpu::error::kUnknown);
  command_buffer_->SetParseError(gpu::error::kLostContext);
}

void GpuCommandBufferStub::SendSwapBuffersCompleted(
    const std::vector<ui::LatencyInfo>& latency_info,
    gfx::SwapResult result) {
  Send(new GpuCommandBufferMsg_SwapBuffersCompleted(route_id_, latency_info,
                                                    result));
}

void GpuCommandBufferStub::SendUpdateVSyncParameters(base::TimeTicks timebase,
                                                     base::TimeDelta interval) {
  Send(new GpuCommandBufferMsg_UpdateVSyncParameters(route_id_, timebase,
                                                     interval));
}

}  // namespace content