GPU workaround to simulate Out of Memory errors with large textures
[chromium-blink-merge.git] / content/common/gpu/gpu_command_buffer_stub.cc
blob e14cf986092b9c8817dcd3024e2474890734c368
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/hash.h"
#include "base/json/json_writer.h"
#include "base/memory/shared_memory.h"
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_command_buffer_stub.h"
#include "content/common/gpu/gpu_memory_manager.h"
#include "content/common/gpu/gpu_memory_tracking.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/gpu_watchdog.h"
#include "content/common/gpu/image_transport_surface.h"
#include "content/common/gpu/media/gpu_video_decode_accelerator.h"
#include "content/common/gpu/media/gpu_video_encode_accelerator.h"
#include "content/public/common/content_client.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/service/gl_context_virtual.h"
#include "gpu/command_buffer/service/gl_state_restorer_impl.h"
#include "gpu/command_buffer/service/image_factory.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/logger.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/query_manager.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/command_buffer/service/valuebuffer_manager.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_switches.h"

#if defined(OS_WIN)
#include "content/public/common/sandbox_init.h"
#endif

#if defined(OS_ANDROID)
#include "content/common/gpu/stream_texture_android.h"
#endif

namespace content {
struct WaitForCommandState {
  WaitForCommandState(int32 start, int32 end, IPC::Message* reply)
      : start(start), end(end), reply(reply) {}

  int32 start;
  int32 end;
  scoped_ptr<IPC::Message> reply;
};

namespace {

// The GpuCommandBufferMemoryTracker class provides a bridge between the
// ContextGroup's memory type managers and the GpuMemoryManager class.
class GpuCommandBufferMemoryTracker : public gpu::gles2::MemoryTracker {
 public:
  explicit GpuCommandBufferMemoryTracker(GpuChannel* channel) :
      tracking_group_(channel->gpu_channel_manager()->gpu_memory_manager()->
          CreateTrackingGroup(channel->renderer_pid(), this)) {
  }

  void TrackMemoryAllocatedChange(
      size_t old_size,
      size_t new_size,
      gpu::gles2::MemoryTracker::Pool pool) override {
    tracking_group_->TrackMemoryAllocatedChange(
        old_size, new_size, pool);
  }

  bool EnsureGPUMemoryAvailable(size_t size_needed) override {
    return tracking_group_->EnsureGPUMemoryAvailable(size_needed);
  }

 private:
  ~GpuCommandBufferMemoryTracker() override {}
  scoped_ptr<GpuMemoryTrackingGroup> tracking_group_;

  DISALLOW_COPY_AND_ASSIGN(GpuCommandBufferMemoryTracker);
};

// FastSetActiveURL will shortcut the expensive call to SetActiveURL when the
// url_hash matches.
void FastSetActiveURL(const GURL& url, size_t url_hash) {
  // Leave the previously set URL in the empty case -- empty URLs are given by
  // BlinkPlatformImpl::createOffscreenGraphicsContext3D. Hopefully the
  // onscreen context URL was set previously and will show up even when a crash
  // occurs during offscreen command processing.
  if (url.is_empty())
    return;
  static size_t g_last_url_hash = 0;
  if (url_hash != g_last_url_hash) {
    g_last_url_hash = url_hash;
    GetContentClient()->SetActiveURL(url);
  }
}

// The first time polling a fence, delay some extra time to allow other
// stubs to process some work, or else the timing of the fences could
// allow a pattern of alternating fast and slow frames to occur.
const int64 kHandleMoreWorkPeriodMs = 2;
const int64 kHandleMoreWorkPeriodBusyMs = 1;

// Prevents idle work from being starved.
const int64 kMaxTimeSinceIdleMs = 10;

class DevToolsChannelData : public base::trace_event::ConvertableToTraceFormat {
 public:
  static scoped_refptr<base::trace_event::ConvertableToTraceFormat>
  CreateForChannel(GpuChannel* channel);

  void AppendAsTraceFormat(std::string* out) const override {
    std::string tmp;
    base::JSONWriter::Write(value_.get(), &tmp);
    *out += tmp;
  }

 private:
  explicit DevToolsChannelData(base::Value* value) : value_(value) {}
  ~DevToolsChannelData() override {}
  scoped_ptr<base::Value> value_;
  DISALLOW_COPY_AND_ASSIGN(DevToolsChannelData);
};

scoped_refptr<base::trace_event::ConvertableToTraceFormat>
DevToolsChannelData::CreateForChannel(GpuChannel* channel) {
  scoped_ptr<base::DictionaryValue> res(new base::DictionaryValue);
  res->SetInteger("renderer_pid", channel->renderer_pid());
  res->SetDouble("used_bytes", channel->GetMemoryUsage());
  res->SetDouble("limit_bytes",
                 channel->gpu_channel_manager()
                     ->gpu_memory_manager()
                     ->GetMaximumClientAllocation());
  return new DevToolsChannelData(res.release());
}
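
// Illustrative only: given the keys set in CreateForChannel() above, a
// "GPUTask" trace entry carries JSON along these lines (values are made up):
//   {"renderer_pid":1234,"used_bytes":8388608.0,"limit_bytes":268435456.0}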

}  // namespace

GpuCommandBufferStub::GpuCommandBufferStub(
    GpuChannel* channel,
    GpuCommandBufferStub* share_group,
    const gfx::GLSurfaceHandle& handle,
    gpu::gles2::MailboxManager* mailbox_manager,
    gpu::gles2::SubscriptionRefSet* subscription_ref_set,
    gpu::ValueStateMap* pending_valuebuffer_state,
    const gfx::Size& size,
    const gpu::gles2::DisallowedFeatures& disallowed_features,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    bool use_virtualized_gl_context,
    int32 route_id,
    int32 surface_id,
    GpuWatchdog* watchdog,
    bool software,
    const GURL& active_url)
    : channel_(channel),
      handle_(handle),
      initial_size_(size),
      disallowed_features_(disallowed_features),
      requested_attribs_(attribs),
      gpu_preference_(gpu_preference),
      use_virtualized_gl_context_(use_virtualized_gl_context),
      route_id_(route_id),
      surface_id_(surface_id),
      software_(software),
      last_flush_count_(0),
      last_memory_allocation_valid_(false),
      watchdog_(watchdog),
      sync_point_wait_count_(0),
      delayed_work_scheduled_(false),
      previous_messages_processed_(0),
      active_url_(active_url),
      total_gpu_memory_(0) {
  active_url_hash_ = base::Hash(active_url.possibly_invalid_spec());
  FastSetActiveURL(active_url_, active_url_hash_);

  gpu::gles2::ContextCreationAttribHelper attrib_parser;
  attrib_parser.Parse(requested_attribs_);

  if (share_group) {
    context_group_ = share_group->context_group_;
    DCHECK(context_group_->bind_generates_resource() ==
           attrib_parser.bind_generates_resource);
  } else {
    context_group_ = new gpu::gles2::ContextGroup(
        mailbox_manager,
        new GpuCommandBufferMemoryTracker(channel),
        channel_->gpu_channel_manager()->shader_translator_cache(),
        NULL,
        subscription_ref_set,
        pending_valuebuffer_state,
        attrib_parser.bind_generates_resource);
  }

  use_virtualized_gl_context_ |=
      context_group_->feature_info()->workarounds().use_virtualized_gl_contexts;
}

GpuCommandBufferStub::~GpuCommandBufferStub() {
  Destroy();

  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DestroyCommandBuffer(surface_id()));
}

GpuMemoryManager* GpuCommandBufferStub::GetMemoryManager() const {
  return channel()->gpu_channel_manager()->gpu_memory_manager();
}

bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"),
               "GPUTask",
               "data",
               DevToolsChannelData::CreateForChannel(channel()));
  FastSetActiveURL(active_url_, active_url_hash_);

  bool have_context = false;
  // Ensure the appropriate GL context is current before handling any IPC
  // messages directed at the command buffer. This ensures that the message
  // handler can assume that the context is current (not necessary for
  // RetireSyncPoint or WaitSyncPoint).
  if (decoder_.get() &&
      message.type() != GpuCommandBufferMsg_SetGetBuffer::ID &&
      message.type() != GpuCommandBufferMsg_WaitForTokenInRange::ID &&
      message.type() != GpuCommandBufferMsg_WaitForGetOffsetInRange::ID &&
      message.type() != GpuCommandBufferMsg_RegisterTransferBuffer::ID &&
      message.type() != GpuCommandBufferMsg_DestroyTransferBuffer::ID &&
      message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID &&
      message.type() != GpuCommandBufferMsg_SignalSyncPoint::ID &&
      message.type() !=
          GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback::ID) {
    if (!MakeCurrent())
      return false;
    have_context = true;
  }

  // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers
  // here. This is so the reply can be delayed if the scheduler is unscheduled.
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize,
                                    OnInitialize);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetGetBuffer,
                                    OnSetGetBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ProduceFrontBuffer,
                        OnProduceFrontBuffer);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForTokenInRange,
                                    OnWaitForTokenInRange);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForGetOffsetInRange,
                                    OnWaitForGetOffsetInRange);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer,
                        OnRegisterTransferBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer,
                        OnDestroyTransferBuffer);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoDecoder,
                                    OnCreateVideoDecoder)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoEncoder,
                                    OnCreateVideoEncoder)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetSurfaceVisible,
                        OnSetSurfaceVisible)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RetireSyncPoint,
                        OnRetireSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPoint,
                        OnSignalSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalQuery,
                        OnSignalQuery)
    IPC_MESSAGE_HANDLER(
        GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback,
        OnSetClientHasMemoryAllocationChangedCallback)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateImage, OnCreateImage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyImage, OnDestroyImage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateStreamTexture,
                        OnCreateStreamTexture)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  CheckCompleteWaits();

  if (have_context) {
    // Ensure that any delayed work that was created will be handled.
    ScheduleDelayedWork(kHandleMoreWorkPeriodMs);
  }

  DCHECK(handled);
  return handled;
}
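
// Note on the DELAY_REPLY handlers above: a handler such as
// OnWaitForTokenInRange() takes ownership of |reply_message| and answers it
// later from CheckCompleteWaits(), so the client stays blocked until the wait
// completes rather than until the handler returns.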

bool GpuCommandBufferStub::Send(IPC::Message* message) {
  return channel_->Send(message);
}

bool GpuCommandBufferStub::IsScheduled() {
  return (!scheduler_.get() || scheduler_->IsScheduled());
}

bool GpuCommandBufferStub::HasMoreWork() {
  return scheduler_.get() && scheduler_->HasMoreWork();
}

void GpuCommandBufferStub::PollWork() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork");
  delayed_work_scheduled_ = false;
  FastSetActiveURL(active_url_, active_url_hash_);
  if (decoder_.get() && !MakeCurrent())
    return;

  if (scheduler_) {
    uint64 current_messages_processed =
        channel()->gpu_channel_manager()->MessagesProcessed();
    // We're idle when no messages were processed or scheduled.
    bool is_idle =
        (previous_messages_processed_ == current_messages_processed) &&
        !channel()->gpu_channel_manager()->HandleMessagesScheduled();
    if (!is_idle && !last_idle_time_.is_null()) {
      base::TimeDelta time_since_idle =
          base::TimeTicks::Now() - last_idle_time_;
      base::TimeDelta max_time_since_idle =
          base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs);

      // Force idle when it's been too long since last time we were idle.
      if (time_since_idle > max_time_since_idle)
        is_idle = true;
    }

    if (is_idle) {
      last_idle_time_ = base::TimeTicks::Now();
      scheduler_->PerformIdleWork();
    }
  }
  ScheduleDelayedWork(kHandleMoreWorkPeriodBusyMs);
}

bool GpuCommandBufferStub::HasUnprocessedCommands() {
  if (command_buffer_) {
    gpu::CommandBuffer::State state = command_buffer_->GetLastState();
    return command_buffer_->GetPutOffset() != state.get_offset &&
           !gpu::error::IsError(state.error);
  }
  return false;
}

void GpuCommandBufferStub::ScheduleDelayedWork(int64 delay) {
  if (!HasMoreWork()) {
    last_idle_time_ = base::TimeTicks();
    return;
  }

  if (delayed_work_scheduled_)
    return;
  delayed_work_scheduled_ = true;

  // Idle when no messages are processed between now and when
  // PollWork is called.
  previous_messages_processed_ =
      channel()->gpu_channel_manager()->MessagesProcessed();
  if (last_idle_time_.is_null())
    last_idle_time_ = base::TimeTicks::Now();

  // IsScheduled() returns true after passing all unschedule fences
  // and this is when we can start performing idle work. Idle work
  // is done synchronously so we can set delay to 0 and instead poll
  // for more work at the rate idle work is performed. This also ensures
  // that idle work is done as efficiently as possible without any
  // unnecessary delays.
  if (scheduler_.get() &&
      scheduler_->IsScheduled() &&
      scheduler_->HasMoreIdleWork()) {
    delay = 0;
  }

  base::MessageLoop::current()->PostDelayedTask(
      FROM_HERE,
      base::Bind(&GpuCommandBufferStub::PollWork, AsWeakPtr()),
      base::TimeDelta::FromMilliseconds(delay));
}
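
// A worked example of the cadence above (illustrative): while commands are
// pending, PollWork() is re-posted every kHandleMoreWorkPeriodBusyMs (1 ms);
// once only idle work remains and the stub is scheduled, |delay| drops to 0 so
// idle work is polled back-to-back, and kMaxTimeSinceIdleMs (10 ms) in
// PollWork() guarantees idle work cannot be starved indefinitely.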

bool GpuCommandBufferStub::MakeCurrent() {
  if (decoder_->MakeCurrent())
    return true;
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
  command_buffer_->SetParseError(gpu::error::kLostContext);
  CheckContextLost();
  return false;
}

void GpuCommandBufferStub::Destroy() {
  if (wait_for_token_) {
    Send(wait_for_token_->reply.release());
    wait_for_token_.reset();
  }
  if (wait_for_get_offset_) {
    Send(wait_for_get_offset_->reply.release());
    wait_for_get_offset_.reset();
  }
  if (handle_.is_null() && !active_url_.is_empty()) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(new GpuHostMsg_DidDestroyOffscreenContext(
        active_url_));
  }

  memory_manager_client_state_.reset();

  while (!sync_points_.empty())
    OnRetireSyncPoint(sync_points_.front());

  if (decoder_)
    decoder_->set_engine(NULL);

  // The scheduler has raw references to the decoder and the command buffer so
  // destroy it before those.
  scheduler_.reset();

  bool have_context = false;
  if (decoder_ && command_buffer_ &&
      command_buffer_->GetLastState().error != gpu::error::kLostContext)
    have_context = decoder_->MakeCurrent();
  FOR_EACH_OBSERVER(DestructionObserver,
                    destruction_observers_,
                    OnWillDestroyStub());

  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }

  command_buffer_.reset();

  // Remove this after crbug.com/248395 is sorted out.
  surface_ = NULL;
}

void GpuCommandBufferStub::OnInitializeFailed(IPC::Message* reply_message) {
  Destroy();
  GpuCommandBufferMsg_Initialize::WriteReplyParams(
      reply_message, false, gpu::Capabilities());
  Send(reply_message);
}

void GpuCommandBufferStub::OnInitialize(
    base::SharedMemoryHandle shared_state_handle,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnInitialize");
  DCHECK(!command_buffer_.get());

  scoped_ptr<base::SharedMemory> shared_state_shm(
      new base::SharedMemory(shared_state_handle, false));

  command_buffer_.reset(new gpu::CommandBufferService(
      context_group_->transfer_buffer_manager()));

  bool result = command_buffer_->Initialize();
  DCHECK(result);

  decoder_.reset(::gpu::gles2::GLES2Decoder::Create(context_group_.get()));

  scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(),
                                         decoder_.get(),
                                         decoder_.get()));
  if (preemption_flag_.get())
    scheduler_->SetPreemptByFlag(preemption_flag_);

  decoder_->set_engine(scheduler_.get());

  if (!handle_.is_null()) {
#if defined(OS_MACOSX) || defined(UI_COMPOSITOR_IMAGE_TRANSPORT)
    if (software_) {
      LOG(ERROR) << "No software support.";
      OnInitializeFailed(reply_message);
      return;
    }
#endif

    surface_ = ImageTransportSurface::CreateSurface(
        channel_->gpu_channel_manager(),
        this,
        handle_);
  } else {
    GpuChannelManager* manager = channel_->gpu_channel_manager();
    surface_ = manager->GetDefaultOffscreenSurface();
  }

  if (!surface_.get()) {
    DLOG(ERROR) << "Failed to create surface.";
    OnInitializeFailed(reply_message);
    return;
  }

  scoped_refptr<gfx::GLContext> context;
  if (use_virtualized_gl_context_ && channel_->share_group()) {
    context = channel_->share_group()->GetSharedContext();
    if (!context.get()) {
      context = gfx::GLContext::CreateGLContext(
          channel_->share_group(),
          channel_->gpu_channel_manager()->GetDefaultOffscreenSurface(),
          gpu_preference_);
      if (!context.get()) {
        DLOG(ERROR) << "Failed to create shared context for virtualization.";
        OnInitializeFailed(reply_message);
        return;
      }
      channel_->share_group()->SetSharedContext(context.get());
    }
    // This should be a non-virtual GL context.
    DCHECK(context->GetHandle());
    context = new gpu::GLContextVirtual(
        channel_->share_group(), context.get(), decoder_->AsWeakPtr());
    if (!context->Initialize(surface_.get(), gpu_preference_)) {
      // TODO(sievers): The real context created above for the default
      // offscreen surface might not be compatible with this surface.
      // Need to adjust at least GLX to be able to create the initial context
      // with a config that is compatible with onscreen and offscreen surfaces.
      context = NULL;

      DLOG(ERROR) << "Failed to initialize virtual GL context.";
      OnInitializeFailed(reply_message);
      return;
    }
  }
  if (!context.get()) {
    context = gfx::GLContext::CreateGLContext(
        channel_->share_group(), surface_.get(), gpu_preference_);
  }
  if (!context.get()) {
    DLOG(ERROR) << "Failed to create context.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Failed to make context current.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->GetGLStateRestorer()) {
    context->SetGLStateRestorer(
        new gpu::GLStateRestorerImpl(decoder_->AsWeakPtr()));
  }

  if (!context->GetTotalGpuMemory(&total_gpu_memory_))
    total_gpu_memory_ = 0;

  if (!context_group_->has_program_cache()) {
    context_group_->set_program_cache(
        channel_->gpu_channel_manager()->program_cache());
  }

  // Initialize the decoder with either the view or pbuffer GLContext.
  if (!decoder_->Initialize(surface_,
                            context,
                            !surface_id(),
                            initial_size_,
                            disallowed_features_,
                            requested_attribs_)) {
    DLOG(ERROR) << "Failed to initialize decoder.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (base::CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kEnableGPUServiceLogging)) {
    decoder_->set_log_commands(true);
  }

  decoder_->GetLogger()->SetMsgCallback(
      base::Bind(&GpuCommandBufferStub::SendConsoleMessage,
                 base::Unretained(this)));
  decoder_->SetShaderCacheCallback(
      base::Bind(&GpuCommandBufferStub::SendCachedShader,
                 base::Unretained(this)));
  decoder_->SetWaitSyncPointCallback(
      base::Bind(&GpuCommandBufferStub::OnWaitSyncPoint,
                 base::Unretained(this)));

  command_buffer_->SetPutOffsetChangeCallback(
      base::Bind(&GpuCommandBufferStub::PutChanged, base::Unretained(this)));
  command_buffer_->SetGetBufferChangeCallback(
      base::Bind(&gpu::GpuScheduler::SetGetBuffer,
                 base::Unretained(scheduler_.get())));
  command_buffer_->SetParseErrorCallback(
      base::Bind(&GpuCommandBufferStub::OnParseError, base::Unretained(this)));
  scheduler_->SetSchedulingChangedCallback(
      base::Bind(&GpuChannel::StubSchedulingChanged,
                 base::Unretained(channel_)));

  if (watchdog_) {
    scheduler_->SetCommandProcessedCallback(
        base::Bind(&GpuCommandBufferStub::OnCommandProcessed,
                   base::Unretained(this)));
  }

  const size_t kSharedStateSize = sizeof(gpu::CommandBufferSharedState);
  if (!shared_state_shm->Map(kSharedStateSize)) {
    DLOG(ERROR) << "Failed to map shared state buffer.";
    OnInitializeFailed(reply_message);
    return;
  }
  command_buffer_->SetSharedStateBuffer(gpu::MakeBackingFromSharedMemory(
      shared_state_shm.Pass(), kSharedStateSize));

  gpu::Capabilities capabilities = decoder_->GetCapabilities();
  capabilities.future_sync_points = channel_->allow_future_sync_points();

  GpuCommandBufferMsg_Initialize::WriteReplyParams(
      reply_message, true, capabilities);
  Send(reply_message);

  if (handle_.is_null() && !active_url_.is_empty()) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(new GpuHostMsg_DidCreateOffscreenContext(
        active_url_));
  }
}

void GpuCommandBufferStub::OnCreateStreamTexture(
    uint32 texture_id, int32 stream_id, bool* succeeded) {
#if defined(OS_ANDROID)
  *succeeded = StreamTexture::Create(this, texture_id, stream_id);
#else
  *succeeded = false;
#endif
}

void GpuCommandBufferStub::SetLatencyInfoCallback(
    const LatencyInfoCallback& callback) {
  latency_info_callback_ = callback;
}

int32 GpuCommandBufferStub::GetRequestedAttribute(int attr) const {
  // The attribute list is stored as (enum, value) pairs; search for the
  // requested attribute and return its value.
  for (std::vector<int32>::const_iterator it = requested_attribs_.begin();
       it != requested_attribs_.end(); ++it) {
    if (*it++ == attr) {
      return *it;
    }
  }
  return -1;
}
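
// Illustrative only: with |requested_attribs_| laid out as EGL-style pairs,
// e.g. {0x3025 /* EGL_DEPTH_SIZE */, 24, 0x3026 /* EGL_STENCIL_SIZE */, 8},
// GetRequestedAttribute(0x3025) returns 24, and an absent attribute yields -1.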

void GpuCommandBufferStub::OnSetGetBuffer(int32 shm_id,
                                          IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetGetBuffer");
  if (command_buffer_)
    command_buffer_->SetGetBuffer(shm_id);
  Send(reply_message);
}

void GpuCommandBufferStub::OnProduceFrontBuffer(const gpu::Mailbox& mailbox) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnProduceFrontBuffer");
  if (!decoder_) {
    LOG(ERROR) << "Can't produce front buffer before initialization.";
    return;
  }

  decoder_->ProduceFrontBuffer(mailbox);
}

void GpuCommandBufferStub::OnParseError() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnParseError");
  DCHECK(command_buffer_.get());
  gpu::CommandBuffer::State state = command_buffer_->GetLastState();
  IPC::Message* msg = new GpuCommandBufferMsg_Destroyed(
      route_id_, state.context_lost_reason);
  msg->set_unblock(true);
  Send(msg);

  // Tell the browser about this context loss as well, so it can
  // determine whether client APIs like WebGL need to be immediately
  // blocked from automatically running.
  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DidLoseContext(
      handle_.is_null(), state.context_lost_reason, active_url_));

  CheckContextLost();
}

void GpuCommandBufferStub::OnWaitForTokenInRange(int32 start,
                                                 int32 end,
                                                 IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForTokenInRange");
  DCHECK(command_buffer_.get());
  CheckContextLost();
  if (wait_for_token_)
    LOG(ERROR) << "Got WaitForToken command while currently waiting for token.";
  wait_for_token_ =
      make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
  CheckCompleteWaits();
}

void GpuCommandBufferStub::OnWaitForGetOffsetInRange(
    int32 start,
    int32 end,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForGetOffsetInRange");
  DCHECK(command_buffer_.get());
  CheckContextLost();
  if (wait_for_get_offset_) {
    LOG(ERROR)
        << "Got WaitForGetOffset command while currently waiting for offset.";
  }
  wait_for_get_offset_ =
      make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
  CheckCompleteWaits();
}

void GpuCommandBufferStub::CheckCompleteWaits() {
  if (wait_for_token_ || wait_for_get_offset_) {
    gpu::CommandBuffer::State state = command_buffer_->GetLastState();
    if (wait_for_token_ &&
        (gpu::CommandBuffer::InRange(
             wait_for_token_->start, wait_for_token_->end, state.token) ||
         state.error != gpu::error::kNoError)) {
      ReportState();
      GpuCommandBufferMsg_WaitForTokenInRange::WriteReplyParams(
          wait_for_token_->reply.get(), state);
      Send(wait_for_token_->reply.release());
      wait_for_token_.reset();
    }
    if (wait_for_get_offset_ &&
        (gpu::CommandBuffer::InRange(wait_for_get_offset_->start,
                                     wait_for_get_offset_->end,
                                     state.get_offset) ||
         state.error != gpu::error::kNoError)) {
      ReportState();
      GpuCommandBufferMsg_WaitForGetOffsetInRange::WriteReplyParams(
          wait_for_get_offset_->reply.get(), state);
      Send(wait_for_get_offset_->reply.release());
      wait_for_get_offset_.reset();
    }
  }
}

void GpuCommandBufferStub::OnAsyncFlush(
    int32 put_offset,
    uint32 flush_count,
    const std::vector<ui::LatencyInfo>& latency_info) {
  TRACE_EVENT1(
      "gpu", "GpuCommandBufferStub::OnAsyncFlush", "put_offset", put_offset);

  if (ui::LatencyInfo::Verify(latency_info,
                              "GpuCommandBufferStub::OnAsyncFlush") &&
      !latency_info_callback_.is_null()) {
    latency_info_callback_.Run(latency_info);
  }
  DCHECK(command_buffer_.get());
  if (flush_count - last_flush_count_ < 0x8000000U) {
    last_flush_count_ = flush_count;
    command_buffer_->Flush(put_offset);
  } else {
    // We received this message out-of-order. This should not happen but is
    // here to catch regressions. Ignore the message.
    NOTREACHED() << "Received a Flush message out-of-order";
  }

  ReportState();
}
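
// Worked example of the unsigned-wraparound check above (illustrative): with
// last_flush_count_ == 0xFFFFFFFE and an in-order flush_count == 0x00000001,
// the uint32 difference is 3, well under 0x8000000U, so the flush is applied;
// a stale flush_count arriving late produces a huge difference and is ignored.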

void GpuCommandBufferStub::OnRescheduled() {
  gpu::CommandBuffer::State pre_state = command_buffer_->GetLastState();
  command_buffer_->Flush(command_buffer_->GetPutOffset());
  gpu::CommandBuffer::State post_state = command_buffer_->GetLastState();

  if (pre_state.get_offset != post_state.get_offset)
    ReportState();
}

void GpuCommandBufferStub::OnRegisterTransferBuffer(
    int32 id,
    base::SharedMemoryHandle transfer_buffer,
    uint32 size) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterTransferBuffer");

  // Take ownership of the memory and map it into this process.
  // This validates the size.
  scoped_ptr<base::SharedMemory> shared_memory(
      new base::SharedMemory(transfer_buffer, false));
  if (!shared_memory->Map(size)) {
    DVLOG(0) << "Failed to map shared memory.";
    return;
  }

  if (command_buffer_) {
    command_buffer_->RegisterTransferBuffer(
        id, gpu::MakeBackingFromSharedMemory(shared_memory.Pass(), size));
  }
}

void GpuCommandBufferStub::OnDestroyTransferBuffer(int32 id) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyTransferBuffer");

  if (command_buffer_)
    command_buffer_->DestroyTransferBuffer(id);
}

void GpuCommandBufferStub::OnCommandProcessed() {
  if (watchdog_)
    watchdog_->CheckArmed();
}

void GpuCommandBufferStub::ReportState() { command_buffer_->UpdateState(); }

void GpuCommandBufferStub::PutChanged() {
  FastSetActiveURL(active_url_, active_url_hash_);
  scheduler_->PutChanged();
}

void GpuCommandBufferStub::OnCreateVideoDecoder(
    media::VideoCodecProfile profile,
    int32 decoder_route_id,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoDecoder");
  GpuVideoDecodeAccelerator* decoder = new GpuVideoDecodeAccelerator(
      decoder_route_id, this, channel_->io_message_loop());
  decoder->Initialize(profile, reply_message);
  // decoder is registered as a DestructionObserver of this stub and will
  // self-delete during destruction of this stub.
}

void GpuCommandBufferStub::OnCreateVideoEncoder(
    media::VideoFrame::Format input_format,
    const gfx::Size& input_visible_size,
    media::VideoCodecProfile output_profile,
    uint32 initial_bitrate,
    int32 encoder_route_id,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoEncoder");
  GpuVideoEncodeAccelerator* encoder =
      new GpuVideoEncodeAccelerator(encoder_route_id, this);
  encoder->Initialize(input_format,
                      input_visible_size,
                      output_profile,
                      initial_bitrate,
                      reply_message);
  // encoder is registered as a DestructionObserver of this stub and will
  // self-delete during destruction of this stub.
}

void GpuCommandBufferStub::OnSetSurfaceVisible(bool visible) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetSurfaceVisible");
  if (memory_manager_client_state_)
    memory_manager_client_state_->SetVisible(visible);
}

void GpuCommandBufferStub::AddSyncPoint(uint32 sync_point) {
  sync_points_.push_back(sync_point);
}

void GpuCommandBufferStub::OnRetireSyncPoint(uint32 sync_point) {
  DCHECK(!sync_points_.empty() && sync_points_.front() == sync_point);
  sync_points_.pop_front();
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->RetireSyncPoint(sync_point);
}

bool GpuCommandBufferStub::OnWaitSyncPoint(uint32 sync_point) {
  if (!sync_point)
    return true;
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  if (manager->sync_point_manager()->IsSyncPointRetired(sync_point))
    return true;

  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitSyncPoint", this,
                             "GpuCommandBufferStub", this);
  }
  scheduler_->SetScheduled(false);
  ++sync_point_wait_count_;
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point,
      base::Bind(&GpuCommandBufferStub::OnSyncPointRetired,
                 this->AsWeakPtr()));
  return scheduler_->IsScheduled();
}
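
// Note: OnWaitSyncPoint() deschedules the stub; OnSyncPointRetired() below
// re-schedules it once the sync point's callback fires, with
// sync_point_wait_count_ balancing nested waits.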

void GpuCommandBufferStub::OnSyncPointRetired() {
  --sync_point_wait_count_;
  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_END1("gpu", "WaitSyncPoint", this,
                           "GpuCommandBufferStub", this);
  }
  scheduler_->SetScheduled(true);
}

void GpuCommandBufferStub::OnSignalSyncPoint(uint32 sync_point, uint32 id) {
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point,
      base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
                 this->AsWeakPtr(),
                 id));
}

void GpuCommandBufferStub::OnSignalSyncPointAck(uint32 id) {
  Send(new GpuCommandBufferMsg_SignalSyncPointAck(route_id_, id));
}

void GpuCommandBufferStub::OnSignalQuery(uint32 query_id, uint32 id) {
  if (decoder_) {
    gpu::gles2::QueryManager* query_manager = decoder_->GetQueryManager();
    if (query_manager) {
      gpu::gles2::QueryManager::Query* query =
          query_manager->GetQuery(query_id);
      if (query) {
        query->AddCallback(
            base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
                       this->AsWeakPtr(),
                       id));
        return;
      }
    }
  }
  // Something went wrong, run callback immediately.
  OnSignalSyncPointAck(id);
}

void GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback(
    bool has_callback) {
  TRACE_EVENT0(
      "gpu",
      "GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback");
  if (has_callback) {
    if (!memory_manager_client_state_) {
      memory_manager_client_state_.reset(GetMemoryManager()->CreateClientState(
          this, surface_id_ != 0, true));
    }
  } else {
    memory_manager_client_state_.reset();
  }
}

void GpuCommandBufferStub::OnCreateImage(int32 id,
                                         gfx::GpuMemoryBufferHandle handle,
                                         gfx::Size size,
                                         gfx::GpuMemoryBuffer::Format format,
                                         uint32 internalformat) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateImage");

  if (!decoder_)
    return;

  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  if (image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image already exists with same ID.";
    return;
  }

  if (!gpu::ImageFactory::IsGpuMemoryBufferFormatSupported(
          format, decoder_->GetCapabilities())) {
    LOG(ERROR) << "Format is not supported.";
    return;
  }

  if (!gpu::ImageFactory::IsImageSizeValidForGpuMemoryBufferFormat(size,
                                                                   format)) {
    LOG(ERROR) << "Invalid image size for format.";
    return;
  }

  if (!gpu::ImageFactory::IsImageFormatCompatibleWithGpuMemoryBufferFormat(
          internalformat, format)) {
    LOG(ERROR) << "Incompatible image format.";
    return;
  }

  scoped_refptr<gfx::GLImage> image = channel()->CreateImageForGpuMemoryBuffer(
      handle, size, format, internalformat);
  if (!image.get())
    return;

  image_manager->AddImage(image.get(), id);
}

void GpuCommandBufferStub::OnDestroyImage(int32 id) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyImage");

  if (!decoder_)
    return;

  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  if (!image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image with ID doesn't exist.";
    return;
  }

  image_manager->RemoveImage(id);
}

void GpuCommandBufferStub::SendConsoleMessage(
    int32 id,
    const std::string& message) {
  GPUCommandBufferConsoleMessage console_message;
  console_message.id = id;
  console_message.message = message;
  IPC::Message* msg = new GpuCommandBufferMsg_ConsoleMsg(
      route_id_, console_message);
  msg->set_unblock(true);
  Send(msg);
}

void GpuCommandBufferStub::SendCachedShader(
    const std::string& key, const std::string& shader) {
  channel_->CacheShader(key, shader);
}

void GpuCommandBufferStub::AddDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.AddObserver(observer);
}

void GpuCommandBufferStub::RemoveDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.RemoveObserver(observer);
}

void GpuCommandBufferStub::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> flag) {
  preemption_flag_ = flag;
  if (scheduler_)
    scheduler_->SetPreemptByFlag(preemption_flag_);
}

bool GpuCommandBufferStub::GetTotalGpuMemory(uint64* bytes) {
  *bytes = total_gpu_memory_;
  return !!total_gpu_memory_;
}

gfx::Size GpuCommandBufferStub::GetSurfaceSize() const {
  if (!surface_.get())
    return gfx::Size();
  return surface_->GetSize();
}

gpu::gles2::MemoryTracker* GpuCommandBufferStub::GetMemoryTracker() const {
  return context_group_->memory_tracker();
}

void GpuCommandBufferStub::SetMemoryAllocation(
    const gpu::MemoryAllocation& allocation) {
  if (!last_memory_allocation_valid_ ||
      !allocation.Equals(last_memory_allocation_)) {
    Send(new GpuCommandBufferMsg_SetMemoryAllocation(
        route_id_, allocation));
  }

  last_memory_allocation_valid_ = true;
  last_memory_allocation_ = allocation;
}

void GpuCommandBufferStub::SuggestHaveFrontBuffer(
    bool suggest_have_frontbuffer) {
  // This can be called outside of OnMessageReceived, so the context needs
  // to be made current before calling methods on the surface.
  if (surface_.get() && MakeCurrent())
    surface_->SetFrontbufferAllocation(suggest_have_frontbuffer);
}

bool GpuCommandBufferStub::CheckContextLost() {
  DCHECK(command_buffer_);
  gpu::CommandBuffer::State state = command_buffer_->GetLastState();
  bool was_lost = state.error == gpu::error::kLostContext;
  // Lose all other contexts if the reset was triggered by the robustness
  // extension instead of being synthetic.
  if (was_lost && decoder_ && decoder_->WasContextLostByRobustnessExtension() &&
      (gfx::GLContext::LosesAllContextsOnContextLost() ||
       use_virtualized_gl_context_))
    channel_->LoseAllContexts();
  CheckCompleteWaits();
  return was_lost;
}

void GpuCommandBufferStub::MarkContextLost() {
  if (!command_buffer_ ||
      command_buffer_->GetLastState().error == gpu::error::kLostContext)
    return;

  command_buffer_->SetContextLostReason(gpu::error::kUnknown);
  if (decoder_)
    decoder_->LoseContext(GL_UNKNOWN_CONTEXT_RESET_ARB);
  command_buffer_->SetParseError(gpu::error::kLostContext);
}

uint64 GpuCommandBufferStub::GetMemoryUsage() const {
  return GetMemoryManager()->GetClientMemoryUsage(this);
}

void GpuCommandBufferStub::SendSwapBuffersCompleted(
    const std::vector<ui::LatencyInfo>& latency_info) {
  Send(new GpuCommandBufferMsg_SwapBuffersCompleted(route_id_, latency_info));
}

void GpuCommandBufferStub::SendUpdateVSyncParameters(base::TimeTicks timebase,
                                                     base::TimeDelta interval) {
  Send(new GpuCommandBufferMsg_UpdateVSyncParameters(route_id_, timebase,
                                                     interval));
}

}  // namespace content