// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/hash.h"
#include "base/json/json_writer.h"
#include "base/memory/shared_memory.h"
#include "base/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_command_buffer_stub.h"
#include "content/common/gpu/gpu_memory_manager.h"
#include "content/common/gpu/gpu_memory_tracking.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/gpu_watchdog.h"
#include "content/common/gpu/image_transport_surface.h"
#include "content/common/gpu/media/gpu_video_decode_accelerator.h"
#include "content/common/gpu/media/gpu_video_encode_accelerator.h"
#include "content/public/common/content_client.h"
#include "content/public/common/content_switches.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/service/gl_context_virtual.h"
#include "gpu/command_buffer/service/gl_state_restorer_impl.h"
#include "gpu/command_buffer/service/image_factory.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/logger.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/query_manager.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/command_buffer/service/valuebuffer_manager.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_switches.h"

#if defined(OS_WIN)
#include "base/win/win_util.h"
#include "content/public/common/sandbox_init.h"
#endif

#if defined(OS_ANDROID)
#include "content/common/gpu/stream_texture_android.h"
#endif

namespace content {
struct WaitForCommandState {
  WaitForCommandState(int32 start, int32 end, IPC::Message* reply)
      : start(start), end(end), reply(reply) {}

  int32 start;
  int32 end;
  scoped_ptr<IPC::Message> reply;
};

namespace {

// The GpuCommandBufferMemoryTracker class provides a bridge between the
// ContextGroup's memory type managers and the GpuMemoryManager class.
class GpuCommandBufferMemoryTracker : public gpu::gles2::MemoryTracker {
 public:
  explicit GpuCommandBufferMemoryTracker(GpuChannel* channel) :
      tracking_group_(channel->gpu_channel_manager()->gpu_memory_manager()->
          CreateTrackingGroup(channel->renderer_pid(), this)) {
  }

  void TrackMemoryAllocatedChange(
      size_t old_size,
      size_t new_size,
      gpu::gles2::MemoryTracker::Pool pool) override {
    tracking_group_->TrackMemoryAllocatedChange(
        old_size, new_size, pool);
  }

  bool EnsureGPUMemoryAvailable(size_t size_needed) override {
    return tracking_group_->EnsureGPUMemoryAvailable(size_needed);
  }

 private:
  ~GpuCommandBufferMemoryTracker() override {}

  scoped_ptr<GpuMemoryTrackingGroup> tracking_group_;

  DISALLOW_COPY_AND_ASSIGN(GpuCommandBufferMemoryTracker);
};

// FastSetActiveURL will shortcut the expensive call to SetActiveURL when the
// url_hash matches.
void FastSetActiveURL(const GURL& url, size_t url_hash) {
  // Leave the previously set URL in the empty case -- empty URLs are given by
  // BlinkPlatformImpl::createOffscreenGraphicsContext3D. Hopefully the
  // onscreen context URL was set previously and will show up even when a crash
  // occurs during offscreen command processing.
  if (url.is_empty())
    return;
  static size_t g_last_url_hash = 0;
  if (url_hash != g_last_url_hash) {
    g_last_url_hash = url_hash;
    GetContentClient()->SetActiveURL(url);
  }
}

// The first time polling a fence, delay some extra time to allow other
// stubs to process some work, or else the timing of the fences could
// allow a pattern of alternating fast and slow frames to occur.
const int64 kHandleMoreWorkPeriodMs = 2;
const int64 kHandleMoreWorkPeriodBusyMs = 1;

// Prevents idle work from being starved.
const int64 kMaxTimeSinceIdleMs = 10;
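
// Scheduling note: OnMessageReceived() schedules the next PollWork() call
// kHandleMoreWorkPeriodMs out, while PollWork() itself re-polls every
// kHandleMoreWorkPeriodBusyMs until pending work drains; see
// ScheduleDelayedWork() below.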

class DevToolsChannelData : public base::trace_event::ConvertableToTraceFormat {
 public:
  static scoped_refptr<base::trace_event::ConvertableToTraceFormat>
  CreateForChannel(GpuChannel* channel);

  void AppendAsTraceFormat(std::string* out) const override {
    std::string tmp;
    base::JSONWriter::Write(*value_, &tmp);
    *out += tmp;
  }

 private:
  explicit DevToolsChannelData(base::Value* value) : value_(value) {}
  ~DevToolsChannelData() override {}
  scoped_ptr<base::Value> value_;
  DISALLOW_COPY_AND_ASSIGN(DevToolsChannelData);
};

scoped_refptr<base::trace_event::ConvertableToTraceFormat>
DevToolsChannelData::CreateForChannel(GpuChannel* channel) {
  scoped_ptr<base::DictionaryValue> res(new base::DictionaryValue);
  res->SetInteger("renderer_pid", channel->renderer_pid());
  res->SetDouble("used_bytes", channel->GetMemoryUsage());
  res->SetDouble("limit_bytes",
                 channel->gpu_channel_manager()
                     ->gpu_memory_manager()
                     ->GetMaximumClientAllocation());
  return new DevToolsChannelData(res.release());
}

}  // namespace

GpuCommandBufferStub::GpuCommandBufferStub(
    GpuChannel* channel,
    GpuCommandBufferStub* share_group,
    const gfx::GLSurfaceHandle& handle,
    gpu::gles2::MailboxManager* mailbox_manager,
    gpu::gles2::SubscriptionRefSet* subscription_ref_set,
    gpu::ValueStateMap* pending_valuebuffer_state,
    const gfx::Size& size,
    const gpu::gles2::DisallowedFeatures& disallowed_features,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    bool use_virtualized_gl_context,
    int32 route_id,
    int32 surface_id,
    GpuWatchdog* watchdog,
    bool software,
    const GURL& active_url)
    : channel_(channel),
      handle_(handle),
      initial_size_(size),
      disallowed_features_(disallowed_features),
      requested_attribs_(attribs),
      gpu_preference_(gpu_preference),
      use_virtualized_gl_context_(use_virtualized_gl_context),
      route_id_(route_id),
      surface_id_(surface_id),
      software_(software),
      last_flush_count_(0),
      last_memory_allocation_valid_(false),
      watchdog_(watchdog),
      sync_point_wait_count_(0),
      delayed_work_scheduled_(false),
      previous_messages_processed_(0),
      active_url_(active_url),
      total_gpu_memory_(0) {
  active_url_hash_ = base::Hash(active_url.possibly_invalid_spec());
  FastSetActiveURL(active_url_, active_url_hash_);

  gpu::gles2::ContextCreationAttribHelper attrib_parser;
  attrib_parser.Parse(requested_attribs_);

  if (share_group) {
    context_group_ = share_group->context_group_;
    DCHECK(context_group_->bind_generates_resource() ==
           attrib_parser.bind_generates_resource);
  } else {
    context_group_ = new gpu::gles2::ContextGroup(
        mailbox_manager,
        new GpuCommandBufferMemoryTracker(channel),
        channel_->gpu_channel_manager()->shader_translator_cache(),
        NULL,
        subscription_ref_set,
        pending_valuebuffer_state,
        attrib_parser.bind_generates_resource);
  }

  use_virtualized_gl_context_ |=
      context_group_->feature_info()->workarounds().use_virtualized_gl_contexts;

  bool is_offscreen = surface_id_ == 0;
  if (is_offscreen && initial_size_.IsEmpty()) {
    // If we're an offscreen surface with zero width and/or height, set to a
    // non-zero size so that we have a complete framebuffer for operations like
    // glClear.
    initial_size_ = gfx::Size(1, 1);
  }
}

GpuCommandBufferStub::~GpuCommandBufferStub() {
  Destroy();

  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DestroyCommandBuffer(surface_id()));
}

GpuMemoryManager* GpuCommandBufferStub::GetMemoryManager() const {
  return channel()->gpu_channel_manager()->gpu_memory_manager();
}

bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"),
               "GPUTask",
               "data",
               DevToolsChannelData::CreateForChannel(channel()));
  FastSetActiveURL(active_url_, active_url_hash_);

  bool have_context = false;
  // Ensure the appropriate GL context is current before handling any IPC
  // messages directed at the command buffer. This ensures that the message
  // handler can assume that the context is current (not necessary for
  // RetireSyncPoint or WaitSyncPoint).
  if (decoder_.get() &&
      message.type() != GpuCommandBufferMsg_SetGetBuffer::ID &&
      message.type() != GpuCommandBufferMsg_WaitForTokenInRange::ID &&
      message.type() != GpuCommandBufferMsg_WaitForGetOffsetInRange::ID &&
      message.type() != GpuCommandBufferMsg_RegisterTransferBuffer::ID &&
      message.type() != GpuCommandBufferMsg_DestroyTransferBuffer::ID &&
      message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID &&
      message.type() != GpuCommandBufferMsg_SignalSyncPoint::ID &&
      message.type() !=
          GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback::ID) {
    if (!MakeCurrent())
      return false;
    have_context = true;
  }

  // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers
  // here. This is so the reply can be delayed if the scheduler is unscheduled.
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize,
                                    OnInitialize);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetGetBuffer,
                                    OnSetGetBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ProduceFrontBuffer,
                        OnProduceFrontBuffer);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForTokenInRange,
                                    OnWaitForTokenInRange);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForGetOffsetInRange,
                                    OnWaitForGetOffsetInRange);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer,
                        OnRegisterTransferBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer,
                        OnDestroyTransferBuffer);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoDecoder,
                                    OnCreateVideoDecoder)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoEncoder,
                                    OnCreateVideoEncoder)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetSurfaceVisible,
                        OnSetSurfaceVisible)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RetireSyncPoint,
                        OnRetireSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPoint,
                        OnSignalSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalQuery,
                        OnSignalQuery)
    IPC_MESSAGE_HANDLER(
        GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback,
        OnSetClientHasMemoryAllocationChangedCallback)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateImage, OnCreateImage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyImage, OnDestroyImage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateStreamTexture,
                        OnCreateStreamTexture)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  CheckCompleteWaits();

  if (have_context) {
    // Ensure that any delayed work that was created will be handled.
    ScheduleDelayedWork(kHandleMoreWorkPeriodMs);
  }

  DCHECK(handled);
  return handled;
}

bool GpuCommandBufferStub::Send(IPC::Message* message) {
  return channel_->Send(message);
}

bool GpuCommandBufferStub::IsScheduled() {
  return (!scheduler_.get() || scheduler_->IsScheduled());
}

bool GpuCommandBufferStub::HasMoreWork() {
  return scheduler_.get() && scheduler_->HasMoreWork();
}

void GpuCommandBufferStub::PollWork() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork");
  delayed_work_scheduled_ = false;
  FastSetActiveURL(active_url_, active_url_hash_);
  if (decoder_.get() && !MakeCurrent())
    return;

  if (scheduler_) {
    uint64 current_messages_processed =
        channel()->gpu_channel_manager()->MessagesProcessed();
    // We're idle when no messages were processed or scheduled.
    bool is_idle =
        (previous_messages_processed_ == current_messages_processed) &&
        !channel()->gpu_channel_manager()->HandleMessagesScheduled();
    if (!is_idle && !last_idle_time_.is_null()) {
      base::TimeDelta time_since_idle =
          base::TimeTicks::Now() - last_idle_time_;
      base::TimeDelta max_time_since_idle =
          base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs);

      // Force idle when it's been too long since last time we were idle.
      if (time_since_idle > max_time_since_idle)
        is_idle = true;
    }

    if (is_idle) {
      last_idle_time_ = base::TimeTicks::Now();
      scheduler_->PerformIdleWork();
    }
  }
  ScheduleDelayedWork(kHandleMoreWorkPeriodBusyMs);
}

bool GpuCommandBufferStub::HasUnprocessedCommands() {
  if (command_buffer_) {
    gpu::CommandBuffer::State state = command_buffer_->GetLastState();
    return command_buffer_->GetPutOffset() != state.get_offset &&
           !gpu::error::IsError(state.error);
  }
  return false;
}

void GpuCommandBufferStub::ScheduleDelayedWork(int64 delay) {
  if (!HasMoreWork()) {
    last_idle_time_ = base::TimeTicks();
    return;
  }

  if (delayed_work_scheduled_)
    return;
  delayed_work_scheduled_ = true;

  // Idle when no messages are processed between now and when
  // PollWork is called.
  previous_messages_processed_ =
      channel()->gpu_channel_manager()->MessagesProcessed();
  if (last_idle_time_.is_null())
    last_idle_time_ = base::TimeTicks::Now();

  // IsScheduled() returns true after passing all unschedule fences
  // and this is when we can start performing idle work. Idle work
  // is done synchronously so we can set delay to 0 and instead poll
  // for more work at the rate idle work is performed. This also ensures
  // that idle work is done as efficiently as possible without any
  // unnecessary delays.
  if (scheduler_.get() &&
      scheduler_->IsScheduled() &&
      scheduler_->HasMoreIdleWork()) {
    delay = 0;
  }

  base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
      FROM_HERE, base::Bind(&GpuCommandBufferStub::PollWork, AsWeakPtr()),
      base::TimeDelta::FromMilliseconds(delay));
}

bool GpuCommandBufferStub::MakeCurrent() {
  if (decoder_->MakeCurrent())
    return true;
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
  command_buffer_->SetParseError(gpu::error::kLostContext);
  CheckContextLost();
  return false;
}

void GpuCommandBufferStub::Destroy() {
  if (wait_for_token_) {
    Send(wait_for_token_->reply.release());
    wait_for_token_.reset();
  }
  if (wait_for_get_offset_) {
    Send(wait_for_get_offset_->reply.release());
    wait_for_get_offset_.reset();
  }
  if (handle_.is_null() && !active_url_.is_empty()) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(new GpuHostMsg_DidDestroyOffscreenContext(
        active_url_));
  }

  memory_manager_client_state_.reset();

  while (!sync_points_.empty())
    OnRetireSyncPoint(sync_points_.front());

  if (decoder_)
    decoder_->set_engine(NULL);

  // The scheduler has raw references to the decoder and the command buffer so
  // destroy it before those.
  scheduler_.reset();

  bool have_context = false;
  if (decoder_ && decoder_->GetGLContext()) {
    // Try to make the context current regardless of whether it was lost, so we
    // don't leak resources.
    have_context = decoder_->GetGLContext()->MakeCurrent(surface_.get());
  }
  FOR_EACH_OBSERVER(DestructionObserver,
                    destruction_observers_,
                    OnWillDestroyStub());

  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }

  command_buffer_.reset();

  // Remove this after crbug.com/248395 is sorted out.
  surface_ = NULL;
}

void GpuCommandBufferStub::OnInitializeFailed(IPC::Message* reply_message) {
  Destroy();
  GpuCommandBufferMsg_Initialize::WriteReplyParams(
      reply_message, false, gpu::Capabilities());
  Send(reply_message);
}

void GpuCommandBufferStub::OnInitialize(
    base::SharedMemoryHandle shared_state_handle,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnInitialize");
  DCHECK(!command_buffer_.get());

  scoped_ptr<base::SharedMemory> shared_state_shm(
      new base::SharedMemory(shared_state_handle, false));

  command_buffer_.reset(new gpu::CommandBufferService(
      context_group_->transfer_buffer_manager()));

  bool result = command_buffer_->Initialize();
  DCHECK(result);

  decoder_.reset(::gpu::gles2::GLES2Decoder::Create(context_group_.get()));
  scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(),
                                         decoder_.get(),
                                         decoder_.get()));
  if (preemption_flag_.get())
    scheduler_->SetPreemptByFlag(preemption_flag_);

  decoder_->set_engine(scheduler_.get());

  if (!handle_.is_null()) {
#if defined(OS_MACOSX) || defined(UI_COMPOSITOR_IMAGE_TRANSPORT)
    if (software_) {
      LOG(ERROR) << "No software support.";
      OnInitializeFailed(reply_message);
      return;
    }
#endif

    surface_ = ImageTransportSurface::CreateSurface(
        channel_->gpu_channel_manager(),
        this,
        handle_);
  } else {
    GpuChannelManager* manager = channel_->gpu_channel_manager();
    surface_ = manager->GetDefaultOffscreenSurface();
  }

  if (!surface_.get()) {
    DLOG(ERROR) << "Failed to create surface.";
    OnInitializeFailed(reply_message);
    return;
  }
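
  // With virtualized GL contexts, all stubs in a channel's share group run on
  // a single real GL context; the GLContextVirtual created below saves and
  // restores this decoder's GL state whenever it is made current on top of
  // that shared real context.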
  scoped_refptr<gfx::GLContext> context;
  if (use_virtualized_gl_context_ && channel_->share_group()) {
    context = channel_->share_group()->GetSharedContext();
    if (!context.get()) {
      context = gfx::GLContext::CreateGLContext(
          channel_->share_group(),
          channel_->gpu_channel_manager()->GetDefaultOffscreenSurface(),
          gpu_preference_);
      if (!context.get()) {
        DLOG(ERROR) << "Failed to create shared context for virtualization.";
        OnInitializeFailed(reply_message);
        return;
      }

      channel_->share_group()->SetSharedContext(context.get());
    }

    // This should be a non-virtual GL context.
    DCHECK(context->GetHandle());
    context = new gpu::GLContextVirtual(
        channel_->share_group(), context.get(), decoder_->AsWeakPtr());
    if (!context->Initialize(surface_.get(), gpu_preference_)) {
      // TODO(sievers): The real context created above for the default
      // offscreen surface might not be compatible with this surface.
      // Need to adjust at least GLX to be able to create the initial context
      // with a config that is compatible with onscreen and offscreen surfaces.
      context = NULL;

      DLOG(ERROR) << "Failed to initialize virtual GL context.";
      OnInitializeFailed(reply_message);
      return;
    }
  }

  if (!context.get()) {
    context = gfx::GLContext::CreateGLContext(
        channel_->share_group(), surface_.get(), gpu_preference_);
  }

  if (!context.get()) {
    DLOG(ERROR) << "Failed to create context.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Failed to make context current.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->GetGLStateRestorer()) {
    context->SetGLStateRestorer(
        new gpu::GLStateRestorerImpl(decoder_->AsWeakPtr()));
  }

  if (!context->GetTotalGpuMemory(&total_gpu_memory_))
    total_gpu_memory_ = 0;

  if (!context_group_->has_program_cache() &&
      !context_group_->feature_info()->workarounds().disable_program_cache) {
    context_group_->set_program_cache(
        channel_->gpu_channel_manager()->program_cache());
  }

  // Initialize the decoder with either the view or pbuffer GLContext.
  if (!decoder_->Initialize(surface_,
                            context,
                            !surface_id(),
                            initial_size_,
                            disallowed_features_,
                            requested_attribs_)) {
    DLOG(ERROR) << "Failed to initialize decoder.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (base::CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kEnableGPUServiceLogging)) {
    decoder_->set_log_commands(true);
  }

  decoder_->GetLogger()->SetMsgCallback(
      base::Bind(&GpuCommandBufferStub::SendConsoleMessage,
                 base::Unretained(this)));
  decoder_->SetShaderCacheCallback(
      base::Bind(&GpuCommandBufferStub::SendCachedShader,
                 base::Unretained(this)));
  decoder_->SetWaitSyncPointCallback(
      base::Bind(&GpuCommandBufferStub::OnWaitSyncPoint,
                 base::Unretained(this)));

  command_buffer_->SetPutOffsetChangeCallback(
      base::Bind(&GpuCommandBufferStub::PutChanged, base::Unretained(this)));
  command_buffer_->SetGetBufferChangeCallback(
      base::Bind(&gpu::GpuScheduler::SetGetBuffer,
                 base::Unretained(scheduler_.get())));
  command_buffer_->SetParseErrorCallback(
      base::Bind(&GpuCommandBufferStub::OnParseError, base::Unretained(this)));
  scheduler_->SetSchedulingChangedCallback(
      base::Bind(&GpuChannel::StubSchedulingChanged,
                 base::Unretained(channel_)));

  if (watchdog_) {
    scheduler_->SetCommandProcessedCallback(
        base::Bind(&GpuCommandBufferStub::OnCommandProcessed,
                   base::Unretained(this)));
  }

  const size_t kSharedStateSize = sizeof(gpu::CommandBufferSharedState);
  if (!shared_state_shm->Map(kSharedStateSize)) {
    DLOG(ERROR) << "Failed to map shared state buffer.";
    OnInitializeFailed(reply_message);
    return;
  }
  command_buffer_->SetSharedStateBuffer(gpu::MakeBackingFromSharedMemory(
      shared_state_shm.Pass(), kSharedStateSize));

  gpu::Capabilities capabilities = decoder_->GetCapabilities();
  capabilities.future_sync_points = channel_->allow_future_sync_points();

  GpuCommandBufferMsg_Initialize::WriteReplyParams(
      reply_message, true, capabilities);
  Send(reply_message);

  if (handle_.is_null() && !active_url_.is_empty()) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(new GpuHostMsg_DidCreateOffscreenContext(
        active_url_));
  }
}

void GpuCommandBufferStub::OnCreateStreamTexture(
    uint32 texture_id, int32 stream_id, bool* succeeded) {
#if defined(OS_ANDROID)
  *succeeded = StreamTexture::Create(this, texture_id, stream_id);
#else
  *succeeded = false;
#endif
}

void GpuCommandBufferStub::SetLatencyInfoCallback(
    const LatencyInfoCallback& callback) {
  latency_info_callback_ = callback;
}
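
// GetRequestedAttribute() scans the (enum, value) pairs captured at context
// creation time. For example, with requested_attribs_ = {kAlphaSize, 8,
// kDepthSize, 24} (hypothetical attribute enums), asking for kDepthSize
// yields 24. The double increment in the loop assumes a well-formed,
// even-length pair list.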
int32 GpuCommandBufferStub::GetRequestedAttribute(int attr) const {
  // The requested attributes are stored as (enum, value) pairs; search for
  // the requested attribute and return its value, or -1 if it is absent.
  for (std::vector<int32>::const_iterator it = requested_attribs_.begin();
       it != requested_attribs_.end(); ++it) {
    if (*it++ == attr) {
      return *it;
    }
  }
  return -1;
}

void GpuCommandBufferStub::OnSetGetBuffer(int32 shm_id,
                                          IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetGetBuffer");
  if (command_buffer_)
    command_buffer_->SetGetBuffer(shm_id);
  Send(reply_message);
}

void GpuCommandBufferStub::OnProduceFrontBuffer(const gpu::Mailbox& mailbox) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnProduceFrontBuffer");
  if (!decoder_) {
    LOG(ERROR) << "Can't produce front buffer before initialization.";
    return;
  }

  decoder_->ProduceFrontBuffer(mailbox);
}

void GpuCommandBufferStub::OnParseError() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnParseError");
  DCHECK(command_buffer_.get());
  gpu::CommandBuffer::State state = command_buffer_->GetLastState();
  IPC::Message* msg = new GpuCommandBufferMsg_Destroyed(
      route_id_, state.context_lost_reason, state.error);
  msg->set_unblock(true);
  Send(msg);

  // Tell the browser about this context loss as well, so it can
  // determine whether client APIs like WebGL need to be immediately
  // blocked from automatically running.
  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DidLoseContext(
      handle_.is_null(), state.context_lost_reason, active_url_));

  CheckContextLost();
}

void GpuCommandBufferStub::OnWaitForTokenInRange(int32 start,
                                                 int32 end,
                                                 IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForTokenInRange");
  DCHECK(command_buffer_.get());
  CheckContextLost();
  if (wait_for_token_)
    LOG(ERROR) << "Got WaitForToken command while currently waiting for token.";
  wait_for_token_ =
      make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
  CheckCompleteWaits();
}

void GpuCommandBufferStub::OnWaitForGetOffsetInRange(
    int32 start,
    int32 end,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForGetOffsetInRange");
  DCHECK(command_buffer_.get());
  CheckContextLost();
  if (wait_for_get_offset_) {
    LOG(ERROR)
        << "Got WaitForGetOffset command while currently waiting for offset.";
  }
  wait_for_get_offset_ =
      make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
  CheckCompleteWaits();
}
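
// A pending wait completes as soon as the token or get offset enters the
// requested range, but also on any parse error, so that a client blocked in
// a synchronous wait is released when the context is lost.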
void GpuCommandBufferStub::CheckCompleteWaits() {
  if (wait_for_token_ || wait_for_get_offset_) {
    gpu::CommandBuffer::State state = command_buffer_->GetLastState();
    if (wait_for_token_ &&
        (gpu::CommandBuffer::InRange(
             wait_for_token_->start, wait_for_token_->end, state.token) ||
         state.error != gpu::error::kNoError)) {
      ReportState();
      GpuCommandBufferMsg_WaitForTokenInRange::WriteReplyParams(
          wait_for_token_->reply.get(), state);
      Send(wait_for_token_->reply.release());
      wait_for_token_.reset();
    }
    if (wait_for_get_offset_ &&
        (gpu::CommandBuffer::InRange(wait_for_get_offset_->start,
                                     wait_for_get_offset_->end,
                                     state.get_offset) ||
         state.error != gpu::error::kNoError)) {
      ReportState();
      GpuCommandBufferMsg_WaitForGetOffsetInRange::WriteReplyParams(
          wait_for_get_offset_->reply.get(), state);
      Send(wait_for_get_offset_->reply.release());
      wait_for_get_offset_.reset();
    }
  }
}

void GpuCommandBufferStub::OnAsyncFlush(
    int32 put_offset,
    uint32 flush_count,
    const std::vector<ui::LatencyInfo>& latency_info) {
  TRACE_EVENT1(
      "gpu", "GpuCommandBufferStub::OnAsyncFlush", "put_offset", put_offset);

  if (ui::LatencyInfo::Verify(latency_info,
                              "GpuCommandBufferStub::OnAsyncFlush") &&
      !latency_info_callback_.is_null()) {
    latency_info_callback_.Run(latency_info);
  }
  DCHECK(command_buffer_.get());
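  // flush_count is unsigned and expected to wrap, so compare using modular
  // arithmetic: the flush is treated as in-order if it is less than 0x8000000
  // counts ahead of the last one processed.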
  if (flush_count - last_flush_count_ < 0x8000000U) {
    last_flush_count_ = flush_count;
    command_buffer_->Flush(put_offset);
  } else {
    // We received this message out-of-order. This should not happen but is
    // here to catch regressions. Ignore the message.
    NOTREACHED() << "Received a Flush message out-of-order";
  }

  ReportState();
}

void GpuCommandBufferStub::OnRescheduled() {
  gpu::CommandBuffer::State pre_state = command_buffer_->GetLastState();
  command_buffer_->Flush(command_buffer_->GetPutOffset());
  gpu::CommandBuffer::State post_state = command_buffer_->GetLastState();

  if (pre_state.get_offset != post_state.get_offset)
    ReportState();
}

void GpuCommandBufferStub::OnRegisterTransferBuffer(
    int32 id,
    base::SharedMemoryHandle transfer_buffer,
    uint32 size) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterTransferBuffer");

  // Take ownership of the memory and map it into this process.
  // This validates the size.
  scoped_ptr<base::SharedMemory> shared_memory(
      new base::SharedMemory(transfer_buffer, false));
  if (!shared_memory->Map(size)) {
    DVLOG(0) << "Failed to map shared memory.";
    return;
  }

  if (command_buffer_) {
    command_buffer_->RegisterTransferBuffer(
        id, gpu::MakeBackingFromSharedMemory(shared_memory.Pass(), size));
  }
}

void GpuCommandBufferStub::OnDestroyTransferBuffer(int32 id) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyTransferBuffer");

  if (command_buffer_)
    command_buffer_->DestroyTransferBuffer(id);
}

void GpuCommandBufferStub::OnCommandProcessed() {
  if (watchdog_)
    watchdog_->CheckArmed();
}

void GpuCommandBufferStub::ReportState() { command_buffer_->UpdateState(); }

void GpuCommandBufferStub::PutChanged() {
  FastSetActiveURL(active_url_, active_url_hash_);
  scheduler_->PutChanged();
}

void GpuCommandBufferStub::OnCreateVideoDecoder(
    media::VideoCodecProfile profile,
    int32 decoder_route_id,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoDecoder");
  GpuVideoDecodeAccelerator* decoder = new GpuVideoDecodeAccelerator(
      decoder_route_id, this, channel_->io_task_runner());
  decoder->Initialize(profile, reply_message);
  // decoder is registered as a DestructionObserver of this stub and will
  // self-delete during destruction of this stub.
}

void GpuCommandBufferStub::OnCreateVideoEncoder(
    media::VideoPixelFormat input_format,
    const gfx::Size& input_visible_size,
    media::VideoCodecProfile output_profile,
    uint32 initial_bitrate,
    int32 encoder_route_id,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoEncoder");
  GpuVideoEncodeAccelerator* encoder =
      new GpuVideoEncodeAccelerator(encoder_route_id, this);
  encoder->Initialize(input_format,
                      input_visible_size,
                      output_profile,
                      initial_bitrate,
                      reply_message);
  // encoder is registered as a DestructionObserver of this stub and will
  // self-delete during destruction of this stub.
}

void GpuCommandBufferStub::OnSetSurfaceVisible(bool visible) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetSurfaceVisible");
  if (memory_manager_client_state_)
    memory_manager_client_state_->SetVisible(visible);
}

void GpuCommandBufferStub::AddSyncPoint(uint32 sync_point) {
  sync_points_.push_back(sync_point);
}

void GpuCommandBufferStub::OnRetireSyncPoint(uint32 sync_point) {
  DCHECK(!sync_points_.empty() && sync_points_.front() == sync_point);
  sync_points_.pop_front();
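
  // If the mailbox manager synchronizes across contexts (UsesSync()), publish
  // this context's texture updates as part of retiring the sync point, so
  // that a consumer waiting on the sync point observes them (see
  // PullTextureUpdates() below).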
  gpu::gles2::MailboxManager* mailbox_manager =
      context_group_->mailbox_manager();
  if (mailbox_manager->UsesSync() && MakeCurrent())
    mailbox_manager->PushTextureUpdates(sync_point);

  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->RetireSyncPoint(sync_point);
}

bool GpuCommandBufferStub::OnWaitSyncPoint(uint32 sync_point) {
  if (!sync_point)
    return true;
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  if (manager->sync_point_manager()->IsSyncPointRetired(sync_point)) {
    PullTextureUpdates(sync_point);
    return true;
  }
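
  // The sync point has not been retired yet: unschedule this stub and ask the
  // sync point manager to call us back when it retires;
  // OnWaitSyncPointCompleted() then reschedules the stub. Returning
  // IsScheduled() (normally false at this point) tells the decoder to stop
  // processing commands until then.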
  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitSyncPoint", this,
                             "GpuCommandBufferStub", this);
  }
  scheduler_->SetScheduled(false);
  ++sync_point_wait_count_;
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point, base::Bind(&GpuCommandBufferStub::OnWaitSyncPointCompleted,
                             this->AsWeakPtr(), sync_point));
  return scheduler_->IsScheduled();
}

void GpuCommandBufferStub::OnWaitSyncPointCompleted(uint32 sync_point) {
  PullTextureUpdates(sync_point);
  --sync_point_wait_count_;
  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_END1("gpu", "WaitSyncPoint", this,
                           "GpuCommandBufferStub", this);
  }
  scheduler_->SetScheduled(true);
}

void GpuCommandBufferStub::PullTextureUpdates(uint32 sync_point) {
  gpu::gles2::MailboxManager* mailbox_manager =
      context_group_->mailbox_manager();
  if (mailbox_manager->UsesSync() && MakeCurrent())
    mailbox_manager->PullTextureUpdates(sync_point);
}

void GpuCommandBufferStub::OnSignalSyncPoint(uint32 sync_point, uint32 id) {
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point,
      base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
                 this->AsWeakPtr(),
                 id));
}

void GpuCommandBufferStub::OnSignalSyncPointAck(uint32 id) {
  Send(new GpuCommandBufferMsg_SignalSyncPointAck(route_id_, id));
}

void GpuCommandBufferStub::OnSignalQuery(uint32 query_id, uint32 id) {
  if (decoder_) {
    gpu::gles2::QueryManager* query_manager = decoder_->GetQueryManager();
    if (query_manager) {
      gpu::gles2::QueryManager::Query* query =
          query_manager->GetQuery(query_id);
      if (query) {
        query->AddCallback(
            base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
                       this->AsWeakPtr(),
                       id));
        return;
      }
    }
  }
  // Something went wrong, run callback immediately.
  OnSignalSyncPointAck(id);
}

void GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback(
    bool has_callback) {
  TRACE_EVENT0(
      "gpu",
      "GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback");
  if (has_callback) {
    if (!memory_manager_client_state_) {
      memory_manager_client_state_.reset(GetMemoryManager()->CreateClientState(
          this, surface_id_ != 0, true));
    }
  } else {
    memory_manager_client_state_.reset();
  }
}

void GpuCommandBufferStub::OnCreateImage(int32 id,
                                         gfx::GpuMemoryBufferHandle handle,
                                         gfx::Size size,
                                         gfx::GpuMemoryBuffer::Format format,
                                         uint32 internalformat) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateImage");

  if (!decoder_)
    return;

  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  if (image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image already exists with same ID.";
    return;
  }

  if (!gpu::ImageFactory::IsGpuMemoryBufferFormatSupported(
          format, decoder_->GetCapabilities())) {
    LOG(ERROR) << "Format is not supported.";
    return;
  }

  if (!gpu::ImageFactory::IsImageSizeValidForGpuMemoryBufferFormat(size,
                                                                   format)) {
    LOG(ERROR) << "Invalid image size for format.";
    return;
  }

  if (!gpu::ImageFactory::IsImageFormatCompatibleWithGpuMemoryBufferFormat(
          internalformat, format)) {
    LOG(ERROR) << "Incompatible image format.";
    return;
  }

  scoped_refptr<gfx::GLImage> image = channel()->CreateImageForGpuMemoryBuffer(
      handle, size, format, internalformat);
  if (!image.get())
    return;

  image_manager->AddImage(image.get(), id);
}

void GpuCommandBufferStub::OnDestroyImage(int32 id) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyImage");

  if (!decoder_)
    return;

  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  if (!image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image with ID doesn't exist.";
    return;
  }

  image_manager->RemoveImage(id);
}

void GpuCommandBufferStub::SendConsoleMessage(
    int32 id,
    const std::string& message) {
  GPUCommandBufferConsoleMessage console_message;
  console_message.id = id;
  console_message.message = message;
  IPC::Message* msg = new GpuCommandBufferMsg_ConsoleMsg(
      route_id_, console_message);
  msg->set_unblock(true);
  Send(msg);
}

void GpuCommandBufferStub::SendCachedShader(
    const std::string& key, const std::string& shader) {
  channel_->CacheShader(key, shader);
}

void GpuCommandBufferStub::AddDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.AddObserver(observer);
}

void GpuCommandBufferStub::RemoveDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.RemoveObserver(observer);
}

void GpuCommandBufferStub::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> flag) {
  preemption_flag_ = flag;
  if (scheduler_)
    scheduler_->SetPreemptByFlag(preemption_flag_);
}
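
// total_gpu_memory_ stays zero when the driver could not report a total in
// OnInitialize(), so a false return here means "unknown" rather than "none".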
bool GpuCommandBufferStub::GetTotalGpuMemory(uint64* bytes) {
  *bytes = total_gpu_memory_;
  return !!total_gpu_memory_;
}

gfx::Size GpuCommandBufferStub::GetSurfaceSize() const {
  if (!surface_.get())
    return gfx::Size();
  return surface_->GetSize();
}

const gpu::gles2::FeatureInfo* GpuCommandBufferStub::GetFeatureInfo() const {
  return context_group_->feature_info();
}

gpu::gles2::MemoryTracker* GpuCommandBufferStub::GetMemoryTracker() const {
  return context_group_->memory_tracker();
}

void GpuCommandBufferStub::SetMemoryAllocation(
    const gpu::MemoryAllocation& allocation) {
  if (!last_memory_allocation_valid_ ||
      !allocation.Equals(last_memory_allocation_)) {
    Send(new GpuCommandBufferMsg_SetMemoryAllocation(
        route_id_, allocation));
  }

  last_memory_allocation_valid_ = true;
  last_memory_allocation_ = allocation;
}

void GpuCommandBufferStub::SuggestHaveFrontBuffer(
    bool suggest_have_frontbuffer) {
  // This can be called outside of OnMessageReceived, so the context needs
  // to be made current before calling methods on the surface.
  if (surface_.get() && MakeCurrent())
    surface_->SetFrontbufferAllocation(suggest_have_frontbuffer);
}

bool GpuCommandBufferStub::CheckContextLost() {
  DCHECK(command_buffer_);
  gpu::CommandBuffer::State state = command_buffer_->GetLastState();
  bool was_lost = state.error == gpu::error::kLostContext;

  // Work around issues with recovery by allowing a new GPU process to launch.
  if (was_lost &&
      context_group_->feature_info()->workarounds().exit_on_context_lost &&
      !base::CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kSingleProcess) &&
      !base::CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kInProcessGPU)) {
    LOG(ERROR) << "Exiting GPU process because some drivers cannot recover"
               << " from problems.";
#if defined(OS_WIN)
    base::win::SetShouldCrashOnProcessDetach(false);
#endif
    exit(0);
  }

  // Lose all other contexts if the reset was triggered by the robustness
  // extension instead of being synthetic.
  if (was_lost && decoder_ && decoder_->WasContextLostByRobustnessExtension() &&
      (gfx::GLContext::LosesAllContextsOnContextLost() ||
       use_virtualized_gl_context_))
    channel_->LoseAllContexts();

  CheckCompleteWaits();
  return was_lost;
}

void GpuCommandBufferStub::MarkContextLost() {
  if (!command_buffer_ ||
      command_buffer_->GetLastState().error == gpu::error::kLostContext)
    return;

  command_buffer_->SetContextLostReason(gpu::error::kUnknown);
  if (decoder_)
    decoder_->MarkContextLost(gpu::error::kUnknown);
  command_buffer_->SetParseError(gpu::error::kLostContext);
}

uint64 GpuCommandBufferStub::GetMemoryUsage() const {
  return GetMemoryManager()->GetClientMemoryUsage(this);
}

void GpuCommandBufferStub::SendSwapBuffersCompleted(
    const std::vector<ui::LatencyInfo>& latency_info,
    gfx::SwapResult result) {
  Send(new GpuCommandBufferMsg_SwapBuffersCompleted(route_id_, latency_info,
                                                    result));
}

void GpuCommandBufferStub::SendUpdateVSyncParameters(base::TimeTicks timebase,
                                                     base::TimeDelta interval) {
  Send(new GpuCommandBufferMsg_UpdateVSyncParameters(route_id_, timebase,
                                                     interval));
}

}  // namespace content