// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/hash.h"
#include "base/json/json_writer.h"
#include "base/memory/shared_memory.h"
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_command_buffer_stub.h"
#include "content/common/gpu/gpu_memory_manager.h"
#include "content/common/gpu/gpu_memory_tracking.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/gpu_watchdog.h"
#include "content/common/gpu/image_transport_surface.h"
#include "content/common/gpu/media/gpu_video_decode_accelerator.h"
#include "content/common/gpu/media/gpu_video_encode_accelerator.h"
#include "content/public/common/content_client.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/service/gl_context_virtual.h"
#include "gpu/command_buffer/service/gl_state_restorer_impl.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/logger.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/query_manager.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/command_buffer/service/valuebuffer_manager.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_switches.h"

#if defined(OS_WIN)
#include "content/public/common/sandbox_init.h"
#endif

#if defined(OS_ANDROID)
#include "content/common/gpu/stream_texture_android.h"
#endif

namespace content {

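// Tracks a single pending WaitForTokenInRange/WaitForGetOffsetInRange request.
// The delayed IPC reply is parked here and answered from CheckCompleteWaits()
// once the command buffer reaches the requested range or hits an error.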
struct WaitForCommandState {
  WaitForCommandState(int32 start, int32 end, IPC::Message* reply)
      : start(start), end(end), reply(reply) {}

  int32 start;
  int32 end;
  scoped_ptr<IPC::Message> reply;
};

namespace {

// The GpuCommandBufferMemoryTracker class provides a bridge between the
// ContextGroup's memory type managers and the GpuMemoryManager class.
class GpuCommandBufferMemoryTracker : public gpu::gles2::MemoryTracker {
 public:
  explicit GpuCommandBufferMemoryTracker(GpuChannel* channel) :
      tracking_group_(channel->gpu_channel_manager()->gpu_memory_manager()->
          CreateTrackingGroup(channel->renderer_pid(), this)) {
  }

  void TrackMemoryAllocatedChange(
      size_t old_size,
      size_t new_size,
      gpu::gles2::MemoryTracker::Pool pool) override {
    tracking_group_->TrackMemoryAllocatedChange(
        old_size, new_size, pool);
  }

  bool EnsureGPUMemoryAvailable(size_t size_needed) override {
    return tracking_group_->EnsureGPUMemoryAvailable(size_needed);
  }

 private:
  ~GpuCommandBufferMemoryTracker() override {}

  scoped_ptr<GpuMemoryTrackingGroup> tracking_group_;

  DISALLOW_COPY_AND_ASSIGN(GpuCommandBufferMemoryTracker);
};

// FastSetActiveURL will shortcut the expensive call to SetActiveURL when the
// url_hash matches.
void FastSetActiveURL(const GURL& url, size_t url_hash) {
  // Leave the previously set URL in the empty case -- empty URLs are given by
  // BlinkPlatformImpl::createOffscreenGraphicsContext3D. Hopefully the
  // onscreen context URL was set previously and will show up even when a crash
  // occurs during offscreen command processing.
  if (url.is_empty())
    return;
  static size_t g_last_url_hash = 0;
  if (url_hash != g_last_url_hash) {
    g_last_url_hash = url_hash;
    GetContentClient()->SetActiveURL(url);
  }
}

// The first time polling a fence, delay some extra time to allow other
// stubs to process some work, or else the timing of the fences could
// allow a pattern of alternating fast and slow frames to occur.
const int64 kHandleMoreWorkPeriodMs = 2;
const int64 kHandleMoreWorkPeriodBusyMs = 1;

// Prevents idle work from being starved.
const int64 kMaxTimeSinceIdleMs = 10;

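// Note: OnMessageReceived() arms the first delayed-work poll with
// kHandleMoreWorkPeriodMs, while PollWork() re-arms itself with the shorter
// kHandleMoreWorkPeriodBusyMs for as long as work remains.
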
class DevToolsChannelData : public base::trace_event::ConvertableToTraceFormat {
 public:
  static scoped_refptr<base::trace_event::ConvertableToTraceFormat>
  CreateForChannel(GpuChannel* channel);

  void AppendAsTraceFormat(std::string* out) const override {
    std::string tmp;
    base::JSONWriter::Write(value_.get(), &tmp);
    *out += tmp;
  }

 private:
  explicit DevToolsChannelData(base::Value* value) : value_(value) {}
  ~DevToolsChannelData() override {}
  scoped_ptr<base::Value> value_;
  DISALLOW_COPY_AND_ASSIGN(DevToolsChannelData);
};

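// CreateForChannel() snapshots per-channel memory statistics into a JSON
// dictionary attached to "GPUTask" trace events. The values below are
// illustrative only; the payload looks roughly like
//   {"renderer_pid":1234,"used_bytes":1048576.0,"limit_bytes":268435456.0}.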
scoped_refptr<base::trace_event::ConvertableToTraceFormat>
DevToolsChannelData::CreateForChannel(GpuChannel* channel) {
  scoped_ptr<base::DictionaryValue> res(new base::DictionaryValue);
  res->SetInteger("renderer_pid", channel->renderer_pid());
  res->SetDouble("used_bytes", channel->GetMemoryUsage());
  res->SetDouble("limit_bytes",
                 channel->gpu_channel_manager()
                     ->gpu_memory_manager()
                     ->GetMaximumClientAllocation());
  return new DevToolsChannelData(res.release());
}

bool IsSupportedImageFormat(const gpu::Capabilities& capabilities,
                            gfx::GpuMemoryBuffer::Format format) {
  switch (format) {
    case gfx::GpuMemoryBuffer::ATC:
    case gfx::GpuMemoryBuffer::ATCIA:
      return capabilities.texture_format_atc;
    case gfx::GpuMemoryBuffer::BGRA_8888:
      return capabilities.texture_format_bgra8888;
    case gfx::GpuMemoryBuffer::DXT1:
      return capabilities.texture_format_dxt1;
    case gfx::GpuMemoryBuffer::DXT5:
      return capabilities.texture_format_dxt5;
    case gfx::GpuMemoryBuffer::ETC1:
      return capabilities.texture_format_etc1;
    case gfx::GpuMemoryBuffer::RGBA_8888:
    case gfx::GpuMemoryBuffer::RGBX_8888:
      return true;
  }

  NOTREACHED();
  return false;
}

}  // namespace

GpuCommandBufferStub::GpuCommandBufferStub(
    GpuChannel* channel,
    GpuCommandBufferStub* share_group,
    const gfx::GLSurfaceHandle& handle,
    gpu::gles2::MailboxManager* mailbox_manager,
    gpu::gles2::SubscriptionRefSet* subscription_ref_set,
    gpu::ValueStateMap* pending_valuebuffer_state,
    const gfx::Size& size,
    const gpu::gles2::DisallowedFeatures& disallowed_features,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    bool use_virtualized_gl_context,
    int32 route_id,
    int32 surface_id,
    GpuWatchdog* watchdog,
    bool software,
    const GURL& active_url)
    : channel_(channel),
      handle_(handle),
      initial_size_(size),
      disallowed_features_(disallowed_features),
      requested_attribs_(attribs),
      gpu_preference_(gpu_preference),
      use_virtualized_gl_context_(use_virtualized_gl_context),
      route_id_(route_id),
      surface_id_(surface_id),
      software_(software),
      last_flush_count_(0),
      last_memory_allocation_valid_(false),
      watchdog_(watchdog),
      sync_point_wait_count_(0),
      delayed_work_scheduled_(false),
      previous_messages_processed_(0),
      active_url_(active_url),
      total_gpu_memory_(0) {
  active_url_hash_ = base::Hash(active_url.possibly_invalid_spec());
  FastSetActiveURL(active_url_, active_url_hash_);

  gpu::gles2::ContextCreationAttribHelper attrib_parser;
  attrib_parser.Parse(requested_attribs_);

  if (share_group) {
    context_group_ = share_group->context_group_;
    DCHECK(context_group_->bind_generates_resource() ==
           attrib_parser.bind_generates_resource);
  } else {
    context_group_ = new gpu::gles2::ContextGroup(
        mailbox_manager,
        new GpuCommandBufferMemoryTracker(channel),
        channel_->gpu_channel_manager()->shader_translator_cache(),
        NULL,
        subscription_ref_set,
        pending_valuebuffer_state,
        attrib_parser.bind_generates_resource);
  }

  use_virtualized_gl_context_ |=
      context_group_->feature_info()->workarounds().use_virtualized_gl_contexts;
}

GpuCommandBufferStub::~GpuCommandBufferStub() {
  Destroy();

  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DestroyCommandBuffer(surface_id()));
}

GpuMemoryManager* GpuCommandBufferStub::GetMemoryManager() const {
  return channel()->gpu_channel_manager()->gpu_memory_manager();
}

bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"),
               "GPUTask",
               "data",
               DevToolsChannelData::CreateForChannel(channel()));
  FastSetActiveURL(active_url_, active_url_hash_);

  bool have_context = false;
  // Ensure the appropriate GL context is current before handling any IPC
  // messages directed at the command buffer. This ensures that the message
  // handler can assume that the context is current (not necessary for
  // RetireSyncPoint or WaitSyncPoint).
  if (decoder_.get() &&
      message.type() != GpuCommandBufferMsg_SetGetBuffer::ID &&
      message.type() != GpuCommandBufferMsg_WaitForTokenInRange::ID &&
      message.type() != GpuCommandBufferMsg_WaitForGetOffsetInRange::ID &&
      message.type() != GpuCommandBufferMsg_RegisterTransferBuffer::ID &&
      message.type() != GpuCommandBufferMsg_DestroyTransferBuffer::ID &&
      message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID &&
      message.type() != GpuCommandBufferMsg_SignalSyncPoint::ID &&
      message.type() !=
          GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback::ID) {
    if (!MakeCurrent())
      return false;
    have_context = true;
  }

  // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers
  // here. This is so the reply can be delayed if the scheduler is unscheduled.
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize,
                                    OnInitialize);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetGetBuffer,
                                    OnSetGetBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ProduceFrontBuffer,
                        OnProduceFrontBuffer);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForTokenInRange,
                                    OnWaitForTokenInRange);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForGetOffsetInRange,
                                    OnWaitForGetOffsetInRange);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer,
                        OnRegisterTransferBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer,
                        OnDestroyTransferBuffer);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoDecoder,
                                    OnCreateVideoDecoder)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoEncoder,
                                    OnCreateVideoEncoder)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetSurfaceVisible,
                        OnSetSurfaceVisible)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RetireSyncPoint,
                        OnRetireSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPoint,
                        OnSignalSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalQuery,
                        OnSignalQuery)
    IPC_MESSAGE_HANDLER(
        GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback,
        OnSetClientHasMemoryAllocationChangedCallback)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateImage, OnCreateImage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyImage, OnDestroyImage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateStreamTexture,
                        OnCreateStreamTexture)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  CheckCompleteWaits();

  if (have_context) {
    // Ensure that any delayed work that was created will be handled.
    ScheduleDelayedWork(kHandleMoreWorkPeriodMs);
  }

  DCHECK(handled);
  return handled;
}

bool GpuCommandBufferStub::Send(IPC::Message* message) {
  return channel_->Send(message);
}

bool GpuCommandBufferStub::IsScheduled() {
  return (!scheduler_.get() || scheduler_->IsScheduled());
}

bool GpuCommandBufferStub::HasMoreWork() {
  return scheduler_.get() && scheduler_->HasMoreWork();
}

void GpuCommandBufferStub::PollWork() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork");
  delayed_work_scheduled_ = false;
  FastSetActiveURL(active_url_, active_url_hash_);
  if (decoder_.get() && !MakeCurrent())
    return;

  if (scheduler_) {
    uint64 current_messages_processed =
        channel()->gpu_channel_manager()->MessagesProcessed();
    // We're idle when no messages were processed or scheduled.
    bool is_idle =
        (previous_messages_processed_ == current_messages_processed) &&
        !channel()->gpu_channel_manager()->HandleMessagesScheduled();
    if (!is_idle && !last_idle_time_.is_null()) {
      base::TimeDelta time_since_idle =
          base::TimeTicks::Now() - last_idle_time_;
      base::TimeDelta max_time_since_idle =
          base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs);

      // Force idle when it's been too long since last time we were idle.
      if (time_since_idle > max_time_since_idle)
        is_idle = true;
    }

    if (is_idle) {
      last_idle_time_ = base::TimeTicks::Now();
      scheduler_->PerformIdleWork();
    }
  }
  ScheduleDelayedWork(kHandleMoreWorkPeriodBusyMs);
}

bool GpuCommandBufferStub::HasUnprocessedCommands() {
  if (command_buffer_) {
    gpu::CommandBuffer::State state = command_buffer_->GetLastState();
    return command_buffer_->GetPutOffset() != state.get_offset &&
           !gpu::error::IsError(state.error);
  }
  return false;
}

void GpuCommandBufferStub::ScheduleDelayedWork(int64 delay) {
  if (!HasMoreWork()) {
    last_idle_time_ = base::TimeTicks();
    return;
  }

  if (delayed_work_scheduled_)
    return;
  delayed_work_scheduled_ = true;

  // Idle when no messages are processed between now and when
  // PollWork is called.
  previous_messages_processed_ =
      channel()->gpu_channel_manager()->MessagesProcessed();
  if (last_idle_time_.is_null())
    last_idle_time_ = base::TimeTicks::Now();

  // IsScheduled() returns true after passing all unschedule fences
  // and this is when we can start performing idle work. Idle work
  // is done synchronously so we can set delay to 0 and instead poll
  // for more work at the rate idle work is performed. This also ensures
  // that idle work is done as efficiently as possible without any
  // unnecessary delays.
  if (scheduler_.get() &&
      scheduler_->IsScheduled() &&
      scheduler_->HasMoreIdleWork()) {
    delay = 0;
  }

  base::MessageLoop::current()->PostDelayedTask(
      FROM_HERE,
      base::Bind(&GpuCommandBufferStub::PollWork, AsWeakPtr()),
      base::TimeDelta::FromMilliseconds(delay));
}

bool GpuCommandBufferStub::MakeCurrent() {
  if (decoder_->MakeCurrent())
    return true;
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
  command_buffer_->SetParseError(gpu::error::kLostContext);
  CheckContextLost();
  return false;
}

void GpuCommandBufferStub::Destroy() {
  if (wait_for_token_) {
    Send(wait_for_token_->reply.release());
    wait_for_token_.reset();
  }
  if (wait_for_get_offset_) {
    Send(wait_for_get_offset_->reply.release());
    wait_for_get_offset_.reset();
  }
  if (handle_.is_null() && !active_url_.is_empty()) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(new GpuHostMsg_DidDestroyOffscreenContext(
        active_url_));
  }

  memory_manager_client_state_.reset();

  while (!sync_points_.empty())
    OnRetireSyncPoint(sync_points_.front());

  if (decoder_)
    decoder_->set_engine(NULL);

  // The scheduler has raw references to the decoder and the command buffer so
  // destroy it before those.
  scheduler_.reset();

  bool have_context = false;
  if (decoder_ && command_buffer_ &&
      command_buffer_->GetLastState().error != gpu::error::kLostContext)
    have_context = decoder_->MakeCurrent();
  FOR_EACH_OBSERVER(DestructionObserver,
                    destruction_observers_,
                    OnWillDestroyStub());

  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }

  command_buffer_.reset();

  // Remove this after crbug.com/248395 is sorted out.
  surface_ = NULL;
}

void GpuCommandBufferStub::OnInitializeFailed(IPC::Message* reply_message) {
  Destroy();
  GpuCommandBufferMsg_Initialize::WriteReplyParams(
      reply_message, false, gpu::Capabilities());
  Send(reply_message);
}

void GpuCommandBufferStub::OnInitialize(
    base::SharedMemoryHandle shared_state_handle,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnInitialize");
  DCHECK(!command_buffer_.get());

  scoped_ptr<base::SharedMemory> shared_state_shm(
      new base::SharedMemory(shared_state_handle, false));

  command_buffer_.reset(new gpu::CommandBufferService(
      context_group_->transfer_buffer_manager()));

  bool result = command_buffer_->Initialize();
  DCHECK(result);

  decoder_.reset(::gpu::gles2::GLES2Decoder::Create(context_group_.get()));

  scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(),
                                         decoder_.get(),
                                         decoder_.get()));
  if (preemption_flag_.get())
    scheduler_->SetPreemptByFlag(preemption_flag_);

  decoder_->set_engine(scheduler_.get());

  if (!handle_.is_null()) {
#if defined(OS_MACOSX) || defined(UI_COMPOSITOR_IMAGE_TRANSPORT)
    if (software_) {
      LOG(ERROR) << "No software support.";
      OnInitializeFailed(reply_message);
      return;
    }
#endif

    surface_ = ImageTransportSurface::CreateSurface(
        channel_->gpu_channel_manager(),
        this,
        handle_);
  } else {
    GpuChannelManager* manager = channel_->gpu_channel_manager();
    surface_ = manager->GetDefaultOffscreenSurface();
  }

  if (!surface_.get()) {
    DLOG(ERROR) << "Failed to create surface.";
    OnInitializeFailed(reply_message);
    return;
  }

  scoped_refptr<gfx::GLContext> context;
  if (use_virtualized_gl_context_ && channel_->share_group()) {
    context = channel_->share_group()->GetSharedContext();
    if (!context.get()) {
      context = gfx::GLContext::CreateGLContext(
          channel_->share_group(),
          channel_->gpu_channel_manager()->GetDefaultOffscreenSurface(),
          gpu_preference_);
      if (!context.get()) {
        DLOG(ERROR) << "Failed to create shared context for virtualization.";
        OnInitializeFailed(reply_message);
        return;
      }
      channel_->share_group()->SetSharedContext(context.get());
    }
    // This should be a non-virtual GL context.
    DCHECK(context->GetHandle());
    context = new gpu::GLContextVirtual(
        channel_->share_group(), context.get(), decoder_->AsWeakPtr());
    if (!context->Initialize(surface_.get(), gpu_preference_)) {
      // TODO(sievers): The real context created above for the default
      // offscreen surface might not be compatible with this surface.
      // Need to adjust at least GLX to be able to create the initial context
      // with a config that is compatible with onscreen and offscreen surfaces.
      context = NULL;

      DLOG(ERROR) << "Failed to initialize virtual GL context.";
      OnInitializeFailed(reply_message);
      return;
    }
  }
  if (!context.get()) {
    context = gfx::GLContext::CreateGLContext(
        channel_->share_group(), surface_.get(), gpu_preference_);
  }
  if (!context.get()) {
    DLOG(ERROR) << "Failed to create context.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Failed to make context current.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->GetGLStateRestorer()) {
    context->SetGLStateRestorer(
        new gpu::GLStateRestorerImpl(decoder_->AsWeakPtr()));
  }

  if (!context->GetTotalGpuMemory(&total_gpu_memory_))
    total_gpu_memory_ = 0;

  if (!context_group_->has_program_cache()) {
    context_group_->set_program_cache(
        channel_->gpu_channel_manager()->program_cache());
  }

  // Initialize the decoder with either the view or pbuffer GLContext.
  if (!decoder_->Initialize(surface_,
                            context,
                            !surface_id(),
                            initial_size_,
                            disallowed_features_,
                            requested_attribs_)) {
    DLOG(ERROR) << "Failed to initialize decoder.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (base::CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kEnableGPUServiceLogging)) {
    decoder_->set_log_commands(true);
  }

  decoder_->GetLogger()->SetMsgCallback(
      base::Bind(&GpuCommandBufferStub::SendConsoleMessage,
                 base::Unretained(this)));
  decoder_->SetShaderCacheCallback(
      base::Bind(&GpuCommandBufferStub::SendCachedShader,
                 base::Unretained(this)));
  decoder_->SetWaitSyncPointCallback(
      base::Bind(&GpuCommandBufferStub::OnWaitSyncPoint,
                 base::Unretained(this)));

  command_buffer_->SetPutOffsetChangeCallback(
      base::Bind(&GpuCommandBufferStub::PutChanged, base::Unretained(this)));
  command_buffer_->SetGetBufferChangeCallback(
      base::Bind(&gpu::GpuScheduler::SetGetBuffer,
                 base::Unretained(scheduler_.get())));
  command_buffer_->SetParseErrorCallback(
      base::Bind(&GpuCommandBufferStub::OnParseError, base::Unretained(this)));
  scheduler_->SetSchedulingChangedCallback(
      base::Bind(&GpuChannel::StubSchedulingChanged,
                 base::Unretained(channel_)));

  if (watchdog_) {
    scheduler_->SetCommandProcessedCallback(
        base::Bind(&GpuCommandBufferStub::OnCommandProcessed,
                   base::Unretained(this)));
  }

  const size_t kSharedStateSize = sizeof(gpu::CommandBufferSharedState);
  if (!shared_state_shm->Map(kSharedStateSize)) {
    DLOG(ERROR) << "Failed to map shared state buffer.";
    OnInitializeFailed(reply_message);
    return;
  }
  command_buffer_->SetSharedStateBuffer(gpu::MakeBackingFromSharedMemory(
      shared_state_shm.Pass(), kSharedStateSize));

  gpu::Capabilities capabilities = decoder_->GetCapabilities();
  capabilities.future_sync_points = channel_->allow_future_sync_points();

  GpuCommandBufferMsg_Initialize::WriteReplyParams(
      reply_message, true, capabilities);
  Send(reply_message);

  if (handle_.is_null() && !active_url_.is_empty()) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(new GpuHostMsg_DidCreateOffscreenContext(
        active_url_));
  }
}

void GpuCommandBufferStub::OnCreateStreamTexture(
    uint32 texture_id, int32 stream_id, bool* succeeded) {
#if defined(OS_ANDROID)
  *succeeded = StreamTexture::Create(this, texture_id, stream_id);
#else
  *succeeded = false;
#endif
}

void GpuCommandBufferStub::SetLatencyInfoCallback(
    const LatencyInfoCallback& callback) {
  latency_info_callback_ = callback;
}

int32 GpuCommandBufferStub::GetRequestedAttribute(int attr) const {
  // The requested attributes are stored as (enum, value) pairs; search for
  // the requested attribute and return its value.
  for (std::vector<int32>::const_iterator it = requested_attribs_.begin();
       it != requested_attribs_.end(); ++it) {
    if (*it++ == attr) {
      return *it;
    }
  }
  return -1;
}

void GpuCommandBufferStub::OnSetGetBuffer(int32 shm_id,
                                          IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetGetBuffer");
  if (command_buffer_)
    command_buffer_->SetGetBuffer(shm_id);
  Send(reply_message);
}

void GpuCommandBufferStub::OnProduceFrontBuffer(const gpu::Mailbox& mailbox) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnProduceFrontBuffer");
  if (!decoder_) {
    LOG(ERROR) << "Can't produce front buffer before initialization.";
    return;
  }

  decoder_->ProduceFrontBuffer(mailbox);
}

void GpuCommandBufferStub::OnParseError() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnParseError");
  DCHECK(command_buffer_.get());
  gpu::CommandBuffer::State state = command_buffer_->GetLastState();
  IPC::Message* msg = new GpuCommandBufferMsg_Destroyed(
      route_id_, state.context_lost_reason);
  msg->set_unblock(true);
  Send(msg);

  // Tell the browser about this context loss as well, so it can
  // determine whether client APIs like WebGL need to be immediately
  // blocked from automatically running.
  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DidLoseContext(
      handle_.is_null(), state.context_lost_reason, active_url_));

  CheckContextLost();
}

void GpuCommandBufferStub::OnWaitForTokenInRange(int32 start,
                                                 int32 end,
                                                 IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForTokenInRange");
  DCHECK(command_buffer_.get());

  if (wait_for_token_)
    LOG(ERROR) << "Got WaitForToken command while currently waiting for token.";
  wait_for_token_ =
      make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
  CheckCompleteWaits();
}

void GpuCommandBufferStub::OnWaitForGetOffsetInRange(
    int32 start,
    int32 end,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForGetOffsetInRange");
  DCHECK(command_buffer_.get());

  if (wait_for_get_offset_) {
    LOG(ERROR)
        << "Got WaitForGetOffset command while currently waiting for offset.";
  }
  wait_for_get_offset_ =
      make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
  CheckCompleteWaits();
}

void GpuCommandBufferStub::CheckCompleteWaits() {
  if (wait_for_token_ || wait_for_get_offset_) {
    gpu::CommandBuffer::State state = command_buffer_->GetLastState();
    if (wait_for_token_ &&
        (gpu::CommandBuffer::InRange(
             wait_for_token_->start, wait_for_token_->end, state.token) ||
         state.error != gpu::error::kNoError)) {
      ReportState();
      GpuCommandBufferMsg_WaitForTokenInRange::WriteReplyParams(
          wait_for_token_->reply.get(), state);
      Send(wait_for_token_->reply.release());
      wait_for_token_.reset();
    }
    if (wait_for_get_offset_ &&
        (gpu::CommandBuffer::InRange(wait_for_get_offset_->start,
                                     wait_for_get_offset_->end,
                                     state.get_offset) ||
         state.error != gpu::error::kNoError)) {
      ReportState();
      GpuCommandBufferMsg_WaitForGetOffsetInRange::WriteReplyParams(
          wait_for_get_offset_->reply.get(), state);
      Send(wait_for_get_offset_->reply.release());
      wait_for_get_offset_.reset();
    }
  }
}

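// OnAsyncFlush() is the main command-submission path: the client advances the
// put offset and the CommandBufferService pulls commands via the PutChanged
// callback installed in OnInitialize().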
void GpuCommandBufferStub::OnAsyncFlush(
    int32 put_offset,
    uint32 flush_count,
    const std::vector<ui::LatencyInfo>& latency_info) {
  TRACE_EVENT1(
      "gpu", "GpuCommandBufferStub::OnAsyncFlush", "put_offset", put_offset);

  if (ui::LatencyInfo::Verify(latency_info,
                              "GpuCommandBufferStub::OnAsyncFlush") &&
      !latency_info_callback_.is_null()) {
    latency_info_callback_.Run(latency_info);
  }
  DCHECK(command_buffer_.get());
  // The unsigned subtraction is wraparound-safe: a flush_count ahead of
  // last_flush_count_ by less than 0x8000000 is treated as in-order.
  if (flush_count - last_flush_count_ < 0x8000000U) {
    last_flush_count_ = flush_count;
    command_buffer_->Flush(put_offset);
  } else {
    // We received this message out-of-order. This should not happen but is
    // here to catch regressions. Ignore the message.
    NOTREACHED() << "Received a Flush message out-of-order";
  }

  ReportState();
}

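// Called when the stub becomes scheduled again: re-issues the last put offset
// so processing can resume, and reports state only if the get offset actually
// advanced.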
void GpuCommandBufferStub::OnRescheduled() {
  gpu::CommandBuffer::State pre_state = command_buffer_->GetLastState();
  command_buffer_->Flush(command_buffer_->GetPutOffset());
  gpu::CommandBuffer::State post_state = command_buffer_->GetLastState();

  if (pre_state.get_offset != post_state.get_offset)
    ReportState();
}

void GpuCommandBufferStub::OnRegisterTransferBuffer(
    int32 id,
    base::SharedMemoryHandle transfer_buffer,
    uint32 size) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterTransferBuffer");

  // Take ownership of the memory and map it into this process.
  // This validates the size.
  scoped_ptr<base::SharedMemory> shared_memory(
      new base::SharedMemory(transfer_buffer, false));
  if (!shared_memory->Map(size)) {
    DVLOG(0) << "Failed to map shared memory.";
    return;
  }

  if (command_buffer_) {
    command_buffer_->RegisterTransferBuffer(
        id, gpu::MakeBackingFromSharedMemory(shared_memory.Pass(), size));
  }
}

void GpuCommandBufferStub::OnDestroyTransferBuffer(int32 id) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyTransferBuffer");

  if (command_buffer_)
    command_buffer_->DestroyTransferBuffer(id);
}

void GpuCommandBufferStub::OnCommandProcessed() {
  if (watchdog_)
    watchdog_->CheckArmed();
}

void GpuCommandBufferStub::ReportState() { command_buffer_->UpdateState(); }

void GpuCommandBufferStub::PutChanged() {
  FastSetActiveURL(active_url_, active_url_hash_);
  scheduler_->PutChanged();
}

void GpuCommandBufferStub::OnCreateVideoDecoder(
    media::VideoCodecProfile profile,
    int32 decoder_route_id,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoDecoder");
  GpuVideoDecodeAccelerator* decoder = new GpuVideoDecodeAccelerator(
      decoder_route_id, this, channel_->io_message_loop());
  decoder->Initialize(profile, reply_message);
  // decoder is registered as a DestructionObserver of this stub and will
  // self-delete during destruction of this stub.
}

void GpuCommandBufferStub::OnCreateVideoEncoder(
    media::VideoFrame::Format input_format,
    const gfx::Size& input_visible_size,
    media::VideoCodecProfile output_profile,
    uint32 initial_bitrate,
    int32 encoder_route_id,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoEncoder");
  GpuVideoEncodeAccelerator* encoder =
      new GpuVideoEncodeAccelerator(encoder_route_id, this);
  encoder->Initialize(input_format,
                      input_visible_size,
                      output_profile,
                      initial_bitrate,
                      reply_message);
  // encoder is registered as a DestructionObserver of this stub and will
  // self-delete during destruction of this stub.
}

void GpuCommandBufferStub::OnSetSurfaceVisible(bool visible) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetSurfaceVisible");
  if (memory_manager_client_state_)
    memory_manager_client_state_->SetVisible(visible);
}

void GpuCommandBufferStub::AddSyncPoint(uint32 sync_point) {
  sync_points_.push_back(sync_point);
}

void GpuCommandBufferStub::OnRetireSyncPoint(uint32 sync_point) {
  DCHECK(!sync_points_.empty() && sync_points_.front() == sync_point);
  sync_points_.pop_front();
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->RetireSyncPoint(sync_point);
}

bool GpuCommandBufferStub::OnWaitSyncPoint(uint32 sync_point) {
  if (!sync_point)
    return true;
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  if (manager->sync_point_manager()->IsSyncPointRetired(sync_point))
    return true;

  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitSyncPoint", this,
                             "GpuCommandBufferStub", this);
  }
  scheduler_->SetScheduled(false);
  ++sync_point_wait_count_;
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point,
      base::Bind(&GpuCommandBufferStub::OnSyncPointRetired,
                 this->AsWeakPtr()));
  return scheduler_->IsScheduled();
}

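// Balances the SetScheduled(false) in OnWaitSyncPoint(): once the awaited
// sync point retires, the stub is rescheduled and the async trace event is
// closed.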
void GpuCommandBufferStub::OnSyncPointRetired() {
  --sync_point_wait_count_;
  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_END1("gpu", "WaitSyncPoint", this,
                           "GpuCommandBufferStub", this);
  }
  scheduler_->SetScheduled(true);
}

void GpuCommandBufferStub::OnSignalSyncPoint(uint32 sync_point, uint32 id) {
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point,
      base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
                 this->AsWeakPtr(),
                 id));
}

void GpuCommandBufferStub::OnSignalSyncPointAck(uint32 id) {
  Send(new GpuCommandBufferMsg_SignalSyncPointAck(route_id_, id));
}

void GpuCommandBufferStub::OnSignalQuery(uint32 query_id, uint32 id) {
  if (decoder_) {
    gpu::gles2::QueryManager* query_manager = decoder_->GetQueryManager();
    if (query_manager) {
      gpu::gles2::QueryManager::Query* query =
          query_manager->GetQuery(query_id);
      if (query) {
        query->AddCallback(
            base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
                       this->AsWeakPtr(),
                       id));
        return;
      }
    }
  }
  // Something went wrong, run callback immediately.
  OnSignalSyncPointAck(id);
}

void GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback(
    bool has_callback) {
  TRACE_EVENT0(
      "gpu",
      "GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback");
  if (has_callback) {
    if (!memory_manager_client_state_) {
      memory_manager_client_state_.reset(GetMemoryManager()->CreateClientState(
          this, surface_id_ != 0, true));
    }
  } else {
    memory_manager_client_state_.reset();
  }
}

void GpuCommandBufferStub::OnCreateImage(int32 id,
                                         gfx::GpuMemoryBufferHandle handle,
                                         gfx::Size size,
                                         gfx::GpuMemoryBuffer::Format format,
                                         uint32 internalformat) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateImage");

  if (!decoder_)
    return;

  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  if (image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image already exists with same ID.";
    return;
  }

  if (!IsSupportedImageFormat(decoder_->GetCapabilities(), format)) {
    LOG(ERROR) << "Image format is not supported.";
    return;
  }

  scoped_refptr<gfx::GLImage> image = channel()->CreateImageForGpuMemoryBuffer(
      handle, size, format, internalformat);
  if (!image.get())
    return;

  image_manager->AddImage(image.get(), id);
}

void GpuCommandBufferStub::OnDestroyImage(int32 id) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyImage");

  if (!decoder_)
    return;

  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  if (!image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image with ID doesn't exist.";
    return;
  }

  image_manager->RemoveImage(id);
}

void GpuCommandBufferStub::SendConsoleMessage(
    int32 id,
    const std::string& message) {
  GPUCommandBufferConsoleMessage console_message;
  console_message.id = id;
  console_message.message = message;
  IPC::Message* msg = new GpuCommandBufferMsg_ConsoleMsg(
      route_id_, console_message);
  msg->set_unblock(true);
  Send(msg);
}

void GpuCommandBufferStub::SendCachedShader(
    const std::string& key, const std::string& shader) {
  channel_->CacheShader(key, shader);
}

void GpuCommandBufferStub::AddDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.AddObserver(observer);
}

void GpuCommandBufferStub::RemoveDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.RemoveObserver(observer);
}

void GpuCommandBufferStub::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> flag) {
  preemption_flag_ = flag;
  if (scheduler_)
    scheduler_->SetPreemptByFlag(preemption_flag_);
}

bool GpuCommandBufferStub::GetTotalGpuMemory(uint64* bytes) {
  *bytes = total_gpu_memory_;
  return !!total_gpu_memory_;
}

gfx::Size GpuCommandBufferStub::GetSurfaceSize() const {
  if (!surface_.get())
    return gfx::Size();
  return surface_->GetSize();
}

gpu::gles2::MemoryTracker* GpuCommandBufferStub::GetMemoryTracker() const {
  return context_group_->memory_tracker();
}

void GpuCommandBufferStub::SetMemoryAllocation(
    const gpu::MemoryAllocation& allocation) {
  if (!last_memory_allocation_valid_ ||
      !allocation.Equals(last_memory_allocation_)) {
    Send(new GpuCommandBufferMsg_SetMemoryAllocation(
        route_id_, allocation));
  }

  last_memory_allocation_valid_ = true;
  last_memory_allocation_ = allocation;
}

void GpuCommandBufferStub::SuggestHaveFrontBuffer(
    bool suggest_have_frontbuffer) {
  // This can be called outside of OnMessageReceived, so the context needs
  // to be made current before calling methods on the surface.
  if (surface_.get() && MakeCurrent())
    surface_->SetFrontbufferAllocation(suggest_have_frontbuffer);
}

bool GpuCommandBufferStub::CheckContextLost() {
  DCHECK(command_buffer_);
  gpu::CommandBuffer::State state = command_buffer_->GetLastState();
  bool was_lost = state.error == gpu::error::kLostContext;
  // Lose all other contexts if the reset was triggered by the robustness
  // extension instead of being synthetic.
  if (was_lost && decoder_ && decoder_->WasContextLostByRobustnessExtension() &&
      (gfx::GLContext::LosesAllContextsOnContextLost() ||
       use_virtualized_gl_context_))
    channel_->LoseAllContexts();
  CheckCompleteWaits();
  return was_lost;
}

void GpuCommandBufferStub::MarkContextLost() {
  if (!command_buffer_ ||
      command_buffer_->GetLastState().error == gpu::error::kLostContext)
    return;

  command_buffer_->SetContextLostReason(gpu::error::kUnknown);
  if (decoder_)
    decoder_->LoseContext(GL_UNKNOWN_CONTEXT_RESET_ARB);
  command_buffer_->SetParseError(gpu::error::kLostContext);
}

uint64 GpuCommandBufferStub::GetMemoryUsage() const {
  return GetMemoryManager()->GetClientMemoryUsage(this);
}

void GpuCommandBufferStub::SendSwapBuffersCompleted(
    const std::vector<ui::LatencyInfo>& latency_info) {
  Send(new GpuCommandBufferMsg_SwapBuffersCompleted(route_id_, latency_info));
}

void GpuCommandBufferStub::SendUpdateVSyncParameters(base::TimeTicks timebase,
                                                     base::TimeDelta interval) {
  Send(new GpuCommandBufferMsg_UpdateVSyncParameters(route_id_, timebase,
                                                     interval));
}

}  // namespace content