// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/debug/trace_event.h"
#include "base/json/json_writer.h"
#include "base/memory/shared_memory.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "content/common/gpu/devtools_gpu_instrumentation.h"
#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_command_buffer_stub.h"
#include "content/common/gpu/gpu_memory_buffer_factory.h"
#include "content/common/gpu/gpu_memory_manager.h"
#include "content/common/gpu/gpu_memory_tracking.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/gpu_watchdog.h"
#include "content/common/gpu/image_transport_surface.h"
#include "content/common/gpu/media/gpu_video_decode_accelerator.h"
#include "content/common/gpu/media/gpu_video_encode_accelerator.h"
#include "content/common/gpu/sync_point_manager.h"
#include "content/public/common/content_client.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/service/gl_context_virtual.h"
#include "gpu/command_buffer/service/gl_state_restorer_impl.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/logger.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/query_manager.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_switches.h"

#include "content/public/common/sandbox_init.h"
#if defined(OS_ANDROID)
#include "content/common/gpu/stream_texture_android.h"
#endif

namespace content {
struct WaitForCommandState {
  WaitForCommandState(int32 start, int32 end, IPC::Message* reply)
      : start(start), end(end), reply(reply) {}

  int32 start;
  int32 end;
  scoped_ptr<IPC::Message> reply;
};
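
// A WaitForCommandState holds the delayed IPC reply for a
// WaitForTokenInRange/WaitForGetOffsetInRange request until the token or get
// offset enters [start, end] (see CheckCompleteWaits() below).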
// The GpuCommandBufferMemoryTracker class provides a bridge between the
// ContextGroup's memory type managers and the GpuMemoryManager class.
class GpuCommandBufferMemoryTracker : public gpu::gles2::MemoryTracker {
 public:
  explicit GpuCommandBufferMemoryTracker(GpuChannel* channel) :
      tracking_group_(channel->gpu_channel_manager()->gpu_memory_manager()->
          CreateTrackingGroup(channel->renderer_pid(), this)) {
  }

  virtual void TrackMemoryAllocatedChange(
      size_t old_size,
      size_t new_size,
      gpu::gles2::MemoryTracker::Pool pool) OVERRIDE {
    tracking_group_->TrackMemoryAllocatedChange(
        old_size, new_size, pool);
  }

  virtual bool EnsureGPUMemoryAvailable(size_t size_needed) OVERRIDE {
    return tracking_group_->EnsureGPUMemoryAvailable(size_needed);
  }

 private:
  virtual ~GpuCommandBufferMemoryTracker() {
  }

  scoped_ptr<GpuMemoryTrackingGroup> tracking_group_;

  DISALLOW_COPY_AND_ASSIGN(GpuCommandBufferMemoryTracker);
};
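
// The private destructor above reflects that gpu::gles2::MemoryTracker
// instances are managed by reference counting rather than deleted directly;
// the ContextGroup created in the GpuCommandBufferStub constructor holds the
// reference to this tracker.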
// FastSetActiveURL will shortcut the expensive call to SetActiveURL when the
// url_hash matches.
void FastSetActiveURL(const GURL& url, size_t url_hash) {
  // Leave the previously set URL in the empty case -- empty URLs are given by
  // WebKitPlatformSupportImpl::createOffscreenGraphicsContext3D. Hopefully the
  // onscreen context URL was set previously and will show up even when a crash
  // occurs during offscreen command processing.
  if (url.is_empty())
    return;
  static size_t g_last_url_hash = 0;
  if (url_hash != g_last_url_hash) {
    g_last_url_hash = url_hash;
    GetContentClient()->SetActiveURL(url);
  }
}
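
// g_last_url_hash is file-level state shared by every stub in the process, so
// the active (crash-reporting) URL is only updated when a stub with a
// different URL hash starts processing work.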
// The first time polling a fence, delay some extra time to allow other
// stubs to process some work, or else the timing of the fences could
// allow a pattern of alternating fast and slow frames to occur.
const int64 kHandleMoreWorkPeriodMs = 2;
const int64 kHandleMoreWorkPeriodBusyMs = 1;

// Prevents idle work from being starved.
const int64 kMaxTimeSinceIdleMs = 10;
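
// kHandleMoreWorkPeriodMs is used right after a message is handled (see
// OnMessageReceived()), while kHandleMoreWorkPeriodBusyMs is used when
// PollWork() reschedules itself.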
class DevToolsChannelData : public base::debug::ConvertableToTraceFormat {
 public:
  static scoped_refptr<base::debug::ConvertableToTraceFormat> CreateForChannel(
      GpuChannel* channel);

  virtual void AppendAsTraceFormat(std::string* out) const OVERRIDE {
    std::string tmp;
    base::JSONWriter::Write(value_.get(), &tmp);
    *out += tmp;
  }

 private:
  explicit DevToolsChannelData(base::Value* value) : value_(value) {}
  virtual ~DevToolsChannelData() {}
  scoped_ptr<base::Value> value_;
  DISALLOW_COPY_AND_ASSIGN(DevToolsChannelData);
};
scoped_refptr<base::debug::ConvertableToTraceFormat>
DevToolsChannelData::CreateForChannel(GpuChannel* channel) {
  scoped_ptr<base::DictionaryValue> res(new base::DictionaryValue);
  res->SetInteger("renderer_pid", channel->renderer_pid());
  res->SetDouble("used_bytes", channel->GetMemoryUsage());
  res->SetDouble("limit_bytes",
                 channel->gpu_channel_manager()
                     ->gpu_memory_manager()
                     ->GetMaximumClientAllocation());
  return new DevToolsChannelData(res.release());
}
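
// The dictionary built above is attached as the argument of the
// devtools.timeline trace event emitted at the top of OnMessageReceived(), so
// DevTools can correlate GPU work with the renderer that issued it.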
GpuCommandBufferStub::GpuCommandBufferStub(
    GpuChannel* channel,
    GpuCommandBufferStub* share_group,
    const gfx::GLSurfaceHandle& handle,
    gpu::gles2::MailboxManager* mailbox_manager,
    const gfx::Size& size,
    const gpu::gles2::DisallowedFeatures& disallowed_features,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    bool use_virtualized_gl_context,
    int32 route_id,
    int32 surface_id,
    GpuWatchdog* watchdog,
    const GURL& active_url)
    : channel_(channel),
      handle_(handle),
      disallowed_features_(disallowed_features),
      requested_attribs_(attribs),
      gpu_preference_(gpu_preference),
      use_virtualized_gl_context_(use_virtualized_gl_context),
      route_id_(route_id),
      surface_id_(surface_id),
      last_flush_count_(0),
      last_memory_allocation_valid_(false),
      watchdog_(watchdog),
      sync_point_wait_count_(0),
      delayed_work_scheduled_(false),
      previous_messages_processed_(0),
      active_url_(active_url),
      total_gpu_memory_(0) {
  active_url_hash_ = base::Hash(active_url.possibly_invalid_spec());
  FastSetActiveURL(active_url_, active_url_hash_);

  gpu::gles2::ContextCreationAttribHelper attrib_parser;
  attrib_parser.Parse(requested_attribs_);

  if (share_group) {
    context_group_ = share_group->context_group_;
    DCHECK(context_group_->bind_generates_resource() ==
           attrib_parser.bind_generates_resource);
  } else {
    context_group_ = new gpu::gles2::ContextGroup(
        new GpuCommandBufferMemoryTracker(channel),
        channel_->gpu_channel_manager()->shader_translator_cache(),
        attrib_parser.bind_generates_resource);
  }

  use_virtualized_gl_context_ |=
      context_group_->feature_info()->workarounds().use_virtualized_gl_contexts;
}
GpuCommandBufferStub::~GpuCommandBufferStub() {
  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DestroyCommandBuffer(surface_id()));
}
GpuMemoryManager* GpuCommandBufferStub::GetMemoryManager() const {
  return channel()->gpu_channel_manager()->gpu_memory_manager();
}
bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"),
               "GPUTask",
               "data",
               DevToolsChannelData::CreateForChannel(channel()));
  // TODO(yurys): remove devtools_gpu_instrumentation call once DevTools
  // Timeline migrates to tracing crbug.com/361045.
  devtools_gpu_instrumentation::ScopedGpuTask task(channel());
  FastSetActiveURL(active_url_, active_url_hash_);

  bool have_context = false;
  // Ensure the appropriate GL context is current before handling any IPC
  // messages directed at the command buffer. This ensures that the message
  // handler can assume that the context is current (not necessary for
  // Echo, RetireSyncPoint, or WaitSyncPoint).
  if (decoder_.get() && message.type() != GpuCommandBufferMsg_Echo::ID &&
      message.type() != GpuCommandBufferMsg_WaitForTokenInRange::ID &&
      message.type() != GpuCommandBufferMsg_WaitForGetOffsetInRange::ID &&
      message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID &&
      message.type() != GpuCommandBufferMsg_SetLatencyInfo::ID) {
    if (!MakeCurrent())
      return false;
    have_context = true;
  }

  // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers
  // here. This is so the reply can be delayed if the scheduler is unscheduled.
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize,
                                    OnInitialize);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetGetBuffer,
                                    OnSetGetBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ProduceFrontBuffer,
                        OnProduceFrontBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Echo, OnEcho);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForTokenInRange,
                                    OnWaitForTokenInRange);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForGetOffsetInRange,
                                    OnWaitForGetOffsetInRange);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetLatencyInfo, OnSetLatencyInfo);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer,
                        OnRegisterTransferBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer,
                        OnDestroyTransferBuffer);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoDecoder,
                                    OnCreateVideoDecoder)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoEncoder,
                                    OnCreateVideoEncoder)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetSurfaceVisible,
                        OnSetSurfaceVisible)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RetireSyncPoint,
                        OnRetireSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPoint,
                        OnSignalSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalQuery,
                        OnSignalQuery)
    IPC_MESSAGE_HANDLER(
        GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback,
        OnSetClientHasMemoryAllocationChangedCallback)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterGpuMemoryBuffer,
                        OnRegisterGpuMemoryBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_UnregisterGpuMemoryBuffer,
                        OnUnregisterGpuMemoryBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateStreamTexture,
                        OnCreateStreamTexture)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  CheckCompleteWaits();

  // Ensure that any delayed work that was created will be handled.
  ScheduleDelayedWork(kHandleMoreWorkPeriodMs);

  return handled;
}
bool GpuCommandBufferStub::Send(IPC::Message* message) {
  return channel_->Send(message);
}
bool GpuCommandBufferStub::IsScheduled() {
  return (!scheduler_.get() || scheduler_->IsScheduled());
}

bool GpuCommandBufferStub::HasMoreWork() {
  return scheduler_.get() && scheduler_->HasMoreWork();
}
void GpuCommandBufferStub::PollWork() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork");
  delayed_work_scheduled_ = false;
  FastSetActiveURL(active_url_, active_url_hash_);
  if (decoder_.get() && !MakeCurrent())
    return;

  if (scheduler_) {
    bool fences_complete = scheduler_->PollUnscheduleFences();
    // Perform idle work if all fences are complete.
    if (fences_complete) {
      uint64 current_messages_processed =
          channel()->gpu_channel_manager()->MessagesProcessed();
      // We're idle when no messages were processed or scheduled.
      bool is_idle =
          (previous_messages_processed_ == current_messages_processed) &&
          !channel()->gpu_channel_manager()->HandleMessagesScheduled();
      if (!is_idle && !last_idle_time_.is_null()) {
        base::TimeDelta time_since_idle =
            base::TimeTicks::Now() - last_idle_time_;
        base::TimeDelta max_time_since_idle =
            base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs);

        // Force idle when it's been too long since last time we were idle.
        if (time_since_idle > max_time_since_idle)
          is_idle = true;
      }

      if (is_idle) {
        last_idle_time_ = base::TimeTicks::Now();
        scheduler_->PerformIdleWork();
      }
    }
  }
  ScheduleDelayedWork(kHandleMoreWorkPeriodBusyMs);
}
bool GpuCommandBufferStub::HasUnprocessedCommands() {
  if (command_buffer_) {
    gpu::CommandBuffer::State state = command_buffer_->GetLastState();
    return state.put_offset != state.get_offset &&
        !gpu::error::IsError(state.error);
  }
  return false;
}
void GpuCommandBufferStub::ScheduleDelayedWork(int64 delay) {
  if (!HasMoreWork()) {
    last_idle_time_ = base::TimeTicks();
    return;
  }

  if (delayed_work_scheduled_)
    return;
  delayed_work_scheduled_ = true;

  // Idle when no messages are processed between now and when
  // PollWork is called.
  previous_messages_processed_ =
      channel()->gpu_channel_manager()->MessagesProcessed();
  if (last_idle_time_.is_null())
    last_idle_time_ = base::TimeTicks::Now();

  // IsScheduled() returns true after passing all unschedule fences
  // and this is when we can start performing idle work. Idle work
  // is done synchronously so we can set delay to 0 and instead poll
  // for more work at the rate idle work is performed. This also ensures
  // that idle work is done as efficiently as possible without any
  // unnecessary delays.
  if (scheduler_.get() &&
      scheduler_->IsScheduled() &&
      scheduler_->HasMoreIdleWork()) {
    delay = 0;
  }

  base::MessageLoop::current()->PostDelayedTask(
      FROM_HERE,
      base::Bind(&GpuCommandBufferStub::PollWork, AsWeakPtr()),
      base::TimeDelta::FromMilliseconds(delay));
}
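
// Delayed work is driven entirely from the GPU main thread's message loop:
// PollWork() keeps rescheduling itself through ScheduleDelayedWork() until
// HasMoreWork() returns false.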
void GpuCommandBufferStub::OnEcho(const IPC::Message& message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnEcho");
  Send(new IPC::Message(message));
}
bool GpuCommandBufferStub::MakeCurrent() {
  if (decoder_->MakeCurrent())
    return true;
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
  command_buffer_->SetParseError(gpu::error::kLostContext);
  return false;
}
void GpuCommandBufferStub::Destroy() {
  if (wait_for_token_) {
    Send(wait_for_token_->reply.release());
    wait_for_token_.reset();
  }
  if (wait_for_get_offset_) {
    Send(wait_for_get_offset_->reply.release());
    wait_for_get_offset_.reset();
  }
  if (handle_.is_null() && !active_url_.is_empty()) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(new GpuHostMsg_DidDestroyOffscreenContext(
        active_url_));
  }

  memory_manager_client_state_.reset();

  while (!sync_points_.empty())
    OnRetireSyncPoint(sync_points_.front());

  if (decoder_)
    decoder_->set_engine(NULL);

  // The scheduler has raw references to the decoder and the command buffer so
  // destroy it before those.
  scheduler_.reset();

  bool have_context = false;
  if (decoder_ && command_buffer_ &&
      command_buffer_->GetLastState().error != gpu::error::kLostContext)
    have_context = decoder_->MakeCurrent();
  FOR_EACH_OBSERVER(DestructionObserver,
                    destruction_observers_,
                    OnWillDestroyStub());

  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }

  command_buffer_.reset();

  // Remove this after crbug.com/248395 is sorted out.
  surface_ = NULL;
}
void GpuCommandBufferStub::OnInitializeFailed(IPC::Message* reply_message) {
  Destroy();
  GpuCommandBufferMsg_Initialize::WriteReplyParams(
      reply_message, false, gpu::Capabilities());
  Send(reply_message);
}
void GpuCommandBufferStub::OnInitialize(
    base::SharedMemoryHandle shared_state_handle,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnInitialize");
  DCHECK(!command_buffer_.get());

  scoped_ptr<base::SharedMemory> shared_state_shm(
      new base::SharedMemory(shared_state_handle, false));

  command_buffer_.reset(new gpu::CommandBufferService(
      context_group_->transfer_buffer_manager()));

  bool result = command_buffer_->Initialize();
  DCHECK(result);

  decoder_.reset(::gpu::gles2::GLES2Decoder::Create(context_group_.get()));

  scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(),
                                         decoder_.get(),
                                         decoder_.get()));
  if (preemption_flag_.get())
    scheduler_->SetPreemptByFlag(preemption_flag_);

  decoder_->set_engine(scheduler_.get());

  if (!handle_.is_null()) {
#if defined(OS_MACOSX) || defined(UI_COMPOSITOR_IMAGE_TRANSPORT)
    if (software_) {
      LOG(ERROR) << "No software support.";
      OnInitializeFailed(reply_message);
      return;
    }
#endif

    surface_ = ImageTransportSurface::CreateSurface(
        channel_->gpu_channel_manager(),
        this,
        handle_);
  } else {
    GpuChannelManager* manager = channel_->gpu_channel_manager();
    surface_ = manager->GetDefaultOffscreenSurface();
  }

  if (!surface_.get()) {
    DLOG(ERROR) << "Failed to create surface.";
    OnInitializeFailed(reply_message);
    return;
  }

  scoped_refptr<gfx::GLContext> context;
  if (use_virtualized_gl_context_ && channel_->share_group()) {
    context = channel_->share_group()->GetSharedContext();
    if (!context.get()) {
      context = gfx::GLContext::CreateGLContext(
          channel_->share_group(),
          channel_->gpu_channel_manager()->GetDefaultOffscreenSurface(),
          gpu_preference_);
      if (!context.get()) {
        DLOG(ERROR) << "Failed to create shared context for virtualization.";
        OnInitializeFailed(reply_message);
        return;
      }

      channel_->share_group()->SetSharedContext(context.get());
    }

    // This should be a non-virtual GL context.
    DCHECK(context->GetHandle());
    context = new gpu::GLContextVirtual(
        channel_->share_group(), context.get(), decoder_->AsWeakPtr());
    if (!context->Initialize(surface_.get(), gpu_preference_)) {
      // TODO(sievers): The real context created above for the default
      // offscreen surface might not be compatible with this surface.
      // Need to adjust at least GLX to be able to create the initial context
      // with a config that is compatible with onscreen and offscreen surfaces.
      DLOG(ERROR) << "Failed to initialize virtual GL context.";
      OnInitializeFailed(reply_message);
      return;
    }
  }
  if (!context.get()) {
    context = gfx::GLContext::CreateGLContext(
        channel_->share_group(), surface_.get(), gpu_preference_);
  }
  if (!context.get()) {
    DLOG(ERROR) << "Failed to create context.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Failed to make context current.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->GetGLStateRestorer()) {
    context->SetGLStateRestorer(
        new gpu::GLStateRestorerImpl(decoder_->AsWeakPtr()));
  }

  if (!context->GetTotalGpuMemory(&total_gpu_memory_))
    total_gpu_memory_ = 0;

  if (!context_group_->has_program_cache()) {
    context_group_->set_program_cache(
        channel_->gpu_channel_manager()->program_cache());
  }

  // Initialize the decoder with either the view or pbuffer GLContext.
  if (!decoder_->Initialize(surface_,
                            context,
                            !surface_id(),
                            initial_size_,
                            disallowed_features_,
                            requested_attribs_)) {
    DLOG(ERROR) << "Failed to initialize decoder.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kEnableGPUServiceLogging)) {
    decoder_->set_log_commands(true);
  }

  decoder_->GetLogger()->SetMsgCallback(
      base::Bind(&GpuCommandBufferStub::SendConsoleMessage,
                 base::Unretained(this)));
  decoder_->SetShaderCacheCallback(
      base::Bind(&GpuCommandBufferStub::SendCachedShader,
                 base::Unretained(this)));
  decoder_->SetWaitSyncPointCallback(
      base::Bind(&GpuCommandBufferStub::OnWaitSyncPoint,
                 base::Unretained(this)));

  command_buffer_->SetPutOffsetChangeCallback(
      base::Bind(&GpuCommandBufferStub::PutChanged, base::Unretained(this)));
  command_buffer_->SetGetBufferChangeCallback(
      base::Bind(&gpu::GpuScheduler::SetGetBuffer,
                 base::Unretained(scheduler_.get())));
  command_buffer_->SetParseErrorCallback(
      base::Bind(&GpuCommandBufferStub::OnParseError, base::Unretained(this)));
  scheduler_->SetSchedulingChangedCallback(
      base::Bind(&GpuChannel::StubSchedulingChanged,
                 base::Unretained(channel_)));

  if (watchdog_) {
    scheduler_->SetCommandProcessedCallback(
        base::Bind(&GpuCommandBufferStub::OnCommandProcessed,
                   base::Unretained(this)));
  }

  const size_t kSharedStateSize = sizeof(gpu::CommandBufferSharedState);
  if (!shared_state_shm->Map(kSharedStateSize)) {
    DLOG(ERROR) << "Failed to map shared state buffer.";
    OnInitializeFailed(reply_message);
    return;
  }
  command_buffer_->SetSharedStateBuffer(gpu::MakeBackingFromSharedMemory(
      shared_state_shm.Pass(), kSharedStateSize));

  gpu::Capabilities capabilities = decoder_->GetCapabilities();
  capabilities.future_sync_points = channel_->allow_future_sync_points();

  GpuCommandBufferMsg_Initialize::WriteReplyParams(
      reply_message, true, capabilities);
  Send(reply_message);

  if (handle_.is_null() && !active_url_.is_empty()) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(new GpuHostMsg_DidCreateOffscreenContext(
        active_url_));
  }
}
void GpuCommandBufferStub::OnSetLatencyInfo(
    const std::vector<ui::LatencyInfo>& latency_info) {
  if (!ui::LatencyInfo::Verify(latency_info,
                               "GpuCommandBufferStub::OnSetLatencyInfo"))
    return;
  if (!latency_info_callback_.is_null())
    latency_info_callback_.Run(latency_info);
}
void GpuCommandBufferStub::OnCreateStreamTexture(
    uint32 texture_id, int32 stream_id, bool* succeeded) {
#if defined(OS_ANDROID)
  *succeeded = StreamTexture::Create(this, texture_id, stream_id);
#else
  *succeeded = false;
#endif
}
void GpuCommandBufferStub::SetLatencyInfoCallback(
    const LatencyInfoCallback& callback) {
  latency_info_callback_ = callback;
}
int32 GpuCommandBufferStub::GetRequestedAttribute(int attr) const {
  // The requested attributes are stored as pairs of (enum, value); search for
  // the requested attribute and return its value.
  for (std::vector<int32>::const_iterator it = requested_attribs_.begin();
       it != requested_attribs_.end(); ++it) {
    if (*it++ == attr) {
      return *it;
    }
  }
  return -1;
}
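
// For example, with requested_attribs_ = { <attrib enum>, 8, ... },
// GetRequestedAttribute(<attrib enum>) returns 8, while an attribute that was
// never requested yields -1.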
void GpuCommandBufferStub::OnSetGetBuffer(int32 shm_id,
                                          IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetGetBuffer");
  if (command_buffer_)
    command_buffer_->SetGetBuffer(shm_id);
  Send(reply_message);
}
void GpuCommandBufferStub::OnProduceFrontBuffer(const gpu::Mailbox& mailbox) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnProduceFrontBuffer");
  if (!decoder_) {
    LOG(ERROR) << "Can't produce front buffer before initialization.";
    return;
  }

  decoder_->ProduceFrontBuffer(mailbox);
}
void GpuCommandBufferStub::OnParseError() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnParseError");
  DCHECK(command_buffer_.get());
  gpu::CommandBuffer::State state = command_buffer_->GetLastState();
  IPC::Message* msg = new GpuCommandBufferMsg_Destroyed(
      route_id_, state.context_lost_reason);
  msg->set_unblock(true);
  Send(msg);

  // Tell the browser about this context loss as well, so it can
  // determine whether client APIs like WebGL need to be immediately
  // blocked from automatically running.
  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DidLoseContext(
      handle_.is_null(), state.context_lost_reason, active_url_));
}
void GpuCommandBufferStub::OnWaitForTokenInRange(int32 start,
                                                 int32 end,
                                                 IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForTokenInRange");
  DCHECK(command_buffer_.get());
  if (wait_for_token_)
    LOG(ERROR) << "Got WaitForToken command while currently waiting for token.";
  wait_for_token_ =
      make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
  CheckCompleteWaits();
}
void GpuCommandBufferStub::OnWaitForGetOffsetInRange(
    int32 start,
    int32 end,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForGetOffsetInRange");
  DCHECK(command_buffer_.get());
  if (wait_for_get_offset_) {
    LOG(ERROR)
        << "Got WaitForGetOffset command while currently waiting for offset.";
  }
  wait_for_get_offset_ =
      make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
  CheckCompleteWaits();
}
void GpuCommandBufferStub::CheckCompleteWaits() {
  if (wait_for_token_ || wait_for_get_offset_) {
    gpu::CommandBuffer::State state = command_buffer_->GetLastState();
    if (wait_for_token_ &&
        (gpu::CommandBuffer::InRange(
             wait_for_token_->start, wait_for_token_->end, state.token) ||
         state.error != gpu::error::kNoError)) {
      GpuCommandBufferMsg_WaitForTokenInRange::WriteReplyParams(
          wait_for_token_->reply.get(), state);
      Send(wait_for_token_->reply.release());
      wait_for_token_.reset();
    }
    if (wait_for_get_offset_ &&
        (gpu::CommandBuffer::InRange(wait_for_get_offset_->start,
                                     wait_for_get_offset_->end,
                                     state.get_offset) ||
         state.error != gpu::error::kNoError)) {
      GpuCommandBufferMsg_WaitForGetOffsetInRange::WriteReplyParams(
          wait_for_get_offset_->reply.get(), state);
      Send(wait_for_get_offset_->reply.release());
      wait_for_get_offset_.reset();
    }
  }
}
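
// The flush_count check below tolerates wrap-around of the unsigned counter:
// an unsigned difference from last_flush_count_ that is smaller than
// 0x8000000U is treated as a newer (in-order) flush; anything else is an
// out-of-order message and is ignored.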
void GpuCommandBufferStub::OnAsyncFlush(int32 put_offset, uint32 flush_count) {
  TRACE_EVENT1(
      "gpu", "GpuCommandBufferStub::OnAsyncFlush", "put_offset", put_offset);
  DCHECK(command_buffer_.get());
  if (flush_count - last_flush_count_ < 0x8000000U) {
    last_flush_count_ = flush_count;
    command_buffer_->Flush(put_offset);
  } else {
    // We received this message out-of-order. This should not happen but is
    // here to catch regressions. Ignore the message.
    NOTREACHED() << "Received a Flush message out-of-order";
  }
}
void GpuCommandBufferStub::OnRescheduled() {
  gpu::CommandBuffer::State pre_state = command_buffer_->GetLastState();
  command_buffer_->Flush(pre_state.put_offset);
  gpu::CommandBuffer::State post_state = command_buffer_->GetLastState();

  if (pre_state.get_offset != post_state.get_offset)
    ReportState();
}
void GpuCommandBufferStub::OnRegisterTransferBuffer(
    int32 id,
    base::SharedMemoryHandle transfer_buffer,
    uint32 size) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterTransferBuffer");

  // Take ownership of the memory and map it into this process.
  // This validates the size.
  scoped_ptr<base::SharedMemory> shared_memory(
      new base::SharedMemory(transfer_buffer, false));
  if (!shared_memory->Map(size)) {
    DVLOG(0) << "Failed to map shared memory.";
    return;
  }

  if (command_buffer_) {
    command_buffer_->RegisterTransferBuffer(
        id, gpu::MakeBackingFromSharedMemory(shared_memory.Pass(), size));
  }
}
void GpuCommandBufferStub::OnDestroyTransferBuffer(int32 id) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyTransferBuffer");

  if (command_buffer_)
    command_buffer_->DestroyTransferBuffer(id);
}
void GpuCommandBufferStub::OnCommandProcessed() {
  if (watchdog_)
    watchdog_->CheckArmed();
}

void GpuCommandBufferStub::ReportState() { command_buffer_->UpdateState(); }
void GpuCommandBufferStub::PutChanged() {
  FastSetActiveURL(active_url_, active_url_hash_);
  scheduler_->PutChanged();
}
void GpuCommandBufferStub::OnCreateVideoDecoder(
    media::VideoCodecProfile profile,
    int32 decoder_route_id,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoDecoder");
  GpuVideoDecodeAccelerator* decoder = new GpuVideoDecodeAccelerator(
      decoder_route_id, this, channel_->io_message_loop());
  decoder->Initialize(profile, reply_message);
  // decoder is registered as a DestructionObserver of this stub and will
  // self-delete during destruction of this stub.
}
void GpuCommandBufferStub::OnCreateVideoEncoder(
    media::VideoFrame::Format input_format,
    const gfx::Size& input_visible_size,
    media::VideoCodecProfile output_profile,
    uint32 initial_bitrate,
    int32 encoder_route_id,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoEncoder");
  GpuVideoEncodeAccelerator* encoder =
      new GpuVideoEncodeAccelerator(encoder_route_id, this);
  encoder->Initialize(input_format,
                      input_visible_size,
                      output_profile,
                      initial_bitrate,
                      reply_message);
  // encoder is registered as a DestructionObserver of this stub and will
  // self-delete during destruction of this stub.
}
void GpuCommandBufferStub::OnSetSurfaceVisible(bool visible) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetSurfaceVisible");
  if (memory_manager_client_state_)
    memory_manager_client_state_->SetVisible(visible);
}
void GpuCommandBufferStub::AddSyncPoint(uint32 sync_point) {
  sync_points_.push_back(sync_point);
}
void GpuCommandBufferStub::OnRetireSyncPoint(uint32 sync_point) {
  DCHECK(!sync_points_.empty() && sync_points_.front() == sync_point);
  sync_points_.pop_front();
  if (context_group_->mailbox_manager()->UsesSync() && MakeCurrent())
    context_group_->mailbox_manager()->PushTextureUpdates();
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->RetireSyncPoint(sync_point);
}
bool GpuCommandBufferStub::OnWaitSyncPoint(uint32 sync_point) {
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  if (manager->sync_point_manager()->IsSyncPointRetired(sync_point))
    return true;

  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitSyncPoint", this,
                             "GpuCommandBufferStub", this);
  }
  scheduler_->SetScheduled(false);
  ++sync_point_wait_count_;
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point,
      base::Bind(&GpuCommandBufferStub::OnSyncPointRetired,
                 this->AsWeakPtr()));
  return scheduler_->IsScheduled();
}
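
// While a wait is pending the stub is descheduled; OnSyncPointRetired() below
// re-schedules it once the sync point retires. sync_point_wait_count_ keeps
// the "WaitSyncPoint" async trace events balanced across nested waits.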
void GpuCommandBufferStub::OnSyncPointRetired() {
  --sync_point_wait_count_;
  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_END1("gpu", "WaitSyncPoint", this,
                           "GpuCommandBufferStub", this);
  }
  scheduler_->SetScheduled(true);
}
void GpuCommandBufferStub::OnSignalSyncPoint(uint32 sync_point, uint32 id) {
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point,
      base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
                 this->AsWeakPtr(),
                 id));
}

void GpuCommandBufferStub::OnSignalSyncPointAck(uint32 id) {
  Send(new GpuCommandBufferMsg_SignalSyncPointAck(route_id_, id));
}
void GpuCommandBufferStub::OnSignalQuery(uint32 query_id, uint32 id) {
  if (decoder_) {
    gpu::gles2::QueryManager* query_manager = decoder_->GetQueryManager();
    if (query_manager) {
      gpu::gles2::QueryManager::Query* query =
          query_manager->GetQuery(query_id);
      if (query) {
        query->AddCallback(
            base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
                       this->AsWeakPtr(),
                       id));
        return;
      }
    }
  }
  // Something went wrong, run callback immediately.
  OnSignalSyncPointAck(id);
}
void GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback(
    bool has_callback) {
  TRACE_EVENT0(
      "gpu",
      "GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback");
  if (has_callback) {
    if (!memory_manager_client_state_) {
      memory_manager_client_state_.reset(GetMemoryManager()->CreateClientState(
          this, surface_id_ != 0, true));
    }
  } else {
    memory_manager_client_state_.reset();
  }
}
void GpuCommandBufferStub::OnRegisterGpuMemoryBuffer(
    int32 id,
    gfx::GpuMemoryBufferHandle handle,
    uint32 width,
    uint32 height,
    uint32 internalformat) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterGpuMemoryBuffer");
#if defined(OS_ANDROID)
  // Verify that the renderer is not trying to use a surface texture it
  // doesn't own.
  if (handle.type == gfx::SURFACE_TEXTURE_BUFFER &&
      handle.surface_texture_id.secondary_id != channel()->client_id()) {
    LOG(ERROR) << "Illegal surface texture ID for renderer.";
    return;
  }
#endif

  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  if (image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image already exists with same ID.";
    return;
  }

  GpuChannelManager* manager = channel_->gpu_channel_manager();
  scoped_refptr<gfx::GLImage> image =
      manager->gpu_memory_buffer_factory()->CreateImageForGpuMemoryBuffer(
          handle,
          gfx::Size(width, height),
          internalformat,
          channel()->client_id());

  // For Android specific workaround.
  if (context_group_->feature_info()->workarounds().release_image_after_use)
    image->SetReleaseAfterUse();

  image_manager->AddImage(image.get(), id);
}
void GpuCommandBufferStub::OnUnregisterGpuMemoryBuffer(int32 id) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnUnregisterGpuMemoryBuffer");

  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  if (!image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image with ID doesn't exist.";
    return;
  }

  image_manager->RemoveImage(id);
}
void GpuCommandBufferStub::SendConsoleMessage(
    int32 id,
    const std::string& message) {
  GPUCommandBufferConsoleMessage console_message;
  console_message.id = id;
  console_message.message = message;
  IPC::Message* msg = new GpuCommandBufferMsg_ConsoleMsg(
      route_id_, console_message);
  msg->set_unblock(true);
  Send(msg);
}
void GpuCommandBufferStub::SendCachedShader(
    const std::string& key, const std::string& shader) {
  channel_->CacheShader(key, shader);
}
void GpuCommandBufferStub::AddDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.AddObserver(observer);
}

void GpuCommandBufferStub::RemoveDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.RemoveObserver(observer);
}
void GpuCommandBufferStub::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> flag) {
  preemption_flag_ = flag;
  if (scheduler_)
    scheduler_->SetPreemptByFlag(preemption_flag_);
}
bool GpuCommandBufferStub::GetTotalGpuMemory(uint64* bytes) {
  *bytes = total_gpu_memory_;
  return !!total_gpu_memory_;
}
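
// total_gpu_memory_ stays zero when the driver could not report a total in
// OnInitialize(), in which case this query reports failure.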
gfx::Size GpuCommandBufferStub::GetSurfaceSize() const {
  if (!surface_.get())
    return gfx::Size();
  return surface_->GetSize();
}
gpu::gles2::MemoryTracker* GpuCommandBufferStub::GetMemoryTracker() const {
  return context_group_->memory_tracker();
}
void GpuCommandBufferStub::SetMemoryAllocation(
    const gpu::MemoryAllocation& allocation) {
  if (!last_memory_allocation_valid_ ||
      !allocation.Equals(last_memory_allocation_)) {
    Send(new GpuCommandBufferMsg_SetMemoryAllocation(
        route_id_, allocation));
  }

  last_memory_allocation_valid_ = true;
  last_memory_allocation_ = allocation;
}
void GpuCommandBufferStub::SuggestHaveFrontBuffer(
    bool suggest_have_frontbuffer) {
  // This can be called outside of OnMessageReceived, so the context needs
  // to be made current before calling methods on the surface.
  if (surface_.get() && MakeCurrent())
    surface_->SetFrontbufferAllocation(suggest_have_frontbuffer);
}
bool GpuCommandBufferStub::CheckContextLost() {
  DCHECK(command_buffer_);
  gpu::CommandBuffer::State state = command_buffer_->GetLastState();
  bool was_lost = state.error == gpu::error::kLostContext;
  // Lose all other contexts if the reset was triggered by the robustness
  // extension instead of being synthetic.
  if (was_lost && decoder_ && decoder_->WasContextLostByRobustnessExtension() &&
      (gfx::GLContext::LosesAllContextsOnContextLost() ||
       use_virtualized_gl_context_))
    channel_->LoseAllContexts();
  CheckCompleteWaits();
  return was_lost;
}
void GpuCommandBufferStub::MarkContextLost() {
  if (!command_buffer_ ||
      command_buffer_->GetLastState().error == gpu::error::kLostContext)
    return;

  command_buffer_->SetContextLostReason(gpu::error::kUnknown);
  if (decoder_)
    decoder_->LoseContext(GL_UNKNOWN_CONTEXT_RESET_ARB);
  command_buffer_->SetParseError(gpu::error::kLostContext);
}
uint64 GpuCommandBufferStub::GetMemoryUsage() const {
  return GetMemoryManager()->GetClientMemoryUsage(this);
}

}  // namespace content