// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/debug/trace_event.h"
#include "base/memory/shared_memory.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "content/common/gpu/devtools_gpu_instrumentation.h"
#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_command_buffer_stub.h"
#include "content/common/gpu/gpu_memory_manager.h"
#include "content/common/gpu/gpu_memory_tracking.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/gpu_watchdog.h"
#include "content/common/gpu/image_transport_surface.h"
#include "content/common/gpu/media/gpu_video_decode_accelerator.h"
#include "content/common/gpu/media/gpu_video_encode_accelerator.h"
#include "content/common/gpu/sync_point_manager.h"
#include "content/public/common/content_client.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/service/gl_context_virtual.h"
#include "gpu/command_buffer/service/gl_state_restorer_impl.h"
#include "gpu/command_buffer/service/gpu_control_service.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/logger.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/query_manager.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_switches.h"

#include "content/public/common/sandbox_init.h"
#if defined(OS_ANDROID)
#include "content/common/gpu/stream_texture_android.h"
#endif

namespace content {
struct WaitForCommandState {
  WaitForCommandState(int32 start, int32 end, IPC::Message* reply)
      : start(start), end(end), reply(reply) {}

  int32 start;
  int32 end;
  scoped_ptr<IPC::Message> reply;
};

// The GpuCommandBufferMemoryTracker class provides a bridge between the
// ContextGroup's memory type managers and the GpuMemoryManager class.
class GpuCommandBufferMemoryTracker : public gpu::gles2::MemoryTracker {
 public:
  explicit GpuCommandBufferMemoryTracker(GpuChannel* channel) :
      tracking_group_(channel->gpu_channel_manager()->gpu_memory_manager()->
          CreateTrackingGroup(channel->renderer_pid(), this)) {
  }

  virtual void TrackMemoryAllocatedChange(
      size_t old_size,
      size_t new_size,
      gpu::gles2::MemoryTracker::Pool pool) OVERRIDE {
    tracking_group_->TrackMemoryAllocatedChange(
        old_size, new_size, pool);
  }

  virtual bool EnsureGPUMemoryAvailable(size_t size_needed) OVERRIDE {
    return tracking_group_->EnsureGPUMemoryAvailable(size_needed);
  }

 private:
  virtual ~GpuCommandBufferMemoryTracker() {
  }

  scoped_ptr<GpuMemoryTrackingGroup> tracking_group_;

  DISALLOW_COPY_AND_ASSIGN(GpuCommandBufferMemoryTracker);
};

// FastSetActiveURL will shortcut the expensive call to SetActiveURL when the
// url_hash matches.
void FastSetActiveURL(const GURL& url, size_t url_hash) {
  // Leave the previously set URL in the empty case -- empty URLs are given by
  // WebKitPlatformSupportImpl::createOffscreenGraphicsContext3D. Hopefully the
  // onscreen context URL was set previously and will show up even when a crash
  // occurs during offscreen command processing.
  if (url.is_empty())
    return;
  static size_t g_last_url_hash = 0;
  if (url_hash != g_last_url_hash) {
    g_last_url_hash = url_hash;
    GetContentClient()->SetActiveURL(url);
  }
}

// The first time polling a fence, delay some extra time to allow other
// stubs to process some work, or else the timing of the fences could
// allow a pattern of alternating fast and slow frames to occur.
const int64 kHandleMoreWorkPeriodMs = 2;
const int64 kHandleMoreWorkPeriodBusyMs = 1;

// Prevents idle work from being starved.
const int64 kMaxTimeSinceIdleMs = 10;

GpuCommandBufferStub::GpuCommandBufferStub(
    GpuChannel* channel,
    GpuCommandBufferStub* share_group,
    const gfx::GLSurfaceHandle& handle,
    gpu::gles2::MailboxManager* mailbox_manager,
    gpu::gles2::ImageManager* image_manager,
    const gfx::Size& size,
    const gpu::gles2::DisallowedFeatures& disallowed_features,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    bool use_virtualized_gl_context,
    int32 route_id,
    int32 surface_id,
    GpuWatchdog* watchdog,
    bool software,
    const GURL& active_url)
    : channel_(channel),
      handle_(handle),
      initial_size_(size),
      disallowed_features_(disallowed_features),
      requested_attribs_(attribs),
      gpu_preference_(gpu_preference),
      use_virtualized_gl_context_(use_virtualized_gl_context),
      route_id_(route_id),
      surface_id_(surface_id),
      software_(software),
      last_flush_count_(0),
      last_memory_allocation_valid_(false),
      watchdog_(watchdog),
      sync_point_wait_count_(0),
      delayed_work_scheduled_(false),
      previous_messages_processed_(0),
      active_url_(active_url),
      total_gpu_memory_(0) {
  active_url_hash_ = base::Hash(active_url.possibly_invalid_spec());
  FastSetActiveURL(active_url_, active_url_hash_);

  gpu::gles2::ContextCreationAttribHelper attrib_parser;
  attrib_parser.Parse(requested_attribs_);

  if (share_group) {
    context_group_ = share_group->context_group_;
    DCHECK(context_group_->bind_generates_resource() ==
           attrib_parser.bind_generates_resource_);
  } else {
    context_group_ = new gpu::gles2::ContextGroup(
        mailbox_manager,
        image_manager,
        new GpuCommandBufferMemoryTracker(channel),
        channel_->gpu_channel_manager()->shader_translator_cache(),
        NULL,
        attrib_parser.bind_generates_resource_);
  }

  use_virtualized_gl_context_ |=
      context_group_->feature_info()->workarounds().use_virtualized_gl_contexts;
}

GpuCommandBufferStub::~GpuCommandBufferStub() {
  Destroy();

  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DestroyCommandBuffer(surface_id()));
}

GpuMemoryManager* GpuCommandBufferStub::GetMemoryManager() const {
  return channel()->gpu_channel_manager()->gpu_memory_manager();
}

bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
  devtools_gpu_instrumentation::ScopedGpuTask task(channel());
  FastSetActiveURL(active_url_, active_url_hash_);

  bool have_context = false;
  // Ensure the appropriate GL context is current before handling any IPC
  // messages directed at the command buffer. This ensures that the message
  // handler can assume that the context is current (not necessary for
  // Echo, RetireSyncPoint, or WaitSyncPoint).
  if (decoder_.get() && message.type() != GpuCommandBufferMsg_Echo::ID &&
      message.type() != GpuCommandBufferMsg_WaitForTokenInRange::ID &&
      message.type() != GpuCommandBufferMsg_WaitForGetOffsetInRange::ID &&
      message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID &&
      message.type() != GpuCommandBufferMsg_SetLatencyInfo::ID) {
    if (!MakeCurrent())
      return false;
    have_context = true;
  }

  // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message
  // handlers here. This is so the reply can be delayed if the scheduler is
  // unscheduled.
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize,
                                    OnInitialize);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetGetBuffer,
                                    OnSetGetBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ProduceFrontBuffer,
                        OnProduceFrontBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Echo, OnEcho);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForTokenInRange,
                                    OnWaitForTokenInRange);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForGetOffsetInRange,
                                    OnWaitForGetOffsetInRange);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetLatencyInfo, OnSetLatencyInfo);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer,
                        OnRegisterTransferBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer,
                        OnDestroyTransferBuffer);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoDecoder,
                                    OnCreateVideoDecoder)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoEncoder,
                                    OnCreateVideoEncoder)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetSurfaceVisible,
                        OnSetSurfaceVisible)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RetireSyncPoint,
                        OnRetireSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPoint,
                        OnSignalSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalQuery,
                        OnSignalQuery)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SendClientManagedMemoryStats,
                        OnReceivedClientManagedMemoryStats)
    IPC_MESSAGE_HANDLER(
        GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback,
        OnSetClientHasMemoryAllocationChangedCallback)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterGpuMemoryBuffer,
                        OnRegisterGpuMemoryBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyGpuMemoryBuffer,
                        OnDestroyGpuMemoryBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateStreamTexture,
                        OnCreateStreamTexture)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  CheckCompleteWaits();

  if (have_context) {
    // Ensure that any delayed work that was created will be handled.
    ScheduleDelayedWork(kHandleMoreWorkPeriodMs);
  }

  DCHECK(handled);
  return handled;
}

bool GpuCommandBufferStub::Send(IPC::Message* message) {
  return channel_->Send(message);
}

bool GpuCommandBufferStub::IsScheduled() {
  return (!scheduler_.get() || scheduler_->IsScheduled());
}

bool GpuCommandBufferStub::HasMoreWork() {
  return scheduler_.get() && scheduler_->HasMoreWork();
}

void GpuCommandBufferStub::PollWork() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork");
  delayed_work_scheduled_ = false;
  FastSetActiveURL(active_url_, active_url_hash_);
  if (decoder_.get() && !MakeCurrent())
    return;

  if (scheduler_) {
    bool fences_complete = scheduler_->PollUnscheduleFences();
    // Perform idle work if all fences are complete.
    if (fences_complete) {
      uint64 current_messages_processed =
          channel()->gpu_channel_manager()->MessagesProcessed();
      // We're idle when no messages were processed or scheduled.
      bool is_idle =
          (previous_messages_processed_ == current_messages_processed) &&
          !channel()->gpu_channel_manager()->HandleMessagesScheduled();
      if (!is_idle && !last_idle_time_.is_null()) {
        base::TimeDelta time_since_idle = base::TimeTicks::Now() -
            last_idle_time_;
        base::TimeDelta max_time_since_idle =
            base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs);

        // Force idle when it's been too long since last time we were idle.
        if (time_since_idle > max_time_since_idle)
          is_idle = true;
      }

      if (is_idle) {
        last_idle_time_ = base::TimeTicks::Now();
        scheduler_->PerformIdleWork();
      }
    }
  }
  ScheduleDelayedWork(kHandleMoreWorkPeriodBusyMs);
}

bool GpuCommandBufferStub::HasUnprocessedCommands() {
  if (command_buffer_) {
    gpu::CommandBuffer::State state = command_buffer_->GetLastState();
    return state.put_offset != state.get_offset &&
        !gpu::error::IsError(state.error);
  }
  return false;
}

void GpuCommandBufferStub::ScheduleDelayedWork(int64 delay) {
  if (!HasMoreWork()) {
    last_idle_time_ = base::TimeTicks();
    return;
  }

  if (delayed_work_scheduled_)
    return;
  delayed_work_scheduled_ = true;

  // Idle when no messages are processed between now and when
  // PollWork is called.
  previous_messages_processed_ =
      channel()->gpu_channel_manager()->MessagesProcessed();
  if (last_idle_time_.is_null())
    last_idle_time_ = base::TimeTicks::Now();

  // IsScheduled() returns true after passing all unschedule fences
  // and this is when we can start performing idle work. Idle work
  // is done synchronously so we can set delay to 0 and instead poll
  // for more work at the rate idle work is performed. This also ensures
  // that idle work is done as efficiently as possible without any
  // unnecessary delays.
  if (scheduler_.get() &&
      scheduler_->IsScheduled() &&
      scheduler_->HasMoreIdleWork()) {
    delay = 0;
  }
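
  // PollWork() is bound through a WeakPtr below, so the delayed task is
  // dropped harmlessly if this stub is destroyed before it fires.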
  base::MessageLoop::current()->PostDelayedTask(
      FROM_HERE,
      base::Bind(&GpuCommandBufferStub::PollWork, AsWeakPtr()),
      base::TimeDelta::FromMilliseconds(delay));
}

void GpuCommandBufferStub::OnEcho(const IPC::Message& message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnEcho");
  Send(new IPC::Message(message));
}

bool GpuCommandBufferStub::MakeCurrent() {
  if (decoder_->MakeCurrent())
    return true;
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
  command_buffer_->SetParseError(gpu::error::kLostContext);
  CheckContextLost();
  return false;
}

void GpuCommandBufferStub::Destroy() {
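  // Release any pending wait replies first so the client is unblocked rather
  // than left waiting on a reply that would never arrive.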
  if (wait_for_token_) {
    Send(wait_for_token_->reply.release());
    wait_for_token_.reset();
  }
  if (wait_for_get_offset_) {
    Send(wait_for_get_offset_->reply.release());
    wait_for_get_offset_.reset();
  }
  if (handle_.is_null() && !active_url_.is_empty()) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(new GpuHostMsg_DidDestroyOffscreenContext(
        active_url_));
  }

  memory_manager_client_state_.reset();

  while (!sync_points_.empty())
    OnRetireSyncPoint(sync_points_.front());

  if (decoder_)
    decoder_->set_engine(NULL);

  // The scheduler has raw references to the decoder and the command buffer so
  // destroy it before those.
  scheduler_.reset();

  bool have_context = false;
  if (decoder_ && command_buffer_ &&
      command_buffer_->GetLastState().error != gpu::error::kLostContext)
    have_context = decoder_->MakeCurrent();
  FOR_EACH_OBSERVER(DestructionObserver,
                    destruction_observers_,
                    OnWillDestroyStub());

  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }

  command_buffer_.reset();

  // Remove this after crbug.com/248395 is sorted out.
  surface_ = NULL;
}

void GpuCommandBufferStub::OnInitializeFailed(IPC::Message* reply_message) {
  Destroy();
  GpuCommandBufferMsg_Initialize::WriteReplyParams(
      reply_message, false, gpu::Capabilities());
  Send(reply_message);
}

void GpuCommandBufferStub::OnInitialize(
    base::SharedMemoryHandle shared_state_handle,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnInitialize");
  DCHECK(!command_buffer_.get());

  scoped_ptr<base::SharedMemory> shared_state_shm(
      new base::SharedMemory(shared_state_handle, false));

  command_buffer_.reset(new gpu::CommandBufferService(
      context_group_->transfer_buffer_manager()));

  bool result = command_buffer_->Initialize();
  DCHECK(result);

  decoder_.reset(::gpu::gles2::GLES2Decoder::Create(context_group_.get()));

  scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(),
                                         decoder_.get(),
                                         decoder_.get()));
  if (preemption_flag_.get())
    scheduler_->SetPreemptByFlag(preemption_flag_);

  decoder_->set_engine(scheduler_.get());

  if (!handle_.is_null()) {
#if defined(OS_MACOSX) || defined(UI_COMPOSITOR_IMAGE_TRANSPORT)
    if (software_) {
      LOG(ERROR) << "No software support.";
      OnInitializeFailed(reply_message);
      return;
    }
#endif

    surface_ = ImageTransportSurface::CreateSurface(
        channel_->gpu_channel_manager(),
        this,
        handle_);
  } else {
    GpuChannelManager* manager = channel_->gpu_channel_manager();
    surface_ = manager->GetDefaultOffscreenSurface();
  }

  if (!surface_.get()) {
    DLOG(ERROR) << "Failed to create surface.";
    OnInitializeFailed(reply_message);
    return;
  }
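
  // With virtualized GL contexts, every stub in the share group is
  // multiplexed onto a single real GL context; GLContextVirtual switches
  // between them and the GLStateRestorer reapplies each decoder's GL state.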
  scoped_refptr<gfx::GLContext> context;
  if (use_virtualized_gl_context_ && channel_->share_group()) {
    context = channel_->share_group()->GetSharedContext();
    if (!context.get()) {
      context = gfx::GLContext::CreateGLContext(
          channel_->share_group(),
          channel_->gpu_channel_manager()->GetDefaultOffscreenSurface(),
          gpu_preference_);
      if (!context.get()) {
        DLOG(ERROR) << "Failed to create shared context for virtualization.";
        OnInitializeFailed(reply_message);
        return;
      }
      channel_->share_group()->SetSharedContext(context.get());
    }
    // This should be a non-virtual GL context.
    DCHECK(context->GetHandle());
    context = new gpu::GLContextVirtual(
        channel_->share_group(), context.get(), decoder_->AsWeakPtr());
    if (!context->Initialize(surface_.get(), gpu_preference_)) {
      // TODO(sievers): The real context created above for the default
      // offscreen surface might not be compatible with this surface.
      // Need to adjust at least GLX to be able to create the initial context
      // with a config that is compatible with onscreen and offscreen surfaces.
      context = NULL;

      DLOG(ERROR) << "Failed to initialize virtual GL context.";
      OnInitializeFailed(reply_message);
      return;
    }
  }
  if (!context.get()) {
    context = gfx::GLContext::CreateGLContext(
        channel_->share_group(), surface_.get(), gpu_preference_);
  }
  if (!context.get()) {
    DLOG(ERROR) << "Failed to create context.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Failed to make context current.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->GetGLStateRestorer()) {
    context->SetGLStateRestorer(
        new gpu::GLStateRestorerImpl(decoder_->AsWeakPtr()));
  }

  if (!context->GetTotalGpuMemory(&total_gpu_memory_))
    total_gpu_memory_ = 0;

  if (!context_group_->has_program_cache()) {
    context_group_->set_program_cache(
        channel_->gpu_channel_manager()->program_cache());
  }

  // Initialize the decoder with either the view or pbuffer GLContext.
  if (!decoder_->Initialize(surface_,
                            context,
                            !surface_id(),
                            initial_size_,
                            disallowed_features_,
                            requested_attribs_)) {
    DLOG(ERROR) << "Failed to initialize decoder.";
    OnInitializeFailed(reply_message);
    return;
  }

  gpu_control_service_.reset(
      new gpu::GpuControlService(context_group_->image_manager(), NULL));

  if (CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kEnableGPUServiceLogging)) {
    decoder_->set_log_commands(true);
  }

  decoder_->GetLogger()->SetMsgCallback(
      base::Bind(&GpuCommandBufferStub::SendConsoleMessage,
                 base::Unretained(this)));
  decoder_->SetShaderCacheCallback(
      base::Bind(&GpuCommandBufferStub::SendCachedShader,
                 base::Unretained(this)));
  decoder_->SetWaitSyncPointCallback(
      base::Bind(&GpuCommandBufferStub::OnWaitSyncPoint,
                 base::Unretained(this)));

  command_buffer_->SetPutOffsetChangeCallback(
      base::Bind(&GpuCommandBufferStub::PutChanged, base::Unretained(this)));
  command_buffer_->SetGetBufferChangeCallback(
      base::Bind(&gpu::GpuScheduler::SetGetBuffer,
                 base::Unretained(scheduler_.get())));
  command_buffer_->SetParseErrorCallback(
      base::Bind(&GpuCommandBufferStub::OnParseError, base::Unretained(this)));
  scheduler_->SetSchedulingChangedCallback(
      base::Bind(&GpuChannel::StubSchedulingChanged,
                 base::Unretained(channel_)));

  if (watchdog_) {
    scheduler_->SetCommandProcessedCallback(
        base::Bind(&GpuCommandBufferStub::OnCommandProcessed,
                   base::Unretained(this)));
  }
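
  // The shared state buffer is memory shared with the client process; the
  // service publishes the latest get offset and token there so the client
  // can poll command buffer progress without an IPC round trip per query.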
  const size_t kSharedStateSize = sizeof(gpu::CommandBufferSharedState);
  if (!shared_state_shm->Map(kSharedStateSize)) {
    DLOG(ERROR) << "Failed to map shared state buffer.";
    OnInitializeFailed(reply_message);
    return;
  }
  command_buffer_->SetSharedStateBuffer(gpu::MakeBackingFromSharedMemory(
      shared_state_shm.Pass(), kSharedStateSize));

  GpuCommandBufferMsg_Initialize::WriteReplyParams(
      reply_message, true, decoder_->GetCapabilities());
  Send(reply_message);

  if (handle_.is_null() && !active_url_.is_empty()) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(new GpuHostMsg_DidCreateOffscreenContext(
        active_url_));
  }
}

void GpuCommandBufferStub::OnSetLatencyInfo(
    const std::vector<ui::LatencyInfo>& latency_info) {
  if (!ui::LatencyInfo::Verify(latency_info,
                               "GpuCommandBufferStub::OnSetLatencyInfo"))
    return;
  if (!latency_info_callback_.is_null())
    latency_info_callback_.Run(latency_info);
}

void GpuCommandBufferStub::OnCreateStreamTexture(
    uint32 texture_id, int32 stream_id, bool* succeeded) {
#if defined(OS_ANDROID)
  *succeeded = StreamTexture::Create(this, texture_id, stream_id);
#else
  *succeeded = false;
#endif
}

void GpuCommandBufferStub::SetLatencyInfoCallback(
    const LatencyInfoCallback& callback) {
  latency_info_callback_ = callback;
}

int32 GpuCommandBufferStub::GetRequestedAttribute(int attr) const {
  // The requested attributes are stored as (enum, value) pairs; search for
  // the requested attribute and return its value.
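  // Each iteration consumes the attribute enum with *it++, leaving the
  // iterator on the associated value.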
  for (std::vector<int32>::const_iterator it = requested_attribs_.begin();
       it != requested_attribs_.end(); ++it) {
    if (*it++ == attr) {
      return *it;
    }
  }
  return -1;
}

void GpuCommandBufferStub::OnSetGetBuffer(int32 shm_id,
                                          IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetGetBuffer");
  if (command_buffer_)
    command_buffer_->SetGetBuffer(shm_id);
  Send(reply_message);
}

void GpuCommandBufferStub::OnProduceFrontBuffer(const gpu::Mailbox& mailbox) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnProduceFrontBuffer");
  if (!decoder_) {
    LOG(ERROR) << "Can't produce front buffer before initialization.";
    return;
  }

  decoder_->ProduceFrontBuffer(mailbox);
}

void GpuCommandBufferStub::OnParseError() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnParseError");
  DCHECK(command_buffer_.get());
  gpu::CommandBuffer::State state = command_buffer_->GetLastState();
  IPC::Message* msg = new GpuCommandBufferMsg_Destroyed(
      route_id_, state.context_lost_reason);
  msg->set_unblock(true);
  Send(msg);

  // Tell the browser about this context loss as well, so it can
  // determine whether client APIs like WebGL need to be immediately
  // blocked from automatically running.
  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DidLoseContext(
      handle_.is_null(), state.context_lost_reason, active_url_));

  CheckContextLost();
}

void GpuCommandBufferStub::OnWaitForTokenInRange(int32 start,
                                                 int32 end,
                                                 IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForTokenInRange");
  DCHECK(command_buffer_.get());
  if (wait_for_token_)
    LOG(ERROR) << "Got WaitForToken command while currently waiting for token.";
  wait_for_token_ =
      make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
  CheckCompleteWaits();
}

void GpuCommandBufferStub::OnWaitForGetOffsetInRange(
    int32 start,
    int32 end,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForGetOffsetInRange");
  DCHECK(command_buffer_.get());
  if (wait_for_get_offset_) {
    LOG(ERROR)
        << "Got WaitForGetOffset command while currently waiting for offset.";
  }
  wait_for_get_offset_ =
      make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
  CheckCompleteWaits();
}

void GpuCommandBufferStub::CheckCompleteWaits() {
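  // A wait is satisfied as soon as the watched value enters the requested
  // range, or immediately if the command buffer is in an error state, so the
  // waiting client is never left blocked.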
  if (wait_for_token_ || wait_for_get_offset_) {
    gpu::CommandBuffer::State state = command_buffer_->GetLastState();
    if (wait_for_token_ &&
        (gpu::CommandBuffer::InRange(
             wait_for_token_->start, wait_for_token_->end, state.token) ||
         state.error != gpu::error::kNoError)) {
      ReportState();
      GpuCommandBufferMsg_WaitForTokenInRange::WriteReplyParams(
          wait_for_token_->reply.get(), state);
      Send(wait_for_token_->reply.release());
      wait_for_token_.reset();
    }
    if (wait_for_get_offset_ &&
        (gpu::CommandBuffer::InRange(wait_for_get_offset_->start,
                                     wait_for_get_offset_->end,
                                     state.get_offset) ||
         state.error != gpu::error::kNoError)) {
      ReportState();
      GpuCommandBufferMsg_WaitForGetOffsetInRange::WriteReplyParams(
          wait_for_get_offset_->reply.get(), state);
      Send(wait_for_get_offset_->reply.release());
      wait_for_get_offset_.reset();
    }
  }
}

void GpuCommandBufferStub::OnAsyncFlush(int32 put_offset, uint32 flush_count) {
  TRACE_EVENT1(
      "gpu", "GpuCommandBufferStub::OnAsyncFlush", "put_offset", put_offset);
  DCHECK(command_buffer_.get());
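  // flush_count is a wrapping uint32 sequence number, so compare by unsigned
  // difference: a count within 2^27 increments of the last accepted one is
  // considered newer, even across the wrap from 0xFFFFFFFF to 0.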
  if (flush_count - last_flush_count_ < 0x8000000U) {
    last_flush_count_ = flush_count;
    command_buffer_->Flush(put_offset);
  } else {
    // We received this message out-of-order. This should not happen but is
    // here to catch regressions. Ignore the message.
    NOTREACHED() << "Received a Flush message out-of-order";
  }

  ReportState();
}

void GpuCommandBufferStub::OnRescheduled() {
  gpu::CommandBuffer::State pre_state = command_buffer_->GetLastState();
  command_buffer_->Flush(pre_state.put_offset);
  gpu::CommandBuffer::State post_state = command_buffer_->GetLastState();

  if (pre_state.get_offset != post_state.get_offset)
    ReportState();
}

void GpuCommandBufferStub::OnRegisterTransferBuffer(
    int32 id,
    base::SharedMemoryHandle transfer_buffer,
    uint32 size) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterTransferBuffer");

  // Take ownership of the memory and map it into this process.
  // This validates the size.
  scoped_ptr<base::SharedMemory> shared_memory(
      new base::SharedMemory(transfer_buffer, false));
  if (!shared_memory->Map(size)) {
    DVLOG(0) << "Failed to map shared memory.";
    return;
  }

  if (command_buffer_) {
    command_buffer_->RegisterTransferBuffer(
        id, gpu::MakeBackingFromSharedMemory(shared_memory.Pass(), size));
  }
}

void GpuCommandBufferStub::OnDestroyTransferBuffer(int32 id) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyTransferBuffer");

  if (command_buffer_)
    command_buffer_->DestroyTransferBuffer(id);
}

void GpuCommandBufferStub::OnCommandProcessed() {
  if (watchdog_)
    watchdog_->CheckArmed();
}

void GpuCommandBufferStub::ReportState() { command_buffer_->UpdateState(); }

void GpuCommandBufferStub::PutChanged() {
  FastSetActiveURL(active_url_, active_url_hash_);
  scheduler_->PutChanged();
}

void GpuCommandBufferStub::OnCreateVideoDecoder(
    media::VideoCodecProfile profile,
    int32 decoder_route_id,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoDecoder");
  GpuVideoDecodeAccelerator* decoder = new GpuVideoDecodeAccelerator(
      decoder_route_id, this, channel_->io_message_loop());
  decoder->Initialize(profile, reply_message);
  // decoder is registered as a DestructionObserver of this stub and will
  // self-delete during destruction of this stub.
}

void GpuCommandBufferStub::OnCreateVideoEncoder(
    media::VideoFrame::Format input_format,
    const gfx::Size& input_visible_size,
    media::VideoCodecProfile output_profile,
    uint32 initial_bitrate,
    int32 encoder_route_id,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoEncoder");
  GpuVideoEncodeAccelerator* encoder =
      new GpuVideoEncodeAccelerator(encoder_route_id, this);
  encoder->Initialize(input_format,
                      input_visible_size,
                      output_profile,
                      initial_bitrate,
                      reply_message);
  // encoder is registered as a DestructionObserver of this stub and will
  // self-delete during destruction of this stub.
}

void GpuCommandBufferStub::OnSetSurfaceVisible(bool visible) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetSurfaceVisible");
  if (memory_manager_client_state_)
    memory_manager_client_state_->SetVisible(visible);
}

void GpuCommandBufferStub::AddSyncPoint(uint32 sync_point) {
  sync_points_.push_back(sync_point);
}

void GpuCommandBufferStub::OnRetireSyncPoint(uint32 sync_point) {
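  // Sync points must be retired in the order they were added; the DCHECK
  // below enforces that FIFO discipline.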
  DCHECK(!sync_points_.empty() && sync_points_.front() == sync_point);
  sync_points_.pop_front();
  if (context_group_->mailbox_manager()->UsesSync() && MakeCurrent())
    context_group_->mailbox_manager()->PushTextureUpdates();
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->RetireSyncPoint(sync_point);
}

bool GpuCommandBufferStub::OnWaitSyncPoint(uint32 sync_point) {
  if (!sync_point)
    return true;
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  if (manager->sync_point_manager()->IsSyncPointRetired(sync_point))
    return true;

  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitSyncPoint", this,
                             "GpuCommandBufferStub", this);
  }
  scheduler_->SetScheduled(false);
  ++sync_point_wait_count_;
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point,
      base::Bind(&GpuCommandBufferStub::OnSyncPointRetired,
                 this->AsWeakPtr()));
  return scheduler_->IsScheduled();
}

void GpuCommandBufferStub::OnSyncPointRetired() {
  --sync_point_wait_count_;
  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_END1("gpu", "WaitSyncPoint", this,
                           "GpuCommandBufferStub", this);
  }
  scheduler_->SetScheduled(true);
}

void GpuCommandBufferStub::OnSignalSyncPoint(uint32 sync_point, uint32 id) {
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point,
      base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
                 this->AsWeakPtr(),
                 id));
}

) {
882 Send(new GpuCommandBufferMsg_SignalSyncPointAck(route_id_
, id
));
void GpuCommandBufferStub::OnSignalQuery(uint32 query_id, uint32 id) {
  if (decoder_) {
    gpu::gles2::QueryManager* query_manager = decoder_->GetQueryManager();
    if (query_manager) {
      gpu::gles2::QueryManager::Query* query =
          query_manager->GetQuery(query_id);
      if (query) {
        query->AddCallback(
            base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
                       this->AsWeakPtr(),
                       id));
        return;
      }
    }
  }
  // Something went wrong, run callback immediately.
  OnSignalSyncPointAck(id);
}

void GpuCommandBufferStub::OnReceivedClientManagedMemoryStats(
    const gpu::ManagedMemoryStats& stats) {
  TRACE_EVENT0(
      "gpu",
      "GpuCommandBufferStub::OnReceivedClientManagedMemoryStats");
  if (memory_manager_client_state_)
    memory_manager_client_state_->SetManagedMemoryStats(stats);
}

void GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback(
    bool has_callback) {
  TRACE_EVENT0(
      "gpu",
      "GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback");
  if (has_callback) {
    if (!memory_manager_client_state_) {
      memory_manager_client_state_.reset(GetMemoryManager()->CreateClientState(
          this, surface_id_ != 0, true));
    }
  } else {
    memory_manager_client_state_.reset();
  }
}

void GpuCommandBufferStub::OnRegisterGpuMemoryBuffer(
    int32 id,
    gfx::GpuMemoryBufferHandle gpu_memory_buffer,
    uint32 width,
    uint32 height,
    uint32 internalformat) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterGpuMemoryBuffer");
#if defined(OS_ANDROID)
  // Verify that renderer is not trying to use a surface texture it doesn't
  // own.
  if (gpu_memory_buffer.type == gfx::SURFACE_TEXTURE_BUFFER &&
      gpu_memory_buffer.surface_texture_id.secondary_id !=
          channel()->client_id()) {
    LOG(ERROR) << "Illegal surface texture ID for renderer.";
    return;
  }
#endif

  if (gpu_control_service_) {
    gpu_control_service_->RegisterGpuMemoryBuffer(
        id, gpu_memory_buffer, width, height, internalformat);
  }
}

void GpuCommandBufferStub::OnDestroyGpuMemoryBuffer(int32 id) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyGpuMemoryBuffer");
  if (gpu_control_service_)
    gpu_control_service_->UnregisterGpuMemoryBuffer(id);
}

void GpuCommandBufferStub::SendConsoleMessage(
    int32 id,
    const std::string& message) {
  GPUCommandBufferConsoleMessage console_message;
  console_message.id = id;
  console_message.message = message;
  IPC::Message* msg = new GpuCommandBufferMsg_ConsoleMsg(
      route_id_, console_message);
  msg->set_unblock(true);
  Send(msg);
}

void GpuCommandBufferStub::SendCachedShader(
    const std::string& key, const std::string& shader) {
  channel_->CacheShader(key, shader);
}

void GpuCommandBufferStub::AddDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.AddObserver(observer);
}

void GpuCommandBufferStub::RemoveDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.RemoveObserver(observer);
}

void GpuCommandBufferStub::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> flag) {
  preemption_flag_ = flag;
  if (scheduler_)
    scheduler_->SetPreemptByFlag(preemption_flag_);
}

bool GpuCommandBufferStub::GetTotalGpuMemory(uint64* bytes) {
  *bytes = total_gpu_memory_;
  return !!total_gpu_memory_;
}

gfx::Size GpuCommandBufferStub::GetSurfaceSize() const {
  if (!surface_.get())
    return gfx::Size();
  return surface_->GetSize();
}

gpu::gles2::MemoryTracker* GpuCommandBufferStub::GetMemoryTracker() const {
  return context_group_->memory_tracker();
}

void GpuCommandBufferStub::SetMemoryAllocation(
    const gpu::MemoryAllocation& allocation) {
  if (!last_memory_allocation_valid_ ||
      !allocation.Equals(last_memory_allocation_)) {
    Send(new GpuCommandBufferMsg_SetMemoryAllocation(
        route_id_, allocation));
  }

  last_memory_allocation_valid_ = true;
  last_memory_allocation_ = allocation;
}

void GpuCommandBufferStub::SuggestHaveFrontBuffer(
    bool suggest_have_frontbuffer) {
  // This can be called outside of OnMessageReceived, so the context needs
  // to be made current before calling methods on the surface.
  if (surface_.get() && MakeCurrent())
    surface_->SetFrontbufferAllocation(suggest_have_frontbuffer);
}

bool GpuCommandBufferStub::CheckContextLost() {
  DCHECK(command_buffer_);
  gpu::CommandBuffer::State state = command_buffer_->GetLastState();
  bool was_lost = state.error == gpu::error::kLostContext;
  // Lose all other contexts if the reset was triggered by the robustness
  // extension instead of being synthetic.
  if (was_lost && decoder_ && decoder_->WasContextLostByRobustnessExtension() &&
      (gfx::GLContext::LosesAllContextsOnContextLost() ||
       use_virtualized_gl_context_))
    channel_->LoseAllContexts();
  CheckCompleteWaits();
  return was_lost;
}

void GpuCommandBufferStub::MarkContextLost() {
  if (!command_buffer_ ||
      command_buffer_->GetLastState().error == gpu::error::kLostContext)
    return;

  command_buffer_->SetContextLostReason(gpu::error::kUnknown);
  if (decoder_)
    decoder_->LoseContext(GL_UNKNOWN_CONTEXT_RESET_ARB);
  command_buffer_->SetParseError(gpu::error::kLostContext);
}

uint64 GpuCommandBufferStub::GetMemoryUsage() const {
  return GetMemoryManager()->GetClientMemoryUsage(this);
}

}  // namespace content