// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
6 #include "base/bind_helpers.h"
7 #include "base/command_line.h"
9 #include "base/json/json_writer.h"
10 #include "base/memory/shared_memory.h"
11 #include "base/time/time.h"
12 #include "base/trace_event/trace_event.h"
13 #include "build/build_config.h"
14 #include "content/common/gpu/gpu_channel.h"
15 #include "content/common/gpu/gpu_channel_manager.h"
16 #include "content/common/gpu/gpu_command_buffer_stub.h"
17 #include "content/common/gpu/gpu_memory_manager.h"
18 #include "content/common/gpu/gpu_memory_tracking.h"
19 #include "content/common/gpu/gpu_messages.h"
20 #include "content/common/gpu/gpu_watchdog.h"
21 #include "content/common/gpu/image_transport_surface.h"
22 #include "content/common/gpu/media/gpu_video_decode_accelerator.h"
23 #include "content/common/gpu/media/gpu_video_encode_accelerator.h"
24 #include "content/public/common/content_client.h"
25 #include "content/public/common/content_switches.h"
26 #include "gpu/command_buffer/common/constants.h"
27 #include "gpu/command_buffer/common/gles2_cmd_utils.h"
28 #include "gpu/command_buffer/common/mailbox.h"
29 #include "gpu/command_buffer/service/gl_context_virtual.h"
30 #include "gpu/command_buffer/service/gl_state_restorer_impl.h"
31 #include "gpu/command_buffer/service/image_factory.h"
32 #include "gpu/command_buffer/service/image_manager.h"
33 #include "gpu/command_buffer/service/logger.h"
34 #include "gpu/command_buffer/service/mailbox_manager.h"
35 #include "gpu/command_buffer/service/memory_tracking.h"
36 #include "gpu/command_buffer/service/query_manager.h"
37 #include "gpu/command_buffer/service/sync_point_manager.h"
38 #include "gpu/command_buffer/service/transfer_buffer_manager.h"
39 #include "gpu/command_buffer/service/valuebuffer_manager.h"
40 #include "ui/gl/gl_bindings.h"
41 #include "ui/gl/gl_switches.h"
44 #include "base/win/win_util.h"
45 #include "content/public/common/sandbox_init.h"
48 #if defined(OS_ANDROID)
49 #include "content/common/gpu/stream_texture_android.h"
struct WaitForCommandState {
  WaitForCommandState(int32 start, int32 end, IPC::Message* reply)
      : start(start), end(end), reply(reply) {}

  int32 start;
  int32 end;
  scoped_ptr<IPC::Message> reply;
};

namespace {
// The GpuCommandBufferMemoryTracker class provides a bridge between the
// ContextGroup's memory type managers and the GpuMemoryManager class.
class GpuCommandBufferMemoryTracker : public gpu::gles2::MemoryTracker {
 public:
  explicit GpuCommandBufferMemoryTracker(GpuChannel* channel)
      : tracking_group_(channel->gpu_channel_manager()
                            ->gpu_memory_manager()
                            ->CreateTrackingGroup(channel->GetClientPID(),
                                                  this)),
        client_tracing_id_(channel->client_tracing_id()),
        client_id_(channel->client_id()) {}

  void TrackMemoryAllocatedChange(
      size_t old_size,
      size_t new_size,
      gpu::gles2::MemoryTracker::Pool pool) override {
    tracking_group_->TrackMemoryAllocatedChange(old_size, new_size, pool);
  }

  bool EnsureGPUMemoryAvailable(size_t size_needed) override {
    return tracking_group_->EnsureGPUMemoryAvailable(size_needed);
  }

  uint64_t ClientTracingId() const override { return client_tracing_id_; }
  int ClientId() const override { return client_id_; }

 private:
  ~GpuCommandBufferMemoryTracker() override {}

  scoped_ptr<GpuMemoryTrackingGroup> tracking_group_;
  const uint64_t client_tracing_id_;
  const int client_id_;

  DISALLOW_COPY_AND_ASSIGN(GpuCommandBufferMemoryTracker);
};
// FastSetActiveURL will shortcut the expensive call to SetActiveURL when the
// url_hash matches.
void FastSetActiveURL(const GURL& url, size_t url_hash) {
  // Leave the previously set URL in the empty case -- empty URLs are given by
  // BlinkPlatformImpl::createOffscreenGraphicsContext3D. Hopefully the
  // onscreen context URL was set previously and will show up even when a crash
  // occurs during offscreen command processing.
  if (url.is_empty())
    return;
  static size_t g_last_url_hash = 0;
  if (url_hash != g_last_url_hash) {
    g_last_url_hash = url_hash;
    GetContentClient()->SetActiveURL(url);
  }
}
// The first time polling a fence, delay some extra time to allow other
// stubs to process some work, or else the timing of the fences could
// allow a pattern of alternating fast and slow frames to occur.
const int64 kHandleMoreWorkPeriodMs = 2;
const int64 kHandleMoreWorkPeriodBusyMs = 1;

// Prevents idle work from being starved.
const int64 kMaxTimeSinceIdleMs = 10;
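// Serializes per-channel statistics (renderer pid, memory usage, and the
// memory limit) as JSON so they can be attached to devtools.timeline trace
// events emitted from OnMessageReceived().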
class DevToolsChannelData : public base::trace_event::ConvertableToTraceFormat {
 public:
  static scoped_refptr<base::trace_event::ConvertableToTraceFormat>
  CreateForChannel(GpuChannel* channel);

  void AppendAsTraceFormat(std::string* out) const override {
    std::string tmp;
    base::JSONWriter::Write(*value_, &tmp);
    *out += tmp;
  }

 private:
  explicit DevToolsChannelData(base::Value* value) : value_(value) {}
  ~DevToolsChannelData() override {}
  scoped_ptr<base::Value> value_;
  DISALLOW_COPY_AND_ASSIGN(DevToolsChannelData);
};
scoped_refptr<base::trace_event::ConvertableToTraceFormat>
DevToolsChannelData::CreateForChannel(GpuChannel* channel) {
  scoped_ptr<base::DictionaryValue> res(new base::DictionaryValue);
  res->SetInteger("renderer_pid", channel->GetClientPID());
  res->SetDouble("used_bytes", channel->GetMemoryUsage());
  res->SetDouble("limit_bytes",
                 channel->gpu_channel_manager()
                     ->gpu_memory_manager()
                     ->GetMaximumClientAllocation());
  return new DevToolsChannelData(res.release());
}
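// Runs |callback| on |task_runner|: immediately if the current thread is
// already |task_runner|'s thread, otherwise by posting a task to it.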
void RunOnThread(scoped_refptr<base::SingleThreadTaskRunner> task_runner,
                 const base::Closure& callback) {
  if (task_runner->BelongsToCurrentThread()) {
    callback.Run();
  } else {
    task_runner->PostTask(FROM_HERE, callback);
  }
}

}  // namespace
GpuCommandBufferStub::GpuCommandBufferStub(
    GpuChannel* channel,
    base::SingleThreadTaskRunner* task_runner,
    GpuCommandBufferStub* share_group,
    const gfx::GLSurfaceHandle& handle,
    gpu::gles2::MailboxManager* mailbox_manager,
    gpu::gles2::SubscriptionRefSet* subscription_ref_set,
    gpu::ValueStateMap* pending_valuebuffer_state,
    const gfx::Size& size,
    const gpu::gles2::DisallowedFeatures& disallowed_features,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    bool use_virtualized_gl_context,
    int32 stream_id,
    int32 route_id,
    int32 surface_id,
    GpuWatchdog* watchdog,
    bool software,
    const GURL& active_url)
    : channel_(channel),
      task_runner_(task_runner),
      initialized_(false),
      handle_(handle),
      initial_size_(size),
      disallowed_features_(disallowed_features),
      requested_attribs_(attribs),
      gpu_preference_(gpu_preference),
      use_virtualized_gl_context_(use_virtualized_gl_context),
      stream_id_(stream_id),
      route_id_(route_id),
      surface_id_(surface_id),
      software_(software),
      last_flush_count_(0),
      last_memory_allocation_valid_(false),
      watchdog_(watchdog),
      sync_point_wait_count_(0),
      delayed_work_scheduled_(false),
      previous_messages_processed_(0),
      active_url_(active_url),
      total_gpu_memory_(0) {
  active_url_hash_ = base::Hash(active_url.possibly_invalid_spec());
  FastSetActiveURL(active_url_, active_url_hash_);

  gpu::gles2::ContextCreationAttribHelper attrib_parser;
  attrib_parser.Parse(requested_attribs_);
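  // A stub created to share resources with another stub reuses that stub's
  // ContextGroup (and must agree on bind_generates_resource); otherwise a
  // fresh ContextGroup is created for this command buffer.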
  if (share_group) {
    context_group_ = share_group->context_group_;
    DCHECK(context_group_->bind_generates_resource() ==
           attrib_parser.bind_generates_resource);
  } else {
    context_group_ = new gpu::gles2::ContextGroup(
        mailbox_manager, new GpuCommandBufferMemoryTracker(channel),
        channel_->gpu_channel_manager()->shader_translator_cache(),
        channel_->gpu_channel_manager()->framebuffer_completeness_cache(), NULL,
        subscription_ref_set, pending_valuebuffer_state,
        attrib_parser.bind_generates_resource);
  }

  use_virtualized_gl_context_ |=
      context_group_->feature_info()->workarounds().use_virtualized_gl_contexts;

  bool is_offscreen = surface_id_ == 0;
  if (is_offscreen && initial_size_.IsEmpty()) {
    // If we're an offscreen surface with zero width and/or height, set to a
    // non-zero size so that we have a complete framebuffer for operations like
    // glClear.
    initial_size_ = gfx::Size(1, 1);
  }
}
GpuCommandBufferStub::~GpuCommandBufferStub() {
  Destroy();
}
GpuMemoryManager* GpuCommandBufferStub::GetMemoryManager() const {
  return channel()->gpu_channel_manager()->gpu_memory_manager();
}
bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"),
               "GPUTask",
               "data",
               DevToolsChannelData::CreateForChannel(channel()));
  FastSetActiveURL(active_url_, active_url_hash_);

  bool have_context = false;
  // Ensure the appropriate GL context is current before handling any IPC
  // messages directed at the command buffer. This ensures that the message
  // handler can assume that the context is current (not necessary for
  // RetireSyncPoint or WaitSyncPoint).
  if (decoder_.get() &&
      message.type() != GpuCommandBufferMsg_SetGetBuffer::ID &&
      message.type() != GpuCommandBufferMsg_WaitForTokenInRange::ID &&
      message.type() != GpuCommandBufferMsg_WaitForGetOffsetInRange::ID &&
      message.type() != GpuCommandBufferMsg_RegisterTransferBuffer::ID &&
      message.type() != GpuCommandBufferMsg_DestroyTransferBuffer::ID &&
      message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID &&
      message.type() != GpuCommandBufferMsg_SignalSyncPoint::ID &&
      message.type() !=
          GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback::ID) {
    if (!MakeCurrent())
      return false;
    have_context = true;
  }

  // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message
  // handlers here. This is so the reply can be delayed if the scheduler is
  // unscheduled.
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize,
                                    OnInitialize);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetGetBuffer,
                                    OnSetGetBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ProduceFrontBuffer,
                        OnProduceFrontBuffer);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForTokenInRange,
                                    OnWaitForTokenInRange);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForGetOffsetInRange,
                                    OnWaitForGetOffsetInRange);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer,
                        OnRegisterTransferBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer,
                        OnDestroyTransferBuffer);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoDecoder,
                                    OnCreateVideoDecoder)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoEncoder,
                                    OnCreateVideoEncoder)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetSurfaceVisible,
                        OnSetSurfaceVisible)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RetireSyncPoint,
                        OnRetireSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPoint,
                        OnSignalSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalQuery,
                        OnSignalQuery)
    IPC_MESSAGE_HANDLER(
        GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback,
        OnSetClientHasMemoryAllocationChangedCallback)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateImage, OnCreateImage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyImage, OnDestroyImage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateStreamTexture,
                        OnCreateStreamTexture)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  CheckCompleteWaits();

  if (have_context) {
    // Ensure that any delayed work that was created will be handled.
    ScheduleDelayedWork(kHandleMoreWorkPeriodMs);
  }

  DCHECK(handled);
  return handled;
}
bool GpuCommandBufferStub::Send(IPC::Message* message) {
  return channel_->Send(message);
}
bool GpuCommandBufferStub::IsScheduled() {
  return (!scheduler_.get() || scheduler_->IsScheduled());
}

bool GpuCommandBufferStub::HasMoreWork() {
  return scheduler_.get() && scheduler_->HasMoreWork();
}
void GpuCommandBufferStub::PollWork() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork");
  delayed_work_scheduled_ = false;
  FastSetActiveURL(active_url_, active_url_hash_);
  if (decoder_.get() && !MakeCurrent())
    return;

  if (scheduler_) {
    uint64 current_messages_processed =
        channel()->gpu_channel_manager()->MessagesProcessed();
    // We're idle when no messages were processed or scheduled.
    bool is_idle =
        (previous_messages_processed_ == current_messages_processed) &&
        !channel()->gpu_channel_manager()->HandleMessagesScheduled();
    if (!is_idle && !last_idle_time_.is_null()) {
      base::TimeDelta time_since_idle =
          base::TimeTicks::Now() - last_idle_time_;
      base::TimeDelta max_time_since_idle =
          base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs);

      // Force idle when it's been too long since last time we were idle.
      if (time_since_idle > max_time_since_idle)
        is_idle = true;
    }

    if (is_idle) {
      last_idle_time_ = base::TimeTicks::Now();
      scheduler_->PerformIdleWork();
    }
  }
  ScheduleDelayedWork(kHandleMoreWorkPeriodBusyMs);
}
bool GpuCommandBufferStub::HasUnprocessedCommands() {
  if (command_buffer_) {
    gpu::CommandBuffer::State state = command_buffer_->GetLastState();
    return command_buffer_->GetPutOffset() != state.get_offset &&
           !gpu::error::IsError(state.error);
  }
  return false;
}
void GpuCommandBufferStub::ScheduleDelayedWork(int64 delay) {
  if (!HasMoreWork()) {
    last_idle_time_ = base::TimeTicks();
    return;
  }

  if (delayed_work_scheduled_)
    return;
  delayed_work_scheduled_ = true;

  // Idle when no messages are processed between now and when
  // PollWork is called.
  previous_messages_processed_ =
      channel()->gpu_channel_manager()->MessagesProcessed();
  if (last_idle_time_.is_null())
    last_idle_time_ = base::TimeTicks::Now();

  // IsScheduled() returns true after passing all unschedule fences
  // and this is when we can start performing idle work. Idle work
  // is done synchronously so we can set delay to 0 and instead poll
  // for more work at the rate idle work is performed. This also ensures
  // that idle work is done as efficiently as possible without any
  // unnecessary delays.
  if (scheduler_.get() &&
      scheduler_->IsScheduled() &&
      scheduler_->HasMoreIdleWork()) {
    delay = 0;
  }

  task_runner_->PostDelayedTask(
      FROM_HERE, base::Bind(&GpuCommandBufferStub::PollWork, AsWeakPtr()),
      base::TimeDelta::FromMilliseconds(delay));
}
bool GpuCommandBufferStub::MakeCurrent() {
  if (decoder_->MakeCurrent())
    return true;
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
  command_buffer_->SetParseError(gpu::error::kLostContext);
  CheckContextLost();
  return false;
}
void GpuCommandBufferStub::Destroy() {
  if (wait_for_token_) {
    Send(wait_for_token_->reply.release());
    wait_for_token_.reset();
  }
  if (wait_for_get_offset_) {
    Send(wait_for_get_offset_->reply.release());
    wait_for_get_offset_.reset();
  }

  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  if (handle_.is_null() && !active_url_.is_empty()) {
    gpu_channel_manager->Send(
        new GpuHostMsg_DidDestroyOffscreenContext(active_url_));
  }
  gpu_channel_manager->Send(
      new GpuHostMsg_DestroyCommandBuffer(surface_id()));

  memory_manager_client_state_.reset();

  while (!sync_points_.empty())
    OnRetireSyncPoint(sync_points_.front());

  if (decoder_)
    decoder_->set_engine(NULL);

  // The scheduler has raw references to the decoder and the command buffer so
  // destroy it before those.
  scheduler_.reset();

  bool have_context = false;
  if (decoder_ && decoder_->GetGLContext()) {
    // Try to make the context current regardless of whether it was lost, so we
    // don't leak resources.
    have_context = decoder_->GetGLContext()->MakeCurrent(surface_.get());
  }
  FOR_EACH_OBSERVER(DestructionObserver,
                    destruction_observers_,
                    OnWillDestroyStub());

  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }

  command_buffer_.reset();

  // Remove this after crbug.com/248395 is sorted out.
  surface_ = NULL;
}

void GpuCommandBufferStub::OnInitializeFailed(IPC::Message* reply_message) {
  Destroy();
  GpuCommandBufferMsg_Initialize::WriteReplyParams(
      reply_message, false, gpu::Capabilities());
  Send(reply_message);
}
void GpuCommandBufferStub::OnInitialize(
    base::SharedMemoryHandle shared_state_handle,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnInitialize");
  DCHECK(!command_buffer_.get());

  scoped_ptr<base::SharedMemory> shared_state_shm(
      new base::SharedMemory(shared_state_handle, false));

  command_buffer_.reset(new gpu::CommandBufferService(
      context_group_->transfer_buffer_manager()));

  bool result = command_buffer_->Initialize();
  DCHECK(result);

  decoder_.reset(::gpu::gles2::GLES2Decoder::Create(context_group_.get()));
  scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(),
                                         decoder_.get(),
                                         decoder_.get()));
  if (preemption_flag_.get())
    scheduler_->SetPreemptByFlag(preemption_flag_);

  decoder_->set_engine(scheduler_.get());
  if (!handle_.is_null()) {
#if defined(OS_MACOSX) || defined(UI_COMPOSITOR_IMAGE_TRANSPORT)
    if (software_) {
      LOG(ERROR) << "No software support.";
      OnInitializeFailed(reply_message);
      return;
    }
#endif

    surface_ = ImageTransportSurface::CreateSurface(
        channel_->gpu_channel_manager(),
        this,
        handle_);
  } else {
    GpuChannelManager* manager = channel_->gpu_channel_manager();
    surface_ = manager->GetDefaultOffscreenSurface();
  }

  if (!surface_.get()) {
    DLOG(ERROR) << "Failed to create surface.";
    OnInitializeFailed(reply_message);
    return;
  }
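  // With virtualized contexts, one real GL context is shared by all stubs in
  // the share group; each stub gets a GLContextVirtual that restores its own
  // GL state when it becomes current. The real context is created (and cached
  // on the share group) the first time it is needed.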
  scoped_refptr<gfx::GLContext> context;
  if (use_virtualized_gl_context_ && channel_->share_group()) {
    context = channel_->share_group()->GetSharedContext();
    if (!context.get()) {
      context = gfx::GLContext::CreateGLContext(
          channel_->share_group(),
          channel_->gpu_channel_manager()->GetDefaultOffscreenSurface(),
          gpu_preference_);
      if (!context.get()) {
        DLOG(ERROR) << "Failed to create shared context for virtualization.";
        OnInitializeFailed(reply_message);
        return;
      }
      channel_->share_group()->SetSharedContext(context.get());
    }
    // This should be a non-virtual GL context.
    DCHECK(context->GetHandle());
    context = new gpu::GLContextVirtual(
        channel_->share_group(), context.get(), decoder_->AsWeakPtr());
    if (!context->Initialize(surface_.get(), gpu_preference_)) {
      // TODO(sievers): The real context created above for the default
      // offscreen surface might not be compatible with this surface.
      // Need to adjust at least GLX to be able to create the initial context
      // with a config that is compatible with onscreen and offscreen surfaces.
      context = NULL;

      DLOG(ERROR) << "Failed to initialize virtual GL context.";
      OnInitializeFailed(reply_message);
      return;
    }
  }
  if (!context.get()) {
    context = gfx::GLContext::CreateGLContext(
        channel_->share_group(), surface_.get(), gpu_preference_);
  }
  if (!context.get()) {
    DLOG(ERROR) << "Failed to create context.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Failed to make context current.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->GetGLStateRestorer()) {
    context->SetGLStateRestorer(
        new gpu::GLStateRestorerImpl(decoder_->AsWeakPtr()));
  }

  if (!context->GetTotalGpuMemory(&total_gpu_memory_))
    total_gpu_memory_ = 0;
  if (!context_group_->has_program_cache() &&
      !context_group_->feature_info()->workarounds().disable_program_cache) {
    context_group_->set_program_cache(
        channel_->gpu_channel_manager()->program_cache());
  }
  // Initialize the decoder with either the view or pbuffer GLContext.
  if (!decoder_->Initialize(surface_,
                            context,
                            !surface_id(),
                            initial_size_,
                            disallowed_features_,
                            requested_attribs_)) {
    DLOG(ERROR) << "Failed to initialize decoder.";
    OnInitializeFailed(reply_message);
    return;
  }
  if (base::CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kEnableGPUServiceLogging)) {
    decoder_->set_log_commands(true);
  }

  decoder_->GetLogger()->SetMsgCallback(
      base::Bind(&GpuCommandBufferStub::SendConsoleMessage,
                 base::Unretained(this)));
  decoder_->SetShaderCacheCallback(
      base::Bind(&GpuCommandBufferStub::SendCachedShader,
                 base::Unretained(this)));
  decoder_->SetWaitSyncPointCallback(
      base::Bind(&GpuCommandBufferStub::OnWaitSyncPoint,
                 base::Unretained(this)));
  command_buffer_->SetPutOffsetChangeCallback(
      base::Bind(&GpuCommandBufferStub::PutChanged, base::Unretained(this)));
  command_buffer_->SetGetBufferChangeCallback(
      base::Bind(&gpu::GpuScheduler::SetGetBuffer,
                 base::Unretained(scheduler_.get())));
  command_buffer_->SetParseErrorCallback(
      base::Bind(&GpuCommandBufferStub::OnParseError, base::Unretained(this)));
  scheduler_->SetSchedulingChangedCallback(
      base::Bind(&GpuChannel::StubSchedulingChanged,
                 base::Unretained(channel_)));

  if (watchdog_) {
    scheduler_->SetCommandProcessedCallback(
        base::Bind(&GpuCommandBufferStub::OnCommandProcessed,
                   base::Unretained(this)));
  }
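  // The shared state buffer mirrors command buffer state (e.g. get offset,
  // token, error) into memory the client process can read directly, so the
  // client does not need a synchronous IPC to poll command buffer progress.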
  const size_t kSharedStateSize = sizeof(gpu::CommandBufferSharedState);
  if (!shared_state_shm->Map(kSharedStateSize)) {
    DLOG(ERROR) << "Failed to map shared state buffer.";
    OnInitializeFailed(reply_message);
    return;
  }
  command_buffer_->SetSharedStateBuffer(gpu::MakeBackingFromSharedMemory(
      shared_state_shm.Pass(), kSharedStateSize));

  gpu::Capabilities capabilities = decoder_->GetCapabilities();
  capabilities.future_sync_points = channel_->allow_future_sync_points();

  GpuCommandBufferMsg_Initialize::WriteReplyParams(
      reply_message, true, capabilities);
  Send(reply_message);

  if (handle_.is_null() && !active_url_.is_empty()) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(new GpuHostMsg_DidCreateOffscreenContext(
        active_url_));
  }

  initialized_ = true;
}
void GpuCommandBufferStub::OnCreateStreamTexture(
    uint32 texture_id, int32 stream_id, bool* succeeded) {
#if defined(OS_ANDROID)
  *succeeded = StreamTexture::Create(this, texture_id, stream_id);
#else
  *succeeded = false;
#endif
}
void GpuCommandBufferStub::SetLatencyInfoCallback(
    const LatencyInfoCallback& callback) {
  latency_info_callback_ = callback;
}
int32 GpuCommandBufferStub::GetRequestedAttribute(int attr) const {
  // The requested attributes are stored as (enum, value) pairs; scan for the
  // requested attribute and return its value.
  for (std::vector<int32>::const_iterator it = requested_attribs_.begin();
       it != requested_attribs_.end(); ++it) {
    if (*it++ == attr) {
      return *it;
    }
  }
  return -1;
}
void GpuCommandBufferStub::OnSetGetBuffer(int32 shm_id,
                                          IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetGetBuffer");
  if (command_buffer_)
    command_buffer_->SetGetBuffer(shm_id);
  Send(reply_message);
}
void GpuCommandBufferStub::OnProduceFrontBuffer(const gpu::Mailbox& mailbox) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnProduceFrontBuffer");
  if (!decoder_) {
    LOG(ERROR) << "Can't produce front buffer before initialization.";
    return;
  }

  decoder_->ProduceFrontBuffer(mailbox);
}
void GpuCommandBufferStub::OnParseError() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnParseError");
  DCHECK(command_buffer_.get());
  gpu::CommandBuffer::State state = command_buffer_->GetLastState();
  IPC::Message* msg = new GpuCommandBufferMsg_Destroyed(
      route_id_, state.context_lost_reason, state.error);
  msg->set_unblock(true);
  Send(msg);

  // Tell the browser about this context loss as well, so it can
  // determine whether client APIs like WebGL need to be immediately
  // blocked from automatically running.
  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DidLoseContext(
      handle_.is_null(), state.context_lost_reason, active_url_));

  CheckContextLost();
}
void GpuCommandBufferStub::OnWaitForTokenInRange(int32 start,
                                                 int32 end,
                                                 IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForTokenInRange");
  DCHECK(command_buffer_.get());
  CheckContextLost();
  if (wait_for_token_)
    LOG(ERROR) << "Got WaitForToken command while currently waiting for token.";
  wait_for_token_ =
      make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
  CheckCompleteWaits();
}
void GpuCommandBufferStub::OnWaitForGetOffsetInRange(
    int32 start,
    int32 end,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForGetOffsetInRange");
  DCHECK(command_buffer_.get());
  CheckContextLost();
  if (wait_for_get_offset_) {
    LOG(ERROR)
        << "Got WaitForGetOffset command while currently waiting for offset.";
  }
  wait_for_get_offset_ =
      make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
  CheckCompleteWaits();
}
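// Completes any pending WaitForTokenInRange/WaitForGetOffsetInRange request:
// once the last-known command buffer state reaches the requested range (or an
// error is set), the deferred reply is written and sent back to the client.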
void GpuCommandBufferStub::CheckCompleteWaits() {
  if (wait_for_token_ || wait_for_get_offset_) {
    gpu::CommandBuffer::State state = command_buffer_->GetLastState();
    if (wait_for_token_ &&
        (gpu::CommandBuffer::InRange(
             wait_for_token_->start, wait_for_token_->end, state.token) ||
         state.error != gpu::error::kNoError)) {
      ReportState();
      GpuCommandBufferMsg_WaitForTokenInRange::WriteReplyParams(
          wait_for_token_->reply.get(), state);
      Send(wait_for_token_->reply.release());
      wait_for_token_.reset();
    }
    if (wait_for_get_offset_ &&
        (gpu::CommandBuffer::InRange(wait_for_get_offset_->start,
                                     wait_for_get_offset_->end,
                                     state.get_offset) ||
         state.error != gpu::error::kNoError)) {
      ReportState();
      GpuCommandBufferMsg_WaitForGetOffsetInRange::WriteReplyParams(
          wait_for_get_offset_->reply.get(), state);
      Send(wait_for_get_offset_->reply.release());
      wait_for_get_offset_.reset();
    }
  }
}
void GpuCommandBufferStub::OnAsyncFlush(
    int32 put_offset,
    uint32 flush_count,
    const std::vector<ui::LatencyInfo>& latency_info) {
  TRACE_EVENT1(
      "gpu", "GpuCommandBufferStub::OnAsyncFlush", "put_offset", put_offset);

  if (ui::LatencyInfo::Verify(latency_info,
                              "GpuCommandBufferStub::OnAsyncFlush") &&
      !latency_info_callback_.is_null()) {
    latency_info_callback_.Run(latency_info);
  }
  DCHECK(command_buffer_.get());
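  // |flush_count| increases monotonically but may wrap around; the unsigned
  // subtraction below treats any difference smaller than 2^27 (0x8000000) as
  // "newer", so in-order flushes are still accepted across the wrap.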
  if (flush_count - last_flush_count_ < 0x8000000U) {
    last_flush_count_ = flush_count;
    command_buffer_->Flush(put_offset);
  } else {
    // We received this message out-of-order. This should not happen but is
    // here to catch regressions. Ignore the message.
    NOTREACHED() << "Received a Flush message out-of-order";
  }

  ReportState();
}
void GpuCommandBufferStub::OnRescheduled() {
  gpu::CommandBuffer::State pre_state = command_buffer_->GetLastState();
  command_buffer_->Flush(command_buffer_->GetPutOffset());
  gpu::CommandBuffer::State post_state = command_buffer_->GetLastState();

  if (pre_state.get_offset != post_state.get_offset)
    ReportState();
}
void GpuCommandBufferStub::OnRegisterTransferBuffer(
    int32 id,
    base::SharedMemoryHandle transfer_buffer,
    uint32 size) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterTransferBuffer");

  // Take ownership of the memory and map it into this process.
  // This validates the size.
  scoped_ptr<base::SharedMemory> shared_memory(
      new base::SharedMemory(transfer_buffer, false));
  if (!shared_memory->Map(size)) {
    DVLOG(0) << "Failed to map shared memory.";
    return;
  }

  if (command_buffer_) {
    command_buffer_->RegisterTransferBuffer(
        id, gpu::MakeBackingFromSharedMemory(shared_memory.Pass(), size));
  }
}
void GpuCommandBufferStub::OnDestroyTransferBuffer(int32 id) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyTransferBuffer");

  if (command_buffer_)
    command_buffer_->DestroyTransferBuffer(id);
}
void GpuCommandBufferStub::OnCommandProcessed() {
  if (watchdog_)
    watchdog_->CheckArmed();
}

void GpuCommandBufferStub::ReportState() { command_buffer_->UpdateState(); }
void GpuCommandBufferStub::PutChanged() {
  FastSetActiveURL(active_url_, active_url_hash_);
  scheduler_->PutChanged();
}
void GpuCommandBufferStub::OnCreateVideoDecoder(
    media::VideoCodecProfile profile,
    int32 decoder_route_id,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoDecoder");
  GpuVideoDecodeAccelerator* decoder = new GpuVideoDecodeAccelerator(
      decoder_route_id, this, channel_->io_task_runner());
  decoder->Initialize(profile, reply_message);
  // decoder is registered as a DestructionObserver of this stub and will
  // self-delete during destruction of this stub.
}
void GpuCommandBufferStub::OnCreateVideoEncoder(
    media::VideoPixelFormat input_format,
    const gfx::Size& input_visible_size,
    media::VideoCodecProfile output_profile,
    uint32 initial_bitrate,
    int32 encoder_route_id,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoEncoder");
  GpuVideoEncodeAccelerator* encoder =
      new GpuVideoEncodeAccelerator(encoder_route_id, this);
  encoder->Initialize(input_format,
                      input_visible_size,
                      output_profile,
                      initial_bitrate,
                      reply_message);
  // encoder is registered as a DestructionObserver of this stub and will
  // self-delete during destruction of this stub.
}
void GpuCommandBufferStub::OnSetSurfaceVisible(bool visible) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetSurfaceVisible");
  if (memory_manager_client_state_)
    memory_manager_client_state_->SetVisible(visible);
}
void GpuCommandBufferStub::AddSyncPoint(uint32 sync_point) {
  sync_points_.push_back(sync_point);
}
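// Sync points must retire in insertion order. When one retires, texture
// updates are pushed to the mailbox manager (if it needs explicit
// synchronization) so that other contexts observe them.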
void GpuCommandBufferStub::OnRetireSyncPoint(uint32 sync_point) {
  DCHECK(!sync_points_.empty() && sync_points_.front() == sync_point);
  sync_points_.pop_front();

  gpu::gles2::MailboxManager* mailbox_manager =
      context_group_->mailbox_manager();
  if (mailbox_manager->UsesSync() && MakeCurrent())
    mailbox_manager->PushTextureUpdates(sync_point);

  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->RetireSyncPoint(sync_point);
}
bool GpuCommandBufferStub::OnWaitSyncPoint(uint32 sync_point) {
  if (!sync_point)
    return true;
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  if (manager->sync_point_manager()->IsSyncPointRetired(sync_point)) {
    PullTextureUpdates(sync_point);
    return true;
  }
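  // The sync point has not retired yet: deschedule this stub and register a
  // callback that re-schedules it (via the stub's task runner) once the sync
  // point retires. OnWaitSyncPointCompleted then pulls texture updates.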
  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitSyncPoint", this,
                             "GpuCommandBufferStub", this);
  }
  scheduler_->SetScheduled(false);
  ++sync_point_wait_count_;
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point,
      base::Bind(&RunOnThread, task_runner_,
                 base::Bind(&GpuCommandBufferStub::OnWaitSyncPointCompleted,
                            this->AsWeakPtr(), sync_point)));
  return scheduler_->IsScheduled();
}
void GpuCommandBufferStub::OnWaitSyncPointCompleted(uint32 sync_point) {
  PullTextureUpdates(sync_point);
  --sync_point_wait_count_;
  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_END1("gpu", "WaitSyncPoint", this,
                           "GpuCommandBufferStub", this);
  }
  scheduler_->SetScheduled(true);
}
void GpuCommandBufferStub::PullTextureUpdates(uint32 sync_point) {
  gpu::gles2::MailboxManager* mailbox_manager =
      context_group_->mailbox_manager();
  if (mailbox_manager->UsesSync() && MakeCurrent())
    mailbox_manager->PullTextureUpdates(sync_point);
}
void GpuCommandBufferStub::OnSignalSyncPoint(uint32 sync_point, uint32 id) {
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point,
      base::Bind(&RunOnThread, task_runner_,
                 base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
                            this->AsWeakPtr(), id)));
}
void GpuCommandBufferStub::OnSignalSyncPointAck(uint32 id) {
  Send(new GpuCommandBufferMsg_SignalSyncPointAck(route_id_, id));
}
void GpuCommandBufferStub::OnSignalQuery(uint32 query_id, uint32 id) {
  if (decoder_) {
    gpu::gles2::QueryManager* query_manager = decoder_->GetQueryManager();
    if (query_manager) {
      gpu::gles2::QueryManager::Query* query =
          query_manager->GetQuery(query_id);
      if (query) {
        query->AddCallback(
            base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
                       this->AsWeakPtr(),
                       id));
        return;
      }
    }
  }
  // Something went wrong, run callback immediately.
  OnSignalSyncPointAck(id);
}
void GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback(
    bool has_callback) {
  TRACE_EVENT0(
      "gpu",
      "GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback");
  if (has_callback) {
    if (!memory_manager_client_state_) {
      memory_manager_client_state_.reset(GetMemoryManager()->CreateClientState(
          this, surface_id_ != 0, true));
    }
  } else {
    memory_manager_client_state_.reset();
  }
}
void GpuCommandBufferStub::OnCreateImage(int32 id,
                                         gfx::GpuMemoryBufferHandle handle,
                                         gfx::Size size,
                                         gfx::BufferFormat format,
                                         uint32 internalformat) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateImage");

  if (!decoder_)
    return;

  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  if (image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image already exists with same ID.";
    return;
  }

  if (!gpu::ImageFactory::IsGpuMemoryBufferFormatSupported(
          format, decoder_->GetCapabilities())) {
    LOG(ERROR) << "Format is not supported.";
    return;
  }

  if (!gpu::ImageFactory::IsImageSizeValidForGpuMemoryBufferFormat(size,
                                                                   format)) {
    LOG(ERROR) << "Invalid image size for format.";
    return;
  }

  if (!gpu::ImageFactory::IsImageFormatCompatibleWithGpuMemoryBufferFormat(
          internalformat, format)) {
    LOG(ERROR) << "Incompatible image format.";
    return;
  }

  scoped_refptr<gfx::GLImage> image = channel()->CreateImageForGpuMemoryBuffer(
      handle, size, format, internalformat);
  if (!image.get())
    return;

  image_manager->AddImage(image.get(), id);
}
void GpuCommandBufferStub::OnDestroyImage(int32 id) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyImage");

  if (!decoder_)
    return;

  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  if (!image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image with ID doesn't exist.";
    return;
  }

  image_manager->RemoveImage(id);
}
void GpuCommandBufferStub::SendConsoleMessage(
    int32 id,
    const std::string& message) {
  GPUCommandBufferConsoleMessage console_message;
  console_message.id = id;
  console_message.message = message;
  IPC::Message* msg = new GpuCommandBufferMsg_ConsoleMsg(
      route_id_, console_message);
  msg->set_unblock(true);
  Send(msg);
}
void GpuCommandBufferStub::SendCachedShader(
    const std::string& key, const std::string& shader) {
  channel_->CacheShader(key, shader);
}
void GpuCommandBufferStub::AddDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.AddObserver(observer);
}

void GpuCommandBufferStub::RemoveDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.RemoveObserver(observer);
}
void GpuCommandBufferStub::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> flag) {
  preemption_flag_ = flag;
  if (scheduler_)
    scheduler_->SetPreemptByFlag(preemption_flag_);
}
bool GpuCommandBufferStub::GetTotalGpuMemory(uint64* bytes) {
  *bytes = total_gpu_memory_;
  return !!total_gpu_memory_;
}
gfx::Size GpuCommandBufferStub::GetSurfaceSize() const {
  if (!surface_.get())
    return gfx::Size();
  return surface_->GetSize();
}
const gpu::gles2::FeatureInfo* GpuCommandBufferStub::GetFeatureInfo() const {
  return context_group_->feature_info();
}

gpu::gles2::MemoryTracker* GpuCommandBufferStub::GetMemoryTracker() const {
  return context_group_->memory_tracker();
}
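// Sends the new allocation to the client only when it differs from the last
// allocation sent, avoiding redundant IPC traffic.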
void GpuCommandBufferStub::SetMemoryAllocation(
    const gpu::MemoryAllocation& allocation) {
  if (!last_memory_allocation_valid_ ||
      !allocation.Equals(last_memory_allocation_)) {
    Send(new GpuCommandBufferMsg_SetMemoryAllocation(
        route_id_, allocation));
  }

  last_memory_allocation_valid_ = true;
  last_memory_allocation_ = allocation;
}
void GpuCommandBufferStub::SuggestHaveFrontBuffer(
    bool suggest_have_frontbuffer) {
  // This can be called outside of OnMessageReceived, so the context needs
  // to be made current before calling methods on the surface.
  if (surface_.get() && MakeCurrent())
    surface_->SetFrontbufferAllocation(suggest_have_frontbuffer);
}
bool GpuCommandBufferStub::CheckContextLost() {
  DCHECK(command_buffer_);
  gpu::CommandBuffer::State state = command_buffer_->GetLastState();
  bool was_lost = state.error == gpu::error::kLostContext;

  if (was_lost) {
    bool was_lost_by_robustness =
        decoder_ && decoder_->WasContextLostByRobustnessExtension();

    // Work around issues with recovery by allowing a new GPU process to
    // launch.
    if ((was_lost_by_robustness ||
         context_group_->feature_info()->workarounds().exit_on_context_lost) &&
        !base::CommandLine::ForCurrentProcess()->HasSwitch(
            switches::kSingleProcess) &&
        !base::CommandLine::ForCurrentProcess()->HasSwitch(
            switches::kInProcessGPU)) {
      LOG(ERROR) << "Exiting GPU process because some drivers cannot recover"
                 << " from problems.";
#if defined(OS_WIN)
      base::win::SetShouldCrashOnProcessDetach(false);
#endif
      exit(0);
    }

    // Lose all other contexts if the reset was triggered by the robustness
    // extension instead of being synthetic.
    if (was_lost_by_robustness &&
        (gfx::GLContext::LosesAllContextsOnContextLost() ||
         use_virtualized_gl_context_)) {
      channel_->LoseAllContexts();
    }
  }

  CheckCompleteWaits();
  return was_lost;
}
void GpuCommandBufferStub::MarkContextLost() {
  if (!command_buffer_ ||
      command_buffer_->GetLastState().error == gpu::error::kLostContext)
    return;

  command_buffer_->SetContextLostReason(gpu::error::kUnknown);
  if (decoder_)
    decoder_->MarkContextLost(gpu::error::kUnknown);
  command_buffer_->SetParseError(gpu::error::kLostContext);
}
void GpuCommandBufferStub::SendSwapBuffersCompleted(
    const std::vector<ui::LatencyInfo>& latency_info,
    gfx::SwapResult result) {
  Send(new GpuCommandBufferMsg_SwapBuffersCompleted(route_id_, latency_info,
                                                    result));
}
void GpuCommandBufferStub::SendUpdateVSyncParameters(base::TimeTicks timebase,
                                                     base::TimeDelta interval) {
  Send(new GpuCommandBufferMsg_UpdateVSyncParameters(route_id_, timebase,
                                                     interval));
}

}  // namespace content