// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "gpu/command_buffer/service/gpu_scheduler.h"

#include "base/bind.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/debug/trace_event.h"
#include "base/message_loop/message_loop.h"
#include "base/time/time.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_fence.h"
#include "ui/gl/gl_switches.h"

#if defined(OS_WIN)
#include "base/win/windows_version.h"
#endif
21 using ::base::SharedMemory
;
25 const int64 kUnscheduleFenceTimeOutDelay
= 10000;
28 const int64 kRescheduleTimeOutDelay
= 1000;
31 GpuScheduler::GpuScheduler(CommandBufferServiceBase
* command_buffer
,
32 AsyncAPIInterface
* handler
,
33 gles2::GLES2Decoder
* decoder
)
34 : command_buffer_(command_buffer
),
37 unscheduled_count_(0),
38 rescheduled_count_(0),
39 was_preempted_(false),
40 reschedule_task_factory_(this) {}
42 GpuScheduler::~GpuScheduler() {
45 void GpuScheduler::PutChanged() {
47 "gpu", "GpuScheduler:PutChanged",
48 "decoder", decoder_
? decoder_
->GetLogger()->GetLogPrefix() : "None");
50 CommandBuffer::State state
= command_buffer_
->GetLastState();
52 // If there is no parser, exit.
54 DCHECK_EQ(state
.get_offset
, state
.put_offset
);
58 parser_
->set_put(state
.put_offset
);
59 if (state
.error
!= error::kNoError
)
62 // Check that the GPU has passed all fences.
63 if (!PollUnscheduleFences())
66 // One of the unschedule fence tasks might have unscheduled us.
70 base::TimeTicks
begin_time(base::TimeTicks::HighResNow());
71 error::Error error
= error::kNoError
;
73 decoder_
->BeginDecoding();
74 while (!parser_
->IsEmpty()) {
78 DCHECK(IsScheduled());
79 DCHECK(unschedule_fences_
.empty());
81 error
= parser_
->ProcessCommand();
83 if (error
== error::kDeferCommandUntilLater
) {
84 DCHECK_GT(unscheduled_count_
, 0);
88 // TODO(piman): various classes duplicate various pieces of state, leading
89 // to needlessly complex update logic. It should be possible to simply
90 // share the state across all of them.
91 command_buffer_
->SetGetOffset(static_cast<int32
>(parser_
->get()));
93 if (error::IsError(error
)) {
94 LOG(ERROR
) << "[" << decoder_
<< "] "
95 << "GPU PARSE ERROR: " << error
;
96 command_buffer_
->SetContextLostReason(decoder_
->GetContextLostReason());
97 command_buffer_
->SetParseError(error
);
101 if (!command_processed_callback_
.is_null())
102 command_processed_callback_
.Run();
104 if (unscheduled_count_
> 0)
109 if (!error::IsError(error
) && decoder_
->WasContextLost()) {
110 command_buffer_
->SetContextLostReason(decoder_
->GetContextLostReason());
111 command_buffer_
->SetParseError(error::kLostContext
);
113 decoder_
->EndDecoding();
114 decoder_
->AddProcessingCommandsTime(
115 base::TimeTicks::HighResNow() - begin_time
);
119 void GpuScheduler::SetScheduled(bool scheduled
) {
120 TRACE_EVENT2("gpu", "GpuScheduler:SetScheduled", "this", this,
121 "new unscheduled_count_",
122 unscheduled_count_
+ (scheduled
? -1 : 1));
124 // If the scheduler was rescheduled after a timeout, ignore the subsequent
125 // calls to SetScheduled when they eventually arrive until they are all
127 if (rescheduled_count_
> 0) {
128 --rescheduled_count_
;
131 --unscheduled_count_
;
134 DCHECK_GE(unscheduled_count_
, 0);
136 if (unscheduled_count_
== 0) {
137 TRACE_EVENT_ASYNC_END1("gpu", "ProcessingSwap", this,
138 "GpuScheduler", this);
139 // When the scheduler transitions from the unscheduled to the scheduled
140 // state, cancel the task that would reschedule it after a timeout.
141 reschedule_task_factory_
.InvalidateWeakPtrs();
143 if (!scheduling_changed_callback_
.is_null())
144 scheduling_changed_callback_
.Run(true);
147 ++unscheduled_count_
;
148 if (unscheduled_count_
== 1) {
149 TRACE_EVENT_ASYNC_BEGIN1("gpu", "ProcessingSwap", this,
150 "GpuScheduler", this);
152 if (base::win::GetVersion() < base::win::VERSION_VISTA
) {
153 // When the scheduler transitions from scheduled to unscheduled, post a
154 // delayed task that it will force it back into a scheduled state after
155 // a timeout. This should only be necessary on pre-Vista.
156 base::MessageLoop::current()->PostDelayedTask(
158 base::Bind(&GpuScheduler::RescheduleTimeOut
,
159 reschedule_task_factory_
.GetWeakPtr()),
160 base::TimeDelta::FromMilliseconds(kRescheduleTimeOutDelay
));
163 if (!scheduling_changed_callback_
.is_null())
164 scheduling_changed_callback_
.Run(false);
169 bool GpuScheduler::IsScheduled() {
170 return unscheduled_count_
== 0;
173 bool GpuScheduler::HasMoreWork() {
174 return !unschedule_fences_
.empty() ||
175 (decoder_
&& decoder_
->ProcessPendingQueries()) ||
179 void GpuScheduler::SetSchedulingChangedCallback(
180 const SchedulingChangedCallback
& callback
) {
181 scheduling_changed_callback_
= callback
;
184 scoped_refptr
<Buffer
> GpuScheduler::GetSharedMemoryBuffer(int32 shm_id
) {
185 return command_buffer_
->GetTransferBuffer(shm_id
);
188 void GpuScheduler::set_token(int32 token
) {
189 command_buffer_
->SetToken(token
);
192 bool GpuScheduler::SetGetBuffer(int32 transfer_buffer_id
) {
193 scoped_refptr
<Buffer
> ring_buffer
=
194 command_buffer_
->GetTransferBuffer(transfer_buffer_id
);
195 if (!ring_buffer
.get()) {
199 if (!parser_
.get()) {
200 parser_
.reset(new CommandParser(handler_
));
204 ring_buffer
->memory(), ring_buffer
->size(), 0, ring_buffer
->size());
210 bool GpuScheduler::SetGetOffset(int32 offset
) {
211 if (parser_
->set_get(offset
)) {
212 command_buffer_
->SetGetOffset(static_cast<int32
>(parser_
->get()));
218 int32
GpuScheduler::GetGetOffset() {
219 return parser_
->get();
222 void GpuScheduler::SetCommandProcessedCallback(
223 const base::Closure
& callback
) {
224 command_processed_callback_
= callback
;
227 void GpuScheduler::DeferToFence(base::Closure task
) {
228 unschedule_fences_
.push(make_linked_ptr(
229 new UnscheduleFence(gfx::GLFence::Create(), task
)));
233 bool GpuScheduler::PollUnscheduleFences() {
234 if (unschedule_fences_
.empty())
237 if (unschedule_fences_
.front()->fence
.get()) {
238 base::Time now
= base::Time::Now();
239 base::TimeDelta timeout
=
240 base::TimeDelta::FromMilliseconds(kUnscheduleFenceTimeOutDelay
);
242 while (!unschedule_fences_
.empty()) {
243 const UnscheduleFence
& fence
= *unschedule_fences_
.front();
244 if (fence
.fence
->HasCompleted() ||
245 now
- fence
.issue_time
> timeout
) {
246 unschedule_fences_
.front()->task
.Run();
247 unschedule_fences_
.pop();
256 while (!unschedule_fences_
.empty()) {
257 unschedule_fences_
.front()->task
.Run();
258 unschedule_fences_
.pop();
266 bool GpuScheduler::IsPreempted() {
267 if (!preemption_flag_
.get())
270 if (!was_preempted_
&& preemption_flag_
->IsSet()) {
271 TRACE_COUNTER_ID1("gpu", "GpuScheduler::Preempted", this, 1);
272 was_preempted_
= true;
273 } else if (was_preempted_
&& !preemption_flag_
->IsSet()) {
274 TRACE_COUNTER_ID1("gpu", "GpuScheduler::Preempted", this, 0);
275 was_preempted_
= false;
278 return preemption_flag_
->IsSet();
281 bool GpuScheduler::HasMoreIdleWork() {
282 return (decoder_
&& decoder_
->HasMoreIdleWork());
285 void GpuScheduler::PerformIdleWork() {
288 decoder_
->PerformIdleWork();
291 void GpuScheduler::RescheduleTimeOut() {
292 int new_count
= unscheduled_count_
+ rescheduled_count_
;
294 rescheduled_count_
= 0;
296 while (unscheduled_count_
)
299 rescheduled_count_
= new_count
;
302 GpuScheduler::UnscheduleFence::UnscheduleFence(gfx::GLFence
* fence_
,
305 issue_time(base::Time::Now()),
309 GpuScheduler::UnscheduleFence::~UnscheduleFence() {