1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "gpu/command_buffer/service/gpu_scheduler.h"
8 #include "base/command_line.h"
9 #include "base/compiler_specific.h"
10 #include "base/debug/trace_event.h"
11 #include "base/message_loop/message_loop.h"
12 #include "base/time/time.h"
13 #include "ui/gl/gl_bindings.h"
14 #include "ui/gl/gl_fence.h"
15 #include "ui/gl/gl_switches.h"
18 #include "base/win/windows_version.h"
21 using ::base::SharedMemory
;
// Maximum time (in milliseconds) to wait on an unschedule fence: in
// PollUnscheduleFences, a fence whose issue time is older than this is
// treated as passed and its deferred task is run anyway.
const int64 kUnscheduleFenceTimeOutDelay = 10000;

// Delay (in milliseconds) before RescheduleTimeOut forces an unscheduled
// scheduler back into the scheduled state (pre-Vista workaround; see
// SetScheduled).
const int64 kRescheduleTimeOutDelay = 1000;
31 GpuScheduler::GpuScheduler(CommandBufferServiceBase
* command_buffer
,
32 AsyncAPIInterface
* handler
,
33 gles2::GLES2Decoder
* decoder
)
34 : command_buffer_(command_buffer
),
37 unscheduled_count_(0),
38 rescheduled_count_(0),
39 was_preempted_(false),
40 reschedule_task_factory_(this) {}
42 GpuScheduler::~GpuScheduler() {
45 void GpuScheduler::PutChanged() {
47 "gpu", "GpuScheduler:PutChanged",
48 "decoder", decoder_
? decoder_
->GetLogger()->GetLogPrefix() : "None");
50 CommandBuffer::State state
= command_buffer_
->GetLastState();
52 // If there is no parser, exit.
54 DCHECK_EQ(state
.get_offset
, state
.put_offset
);
58 parser_
->set_put(state
.put_offset
);
59 if (state
.error
!= error::kNoError
)
62 // Check that the GPU has passed all fences.
63 if (!PollUnscheduleFences())
66 // One of the unschedule fence tasks might have unscheduled us.
70 base::TimeTicks
begin_time(base::TimeTicks::HighResNow());
71 error::Error error
= error::kNoError
;
73 decoder_
->BeginDecoding();
74 while (!parser_
->IsEmpty()) {
78 DCHECK(IsScheduled());
79 DCHECK(unschedule_fences_
.empty());
81 error
= parser_
->ProcessCommands(CommandParser::kParseCommandsSlice
);
83 if (error
== error::kDeferCommandUntilLater
) {
84 DCHECK_GT(unscheduled_count_
, 0);
88 // TODO(piman): various classes duplicate various pieces of state, leading
89 // to needlessly complex update logic. It should be possible to simply
90 // share the state across all of them.
91 command_buffer_
->SetGetOffset(static_cast<int32
>(parser_
->get()));
93 if (error::IsError(error
)) {
94 command_buffer_
->SetContextLostReason(decoder_
->GetContextLostReason());
95 command_buffer_
->SetParseError(error
);
99 if (!command_processed_callback_
.is_null())
100 command_processed_callback_
.Run();
102 if (unscheduled_count_
> 0)
107 if (!error::IsError(error
) && decoder_
->WasContextLost()) {
108 command_buffer_
->SetContextLostReason(decoder_
->GetContextLostReason());
109 command_buffer_
->SetParseError(error::kLostContext
);
111 decoder_
->EndDecoding();
112 decoder_
->AddProcessingCommandsTime(
113 base::TimeTicks::HighResNow() - begin_time
);
117 void GpuScheduler::SetScheduled(bool scheduled
) {
118 TRACE_EVENT2("gpu", "GpuScheduler:SetScheduled", "this", this,
119 "new unscheduled_count_",
120 unscheduled_count_
+ (scheduled
? -1 : 1));
122 // If the scheduler was rescheduled after a timeout, ignore the subsequent
123 // calls to SetScheduled when they eventually arrive until they are all
125 if (rescheduled_count_
> 0) {
126 --rescheduled_count_
;
129 --unscheduled_count_
;
132 DCHECK_GE(unscheduled_count_
, 0);
134 if (unscheduled_count_
== 0) {
135 TRACE_EVENT_ASYNC_END1("gpu", "ProcessingSwap", this,
136 "GpuScheduler", this);
137 // When the scheduler transitions from the unscheduled to the scheduled
138 // state, cancel the task that would reschedule it after a timeout.
139 reschedule_task_factory_
.InvalidateWeakPtrs();
141 if (!scheduling_changed_callback_
.is_null())
142 scheduling_changed_callback_
.Run(true);
145 ++unscheduled_count_
;
146 if (unscheduled_count_
== 1) {
147 TRACE_EVENT_ASYNC_BEGIN1("gpu", "ProcessingSwap", this,
148 "GpuScheduler", this);
150 if (base::win::GetVersion() < base::win::VERSION_VISTA
) {
151 // When the scheduler transitions from scheduled to unscheduled, post a
152 // delayed task that it will force it back into a scheduled state after
153 // a timeout. This should only be necessary on pre-Vista.
154 base::MessageLoop::current()->PostDelayedTask(
156 base::Bind(&GpuScheduler::RescheduleTimeOut
,
157 reschedule_task_factory_
.GetWeakPtr()),
158 base::TimeDelta::FromMilliseconds(kRescheduleTimeOutDelay
));
161 if (!scheduling_changed_callback_
.is_null())
162 scheduling_changed_callback_
.Run(false);
167 bool GpuScheduler::IsScheduled() {
168 return unscheduled_count_
== 0;
171 bool GpuScheduler::HasMoreWork() {
172 return !unschedule_fences_
.empty() ||
173 (decoder_
&& decoder_
->ProcessPendingQueries()) ||
177 void GpuScheduler::SetSchedulingChangedCallback(
178 const SchedulingChangedCallback
& callback
) {
179 scheduling_changed_callback_
= callback
;
182 scoped_refptr
<Buffer
> GpuScheduler::GetSharedMemoryBuffer(int32 shm_id
) {
183 return command_buffer_
->GetTransferBuffer(shm_id
);
186 void GpuScheduler::set_token(int32 token
) {
187 command_buffer_
->SetToken(token
);
190 bool GpuScheduler::SetGetBuffer(int32 transfer_buffer_id
) {
191 scoped_refptr
<Buffer
> ring_buffer
=
192 command_buffer_
->GetTransferBuffer(transfer_buffer_id
);
193 if (!ring_buffer
.get()) {
197 if (!parser_
.get()) {
198 parser_
.reset(new CommandParser(handler_
));
202 ring_buffer
->memory(), ring_buffer
->size(), 0, ring_buffer
->size());
208 bool GpuScheduler::SetGetOffset(int32 offset
) {
209 if (parser_
->set_get(offset
)) {
210 command_buffer_
->SetGetOffset(static_cast<int32
>(parser_
->get()));
216 int32
GpuScheduler::GetGetOffset() {
217 return parser_
->get();
220 void GpuScheduler::SetCommandProcessedCallback(
221 const base::Closure
& callback
) {
222 command_processed_callback_
= callback
;
225 void GpuScheduler::DeferToFence(base::Closure task
) {
226 unschedule_fences_
.push(make_linked_ptr(
227 new UnscheduleFence(gfx::GLFence::Create(), task
)));
231 bool GpuScheduler::PollUnscheduleFences() {
232 if (unschedule_fences_
.empty())
235 if (unschedule_fences_
.front()->fence
.get()) {
236 base::Time now
= base::Time::Now();
237 base::TimeDelta timeout
=
238 base::TimeDelta::FromMilliseconds(kUnscheduleFenceTimeOutDelay
);
240 while (!unschedule_fences_
.empty()) {
241 const UnscheduleFence
& fence
= *unschedule_fences_
.front();
242 if (fence
.fence
->HasCompleted() ||
243 now
- fence
.issue_time
> timeout
) {
244 unschedule_fences_
.front()->task
.Run();
245 unschedule_fences_
.pop();
254 while (!unschedule_fences_
.empty()) {
255 unschedule_fences_
.front()->task
.Run();
256 unschedule_fences_
.pop();
264 bool GpuScheduler::IsPreempted() {
265 if (!preemption_flag_
.get())
268 if (!was_preempted_
&& preemption_flag_
->IsSet()) {
269 TRACE_COUNTER_ID1("gpu", "GpuScheduler::Preempted", this, 1);
270 was_preempted_
= true;
271 } else if (was_preempted_
&& !preemption_flag_
->IsSet()) {
272 TRACE_COUNTER_ID1("gpu", "GpuScheduler::Preempted", this, 0);
273 was_preempted_
= false;
276 return preemption_flag_
->IsSet();
279 bool GpuScheduler::HasMoreIdleWork() {
280 return (decoder_
&& decoder_
->HasMoreIdleWork());
283 void GpuScheduler::PerformIdleWork() {
286 decoder_
->PerformIdleWork();
289 void GpuScheduler::RescheduleTimeOut() {
290 int new_count
= unscheduled_count_
+ rescheduled_count_
;
292 rescheduled_count_
= 0;
294 while (unscheduled_count_
)
297 rescheduled_count_
= new_count
;
300 GpuScheduler::UnscheduleFence::UnscheduleFence(gfx::GLFence
* fence_
,
303 issue_time(base::Time::Now()),
307 GpuScheduler::UnscheduleFence::~UnscheduleFence() {