1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
#include "components/scheduler/child/task_queue_manager.h"

#include <algorithm>
#include <limits>
#include <set>

#include "base/bind.h"
#include "base/time/default_tick_clock.h"
#include "components/scheduler/child/lazy_now.h"
#include "components/scheduler/child/nestable_single_thread_task_runner.h"
#include "components/scheduler/child/task_queue_impl.h"
#include "components/scheduler/child/task_queue_selector.h"
#include "components/scheduler/child/task_queue_sets.h"
// Sentinel "no wakeup scheduled yet" value used by
// NextPendingDelayedTaskRunTime(); any real delayed-run time compares less
// than this.  Use int64_t consistently (the original mixed int64/int64_t).
const int64_t kMaxTimeTicks = std::numeric_limits<int64_t>::max();
24 TaskQueueManager::TaskQueueManager(
25 scoped_refptr
<NestableSingleThreadTaskRunner
> main_task_runner
,
26 const char* disabled_by_default_tracing_category
,
27 const char* disabled_by_default_verbose_tracing_category
)
28 : main_task_runner_(main_task_runner
),
29 task_was_run_on_quiescence_monitored_queue_(false),
30 pending_dowork_count_(0),
32 time_source_(new base::DefaultTickClock
),
33 disabled_by_default_tracing_category_(
34 disabled_by_default_tracing_category
),
35 disabled_by_default_verbose_tracing_category_(
36 disabled_by_default_verbose_tracing_category
),
37 deletion_sentinel_(new DeletionSentinel()),
39 DCHECK(main_task_runner
->RunsTasksOnCurrentThread());
40 TRACE_EVENT_OBJECT_CREATED_WITH_ID(disabled_by_default_tracing_category
,
41 "TaskQueueManager", this);
42 selector_
.SetTaskQueueSelectorObserver(this);
44 do_work_from_main_thread_closure_
=
45 base::Bind(&TaskQueueManager::DoWork
, weak_factory_
.GetWeakPtr(), true);
46 do_work_from_other_thread_closure_
=
47 base::Bind(&TaskQueueManager::DoWork
, weak_factory_
.GetWeakPtr(), false);
48 delayed_queue_wakeup_closure_
=
49 base::Bind(&TaskQueueManager::DelayedDoWork
, weak_factory_
.GetWeakPtr());
52 TaskQueueManager::~TaskQueueManager() {
53 TRACE_EVENT_OBJECT_DELETED_WITH_ID(disabled_by_default_tracing_category_
,
54 "TaskQueueManager", this);
56 while (!queues_
.empty())
57 (*queues_
.begin())->UnregisterTaskQueue();
59 selector_
.SetTaskQueueSelectorObserver(nullptr);
62 scoped_refptr
<internal::TaskQueueImpl
> TaskQueueManager::NewTaskQueue(
63 const TaskQueue::Spec
& spec
) {
64 TRACE_EVENT1(disabled_by_default_tracing_category_
,
65 "TaskQueueManager::NewTaskQueue", "queue_name", spec
.name
);
66 DCHECK(main_thread_checker_
.CalledOnValidThread());
67 scoped_refptr
<internal::TaskQueueImpl
> queue(
68 make_scoped_refptr(new internal::TaskQueueImpl(
69 this, spec
, disabled_by_default_tracing_category_
,
70 disabled_by_default_verbose_tracing_category_
)));
71 queues_
.insert(queue
);
72 selector_
.AddQueue(queue
.get());
76 void TaskQueueManager::UnregisterTaskQueue(
77 scoped_refptr
<internal::TaskQueueImpl
> task_queue
) {
78 TRACE_EVENT1(disabled_by_default_tracing_category_
,
79 "TaskQueueManager::UnregisterTaskQueue",
80 "queue_name", task_queue
->GetName());
81 DCHECK(main_thread_checker_
.CalledOnValidThread());
82 // Add |task_queue| to |queues_to_delete_| so we can prevent it from being
83 // freed while any of our structures hold hold a raw pointer to it.
84 queues_to_delete_
.insert(task_queue
);
85 queues_
.erase(task_queue
);
86 selector_
.RemoveQueue(task_queue
.get());
88 // We need to remove |task_queue| from delayed_wakeup_map_ which is a little
89 // awkward since it's keyed by time. O(n) running time.
90 for (DelayedWakeupMultimap::iterator iter
= delayed_wakeup_map_
.begin();
91 iter
!= delayed_wakeup_map_
.end();) {
92 if (iter
->second
== task_queue
.get()) {
93 DelayedWakeupMultimap::iterator temp
= iter
;
96 delayed_wakeup_map_
.erase(temp
);
102 // |newly_updatable_| might contain |task_queue|, we use
103 // MoveNewlyUpdatableQueuesIntoUpdatableQueueSet to flush it out.
104 MoveNewlyUpdatableQueuesIntoUpdatableQueueSet();
105 updatable_queue_set_
.erase(task_queue
.get());
108 base::TimeTicks
TaskQueueManager::NextPendingDelayedTaskRunTime() {
109 DCHECK(main_thread_checker_
.CalledOnValidThread());
110 bool found_pending_task
= false;
111 base::TimeTicks
next_pending_delayed_task(
112 base::TimeTicks::FromInternalValue(kMaxTimeTicks
));
113 for (auto& queue
: queues_
) {
114 base::TimeTicks queues_next_pending_delayed_task
;
115 if (queue
->NextPendingDelayedTaskRunTime(
116 &queues_next_pending_delayed_task
)) {
117 found_pending_task
= true;
118 next_pending_delayed_task
=
119 std::min(next_pending_delayed_task
, queues_next_pending_delayed_task
);
123 if (!found_pending_task
)
124 return base::TimeTicks();
126 DCHECK_NE(next_pending_delayed_task
,
127 base::TimeTicks::FromInternalValue(kMaxTimeTicks
));
128 return next_pending_delayed_task
;
131 void TaskQueueManager::RegisterAsUpdatableTaskQueue(
132 internal::TaskQueueImpl
* queue
) {
133 base::AutoLock
lock(newly_updatable_lock_
);
134 newly_updatable_
.push_back(queue
);
137 void TaskQueueManager::UnregisterAsUpdatableTaskQueue(
138 internal::TaskQueueImpl
* queue
) {
139 DCHECK(main_thread_checker_
.CalledOnValidThread());
140 updatable_queue_set_
.erase(queue
);
143 void TaskQueueManager::MoveNewlyUpdatableQueuesIntoUpdatableQueueSet() {
144 base::AutoLock
lock(newly_updatable_lock_
);
145 while (!newly_updatable_
.empty()) {
146 updatable_queue_set_
.insert(newly_updatable_
.back());
147 newly_updatable_
.pop_back();
151 void TaskQueueManager::UpdateWorkQueues(
152 bool should_trigger_wakeup
,
153 const internal::TaskQueueImpl::Task
* previous_task
) {
154 DCHECK(main_thread_checker_
.CalledOnValidThread());
155 TRACE_EVENT0(disabled_by_default_tracing_category_
,
156 "TaskQueueManager::UpdateWorkQueues");
157 internal::LazyNow
lazy_now(this);
159 // Move any ready delayed tasks into the incomming queues.
160 WakeupReadyDelayedQueues(&lazy_now
);
162 // Insert any newly updatable queues into the updatable_queue_set_.
164 base::AutoLock
lock(newly_updatable_lock_
);
165 while (!newly_updatable_
.empty()) {
166 updatable_queue_set_
.insert(newly_updatable_
.back());
167 newly_updatable_
.pop_back();
171 auto iter
= updatable_queue_set_
.begin();
172 while (iter
!= updatable_queue_set_
.end()) {
173 internal::TaskQueueImpl
* queue
= *iter
++;
174 // NOTE Update work queue may erase itself from |updatable_queue_set_|.
175 // This is fine, erasing an element won't invalidate any interator, as long
176 // as the iterator isn't the element being delated.
177 if (queue
->work_queue().empty())
178 queue
->UpdateWorkQueue(&lazy_now
, should_trigger_wakeup
, previous_task
);
182 void TaskQueueManager::ScheduleDelayedWorkTask(
183 scoped_refptr
<internal::TaskQueueImpl
> queue
,
184 base::TimeTicks delayed_run_time
) {
185 internal::LazyNow
lazy_now(this);
186 ScheduleDelayedWork(queue
.get(), delayed_run_time
, &lazy_now
);
189 void TaskQueueManager::ScheduleDelayedWork(internal::TaskQueueImpl
* queue
,
190 base::TimeTicks delayed_run_time
,
191 internal::LazyNow
* lazy_now
) {
192 if (!main_task_runner_
->BelongsToCurrentThread()) {
193 // NOTE posting a delayed task from a different thread is not expected to be
194 // common. This pathway is less optimal than perhaps it could be because
195 // it causes two main thread tasks to be run. Should this assumption prove
196 // to be false in future, we may need to revisit this.
197 main_task_runner_
->PostTask(
198 FROM_HERE
, base::Bind(&TaskQueueManager::ScheduleDelayedWorkTask
,
199 weak_factory_
.GetWeakPtr(),
200 scoped_refptr
<internal::TaskQueueImpl
>(queue
),
204 if (delayed_run_time
> lazy_now
->Now()) {
205 // Make sure there's one (and only one) task posted to |main_task_runner_|
206 // to call |DelayedDoWork| at |delayed_run_time|.
207 if (delayed_wakeup_map_
.find(delayed_run_time
) ==
208 delayed_wakeup_map_
.end()) {
209 base::TimeDelta delay
= delayed_run_time
- lazy_now
->Now();
210 main_task_runner_
->PostDelayedTask(FROM_HERE
,
211 delayed_queue_wakeup_closure_
, delay
);
213 delayed_wakeup_map_
.insert(std::make_pair(delayed_run_time
, queue
));
215 WakeupReadyDelayedQueues(lazy_now
);
219 void TaskQueueManager::DelayedDoWork() {
220 DCHECK(main_thread_checker_
.CalledOnValidThread());
223 internal::LazyNow
lazy_now(this);
224 WakeupReadyDelayedQueues(&lazy_now
);
230 void TaskQueueManager::WakeupReadyDelayedQueues(internal::LazyNow
* lazy_now
) {
231 // Wake up any queues with pending delayed work. Note std::multipmap stores
232 // the elements sorted by key, so the begin() iterator points to the earliest
234 std::set
<internal::TaskQueueImpl
*> dedup_set
;
235 while (!delayed_wakeup_map_
.empty()) {
236 DelayedWakeupMultimap::iterator next_wakeup
= delayed_wakeup_map_
.begin();
237 if (next_wakeup
->first
> lazy_now
->Now())
239 // A queue could have any number of delayed tasks pending so it's worthwhile
240 // deduping calls to MoveReadyDelayedTasksToIncomingQueue since it takes a
241 // lock. NOTE the order in which these are called matters since the order
242 // in which EnqueueTaskLocks is called is respected when choosing which
243 // queue to execute a task from.
244 if (dedup_set
.insert(next_wakeup
->second
).second
)
245 next_wakeup
->second
->MoveReadyDelayedTasksToIncomingQueue(lazy_now
);
246 delayed_wakeup_map_
.erase(next_wakeup
);
250 void TaskQueueManager::MaybePostDoWorkOnMainRunner() {
251 bool on_main_thread
= main_task_runner_
->BelongsToCurrentThread();
252 if (on_main_thread
) {
253 // We only want one pending DoWork posted from the main thread, or we risk
254 // an explosion of pending DoWorks which could starve out everything else.
255 if (pending_dowork_count_
> 0) {
258 pending_dowork_count_
++;
259 main_task_runner_
->PostTask(FROM_HERE
, do_work_from_main_thread_closure_
);
261 main_task_runner_
->PostTask(FROM_HERE
, do_work_from_other_thread_closure_
);
265 void TaskQueueManager::DoWork(bool decrement_pending_dowork_count
) {
266 if (decrement_pending_dowork_count
) {
267 pending_dowork_count_
--;
268 DCHECK_GE(pending_dowork_count_
, 0);
270 DCHECK(main_thread_checker_
.CalledOnValidThread());
272 queues_to_delete_
.clear();
274 // Pass false and nullptr to UpdateWorkQueues here to prevent waking up a
275 // pump-after-wakeup queue.
276 UpdateWorkQueues(false, nullptr);
278 internal::TaskQueueImpl::Task previous_task
;
279 for (int i
= 0; i
< work_batch_size_
; i
++) {
280 internal::TaskQueueImpl
* queue
;
281 if (!SelectQueueToService(&queue
))
284 if (ProcessTaskFromWorkQueue(queue
, &previous_task
))
285 return; // The TaskQueueManager got deleted, we must bail out.
287 bool should_trigger_wakeup
= queue
->wakeup_policy() ==
288 TaskQueue::WakeupPolicy::CAN_WAKE_OTHER_QUEUES
;
289 UpdateWorkQueues(should_trigger_wakeup
, &previous_task
);
291 // Only run a single task per batch in nested run loops so that we can
292 // properly exit the nested loop when someone calls RunLoop::Quit().
293 if (main_task_runner_
->IsNested())
297 // TODO(alexclarke): Consider refactoring the above loop to terminate only
298 // when there's no more work left to be done, rather than posting a
299 // continuation task.
300 if (!selector_
.EnabledWorkQueuesEmpty())
301 MaybePostDoWorkOnMainRunner();
304 bool TaskQueueManager::SelectQueueToService(
305 internal::TaskQueueImpl
** out_queue
) {
306 bool should_run
= selector_
.SelectQueueToService(out_queue
);
307 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
308 disabled_by_default_tracing_category_
, "TaskQueueManager", this,
309 AsValueWithSelectorResult(should_run
, *out_queue
));
313 void TaskQueueManager::DidQueueTask(
314 const internal::TaskQueueImpl::Task
& pending_task
) {
315 task_annotator_
.DidQueueTask("TaskQueueManager::PostTask", pending_task
);
318 bool TaskQueueManager::ProcessTaskFromWorkQueue(
319 internal::TaskQueueImpl
* queue
,
320 internal::TaskQueueImpl::Task
* out_previous_task
) {
321 DCHECK(main_thread_checker_
.CalledOnValidThread());
322 scoped_refptr
<DeletionSentinel
> protect(deletion_sentinel_
);
323 // TODO(alexclarke): consider std::move() when allowed.
324 internal::TaskQueueImpl::Task pending_task
= queue
->TakeTaskFromWorkQueue();
326 if (queue
->GetQuiescenceMonitored())
327 task_was_run_on_quiescence_monitored_queue_
= true;
329 if (!pending_task
.nestable
&& main_task_runner_
->IsNested()) {
330 // Defer non-nestable work to the main task runner. NOTE these tasks can be
331 // arbitrarily delayed so the additional delay should not be a problem.
332 // TODO(skyostil): Figure out a way to not forget which task queue the
333 // task is associated with. See http://crbug.com/522843.
334 main_task_runner_
->PostNonNestableTask(pending_task
.posted_from
,
337 TRACE_TASK_EXECUTION("TaskQueueManager::ProcessTaskFromWorkQueue",
339 if (queue
->GetShouldNotifyObservers()) {
340 FOR_EACH_OBSERVER(base::MessageLoop::TaskObserver
, task_observers_
,
341 WillProcessTask(pending_task
));
342 queue
->NotifyWillProcessTask(pending_task
);
344 TRACE_EVENT1(disabled_by_default_tracing_category_
,
345 "Run Task From Queue", "queue", queue
->GetName());
346 task_annotator_
.RunTask("TaskQueueManager::PostTask", pending_task
);
348 // Detect if the TaskQueueManager just got deleted. If this happens we must
349 // not access any member variables after this point.
350 if (protect
->HasOneRef())
353 if (queue
->GetShouldNotifyObservers()) {
354 FOR_EACH_OBSERVER(base::MessageLoop::TaskObserver
, task_observers_
,
355 DidProcessTask(pending_task
));
356 queue
->NotifyDidProcessTask(pending_task
);
359 pending_task
.task
.Reset();
360 *out_previous_task
= pending_task
;
365 bool TaskQueueManager::RunsTasksOnCurrentThread() const {
366 return main_task_runner_
->RunsTasksOnCurrentThread();
369 bool TaskQueueManager::PostDelayedTask(
370 const tracked_objects::Location
& from_here
,
371 const base::Closure
& task
,
372 base::TimeDelta delay
) {
373 DCHECK_GE(delay
, base::TimeDelta());
374 return main_task_runner_
->PostDelayedTask(from_here
, task
, delay
);
377 void TaskQueueManager::SetWorkBatchSize(int work_batch_size
) {
378 DCHECK(main_thread_checker_
.CalledOnValidThread());
379 DCHECK_GE(work_batch_size
, 1);
380 work_batch_size_
= work_batch_size
;
383 void TaskQueueManager::AddTaskObserver(
384 base::MessageLoop::TaskObserver
* task_observer
) {
385 DCHECK(main_thread_checker_
.CalledOnValidThread());
386 task_observers_
.AddObserver(task_observer
);
389 void TaskQueueManager::RemoveTaskObserver(
390 base::MessageLoop::TaskObserver
* task_observer
) {
391 DCHECK(main_thread_checker_
.CalledOnValidThread());
392 task_observers_
.RemoveObserver(task_observer
);
395 void TaskQueueManager::SetTimeSourceForTesting(
396 scoped_ptr
<base::TickClock
> time_source
) {
397 DCHECK(main_thread_checker_
.CalledOnValidThread());
398 time_source_
= time_source
.Pass();
401 bool TaskQueueManager::GetAndClearSystemIsQuiescentBit() {
402 bool task_was_run
= task_was_run_on_quiescence_monitored_queue_
;
403 task_was_run_on_quiescence_monitored_queue_
= false;
404 return !task_was_run
;
407 base::TimeTicks
TaskQueueManager::Now() const {
408 return time_source_
->NowTicks();
411 int TaskQueueManager::GetNextSequenceNumber() {
412 return task_sequence_num_
.GetNext();
415 scoped_refptr
<base::trace_event::ConvertableToTraceFormat
>
416 TaskQueueManager::AsValueWithSelectorResult(
418 internal::TaskQueueImpl
* selected_queue
) const {
419 DCHECK(main_thread_checker_
.CalledOnValidThread());
420 scoped_refptr
<base::trace_event::TracedValue
> state
=
421 new base::trace_event::TracedValue();
422 state
->BeginArray("queues");
423 for (auto& queue
: queues_
)
424 queue
->AsValueInto(state
.get());
426 state
->BeginDictionary("selector");
427 selector_
.AsValueInto(state
.get());
428 state
->EndDictionary();
430 state
->SetString("selected_queue", selected_queue
->GetName());
432 state
->BeginArray("updatable_queue_set");
433 for (auto& queue
: updatable_queue_set_
)
434 state
->AppendString(queue
->GetName());
439 void TaskQueueManager::OnTaskQueueEnabled(internal::TaskQueueImpl
* queue
) {
440 DCHECK(main_thread_checker_
.CalledOnValidThread());
441 // Only schedule DoWork if there's something to do.
442 if (!queue
->work_queue().empty())
443 MaybePostDoWorkOnMainRunner();
446 } // namespace scheduler