Roll src/third_party/WebKit 605a979:06cb9e9 (svn 202556:202558)
[chromium-blink-merge.git] / components / scheduler / child / task_queue_manager.cc
blobbf1e1565c3be775581bd9f9475a8375f85a2d3d1
1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
#include "components/scheduler/child/task_queue_manager.h"

#include <stdint.h>

#include <algorithm>
#include <limits>
#include <queue>
#include <set>
#include <utility>

#include "base/bind.h"
#include "base/time/default_tick_clock.h"
#include "components/scheduler/child/lazy_now.h"
#include "components/scheduler/child/nestable_single_thread_task_runner.h"
#include "components/scheduler/child/task_queue_impl.h"
#include "components/scheduler/child/task_queue_selector.h"
#include "components/scheduler/child/task_queue_sets.h"
namespace {
// Sentinel "no wakeup yet" value: the largest time representable by
// base::TimeTicks. Declared as int64_t (was the non-standard int64 spelling,
// inconsistent with the declared type).
const int64_t kMaxTimeTicks = std::numeric_limits<int64_t>::max();
}  // namespace
22 namespace scheduler {
24 TaskQueueManager::TaskQueueManager(
25 scoped_refptr<NestableSingleThreadTaskRunner> main_task_runner,
26 const char* disabled_by_default_tracing_category,
27 const char* disabled_by_default_verbose_tracing_category)
28 : main_task_runner_(main_task_runner),
29 task_was_run_on_quiescence_monitored_queue_(false),
30 pending_dowork_count_(0),
31 work_batch_size_(1),
32 time_source_(new base::DefaultTickClock),
33 disabled_by_default_tracing_category_(
34 disabled_by_default_tracing_category),
35 disabled_by_default_verbose_tracing_category_(
36 disabled_by_default_verbose_tracing_category),
37 deletion_sentinel_(new DeletionSentinel()),
38 weak_factory_(this) {
39 DCHECK(main_task_runner->RunsTasksOnCurrentThread());
40 TRACE_EVENT_OBJECT_CREATED_WITH_ID(disabled_by_default_tracing_category,
41 "TaskQueueManager", this);
42 selector_.SetTaskQueueSelectorObserver(this);
44 do_work_from_main_thread_closure_ =
45 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), true);
46 do_work_from_other_thread_closure_ =
47 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), false);
48 delayed_queue_wakeup_closure_ =
49 base::Bind(&TaskQueueManager::DelayedDoWork, weak_factory_.GetWeakPtr());
52 TaskQueueManager::~TaskQueueManager() {
53 TRACE_EVENT_OBJECT_DELETED_WITH_ID(disabled_by_default_tracing_category_,
54 "TaskQueueManager", this);
56 while (!queues_.empty())
57 (*queues_.begin())->UnregisterTaskQueue();
59 selector_.SetTaskQueueSelectorObserver(nullptr);
62 scoped_refptr<internal::TaskQueueImpl> TaskQueueManager::NewTaskQueue(
63 const TaskQueue::Spec& spec) {
64 TRACE_EVENT1(disabled_by_default_tracing_category_,
65 "TaskQueueManager::NewTaskQueue", "queue_name", spec.name);
66 DCHECK(main_thread_checker_.CalledOnValidThread());
67 scoped_refptr<internal::TaskQueueImpl> queue(
68 make_scoped_refptr(new internal::TaskQueueImpl(
69 this, spec, disabled_by_default_tracing_category_,
70 disabled_by_default_verbose_tracing_category_)));
71 queues_.insert(queue);
72 selector_.AddQueue(queue.get());
73 return queue;
76 void TaskQueueManager::UnregisterTaskQueue(
77 scoped_refptr<internal::TaskQueueImpl> task_queue) {
78 TRACE_EVENT1(disabled_by_default_tracing_category_,
79 "TaskQueueManager::UnregisterTaskQueue",
80 "queue_name", task_queue->GetName());
81 DCHECK(main_thread_checker_.CalledOnValidThread());
82 // Add |task_queue| to |queues_to_delete_| so we can prevent it from being
83 // freed while any of our structures hold hold a raw pointer to it.
84 queues_to_delete_.insert(task_queue);
85 queues_.erase(task_queue);
86 selector_.RemoveQueue(task_queue.get());
88 // We need to remove |task_queue| from delayed_wakeup_map_ which is a little
89 // awkward since it's keyed by time. O(n) running time.
90 for (DelayedWakeupMultimap::iterator iter = delayed_wakeup_map_.begin();
91 iter != delayed_wakeup_map_.end();) {
92 if (iter->second == task_queue.get()) {
93 DelayedWakeupMultimap::iterator temp = iter;
94 iter++;
95 // O(1) amortized.
96 delayed_wakeup_map_.erase(temp);
97 } else {
98 iter++;
102 // |newly_updatable_| might contain |task_queue|, we use
103 // MoveNewlyUpdatableQueuesIntoUpdatableQueueSet to flush it out.
104 MoveNewlyUpdatableQueuesIntoUpdatableQueueSet();
105 updatable_queue_set_.erase(task_queue.get());
108 base::TimeTicks TaskQueueManager::NextPendingDelayedTaskRunTime() {
109 DCHECK(main_thread_checker_.CalledOnValidThread());
110 bool found_pending_task = false;
111 base::TimeTicks next_pending_delayed_task(
112 base::TimeTicks::FromInternalValue(kMaxTimeTicks));
113 for (auto& queue : queues_) {
114 base::TimeTicks queues_next_pending_delayed_task;
115 if (queue->NextPendingDelayedTaskRunTime(
116 &queues_next_pending_delayed_task)) {
117 found_pending_task = true;
118 next_pending_delayed_task =
119 std::min(next_pending_delayed_task, queues_next_pending_delayed_task);
123 if (!found_pending_task)
124 return base::TimeTicks();
126 DCHECK_NE(next_pending_delayed_task,
127 base::TimeTicks::FromInternalValue(kMaxTimeTicks));
128 return next_pending_delayed_task;
131 void TaskQueueManager::RegisterAsUpdatableTaskQueue(
132 internal::TaskQueueImpl* queue) {
133 base::AutoLock lock(newly_updatable_lock_);
134 newly_updatable_.push_back(queue);
137 void TaskQueueManager::UnregisterAsUpdatableTaskQueue(
138 internal::TaskQueueImpl* queue) {
139 DCHECK(main_thread_checker_.CalledOnValidThread());
140 updatable_queue_set_.erase(queue);
143 void TaskQueueManager::MoveNewlyUpdatableQueuesIntoUpdatableQueueSet() {
144 base::AutoLock lock(newly_updatable_lock_);
145 while (!newly_updatable_.empty()) {
146 updatable_queue_set_.insert(newly_updatable_.back());
147 newly_updatable_.pop_back();
151 void TaskQueueManager::UpdateWorkQueues(
152 bool should_trigger_wakeup,
153 const internal::TaskQueueImpl::Task* previous_task) {
154 DCHECK(main_thread_checker_.CalledOnValidThread());
155 TRACE_EVENT0(disabled_by_default_tracing_category_,
156 "TaskQueueManager::UpdateWorkQueues");
157 internal::LazyNow lazy_now(this);
159 // Move any ready delayed tasks into the incomming queues.
160 WakeupReadyDelayedQueues(&lazy_now);
162 // Insert any newly updatable queues into the updatable_queue_set_.
164 base::AutoLock lock(newly_updatable_lock_);
165 while (!newly_updatable_.empty()) {
166 updatable_queue_set_.insert(newly_updatable_.back());
167 newly_updatable_.pop_back();
171 auto iter = updatable_queue_set_.begin();
172 while (iter != updatable_queue_set_.end()) {
173 internal::TaskQueueImpl* queue = *iter++;
174 // NOTE Update work queue may erase itself from |updatable_queue_set_|.
175 // This is fine, erasing an element won't invalidate any interator, as long
176 // as the iterator isn't the element being delated.
177 if (queue->work_queue().empty())
178 queue->UpdateWorkQueue(&lazy_now, should_trigger_wakeup, previous_task);
182 void TaskQueueManager::ScheduleDelayedWorkTask(
183 scoped_refptr<internal::TaskQueueImpl> queue,
184 base::TimeTicks delayed_run_time) {
185 internal::LazyNow lazy_now(this);
186 ScheduleDelayedWork(queue.get(), delayed_run_time, &lazy_now);
189 void TaskQueueManager::ScheduleDelayedWork(internal::TaskQueueImpl* queue,
190 base::TimeTicks delayed_run_time,
191 internal::LazyNow* lazy_now) {
192 if (!main_task_runner_->BelongsToCurrentThread()) {
193 // NOTE posting a delayed task from a different thread is not expected to be
194 // common. This pathway is less optimal than perhaps it could be because
195 // it causes two main thread tasks to be run. Should this assumption prove
196 // to be false in future, we may need to revisit this.
197 main_task_runner_->PostTask(
198 FROM_HERE, base::Bind(&TaskQueueManager::ScheduleDelayedWorkTask,
199 weak_factory_.GetWeakPtr(),
200 scoped_refptr<internal::TaskQueueImpl>(queue),
201 delayed_run_time));
202 return;
204 if (delayed_run_time > lazy_now->Now()) {
205 // Make sure there's one (and only one) task posted to |main_task_runner_|
206 // to call |DelayedDoWork| at |delayed_run_time|.
207 if (delayed_wakeup_map_.find(delayed_run_time) ==
208 delayed_wakeup_map_.end()) {
209 base::TimeDelta delay = delayed_run_time - lazy_now->Now();
210 main_task_runner_->PostDelayedTask(FROM_HERE,
211 delayed_queue_wakeup_closure_, delay);
213 delayed_wakeup_map_.insert(std::make_pair(delayed_run_time, queue));
214 } else {
215 WakeupReadyDelayedQueues(lazy_now);
219 void TaskQueueManager::DelayedDoWork() {
220 DCHECK(main_thread_checker_.CalledOnValidThread());
223 internal::LazyNow lazy_now(this);
224 WakeupReadyDelayedQueues(&lazy_now);
227 DoWork(false);
230 void TaskQueueManager::WakeupReadyDelayedQueues(internal::LazyNow* lazy_now) {
231 // Wake up any queues with pending delayed work. Note std::multipmap stores
232 // the elements sorted by key, so the begin() iterator points to the earliest
233 // queue to wakeup.
234 std::set<internal::TaskQueueImpl*> dedup_set;
235 while (!delayed_wakeup_map_.empty()) {
236 DelayedWakeupMultimap::iterator next_wakeup = delayed_wakeup_map_.begin();
237 if (next_wakeup->first > lazy_now->Now())
238 break;
239 // A queue could have any number of delayed tasks pending so it's worthwhile
240 // deduping calls to MoveReadyDelayedTasksToIncomingQueue since it takes a
241 // lock. NOTE the order in which these are called matters since the order
242 // in which EnqueueTaskLocks is called is respected when choosing which
243 // queue to execute a task from.
244 if (dedup_set.insert(next_wakeup->second).second)
245 next_wakeup->second->MoveReadyDelayedTasksToIncomingQueue(lazy_now);
246 delayed_wakeup_map_.erase(next_wakeup);
250 void TaskQueueManager::MaybePostDoWorkOnMainRunner() {
251 bool on_main_thread = main_task_runner_->BelongsToCurrentThread();
252 if (on_main_thread) {
253 // We only want one pending DoWork posted from the main thread, or we risk
254 // an explosion of pending DoWorks which could starve out everything else.
255 if (pending_dowork_count_ > 0) {
256 return;
258 pending_dowork_count_++;
259 main_task_runner_->PostTask(FROM_HERE, do_work_from_main_thread_closure_);
260 } else {
261 main_task_runner_->PostTask(FROM_HERE, do_work_from_other_thread_closure_);
265 void TaskQueueManager::DoWork(bool decrement_pending_dowork_count) {
266 if (decrement_pending_dowork_count) {
267 pending_dowork_count_--;
268 DCHECK_GE(pending_dowork_count_, 0);
270 DCHECK(main_thread_checker_.CalledOnValidThread());
272 queues_to_delete_.clear();
274 // Pass false and nullptr to UpdateWorkQueues here to prevent waking up a
275 // pump-after-wakeup queue.
276 UpdateWorkQueues(false, nullptr);
278 internal::TaskQueueImpl::Task previous_task;
279 for (int i = 0; i < work_batch_size_; i++) {
280 internal::TaskQueueImpl* queue;
281 if (!SelectQueueToService(&queue))
282 break;
284 switch (ProcessTaskFromWorkQueue(queue, &previous_task)) {
285 case ProcessTaskResult::DEFERRED:
286 // If a task was deferred, try again with another task. Note that this
287 // means deferred tasks (i.e. non-nestable tasks) will never trigger
288 // queue wake-ups.
289 continue;
290 case ProcessTaskResult::EXECUTED:
291 break;
292 case ProcessTaskResult::TASK_QUEUE_MANAGER_DELETED:
293 return; // The TaskQueueManager got deleted, we must bail out.
295 bool should_trigger_wakeup = queue->wakeup_policy() ==
296 TaskQueue::WakeupPolicy::CAN_WAKE_OTHER_QUEUES;
297 UpdateWorkQueues(should_trigger_wakeup, &previous_task);
299 // Only run a single task per batch in nested run loops so that we can
300 // properly exit the nested loop when someone calls RunLoop::Quit().
301 if (main_task_runner_->IsNested())
302 break;
305 // TODO(alexclarke): Consider refactoring the above loop to terminate only
306 // when there's no more work left to be done, rather than posting a
307 // continuation task.
308 if (!selector_.EnabledWorkQueuesEmpty())
309 MaybePostDoWorkOnMainRunner();
312 bool TaskQueueManager::SelectQueueToService(
313 internal::TaskQueueImpl** out_queue) {
314 bool should_run = selector_.SelectQueueToService(out_queue);
315 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
316 disabled_by_default_tracing_category_, "TaskQueueManager", this,
317 AsValueWithSelectorResult(should_run, *out_queue));
318 return should_run;
321 void TaskQueueManager::DidQueueTask(
322 const internal::TaskQueueImpl::Task& pending_task) {
323 task_annotator_.DidQueueTask("TaskQueueManager::PostTask", pending_task);
326 TaskQueueManager::ProcessTaskResult TaskQueueManager::ProcessTaskFromWorkQueue(
327 internal::TaskQueueImpl* queue,
328 internal::TaskQueueImpl::Task* out_previous_task) {
329 DCHECK(main_thread_checker_.CalledOnValidThread());
330 scoped_refptr<DeletionSentinel> protect(deletion_sentinel_);
331 // TODO(alexclarke): consider std::move() when allowed.
332 internal::TaskQueueImpl::Task pending_task = queue->TakeTaskFromWorkQueue();
334 if (queue->GetQuiescenceMonitored())
335 task_was_run_on_quiescence_monitored_queue_ = true;
337 if (!pending_task.nestable && main_task_runner_->IsNested()) {
338 // Defer non-nestable work to the main task runner. NOTE these tasks can be
339 // arbitrarily delayed so the additional delay should not be a problem.
340 // TODO(skyostil): Figure out a way to not forget which task queue the
341 // task is associated with. See http://crbug.com/522843.
342 main_task_runner_->PostNonNestableTask(pending_task.posted_from,
343 pending_task.task);
344 return ProcessTaskResult::DEFERRED;
347 TRACE_TASK_EXECUTION("TaskQueueManager::ProcessTaskFromWorkQueue",
348 pending_task);
349 if (queue->GetShouldNotifyObservers()) {
350 FOR_EACH_OBSERVER(base::MessageLoop::TaskObserver, task_observers_,
351 WillProcessTask(pending_task));
352 queue->NotifyWillProcessTask(pending_task);
354 TRACE_EVENT1(disabled_by_default_tracing_category_,
355 "Run Task From Queue", "queue", queue->GetName());
356 task_annotator_.RunTask("TaskQueueManager::PostTask", pending_task);
358 // Detect if the TaskQueueManager just got deleted. If this happens we must
359 // not access any member variables after this point.
360 if (protect->HasOneRef())
361 return ProcessTaskResult::TASK_QUEUE_MANAGER_DELETED;
363 if (queue->GetShouldNotifyObservers()) {
364 FOR_EACH_OBSERVER(base::MessageLoop::TaskObserver, task_observers_,
365 DidProcessTask(pending_task));
366 queue->NotifyDidProcessTask(pending_task);
369 pending_task.task.Reset();
370 *out_previous_task = pending_task;
371 return ProcessTaskResult::EXECUTED;
374 bool TaskQueueManager::RunsTasksOnCurrentThread() const {
375 return main_task_runner_->RunsTasksOnCurrentThread();
378 bool TaskQueueManager::PostDelayedTask(
379 const tracked_objects::Location& from_here,
380 const base::Closure& task,
381 base::TimeDelta delay) {
382 DCHECK_GE(delay, base::TimeDelta());
383 return main_task_runner_->PostDelayedTask(from_here, task, delay);
386 void TaskQueueManager::SetWorkBatchSize(int work_batch_size) {
387 DCHECK(main_thread_checker_.CalledOnValidThread());
388 DCHECK_GE(work_batch_size, 1);
389 work_batch_size_ = work_batch_size;
392 void TaskQueueManager::AddTaskObserver(
393 base::MessageLoop::TaskObserver* task_observer) {
394 DCHECK(main_thread_checker_.CalledOnValidThread());
395 task_observers_.AddObserver(task_observer);
398 void TaskQueueManager::RemoveTaskObserver(
399 base::MessageLoop::TaskObserver* task_observer) {
400 DCHECK(main_thread_checker_.CalledOnValidThread());
401 task_observers_.RemoveObserver(task_observer);
404 void TaskQueueManager::SetTimeSourceForTesting(
405 scoped_ptr<base::TickClock> time_source) {
406 DCHECK(main_thread_checker_.CalledOnValidThread());
407 time_source_ = time_source.Pass();
410 bool TaskQueueManager::GetAndClearSystemIsQuiescentBit() {
411 bool task_was_run = task_was_run_on_quiescence_monitored_queue_;
412 task_was_run_on_quiescence_monitored_queue_ = false;
413 return !task_was_run;
416 base::TimeTicks TaskQueueManager::Now() const {
417 return time_source_->NowTicks();
420 int TaskQueueManager::GetNextSequenceNumber() {
421 return task_sequence_num_.GetNext();
424 scoped_refptr<base::trace_event::ConvertableToTraceFormat>
425 TaskQueueManager::AsValueWithSelectorResult(
426 bool should_run,
427 internal::TaskQueueImpl* selected_queue) const {
428 DCHECK(main_thread_checker_.CalledOnValidThread());
429 scoped_refptr<base::trace_event::TracedValue> state =
430 new base::trace_event::TracedValue();
431 state->BeginArray("queues");
432 for (auto& queue : queues_)
433 queue->AsValueInto(state.get());
434 state->EndArray();
435 state->BeginDictionary("selector");
436 selector_.AsValueInto(state.get());
437 state->EndDictionary();
438 if (should_run)
439 state->SetString("selected_queue", selected_queue->GetName());
441 state->BeginArray("updatable_queue_set");
442 for (auto& queue : updatable_queue_set_)
443 state->AppendString(queue->GetName());
444 state->EndArray();
445 return state;
448 void TaskQueueManager::OnTaskQueueEnabled(internal::TaskQueueImpl* queue) {
449 DCHECK(main_thread_checker_.CalledOnValidThread());
450 // Only schedule DoWork if there's something to do.
451 if (!queue->work_queue().empty())
452 MaybePostDoWorkOnMainRunner();
455 } // namespace scheduler