// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/scheduler/child/task_queue_manager.h"

#include <climits>
#include <limits>
#include <queue>
#include <set>

#include "base/bind.h"
#include "base/time/default_tick_clock.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_argument.h"
#include "components/scheduler/child/nestable_single_thread_task_runner.h"
#include "components/scheduler/child/task_queue_selector.h"

namespace {
const int64_t kMaxTimeTicks = std::numeric_limits<int64_t>::max();
}  // namespace

namespace scheduler {
namespace internal {

// Now() is somewhat expensive so it makes sense not to call Now() unless we
// really need to.
class LazyNow {
 public:
  explicit LazyNow(base::TimeTicks now)
      : task_queue_manager_(nullptr), now_(now) {
    DCHECK(!now.is_null());
  }

  explicit LazyNow(TaskQueueManager* task_queue_manager)
      : task_queue_manager_(task_queue_manager) {}

  base::TimeTicks Now() {
    if (now_.is_null())
      now_ = task_queue_manager_->Now();
    return now_;
  }

 private:
  TaskQueueManager* task_queue_manager_;  // NOT OWNED
  base::TimeTicks now_;
};
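
// One queue managed by the TaskQueueManager. Tasks may be posted from any
// thread; they first land on |incoming_queue_| (guarded by |lock_|) and are
// later moved into |work_queue_|, which is only touched on the main thread.
// When that move happens is governed by the queue's PumpPolicy, and delayed
// tasks wait in |delayed_task_queue_| until their scheduled run time.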
class TaskQueue : public base::SingleThreadTaskRunner {
 public:
  TaskQueue(TaskQueueManager* task_queue_manager,
            const char* disabled_by_default_tracing_category,
            const char* disabled_by_default_verbose_tracing_category);

  // base::SingleThreadTaskRunner implementation.
  bool RunsTasksOnCurrentThread() const override;
  bool PostDelayedTask(const tracked_objects::Location& from_here,
                       const base::Closure& task,
                       base::TimeDelta delay) override {
    return PostDelayedTaskImpl(from_here, task, delay, TaskType::NORMAL);
  }

  bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
                                  const base::Closure& task,
                                  base::TimeDelta delay) override {
    return PostDelayedTaskImpl(from_here, task, delay, TaskType::NON_NESTABLE);
  }

  TaskQueueManager::QueueState GetQueueState() const;

  void SetPumpPolicy(TaskQueueManager::PumpPolicy pump_policy);
  void PumpQueue();

  bool NextPendingDelayedTaskRunTime(
      base::TimeTicks* next_pending_delayed_task);

  bool UpdateWorkQueue(LazyNow* lazy_now,
                       bool should_trigger_wakeup,
                       const base::PendingTask* previous_task);
  base::PendingTask TakeTaskFromWorkQueue();

  void WillDeleteTaskQueueManager();

  base::TaskQueue& work_queue() { return work_queue_; }

  void set_name(const char* name) { name_ = name; }

  TaskQueueManager::WakeupPolicy wakeup_policy() const {
    DCHECK(main_thread_checker_.CalledOnValidThread());
    return wakeup_policy_;
  }

  void set_wakeup_policy(TaskQueueManager::WakeupPolicy wakeup_policy) {
    DCHECK(main_thread_checker_.CalledOnValidThread());
    wakeup_policy_ = wakeup_policy;
  }

  void AsValueInto(base::trace_event::TracedValue* state) const;

 private:
  enum class TaskType {
    NORMAL,
    NON_NESTABLE,
  };

  ~TaskQueue() override;

  bool PostDelayedTaskImpl(const tracked_objects::Location& from_here,
                           const base::Closure& task,
                           base::TimeDelta delay,
                           TaskType task_type);

  // Delayed task posted to the underlying run loop, which locks |lock_| and
  // calls MoveReadyDelayedTasksToIncomingQueueLocked to process delayed tasks
  // that need to be run now.
  void MoveReadyDelayedTasksToIncomingQueue();

  // Enqueues any delayed tasks which should be run now on the incoming_queue_
  // and calls ScheduleDelayedWorkLocked to ensure future tasks are scheduled.
  // Must be called with |lock_| locked.
  void MoveReadyDelayedTasksToIncomingQueueLocked(LazyNow* lazy_now);

  // Posts MoveReadyDelayedTasksToIncomingQueue if there isn't already a task
  // posted on the underlying runloop for the next task's scheduled run time.
  void ScheduleDelayedWorkLocked(LazyNow* lazy_now);

  void PumpQueueLocked();
  bool TaskIsOlderThanQueuedTasks(const base::PendingTask* task);
  bool ShouldAutoPumpQueueLocked(bool should_trigger_wakeup,
                                 const base::PendingTask* previous_task);
  void EnqueueTaskLocked(const base::PendingTask& pending_task);

  void TraceQueueSize(bool is_locked) const;
  static void QueueAsValueInto(const base::TaskQueue& queue,
                               base::trace_event::TracedValue* state);
  static void QueueAsValueInto(const base::DelayedTaskQueue& queue,
                               base::trace_event::TracedValue* state);
  static void TaskAsValueInto(const base::PendingTask& task,
                              base::trace_event::TracedValue* state);

  // This lock protects all members except the work queue, the
  // main_thread_checker_ and wakeup_policy_.
  mutable base::Lock lock_;
  base::PlatformThreadId thread_id_;
  TaskQueueManager* task_queue_manager_;
  base::TaskQueue incoming_queue_;
  TaskQueueManager::PumpPolicy pump_policy_;
  const char* name_;
  const char* disabled_by_default_tracing_category_;
  const char* disabled_by_default_verbose_tracing_category_;
  base::DelayedTaskQueue delayed_task_queue_;
  std::set<base::TimeTicks> in_flight_kick_delayed_tasks_;

  base::ThreadChecker main_thread_checker_;
  base::TaskQueue work_queue_;
  TaskQueueManager::WakeupPolicy wakeup_policy_;

  DISALLOW_COPY_AND_ASSIGN(TaskQueue);
};

TaskQueue::TaskQueue(TaskQueueManager* task_queue_manager,
                     const char* disabled_by_default_tracing_category,
                     const char* disabled_by_default_verbose_tracing_category)
    : thread_id_(base::PlatformThread::CurrentId()),
      task_queue_manager_(task_queue_manager),
      pump_policy_(TaskQueueManager::PumpPolicy::AUTO),
      name_(nullptr),
      disabled_by_default_tracing_category_(
          disabled_by_default_tracing_category),
      disabled_by_default_verbose_tracing_category_(
          disabled_by_default_verbose_tracing_category),
      wakeup_policy_(TaskQueueManager::WakeupPolicy::CAN_WAKE_OTHER_QUEUES) {
}

TaskQueue::~TaskQueue() {
}
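
// Called from the TaskQueueManager destructor: severs the back-pointer to the
// manager and drops any tasks still sitting in the delayed, incoming and work
// queues, since nothing will run them once the manager is gone.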
void TaskQueue::WillDeleteTaskQueueManager() {
  base::AutoLock lock(lock_);
  task_queue_manager_ = nullptr;
  delayed_task_queue_ = base::DelayedTaskQueue();
  incoming_queue_ = base::TaskQueue();
  work_queue_ = base::TaskQueue();
}

bool TaskQueue::RunsTasksOnCurrentThread() const {
  base::AutoLock lock(lock_);
  return base::PlatformThread::CurrentId() == thread_id_;
}
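
// Common implementation behind PostDelayedTask and PostNonNestableDelayedTask.
// Immediate tasks go straight onto the incoming queue; tasks with a positive
// delay are parked in |delayed_task_queue_| and, if the new task became the
// earliest pending one, a wake-up is scheduled for its run time.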
bool TaskQueue::PostDelayedTaskImpl(const tracked_objects::Location& from_here,
                                    const base::Closure& task,
                                    base::TimeDelta delay,
                                    TaskType task_type) {
  base::AutoLock lock(lock_);
  if (!task_queue_manager_)
    return false;

  base::PendingTask pending_task(from_here, task, base::TimeTicks(),
                                 task_type != TaskType::NON_NESTABLE);
  task_queue_manager_->DidQueueTask(&pending_task);

  if (delay > base::TimeDelta()) {
    base::TimeTicks now = task_queue_manager_->Now();
    pending_task.delayed_run_time = now + delay;
    delayed_task_queue_.push(pending_task);
    TraceQueueSize(true);
    // If we changed the topmost task, then it is time to reschedule.
    if (delayed_task_queue_.top().task.Equals(pending_task.task)) {
      LazyNow lazy_now(now);
      ScheduleDelayedWorkLocked(&lazy_now);
    }
    return true;
  }
  EnqueueTaskLocked(pending_task);
  return true;
}

void TaskQueue::MoveReadyDelayedTasksToIncomingQueue() {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  base::AutoLock lock(lock_);
  if (!task_queue_manager_)
    return;

  LazyNow lazy_now(task_queue_manager_);
  MoveReadyDelayedTasksToIncomingQueueLocked(&lazy_now);
}

void TaskQueue::MoveReadyDelayedTasksToIncomingQueueLocked(LazyNow* lazy_now) {
  lock_.AssertAcquired();
  // Enqueue all delayed tasks that should be running now.
  while (!delayed_task_queue_.empty() &&
         delayed_task_queue_.top().delayed_run_time <= lazy_now->Now()) {
    in_flight_kick_delayed_tasks_.erase(
        delayed_task_queue_.top().delayed_run_time);
    EnqueueTaskLocked(delayed_task_queue_.top());
    delayed_task_queue_.pop();
  }
  TraceQueueSize(true);
  ScheduleDelayedWorkLocked(lazy_now);
}

void TaskQueue::ScheduleDelayedWorkLocked(LazyNow* lazy_now) {
  lock_.AssertAcquired();
  // Any remaining tasks are in the future, so queue a task to kick them.
  if (!delayed_task_queue_.empty()) {
    base::TimeTicks next_run_time = delayed_task_queue_.top().delayed_run_time;
    DCHECK_GT(next_run_time, lazy_now->Now());
    // Make sure we don't have more than one
    // MoveReadyDelayedTasksToIncomingQueue posted for a particular scheduled
    // run time (note it's fine to have multiple ones in flight for distinct
    // run times).
    if (in_flight_kick_delayed_tasks_.find(next_run_time) ==
        in_flight_kick_delayed_tasks_.end()) {
      in_flight_kick_delayed_tasks_.insert(next_run_time);
      base::TimeDelta delay = next_run_time - lazy_now->Now();
      task_queue_manager_->PostDelayedTask(
          FROM_HERE,
          Bind(&TaskQueue::MoveReadyDelayedTasksToIncomingQueue, this), delay);
    }
  }
}

TaskQueueManager::QueueState TaskQueue::GetQueueState() const {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  if (!work_queue_.empty())
    return TaskQueueManager::QueueState::HAS_WORK;

  {
    base::AutoLock lock(lock_);
    if (incoming_queue_.empty()) {
      return TaskQueueManager::QueueState::EMPTY;
    } else {
      return TaskQueueManager::QueueState::NEEDS_PUMPING;
    }
  }
}

bool TaskQueue::TaskIsOlderThanQueuedTasks(const base::PendingTask* task) {
  lock_.AssertAcquired();
  // A null task is passed when UpdateWorkQueue is called before any task is
  // run. In this case we don't want to pump an after_wakeup queue, so return
  // true here.
  if (!task)
    return true;

  // Return false if there are no tasks in the incoming queue.
  if (incoming_queue_.empty())
    return false;

  base::PendingTask oldest_queued_task = incoming_queue_.front();
  DCHECK(oldest_queued_task.delayed_run_time.is_null());
  DCHECK(task->delayed_run_time.is_null());

  // Note: the comparison is correct due to the fact that the PendingTask
  // operator inverts its comparison operation in order to work well in a heap
  // based priority queue.
  return oldest_queued_task < *task;
}
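
// Decides whether UpdateWorkQueue may move incoming tasks into the work queue
// without an explicit PumpQueue() call: never for MANUAL queues, for
// AFTER_WAKEUP queues only when the previous task qualifies as a wakeup, and
// only if there is actually something waiting in the incoming queue.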
bool TaskQueue::ShouldAutoPumpQueueLocked(
    bool should_trigger_wakeup,
    const base::PendingTask* previous_task) {
  lock_.AssertAcquired();
  if (pump_policy_ == TaskQueueManager::PumpPolicy::MANUAL)
    return false;
  if (pump_policy_ == TaskQueueManager::PumpPolicy::AFTER_WAKEUP &&
      (!should_trigger_wakeup || TaskIsOlderThanQueuedTasks(previous_task)))
    return false;
  if (incoming_queue_.empty())
    return false;
  return true;
}

bool TaskQueue::NextPendingDelayedTaskRunTime(
    base::TimeTicks* next_pending_delayed_task) {
  base::AutoLock lock(lock_);
  if (delayed_task_queue_.empty())
    return false;
  *next_pending_delayed_task = delayed_task_queue_.top().delayed_run_time;
  return true;
}
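
// Reloads an empty work queue by swapping in the incoming queue, provided the
// pump policy allows it. Returns true if the work queue ends up non-empty.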
bool TaskQueue::UpdateWorkQueue(LazyNow* lazy_now,
                                bool should_trigger_wakeup,
                                const base::PendingTask* previous_task) {
  if (!work_queue_.empty())
    return true;

  {
    base::AutoLock lock(lock_);
    if (!ShouldAutoPumpQueueLocked(should_trigger_wakeup, previous_task))
      return false;
    MoveReadyDelayedTasksToIncomingQueueLocked(lazy_now);
    work_queue_.Swap(&incoming_queue_);
    TraceQueueSize(true);
    return true;
  }
}

base::PendingTask TaskQueue::TakeTaskFromWorkQueue() {
  base::PendingTask pending_task = work_queue_.front();
  work_queue_.pop();
  TraceQueueSize(false);
  return pending_task;
}

void TaskQueue::TraceQueueSize(bool is_locked) const {
  bool is_tracing;
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(disabled_by_default_tracing_category_,
                                     &is_tracing);
  if (!is_tracing || !name_)
    return;
  if (!is_locked)
    lock_.Acquire();
  else
    lock_.AssertAcquired();
  TRACE_COUNTER1(
      disabled_by_default_tracing_category_, name_,
      incoming_queue_.size() + work_queue_.size() + delayed_task_queue_.size());
  if (!is_locked)
    lock_.Release();
}
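
// Appends |pending_task| to the incoming queue. For an AUTO-pumped queue that
// was previously empty this also asks the manager to schedule a DoWork, since
// otherwise nothing would notice the newly posted task.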
void TaskQueue::EnqueueTaskLocked(const base::PendingTask& pending_task) {
  lock_.AssertAcquired();
  if (!task_queue_manager_)
    return;
  if (pump_policy_ == TaskQueueManager::PumpPolicy::AUTO &&
      incoming_queue_.empty())
    task_queue_manager_->MaybePostDoWorkOnMainRunner();
  incoming_queue_.push(pending_task);

  if (!pending_task.delayed_run_time.is_null()) {
    // Clear the delayed run time because we've already applied the delay
    // before getting here.
    incoming_queue_.back().delayed_run_time = base::TimeTicks();
  }
  TraceQueueSize(true);
}

void TaskQueue::SetPumpPolicy(TaskQueueManager::PumpPolicy pump_policy) {
  base::AutoLock lock(lock_);
  if (pump_policy == TaskQueueManager::PumpPolicy::AUTO &&
      pump_policy_ != TaskQueueManager::PumpPolicy::AUTO) {
    PumpQueueLocked();
  }
  pump_policy_ = pump_policy;
}
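
// Moves every ready delayed task and then the whole incoming queue into the
// work queue, and schedules a DoWork if that produced any runnable work.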
void TaskQueue::PumpQueueLocked() {
  lock_.AssertAcquired();
  if (task_queue_manager_) {
    LazyNow lazy_now(task_queue_manager_);
    MoveReadyDelayedTasksToIncomingQueueLocked(&lazy_now);
  }
  while (!incoming_queue_.empty()) {
    work_queue_.push(incoming_queue_.front());
    incoming_queue_.pop();
  }
  if (!work_queue_.empty())
    task_queue_manager_->MaybePostDoWorkOnMainRunner();
}

void TaskQueue::PumpQueue() {
  base::AutoLock lock(lock_);
  PumpQueueLocked();
}

void TaskQueue::AsValueInto(base::trace_event::TracedValue* state) const {
  base::AutoLock lock(lock_);
  state->BeginDictionary();
  if (name_)
    state->SetString("name", name_);
  state->SetString("pump_policy",
                   TaskQueueManager::PumpPolicyToString(pump_policy_));
  state->SetString("wakeup_policy",
                   TaskQueueManager::WakeupPolicyToString(wakeup_policy_));
  bool verbose_tracing_enabled = false;
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(
      disabled_by_default_verbose_tracing_category_, &verbose_tracing_enabled);
  state->SetInteger("incoming_queue_size", incoming_queue_.size());
  state->SetInteger("work_queue_size", work_queue_.size());
  state->SetInteger("delayed_task_queue_size", delayed_task_queue_.size());
  if (verbose_tracing_enabled) {
    state->BeginArray("incoming_queue");
    QueueAsValueInto(incoming_queue_, state);
    state->EndArray();
    state->BeginArray("work_queue");
    QueueAsValueInto(work_queue_, state);
    state->EndArray();
    state->BeginArray("delayed_task_queue");
    QueueAsValueInto(delayed_task_queue_, state);
    state->EndArray();
  }
  state->EndDictionary();
}

// static
void TaskQueue::QueueAsValueInto(const base::TaskQueue& queue,
                                 base::trace_event::TracedValue* state) {
  base::TaskQueue queue_copy(queue);
  while (!queue_copy.empty()) {
    TaskAsValueInto(queue_copy.front(), state);
    queue_copy.pop();
  }
}

// static
void TaskQueue::QueueAsValueInto(const base::DelayedTaskQueue& queue,
                                 base::trace_event::TracedValue* state) {
  base::DelayedTaskQueue queue_copy(queue);
  while (!queue_copy.empty()) {
    TaskAsValueInto(queue_copy.top(), state);
    queue_copy.pop();
  }
}

// static
void TaskQueue::TaskAsValueInto(const base::PendingTask& task,
                                base::trace_event::TracedValue* state) {
  state->BeginDictionary();
  state->SetString("posted_from", task.posted_from.ToString());
  state->SetInteger("sequence_num", task.sequence_num);
  state->SetBoolean("nestable", task.nestable);
  state->SetBoolean("is_high_res", task.is_high_res);
  state->SetDouble(
      "delayed_run_time",
      (task.delayed_run_time - base::TimeTicks()).InMicroseconds() / 1000.0L);
  state->EndDictionary();
}

}  // namespace internal
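
// The constructor creates |task_queue_count| TaskQueue instances up front,
// registers their work queues with the selector, and caches the DoWork
// closures that are posted whenever a queue needs servicing. A rough usage
// sketch (the real wiring lives in the schedulers that own this class; the
// selector, category strings and MyTask below are placeholders):
//
//   TaskQueueManager manager(2, main_task_runner, &selector,
//                            "disabled-by-default-cat",
//                            "disabled-by-default-cat.verbose");
//   manager.TaskRunnerForQueue(0)->PostTask(FROM_HERE, base::Bind(&MyTask));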
TaskQueueManager::TaskQueueManager(
    size_t task_queue_count,
    scoped_refptr<NestableSingleThreadTaskRunner> main_task_runner,
    TaskQueueSelector* selector,
    const char* disabled_by_default_tracing_category,
    const char* disabled_by_default_verbose_tracing_category)
    : main_task_runner_(main_task_runner),
      selector_(selector),
      task_was_run_bitmap_(0),
      pending_dowork_count_(0),
      work_batch_size_(1),
      time_source_(new base::DefaultTickClock),
      disabled_by_default_tracing_category_(
          disabled_by_default_tracing_category),
      deletion_sentinel_(new DeletionSentinel()),
      weak_factory_(this) {
  DCHECK(main_task_runner->RunsTasksOnCurrentThread());
  DCHECK_LE(task_queue_count, sizeof(task_was_run_bitmap_) * CHAR_BIT)
      << "You need a bigger int for task_was_run_bitmap_";
  TRACE_EVENT_OBJECT_CREATED_WITH_ID(disabled_by_default_tracing_category,
                                     "TaskQueueManager", this);

  for (size_t i = 0; i < task_queue_count; i++) {
    scoped_refptr<internal::TaskQueue> queue(make_scoped_refptr(
        new internal::TaskQueue(this,
                                disabled_by_default_tracing_category,
                                disabled_by_default_verbose_tracing_category)));
    queues_.push_back(queue);
  }

  std::vector<const base::TaskQueue*> work_queues;
  for (const auto& queue : queues_)
    work_queues.push_back(&queue->work_queue());
  selector_->RegisterWorkQueues(work_queues);
  selector_->SetTaskQueueSelectorObserver(this);

  do_work_from_main_thread_closure_ =
      base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), true);
  do_work_from_other_thread_closure_ =
      base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), false);
}

TaskQueueManager::~TaskQueueManager() {
  TRACE_EVENT_OBJECT_DELETED_WITH_ID(disabled_by_default_tracing_category_,
                                     "TaskQueueManager", this);
  for (auto& queue : queues_)
    queue->WillDeleteTaskQueueManager();
  selector_->SetTaskQueueSelectorObserver(nullptr);
}

internal::TaskQueue* TaskQueueManager::Queue(size_t queue_index) const {
  DCHECK_LT(queue_index, queues_.size());
  return queues_[queue_index].get();
}

scoped_refptr<base::SingleThreadTaskRunner>
TaskQueueManager::TaskRunnerForQueue(size_t queue_index) const {
  return Queue(queue_index);
}

bool TaskQueueManager::IsQueueEmpty(size_t queue_index) const {
  return Queue(queue_index)->GetQueueState() == QueueState::EMPTY;
}

TaskQueueManager::QueueState TaskQueueManager::GetQueueState(size_t queue_index)
    const {
  return Queue(queue_index)->GetQueueState();
}

base::TimeTicks TaskQueueManager::NextPendingDelayedTaskRunTime() {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  bool found_pending_task = false;
  base::TimeTicks next_pending_delayed_task(
      base::TimeTicks::FromInternalValue(kMaxTimeTicks));
  for (auto& queue : queues_) {
    base::TimeTicks queues_next_pending_delayed_task;
    if (queue->NextPendingDelayedTaskRunTime(
            &queues_next_pending_delayed_task)) {
      found_pending_task = true;
      next_pending_delayed_task =
          std::min(next_pending_delayed_task, queues_next_pending_delayed_task);
    }
  }

  if (!found_pending_task)
    return base::TimeTicks();

  DCHECK_NE(next_pending_delayed_task,
            base::TimeTicks::FromInternalValue(kMaxTimeTicks));
  return next_pending_delayed_task;
}

void TaskQueueManager::SetPumpPolicy(size_t queue_index,
                                     PumpPolicy pump_policy) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  internal::TaskQueue* queue = Queue(queue_index);
  queue->SetPumpPolicy(pump_policy);
}

void TaskQueueManager::SetWakeupPolicy(size_t queue_index,
                                       WakeupPolicy wakeup_policy) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  internal::TaskQueue* queue = Queue(queue_index);
  queue->set_wakeup_policy(wakeup_policy);
}

void TaskQueueManager::PumpQueue(size_t queue_index) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  internal::TaskQueue* queue = Queue(queue_index);
  queue->PumpQueue();
}

bool TaskQueueManager::UpdateWorkQueues(
    bool should_trigger_wakeup,
    const base::PendingTask* previous_task) {
  // TODO(skyostil): This is not efficient when the number of queues grows very
  // large due to the number of locks taken. Consider optimizing when we get
  // there.
  DCHECK(main_thread_checker_.CalledOnValidThread());
  internal::LazyNow lazy_now(this);
  bool has_work = false;
  for (auto& queue : queues_) {
    has_work |=
        queue->UpdateWorkQueue(&lazy_now, should_trigger_wakeup, previous_task);
    if (!queue->work_queue().empty()) {
      // Currently we should not be getting tasks with delayed run times in any
      // of the work queues.
      DCHECK(queue->work_queue().front().delayed_run_time.is_null());
    }
  }
  return has_work;
}

void TaskQueueManager::MaybePostDoWorkOnMainRunner() {
  bool on_main_thread = main_task_runner_->BelongsToCurrentThread();
  if (on_main_thread) {
    // We only want one pending DoWork posted from the main thread, or we risk
    // an explosion of pending DoWorks which could starve out everything else.
    if (pending_dowork_count_ > 0) {
      return;
    }
    pending_dowork_count_++;
    main_task_runner_->PostTask(FROM_HERE, do_work_from_main_thread_closure_);
  } else {
    main_task_runner_->PostTask(FROM_HERE, do_work_from_other_thread_closure_);
  }
}
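
// Runs on the main thread, either via the main-thread or the cross-thread
// DoWork closure. Services up to |work_batch_size_| tasks per invocation,
// re-posting itself through MaybePostDoWorkOnMainRunner while work remains,
// and bails out immediately if running a task deleted the TaskQueueManager.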
void TaskQueueManager::DoWork(bool posted_from_main_thread) {
  if (posted_from_main_thread) {
    pending_dowork_count_--;
    DCHECK_GE(pending_dowork_count_, 0);
  }
  DCHECK(main_thread_checker_.CalledOnValidThread());

  // Pass false and nullptr to UpdateWorkQueues here to prevent waking up a
  // pump-after-wakeup queue.
  if (!UpdateWorkQueues(false, nullptr))
    return;

  base::PendingTask previous_task((tracked_objects::Location()),
                                  (base::Closure()));
  for (int i = 0; i < work_batch_size_; i++) {
    size_t queue_index;
    if (!SelectWorkQueueToService(&queue_index))
      return;
    // Note that this function won't post another call to DoWork if one is
    // already pending, so it is safe to call it in a loop.
    MaybePostDoWorkOnMainRunner();

    if (ProcessTaskFromWorkQueue(queue_index, i > 0, &previous_task))
      return;  // The TaskQueueManager got deleted, we must bail out.

    bool should_trigger_wakeup = Queue(queue_index)->wakeup_policy() ==
                                 WakeupPolicy::CAN_WAKE_OTHER_QUEUES;
    if (!UpdateWorkQueues(should_trigger_wakeup, &previous_task))
      return;
  }
}

bool TaskQueueManager::SelectWorkQueueToService(size_t* out_queue_index) {
  bool should_run = selector_->SelectWorkQueueToService(out_queue_index);
  TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
      disabled_by_default_tracing_category_, "TaskQueueManager", this,
      AsValueWithSelectorResult(should_run, *out_queue_index));
  return should_run;
}

void TaskQueueManager::DidQueueTask(base::PendingTask* pending_task) {
  pending_task->sequence_num = task_sequence_num_.GetNext();
  task_annotator_.DidQueueTask("TaskQueueManager::PostTask", *pending_task);
}
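
// Runs (or defers) the next task from the given queue's work queue. Returns
// true if executing the task deleted this TaskQueueManager, in which case the
// caller must not touch any member state; returns false otherwise.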
bool TaskQueueManager::ProcessTaskFromWorkQueue(
    size_t queue_index,
    bool has_previous_task,
    base::PendingTask* previous_task) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  scoped_refptr<DeletionSentinel> protect(deletion_sentinel_);
  internal::TaskQueue* queue = Queue(queue_index);
  base::PendingTask pending_task = queue->TakeTaskFromWorkQueue();
  task_was_run_bitmap_ |= UINT64_C(1) << queue_index;
  if (!pending_task.nestable && main_task_runner_->IsNested()) {
    // Defer non-nestable work to the main task runner. NOTE these tasks can be
    // arbitrarily delayed so the additional delay should not be a problem.
    main_task_runner_->PostNonNestableTask(pending_task.posted_from,
                                           pending_task.task);
  } else {
    // Suppress "will" task observer notifications for the first and "did"
    // notifications for the last task in the batch to avoid duplicate
    // notifications.
    if (has_previous_task) {
      FOR_EACH_OBSERVER(base::MessageLoop::TaskObserver, task_observers_,
                        DidProcessTask(*previous_task));
      FOR_EACH_OBSERVER(base::MessageLoop::TaskObserver, task_observers_,
                        WillProcessTask(pending_task));
    }
    task_annotator_.RunTask("TaskQueueManager::PostTask",
                            "TaskQueueManager::RunTask", pending_task);

    // Detect if the TaskQueueManager just got deleted. If this happens we must
    // not access any member variables after this point.
    if (protect->HasOneRef())
      return true;

    pending_task.task.Reset();
    *previous_task = pending_task;
  }
  return false;
}

bool TaskQueueManager::RunsTasksOnCurrentThread() const {
  return main_task_runner_->RunsTasksOnCurrentThread();
}

bool TaskQueueManager::PostDelayedTask(
    const tracked_objects::Location& from_here,
    const base::Closure& task,
    base::TimeDelta delay) {
  DCHECK(delay > base::TimeDelta());
  return main_task_runner_->PostDelayedTask(from_here, task, delay);
}

void TaskQueueManager::SetQueueName(size_t queue_index, const char* name) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  internal::TaskQueue* queue = Queue(queue_index);
  queue->set_name(name);
}

void TaskQueueManager::SetWorkBatchSize(int work_batch_size) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  DCHECK_GE(work_batch_size, 1);
  work_batch_size_ = work_batch_size;
}

void TaskQueueManager::AddTaskObserver(
    base::MessageLoop::TaskObserver* task_observer) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  main_task_runner_->AddTaskObserver(task_observer);
  task_observers_.AddObserver(task_observer);
}

void TaskQueueManager::RemoveTaskObserver(
    base::MessageLoop::TaskObserver* task_observer) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  main_task_runner_->RemoveTaskObserver(task_observer);
  task_observers_.RemoveObserver(task_observer);
}

void TaskQueueManager::SetTimeSourceForTesting(
    scoped_ptr<base::TickClock> time_source) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  time_source_ = time_source.Pass();
}

uint64 TaskQueueManager::GetAndClearTaskWasRunOnQueueBitmap() {
  uint64 bitmap = task_was_run_bitmap_;
  task_was_run_bitmap_ = 0;
  return bitmap;
}

base::TimeTicks TaskQueueManager::Now() const {
  return time_source_->NowTicks();
}
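
// Builds a TracedValue snapshot of every queue and the selector, recording
// which queue was selected when |should_run| is true. Used by the
// TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID call in SelectWorkQueueToService().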
scoped_refptr<base::trace_event::ConvertableToTraceFormat>
TaskQueueManager::AsValueWithSelectorResult(bool should_run,
                                            size_t selected_queue) const {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  scoped_refptr<base::trace_event::TracedValue> state =
      new base::trace_event::TracedValue();
  state->BeginArray("queues");
  for (auto& queue : queues_)
    queue->AsValueInto(state.get());
  state->EndArray();
  state->BeginDictionary("selector");
  selector_->AsValueInto(state.get());
  state->EndDictionary();
  if (should_run)
    state->SetInteger("selected_queue", selected_queue);
  return state;
}

// static
const char* TaskQueueManager::PumpPolicyToString(
    TaskQueueManager::PumpPolicy pump_policy) {
  switch (pump_policy) {
    case TaskQueueManager::PumpPolicy::AUTO:
      return "auto";
    case TaskQueueManager::PumpPolicy::AFTER_WAKEUP:
      return "after_wakeup";
    case TaskQueueManager::PumpPolicy::MANUAL:
      return "manual";
    default:
      NOTREACHED();
      return nullptr;
  }
}

// static
const char* TaskQueueManager::WakeupPolicyToString(
    TaskQueueManager::WakeupPolicy wakeup_policy) {
  switch (wakeup_policy) {
    case TaskQueueManager::WakeupPolicy::CAN_WAKE_OTHER_QUEUES:
      return "can_wake_other_queues";
    case TaskQueueManager::WakeupPolicy::DONT_WAKE_OTHER_QUEUES:
      return "dont_wake_other_queues";
    default:
      NOTREACHED();
      return nullptr;
  }
}

void TaskQueueManager::OnTaskQueueEnabled() {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  MaybePostDoWorkOnMainRunner();
}

}  // namespace scheduler