// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/child/scheduler/task_queue_manager.h"

#include <limits>
#include <queue>
#include <set>

#include "base/bind.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_argument.h"
#include "cc/test/test_now_source.h"
#include "content/child/scheduler/nestable_single_thread_task_runner.h"
#include "content/child/scheduler/task_queue_selector.h"

namespace {
const int64_t kMaxTimeTicks = std::numeric_limits<int64_t>::max();
}  // namespace

namespace content {
namespace internal {

// Now() is somewhat expensive so it makes sense not to call Now() unless we
// really need to.
class LazyNow {
 public:
  explicit LazyNow(base::TimeTicks now)
      : task_queue_manager_(nullptr), now_(now) {
    DCHECK(!now.is_null());
  }

  explicit LazyNow(TaskQueueManager* task_queue_manager)
      : task_queue_manager_(task_queue_manager) {}

  base::TimeTicks Now() {
    if (now_.is_null())
      now_ = task_queue_manager_->Now();
    return now_;
  }

 private:
  TaskQueueManager* task_queue_manager_;  // NOT OWNED
  base::TimeTicks now_;
};
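
// An internal task queue that doubles as the base::SingleThreadTaskRunner
// handed out by TaskQueueManager. Posts from any thread land in
// |incoming_queue_| under |lock_|; the main thread drains them into
// |work_queue_| (read without locking) according to the queue's PumpPolicy.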
class TaskQueue : public base::SingleThreadTaskRunner {
 public:
  TaskQueue(TaskQueueManager* task_queue_manager,
            const char* disabled_by_default_tracing_category);

  // base::SingleThreadTaskRunner implementation.
  bool RunsTasksOnCurrentThread() const override;
  bool PostDelayedTask(const tracked_objects::Location& from_here,
                       const base::Closure& task,
                       base::TimeDelta delay) override {
    return PostDelayedTaskImpl(from_here, task, delay, TaskType::NORMAL);
  }

  bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
                                  const base::Closure& task,
                                  base::TimeDelta delay) override {
    return PostDelayedTaskImpl(from_here, task, delay, TaskType::NON_NESTABLE);
  }

  bool IsQueueEmpty() const;

  void SetPumpPolicy(TaskQueueManager::PumpPolicy pump_policy);
  void PumpQueue();

  bool NextPendingDelayedTaskRunTime(
      base::TimeTicks* next_pending_delayed_task);

  bool UpdateWorkQueue(LazyNow* lazy_now,
                       const base::PendingTask* previous_task);
  base::PendingTask TakeTaskFromWorkQueue();

  void WillDeleteTaskQueueManager();

  base::TaskQueue& work_queue() { return work_queue_; }

  void set_name(const char* name) { name_ = name; }

  void AsValueInto(base::trace_event::TracedValue* state) const;

 private:
  enum class TaskType {
    NORMAL,
    NON_NESTABLE,
  };

  ~TaskQueue() override;

  bool PostDelayedTaskImpl(const tracked_objects::Location& from_here,
                           const base::Closure& task,
                           base::TimeDelta delay,
                           TaskType task_type);

  // Delayed task posted to the underlying run loop, which locks |lock_| and
  // calls MoveReadyDelayedTasksToIncomingQueueLocked to process delayed tasks
  // that need to be run now.
  void MoveReadyDelayedTasksToIncomingQueue();

  // Enqueues any delayed tasks which should be run now on the incoming_queue_
  // and calls ScheduleDelayedWorkLocked to ensure future tasks are scheduled.
  // Must be called with |lock_| locked.
  void MoveReadyDelayedTasksToIncomingQueueLocked(LazyNow* lazy_now);

  // Posts MoveReadyDelayedTasksToIncomingQueue if there isn't already a task
  // posted on the underlying runloop for the next task's scheduled run time.
  void ScheduleDelayedWorkLocked(LazyNow* lazy_now);

  void PumpQueueLocked();
  bool TaskIsOlderThanQueuedTasks(const base::PendingTask* task);
  bool ShouldAutoPumpQueueLocked(const base::PendingTask* previous_task);
  void EnqueueTaskLocked(const base::PendingTask& pending_task);

  void TraceQueueSize(bool is_locked) const;
  static const char* PumpPolicyToString(
      TaskQueueManager::PumpPolicy pump_policy);
  static void QueueAsValueInto(const base::TaskQueue& queue,
                               base::trace_event::TracedValue* state);
  static void QueueAsValueInto(const base::DelayedTaskQueue& queue,
                               base::trace_event::TracedValue* state);
  static void TaskAsValueInto(const base::PendingTask& task,
                              base::trace_event::TracedValue* state);

  // This lock protects all members except the work queue and the
  // main_thread_checker_.
  mutable base::Lock lock_;
  base::PlatformThreadId thread_id_;
  TaskQueueManager* task_queue_manager_;
  base::TaskQueue incoming_queue_;
  TaskQueueManager::PumpPolicy pump_policy_;
  const char* name_;
  const char* disabled_by_default_tracing_category_;
  base::DelayedTaskQueue delayed_task_queue_;
  std::set<base::TimeTicks> in_flight_kick_delayed_tasks_;

  base::ThreadChecker main_thread_checker_;
  base::TaskQueue work_queue_;

  DISALLOW_COPY_AND_ASSIGN(TaskQueue);
};

TaskQueue::TaskQueue(TaskQueueManager* task_queue_manager,
                     const char* disabled_by_default_tracing_category)
    : thread_id_(base::PlatformThread::CurrentId()),
      task_queue_manager_(task_queue_manager),
      pump_policy_(TaskQueueManager::PumpPolicy::AUTO),
      name_(nullptr),
      disabled_by_default_tracing_category_(
          disabled_by_default_tracing_category) {
}

TaskQueue::~TaskQueue() {
}

void TaskQueue::WillDeleteTaskQueueManager() {
  base::AutoLock lock(lock_);
  task_queue_manager_ = nullptr;
  delayed_task_queue_ = base::DelayedTaskQueue();
  incoming_queue_ = base::TaskQueue();
  work_queue_ = base::TaskQueue();
}

bool TaskQueue::RunsTasksOnCurrentThread() const {
  base::AutoLock lock(lock_);
  return base::PlatformThread::CurrentId() == thread_id_;
}
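
// Immediate tasks go straight to |incoming_queue_|; delayed tasks are parked
// in |delayed_task_queue_| and a wake-up is scheduled for the earliest run
// time. Returns false once the TaskQueueManager has been deleted.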
bool TaskQueue::PostDelayedTaskImpl(const tracked_objects::Location& from_here,
                                    const base::Closure& task,
                                    base::TimeDelta delay,
                                    TaskType task_type) {
  base::AutoLock lock(lock_);
  if (!task_queue_manager_)
    return false;

  base::PendingTask pending_task(from_here, task, base::TimeTicks(),
                                 task_type != TaskType::NON_NESTABLE);
  task_queue_manager_->DidQueueTask(&pending_task);

  if (delay > base::TimeDelta()) {
    base::TimeTicks now = task_queue_manager_->Now();
    pending_task.delayed_run_time = now + delay;
    delayed_task_queue_.push(pending_task);
    TraceQueueSize(true);
    // If we changed the topmost task, then it is time to reschedule.
    if (delayed_task_queue_.top().task.Equals(pending_task.task)) {
      LazyNow lazy_now(now);
      ScheduleDelayedWorkLocked(&lazy_now);
    }
    return true;
  }
  EnqueueTaskLocked(pending_task);
  return true;
}

void TaskQueue::MoveReadyDelayedTasksToIncomingQueue() {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  base::AutoLock lock(lock_);
  if (!task_queue_manager_)
    return;

  LazyNow lazy_now(task_queue_manager_);
  MoveReadyDelayedTasksToIncomingQueueLocked(&lazy_now);
}

void TaskQueue::MoveReadyDelayedTasksToIncomingQueueLocked(LazyNow* lazy_now) {
  lock_.AssertAcquired();
  // Enqueue all delayed tasks that should be running now.
  while (!delayed_task_queue_.empty() &&
         delayed_task_queue_.top().delayed_run_time <= lazy_now->Now()) {
    in_flight_kick_delayed_tasks_.erase(
        delayed_task_queue_.top().delayed_run_time);
    EnqueueTaskLocked(delayed_task_queue_.top());
    delayed_task_queue_.pop();
  }
  TraceQueueSize(true);
  ScheduleDelayedWorkLocked(lazy_now);
}

void TaskQueue::ScheduleDelayedWorkLocked(LazyNow* lazy_now) {
  lock_.AssertAcquired();
  // Any remaining tasks are in the future, so queue a task to kick them.
  if (!delayed_task_queue_.empty()) {
    base::TimeTicks next_run_time = delayed_task_queue_.top().delayed_run_time;
    DCHECK_GT(next_run_time, lazy_now->Now());
    // Make sure we don't have more than one
    // MoveReadyDelayedTasksToIncomingQueue posted for a particular scheduled
    // run time (note it's fine to have multiple ones in flight for distinct
    // run times).
    if (in_flight_kick_delayed_tasks_.find(next_run_time) ==
        in_flight_kick_delayed_tasks_.end()) {
      in_flight_kick_delayed_tasks_.insert(next_run_time);
      base::TimeDelta delay = next_run_time - lazy_now->Now();
      task_queue_manager_->PostDelayedTask(
          FROM_HERE,
          base::Bind(&TaskQueue::MoveReadyDelayedTasksToIncomingQueue, this),
          delay);
    }
  }
}

bool TaskQueue::IsQueueEmpty() const {
  if (!work_queue_.empty())
    return false;

  {
    base::AutoLock lock(lock_);
    return incoming_queue_.empty();
  }
}

bool TaskQueue::TaskIsOlderThanQueuedTasks(const base::PendingTask* task) {
  lock_.AssertAcquired();
  // A null task is passed when UpdateWorkQueue is called before any task is
  // run. In this case we don't want to pump an after_wakeup queue, so return
  // true here.
  if (!task)
    return true;

  // Return false if there are no tasks in the incoming queue.
  if (incoming_queue_.empty())
    return false;

  base::PendingTask oldest_queued_task = incoming_queue_.front();
  DCHECK(oldest_queued_task.delayed_run_time.is_null());
  DCHECK(task->delayed_run_time.is_null());

  // Note: the comparison is correct due to the fact that the PendingTask
  // operator inverts its comparison operation in order to work well in a heap
  // based priority queue.
  return oldest_queued_task < *task;
}

bool TaskQueue::ShouldAutoPumpQueueLocked(
    const base::PendingTask* previous_task) {
  lock_.AssertAcquired();
  if (pump_policy_ == TaskQueueManager::PumpPolicy::MANUAL)
    return false;
  if (pump_policy_ == TaskQueueManager::PumpPolicy::AFTER_WAKEUP &&
      TaskIsOlderThanQueuedTasks(previous_task))
    return false;
  if (incoming_queue_.empty())
    return false;
  return true;
}

bool TaskQueue::NextPendingDelayedTaskRunTime(
    base::TimeTicks* next_pending_delayed_task) {
  base::AutoLock lock(lock_);
  if (delayed_task_queue_.empty())
    return false;
  *next_pending_delayed_task = delayed_task_queue_.top().delayed_run_time;
  return true;
}
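
// Refills |work_queue_| from |incoming_queue_| when the pump policy allows it.
// Returns true if the work queue ends up non-empty.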
bool TaskQueue::UpdateWorkQueue(LazyNow* lazy_now,
                                const base::PendingTask* previous_task) {
  if (!work_queue_.empty())
    return true;

  {
    base::AutoLock lock(lock_);
    if (!ShouldAutoPumpQueueLocked(previous_task))
      return false;
    MoveReadyDelayedTasksToIncomingQueueLocked(lazy_now);
    work_queue_.Swap(&incoming_queue_);
    TraceQueueSize(true);
    return true;
  }
}

base::PendingTask TaskQueue::TakeTaskFromWorkQueue() {
  base::PendingTask pending_task = work_queue_.front();
  work_queue_.pop();
  TraceQueueSize(false);
  return pending_task;
}

void TaskQueue::TraceQueueSize(bool is_locked) const {
  bool is_tracing;
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(disabled_by_default_tracing_category_,
                                     &is_tracing);
  if (!is_tracing || !name_)
    return;
  if (!is_locked)
    lock_.Acquire();
  else
    lock_.AssertAcquired();
  TRACE_COUNTER1(
      disabled_by_default_tracing_category_, name_,
      incoming_queue_.size() + work_queue_.size() + delayed_task_queue_.size());
  if (!is_locked)
    lock_.Release();
}

void TaskQueue::EnqueueTaskLocked(const base::PendingTask& pending_task) {
  lock_.AssertAcquired();
  if (!task_queue_manager_)
    return;
  if (pump_policy_ == TaskQueueManager::PumpPolicy::AUTO &&
      incoming_queue_.empty())
    task_queue_manager_->MaybePostDoWorkOnMainRunner();
  incoming_queue_.push(pending_task);

  if (!pending_task.delayed_run_time.is_null()) {
    // Clear the delayed run time because we've already applied the delay
    // before getting here.
    incoming_queue_.back().delayed_run_time = base::TimeTicks();
  }
  TraceQueueSize(true);
}
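
// Switching a queue back to PumpPolicy::AUTO pumps any tasks that queued up
// while the queue was manual or after-wakeup.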
void TaskQueue::SetPumpPolicy(TaskQueueManager::PumpPolicy pump_policy) {
  base::AutoLock lock(lock_);
  if (pump_policy == TaskQueueManager::PumpPolicy::AUTO &&
      pump_policy_ != TaskQueueManager::PumpPolicy::AUTO) {
    PumpQueueLocked();
  }
  pump_policy_ = pump_policy;
}

void TaskQueue::PumpQueueLocked() {
  lock_.AssertAcquired();
  if (task_queue_manager_) {
    LazyNow lazy_now(task_queue_manager_);
    MoveReadyDelayedTasksToIncomingQueueLocked(&lazy_now);
  }
  while (!incoming_queue_.empty()) {
    work_queue_.push(incoming_queue_.front());
    incoming_queue_.pop();
  }
  if (!work_queue_.empty())
    task_queue_manager_->MaybePostDoWorkOnMainRunner();
}

void TaskQueue::PumpQueue() {
  base::AutoLock lock(lock_);
  PumpQueueLocked();
}

void TaskQueue::AsValueInto(base::trace_event::TracedValue* state) const {
  base::AutoLock lock(lock_);
  state->BeginDictionary();
  if (name_)
    state->SetString("name", name_);
  state->SetString("pump_policy", PumpPolicyToString(pump_policy_));
  state->BeginArray("incoming_queue");
  QueueAsValueInto(incoming_queue_, state);
  state->EndArray();
  state->BeginArray("work_queue");
  QueueAsValueInto(work_queue_, state);
  state->EndArray();
  state->BeginArray("delayed_task_queue");
  QueueAsValueInto(delayed_task_queue_, state);
  state->EndArray();
  state->EndDictionary();
}

// static
const char* TaskQueue::PumpPolicyToString(
    TaskQueueManager::PumpPolicy pump_policy) {
  switch (pump_policy) {
    case TaskQueueManager::PumpPolicy::AUTO:
      return "auto";
    case TaskQueueManager::PumpPolicy::AFTER_WAKEUP:
      return "after_wakeup";
    case TaskQueueManager::PumpPolicy::MANUAL:
      return "manual";
    default:
      NOTREACHED();
      return nullptr;
  }
}

// static
void TaskQueue::QueueAsValueInto(const base::TaskQueue& queue,
                                 base::trace_event::TracedValue* state) {
  base::TaskQueue queue_copy(queue);
  while (!queue_copy.empty()) {
    TaskAsValueInto(queue_copy.front(), state);
    queue_copy.pop();
  }
}

// static
void TaskQueue::QueueAsValueInto(const base::DelayedTaskQueue& queue,
                                 base::trace_event::TracedValue* state) {
  base::DelayedTaskQueue queue_copy(queue);
  while (!queue_copy.empty()) {
    TaskAsValueInto(queue_copy.top(), state);
    queue_copy.pop();
  }
}

// static
void TaskQueue::TaskAsValueInto(const base::PendingTask& task,
                                base::trace_event::TracedValue* state) {
  state->BeginDictionary();
  state->SetString("posted_from", task.posted_from.ToString());
  state->SetInteger("sequence_num", task.sequence_num);
  state->SetBoolean("nestable", task.nestable);
  state->SetBoolean("is_high_res", task.is_high_res);
  state->SetDouble(
      "delayed_run_time",
      (task.delayed_run_time - base::TimeTicks()).InMicroseconds() / 1000.0L);
  state->EndDictionary();
}

}  // namespace internal
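
// The manager creates |task_queue_count| queues up front, registers their work
// queues with the selector, and caches the DoWork closures it posts to the
// main task runner.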
TaskQueueManager::TaskQueueManager(
    size_t task_queue_count,
    scoped_refptr<NestableSingleThreadTaskRunner> main_task_runner,
    TaskQueueSelector* selector,
    const char* disabled_by_default_tracing_category)
    : main_task_runner_(main_task_runner),
      selector_(selector),
      pending_dowork_count_(0),
      work_batch_size_(1),
      time_source_(nullptr),
      disabled_by_default_tracing_category_(
          disabled_by_default_tracing_category),
      deletion_sentinel_(new DeletionSentinel()),
      weak_factory_(this) {
  DCHECK(main_task_runner->RunsTasksOnCurrentThread());
  TRACE_EVENT_OBJECT_CREATED_WITH_ID(disabled_by_default_tracing_category,
                                     "TaskQueueManager", this);

  for (size_t i = 0; i < task_queue_count; i++) {
    scoped_refptr<internal::TaskQueue> queue(make_scoped_refptr(
        new internal::TaskQueue(this, disabled_by_default_tracing_category)));
    queues_.push_back(queue);
  }

  std::vector<const base::TaskQueue*> work_queues;
  for (const auto& queue : queues_)
    work_queues.push_back(&queue->work_queue());
  selector_->RegisterWorkQueues(work_queues);

  do_work_from_main_thread_closure_ =
      base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), true);
  do_work_from_other_thread_closure_ =
      base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), false);
}

TaskQueueManager::~TaskQueueManager() {
  TRACE_EVENT_OBJECT_DELETED_WITH_ID(disabled_by_default_tracing_category_,
                                     "TaskQueueManager", this);
  for (auto& queue : queues_)
    queue->WillDeleteTaskQueueManager();
}

internal::TaskQueue* TaskQueueManager::Queue(size_t queue_index) const {
  DCHECK_LT(queue_index, queues_.size());
  return queues_[queue_index].get();
}

scoped_refptr<base::SingleThreadTaskRunner>
TaskQueueManager::TaskRunnerForQueue(size_t queue_index) const {
  return Queue(queue_index);
}

bool TaskQueueManager::IsQueueEmpty(size_t queue_index) const {
  internal::TaskQueue* queue = Queue(queue_index);
  return queue->IsQueueEmpty();
}

base::TimeTicks TaskQueueManager::NextPendingDelayedTaskRunTime() {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  bool found_pending_task = false;
  base::TimeTicks next_pending_delayed_task(
      base::TimeTicks::FromInternalValue(kMaxTimeTicks));
  for (auto& queue : queues_) {
    base::TimeTicks queues_next_pending_delayed_task;
    if (queue->NextPendingDelayedTaskRunTime(
            &queues_next_pending_delayed_task)) {
      found_pending_task = true;
      next_pending_delayed_task = std::min(next_pending_delayed_task,
                                           queues_next_pending_delayed_task);
    }
  }

  if (!found_pending_task)
    return base::TimeTicks();

  DCHECK_NE(next_pending_delayed_task,
            base::TimeTicks::FromInternalValue(kMaxTimeTicks));
  return next_pending_delayed_task;
}

void TaskQueueManager::SetPumpPolicy(size_t queue_index,
                                     PumpPolicy pump_policy) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  internal::TaskQueue* queue = Queue(queue_index);
  queue->SetPumpPolicy(pump_policy);
}

void TaskQueueManager::PumpQueue(size_t queue_index) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  internal::TaskQueue* queue = Queue(queue_index);
  queue->PumpQueue();
}

bool TaskQueueManager::UpdateWorkQueues(
    const base::PendingTask* previous_task) {
  // TODO(skyostil): This is not efficient when the number of queues grows very
  // large due to the number of locks taken. Consider optimizing when we get
  // there.
  DCHECK(main_thread_checker_.CalledOnValidThread());
  internal::LazyNow lazy_now(this);
  bool has_work = false;
  for (auto& queue : queues_) {
    has_work |= queue->UpdateWorkQueue(&lazy_now, previous_task);
    if (!queue->work_queue().empty()) {
      // Currently we should not be getting tasks with delayed run times in any
      // of the work queues.
      DCHECK(queue->work_queue().front().delayed_run_time.is_null());
    }
  }
  return has_work;
}
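
// Ensures a DoWork is pending on the main task runner. Posts from the main
// thread are deduplicated via |pending_dowork_count_|; posts from other
// threads always go through.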
void TaskQueueManager::MaybePostDoWorkOnMainRunner() {
  bool on_main_thread = main_task_runner_->BelongsToCurrentThread();
  if (on_main_thread) {
    // We only want one pending DoWork posted from the main thread, or we risk
    // an explosion of pending DoWorks which could starve out everything else.
    if (pending_dowork_count_ > 0) {
      return;
    }
    pending_dowork_count_++;
    main_task_runner_->PostTask(FROM_HERE, do_work_from_main_thread_closure_);
  } else {
    main_task_runner_->PostTask(FROM_HERE, do_work_from_other_thread_closure_);
  }
}
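
// Runs up to |work_batch_size_| tasks on the main thread. Each iteration
// re-posts DoWork if needed and bails out early if the queues have no work,
// the selector has nothing runnable, or the manager was deleted while running
// a task.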
void TaskQueueManager::DoWork(bool posted_from_main_thread) {
  if (posted_from_main_thread) {
    pending_dowork_count_--;
    DCHECK_GE(pending_dowork_count_, 0);
  }
  DCHECK(main_thread_checker_.CalledOnValidThread());

  // Pass nullptr to UpdateWorkQueues here to prevent waking up a
  // pump-after-wakeup queue.
  if (!UpdateWorkQueues(nullptr))
    return;

  base::PendingTask previous_task((tracked_objects::Location()),
                                  (base::Closure()));
  for (int i = 0; i < work_batch_size_; i++) {
    size_t queue_index;
    if (!SelectWorkQueueToService(&queue_index))
      return;
    // Note that this function won't post another call to DoWork if one is
    // already pending, so it is safe to call it in a loop.
    MaybePostDoWorkOnMainRunner();

    if (ProcessTaskFromWorkQueue(queue_index, i > 0, &previous_task))
      return;  // The TaskQueueManager got deleted, we must bail out.

    if (!UpdateWorkQueues(&previous_task))
      return;
  }
}

bool TaskQueueManager::SelectWorkQueueToService(size_t* out_queue_index) {
  bool should_run = selector_->SelectWorkQueueToService(out_queue_index);
  TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
      disabled_by_default_tracing_category_, "TaskQueueManager", this,
      AsValueWithSelectorResult(should_run, *out_queue_index));
  return should_run;
}

void TaskQueueManager::DidQueueTask(base::PendingTask* pending_task) {
  pending_task->sequence_num = task_sequence_num_.GetNext();
  task_annotator_.DidQueueTask("TaskQueueManager::PostTask", *pending_task);
}
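
// Runs one task from the given work queue. Returns true if running it deleted
// the TaskQueueManager (detected via |deletion_sentinel_|), in which case the
// caller must not touch any member state.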
bool TaskQueueManager::ProcessTaskFromWorkQueue(
    size_t queue_index,
    bool has_previous_task,
    base::PendingTask* previous_task) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  scoped_refptr<DeletionSentinel> protect(deletion_sentinel_);
  internal::TaskQueue* queue = Queue(queue_index);
  base::PendingTask pending_task = queue->TakeTaskFromWorkQueue();
  if (!pending_task.nestable && main_task_runner_->IsNested()) {
    // Defer non-nestable work to the main task runner. NOTE these tasks can be
    // arbitrarily delayed so the additional delay should not be a problem.
    main_task_runner_->PostNonNestableTask(pending_task.posted_from,
                                           pending_task.task);
  } else {
    // Suppress "will" task observer notifications for the first and "did"
    // notifications for the last task in the batch to avoid duplicate
    // notifications.
    if (has_previous_task) {
      FOR_EACH_OBSERVER(base::MessageLoop::TaskObserver, task_observers_,
                        DidProcessTask(*previous_task));
      FOR_EACH_OBSERVER(base::MessageLoop::TaskObserver, task_observers_,
                        WillProcessTask(pending_task));
    }
    task_annotator_.RunTask("TaskQueueManager::PostTask",
                            "TaskQueueManager::RunTask", pending_task);

    // Detect if the TaskQueueManager just got deleted. If this happens we must
    // not access any member variables after this point.
    if (protect->HasOneRef())
      return true;

    pending_task.task.Reset();
    *previous_task = pending_task;
  }
  return false;
}

bool TaskQueueManager::RunsTasksOnCurrentThread() const {
  return main_task_runner_->RunsTasksOnCurrentThread();
}

bool TaskQueueManager::PostDelayedTask(
    const tracked_objects::Location& from_here,
    const base::Closure& task,
    base::TimeDelta delay) {
  DCHECK(delay > base::TimeDelta());
  return main_task_runner_->PostDelayedTask(from_here, task, delay);
}

void TaskQueueManager::SetQueueName(size_t queue_index, const char* name) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  internal::TaskQueue* queue = Queue(queue_index);
  queue->set_name(name);
}

void TaskQueueManager::SetWorkBatchSize(int work_batch_size) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  DCHECK_GE(work_batch_size, 1);
  work_batch_size_ = work_batch_size;
}

void TaskQueueManager::AddTaskObserver(
    base::MessageLoop::TaskObserver* task_observer) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  base::MessageLoop::current()->AddTaskObserver(task_observer);
  task_observers_.AddObserver(task_observer);
}

void TaskQueueManager::RemoveTaskObserver(
    base::MessageLoop::TaskObserver* task_observer) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  base::MessageLoop::current()->RemoveTaskObserver(task_observer);
  task_observers_.RemoveObserver(task_observer);
}

void TaskQueueManager::SetTimeSourceForTesting(
    scoped_refptr<cc::TestNowSource> time_source) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  time_source_ = time_source;
}
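
// Uses the injected cc::TestNowSource when one is set (tests only), otherwise
// the real clock.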
base::TimeTicks TaskQueueManager::Now() const {
  return UNLIKELY(time_source_) ? time_source_->Now() : base::TimeTicks::Now();
}

scoped_refptr<base::trace_event::ConvertableToTraceFormat>
TaskQueueManager::AsValueWithSelectorResult(bool should_run,
                                            size_t selected_queue) const {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  scoped_refptr<base::trace_event::TracedValue> state =
      new base::trace_event::TracedValue();
  state->BeginArray("queues");
  for (auto& queue : queues_)
    queue->AsValueInto(state.get());
  state->EndArray();
  state->BeginDictionary("selector");
  selector_->AsValueInto(state.get());
  state->EndDictionary();
  if (should_run)
    state->SetInteger("selected_queue", selected_queue);
  return state;
}

}  // namespace content