/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "nsTimerImpl.h"
#include "TimerThread.h"

#include "GeckoProfiler.h"
#include "nsThreadUtils.h"

#include "nsIObserverService.h"
#include "nsIPropertyBag2.h"
#include "mozilla/Services.h"
#include "mozilla/ChaosMode.h"
#include "mozilla/ArenaAllocator.h"
#include "mozilla/ArrayUtils.h"
#include "mozilla/OperatorNewExtensions.h"
#include "mozilla/StaticPrefs_timer.h"

#include "mozilla/glean/GleanMetrics.h"

using namespace mozilla;
#ifdef XP_WIN
// Include Windows header required for enabling high-precision timers.
#  include <mmsystem.h>

static constexpr UINT kTimerPeriodHiRes = 1;
static constexpr UINT kTimerPeriodLowRes = 16;

// Helper functions to determine what Windows timer resolution to target.
static constexpr UINT GetDesiredTimerPeriod(const bool aOnBatteryPower,
                                            const bool aLowProcessPriority) {
  const bool useLowResTimer = aOnBatteryPower || aLowProcessPriority;
  return useLowResTimer ? kTimerPeriodLowRes : kTimerPeriodHiRes;
}

static_assert(GetDesiredTimerPeriod(true, false) == kTimerPeriodLowRes);
static_assert(GetDesiredTimerPeriod(false, true) == kTimerPeriodLowRes);
static_assert(GetDesiredTimerPeriod(true, true) == kTimerPeriodLowRes);
static_assert(GetDesiredTimerPeriod(false, false) == kTimerPeriodHiRes);
UINT TimerThread::ComputeDesiredTimerPeriod() const {
  const bool lowPriorityProcess =
      mCachedPriority.load(std::memory_order_relaxed) <
      hal::PROCESS_PRIORITY_FOREGROUND;

  // NOTE: Using short-circuiting here to avoid a call to GetSystemPowerStatus()
  // when we know that its result will not affect the final result. (As
  // confirmed by the static_asserts above, onBatteryPower does not affect the
  // result when lowPriorityProcess is true.)
  SYSTEM_POWER_STATUS status;
  const bool onBatteryPower = !lowPriorityProcess &&
                              GetSystemPowerStatus(&status) &&
                              (status.ACLineStatus == 0);

  return GetDesiredTimerPeriod(onBatteryPower, lowPriorityProcess);
}
#endif  // XP_WIN
// Uncomment the following line to enable runtime stats during development.
// #define TIMERS_RUNTIME_STATS
#ifdef TIMERS_RUNTIME_STATS
// This class gathers durations and displays some basic stats when destroyed.
// It is intended to be used as a static variable (see `AUTO_TIMERS_STATS`
// below), to display stats at the end of the program.
class StaticTimersStats {
 public:
  explicit StaticTimersStats(const char* aName) : mName(aName) {}

  ~StaticTimersStats() {
    // Using unsigned long long for computations and printfs.
    using ULL = unsigned long long;
    ULL n = static_cast<ULL>(mCount);
    if (n == 0) {
      printf("[%d] Timers stats `%s`: (nothing)\n",
             int(profiler_current_process_id().ToNumber()), mName);
    } else if (ULL sumNs = static_cast<ULL>(mSumDurationsNs); sumNs == 0) {
      printf("[%d] Timers stats `%s`: %llu\n",
             int(profiler_current_process_id().ToNumber()), mName, n);
    } else {
      printf("[%d] Timers stats `%s`: %llu ns / %llu = %llu ns, max %llu ns\n",
             int(profiler_current_process_id().ToNumber()), mName, sumNs, n,
             sumNs / n, static_cast<ULL>(mLongestDurationNs));
    }
  }

  void AddDurationFrom(TimeStamp aStart) {
    // Duration between aStart and now, rounded to the nearest nanosecond.
    DurationNs duration = static_cast<DurationNs>(
        (TimeStamp::Now() - aStart).ToMicroseconds() * 1000 + 0.5);
    mSumDurationsNs += duration;
    ++mCount;
    // Update mLongestDurationNs if this one is longer.
    for (;;) {
      DurationNs longest = mLongestDurationNs;
      if (MOZ_LIKELY(longest >= duration)) {
        // This duration is not the longest, nothing to do.
        break;
      }
      if (MOZ_LIKELY(mLongestDurationNs.compareExchange(longest, duration))) {
        // Successfully updated `mLongestDurationNs` with the new value.
        break;
      }
      // Otherwise someone else just updated `mLongestDurationNs`, we need to
      // try again by looping.
    }
  }

  void AddCount() {
    MOZ_ASSERT(mSumDurationsNs == 0, "Don't mix counts and durations");
    ++mCount;
  }

 private:
  using DurationNs = uint64_t;
  using Count = uint32_t;

  Atomic<DurationNs> mSumDurationsNs{0};
  Atomic<DurationNs> mLongestDurationNs{0};
  Atomic<Count> mCount{0};
  const char* mName;
};
// RAII object that measures its scoped lifetime duration and reports it to a
// `StaticTimersStats`.
class MOZ_RAII AutoTimersStats {
 public:
  explicit AutoTimersStats(StaticTimersStats& aStats)
      : mStats(aStats), mStart(TimeStamp::Now()) {}

  ~AutoTimersStats() { mStats.AddDurationFrom(mStart); }

 private:
  StaticTimersStats& mStats;
  TimeStamp mStart;
};
// Macro that should be used to collect basic statistics from measurements of
// block durations, from where this macro is, until the end of its enclosing
// scope. The name is used in the static variable name and when displaying stats
// at the end of the program; another location could use the same name but their
// stats will not be combined, so use different names if these locations should
// be distinguished.
#  define AUTO_TIMERS_STATS(name)                  \
    static ::StaticTimersStats sStat##name(#name); \
    ::AutoTimersStats autoStat##name(sStat##name);

// This macro only counts the number of times it's used, not durations.
// Don't mix with AUTO_TIMERS_STATS!
#  define COUNT_TIMERS_STATS(name)                 \
    static ::StaticTimersStats sStat##name(#name); \
    sStat##name.AddCount();

#else  // TIMERS_RUNTIME_STATS

#  define AUTO_TIMERS_STATS(name)
#  define COUNT_TIMERS_STATS(name)

#endif  // TIMERS_RUNTIME_STATS else
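// Illustrative usage sketch (not part of the original source): with
// TIMERS_RUNTIME_STATS defined, a block of interest can be instrumented like
// this, and the per-name stats are printed when the process exits:
//
//   void SomeHotPath() {
//     AUTO_TIMERS_STATS(SomeHotPath);  // measures the rest of this scope
//     ...
//   }
//
// `SomeHotPath` is a made-up name for the example; the AUTO_TIMERS_STATS /
// COUNT_TIMERS_STATS call sites further down in this file (e.g. in AddTimer
// and RemoveTimer) show the real usage.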
NS_IMPL_ISUPPORTS_INHERITED(TimerThread, Runnable, nsIObserver)

TimerThread::TimerThread()
    : Runnable("TimerThread"),
      mMonitor("TimerThread.mMonitor"),
      mAllowedEarlyFiringMicroseconds(0) {}
TimerThread::~TimerThread() {
  NS_ASSERTION(mTimers.IsEmpty(), "Timers remain in TimerThread::~TimerThread");

#if TIMER_THREAD_STATISTICS
  {
    MonitorAutoLock lock(mMonitor);
    PrintStatistics();
  }
#endif
}
class TimerObserverRunnable : public Runnable {
 public:
  explicit TimerObserverRunnable(nsIObserver* aObserver)
      : mozilla::Runnable("TimerObserverRunnable"), mObserver(aObserver) {}

  NS_IMETHOD Run() override;

 private:
  nsCOMPtr<nsIObserver> mObserver;
};

NS_IMETHODIMP
TimerObserverRunnable::Run() {
  nsCOMPtr<nsIObserverService> observerService =
      mozilla::services::GetObserverService();
  if (observerService) {
    observerService->AddObserver(mObserver, "sleep_notification", false);
    observerService->AddObserver(mObserver, "wake_notification", false);
    observerService->AddObserver(mObserver, "suspend_process_notification",
                                 false);
    observerService->AddObserver(mObserver, "resume_process_notification",
                                 false);
    observerService->AddObserver(mObserver, "ipc:process-priority-changed",
                                 false);
  }
  return NS_OK;
}
// TimerEventAllocator is a thread-safe allocator used only for nsTimerEvents.
// It's needed to avoid contention over the default allocator lock when
// firing timer events (see bug 733277). The thread-safety is required because
// nsTimerEvent objects are allocated on the timer thread, and freed on another
// thread. Because TimerEventAllocator has its own lock, contention over that
// lock is limited to the allocation and deallocation of nsTimerEvent objects.
//
// Because this is layered over ArenaAllocator, it never shrinks -- even
// "freed" nsTimerEvents aren't truly freed, they're just put onto a free-list
// for later recycling. So the amount of memory consumed will always be equal
// to the high-water mark consumption. But nsTimerEvents are small and it's
// unusual to have more than a few hundred of them, so this shouldn't be a
// problem in practice.
class TimerEventAllocator {
 private:
  struct FreeEntry {
    FreeEntry* mNext;
  };

  ArenaAllocator<4096> mPool MOZ_GUARDED_BY(mMonitor);
  FreeEntry* mFirstFree MOZ_GUARDED_BY(mMonitor);
  mozilla::Monitor mMonitor;

 public:
  TimerEventAllocator()
      : mFirstFree(nullptr), mMonitor("TimerEventAllocator") {}

  ~TimerEventAllocator() = default;

  void* Alloc(size_t aSize);
  void Free(void* aPtr);
};
// This is a nsICancelableRunnable because we can dispatch it to Workers and
// those can be shut down at any time, and in these cases, Cancel() is called
// instead of Run().
class nsTimerEvent final : public CancelableRunnable {
 public:
  NS_IMETHOD Run() override;

  nsresult Cancel() override {
    mTimer->Cancel();
    return NS_OK;
  }

#ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
  NS_IMETHOD GetName(nsACString& aName) override;
#endif

  explicit nsTimerEvent(already_AddRefed<nsTimerImpl> aTimer,
                        ProfilerThreadId aTimerThreadId)
      : mozilla::CancelableRunnable("nsTimerEvent"),
        mTimer(aTimer),
        mGeneration(mTimer->GetGeneration()),
        mTimerThreadId(aTimerThreadId) {
    // Note: We override operator new for this class, and the override is
    // fallible!
    sAllocatorUsers++;

    if (MOZ_LOG_TEST(GetTimerLog(), LogLevel::Debug) ||
        profiler_thread_is_being_profiled_for_markers(mTimerThreadId)) {
      mInitTime = TimeStamp::Now();
    }
  }

  static void Init();
  static void Shutdown();
  static void DeleteAllocatorIfNeeded();

  static void* operator new(size_t aSize) noexcept(true) {
    return sAllocator->Alloc(aSize);
  }
  void operator delete(void* aPtr) {
    sAllocator->Free(aPtr);
    sAllocatorUsers--;
    DeleteAllocatorIfNeeded();
  }

  already_AddRefed<nsTimerImpl> ForgetTimer() { return mTimer.forget(); }

 private:
  nsTimerEvent(const nsTimerEvent&) = delete;
  nsTimerEvent& operator=(const nsTimerEvent&) = delete;
  nsTimerEvent& operator=(const nsTimerEvent&&) = delete;

  ~nsTimerEvent() {
    MOZ_ASSERT(!sCanDeleteAllocator || sAllocatorUsers > 0,
               "This will result in us attempting to deallocate the "
               "nsTimerEvent allocator twice");
  }

  TimeStamp mInitTime;
  RefPtr<nsTimerImpl> mTimer;
  const int32_t mGeneration;
  ProfilerThreadId mTimerThreadId;

  static TimerEventAllocator* sAllocator;

  static Atomic<int32_t, SequentiallyConsistent> sAllocatorUsers;
  static Atomic<bool, SequentiallyConsistent> sCanDeleteAllocator;
};

TimerEventAllocator* nsTimerEvent::sAllocator = nullptr;
Atomic<int32_t, SequentiallyConsistent> nsTimerEvent::sAllocatorUsers;
Atomic<bool, SequentiallyConsistent> nsTimerEvent::sCanDeleteAllocator;
void* TimerEventAllocator::Alloc(size_t aSize) {
  MOZ_ASSERT(aSize == sizeof(nsTimerEvent));

  mozilla::MonitorAutoLock lock(mMonitor);

  void* p;
  if (mFirstFree) {
    // Reuse a previously freed slot from the free-list.
    p = mFirstFree;
    mFirstFree = mFirstFree->mNext;
  } else {
    p = mPool.Allocate(aSize, fallible);
  }

  return p;
}

void TimerEventAllocator::Free(void* aPtr) {
  mozilla::MonitorAutoLock lock(mMonitor);

  // Put the freed object at the head of the free-list for later recycling.
  FreeEntry* entry = reinterpret_cast<FreeEntry*>(aPtr);

  entry->mNext = mFirstFree;
  mFirstFree = entry;
}
struct TimerMarker {
  static constexpr Span<const char> MarkerTypeName() {
    return MakeStringSpan("Timer");
  }
  static void StreamJSONMarkerData(baseprofiler::SpliceableJSONWriter& aWriter,
                                   uint32_t aDelay, uint8_t aType,
                                   MarkerThreadId aThreadId, bool aCanceled) {
    aWriter.IntProperty("delay", aDelay);
    if (!aThreadId.IsUnspecified()) {
      // Tech note: If `ToNumber()` returns a uint64_t, the conversion to
      // int64_t is "implementation-defined" before C++20. This is
      // acceptable here, because this is a one-way conversion to a unique
      // identifier that's used to visually separate data by thread on the
      // front-end.
      aWriter.IntProperty(
          "threadId", static_cast<int64_t>(aThreadId.ThreadId().ToNumber()));
    }
    if (aCanceled) {
      aWriter.BoolProperty("canceled", true);
      // Show a red 'X' as a prefix on the marker chart for canceled timers.
      aWriter.StringProperty("prefix", "❌");
    }

    // The string property for the timer type is not written when the type is
    // one shot, as that's the type used almost all the time, and that would
    // consume space in the profiler buffer and then in the profile JSON,
    // getting in the way of capturing long power profiles.
    // Bug 1815677 might make this cheap to capture.
    if (aType != nsITimer::TYPE_ONE_SHOT) {
      if (aType == nsITimer::TYPE_REPEATING_SLACK) {
        aWriter.StringProperty("ttype", "repeating slack");
      } else if (aType == nsITimer::TYPE_REPEATING_PRECISE) {
        aWriter.StringProperty("ttype", "repeating precise");
      } else if (aType == nsITimer::TYPE_REPEATING_PRECISE_CAN_SKIP) {
        aWriter.StringProperty("ttype", "repeating precise can skip");
      } else if (aType == nsITimer::TYPE_REPEATING_SLACK_LOW_PRIORITY) {
        aWriter.StringProperty("ttype", "repeating slack low priority");
      } else if (aType == nsITimer::TYPE_ONE_SHOT_LOW_PRIORITY) {
        aWriter.StringProperty("ttype", "low priority");
      }
    }
  }
  static MarkerSchema MarkerTypeDisplay() {
    using MS = MarkerSchema;
    MS schema{MS::Location::MarkerChart, MS::Location::MarkerTable};
    schema.AddKeyLabelFormat("delay", "Delay", MS::Format::Milliseconds);
    schema.AddKeyLabelFormat("ttype", "Timer Type", MS::Format::String);
    schema.AddKeyLabelFormat("canceled", "Canceled", MS::Format::String);
    schema.SetChartLabel("{marker.data.prefix} {marker.data.delay}");
    schema.SetTableLabel(
        "{marker.name} - {marker.data.prefix} {marker.data.delay}");
    return schema;
  }
};
struct AddRemoveTimerMarker {
  static constexpr Span<const char> MarkerTypeName() {
    return MakeStringSpan("AddRemoveTimer");
  }
  static void StreamJSONMarkerData(baseprofiler::SpliceableJSONWriter& aWriter,
                                   const ProfilerString8View& aTimerName,
                                   uint32_t aDelay, MarkerThreadId aThreadId) {
    aWriter.StringProperty("name", aTimerName);
    aWriter.IntProperty("delay", aDelay);
    if (!aThreadId.IsUnspecified()) {
      // Tech note: If `ToNumber()` returns a uint64_t, the conversion to
      // int64_t is "implementation-defined" before C++20. This is
      // acceptable here, because this is a one-way conversion to a unique
      // identifier that's used to visually separate data by thread on the
      // front-end.
      aWriter.IntProperty(
          "threadId", static_cast<int64_t>(aThreadId.ThreadId().ToNumber()));
    }
  }
  static MarkerSchema MarkerTypeDisplay() {
    using MS = MarkerSchema;
    MS schema{MS::Location::MarkerChart, MS::Location::MarkerTable};
    schema.AddKeyLabelFormatSearchable("name", "Name", MS::Format::String,
                                       MS::Searchable::Searchable);
    schema.AddKeyLabelFormat("delay", "Delay", MS::Format::Milliseconds);
    schema.SetTableLabel(
        "{marker.name} - {marker.data.name} - {marker.data.delay}");
    return schema;
  }
};
void nsTimerEvent::Init() { sAllocator = new TimerEventAllocator(); }

void nsTimerEvent::Shutdown() {
  sCanDeleteAllocator = true;
  DeleteAllocatorIfNeeded();
}

void nsTimerEvent::DeleteAllocatorIfNeeded() {
  if (sCanDeleteAllocator && sAllocatorUsers == 0) {
    delete sAllocator;
    sAllocator = nullptr;
  }
}
#ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
NS_IMETHODIMP
nsTimerEvent::GetName(nsACString& aName) {
  bool current;
  MOZ_RELEASE_ASSERT(
      NS_SUCCEEDED(mTimer->mEventTarget->IsOnCurrentThread(&current)) &&
      current);

  mTimer->GetName(aName);
  return NS_OK;
}
#endif
NS_IMETHODIMP
nsTimerEvent::Run() {
  if (MOZ_LOG_TEST(GetTimerLog(), LogLevel::Debug)) {
    TimeStamp now = TimeStamp::Now();
    MOZ_LOG(GetTimerLog(), LogLevel::Debug,
            ("[this=%p] time between PostTimerEvent() and Fire(): %fms\n", this,
             (now - mInitTime).ToMilliseconds()));
  }

  if (profiler_thread_is_being_profiled_for_markers(mTimerThreadId)) {
    MutexAutoLock lock(mTimer->mMutex);
    nsAutoCString name;
    mTimer->GetName(name, lock);
    // This adds a marker with the timer name as the marker name, to make it
    // obvious which timers are being used. This marker will be useful to
    // understand which timers might be added and firing excessively often.
    profiler_add_marker(
        name, geckoprofiler::category::TIMER,
        MarkerOptions(MOZ_LIKELY(mInitTime)
                          ? MarkerTiming::Interval(
                                mTimer->mTimeout - mTimer->mDelay, mInitTime)
                          : MarkerTiming::IntervalUntilNowFrom(
                                mTimer->mTimeout - mTimer->mDelay),
                      MarkerThreadId(mTimerThreadId)),
        TimerMarker{}, mTimer->mDelay.ToMilliseconds(), mTimer->mType,
        MarkerThreadId::CurrentThread(), false);
    // This marker is meant to help understand the behavior of the timer thread.
    profiler_add_marker(
        "PostTimerEvent", geckoprofiler::category::OTHER,
        MarkerOptions(MOZ_LIKELY(mInitTime)
                          ? MarkerTiming::IntervalUntilNowFrom(mInitTime)
                          : MarkerTiming::InstantNow(),
                      MarkerThreadId(mTimerThreadId)),
        AddRemoveTimerMarker{}, name, mTimer->mDelay.ToMilliseconds(),
        MarkerThreadId::CurrentThread());
  }

  mTimer->Fire(mGeneration);

  return NS_OK;
}
nsresult TimerThread::Init() {
  mMonitor.AssertCurrentThreadOwns();
  MOZ_LOG(GetTimerLog(), LogLevel::Debug,
          ("TimerThread::Init [%d]\n", mInitialized));

  if (!mInitialized) {
    nsTimerEvent::Init();

    // We hold on to mThread to keep the thread alive.
    nsresult rv =
        NS_NewNamedThread("Timer", getter_AddRefs(mThread), this,
                          {.stackSize = nsIThreadManager::DEFAULT_STACK_SIZE,
                           .blockDispatch = true});
    if (NS_FAILED(rv)) {
      mThread = nullptr;
    } else {
      RefPtr<TimerObserverRunnable> r = new TimerObserverRunnable(this);
      if (NS_IsMainThread()) {
        r->Run();
      } else {
        NS_DispatchToMainThread(r);
      }
    }

    mInitialized = true;
  }

  if (!mThread) {
    return NS_ERROR_FAILURE;
  }

  return NS_OK;
}
nsresult TimerThread::Shutdown() {
  MOZ_LOG(GetTimerLog(), LogLevel::Debug, ("TimerThread::Shutdown begin\n"));

  if (!mThread) {
    return NS_ERROR_NOT_INITIALIZED;
  }

  nsTArray<RefPtr<nsTimerImpl>> timers;
  {
    MonitorAutoLock lock(mMonitor);

    // Notify the cond var so that Run() can return.
    if (mWaiting) {
      mNotified = true;
      mMonitor.Notify();
    }

    // Need to copy the contents of the mTimers array to a local array,
    // because the calls to the timers' Cancel() (which may release them)
    // must not be done under the lock. The destructor of a callback
    // might potentially call some code reentering the same lock,
    // which leads to unexpected behavior or deadlock.
    timers.SetCapacity(mTimers.Length());
    for (Entry& entry : mTimers) {
      if (entry.Value()) {
        timers.AppendElement(entry.Take());
      }
    }
    mTimers.Clear();
  }

  for (const RefPtr<nsTimerImpl>& timer : timers) {
    timer->Cancel();
  }

  mThread->Shutdown();  // wait for the thread to die

  nsTimerEvent::Shutdown();

  MOZ_LOG(GetTimerLog(), LogLevel::Debug, ("TimerThread::Shutdown end\n"));
  return NS_OK;
}
struct MicrosecondsToInterval {
  PRIntervalTime operator[](size_t aMs) const {
    return PR_MicrosecondsToInterval(aMs);
  }
};

struct IntervalComparator {
  int operator()(PRIntervalTime aInterval) const {
    return (0 < aInterval) ? -1 : 1;
  }
};
void TimerThread::VerifyTimerListConsistency() const {
  mMonitor.AssertCurrentThreadOwns();

  // Find the first non-canceled timer (and check its cached timeout if we find
  // it).
  const size_t timerCount = mTimers.Length();
  size_t lastNonCanceledTimerIndex = 0;
  while (lastNonCanceledTimerIndex < timerCount &&
         !mTimers[lastNonCanceledTimerIndex].Value()) {
    ++lastNonCanceledTimerIndex;
  }
  MOZ_ASSERT(lastNonCanceledTimerIndex == timerCount ||
             mTimers[lastNonCanceledTimerIndex].Value());
  MOZ_ASSERT(lastNonCanceledTimerIndex == timerCount ||
             mTimers[lastNonCanceledTimerIndex].Value()->mTimeout ==
                 mTimers[lastNonCanceledTimerIndex].Timeout());

  // Verify that mTimers is sorted and the cached timeouts are consistent.
  for (size_t timerIndex = lastNonCanceledTimerIndex + 1;
       timerIndex < timerCount; ++timerIndex) {
    if (mTimers[timerIndex].Value()) {
      MOZ_ASSERT(mTimers[timerIndex].Timeout() ==
                 mTimers[timerIndex].Value()->mTimeout);
      MOZ_ASSERT(mTimers[timerIndex].Timeout() >=
                 mTimers[lastNonCanceledTimerIndex].Timeout());
      lastNonCanceledTimerIndex = timerIndex;
    }
  }
}
size_t TimerThread::ComputeTimerInsertionIndex(const TimeStamp& timeout) const {
  mMonitor.AssertCurrentThreadOwns();

  const size_t timerCount = mTimers.Length();

  size_t firstGtIndex = 0;
  while (firstGtIndex < timerCount &&
         (!mTimers[firstGtIndex].Value() ||
          mTimers[firstGtIndex].Timeout() <= timeout)) {
    ++firstGtIndex;
  }

  return firstGtIndex;
}
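// Illustrative note (not part of the original source): canceled entries never
// stop the scan, so with cached timeouts [10ms, (canceled), 30ms] a new timer
// due at 20ms yields insertion index 2, i.e. it lands just before the 30ms
// entry.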
TimeStamp TimerThread::ComputeWakeupTimeFromTimers() const {
  mMonitor.AssertCurrentThreadOwns();

  // Timer list should be non-empty and first timer should always be
  // non-canceled at this point and we rely on that here.
  MOZ_ASSERT(!mTimers.IsEmpty());
  MOZ_ASSERT(mTimers[0].Value());

  // Overview: Find the last timer in the list that can be "bundled" together in
  // the same wake-up with mTimers[0] and use its timeout as our target wake-up
  // time.

  // bundleWakeup is when we should wake up in order to be able to fire all of
  // the timers in our selected bundle. It will always be the timeout of the
  // last timer in the bundle.
  TimeStamp bundleWakeup = mTimers[0].Timeout();

  // cutoffTime is the latest that we can wake up for the timers currently
  // accepted into the bundle. This needs to be updated as we go through the
  // list because later timers may have more strict delay tolerances.
  const TimeDuration minTimerDelay = TimeDuration::FromMilliseconds(
      StaticPrefs::timer_minimum_firing_delay_tolerance_ms());
  const TimeDuration maxTimerDelay = TimeDuration::FromMilliseconds(
      StaticPrefs::timer_maximum_firing_delay_tolerance_ms());
  TimeStamp cutoffTime =
      bundleWakeup + ComputeAcceptableFiringDelay(mTimers[0].Delay(),
                                                  minTimerDelay, maxTimerDelay);

  const size_t timerCount = mTimers.Length();
  for (size_t entryIndex = 1; entryIndex < timerCount; ++entryIndex) {
    const Entry& curEntry = mTimers[entryIndex];
    const nsTimerImpl* curTimer = curEntry.Value();
    if (!curTimer) {
      // Canceled timer - skip it.
      continue;
    }

    const TimeStamp curTimerDue = curEntry.Timeout();
    if (curTimerDue > cutoffTime) {
      // Can't include this timer in the bundle - it fires too late.
      break;
    }

    // This timer can be included in the bundle. Update bundleWakeup and
    // cutoffTime.
    bundleWakeup = curTimerDue;
    cutoffTime = std::min(
        curTimerDue + ComputeAcceptableFiringDelay(
                          curEntry.Delay(), minTimerDelay, maxTimerDelay),
        cutoffTime);
    MOZ_ASSERT(bundleWakeup <= cutoffTime);
  }

#if !defined(XP_WIN)
  // Due to the fact that, on Windows, each TimeStamp object holds two distinct
  // "values", this assert is not valid there. See bug 1829983 for the details.
  MOZ_ASSERT(bundleWakeup - mTimers[0].Timeout() <=
             ComputeAcceptableFiringDelay(mTimers[0].Delay(), minTimerDelay,
                                          maxTimerDelay));
#endif

  return bundleWakeup;
}
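// Illustrative walk-through of the bundling above (not part of the original
// source), assuming each timer tolerates 10ms of firing delay: with timers due
// at T+0ms, T+6ms and T+20ms, the first two are bundled (T+6ms is within the
// T+10ms cutoff) and the wake-up targets T+6ms, while the T+20ms timer falls
// past the bundle's cutoff and is left for a later wake-up.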
TimeDuration TimerThread::ComputeAcceptableFiringDelay(
    TimeDuration timerDuration, TimeDuration minDelay,
    TimeDuration maxDelay) const {
  // Use the timer's duration divided by this value as a base for how much
  // firing delay a timer can accept. 8 was chosen specifically because it is a
  // power of two which means that this division turns nicely into a shift.
  constexpr int64_t timerDurationDivider = 8;
  static_assert(IsPowerOfTwo(static_cast<uint64_t>(timerDurationDivider)));
  const TimeDuration tmp = timerDuration / timerDurationDivider;
  return std::clamp(tmp, minDelay, maxDelay);
}
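// Worked example (illustrative, not part of the original source): a timer with
// a 4000ms delay gets 4000 / 8 = 500ms of acceptable firing delay, which is
// then clamped into the [minDelay, maxDelay] range that the callers derive
// from the timer.minimum/maximum_firing_delay_tolerance_ms prefs.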
NS_IMETHODIMP
TimerThread::Run() {
  MonitorAutoLock lock(mMonitor);

  mProfilerThreadId = profiler_current_thread_id();

  // TODO: Make mAllowedEarlyFiringMicroseconds const and initialize it in the
  // constructor.
  mAllowedEarlyFiringMicroseconds = 250;
  const TimeDuration allowedEarlyFiring =
      TimeDuration::FromMicroseconds(mAllowedEarlyFiringMicroseconds);

  bool forceRunNextTimer = false;

  // Queue for tracking of how many timers are fired on each wake-up. We need to
  // buffer these locally and only send off to glean occasionally, to avoid the
  // overhead of doing so on every wake-up.
  static constexpr size_t kMaxQueuedTimerFired = 128;
  size_t queuedTimerFiredCount = 0;
  AutoTArray<uint64_t, kMaxQueuedTimerFired> queuedTimersFiredPerWakeup;
  queuedTimersFiredPerWakeup.SetLengthAndRetainStorage(kMaxQueuedTimerFired);

#ifdef XP_WIN
  // kTimerPeriodEvalIntervalSec is the minimum amount of time that must pass
  // before we will consider changing the timer period again.
  static constexpr float kTimerPeriodEvalIntervalSec = 2.0f;
  const TimeDuration timerPeriodEvalInterval =
      TimeDuration::FromSeconds(kTimerPeriodEvalIntervalSec);
  TimeStamp nextTimerPeriodEval = TimeStamp::Now() + timerPeriodEvalInterval;

  // If this is false, we will perform all of the logic but will stop short of
  // actually changing the timer period.
  const bool adjustTimerPeriod =
      StaticPrefs::timer_auto_increase_timer_resolution();
  UINT lastTimePeriodSet = ComputeDesiredTimerPeriod();

  if (adjustTimerPeriod) {
    timeBeginPeriod(lastTimePeriodSet);
  }
#endif
  uint64_t timersFiredThisWakeup = 0;
  while (!mShutdown) {
    // Have to use PRIntervalTime here, since PR_WaitCondVar takes it.
    TimeDuration waitFor;
    bool forceRunThisTimer = forceRunNextTimer;
    forceRunNextTimer = false;

    VerifyTimerListConsistency();

    if (mSleeping) {
      // Sleep for 0.1 seconds while not firing timers.
      uint32_t milliseconds = 100;
      if (ChaosMode::isActive(ChaosFeature::TimerScheduling)) {
        milliseconds = ChaosMode::randomUint32LessThan(200);
      }
      waitFor = TimeDuration::FromMilliseconds(milliseconds);
    } else {
      waitFor = TimeDuration::Forever();
      TimeStamp now = TimeStamp::Now();

#ifdef XP_WIN
      if (now >= nextTimerPeriodEval) {
        const UINT newTimePeriod = ComputeDesiredTimerPeriod();
        if (newTimePeriod != lastTimePeriodSet) {
          if (adjustTimerPeriod) {
            timeEndPeriod(lastTimePeriodSet);
            timeBeginPeriod(newTimePeriod);
          }
          lastTimePeriodSet = newTimePeriod;
        }
        nextTimerPeriodEval = now + timerPeriodEvalInterval;
      }
#endif

#if TIMER_THREAD_STATISTICS
      if (!mNotified && !mIntendedWakeupTime.IsNull() &&
          now < mIntendedWakeupTime) {
        ++mEarlyWakeups;
        const double earlinessms = (mIntendedWakeupTime - now).ToMilliseconds();
        mTotalEarlyWakeupTime += earlinessms;
      }
#endif

      RemoveLeadingCanceledTimersInternal();

      if (!mTimers.IsEmpty()) {
        if (now + allowedEarlyFiring >= mTimers[0].Value()->mTimeout ||
            forceRunThisTimer) {
        next:
          // NB: AddRef before the Release under RemoveTimerInternal to avoid
          // mRefCnt passing through zero, in case all other refs than the one
          // from mTimers have gone away (the last non-mTimers[i]-ref's Release
          // must be racing with us, blocked in gThread->RemoveTimer waiting
          // for TimerThread::mMonitor, under nsTimerImpl::Release.

          RefPtr<nsTimerImpl> timerRef(mTimers[0].Take());
          RemoveFirstTimerInternal();
          MOZ_LOG(GetTimerLog(), LogLevel::Debug,
                  ("Timer thread woke up %fms from when it was supposed to\n",
                   fabs((now - timerRef->mTimeout).ToMilliseconds())));

          // We are going to let the call to PostTimerEvent here handle the
          // release of the timer so that we don't end up releasing the timer
          // on the TimerThread instead of on the thread it targets.
          ++timersFiredThisWakeup;
          LogTimerEvent::Run run(timerRef.get());
          PostTimerEvent(timerRef.forget());
          // Update now, as PostTimerEvent plus the locking may have taken a
          // tick or two, and we may goto next below.
          now = TimeStamp::Now();
        }
      }

      RemoveLeadingCanceledTimersInternal();

      if (!mTimers.IsEmpty()) {
        TimeStamp timeout = mTimers[0].Value()->mTimeout;

        // Don't wait at all (even for PR_INTERVAL_NO_WAIT) if the next timer
        // is due now or overdue.
        //
        // Note that we can only sleep for integer values of a certain
        // resolution. We use mAllowedEarlyFiringMicroseconds, calculated
        // before, to do the optimal rounding (i.e., of how to decide what
        // interval is so small we should not wait at all).
        double microseconds = (timeout - now).ToMicroseconds();

        // The mean value of sChaosFractions must be 1 to ensure that the
        // average of a long sequence of timeouts converges to the actual sum
        // of their delays.
        static constexpr double sChaosFractions[] = {0.0, 0.25, 0.5, 0.75,
                                                     1.0, 1.75, 2.75};
        if (ChaosMode::isActive(ChaosFeature::TimerScheduling)) {
          microseconds *= sChaosFractions[ChaosMode::randomUint32LessThan(
              std::size(sChaosFractions))];
          forceRunNextTimer = true;
        }

        if (microseconds < mAllowedEarlyFiringMicroseconds) {
          forceRunNextTimer = false;
          goto next;  // round down; execute event now
        }

        // TECHNICAL NOTE: Determining waitFor (by subtracting |now| from our
        // desired wake-up time) at this point is not ideal. For one thing, the
        // |now| that we have at this point is somewhat old. Secondly, there is
        // quite a bit of code between here and where we actually use waitFor to
        // request sleep. If I am thinking about this correctly, both of these
        // will contribute to us requesting more sleep than is actually needed
        // to wake up at our desired time. We could avoid this problem by only
        // determining our desired wake-up time here and then calculating the
        // wait time when we're actually about to sleep.
        const TimeStamp wakeupTime = ComputeWakeupTimeFromTimers();
        waitFor = wakeupTime - now;

        // If this were to fail that would mean that we had more timers that we
        // should have fired.
        MOZ_ASSERT(!waitFor.IsZero());

        if (ChaosMode::isActive(ChaosFeature::TimerScheduling)) {
          // If chaos mode is active then mess with the amount of time that we
          // request to sleep (without changing what we record as our expected
          // wake-up time). This will simulate unintended early/late wake-ups.
          const double waitInMs = waitFor.ToMilliseconds();
          const double chaosWaitInMs =
              waitInMs * sChaosFractions[ChaosMode::randomUint32LessThan(
                             std::size(sChaosFractions))];
          waitFor = TimeDuration::FromMilliseconds(chaosWaitInMs);
        }

        mIntendedWakeupTime = wakeupTime;
      } else {
        mIntendedWakeupTime = TimeStamp{};
      }

      if (MOZ_LOG_TEST(GetTimerLog(), LogLevel::Debug)) {
        if (waitFor == TimeDuration::Forever()) {
          MOZ_LOG(GetTimerLog(), LogLevel::Debug, ("waiting forever\n"));
        } else {
          MOZ_LOG(GetTimerLog(), LogLevel::Debug,
                  ("waiting for %f\n", waitFor.ToMilliseconds()));
        }
      }
    }
    // About to sleep - let's make note of how many timers we processed and
    // see if we should send out a new batch of telemetry.
    queuedTimersFiredPerWakeup[queuedTimerFiredCount] = timersFiredThisWakeup;
    ++queuedTimerFiredCount;
    if (queuedTimerFiredCount == kMaxQueuedTimerFired) {
      glean::timer_thread::timers_fired_per_wakeup.AccumulateSamples(
          queuedTimersFiredPerWakeup);
      queuedTimerFiredCount = 0;
    }

#if TIMER_THREAD_STATISTICS
    {
      size_t bucketIndex = 0;
      while (bucketIndex < sTimersFiredPerWakeupBucketCount - 1 &&
             timersFiredThisWakeup >
                 sTimersFiredPerWakeupThresholds[bucketIndex]) {
        ++bucketIndex;
      }
      MOZ_ASSERT(bucketIndex < sTimersFiredPerWakeupBucketCount);
      ++mTimersFiredPerWakeup[bucketIndex];
      ++mTotalWakeupCount;
      if (mNotified) {
        ++mTimersFiredPerNotifiedWakeup[bucketIndex];
        ++mTotalNotifiedWakeupCount;
      } else {
        ++mTimersFiredPerUnnotifiedWakeup[bucketIndex];
        ++mTotalUnnotifiedWakeupCount;
      }
    }
#endif

    timersFiredThisWakeup = 0;

    mWaiting = true;
    mNotified = false;
    {
      AUTO_PROFILER_TRACING_MARKER("TimerThread", "Wait", OTHER);
      mMonitor.Wait(waitFor);
    }
    if (mNotified) {
      forceRunNextTimer = false;
    }
    mWaiting = false;
  }

  // About to shut down - let's send out the final batch of timers fired counts.
  if (queuedTimerFiredCount != 0) {
    queuedTimersFiredPerWakeup.SetLengthAndRetainStorage(queuedTimerFiredCount);
    glean::timer_thread::timers_fired_per_wakeup.AccumulateSamples(
        queuedTimersFiredPerWakeup);
  }

#ifdef XP_WIN
  // About to shut down - let's finish off the last time period that we set.
  if (adjustTimerPeriod) {
    timeEndPeriod(lastTimePeriodSet);
  }
#endif

  return NS_OK;
}
nsresult TimerThread::AddTimer(nsTimerImpl* aTimer,
                               const MutexAutoLock& aProofOfLock) {
  MonitorAutoLock lock(mMonitor);
  AUTO_TIMERS_STATS(TimerThread_AddTimer);

  if (!aTimer->mEventTarget) {
    return NS_ERROR_NOT_INITIALIZED;
  }

  nsresult rv = Init();
  if (NS_FAILED(rv)) {
    return rv;
  }

  // Awaken the timer thread if:
  // - This timer needs to fire *before* the Timer Thread is scheduled to wake
  //   up, or
  // - The delay is 0, which is usually meant to be run as soon as possible.
  //   Note: Even if the thread is scheduled to wake up now/soon, on some
  //   systems there could be a significant delay compared to notifying, which
  //   is almost immediate; and some users of 0-delay depend on it being this
  //   fast.
  const TimeDuration minTimerDelay = TimeDuration::FromMilliseconds(
      StaticPrefs::timer_minimum_firing_delay_tolerance_ms());
  const TimeDuration maxTimerDelay = TimeDuration::FromMilliseconds(
      StaticPrefs::timer_maximum_firing_delay_tolerance_ms());
  const TimeDuration firingDelay = ComputeAcceptableFiringDelay(
      aTimer->mDelay, minTimerDelay, maxTimerDelay);
  const bool firingBeforeNextWakeup =
      mIntendedWakeupTime.IsNull() ||
      (aTimer->mTimeout + firingDelay < mIntendedWakeupTime);
  const bool wakeUpTimerThread =
      mWaiting && (firingBeforeNextWakeup || aTimer->mDelay.IsZero());

#if TIMER_THREAD_STATISTICS
  if (mTotalTimersAdded == 0) {
    mFirstTimerAdded = TimeStamp::Now();
  }
  ++mTotalTimersAdded;
#endif

  // Add the timer to our list.
  if (!AddTimerInternal(*aTimer)) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (wakeUpTimerThread) {
    mNotified = true;
    mMonitor.Notify();
  }

  if (profiler_thread_is_being_profiled_for_markers(mProfilerThreadId)) {
    nsAutoCString name;
    aTimer->GetName(name, aProofOfLock);

    nsLiteralCString prefix("Anonymous_");
    profiler_add_marker(
        "AddTimer", geckoprofiler::category::OTHER,
        MarkerOptions(MarkerThreadId(mProfilerThreadId),
                      MarkerStack::MaybeCapture(
                          name.Equals("nonfunction:JS") ||
                          StringHead(name, prefix.Length()) == prefix)),
        AddRemoveTimerMarker{}, name, aTimer->mDelay.ToMilliseconds(),
        MarkerThreadId::CurrentThread());
  }

  return NS_OK;
}
nsresult TimerThread::RemoveTimer(nsTimerImpl* aTimer,
                                  const MutexAutoLock& aProofOfLock) {
  MonitorAutoLock lock(mMonitor);
  AUTO_TIMERS_STATS(TimerThread_RemoveTimer);

  // Remove the timer from our array. Tell callers that aTimer was not found
  // by returning NS_ERROR_NOT_AVAILABLE.
  if (!RemoveTimerInternal(*aTimer)) {
    return NS_ERROR_NOT_AVAILABLE;
  }

#if TIMER_THREAD_STATISTICS
  ++mTotalTimersRemoved;
#endif

  // Note: The timer thread is *not* awoken.
  // The removed-timer entry is just left null, and will be reused (by a new or
  // re-set timer) or discarded (when the timer thread logic handles non-null
  // timers around it).
  // If this was the front timer, and in the unlikely case that its entry is not
  // soon reused by a re-set timer, the timer thread will wake up at the
  // previously-scheduled time, but will quickly notice that there is no actual
  // pending timer, and will restart its wait until the following real timeout.

  if (profiler_thread_is_being_profiled_for_markers(mProfilerThreadId)) {
    nsAutoCString name;
    aTimer->GetName(name, aProofOfLock);

    nsLiteralCString prefix("Anonymous_");
    // This marker is meant to help understand the behavior of the timer thread.
    profiler_add_marker(
        "RemoveTimer", geckoprofiler::category::OTHER,
        MarkerOptions(MarkerThreadId(mProfilerThreadId),
                      MarkerStack::MaybeCapture(
                          name.Equals("nonfunction:JS") ||
                          StringHead(name, prefix.Length()) == prefix)),
        AddRemoveTimerMarker{}, name, aTimer->mDelay.ToMilliseconds(),
        MarkerThreadId::CurrentThread());
    // This adds a marker with the timer name as the marker name, to make it
    // obvious which timers are being used. This marker will be useful to
    // understand which timers might be added and removed excessively often.
    profiler_add_marker(name, geckoprofiler::category::TIMER,
                        MarkerOptions(MarkerTiming::IntervalUntilNowFrom(
                                          aTimer->mTimeout - aTimer->mDelay),
                                      MarkerThreadId(mProfilerThreadId)),
                        TimerMarker{}, aTimer->mDelay.ToMilliseconds(),
                        aTimer->mType, MarkerThreadId::CurrentThread(), true);
  }

  return NS_OK;
}
TimeStamp TimerThread::FindNextFireTimeForCurrentThread(TimeStamp aDefault,
                                                        uint32_t aSearchBound) {
  MonitorAutoLock lock(mMonitor);
  AUTO_TIMERS_STATS(TimerThread_FindNextFireTimeForCurrentThread);

  for (const Entry& entry : mTimers) {
    const nsTimerImpl* timer = entry.Value();
    if (timer) {
      if (entry.Timeout() > aDefault) {
        return aDefault;
      }

      // Don't yield to timers created with the *_LOW_PRIORITY type.
      if (!timer->IsLowPriority()) {
        bool isOnCurrentThread = false;
        nsresult rv =
            timer->mEventTarget->IsOnCurrentThread(&isOnCurrentThread);
        if (NS_SUCCEEDED(rv) && isOnCurrentThread) {
          return entry.Timeout();
        }
      }

      if (aSearchBound == 0) {
        // Couldn't find any non-low priority timers for the current thread.
        // Return a compromise between a very short and a long idle time.
        TimeStamp fallbackDeadline =
            TimeStamp::Now() + TimeDuration::FromMilliseconds(16);
        return fallbackDeadline < aDefault ? fallbackDeadline : aDefault;
      }

      --aSearchBound;
    }
  }

  // No timers for this thread, return the default.
  return aDefault;
}
// This function must be called from within a lock.
// Also: we hold the mutex for the nsTimerImpl.
bool TimerThread::AddTimerInternal(nsTimerImpl& aTimer) {
  mMonitor.AssertCurrentThreadOwns();
  aTimer.mMutex.AssertCurrentThreadOwns();
  AUTO_TIMERS_STATS(TimerThread_AddTimerInternal);

  LogTimerEvent::LogDispatch(&aTimer);

  const TimeStamp& timeout = aTimer.mTimeout;
  const size_t insertionIndex = ComputeTimerInsertionIndex(timeout);

  if (insertionIndex != 0 && !mTimers[insertionIndex - 1].Value()) {
    // Very common scenario in practice: The timer just before the insertion
    // point is canceled, overwrite it.
    AUTO_TIMERS_STATS(TimerThread_AddTimerInternal_overwrite_before);
    mTimers[insertionIndex - 1] = Entry{aTimer};
    return true;
  }

  const size_t length = mTimers.Length();
  if (insertionIndex == length) {
    // We're at the end (including when it's the very first insertion), add the
    // new timer at the end.
    AUTO_TIMERS_STATS(TimerThread_AddTimerInternal_append);
    return mTimers.AppendElement(Entry{aTimer}, mozilla::fallible);
  }

  if (!mTimers[insertionIndex].Value()) {
    // The timer at the insertion point is canceled, overwrite it.
    AUTO_TIMERS_STATS(TimerThread_AddTimerInternal_overwrite);
    mTimers[insertionIndex] = Entry{aTimer};
    return true;
  }

  // The new timer has to be inserted.
  AUTO_TIMERS_STATS(TimerThread_AddTimerInternal_insert);
  // The capacity should be checked first, because if it needs to be increased
  // and the memory allocation fails, only the new timer should be lost.
  if (length == mTimers.Capacity() && mTimers[length - 1].Value()) {
    // We have reached capacity, and the last entry is not canceled, so we
    // really want to increase the capacity in case the extra slot is required.
    // To force-expand the array, append a canceled-timer entry with a timestamp
    // far in the future.
    // This empty Entry may be used below to receive the moved-from previous
    // entry. If not, it may be used in a later call if we need to append a new
    // timer at the end.
    AUTO_TIMERS_STATS(TimerThread_AddTimerInternal_insert_expand);
    if (!mTimers.AppendElement(
            Entry{mTimers[length - 1].Timeout() +
                  TimeDuration::FromSeconds(365.0 * 24.0 * 60.0 * 60.0)},
            mozilla::fallible)) {
      return false;
    }
  }

  // Extract the timer at the insertion point, and put the new timer in its
  // place.
  Entry extractedEntry = std::exchange(mTimers[insertionIndex], Entry{aTimer});
  // Following entries can be pushed until we hit a canceled timer or the end.
  for (size_t i = insertionIndex + 1; i < length; ++i) {
    Entry& entryRef = mTimers[i];
    if (!entryRef.Value()) {
      // Canceled entry, overwrite it with the extracted entry from before.
      COUNT_TIMERS_STATS(TimerThread_AddTimerInternal_insert_overwrite);
      entryRef = std::move(extractedEntry);
      return true;
    }
    // Write extracted entry from before, and extract current entry.
    COUNT_TIMERS_STATS(TimerThread_AddTimerInternal_insert_shifts);
    std::swap(entryRef, extractedEntry);
  }
  // We've reached the end of the list, with still one extracted entry to
  // re-insert. We've checked the capacity above, this cannot fail.
  COUNT_TIMERS_STATS(TimerThread_AddTimerInternal_insert_append);
  mTimers.AppendElement(std::move(extractedEntry));
  return true;
}
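// Illustrative walk-through of the insertion above (not part of the original
// source): inserting a 15ms timer into cached timeouts [10ms, 20ms,
// (canceled), 40ms] computes insertion index 1, swaps the new entry in at
// index 1, shifts the old 20ms entry right, and drops it into the canceled
// slot at index 2 - so nothing past the first canceled entry has to move.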
// This function must be called from within a lock.
// Also: we hold the mutex for the nsTimerImpl.
bool TimerThread::RemoveTimerInternal(nsTimerImpl& aTimer) {
  mMonitor.AssertCurrentThreadOwns();
  aTimer.mMutex.AssertCurrentThreadOwns();
  AUTO_TIMERS_STATS(TimerThread_RemoveTimerInternal);
  if (!aTimer.IsInTimerThread()) {
    COUNT_TIMERS_STATS(TimerThread_RemoveTimerInternal_not_in_list);
    return false;
  }
  AUTO_TIMERS_STATS(TimerThread_RemoveTimerInternal_in_list);
  for (auto& entry : mTimers) {
    if (entry.Value() == &aTimer) {
      entry.Forget();
      return true;
    }
  }
  MOZ_ASSERT(!aTimer.IsInTimerThread(),
             "Not found in the list but it should be!?");
  return false;
}
void TimerThread::RemoveLeadingCanceledTimersInternal() {
  mMonitor.AssertCurrentThreadOwns();
  AUTO_TIMERS_STATS(TimerThread_RemoveLeadingCanceledTimersInternal);

  size_t toRemove = 0;
  while (toRemove < mTimers.Length() && !mTimers[toRemove].Value()) {
    ++toRemove;
  }
  mTimers.RemoveElementsAt(0, toRemove);
}

void TimerThread::RemoveFirstTimerInternal() {
  mMonitor.AssertCurrentThreadOwns();
  AUTO_TIMERS_STATS(TimerThread_RemoveFirstTimerInternal);
  MOZ_ASSERT(!mTimers.IsEmpty());
  mTimers.RemoveElementAt(0);
}
void TimerThread::PostTimerEvent(already_AddRefed<nsTimerImpl> aTimerRef) {
  mMonitor.AssertCurrentThreadOwns();
  AUTO_TIMERS_STATS(TimerThread_PostTimerEvent);

  RefPtr<nsTimerImpl> timer(aTimerRef);

#if TIMER_THREAD_STATISTICS
  const double actualFiringDelay =
      std::max((TimeStamp::Now() - timer->mTimeout).ToMilliseconds(), 0.0);
  if (mNotified) {
    ++mTotalTimersFiredNotified;
    mTotalActualTimerFiringDelayNotified += actualFiringDelay;
  } else {
    ++mTotalTimersFiredUnnotified;
    mTotalActualTimerFiringDelayUnnotified += actualFiringDelay;
  }
#endif

  if (!timer->mEventTarget) {
    NS_ERROR("Attempt to post timer event to NULL event target");
    return;
  }

  // XXX we may want to reuse this nsTimerEvent in the case of repeating timers.

  // Since we already addref'd 'timer', we don't need to addref here.
  // We will release either in ~nsTimerEvent(), or pass the reference back to
  // the caller. We need to copy the generation number from this timer into the
  // event, so we can avoid firing a timer that was re-initialized after being
  // canceled.

  nsCOMPtr<nsIEventTarget> target = timer->mEventTarget;

  void* p = nsTimerEvent::operator new(sizeof(nsTimerEvent));
  if (!p) {
    return;
  }
  RefPtr<nsTimerEvent> event =
      ::new (KnownNotNull, p) nsTimerEvent(timer.forget(), mProfilerThreadId);

  nsresult rv;
  {
    // We release mMonitor around the Dispatch because if the Dispatch interacts
    // with the timer API we'll deadlock.
    MonitorAutoUnlock unlock(mMonitor);
    rv = target->Dispatch(event, NS_DISPATCH_NORMAL);
  }
  if (NS_FAILED(rv)) {
    timer = event->ForgetTimer();
    // We do this to avoid possible deadlock by taking the two locks in a
    // different order than is used in RemoveTimer(). RemoveTimer() has
    // aTimer->mMutex first. We use timer.get() to keep static analysis
    // happy.
    // NOTE: I'm not sure that any of the below is actually necessary. It
    // seems to me that the timer that we're trying to fire will have already
    // been removed prior to this.
    MutexAutoLock lock1(timer.get()->mMutex);
    MonitorAutoLock lock2(mMonitor);
    RemoveTimerInternal(*timer);
  }
}
void TimerThread::DoBeforeSleep() {
  MonitorAutoLock lock(mMonitor);
  mSleeping = true;
}

// Note: wake may be notified without preceding sleep notification.
void TimerThread::DoAfterSleep() {
  MonitorAutoLock lock(mMonitor);
  mSleeping = false;

  // Wake up the timer thread to re-process the array to ensure the sleep delay
  // is correct, and fire any expired timers (perhaps quite a few).
  PROFILER_MARKER_UNTYPED("AfterSleep", OTHER,
                          MarkerThreadId(mProfilerThreadId));
  mMonitor.Notify();
}
NS_IMETHODIMP
TimerThread::Observe(nsISupports* aSubject, const char* aTopic,
                     const char16_t* aData) {
  if (strcmp(aTopic, "ipc:process-priority-changed") == 0) {
    nsCOMPtr<nsIPropertyBag2> props = do_QueryInterface(aSubject);
    MOZ_ASSERT(props != nullptr);

    int32_t priority = static_cast<int32_t>(hal::PROCESS_PRIORITY_UNKNOWN);
    props->GetPropertyAsInt32(u"priority"_ns, &priority);
    mCachedPriority.store(static_cast<hal::ProcessPriority>(priority),
                          std::memory_order_relaxed);
  }

  if (StaticPrefs::timer_ignore_sleep_wake_notifications()) {
    return NS_OK;
  }

  if (strcmp(aTopic, "sleep_notification") == 0 ||
      strcmp(aTopic, "suspend_process_notification") == 0) {
    DoBeforeSleep();
  } else if (strcmp(aTopic, "wake_notification") == 0 ||
             strcmp(aTopic, "resume_process_notification") == 0) {
    DoAfterSleep();
  }

  return NS_OK;
}
uint32_t TimerThread::AllowedEarlyFiringMicroseconds() {
  MonitorAutoLock lock(mMonitor);
  return mAllowedEarlyFiringMicroseconds;
}
#if TIMER_THREAD_STATISTICS
void TimerThread::PrintStatistics() const {
  mMonitor.AssertCurrentThreadOwns();

  const TimeStamp freshNow = TimeStamp::Now();
  const double timeElapsed = mFirstTimerAdded.IsNull()
                                 ? 0.0
                                 : (freshNow - mFirstTimerAdded).ToSeconds();
  printf_stderr("TimerThread Stats (Total time %8.2fs)\n", timeElapsed);

  printf_stderr("Added: %6llu Removed: %6llu Fired: %6llu\n", mTotalTimersAdded,
                mTotalTimersRemoved,
                mTotalTimersFiredNotified + mTotalTimersFiredUnnotified);

  auto PrintTimersFiredBucket =
      [](const AutoTArray<size_t, sTimersFiredPerWakeupBucketCount>& buckets,
         const size_t wakeupCount, const size_t timersFiredCount,
         const double totalTimerDelay, const char* label) {
        printf_stderr("%s : [", label);
        for (size_t bucketVal : buckets) {
          printf_stderr(" %5llu", bucketVal);
        }
        printf_stderr(
            " ] Wake-ups/timer %6llu / %6llu (%7.4f) Avg Timer Delay %7.4f\n",
            wakeupCount, timersFiredCount,
            static_cast<double>(wakeupCount) / timersFiredCount,
            totalTimerDelay / timersFiredCount);
      };

  printf_stderr("Wake-ups:\n");
  PrintTimersFiredBucket(
      mTimersFiredPerWakeup, mTotalWakeupCount,
      mTotalTimersFiredNotified + mTotalTimersFiredUnnotified,
      mTotalActualTimerFiringDelayNotified +
          mTotalActualTimerFiringDelayUnnotified,
      "All        ");
  PrintTimersFiredBucket(mTimersFiredPerNotifiedWakeup,
                         mTotalNotifiedWakeupCount, mTotalTimersFiredNotified,
                         mTotalActualTimerFiringDelayNotified, "Notified   ");
  PrintTimersFiredBucket(mTimersFiredPerUnnotifiedWakeup,
                         mTotalUnnotifiedWakeupCount,
                         mTotalTimersFiredUnnotified,
                         mTotalActualTimerFiringDelayUnnotified, "Unnotified ");

  printf_stderr("Early Wake-ups: %6llu Avg: %7.4fms\n", mEarlyWakeups,
                mTotalEarlyWakeupTime / mEarlyWakeups);
}
#endif
/* This nsReadOnlyTimer class is used for the values returned by the
 * TimerThread::GetTimers method.
 * It is not possible to return a strong reference to the nsTimerImpl
 * instance (that could extend the lifetime of the timer and cause it to fire
 * a callback pointing to already freed memory) or a weak reference
 * (nsSupportsWeakReference doesn't support freeing the referee on a thread
 * that isn't the thread that owns the weak reference), so instead the timer
 * name, delay and type are copied to a new object. */
class nsReadOnlyTimer final : public nsITimer {
 public:
  explicit nsReadOnlyTimer(const nsACString& aName, uint32_t aDelay,
                           uint32_t aType)
      : mName(aName), mDelay(aDelay), mType(aType) {}
  NS_DECL_ISUPPORTS

  NS_IMETHOD Init(nsIObserver* aObserver, uint32_t aDelayInMs,
                  uint32_t aType) override {
    return NS_ERROR_NOT_IMPLEMENTED;
  }
  NS_IMETHOD InitWithCallback(nsITimerCallback* aCallback, uint32_t aDelayInMs,
                              uint32_t aType) override {
    return NS_ERROR_NOT_IMPLEMENTED;
  }
  NS_IMETHOD InitHighResolutionWithCallback(nsITimerCallback* aCallback,
                                            const mozilla::TimeDuration& aDelay,
                                            uint32_t aType) override {
    return NS_ERROR_NOT_IMPLEMENTED;
  }
  NS_IMETHOD Cancel(void) override { return NS_ERROR_NOT_IMPLEMENTED; }
  NS_IMETHOD InitWithNamedFuncCallback(nsTimerCallbackFunc aCallback,
                                       void* aClosure, uint32_t aDelay,
                                       uint32_t aType,
                                       const char* aName) override {
    return NS_ERROR_NOT_IMPLEMENTED;
  }
  NS_IMETHOD InitHighResolutionWithNamedFuncCallback(
      nsTimerCallbackFunc aCallback, void* aClosure,
      const mozilla::TimeDuration& aDelay, uint32_t aType,
      const char* aName) override {
    return NS_ERROR_NOT_IMPLEMENTED;
  }

  NS_IMETHOD GetName(nsACString& aName) override {
    aName = mName;
    return NS_OK;
  }
  NS_IMETHOD GetDelay(uint32_t* aDelay) override {
    *aDelay = mDelay;
    return NS_OK;
  }
  NS_IMETHOD SetDelay(uint32_t aDelay) override {
    return NS_ERROR_NOT_IMPLEMENTED;
  }
  NS_IMETHOD GetType(uint32_t* aType) override {
    *aType = mType;
    return NS_OK;
  }
  NS_IMETHOD SetType(uint32_t aType) override {
    return NS_ERROR_NOT_IMPLEMENTED;
  }
  NS_IMETHOD GetClosure(void** aClosure) override {
    return NS_ERROR_NOT_IMPLEMENTED;
  }
  NS_IMETHOD GetCallback(nsITimerCallback** aCallback) override {
    return NS_ERROR_NOT_IMPLEMENTED;
  }
  NS_IMETHOD GetTarget(nsIEventTarget** aTarget) override {
    return NS_ERROR_NOT_IMPLEMENTED;
  }
  NS_IMETHOD SetTarget(nsIEventTarget* aTarget) override {
    return NS_ERROR_NOT_IMPLEMENTED;
  }
  NS_IMETHOD GetAllowedEarlyFiringMicroseconds(
      uint32_t* aAllowedEarlyFiringMicroseconds) override {
    return NS_ERROR_NOT_IMPLEMENTED;
  }
  size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) override {
    return sizeof(*this);
  }

 private:
  nsCString mName;
  uint32_t mDelay;
  uint32_t mType;

  ~nsReadOnlyTimer() = default;
};

NS_IMPL_ISUPPORTS(nsReadOnlyTimer, nsITimer)

nsresult TimerThread::GetTimers(nsTArray<RefPtr<nsITimer>>& aRetVal) {
  nsTArray<RefPtr<nsTimerImpl>> timers;
  {
    MonitorAutoLock lock(mMonitor);
    for (const auto& entry : mTimers) {
      nsTimerImpl* timer = entry.Value();
      if (!timer) {
        continue;
      }
      timers.AppendElement(timer);
    }
  }

  for (nsTimerImpl* timer : timers) {
    nsAutoCString name;
    timer->GetName(name);

    uint32_t delay = 0;
    timer->GetDelay(&delay);

    uint32_t type = 0;
    timer->GetType(&type);

    aRetVal.AppendElement(new nsReadOnlyTimer(name, delay, type));
  }

  return NS_OK;
}