/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "ProfileBufferEntry.h"

#include "mozilla/ProfilerMarkers.h"

#include "ProfileBuffer.h"
#include "ProfiledThreadData.h"
#include "ProfilerBacktrace.h"
#include "ProfilerRustBindings.h"

#include "js/ProfilingFrameIterator.h"

#include "jsfriendapi.h"
#include "mozilla/CycleCollectedJSContext.h"
#include "mozilla/Logging.h"
#include "mozilla/JSONStringWriteFuncs.h"
#include "mozilla/ScopeExit.h"
#include "mozilla/Sprintf.h"
#include "mozilla/StackWalk.h"
#include "nsThreadUtils.h"
#include "nsXULAppAPI.h"
#include "ProfilerCodeAddressService.h"

#include <type_traits>

using namespace mozilla;
using namespace mozilla::literals::ProportionValue_literals;

////////////////////////////////////////////////////////////////////////
// BEGIN ProfileBufferEntry

ProfileBufferEntry::ProfileBufferEntry()
    : mKind(Kind::INVALID), mStorage{0, 0, 0, 0, 0, 0, 0, 0} {}

// aString must be a static string.
ProfileBufferEntry::ProfileBufferEntry(Kind aKind, const char* aString)
    : mKind(aKind) {
  MOZ_ASSERT(aKind == Kind::Label);
  memcpy(mStorage, &aString, sizeof(aString));
}

ProfileBufferEntry::ProfileBufferEntry(Kind aKind, char aChars[kNumChars])
    : mKind(aKind) {
  MOZ_ASSERT(aKind == Kind::DynamicStringFragment);
  memcpy(mStorage, aChars, kNumChars);
}

ProfileBufferEntry::ProfileBufferEntry(Kind aKind, void* aPtr) : mKind(aKind) {
  memcpy(mStorage, &aPtr, sizeof(aPtr));
}

ProfileBufferEntry::ProfileBufferEntry(Kind aKind, double aDouble)
    : mKind(aKind) {
  memcpy(mStorage, &aDouble, sizeof(aDouble));
}

ProfileBufferEntry::ProfileBufferEntry(Kind aKind, int aInt) : mKind(aKind) {
  memcpy(mStorage, &aInt, sizeof(aInt));
}

ProfileBufferEntry::ProfileBufferEntry(Kind aKind, int64_t aInt64)
    : mKind(aKind) {
  memcpy(mStorage, &aInt64, sizeof(aInt64));
}

ProfileBufferEntry::ProfileBufferEntry(Kind aKind, uint64_t aUint64)
    : mKind(aKind) {
  memcpy(mStorage, &aUint64, sizeof(aUint64));
}

ProfileBufferEntry::ProfileBufferEntry(Kind aKind, ProfilerThreadId aThreadId)
    : mKind(aKind) {
  static_assert(std::is_trivially_copyable_v<ProfilerThreadId>);
  static_assert(sizeof(aThreadId) <= sizeof(mStorage));
  memcpy(mStorage, &aThreadId, sizeof(aThreadId));
}

const char* ProfileBufferEntry::GetString() const {
  const char* result;
  memcpy(&result, mStorage, sizeof(result));
  return result;
}

void* ProfileBufferEntry::GetPtr() const {
  void* result;
  memcpy(&result, mStorage, sizeof(result));
  return result;
}

double ProfileBufferEntry::GetDouble() const {
  double result;
  memcpy(&result, mStorage, sizeof(result));
  return result;
}

int ProfileBufferEntry::GetInt() const {
  int result;
  memcpy(&result, mStorage, sizeof(result));
  return result;
}

int64_t ProfileBufferEntry::GetInt64() const {
  int64_t result;
  memcpy(&result, mStorage, sizeof(result));
  return result;
}

uint64_t ProfileBufferEntry::GetUint64() const {
  uint64_t result;
  memcpy(&result, mStorage, sizeof(result));
  return result;
}

ProfilerThreadId ProfileBufferEntry::GetThreadId() const {
  ProfilerThreadId result;
  static_assert(std::is_trivially_copyable_v<ProfilerThreadId>);
  memcpy(&result, mStorage, sizeof(result));
  return result;
}

void ProfileBufferEntry::CopyCharsInto(char (&aOutArray)[kNumChars]) const {
  memcpy(aOutArray, mStorage, kNumChars);
}

// END ProfileBufferEntry
////////////////////////////////////////////////////////////////////////
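
// For illustration: an entry round-trips a trivially-copyable value through
// mStorage, e.g.:
//
//   ProfileBufferEntry entry(ProfileBufferEntry::Kind::Time, 1.5);
//   double time = entry.GetDouble();  // time == 1.5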

struct TypeInfo {
  Maybe<nsCString> mKeyedBy;
  Maybe<nsCString> mName;
  Maybe<nsCString> mLocation;
  Maybe<unsigned> mLineNumber;
};

// As mentioned in ProfileBufferEntry.h, the JSON format contains many
// arrays whose elements are laid out according to various schemas to help
// de-duplication. This RAII class helps write these arrays by keeping track of
// the last non-null element written and adding the appropriate number of null
// elements when writing new non-null elements. It also automatically opens and
// closes an array element on the given JSON writer.
//
// You grant the AutoArraySchemaWriter exclusive access to the JSONWriter and
// the UniqueJSONStrings objects for the lifetime of AutoArraySchemaWriter. Do
// not access them independently while the AutoArraySchemaWriter is alive.
// If you need to add complex objects, call FreeFormElement(), which will give
// you temporary access to the writer.
//
// Example usage:
//
//   // Define the schema of elements in this type of array: [FOO, BAR, BAZ]
//   enum Schema : uint32_t {
//     FOO = 0,
//     BAR = 1,
//     BAZ = 2
//   };
//
//   AutoArraySchemaWriter writer(someJsonWriter, someUniqueStrings);
//   if (shouldWriteFoo) {
//     writer.IntElement(FOO, getFoo());
//   }
//   ... etc.
//
// The elements need to be added in-order.
class MOZ_RAII AutoArraySchemaWriter {
 public:
  explicit AutoArraySchemaWriter(SpliceableJSONWriter& aWriter)
      : mJSONWriter(aWriter), mNextFreeIndex(0) {
    mJSONWriter.StartArrayElement();
  }

  ~AutoArraySchemaWriter() { mJSONWriter.EndArray(); }

  template <typename T>
  void IntElement(uint32_t aIndex, T aValue) {
    static_assert(!std::is_same_v<T, uint64_t>,
                  "Narrowing uint64 -> int64 conversion not allowed");
    FillUpTo(aIndex);
    mJSONWriter.IntElement(static_cast<int64_t>(aValue));
  }

  void DoubleElement(uint32_t aIndex, double aValue) {
    FillUpTo(aIndex);
    mJSONWriter.DoubleElement(aValue);
  }

  void TimeMsElement(uint32_t aIndex, double aTime_ms) {
    FillUpTo(aIndex);
    mJSONWriter.TimeDoubleMsElement(aTime_ms);
  }

  void BoolElement(uint32_t aIndex, bool aValue) {
    FillUpTo(aIndex);
    mJSONWriter.BoolElement(aValue);
  }

 protected:
  SpliceableJSONWriter& Writer() { return mJSONWriter; }

  void FillUpTo(uint32_t aIndex) {
    MOZ_ASSERT(aIndex >= mNextFreeIndex);
    mJSONWriter.NullElements(aIndex - mNextFreeIndex);
    mNextFreeIndex = aIndex + 1;
  }

 private:
  SpliceableJSONWriter& mJSONWriter;
  uint32_t mNextFreeIndex;
};

// Same as AutoArraySchemaWriter, but this can also write strings (output as
// indexes into the table of unique strings).
class MOZ_RAII AutoArraySchemaWithStringsWriter : public AutoArraySchemaWriter {
 public:
  AutoArraySchemaWithStringsWriter(SpliceableJSONWriter& aWriter,
                                   UniqueJSONStrings& aStrings)
      : AutoArraySchemaWriter(aWriter), mStrings(aStrings) {}

  void StringElement(uint32_t aIndex, const Span<const char>& aValue) {
    FillUpTo(aIndex);
    mStrings.WriteElement(Writer(), aValue);
  }

 private:
  UniqueJSONStrings& mStrings;
};
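
// For example, `writer.StringElement(NAME, MakeStringSpan("foo"))` does not
// write "foo" itself; it writes the index of "foo" in the unique-string
// table, so a string shared by many frames is stored only once. (NAME is an
// illustrative schema index.)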

Maybe<UniqueStacks::StackKey> UniqueStacks::BeginStack(const FrameKey& aFrame) {
  if (Maybe<uint32_t> frameIndex = GetOrAddFrameIndex(aFrame); frameIndex) {
    return Some(StackKey(*frameIndex));
  }
  return Nothing{};
}

Vector<JITFrameInfoForBufferRange>&&
JITFrameInfo::MoveRangesWithNewFailureLatch(FailureLatch& aFailureLatch) && {
  aFailureLatch.SetFailureFrom(mLocalFailureLatchSource);
  return std::move(mRanges);
}

UniquePtr<UniqueJSONStrings>&&
JITFrameInfo::MoveUniqueStringsWithNewFailureLatch(
    FailureLatch& aFailureLatch) && {
  if (mUniqueStrings) {
    mUniqueStrings->ChangeFailureLatchAndForwardState(aFailureLatch);
  } else {
    aFailureLatch.SetFailureFrom(mLocalFailureLatchSource);
  }
  return std::move(mUniqueStrings);
}

Maybe<UniqueStacks::StackKey> UniqueStacks::AppendFrame(
    const StackKey& aStack, const FrameKey& aFrame) {
  if (Maybe<uint32_t> stackIndex = GetOrAddStackIndex(aStack); stackIndex) {
    if (Maybe<uint32_t> frameIndex = GetOrAddFrameIndex(aFrame); frameIndex) {
      return Some(StackKey(aStack, *stackIndex, *frameIndex));
    }
  }
  return Nothing{};
}

JITFrameInfoForBufferRange JITFrameInfoForBufferRange::Clone() const {
  JITFrameInfoForBufferRange::JITAddressToJITFramesMap jitAddressToJITFramesMap;
  MOZ_RELEASE_ASSERT(
      jitAddressToJITFramesMap.reserve(mJITAddressToJITFramesMap.count()));
  for (auto iter = mJITAddressToJITFramesMap.iter(); !iter.done();
       iter.next()) {
    const mozilla::Vector<JITFrameKey>& srcKeys = iter.get().value();
    mozilla::Vector<JITFrameKey> destKeys;
    MOZ_RELEASE_ASSERT(destKeys.appendAll(srcKeys));
    jitAddressToJITFramesMap.putNewInfallible(iter.get().key(),
                                              std::move(destKeys));
  }

  JITFrameInfoForBufferRange::JITFrameToFrameJSONMap jitFrameToFrameJSONMap;
  MOZ_RELEASE_ASSERT(
      jitFrameToFrameJSONMap.reserve(mJITFrameToFrameJSONMap.count()));
  for (auto iter = mJITFrameToFrameJSONMap.iter(); !iter.done(); iter.next()) {
    jitFrameToFrameJSONMap.putNewInfallible(iter.get().key(),
                                            iter.get().value());
  }

  return JITFrameInfoForBufferRange{mRangeStart, mRangeEnd,
                                    std::move(jitAddressToJITFramesMap),
                                    std::move(jitFrameToFrameJSONMap)};
}

JITFrameInfo::JITFrameInfo(const JITFrameInfo& aOther,
                           mozilla::ProgressLogger aProgressLogger)
    : mUniqueStrings(MakeUniqueFallible<UniqueJSONStrings>(
          mLocalFailureLatchSource, *aOther.mUniqueStrings,
          aProgressLogger.CreateSubLoggerFromTo(
              0_pc, "Creating JIT frame info unique strings...", 49_pc,
              "Created JIT frame info unique strings"))) {
  if (!mUniqueStrings) {
    mLocalFailureLatchSource.SetFailure(
        "OOM in JITFrameInfo allocating mUniqueStrings");
    return;
  }

  if (mRanges.reserve(aOther.mRanges.length())) {
    for (auto&& [i, progressLogger] :
         aProgressLogger.CreateLoopSubLoggersFromTo(50_pc, 100_pc,
                                                    aOther.mRanges.length(),
                                                    "Copying JIT frame info")) {
      mRanges.infallibleAppend(aOther.mRanges[i].Clone());
    }
  } else {
    mLocalFailureLatchSource.SetFailure("OOM in JITFrameInfo resizing mRanges");
  }
}

bool UniqueStacks::FrameKey::NormalFrameData::operator==(
    const NormalFrameData& aOther) const {
  return mLocation == aOther.mLocation &&
         mRelevantForJS == aOther.mRelevantForJS &&
         mBaselineInterp == aOther.mBaselineInterp &&
         mInnerWindowID == aOther.mInnerWindowID && mLine == aOther.mLine &&
         mColumn == aOther.mColumn && mCategoryPair == aOther.mCategoryPair;
}

bool UniqueStacks::FrameKey::JITFrameData::operator==(
    const JITFrameData& aOther) const {
  return mCanonicalAddress == aOther.mCanonicalAddress &&
         mDepth == aOther.mDepth && mRangeIndex == aOther.mRangeIndex;
}

// Consume aJITFrameInfo by stealing its string table and its JIT frame info
// ranges. The JIT frame info contains JSON which refers to strings from the
// JIT frame info's string table, so our string table needs to have the same
// strings at the same indices.
UniqueStacks::UniqueStacks(
    FailureLatch& aFailureLatch, JITFrameInfo&& aJITFrameInfo,
    ProfilerCodeAddressService* aCodeAddressService /* = nullptr */)
    : mUniqueStrings(std::move(aJITFrameInfo)
                         .MoveUniqueStringsWithNewFailureLatch(aFailureLatch)),
      mCodeAddressService(aCodeAddressService),
      mFrameTableWriter(aFailureLatch),
      mStackTableWriter(aFailureLatch),
      mJITInfoRanges(std::move(aJITFrameInfo)
                         .MoveRangesWithNewFailureLatch(aFailureLatch)) {
  if (!mUniqueStrings) {
    SetFailure("Did not get mUniqueStrings from JITFrameInfo");
    return;
  }

  mFrameTableWriter.StartBareList();
  mStackTableWriter.StartBareList();
}

Maybe<uint32_t> UniqueStacks::GetOrAddStackIndex(const StackKey& aStack) {
  if (Failed()) {
    return Nothing{};
  }

  uint32_t count = mStackToIndexMap.count();
  auto entry = mStackToIndexMap.lookupForAdd(aStack);
  if (entry) {
    MOZ_ASSERT(entry->value() < count);
    return Some(entry->value());
  }

  if (!mStackToIndexMap.add(entry, aStack, count)) {
    SetFailure("OOM in UniqueStacks::GetOrAddStackIndex");
    return Nothing{};
  }
  StreamStack(aStack);
  return Some(count);
}

Maybe<Vector<UniqueStacks::FrameKey>>
UniqueStacks::LookupFramesForJITAddressFromBufferPos(void* aJITAddress,
                                                     uint64_t aBufferPos) {
  JITFrameInfoForBufferRange* rangeIter =
      std::lower_bound(mJITInfoRanges.begin(), mJITInfoRanges.end(), aBufferPos,
                       [](const JITFrameInfoForBufferRange& aRange,
                          uint64_t aPos) { return aRange.mRangeEnd < aPos; });
  MOZ_RELEASE_ASSERT(
      rangeIter != mJITInfoRanges.end() &&
          rangeIter->mRangeStart <= aBufferPos &&
          aBufferPos < rangeIter->mRangeEnd,
      "Buffer position of jit address needs to be in one of the ranges");

  using JITFrameKey = JITFrameInfoForBufferRange::JITFrameKey;

  const JITFrameInfoForBufferRange& jitFrameInfoRange = *rangeIter;
  auto jitFrameKeys =
      jitFrameInfoRange.mJITAddressToJITFramesMap.lookup(aJITAddress);
  if (!jitFrameKeys) {
    return Nothing{};
  }

  // Map the array of JITFrameKeys to an array of FrameKeys, and ensure that
  // each of the FrameKeys exists in mFrameToIndexMap.
  Vector<FrameKey> frameKeys;
  MOZ_RELEASE_ASSERT(frameKeys.initCapacity(jitFrameKeys->value().length()));
  for (const JITFrameKey& jitFrameKey : jitFrameKeys->value()) {
    FrameKey frameKey(jitFrameKey.mCanonicalAddress, jitFrameKey.mDepth,
                      rangeIter - mJITInfoRanges.begin());
    uint32_t index = mFrameToIndexMap.count();
    auto entry = mFrameToIndexMap.lookupForAdd(frameKey);
    if (!entry) {
      // We need to add this frame to our frame table. The JSON for this frame
      // already exists in jitFrameInfoRange, we just need to splice it into
      // the frame table and give it an index.
      auto frameJSON =
          jitFrameInfoRange.mJITFrameToFrameJSONMap.lookup(jitFrameKey);
      MOZ_RELEASE_ASSERT(frameJSON, "Should have cached JSON for this frame");
      mFrameTableWriter.Splice(frameJSON->value());
      MOZ_RELEASE_ASSERT(mFrameToIndexMap.add(entry, frameKey, index));
    }
    MOZ_RELEASE_ASSERT(frameKeys.append(std::move(frameKey)));
  }
  return Some(std::move(frameKeys));
}

Maybe<uint32_t> UniqueStacks::GetOrAddFrameIndex(const FrameKey& aFrame) {
  if (Failed()) {
    return Nothing{};
  }

  uint32_t count = mFrameToIndexMap.count();
  auto entry = mFrameToIndexMap.lookupForAdd(aFrame);
  if (entry) {
    MOZ_ASSERT(entry->value() < count);
    return Some(entry->value());
  }

  if (!mFrameToIndexMap.add(entry, aFrame, count)) {
    SetFailure("OOM in UniqueStacks::GetOrAddFrameIndex");
    return Nothing{};
  }
  StreamNonJITFrame(aFrame);
  return Some(count);
}

void UniqueStacks::SpliceFrameTableElements(SpliceableJSONWriter& aWriter) {
  mFrameTableWriter.EndBareList();
  aWriter.TakeAndSplice(mFrameTableWriter.TakeChunkedWriteFunc());
}

void UniqueStacks::SpliceStackTableElements(SpliceableJSONWriter& aWriter) {
  mStackTableWriter.EndBareList();
  aWriter.TakeAndSplice(mStackTableWriter.TakeChunkedWriteFunc());
}

[[nodiscard]] nsAutoCString UniqueStacks::FunctionNameOrAddress(void* aPC) {
  nsAutoCString nameOrAddress;

  if (!mCodeAddressService ||
      !mCodeAddressService->GetFunction(aPC, nameOrAddress) ||
      nameOrAddress.IsEmpty()) {
    nameOrAddress.AppendASCII("0x");
    // `AppendInt` only knows `uint32_t` or `uint64_t`, but because these are
    // just aliases for *two* of (`unsigned`, `unsigned long`, and `unsigned
    // long long`), a call with `uintptr_t` could use the third type and
    // therefore would be ambiguous.
    // So we want to force using exactly `uint32_t` or `uint64_t`, whichever
    // matches the size of `uintptr_t`.
    // (The outer cast to `uint` should then be a no-op.)
    using uint = std::conditional_t<sizeof(uintptr_t) <= sizeof(uint32_t),
                                    uint32_t, uint64_t>;
    nameOrAddress.AppendInt(static_cast<uint>(reinterpret_cast<uintptr_t>(aPC)),
                            16);
  }

  return nameOrAddress;
}
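
// For illustration: on a 64-bit build `uint` is `uint64_t`, so for an
// unresolvable aPC of 0x7fff1234 this fallback appends "0x" followed by
// "7fff1234" (the pointer value in base 16).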

void UniqueStacks::StreamStack(const StackKey& aStack) {
  enum Schema : uint32_t { PREFIX = 0, FRAME = 1 };

  AutoArraySchemaWriter writer(mStackTableWriter);
  if (aStack.mPrefixStackIndex.isSome()) {
    writer.IntElement(PREFIX, *aStack.mPrefixStackIndex);
  }
  writer.IntElement(FRAME, aStack.mFrameIndex);
}
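
// Illustrative output: a stack with prefix-stack index 3 and frame index 7 is
// streamed as `[3, 7]`; a root stack with no prefix becomes `[null, 7]`.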

void UniqueStacks::StreamNonJITFrame(const FrameKey& aFrame) {
  if (Failed()) {
    return;
  }

  using NormalFrameData = FrameKey::NormalFrameData;

  enum Schema : uint32_t {
    LOCATION = 0,
    RELEVANT_FOR_JS = 1,
    INNER_WINDOW_ID = 2,
    IMPLEMENTATION = 3,
    LINE = 4,
    COLUMN = 5,
    CATEGORY = 6,
    SUBCATEGORY = 7
  };

  AutoArraySchemaWithStringsWriter writer(mFrameTableWriter, *mUniqueStrings);

  const NormalFrameData& data = aFrame.mData.as<NormalFrameData>();
  writer.StringElement(LOCATION, data.mLocation);
  writer.BoolElement(RELEVANT_FOR_JS, data.mRelevantForJS);

  // It's okay to convert uint64_t to double here because DOM always creates
  // IDs that are convertible to double.
  writer.DoubleElement(INNER_WINDOW_ID, data.mInnerWindowID);

  // The C++ interpreter is the default implementation, so we only emit an
  // element for Baseline Interpreter frames.
  if (data.mBaselineInterp) {
    writer.StringElement(IMPLEMENTATION, MakeStringSpan("blinterp"));
  }

  if (data.mLine.isSome()) {
    writer.IntElement(LINE, *data.mLine);
  }
  if (data.mColumn.isSome()) {
    writer.IntElement(COLUMN, *data.mColumn);
  }
  if (data.mCategoryPair.isSome()) {
    const JS::ProfilingCategoryPairInfo& info =
        JS::GetProfilingCategoryPairInfo(*data.mCategoryPair);
    writer.IntElement(CATEGORY, uint32_t(info.mCategory));
    writer.IntElement(SUBCATEGORY, info.mSubcategoryIndex);
  }
}

static void StreamJITFrame(JSContext* aContext, SpliceableJSONWriter& aWriter,
                           UniqueJSONStrings& aUniqueStrings,
                           const JS::ProfiledFrameHandle& aJITFrame) {
  enum Schema : uint32_t {
    LOCATION = 0,
    RELEVANT_FOR_JS = 1,
    INNER_WINDOW_ID = 2,
    IMPLEMENTATION = 3,
    LINE = 4,
    COLUMN = 5,
    CATEGORY = 6,
    SUBCATEGORY = 7
  };

  AutoArraySchemaWithStringsWriter writer(aWriter, aUniqueStrings);

  writer.StringElement(LOCATION, MakeStringSpan(aJITFrame.label()));
  writer.BoolElement(RELEVANT_FOR_JS, false);

  // It's okay to convert uint64_t to double here because DOM always creates
  // IDs that are convertible to double.
  // Realm ID is the name of innerWindowID inside JS code.
  writer.DoubleElement(INNER_WINDOW_ID, aJITFrame.realmID());

  JS::ProfilingFrameIterator::FrameKind frameKind = aJITFrame.frameKind();
  MOZ_ASSERT(frameKind == JS::ProfilingFrameIterator::Frame_Ion ||
             frameKind == JS::ProfilingFrameIterator::Frame_Baseline);
  writer.StringElement(IMPLEMENTATION,
                       frameKind == JS::ProfilingFrameIterator::Frame_Ion
                           ? MakeStringSpan("ion")
                           : MakeStringSpan("baseline"));

  const JS::ProfilingCategoryPairInfo& info = JS::GetProfilingCategoryPairInfo(
      frameKind == JS::ProfilingFrameIterator::Frame_Ion
          ? JS::ProfilingCategoryPair::JS_IonMonkey
          : JS::ProfilingCategoryPair::JS_Baseline);
  writer.IntElement(CATEGORY, uint32_t(info.mCategory));
  writer.IntElement(SUBCATEGORY, info.mSubcategoryIndex);
}

static nsCString JSONForJITFrame(JSContext* aContext,
                                 const JS::ProfiledFrameHandle& aJITFrame,
                                 UniqueJSONStrings& aUniqueStrings) {
  nsCString json;
  JSONStringRefWriteFunc jw(json);
  SpliceableJSONWriter writer(jw, aUniqueStrings.SourceFailureLatch());
  StreamJITFrame(aContext, writer, aUniqueStrings, aJITFrame);
  return json;
}

void JITFrameInfo::AddInfoForRange(
    uint64_t aRangeStart, uint64_t aRangeEnd, JSContext* aCx,
    const std::function<void(const std::function<void(void*)>&)>&
        aJITAddressProvider) {
  if (mLocalFailureLatchSource.Failed()) {
    return;
  }

  if (aRangeStart == aRangeEnd) {
    return;
  }

  MOZ_RELEASE_ASSERT(aRangeStart < aRangeEnd);

  if (!mRanges.empty()) {
    const JITFrameInfoForBufferRange& prevRange = mRanges.back();
    MOZ_RELEASE_ASSERT(prevRange.mRangeEnd <= aRangeStart,
                       "Ranges must be non-overlapping and added in-order.");
  }

  using JITFrameKey = JITFrameInfoForBufferRange::JITFrameKey;

  JITFrameInfoForBufferRange::JITAddressToJITFramesMap jitAddressToJITFrameMap;
  JITFrameInfoForBufferRange::JITFrameToFrameJSONMap jitFrameToFrameJSONMap;

  aJITAddressProvider([&](void* aJITAddress) {
    // Make sure that we have cached data for aJITAddress.
    auto addressEntry = jitAddressToJITFrameMap.lookupForAdd(aJITAddress);
    if (!addressEntry) {
      Vector<JITFrameKey> jitFrameKeys;
      for (JS::ProfiledFrameHandle handle :
           JS::GetProfiledFrames(aCx, aJITAddress)) {
        uint32_t depth = jitFrameKeys.length();
        JITFrameKey jitFrameKey{handle.canonicalAddress(), depth};
        auto frameEntry = jitFrameToFrameJSONMap.lookupForAdd(jitFrameKey);
        if (!frameEntry) {
          if (!jitFrameToFrameJSONMap.add(
                  frameEntry, jitFrameKey,
                  JSONForJITFrame(aCx, handle, *mUniqueStrings))) {
            mLocalFailureLatchSource.SetFailure(
                "OOM in JITFrameInfo::AddInfoForRange adding jit->frame map");
            return;
          }
        }
        if (!jitFrameKeys.append(jitFrameKey)) {
          mLocalFailureLatchSource.SetFailure(
              "OOM in JITFrameInfo::AddInfoForRange adding jit frame key");
          return;
        }
      }
      if (!jitAddressToJITFrameMap.add(addressEntry, aJITAddress,
                                       std::move(jitFrameKeys))) {
        mLocalFailureLatchSource.SetFailure(
            "OOM in JITFrameInfo::AddInfoForRange adding addr->jit map");
        return;
      }
    }
  });

  if (!mRanges.append(JITFrameInfoForBufferRange{
          aRangeStart, aRangeEnd, std::move(jitAddressToJITFrameMap),
          std::move(jitFrameToFrameJSONMap)})) {
    mLocalFailureLatchSource.SetFailure(
        "OOM in JITFrameInfo::AddInfoForRange adding range");
    return;
  }
}

struct ProfileSample {
  uint32_t mStack = 0;
  double mTime = 0.0;
  Maybe<double> mResponsiveness;
  RunningTimes mRunningTimes;
};

// Write CPU measurements with "Delta" unit, which is some amount of work that
// happened since the previous sample.
static void WriteDelta(AutoArraySchemaWriter& aSchemaWriter, uint32_t aProperty,
                       uint64_t aDelta) {
  aSchemaWriter.IntElement(aProperty, int64_t(aDelta));
}

static void WriteSample(SpliceableJSONWriter& aWriter,
                        const ProfileSample& aSample) {
  enum Schema : uint32_t {
    STACK = 0,
    TIME = 1,
    EVENT_DELAY = 2
#define RUNNING_TIME_SCHEMA(index, name, unit, jsonProperty) , name
    PROFILER_FOR_EACH_RUNNING_TIME(RUNNING_TIME_SCHEMA)
#undef RUNNING_TIME_SCHEMA
  };

  AutoArraySchemaWriter writer(aWriter);

  writer.IntElement(STACK, aSample.mStack);

  writer.TimeMsElement(TIME, aSample.mTime);

  if (aSample.mResponsiveness.isSome()) {
    writer.DoubleElement(EVENT_DELAY, *aSample.mResponsiveness);
  }

#define RUNNING_TIME_STREAM(index, name, unit, jsonProperty) \
  aSample.mRunningTimes.GetJson##name##unit().apply(         \
      [&writer](const uint64_t& aValue) {                    \
        Write##unit(writer, name, aValue);                   \
      });

  PROFILER_FOR_EACH_RUNNING_TIME(RUNNING_TIME_STREAM)

#undef RUNNING_TIME_STREAM
}
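
// Illustrative output: a sample with stack index 5 at t=12.5ms and no event
// delay or running times is streamed as `[5, 12.5]`; absent trailing elements
// are omitted, and a skipped middle element would be written as `null`.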

static void StreamMarkerAfterKind(
    ProfileBufferEntryReader& aER,
    ProcessStreamingContext& aProcessStreamingContext) {
  ThreadStreamingContext* threadData = nullptr;
  mozilla::base_profiler_markers_detail::DeserializeAfterKindAndStream(
      aER,
      [&](ProfilerThreadId aThreadId) -> baseprofiler::SpliceableJSONWriter* {
        threadData =
            aProcessStreamingContext.GetThreadStreamingContext(aThreadId);
        return threadData ? &threadData->mMarkersDataWriter : nullptr;
      },
      [&](ProfileChunkedBuffer& aChunkedBuffer) {
        ProfilerBacktrace backtrace("", &aChunkedBuffer);
        MOZ_ASSERT(threadData,
                   "threadData should have been set before calling here");
        backtrace.StreamJSON(threadData->mMarkersDataWriter,
                             aProcessStreamingContext.ProcessStartTime(),
                             *threadData->mUniqueStacks);
      },
      [&](mozilla::base_profiler_markers_detail::Streaming::DeserializerTag
              aTag) {
        MOZ_ASSERT(threadData,
                   "threadData should have been set before calling here");

        size_t payloadSize = aER.RemainingBytes();

        ProfileBufferEntryReader::DoubleSpanOfConstBytes spans =
            aER.ReadSpans(payloadSize);
        if (MOZ_LIKELY(spans.IsSingleSpan())) {
          // Only a single span, we can just refer to it directly
          // instead of copying it.
          profiler::ffi::gecko_profiler_serialize_marker_for_tag(
              aTag, spans.mFirstOrOnly.Elements(), payloadSize,
              &threadData->mMarkersDataWriter);
        } else {
          // Two spans, we need to concatenate them by copying.
          uint8_t* payloadBuffer = new uint8_t[payloadSize];
          spans.CopyBytesTo(payloadBuffer);
          profiler::ffi::gecko_profiler_serialize_marker_for_tag(
              aTag, payloadBuffer, payloadSize,
              &threadData->mMarkersDataWriter);
          delete[] payloadBuffer;
        }
      });
}

class EntryGetter {
 public:
  explicit EntryGetter(
      ProfileChunkedBuffer::Reader& aReader,
      mozilla::FailureLatch& aFailureLatch,
      mozilla::ProgressLogger aProgressLogger = {},
      uint64_t aInitialReadPos = 0,
      ProcessStreamingContext* aStreamingContextForMarkers = nullptr)
      : mFailureLatch(aFailureLatch),
        mStreamingContextForMarkers(aStreamingContextForMarkers),
        mBlockIt(
            aReader.At(ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
                aInitialReadPos))),
        mBlockItEnd(aReader.end()),
        mRangeStart(mBlockIt.BufferRangeStart().ConvertToProfileBufferIndex()),
        mRangeSize(
            double(mBlockIt.BufferRangeEnd().ConvertToProfileBufferIndex() -
                   mRangeStart)),
        mProgressLogger(std::move(aProgressLogger)) {
    SetLocalProgress(ProgressLogger::NO_LOCATION_UPDATE);
    if (!ReadLegacyOrEnd()) {
      // Find and read the next non-legacy entry.
      Next();
    }
  }

  bool Has() const {
    return (!mFailureLatch.Failed()) && (mBlockIt != mBlockItEnd);
  }

  const ProfileBufferEntry& Get() const {
    MOZ_ASSERT(Has() || mFailureLatch.Failed(),
               "Caller should have checked `Has()` before `Get()`");
    return mEntry;
  }

  void Next() {
    MOZ_ASSERT(Has() || mFailureLatch.Failed(),
               "Caller should have checked `Has()` before `Next()`");
    ++mBlockIt;
    ReadUntilLegacyOrEnd();
  }

  // Hand off the current iterator to the caller, which may be used to read
  // any kind of entries (legacy or modern).
  ProfileChunkedBuffer::BlockIterator Iterator() const { return mBlockIt; }

  // After `Iterator()` was used, we can restart from *after* its updated
  // position.
  void RestartAfter(const ProfileChunkedBuffer::BlockIterator& it) {
    mBlockIt = it;
    if (!Has()) {
      return;
    }
    Next();
  }

  ProfileBufferBlockIndex CurBlockIndex() const {
    return mBlockIt.CurrentBlockIndex();
  }

  uint64_t CurPos() const {
    return CurBlockIndex().ConvertToProfileBufferIndex();
  }

  void SetLocalProgress(const char* aLocation) {
    mProgressLogger.SetLocalProgress(
        ProportionValue{double(CurBlockIndex().ConvertToProfileBufferIndex() -
                               mRangeStart) /
                        mRangeSize},
        aLocation);
  }

 private:
  // Try to read the entry at the current `mBlockIt` position.
  // * If we're at the end of the buffer, just return `true`.
  // * If there is a "legacy" entry (containing a real `ProfileBufferEntry`),
  //   read it into `mEntry`, and return `true` as well.
  // * Otherwise the entry contains a "modern" type that cannot be read into
  //   `mEntry`, return `false` (so `EntryGetter` can skip to another entry).
  bool ReadLegacyOrEnd() {
    if (!Has()) {
      return true;
    }
    // Read the entry "kind", which is always at the start of all entries.
    ProfileBufferEntryReader er = *mBlockIt;
    auto type = static_cast<ProfileBufferEntry::Kind>(
        er.ReadObject<ProfileBufferEntry::KindUnderlyingType>());
    MOZ_ASSERT(static_cast<ProfileBufferEntry::KindUnderlyingType>(type) <
               static_cast<ProfileBufferEntry::KindUnderlyingType>(
                   ProfileBufferEntry::Kind::MODERN_LIMIT));
    if (type >= ProfileBufferEntry::Kind::LEGACY_LIMIT) {
      if (type == ProfileBufferEntry::Kind::Marker &&
          mStreamingContextForMarkers) {
        StreamMarkerAfterKind(er, *mStreamingContextForMarkers);
        if (!Has()) {
          return true;
        }
        SetLocalProgress("Processed marker");
      }
      er.SetRemainingBytes(0);
      return false;
    }
    // Here, we have a legacy item, we need to read it from the start.
    // Because the above `ReadObject` moved the reader, we need to reset it to
    // the start of the entry before reading the whole entry.
    er = *mBlockIt;
    er.ReadBytes(&mEntry, er.RemainingBytes());
    return true;
  }

  void ReadUntilLegacyOrEnd() {
    for (;;) {
      if (ReadLegacyOrEnd()) {
        // Either we're at the end, or we could read a legacy entry -> Done.
        break;
      }
      // Otherwise loop around until we hit a legacy entry or the end.
      ++mBlockIt;
    }
    SetLocalProgress(ProgressLogger::NO_LOCATION_UPDATE);
  }

  mozilla::FailureLatch& mFailureLatch;

  ProcessStreamingContext* const mStreamingContextForMarkers;

  ProfileBufferEntry mEntry;
  ProfileChunkedBuffer::BlockIterator mBlockIt;
  const ProfileChunkedBuffer::BlockIterator mBlockItEnd;

  // Progress logger, and the data needed to compute the current relative
  // position in the buffer.
  const mozilla::ProfileBufferIndex mRangeStart;
  const double mRangeSize;
  mozilla::ProgressLogger mProgressLogger;
};
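
// Typical usage, as in the streaming functions below:
//
//   EntryGetter e(*aReader, aFailureLatch);
//   while (e.Has()) {
//     /* process the legacy entry e.Get() */
//     e.Next();
//   }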

// The following grammar shows legal sequences of profile buffer entries.
// The sequences beginning with a ThreadId entry are known as "samples".
//
// (
//   ( /* Sample */
//     ThreadId
//     TimeBeforeCompactStack
//     RunningTimes?
//     UnresponsivenessDurationMs?
//     CompactStack
//         /* internally including:
//           ( NativeLeafAddr
//           | Label FrameFlags? DynamicStringFragment*
//             LineNumber? CategoryPair?
//           | JitReturnAddr
//           )+
//         */
//   )
//   | ( /* Reference to a previous identical sample */
//       ThreadId
//       TimeBeforeSameSample
//       RunningTimes?
//       SameSample
//     )
//   | Marker
//   | ( /* Counters */
//       CounterId
//       Time
//       (
//         CounterKey
//         Count
//         Number?
//       )*
//     )
//   | CollectionStart
//   | CollectionEnd
//   | Pause
//   | Resume
//   | ( ProfilerOverheadTime /* Sampling start timestamp */
//       ProfilerOverheadDuration /* Lock acquisition */
//       ProfilerOverheadDuration /* Expired markers cleaning */
//       ProfilerOverheadDuration /* Counters */
//       ProfilerOverheadDuration /* Threads */
//     )
// )*
//
// The most complicated part is the stack entry sequence that begins with
// Label. Here are some examples.
//
// - ProfilingStack frames without a dynamic string:
//
//     Label("js::RunScript")
//     CategoryPair(JS::ProfilingCategoryPair::JS)
//
//     Label("XREMain::XRE_main")
//     CategoryPair(JS::ProfilingCategoryPair::OTHER)
//
//     Label("ElementRestyler::ComputeStyleChangeFor")
//     CategoryPair(JS::ProfilingCategoryPair::CSS)
//
// - ProfilingStack frames with a dynamic string:
//
//     Label("nsObserverService::NotifyObservers")
//     FrameFlags(uint64_t(ProfilingStackFrame::Flags::IS_LABEL_FRAME))
//     DynamicStringFragment("domwindo")
//     DynamicStringFragment("wopened")
//     CategoryPair(JS::ProfilingCategoryPair::OTHER)
//
//     Label("")
//     FrameFlags(uint64_t(ProfilingStackFrame::Flags::IS_JS_FRAME))
//     DynamicStringFragment("closeWin")
//     DynamicStringFragment("dow (chr")
//     DynamicStringFragment("ome://gl")
//     DynamicStringFragment("obal/con")
//     DynamicStringFragment("tent/glo")
//     DynamicStringFragment("balOverl")
//     DynamicStringFragment("ay.js:5)")
//     DynamicStringFragment("")  # this string holds the closing '\0'
//     CategoryPair(JS::ProfilingCategoryPair::JS)
//
//     Label("")
//     FrameFlags(uint64_t(ProfilingStackFrame::Flags::IS_JS_FRAME))
//     DynamicStringFragment("bound (s")
//     DynamicStringFragment("elf-host")
//     DynamicStringFragment("ed:914)")
//     CategoryPair(JS::ProfilingCategoryPair::JS)
//
// - A profiling stack frame with an overly long dynamic string:
//
//     Label("")
//     FrameFlags(uint64_t(ProfilingStackFrame::Flags::IS_LABEL_FRAME))
//     DynamicStringFragment("(too lon")
//     DynamicStringFragment("g)")
//     CategoryPair(JS::ProfilingCategoryPair::NETWORK)
//
// - A wasm JIT frame:
//
//     Label("")
//     FrameFlags(uint64_t(0))
//     DynamicStringFragment("wasm-fun")
//     DynamicStringFragment("ction[87")
//     DynamicStringFragment("36] (blo")
//     DynamicStringFragment("b:http:/")
//     DynamicStringFragment("/webasse")
//     DynamicStringFragment("mbly.org")
//     DynamicStringFragment("/3dc5759")
//     DynamicStringFragment("4-ce58-4")
//     DynamicStringFragment("626-975b")
//     DynamicStringFragment("-08ad116")
//     DynamicStringFragment("30bc1:38")
//     DynamicStringFragment("29856)")
//
// - A JS frame in a synchronous sample:
//
//     Label("")
//     FrameFlags(uint64_t(ProfilingStackFrame::Flags::IS_LABEL_FRAME))
//     DynamicStringFragment("u (https")
//     DynamicStringFragment("://perf-")
//     DynamicStringFragment("html.io/")
//     DynamicStringFragment("ac0da204")
//     DynamicStringFragment("aaa44d75")
//     DynamicStringFragment("a800.bun")
//     DynamicStringFragment("dle.js:2")
//     DynamicStringFragment("5)")

// Because this is a format entirely internal to the Profiler, any parsing
// error indicates a bug in the ProfileBuffer writing or the parser itself,
// or possibly flaky hardware.
#define ERROR_AND_CONTINUE(msg)                            \
  {                                                        \
    fprintf(stderr, "ProfileBuffer parse error: %s", msg); \
    MOZ_ASSERT(false, msg);                                \
    continue;                                              \
  }

struct StreamingParametersForThread {
  SpliceableJSONWriter& mWriter;
  UniqueStacks& mUniqueStacks;
  ThreadStreamingContext::PreviousStackState& mPreviousStackState;
  uint32_t& mPreviousStack;

  StreamingParametersForThread(
      SpliceableJSONWriter& aWriter, UniqueStacks& aUniqueStacks,
      ThreadStreamingContext::PreviousStackState& aPreviousStackState,
      uint32_t& aPreviousStack)
      : mWriter(aWriter),
        mUniqueStacks(aUniqueStacks),
        mPreviousStackState(aPreviousStackState),
        mPreviousStack(aPreviousStack) {}
};

#ifdef MOZ_EXECUTION_TRACING

template <typename GetStreamingParametersForThreadCallback>
void ProfileBuffer::MaybeStreamExecutionTraceToJSON(
    GetStreamingParametersForThreadCallback&&
        aGetStreamingParametersForThreadCallback,
    double aSinceTime) const {
  JS::ExecutionTrace trace;
  if (!JS_TracerSnapshotTrace(trace)) {
    return;
  }

  for (const JS::ExecutionTrace::TracedJSContext& context : trace.contexts) {
    Maybe<StreamingParametersForThread> streamingParameters =
        std::forward<GetStreamingParametersForThreadCallback>(
            aGetStreamingParametersForThreadCallback)(context.id);

    // Ignore samples that are for the wrong thread.
    if (!streamingParameters) {
      continue;
    }

    SpliceableJSONWriter& writer = streamingParameters->mWriter;
    UniqueStacks& uniqueStacks = streamingParameters->mUniqueStacks;

    mozilla::Vector<UniqueStacks::StackKey> frameStack;

    Maybe<UniqueStacks::StackKey> maybeStack =
        uniqueStacks.BeginStack(UniqueStacks::FrameKey("(root)"));
    if (!maybeStack) {
      writer.SetFailure("BeginStack failure");
      continue;
    }

    UniqueStacks::StackKey stack = *maybeStack;
    if (!frameStack.append(stack)) {
      writer.SetFailure("frameStack append failure");
      continue;
    }

    for (const JS::ExecutionTrace::TracedEvent& event : context.events) {
      if (event.time < aSinceTime) {
        continue;
      }

      if (event.kind == JS::ExecutionTrace::EventKind::Error) {
        writer.SetFailure("Error during tracing (likely OOM)");
        break;
      }

      if (event.kind == JS::ExecutionTrace::EventKind::FunctionEnter) {
        HashMap<uint32_t, size_t>::Ptr functionName =
            context.atoms.lookup(event.functionEvent.functionNameId);
        // This is uncommon, but if one of our ring buffers wraps around, we
        // can end up with missing function name entries
        const char* functionNameStr = "<expired>";
        if (functionName) {
          functionNameStr = &trace.stringBuffer[functionName->value()];
        }
        HashMap<uint32_t, size_t>::Ptr scriptUrl =
            context.scriptUrls.lookup(event.functionEvent.scriptId);
        // See the comment above functionNameStr
        const char* scriptUrlStr = "<expired>";
        if (scriptUrl) {
          scriptUrlStr = &trace.stringBuffer[scriptUrl->value()];
        }
        nsAutoCStringN<1024> name(functionNameStr);
        name.AppendPrintf(" (%s:%u:%u)", scriptUrlStr,
                          event.functionEvent.lineNumber,
                          event.functionEvent.column);
        JS::ProfilingCategoryPair categoryPair;
        switch (event.functionEvent.implementation) {
          case JS::ExecutionTrace::ImplementationType::Interpreter:
            categoryPair = JS::ProfilingCategoryPair::JS;
            break;
          case JS::ExecutionTrace::ImplementationType::Baseline:
            categoryPair = JS::ProfilingCategoryPair::JS_Baseline;
            break;
          case JS::ExecutionTrace::ImplementationType::Ion:
            categoryPair = JS::ProfilingCategoryPair::JS_IonMonkey;
            break;
          case JS::ExecutionTrace::ImplementationType::Wasm:
            categoryPair = JS::ProfilingCategoryPair::JS_WasmOther;
            break;
        }

        UniqueStacks::FrameKey newFrame(nsCString(name.get()), true, false,
                                        event.functionEvent.realmID, Nothing{},
                                        Nothing{}, Some(categoryPair));
        maybeStack = uniqueStacks.AppendFrame(stack, newFrame);
        if (!maybeStack) {
          writer.SetFailure("AppendFrame failure");
          break;
        }

        stack = *maybeStack;
        if (!frameStack.append(stack)) {
          writer.SetFailure("frameStack append failure");
          break;
        }
      } else if (event.kind == JS::ExecutionTrace::EventKind::LabelEnter) {
        UniqueStacks::FrameKey newFrame(
            nsCString(&trace.stringBuffer[event.labelEvent.label]), true, false,
            0, Nothing{}, Nothing{}, Some(JS::ProfilingCategoryPair::DOM));
        maybeStack = uniqueStacks.AppendFrame(stack, newFrame);
        if (!maybeStack) {
          writer.SetFailure("AppendFrame failure");
          break;
        }

        stack = *maybeStack;
        if (!frameStack.append(stack)) {
          writer.SetFailure("frameStack append failure");
          break;
        }
      } else {
        MOZ_ASSERT(event.kind == JS::ExecutionTrace::EventKind::LabelLeave ||
                   event.kind == JS::ExecutionTrace::EventKind::FunctionLeave);
        if (frameStack.length() > 0) {
          frameStack.popBack();
        }

        if (frameStack.length() > 0) {
          stack = frameStack[frameStack.length() - 1];
        } else {
          maybeStack =
              uniqueStacks.BeginStack(UniqueStacks::FrameKey("(root)"));
          if (!maybeStack) {
            writer.SetFailure("BeginStack failure");
            break;
          }

          stack = *maybeStack;
          if (!frameStack.append(stack)) {
            writer.SetFailure("frameStack append failure");
            break;
          }
        }
      }

      const Maybe<uint32_t> stackIndex = uniqueStacks.GetOrAddStackIndex(stack);
      if (!stackIndex) {
        writer.SetFailure("Can't add unique string for stack");
        continue;
      }

      WriteSample(writer, ProfileSample{*stackIndex, event.time, Nothing{},
                                        RunningTimes{}});
    }
  }
}

#endif

// GetStreamingParametersForThreadCallback:
// (ProfilerThreadId) -> Maybe<StreamingParametersForThread>
template <typename GetStreamingParametersForThreadCallback>
ProfilerThreadId ProfileBuffer::DoStreamSamplesAndMarkersToJSON(
    mozilla::FailureLatch& aFailureLatch,
    GetStreamingParametersForThreadCallback&&
        aGetStreamingParametersForThreadCallback,
    double aSinceTime, ProcessStreamingContext* aStreamingContextForMarkers,
    mozilla::ProgressLogger aProgressLogger) const {
  UniquePtr<char[]> dynStrBuf = MakeUnique<char[]>(kMaxFrameKeyLength);

  return mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
    MOZ_ASSERT(aReader,
               "ProfileChunkedBuffer cannot be out-of-session when sampler is "
               "running");

    ProfilerThreadId processedThreadId;

    EntryGetter e(*aReader, aFailureLatch, std::move(aProgressLogger),
                  /* aInitialReadPos */ 0, aStreamingContextForMarkers);

    for (;;) {
      // This block skips entries until we find the start of the next sample.
      // This is useful in three situations.
      //
      // - The circular buffer overwrites old entries, so when we start parsing
      //   we might be in the middle of a sample, and we must skip forward to
      //   the start of the next sample.
      //
      // - We skip samples that don't have an appropriate ThreadId or Time.
      //
      // - We skip range Pause, Resume, CollectionStart, Marker, Counter
      //   and CollectionEnd entries between samples.
      while (e.Has()) {
        if (e.Get().IsThreadId()) {
          break;
        }
        e.Next();
      }

      if (!e.Has()) {
        break;
      }

      // Due to the skip_to_next_sample block above, if we have an entry here it
      // must be a ThreadId entry.
      MOZ_ASSERT(e.Get().IsThreadId());

      ProfilerThreadId threadId = e.Get().GetThreadId();
      e.Next();

      Maybe<StreamingParametersForThread> streamingParameters =
          std::forward<GetStreamingParametersForThreadCallback>(
              aGetStreamingParametersForThreadCallback)(threadId);

      // Ignore samples that are for the wrong thread.
      if (!streamingParameters) {
        continue;
      }

      SpliceableJSONWriter& writer = streamingParameters->mWriter;
      UniqueStacks& uniqueStacks = streamingParameters->mUniqueStacks;
      ThreadStreamingContext::PreviousStackState& previousStackState =
          streamingParameters->mPreviousStackState;
      uint32_t& previousStack = streamingParameters->mPreviousStack;

      auto ReadStack = [&](EntryGetter& e, double time, uint64_t entryPosition,
                           const Maybe<double>& unresponsiveDuration,
                           const RunningTimes& runningTimes) {
        if (writer.Failed()) {
          return;
        }

        Maybe<UniqueStacks::StackKey> maybeStack =
            uniqueStacks.BeginStack(UniqueStacks::FrameKey("(root)"));
        if (!maybeStack) {
          writer.SetFailure("BeginStack failure");
          return;
        }

        UniqueStacks::StackKey stack = *maybeStack;

        int numFrames = 0;
        while (e.Has()) {
          if (e.Get().IsNativeLeafAddr()) {
            numFrames++;

            void* pc = e.Get().GetPtr();
            e.Next();

            nsAutoCString functionNameOrAddress =
                uniqueStacks.FunctionNameOrAddress(pc);

            maybeStack = uniqueStacks.AppendFrame(
                stack, UniqueStacks::FrameKey(functionNameOrAddress.get()));
            if (!maybeStack) {
              writer.SetFailure("AppendFrame failure");
              return;
            }
            stack = *maybeStack;

          } else if (e.Get().IsLabel()) {
            numFrames++;

            const char* label = e.Get().GetString();
            e.Next();

            using FrameFlags = js::ProfilingStackFrame::Flags;
            uint32_t frameFlags = 0;
            if (e.Has() && e.Get().IsFrameFlags()) {
              frameFlags = uint32_t(e.Get().GetUint64());
              e.Next();
            }

            bool relevantForJS =
                frameFlags & uint32_t(FrameFlags::RELEVANT_FOR_JS);

            bool isBaselineInterp =
                frameFlags & uint32_t(FrameFlags::IS_BLINTERP_FRAME);

            // Copy potential dynamic string fragments into dynStrBuf, so that
            // dynStrBuf will then contain the entire dynamic string.
            size_t i = 0;
            dynStrBuf[0] = '\0';
            while (e.Has()) {
              if (e.Get().IsDynamicStringFragment()) {
                char chars[ProfileBufferEntry::kNumChars];
                e.Get().CopyCharsInto(chars);
                for (char c : chars) {
                  if (i < kMaxFrameKeyLength) {
                    dynStrBuf[i] = c;
                    i++;
                  }
                }
                e.Next();
              } else {
                break;
              }
            }
            dynStrBuf[kMaxFrameKeyLength - 1] = '\0';
            bool hasDynamicString = (i != 0);

            nsAutoCStringN<1024> frameLabel;
            if (label[0] != '\0' && hasDynamicString) {
              if (frameFlags & uint32_t(FrameFlags::STRING_TEMPLATE_METHOD)) {
                frameLabel.AppendPrintf("%s.%s", label, dynStrBuf.get());
              } else if (frameFlags &
                         uint32_t(FrameFlags::STRING_TEMPLATE_GETTER)) {
                frameLabel.AppendPrintf("get %s.%s", label, dynStrBuf.get());
              } else if (frameFlags &
                         uint32_t(FrameFlags::STRING_TEMPLATE_SETTER)) {
                frameLabel.AppendPrintf("set %s.%s", label, dynStrBuf.get());
              } else {
                frameLabel.AppendPrintf("%s %s", label, dynStrBuf.get());
              }
            } else if (hasDynamicString) {
              frameLabel.Append(dynStrBuf.get());
            } else {
              frameLabel.Append(label);
            }

            uint64_t innerWindowID = 0;
            if (e.Has() && e.Get().IsInnerWindowID()) {
              innerWindowID = uint64_t(e.Get().GetUint64());
              e.Next();
            }

            Maybe<unsigned> line;
            if (e.Has() && e.Get().IsLineNumber()) {
              line = Some(unsigned(e.Get().GetInt()));
              e.Next();
            }

            Maybe<unsigned> column;
            if (e.Has() && e.Get().IsColumnNumber()) {
              column = Some(unsigned(e.Get().GetInt()));
              e.Next();
            }

            Maybe<JS::ProfilingCategoryPair> categoryPair;
            if (e.Has() && e.Get().IsCategoryPair()) {
              categoryPair =
                  Some(JS::ProfilingCategoryPair(uint32_t(e.Get().GetInt())));
              e.Next();
            }

            maybeStack = uniqueStacks.AppendFrame(
                stack,
                UniqueStacks::FrameKey(std::move(frameLabel), relevantForJS,
                                       isBaselineInterp, innerWindowID, line,
                                       column, categoryPair));
            if (!maybeStack) {
              writer.SetFailure("AppendFrame failure");
              return;
            }
            stack = *maybeStack;

          } else if (e.Get().IsJitReturnAddr()) {
            numFrames++;

            // A JIT frame may expand to multiple frames due to inlining.
            void* pc = e.Get().GetPtr();
            const Maybe<Vector<UniqueStacks::FrameKey>>& frameKeys =
                uniqueStacks.LookupFramesForJITAddressFromBufferPos(
                    pc, entryPosition ? entryPosition : e.CurPos());
            MOZ_RELEASE_ASSERT(
                frameKeys,
                "Attempting to stream samples for a buffer range "
                "for which we don't have JITFrameInfo?");
            for (const UniqueStacks::FrameKey& frameKey : *frameKeys) {
              maybeStack = uniqueStacks.AppendFrame(stack, frameKey);
              if (!maybeStack) {
                writer.SetFailure("AppendFrame failure");
                return;
              }
              stack = *maybeStack;
            }

            e.Next();

          } else {
            break;
          }
        }

        // Even if this stack is considered empty, it contains the root frame,
        // which needs to be in the JSON output because following "same samples"
        // may refer to it when reusing this sample.mStack.
        const Maybe<uint32_t> stackIndex =
            uniqueStacks.GetOrAddStackIndex(stack);
        if (!stackIndex) {
          writer.SetFailure("Can't add unique string for stack");
          return;
        }

        // And store that possibly-empty stack in case it's followed by "same
        // sample" entries.
        previousStack = *stackIndex;
        previousStackState = (numFrames == 0)
                                 ? ThreadStreamingContext::eStackWasEmpty
                                 : ThreadStreamingContext::eStackWasNotEmpty;

        // Even if too old or empty, we did process a sample for this thread id.
        processedThreadId = threadId;

        // Discard samples that are too old.
        if (time < aSinceTime) {
          return;
        }

        if (numFrames == 0 && runningTimes.IsEmpty()) {
          // It is possible to have empty stacks if native stackwalking is
          // disabled. Skip samples with empty stacks, unless we have useful
          // running times.
          return;
        }

        WriteSample(writer, ProfileSample{*stackIndex, time,
                                          unresponsiveDuration, runningTimes});
      };  // End of `ReadStack(EntryGetter&)` lambda.

      if (e.Has() && e.Get().IsTime()) {
        double time = e.Get().GetDouble();
        e.Next();
        // Note: Even if this sample is too old (before aSinceTime), we still
        // need to read it, so that its frames are in the tables, in case there
        // is a same-sample following it that would be after aSinceTime, which
        // would need these frames to be present.

        ReadStack(e, time, 0, Nothing{}, RunningTimes{});

        e.SetLocalProgress("Processed sample");
      } else if (e.Has() && e.Get().IsTimeBeforeCompactStack()) {
        double time = e.Get().GetDouble();
        // Note: Even if this sample is too old (before aSinceTime), we still
        // need to read it, so that its frames are in the tables, in case there
        // is a same-sample following it that would be after aSinceTime, which
        // would need these frames to be present.

        RunningTimes runningTimes;
        Maybe<double> unresponsiveDuration;

        ProfileChunkedBuffer::BlockIterator it = e.Iterator();
        for (;;) {
          ++it;
          if (it.IsAtEnd()) {
            break;
          }
          ProfileBufferEntryReader er = *it;
          ProfileBufferEntry::Kind kind =
              er.ReadObject<ProfileBufferEntry::Kind>();

          // There may be running times before the CompactStack.
          if (kind == ProfileBufferEntry::Kind::RunningTimes) {
            er.ReadIntoObject(runningTimes);
            continue;
          }

          // There may be an UnresponsiveDurationMs before the CompactStack.
          if (kind == ProfileBufferEntry::Kind::UnresponsiveDurationMs) {
            unresponsiveDuration = Some(er.ReadObject<double>());
            continue;
          }

          if (kind == ProfileBufferEntry::Kind::CompactStack) {
            ProfileChunkedBuffer tempBuffer(
                ProfileChunkedBuffer::ThreadSafety::WithoutMutex,
                WorkerChunkManager());
            er.ReadIntoObject(tempBuffer);
            tempBuffer.Read([&](ProfileChunkedBuffer::Reader* aReader) {
              MOZ_ASSERT(aReader,
                         "Local ProfileChunkedBuffer cannot be out-of-session");
              // This is a compact stack, it should only contain one sample.
              EntryGetter stackEntryGetter(*aReader, aFailureLatch);
              ReadStack(stackEntryGetter, time,
                        it.CurrentBlockIndex().ConvertToProfileBufferIndex(),
                        unresponsiveDuration, runningTimes);
            });
            WorkerChunkManager().Reset(tempBuffer.GetAllChunks());
            break;
          }

          if (kind == ProfileBufferEntry::Kind::Marker &&
              aStreamingContextForMarkers) {
            StreamMarkerAfterKind(er, *aStreamingContextForMarkers);
            continue;
          }

          MOZ_ASSERT(kind >= ProfileBufferEntry::Kind::LEGACY_LIMIT,
                     "There should be no legacy entries between "
                     "TimeBeforeCompactStack and CompactStack");
          er.SetRemainingBytes(0);
        }

        e.RestartAfter(it);

        e.SetLocalProgress("Processed compact sample");
      } else if (e.Has() && e.Get().IsTimeBeforeSameSample()) {
        if (previousStackState == ThreadStreamingContext::eNoStackYet) {
          // We don't have any full sample yet, we cannot duplicate a "previous"
          // one. This should only happen at most once per thread, for the very
          // first sample.
          e.Next();
          continue;
        }

        ProfileSample sample;

        // Keep the same `mStack` as previously output.
        // Note that it may be empty, this is checked below before writing it.
        sample.mStack = previousStack;

        sample.mTime = e.Get().GetDouble();

        // Ignore samples that are too old.
        if (sample.mTime < aSinceTime) {
          e.Next();
          continue;
        }

        sample.mResponsiveness = Nothing{};

        sample.mRunningTimes.Clear();

        ProfileChunkedBuffer::BlockIterator it = e.Iterator();
        for (;;) {
          ++it;
          if (it.IsAtEnd()) {
            break;
          }
          ProfileBufferEntryReader er = *it;
          ProfileBufferEntry::Kind kind =
              er.ReadObject<ProfileBufferEntry::Kind>();

          // There may be running times before the SameSample.
          if (kind == ProfileBufferEntry::Kind::RunningTimes) {
            er.ReadIntoObject(sample.mRunningTimes);
            continue;
          }

          if (kind == ProfileBufferEntry::Kind::SameSample) {
            if (previousStackState == ThreadStreamingContext::eStackWasEmpty &&
                sample.mRunningTimes.IsEmpty()) {
              // Skip samples with empty stacks, unless we have useful running
              // times.
              break;
            }
            WriteSample(writer, sample);
            break;
          }

          if (kind == ProfileBufferEntry::Kind::Marker &&
              aStreamingContextForMarkers) {
            StreamMarkerAfterKind(er, *aStreamingContextForMarkers);
            continue;
          }

          MOZ_ASSERT(kind >= ProfileBufferEntry::Kind::LEGACY_LIMIT,
                     "There should be no legacy entries between "
                     "TimeBeforeSameSample and SameSample");
          er.SetRemainingBytes(0);
        }

        e.RestartAfter(it);

        e.SetLocalProgress("Processed repeated sample");
      } else {
        ERROR_AND_CONTINUE("expected a Time entry");
      }
    }

    return processedThreadId;
  });
}

ProfilerThreadId ProfileBuffer::StreamSamplesToJSON(
    SpliceableJSONWriter& aWriter, ProfilerThreadId aThreadId,
    double aSinceTime, UniqueStacks& aUniqueStacks,
    mozilla::ProgressLogger aProgressLogger) const {
  ThreadStreamingContext::PreviousStackState previousStackState =
      ThreadStreamingContext::eNoStackYet;
  uint32_t stack = 0u;
#ifdef DEBUG
  int processedCount = 0;
#endif  // DEBUG
  return DoStreamSamplesAndMarkersToJSON(
      aWriter.SourceFailureLatch(),
      [&](ProfilerThreadId aReadThreadId) {
        Maybe<StreamingParametersForThread> streamingParameters;
#ifdef DEBUG
        ++processedCount;
        MOZ_ASSERT(
            aThreadId.IsSpecified() ||
                (processedCount == 1 && aReadThreadId.IsSpecified()),
            "Unspecified aThreadId should only be used with 1-sample buffer");
#endif  // DEBUG
        if (!aThreadId.IsSpecified() || aThreadId == aReadThreadId) {
          streamingParameters.emplace(aWriter, aUniqueStacks,
                                      previousStackState, stack);
        }
        return streamingParameters;
      },
      aSinceTime, /* aStreamingContextForMarkers */ nullptr,
      std::move(aProgressLogger));
}

void ProfileBuffer::StreamSamplesAndMarkersToJSON(
    ProcessStreamingContext& aProcessStreamingContext,
    mozilla::ProgressLogger aProgressLogger) const {
  auto getStreamingParamsCallback = [&](ProfilerThreadId aReadThreadId) {
    Maybe<StreamingParametersForThread> streamingParameters;
    ThreadStreamingContext* threadData =
        aProcessStreamingContext.GetThreadStreamingContext(aReadThreadId);
    if (threadData) {
      streamingParameters.emplace(
          threadData->mSamplesDataWriter, *threadData->mUniqueStacks,
          threadData->mPreviousStackState, threadData->mPreviousStack);
    }
    return streamingParameters;
  };

#ifdef MOZ_EXECUTION_TRACING
  MaybeStreamExecutionTraceToJSON(getStreamingParamsCallback,
                                  aProcessStreamingContext.GetSinceTime());
#endif

  (void)DoStreamSamplesAndMarkersToJSON(
      aProcessStreamingContext.SourceFailureLatch(), getStreamingParamsCallback,
      aProcessStreamingContext.GetSinceTime(), &aProcessStreamingContext,
      std::move(aProgressLogger));
}

void ProfileBuffer::AddJITInfoForRange(
    uint64_t aRangeStart, ProfilerThreadId aThreadId, JSContext* aContext,
    JITFrameInfo& aJITFrameInfo,
    mozilla::ProgressLogger aProgressLogger) const {
  // We can only process JitReturnAddr entries if we have a JSContext.
  MOZ_RELEASE_ASSERT(aContext);

  aRangeStart = std::max(aRangeStart, BufferRangeStart());
  aJITFrameInfo.AddInfoForRange(
      aRangeStart, BufferRangeEnd(), aContext,
      [&](const std::function<void(void*)>& aJITAddressConsumer) {
        // Find all JitReturnAddr entries in the given range for the given
        // thread, and call aJITAddressConsumer with those addresses.

        mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
          MOZ_ASSERT(aReader,
                     "ProfileChunkedBuffer cannot be out-of-session when "
                     "sampler is running");

          EntryGetter e(*aReader, aJITFrameInfo.LocalFailureLatchSource(),
                        std::move(aProgressLogger), aRangeStart);

          while (true) {
            // Advance to the next ThreadId entry.
            while (e.Has() && !e.Get().IsThreadId()) {
              e.Next();
            }
            if (!e.Has()) {
              break;
            }

            MOZ_ASSERT(e.Get().IsThreadId());
            ProfilerThreadId threadId = e.Get().GetThreadId();
            e.Next();

            // Ignore samples that are for a different thread.
            if (threadId != aThreadId) {
              continue;
            }

            if (e.Has() && e.Get().IsTime()) {
              e.Next();
              while (e.Has() && !e.Get().IsThreadId()) {
                if (e.Get().IsJitReturnAddr()) {
                  aJITAddressConsumer(e.Get().GetPtr());
                }
                e.Next();
              }
            } else if (e.Has() && e.Get().IsTimeBeforeCompactStack()) {
              ProfileChunkedBuffer::BlockIterator it = e.Iterator();
              for (;;) {
                ++it;
                if (it.IsAtEnd()) {
                  break;
                }
                ProfileBufferEntryReader er = *it;
                ProfileBufferEntry::Kind kind =
                    er.ReadObject<ProfileBufferEntry::Kind>();
                if (kind == ProfileBufferEntry::Kind::CompactStack) {
                  ProfileChunkedBuffer tempBuffer(
                      ProfileChunkedBuffer::ThreadSafety::WithoutMutex,
                      WorkerChunkManager());
                  er.ReadIntoObject(tempBuffer);
                  tempBuffer.Read([&](ProfileChunkedBuffer::Reader* aReader) {
                    MOZ_ASSERT(
                        aReader,
                        "Local ProfileChunkedBuffer cannot be out-of-session");
                    EntryGetter stackEntryGetter(
                        *aReader, aJITFrameInfo.LocalFailureLatchSource());
                    while (stackEntryGetter.Has()) {
                      if (stackEntryGetter.Get().IsJitReturnAddr()) {
                        aJITAddressConsumer(stackEntryGetter.Get().GetPtr());
                      }
                      stackEntryGetter.Next();
                    }
                  });
                  WorkerChunkManager().Reset(tempBuffer.GetAllChunks());
                  break;
                }

                MOZ_ASSERT(kind >= ProfileBufferEntry::Kind::LEGACY_LIMIT,
                           "There should be no legacy entries between "
                           "TimeBeforeCompactStack and CompactStack");
                er.SetRemainingBytes(0);
              }

              e.Next();
            } else if (e.Has() && e.Get().IsTimeBeforeSameSample()) {
              // Sample index, nothing to do.
              e.Next();
            } else {
              ERROR_AND_CONTINUE("expected a Time entry");
            }
          }
        });
      });
}

void ProfileBuffer::StreamMarkersToJSON(
    SpliceableJSONWriter& aWriter, ProfilerThreadId aThreadId,
    const TimeStamp& aProcessStartTime, double aSinceTime,
    UniqueStacks& aUniqueStacks,
    mozilla::ProgressLogger aProgressLogger) const {
  mEntries.ReadEach([&](ProfileBufferEntryReader& aER) {
    auto type = static_cast<ProfileBufferEntry::Kind>(
        aER.ReadObject<ProfileBufferEntry::KindUnderlyingType>());
    MOZ_ASSERT(static_cast<ProfileBufferEntry::KindUnderlyingType>(type) <
               static_cast<ProfileBufferEntry::KindUnderlyingType>(
                   ProfileBufferEntry::Kind::MODERN_LIMIT));
    if (type == ProfileBufferEntry::Kind::Marker) {
      mozilla::base_profiler_markers_detail::DeserializeAfterKindAndStream(
          aER,
          [&](const ProfilerThreadId& aMarkerThreadId) {
            return (!aThreadId.IsSpecified() || aMarkerThreadId == aThreadId)
                       ? &aWriter
                       : nullptr;
          },
          [&](ProfileChunkedBuffer& aChunkedBuffer) {
            ProfilerBacktrace backtrace("", &aChunkedBuffer);
            backtrace.StreamJSON(aWriter, aProcessStartTime, aUniqueStacks);
          },
          [&](mozilla::base_profiler_markers_detail::Streaming::DeserializerTag
                  aTag) {
            size_t payloadSize = aER.RemainingBytes();

            ProfileBufferEntryReader::DoubleSpanOfConstBytes spans =
                aER.ReadSpans(payloadSize);
            if (MOZ_LIKELY(spans.IsSingleSpan())) {
              // Only a single span, we can just refer to it directly
              // instead of copying it.
              profiler::ffi::gecko_profiler_serialize_marker_for_tag(
                  aTag, spans.mFirstOrOnly.Elements(), payloadSize, &aWriter);
            } else {
              // Two spans, we need to concatenate them by copying.
              uint8_t* payloadBuffer = new uint8_t[payloadSize];
              spans.CopyBytesTo(payloadBuffer);
              profiler::ffi::gecko_profiler_serialize_marker_for_tag(
                  aTag, payloadBuffer, payloadSize, &aWriter);
              delete[] payloadBuffer;
            }
          });
    } else {
      // The entry was not a marker, we need to skip to the end.
      aER.SetRemainingBytes(0);
    }
  });
}
1812 void ProfileBuffer::StreamProfilerOverheadToJSON(
1813 SpliceableJSONWriter
& aWriter
, const TimeStamp
& aProcessStartTime
,
1814 double aSinceTime
, mozilla::ProgressLogger aProgressLogger
) const {
1815 const char* recordOverheads
= getenv("MOZ_PROFILER_RECORD_OVERHEADS");
1816 if (!recordOverheads
|| recordOverheads
[0] == '\0') {
1817 // Overheads were not recorded, return early.
1821 mEntries
.Read([&](ProfileChunkedBuffer::Reader
* aReader
) {
1823 "ProfileChunkedBuffer cannot be out-of-session when sampler is "
1826 EntryGetter
e(*aReader
, aWriter
.SourceFailureLatch(),
1827 std::move(aProgressLogger
));
1829 enum Schema
: uint32_t {
1832 MARKER_CLEANING
= 2,
1837 aWriter
.StartObjectProperty("profilerOverhead");
1838 aWriter
.StartObjectProperty("samples");
1839 // Stream all sampling overhead data. We skip other entries, because we
1840 // process them in StreamSamplesToJSON()/etc.
1842 JSONSchemaWriter
schema(aWriter
);
1843 schema
.WriteField("time");
1844 schema
.WriteField("locking");
1845 schema
.WriteField("expiredMarkerCleaning");
1846 schema
.WriteField("counters");
1847 schema
.WriteField("threads");
1850 aWriter
.StartArrayProperty("data");
1851 double firstTime
= 0.0;
1852 double lastTime
= 0.0;
1853 ProfilerStats intervals
, overheads
, lockings
, cleanings
, counters
, threads
;
    while (e.Has()) {
      // valid sequence: ProfilerOverheadTime, ProfilerOverheadDuration * 4
      if (e.Get().IsProfilerOverheadTime()) {
        double time = e.Get().GetDouble();
        if (time >= aSinceTime) {
          e.Next();
          if (!e.Has() || !e.Get().IsProfilerOverheadDuration()) {
            ERROR_AND_CONTINUE(
                "expected a ProfilerOverheadDuration entry after "
                "ProfilerOverheadTime");
          }
          double locking = e.Get().GetDouble();
          e.Next();
          if (!e.Has() || !e.Get().IsProfilerOverheadDuration()) {
            ERROR_AND_CONTINUE(
                "expected a ProfilerOverheadDuration entry after "
                "ProfilerOverheadTime,ProfilerOverheadDuration");
          }
          double cleaning = e.Get().GetDouble();
          e.Next();
          if (!e.Has() || !e.Get().IsProfilerOverheadDuration()) {
            ERROR_AND_CONTINUE(
                "expected a ProfilerOverheadDuration entry after "
                "ProfilerOverheadTime,ProfilerOverheadDuration*2");
          }
          double counter = e.Get().GetDouble();
          e.Next();
          if (!e.Has() || !e.Get().IsProfilerOverheadDuration()) {
            ERROR_AND_CONTINUE(
                "expected a ProfilerOverheadDuration entry after "
                "ProfilerOverheadTime,ProfilerOverheadDuration*3");
          }
          double thread = e.Get().GetDouble();

          if (firstTime == 0.0) {
            firstTime = time;
          } else {
            // Note that we'll have 1 fewer interval than other numbers
            // (because we need both ends of an interval to know its
            // duration). The final difference should be insignificant over
            // the expected many thousands of iterations.
            intervals.Count(time - lastTime);
          }
          lastTime = time;
          overheads.Count(locking + cleaning + counter + thread);
          lockings.Count(locking);
          cleanings.Count(cleaning);
          counters.Count(counter);
          threads.Count(thread);

          AutoArraySchemaWriter writer(aWriter);
          writer.TimeMsElement(TIME, time);
          writer.DoubleElement(LOCKING, locking);
          writer.DoubleElement(MARKER_CLEANING, cleaning);
          writer.DoubleElement(COUNTERS, counter);
          writer.DoubleElement(THREADS, thread);
        }
      }
      e.Next();
    }
    aWriter.EndArray();   // data
    aWriter.EndObject();  // samples

    // Only output statistics if there is at least one full interval (and
    // therefore at least two samplings).
    if (intervals.n > 0) {
      aWriter.StartObjectProperty("statistics");
      aWriter.DoubleProperty("profiledDuration", lastTime - firstTime);
      aWriter.IntProperty("samplingCount", overheads.n);
      aWriter.DoubleProperty("overheadDurations", overheads.sum);
      aWriter.DoubleProperty("overheadPercentage",
                             overheads.sum / (lastTime - firstTime));
#define PROFILER_STATS(name, var)                           \
  aWriter.DoubleProperty("mean" name, (var).sum / (var).n); \
  aWriter.DoubleProperty("min" name, (var).min);            \
  aWriter.DoubleProperty("max" name, (var).max);
      PROFILER_STATS("Interval", intervals);
      PROFILER_STATS("Overhead", overheads);
      PROFILER_STATS("Lockings", lockings);
      PROFILER_STATS("Cleaning", cleanings);
      PROFILER_STATS("Counter", counters);
      PROFILER_STATS("Thread", threads);
#undef PROFILER_STATS
      aWriter.EndObject();  // statistics
    }
    aWriter.EndObject();  // profilerOverhead
  });
}
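
// Samples for one counter: each sample pairs the counter's accumulated value
// (mCount) with the number of times it was updated (mNumber) at a given
// timestamp; mNumber is expected to be monotonically non-decreasing (see the
// MOZ_ASSERTs in StreamCountersToJSON below).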
struct CounterSample {
  double mTime;
  uint64_t mNumber;
  int64_t mCount;
};

using CounterSamples = Vector<CounterSample>;

static LazyLogModule sFuzzyfoxLog("Fuzzyfox");

// HashMap lookup; if the key is not found, a default value is inserted.
// Returns a reference to the (existing or new) value inside the HashMap.
template <typename HashM, typename Key>
static auto& LookupOrAdd(HashM& aMap, Key&& aKey) {
  auto addPtr = aMap.lookupForAdd(aKey);
  if (!addPtr) {
    MOZ_RELEASE_ASSERT(aMap.add(addPtr, std::forward<Key>(aKey),
                                typename HashM::Entry::ValueType{}));
    MOZ_ASSERT(!!addPtr);
  }
  return addPtr->value();
}
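
// For example, `LookupOrAdd(counters, id)` in StreamCountersToJSON below
// returns the CounterSamples vector for `id`, default-inserting an empty one
// the first time that counter id is seen.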

void ProfileBuffer::StreamCountersToJSON(
    SpliceableJSONWriter& aWriter, const TimeStamp& aProcessStartTime,
    double aSinceTime, mozilla::ProgressLogger aProgressLogger) const {
  // Because this is a format entirely internal to the Profiler, any parsing
  // error indicates a bug in the ProfileBuffer writing or the parser itself,
  // or possibly flaky hardware.

  mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
    MOZ_ASSERT(aReader,
               "ProfileChunkedBuffer cannot be out-of-session when sampler is "
               "running");

    EntryGetter e(*aReader, aWriter.SourceFailureLatch(),
                  std::move(aProgressLogger));

    enum Schema : uint32_t { TIME = 0, COUNT = 1, NUMBER = 2 };
    // Stream all counters. We skip other entries, because we process them in
    // StreamSamplesToJSON()/etc.
    //
    // Valid sequence in the buffer:
    // CounterID
    // Time
    // ( Count Number? )*
    //
    // And the JSON (example):
    // "counter": {
    //  "name": "malloc",
    //  "category": "Memory",
    //  "description": "Amount of allocated memory",
    //  "samples": {
    //   "schema": {"time": 0, "count": 1, "number": 2},
    //   "data": [
    //    [
    //     16117.033968000002,
    //     ...
    //    ],
    //    ...
    //   ]
    //  }
    // }
    // Build the map of counters and populate it
    HashMap<void*, CounterSamples> counters;

    while (e.Has()) {
      // skip all non-Counters, including if we start in the middle of a
      // counter
      if (e.Get().IsCounterId()) {
        void* id = e.Get().GetPtr();
        CounterSamples& data = LookupOrAdd(counters, id);
        e.Next();
        if (!e.Has() || !e.Get().IsTime()) {
          ERROR_AND_CONTINUE("expected a Time entry");
        }
        double time = e.Get().GetDouble();
        e.Next();
        if (time >= aSinceTime) {
          if (!e.Has() || !e.Get().IsCount()) {
            ERROR_AND_CONTINUE("expected a Count entry");
          }
          int64_t count = e.Get().GetUint64();
          e.Next();
          uint64_t number;
          if (!e.Has() || !e.Get().IsNumber()) {
            number = 0;
          } else {
            number = e.Get().GetInt64();
            e.Next();
          }
          CounterSample sample = {time, number, count};
          MOZ_RELEASE_ASSERT(data.append(sample));
        }
        // skip counter sample - only need to skip the initial counter
        // id, then let the loop at the top skip the rest
      } else {
        e.Next();
      }
    }

    // we have a map of counter entries; dump them to JSON
    if (counters.count() == 0) {
      return;
    }
    aWriter.StartArrayProperty("counters");
    for (auto iter = counters.iter(); !iter.done(); iter.next()) {
      CounterSamples& samples = iter.get().value();
      size_t size = samples.length();
      if (size == 0) {
        continue;
      }

      const BaseProfilerCount* base_counter =
          static_cast<const BaseProfilerCount*>(iter.get().key());

      aWriter.Start();
      aWriter.StringProperty("name", MakeStringSpan(base_counter->mLabel));
      aWriter.StringProperty("category",
                             MakeStringSpan(base_counter->mCategory));
      aWriter.StringProperty("description",
                             MakeStringSpan(base_counter->mDescription));

      bool hasNumber = false;
      for (size_t i = 0; i < size; i++) {
        if (samples[i].mNumber != 0) {
          hasNumber = true;
          break;
        }
      }

      aWriter.StartObjectProperty("samples");
      {
        JSONSchemaWriter schema(aWriter);
        schema.WriteField("time");
        schema.WriteField("count");
        if (hasNumber) {
          schema.WriteField("number");
        }
      }

      aWriter.StartArrayProperty("data");
      double previousSkippedTime = 0.0;
      uint64_t previousNumber = 0;
      int64_t previousCount = 0;
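
      // Worked example with hypothetical values: counts 10, 10, 10, 12 at
      // times t0..t3 (and no "number" field) would be written as [t0, 10],
      // [t2, 0], [t3, 2]: unchanged middle samples are skipped, and the last
      // skipped time (t2) is re-emitted with zero deltas just before the
      // change at t3, so graphs show when the value was last sampled.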
      for (size_t i = 0; i < size; i++) {
        // Encode as deltas, and only encode if different than the previous
        // or next sample; Always write the first and last samples.
        if (i == 0 || i == size - 1 || samples[i].mNumber != previousNumber ||
            samples[i].mCount != previousCount ||
            // Ensure we output the first 0 before skipping samples.
            (i >= 2 && (samples[i - 2].mNumber != previousNumber ||
                        samples[i - 2].mCount != previousCount))) {
          if (i != 0 && samples[i].mTime >= samples[i - 1].mTime) {
            MOZ_LOG(sFuzzyfoxLog, mozilla::LogLevel::Error,
                    ("Fuzzyfox Profiler Assertion: %f >= %f", samples[i].mTime,
                     samples[i - 1].mTime));
          }
          MOZ_ASSERT(i == 0 || samples[i].mTime >= samples[i - 1].mTime);
          MOZ_ASSERT(samples[i].mNumber >= previousNumber);
          MOZ_ASSERT(samples[i].mNumber - previousNumber <=
                     uint64_t(std::numeric_limits<int64_t>::max()));

          int64_t numberDelta =
              static_cast<int64_t>(samples[i].mNumber - previousNumber);
          int64_t countDelta = samples[i].mCount - previousCount;

          if (previousSkippedTime != 0.0 &&
              (numberDelta != 0 || countDelta != 0)) {
            // Write the last skipped sample, unless the new one is all
            // zeroes (that'd be redundant). This is useful to know when a
            // certain value was last sampled, so that the front-end graph
            // will be more correct.
            AutoArraySchemaWriter writer(aWriter);
            writer.TimeMsElement(TIME, previousSkippedTime);
            // The deltas are effectively zeroes, since no change happened
            // between the last actually-written sample and the last skipped
            // one.
            writer.IntElement(COUNT, 0);
            if (hasNumber) {
              writer.IntElement(NUMBER, 0);
            }
          }

          AutoArraySchemaWriter writer(aWriter);
          writer.TimeMsElement(TIME, samples[i].mTime);
          writer.IntElement(COUNT, countDelta);
          if (hasNumber) {
            writer.IntElement(NUMBER, numberDelta);
          }

          previousSkippedTime = 0.0;
          previousNumber = samples[i].mNumber;
          previousCount = samples[i].mCount;
        } else {
          previousSkippedTime = samples[i].mTime;
        }
      }
      aWriter.EndArray();   // data
      aWriter.EndObject();  // samples
      aWriter.End();        // for each counter
    }
    aWriter.EndArray();  // counters
  });
}

#undef ERROR_AND_CONTINUE

static void AddPausedRange(SpliceableJSONWriter& aWriter, const char* aReason,
                           const Maybe<double>& aStartTime,
                           const Maybe<double>& aEndTime) {
  aWriter.Start();
  if (aStartTime) {
    aWriter.TimeDoubleMsProperty("startTime", *aStartTime);
  } else {
    aWriter.NullProperty("startTime");
  }
  if (aEndTime) {
    aWriter.TimeDoubleMsProperty("endTime", *aEndTime);
  } else {
    aWriter.NullProperty("endTime");
  }
  aWriter.StringProperty("reason", MakeStringSpan(aReason));
  aWriter.End();
}
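
// For example (hypothetical values), a pause captured from 1500ms to 2000ms
// would be written as:
//   { "startTime": 1500, "endTime": 2000, "reason": "profiler-paused" }
// and a range that never ended gets a null "endTime".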

void ProfileBuffer::StreamPausedRangesToJSON(
    SpliceableJSONWriter& aWriter, double aSinceTime,
    mozilla::ProgressLogger aProgressLogger) const {
  mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
    MOZ_ASSERT(aReader,
               "ProfileChunkedBuffer cannot be out-of-session when sampler is "
               "running");

    EntryGetter e(*aReader, aWriter.SourceFailureLatch(),
                  aProgressLogger.CreateSubLoggerFromTo(
                      1_pc, "Streaming pauses...", 99_pc, "Streamed pauses"));

    Maybe<double> currentPauseStartTime;
    Maybe<double> currentCollectionStartTime;

    while (e.Has()) {
      if (e.Get().IsPause()) {
        currentPauseStartTime = Some(e.Get().GetDouble());
      } else if (e.Get().IsResume()) {
        AddPausedRange(aWriter, "profiler-paused", currentPauseStartTime,
                       Some(e.Get().GetDouble()));
        currentPauseStartTime = Nothing();
      } else if (e.Get().IsCollectionStart()) {
        currentCollectionStartTime = Some(e.Get().GetDouble());
      } else if (e.Get().IsCollectionEnd()) {
        AddPausedRange(aWriter, "collecting", currentCollectionStartTime,
                       Some(e.Get().GetDouble()));
        currentCollectionStartTime = Nothing();
      }
      e.Next();
    }

    if (currentPauseStartTime) {
      AddPausedRange(aWriter, "profiler-paused", currentPauseStartTime,
                     Nothing());
    }
    if (currentCollectionStartTime) {
      AddPausedRange(aWriter, "collecting", currentCollectionStartTime,
                     Nothing());
    }
  });
}

bool ProfileBuffer::DuplicateLastSample(ProfilerThreadId aThreadId,
                                        double aSampleTimeMs,
                                        Maybe<uint64_t>& aLastSample,
                                        const RunningTimes& aRunningTimes) {
  if (!aLastSample) {
    return false;
  }

  if (mEntries.IsIndexInCurrentChunk(ProfileBufferIndex{*aLastSample})) {
    // The last (fully-written) sample is in this chunk, we can refer to it.
    //
    // Note that between now and when we write the SameSample below, another
    // chunk could have been started, so the SameSample will in fact refer to a
    // block in a previous chunk. This is okay, because:
    // - When serializing to JSON, if that chunk is still there, we'll still be
    //   able to find that old stack, so nothing will be lost.
    // - If unfortunately that chunk has been destroyed, we will lose this
    //   sample. But this will only happen to the first sample (per thread) in
    //   the whole JSON output, because the next time we're here to duplicate
    //   the same sample again, IsIndexInCurrentChunk will say `false` and we
    //   will fall back to the normal copy or even re-sample. Losing the first
    //   sample out of many in a whole recording is acceptable.
    //
    // |---| = chunk, S = Sample, D = Duplicate, s = same sample
    // |---S-s-s--| |s-D--s--s-| |s-D--s---s|
    // Later, the first chunk is destroyed/recycled:
    // |s-D--s--s-| |s-D--s---s| |-...
    //  `-|--|-------|--- Same but no previous -> lost.
    //     `--|-------|--- Full duplicate sample.
    //        `-------|--- Same with previous -> okay.
    //                `--- Same but now we have a previous -> okay!
    AUTO_PROFILER_STATS(DuplicateLastSample_SameSample);

    // Add the thread id first. We don't update `aLastSample` because we are
    // not writing a full sample.
    (void)AddThreadIdEntry(aThreadId);

    // Copy the new time, to be followed by a SameSample.
    AddEntry(ProfileBufferEntry::TimeBeforeSameSample(aSampleTimeMs));

    // Add running times if they have data.
    if (!aRunningTimes.IsEmpty()) {
      mEntries.PutObjects(ProfileBufferEntry::Kind::RunningTimes,
                          aRunningTimes);
    }

    // Finish with a SameSample entry.
    mEntries.PutObjects(ProfileBufferEntry::Kind::SameSample);

    return true;
  }

  AUTO_PROFILER_STATS(DuplicateLastSample_copy);

  ProfileChunkedBuffer tempBuffer(
      ProfileChunkedBuffer::ThreadSafety::WithoutMutex, WorkerChunkManager());

  auto retrieveWorkerChunk = MakeScopeExit(
      [&]() { WorkerChunkManager().Reset(tempBuffer.GetAllChunks()); });

  const bool ok = mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
    MOZ_ASSERT(aReader,
               "ProfileChunkedBuffer cannot be out-of-session when sampler is "
               "running");

    // DuplicateLastSample is only called during profiling, so we don't need a
    // progress logger (only useful when capturing the final profile).
    EntryGetter e(*aReader, mozilla::FailureLatchInfallibleSource::Singleton(),
                  ProgressLogger{}, *aLastSample);

    if (e.CurPos() != *aLastSample) {
      // The last sample is no longer within the buffer range, so we cannot
      // use it. Reset the stored buffer position to Nothing().
      aLastSample.reset();
      return false;
    }

    MOZ_RELEASE_ASSERT(e.Has() && e.Get().IsThreadId() &&
                       e.Get().GetThreadId() == aThreadId);

    e.Next();

    // Go through the whole entry and duplicate it, until we find the next
    // one.
    while (e.Has()) {
      switch (e.Get().GetKind()) {
        case ProfileBufferEntry::Kind::Pause:
        case ProfileBufferEntry::Kind::Resume:
        case ProfileBufferEntry::Kind::PauseSampling:
        case ProfileBufferEntry::Kind::ResumeSampling:
        case ProfileBufferEntry::Kind::CollectionStart:
        case ProfileBufferEntry::Kind::CollectionEnd:
        case ProfileBufferEntry::Kind::ThreadId:
        case ProfileBufferEntry::Kind::TimeBeforeSameSample:
          // We're done.
          return true;
        case ProfileBufferEntry::Kind::Time:
          // Copy with new time
          AddEntry(tempBuffer, ProfileBufferEntry::Time(aSampleTimeMs));
          break;
        case ProfileBufferEntry::Kind::TimeBeforeCompactStack: {
          // Copy with new time, followed by a compact stack.
          AddEntry(tempBuffer,
                   ProfileBufferEntry::TimeBeforeCompactStack(aSampleTimeMs));

          // Add running times if they have data.
          if (!aRunningTimes.IsEmpty()) {
            tempBuffer.PutObjects(ProfileBufferEntry::Kind::RunningTimes,
                                  aRunningTimes);
          }

          // The `CompactStack` *must* be present afterwards, but may not
          // immediately follow `TimeBeforeCompactStack` (e.g., some markers
          // could be written in-between), so we need to look for it in the
          // following entries.
          ProfileChunkedBuffer::BlockIterator it = e.Iterator();
          for (;;) {
            ++it;
            if (it.IsAtEnd()) {
              break;
            }
            ProfileBufferEntryReader er = *it;
            auto kind = static_cast<ProfileBufferEntry::Kind>(
                er.ReadObject<ProfileBufferEntry::KindUnderlyingType>());
            MOZ_ASSERT(
                static_cast<ProfileBufferEntry::KindUnderlyingType>(kind) <
                static_cast<ProfileBufferEntry::KindUnderlyingType>(
                    ProfileBufferEntry::Kind::MODERN_LIMIT));
            if (kind == ProfileBufferEntry::Kind::CompactStack) {
              // Found our CompactStack, just make a copy of the whole entry.
              er = *it;
              auto bytes = er.RemainingBytes();
              MOZ_ASSERT(bytes <
                         ProfileBufferChunkManager::scExpectedMaximumStackSize);
              tempBuffer.Put(bytes, [&](Maybe<ProfileBufferEntryWriter>& aEW) {
                MOZ_ASSERT(aEW.isSome(), "tempBuffer cannot be out-of-session");
                aEW->WriteFromReader(er, bytes);
              });
              // CompactStack marks the end, we're done.
              break;
            }

            MOZ_ASSERT(kind >= ProfileBufferEntry::Kind::LEGACY_LIMIT,
                       "There should be no legacy entries between "
                       "TimeBeforeCompactStack and CompactStack");
            er.SetRemainingBytes(0);
            // Here, we have encountered a non-legacy entry that was not the
            // CompactStack we're looking for; just continue the search...
          }
          // We're done.
          return true;
        }
        case ProfileBufferEntry::Kind::Number:
        case ProfileBufferEntry::Kind::Count:
          // Don't copy anything not part of a thread's stack sample
          break;
        case ProfileBufferEntry::Kind::CounterId:
          // CounterId is normally followed by Time - if so, we'd like
          // to skip it. If we duplicate Time, it won't hurt anything, just
          // waste buffer space (and this can happen if the CounterId has
          // fallen off the end of the buffer, but Time (and Number/Count)
          // are still in the buffer).
          e.Next();
          if (e.Has() && e.Get().GetKind() != ProfileBufferEntry::Kind::Time) {
            // this would only happen if there was an invalid sequence
            // in the buffer. Don't skip it.
            continue;
          }
          // we've skipped Time
          break;
        case ProfileBufferEntry::Kind::ProfilerOverheadTime:
          // ProfilerOverheadTime is normally followed by
          // ProfilerOverheadDuration*4 - if so, we'd like to skip it. Don't
          // duplicate, as we are in the middle of a sampling and will soon
          // capture its own overhead.
          e.Next();
          // A missing Time would only happen if there was an invalid
          // sequence in the buffer. Don't skip unexpected entry.
          if (e.Has() &&
              e.Get().GetKind() !=
                  ProfileBufferEntry::Kind::ProfilerOverheadDuration) {
            continue;
          }
          e.Next();
          if (e.Has() &&
              e.Get().GetKind() !=
                  ProfileBufferEntry::Kind::ProfilerOverheadDuration) {
            continue;
          }
          e.Next();
          if (e.Has() &&
              e.Get().GetKind() !=
                  ProfileBufferEntry::Kind::ProfilerOverheadDuration) {
            continue;
          }
          e.Next();
          if (e.Has() &&
              e.Get().GetKind() !=
                  ProfileBufferEntry::Kind::ProfilerOverheadDuration) {
            continue;
          }
          // we've skipped ProfilerOverheadTime and
          // ProfilerOverheadDuration*4.
          break;
        default: {
          // Copy anything else we don't know about.
          AddEntry(tempBuffer, e.Get());
          break;
        }
      }
      e.Next();
    }
    return true;
  });

  if (!ok) {
    return false;
  }

  // If the buffer was big enough, there won't be any cleared blocks.
  if (tempBuffer.GetState().mClearedBlockCount != 0) {
    // No need to try to read stack again as it won't fit. Reset the stored
    // buffer position to Nothing().
    aLastSample.reset();
    return false;
  }

  aLastSample = Some(AddThreadIdEntry(aThreadId));

  mEntries.AppendContents(tempBuffer);

  return true;
}

void ProfileBuffer::DiscardSamplesBeforeTime(double aTime) {
  // This function does nothing!
  // The duration limit will be removed from Firefox, see bug 1632365.
}

// END ProfileBuffer
////////////////////////////////////////////////////////////////////////