//===- InstrProfWriter.cpp - Instrumented profiling writer ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains support for writing profiling data for clang's
// instrumentation based PGO and coverage.
//
//===----------------------------------------------------------------------===//

#include "llvm/ProfileData/InstrProfWriter.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/ProfileSummary.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/ProfileData/MemProf.h"
#include "llvm/ProfileData/ProfileCommon.h"
#include "llvm/Support/Compression.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/OnDiskHashTable.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>
#include <ctime>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

// A struct to define how the data stream should be patched. For Indexed
// profiling, only uint64_t data type is needed.
struct PatchItem {
  uint64_t Pos;         // Where to patch.
  ArrayRef<uint64_t> D; // An array of source data.
};

namespace llvm {

// A wrapper class to abstract writer stream with support of bytes
// back patching.
class ProfOStream {
public:
  ProfOStream(raw_fd_ostream &FD)
      : IsFDOStream(true), OS(FD), LE(FD, llvm::endianness::little) {}
  ProfOStream(raw_string_ostream &STR)
      : IsFDOStream(false), OS(STR), LE(STR, llvm::endianness::little) {}

  [[nodiscard]] uint64_t tell() const { return OS.tell(); }
  void write(uint64_t V) { LE.write<uint64_t>(V); }
  void write32(uint32_t V) { LE.write<uint32_t>(V); }
  void writeByte(uint8_t V) { LE.write<uint8_t>(V); }

  // \c patch can only be called when all data is written and flushed.
  // For raw_string_ostream, the patch is done on the target string
  // directly and it won't be reflected in the stream's internal buffer.
  void patch(ArrayRef<PatchItem> P) {
    using namespace support;

    if (IsFDOStream) {
      raw_fd_ostream &FDOStream = static_cast<raw_fd_ostream &>(OS);
      const uint64_t LastPos = FDOStream.tell();
      for (const auto &K : P) {
        FDOStream.seek(K.Pos);
        for (uint64_t Elem : K.D)
          write(Elem);
      }
      // Reset the stream to the last position after patching so that users
      // don't accidentally overwrite data. This makes it consistent with
      // the string stream below which replaces the data directly.
      FDOStream.seek(LastPos);
    } else {
      raw_string_ostream &SOStream = static_cast<raw_string_ostream &>(OS);
      std::string &Data = SOStream.str(); // with flush
      for (const auto &K : P) {
        for (int I = 0, E = K.D.size(); I != E; I++) {
          uint64_t Bytes =
              endian::byte_swap<uint64_t, llvm::endianness::little>(K.D[I]);
          Data.replace(K.Pos + I * sizeof(uint64_t), sizeof(uint64_t),
                       (const char *)&Bytes, sizeof(uint64_t));
        }
      }
    }
  }

  // If \c OS is an instance of \c raw_fd_ostream, this field will be
  // true. Otherwise, \c OS will be a raw_string_ostream.
  bool IsFDOStream;
  raw_ostream &OS;
  support::endian::Writer LE;
};

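// Illustrative sketch (not part of the writer API) of the reserve-then-patch
// pattern the callers below rely on; `SlotPos` and `Values` are made-up names:
//
//   uint64_t SlotPos = OS.tell();
//   OS.write(0); // Reserve a uint64_t slot whose value is not yet known.
//   // ... emit the section whose offset or size we want to record ...
//   uint64_t Values[] = {OS.tell()};
//   OS.patch({{SlotPos, Values}}); // Back-fill the reserved slot.
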
class InstrProfRecordWriterTrait {
public:
  using key_type = StringRef;
  using key_type_ref = StringRef;

  using data_type = const InstrProfWriter::ProfilingData *const;
  using data_type_ref = const InstrProfWriter::ProfilingData *const;

  using hash_value_type = uint64_t;
  using offset_type = uint64_t;

  llvm::endianness ValueProfDataEndianness = llvm::endianness::little;
  InstrProfSummaryBuilder *SummaryBuilder;
  InstrProfSummaryBuilder *CSSummaryBuilder;

  InstrProfRecordWriterTrait() = default;

  static hash_value_type ComputeHash(key_type_ref K) {
    return IndexedInstrProf::ComputeHash(K);
  }

  static std::pair<offset_type, offset_type>
  EmitKeyDataLength(raw_ostream &Out, key_type_ref K, data_type_ref V) {
    using namespace support;

    endian::Writer LE(Out, llvm::endianness::little);

    offset_type N = K.size();
    LE.write<offset_type>(N);

    offset_type M = 0;
    for (const auto &ProfileData : *V) {
      const InstrProfRecord &ProfRecord = ProfileData.second;
      M += sizeof(uint64_t); // The function hash
      M += sizeof(uint64_t); // The size of the Counts vector
      M += ProfRecord.Counts.size() * sizeof(uint64_t);
      M += sizeof(uint64_t); // The size of the Bitmap vector
      M += ProfRecord.BitmapBytes.size() * sizeof(uint64_t);

      // Value data
      M += ValueProfData::getSize(ProfileData.second);
    }
    LE.write<offset_type>(M);

    return std::make_pair(N, M);
  }

  void EmitKey(raw_ostream &Out, key_type_ref K, offset_type N) {
    Out.write(K.data(), N);
  }

  void EmitData(raw_ostream &Out, key_type_ref, data_type_ref V, offset_type) {
    using namespace support;

    endian::Writer LE(Out, llvm::endianness::little);
    for (const auto &ProfileData : *V) {
      const InstrProfRecord &ProfRecord = ProfileData.second;
      if (NamedInstrProfRecord::hasCSFlagInHash(ProfileData.first))
        CSSummaryBuilder->addRecord(ProfRecord);
      else
        SummaryBuilder->addRecord(ProfRecord);

      LE.write<uint64_t>(ProfileData.first); // Function hash
      LE.write<uint64_t>(ProfRecord.Counts.size());
      for (uint64_t I : ProfRecord.Counts)
        LE.write<uint64_t>(I);

      LE.write<uint64_t>(ProfRecord.BitmapBytes.size());
      for (uint64_t I : ProfRecord.BitmapBytes)
        LE.write<uint64_t>(I);

      // Write value data
      std::unique_ptr<ValueProfData> VDataPtr =
          ValueProfData::serializeFrom(ProfileData.second);
      uint32_t S = VDataPtr->getSize();
      VDataPtr->swapBytesFromHost(ValueProfDataEndianness);
      Out.write((const char *)VDataPtr.get(), S);
    }
  }
};

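// For reference, the byte layout EmitData produces for each name key is a
// sequence of groups, one per (hash, record) pair:
//   { FuncHash, NumCounters, Counters..., NumBitmapBytes,
//     BitmapBytes... (each widened to uint64_t), serialized ValueProfData }
// EmitKeyDataLength computes exactly this size up front so the on-disk hash
// table can record a data length for each key.
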
} // end namespace llvm

InstrProfWriter::InstrProfWriter(
    bool Sparse, uint64_t TemporalProfTraceReservoirSize,
    uint64_t MaxTemporalProfTraceLength, bool WritePrevVersion,
    memprof::IndexedVersion MemProfVersionRequested, bool MemProfFullSchema,
    bool MemprofGenerateRandomHotness,
    unsigned MemprofGenerateRandomHotnessSeed)
    : Sparse(Sparse), MaxTemporalProfTraceLength(MaxTemporalProfTraceLength),
      TemporalProfTraceReservoirSize(TemporalProfTraceReservoirSize),
      InfoObj(new InstrProfRecordWriterTrait()),
      WritePrevVersion(WritePrevVersion),
      MemProfVersionRequested(MemProfVersionRequested),
      MemProfFullSchema(MemProfFullSchema),
      MemprofGenerateRandomHotness(MemprofGenerateRandomHotness) {
  // Set up the random number seed if requested.
  if (MemprofGenerateRandomHotness) {
    unsigned seed = MemprofGenerateRandomHotnessSeed
                        ? MemprofGenerateRandomHotnessSeed
                        : std::time(nullptr);
    errs() << "random hotness seed = " << seed << "\n";
    std::srand(seed);
  }
}

InstrProfWriter::~InstrProfWriter() { delete InfoObj; }

// Internal interface for testing purposes only.
void InstrProfWriter::setValueProfDataEndianness(llvm::endianness Endianness) {
  InfoObj->ValueProfDataEndianness = Endianness;
}

void InstrProfWriter::setOutputSparse(bool Sparse) {
  this->Sparse = Sparse;
}

void InstrProfWriter::addRecord(NamedInstrProfRecord &&I, uint64_t Weight,
                                function_ref<void(Error)> Warn) {
  auto Name = I.Name;
  auto Hash = I.Hash;
  addRecord(Name, Hash, std::move(I), Weight, Warn);
}

void InstrProfWriter::overlapRecord(NamedInstrProfRecord &&Other,
                                    OverlapStats &Overlap,
                                    OverlapStats &FuncLevelOverlap,
                                    const OverlapFuncFilters &FuncFilter) {
  auto Name = Other.Name;
  auto Hash = Other.Hash;
  Other.accumulateCounts(FuncLevelOverlap.Test);
  if (!FunctionData.contains(Name)) {
    Overlap.addOneUnique(FuncLevelOverlap.Test);
    return;
  }
  if (FuncLevelOverlap.Test.CountSum < 1.0f) {
    Overlap.Overlap.NumEntries += 1;
    return;
  }
  auto &ProfileDataMap = FunctionData[Name];
  bool NewFunc;
  ProfilingData::iterator Where;
  std::tie(Where, NewFunc) =
      ProfileDataMap.insert(std::make_pair(Hash, InstrProfRecord()));
  if (NewFunc) {
    Overlap.addOneMismatch(FuncLevelOverlap.Test);
    return;
  }
  InstrProfRecord &Dest = Where->second;

  uint64_t ValueCutoff = FuncFilter.ValueCutoff;
  if (!FuncFilter.NameFilter.empty() && Name.contains(FuncFilter.NameFilter))
    ValueCutoff = 0;

  Dest.overlap(Other, Overlap, FuncLevelOverlap, ValueCutoff);
}

void InstrProfWriter::addRecord(StringRef Name, uint64_t Hash,
                                InstrProfRecord &&I, uint64_t Weight,
                                function_ref<void(Error)> Warn) {
  auto &ProfileDataMap = FunctionData[Name];

  bool NewFunc;
  ProfilingData::iterator Where;
  std::tie(Where, NewFunc) =
      ProfileDataMap.insert(std::make_pair(Hash, InstrProfRecord()));
  InstrProfRecord &Dest = Where->second;

  auto MapWarn = [&](instrprof_error E) {
    Warn(make_error<InstrProfError>(E));
  };

  if (NewFunc) {
    // We've never seen a function with this name and hash, add it.
    Dest = std::move(I);
    if (Weight > 1)
      Dest.scale(Weight, 1, MapWarn);
  } else {
    // We're updating a function we've seen before.
    Dest.merge(I, Weight, MapWarn);
  }

  Dest.sortValueData();
}

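// Note on Weight: it effectively acts as a replication factor. For a function
// seen for the first time the counters are scaled by Weight; for a function
// seen before, the incoming counters are merged in with that weight (see
// InstrProfRecord::merge). Weighted profile merging builds on this behavior.
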
void InstrProfWriter::addMemProfRecord(
    const Function::GUID Id, const memprof::IndexedMemProfRecord &Record) {
  auto NewRecord = Record;
  // Provoke random hotness values if requested. We specify the lifetime
  // access density and lifetime length that will result in a cold or not cold
  // hotness. See the logic in getAllocType() in Analysis/MemoryProfileInfo.cpp.
  if (MemprofGenerateRandomHotness) {
    for (auto &Alloc : NewRecord.AllocSites) {
      // To get a not cold context, set the lifetime access density to the
      // maximum value and the lifetime to 0.
      uint64_t NewTLAD = std::numeric_limits<uint64_t>::max();
      uint64_t NewTL = 0;
      bool IsCold = std::rand() % 2;
      if (IsCold) {
        // To get a cold context, set the lifetime access density to 0 and the
        // lifetime to the maximum value.
        NewTLAD = 0;
        NewTL = std::numeric_limits<uint64_t>::max();
      }
      Alloc.Info.setTotalLifetimeAccessDensity(NewTLAD);
      Alloc.Info.setTotalLifetime(NewTL);
    }
  }
  auto [Iter, Inserted] = MemProfData.Records.insert({Id, NewRecord});
  // If we inserted a new record then we are done.
  if (Inserted) {
    return;
  }
  memprof::IndexedMemProfRecord &Existing = Iter->second;
  Existing.merge(NewRecord);
}

bool InstrProfWriter::addMemProfFrame(const memprof::FrameId Id,
                                      const memprof::Frame &Frame,
                                      function_ref<void(Error)> Warn) {
  auto [Iter, Inserted] = MemProfData.Frames.insert({Id, Frame});
  // If a mapping already exists for the current frame id and it does not
  // match the new mapping provided then reset the existing contents and bail
  // out. We don't support the merging of memprof data whose Frame -> Id
  // mapping across profiles is inconsistent.
  if (!Inserted && Iter->second != Frame) {
    Warn(make_error<InstrProfError>(instrprof_error::malformed,
                                    "frame to id mapping mismatch"));
    return false;
  }
  return true;
}

bool InstrProfWriter::addMemProfCallStack(
    const memprof::CallStackId CSId,
    const llvm::SmallVector<memprof::FrameId> &CallStack,
    function_ref<void(Error)> Warn) {
  auto [Iter, Inserted] = MemProfData.CallStacks.insert({CSId, CallStack});
  // If a mapping already exists for the current call stack id and it does not
  // match the new mapping provided then reset the existing contents and bail
  // out. We don't support the merging of memprof data whose CallStack -> Id
  // mapping across profiles is inconsistent.
  if (!Inserted && Iter->second != CallStack) {
    Warn(make_error<InstrProfError>(instrprof_error::malformed,
                                    "call stack to id mapping mismatch"));
    return false;
  }
  return true;
}

bool InstrProfWriter::addMemProfData(memprof::IndexedMemProfData Incoming,
                                     function_ref<void(Error)> Warn) {
  // TODO: Once we remove support for MemProf format Version V1, assert that
  // the three components (frames, call stacks, and records) are either all
  // empty or populated.

  if (MemProfData.Frames.empty())
    MemProfData.Frames = std::move(Incoming.Frames);
  else
    for (const auto &[Id, F] : Incoming.Frames)
      if (!addMemProfFrame(Id, F, Warn))
        return false;

  if (MemProfData.CallStacks.empty())
    MemProfData.CallStacks = std::move(Incoming.CallStacks);
  else
    for (const auto &[CSId, CS] : Incoming.CallStacks)
      if (!addMemProfCallStack(CSId, CS, Warn))
        return false;

  // Add one record at a time if randomization is requested.
  if (MemProfData.Records.empty() && !MemprofGenerateRandomHotness)
    MemProfData.Records = std::move(Incoming.Records);
  else
    for (const auto &[GUID, Record] : Incoming.Records)
      addMemProfRecord(GUID, Record);

  return true;
}

void InstrProfWriter::addBinaryIds(ArrayRef<llvm::object::BuildID> BIs) {
  llvm::append_range(BinaryIds, BIs);
}

void InstrProfWriter::addTemporalProfileTrace(TemporalProfTraceTy Trace) {
  assert(Trace.FunctionNameRefs.size() <= MaxTemporalProfTraceLength);
  assert(!Trace.FunctionNameRefs.empty());
  if (TemporalProfTraceStreamSize < TemporalProfTraceReservoirSize) {
    // Simply append the trace if we have not yet hit our reservoir size limit.
    TemporalProfTraces.push_back(std::move(Trace));
  } else {
    // Otherwise, replace a random trace in the stream.
    std::uniform_int_distribution<uint64_t> Distribution(
        0, TemporalProfTraceStreamSize);
    uint64_t RandomIndex = Distribution(RNG);
    if (RandomIndex < TemporalProfTraces.size())
      TemporalProfTraces[RandomIndex] = std::move(Trace);
  }
  ++TemporalProfTraceStreamSize;
}

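// The function above implements reservoir sampling over the trace stream:
// once the reservoir is full, a newly seen trace replaces a random existing
// one with probability ReservoirSize / (StreamSize + 1), so every trace in
// the stream has roughly the same chance of being retained.
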
void InstrProfWriter::addTemporalProfileTraces(
    SmallVectorImpl<TemporalProfTraceTy> &SrcTraces, uint64_t SrcStreamSize) {
  for (auto &Trace : SrcTraces)
    if (Trace.FunctionNameRefs.size() > MaxTemporalProfTraceLength)
      Trace.FunctionNameRefs.resize(MaxTemporalProfTraceLength);
  llvm::erase_if(SrcTraces, [](auto &T) { return T.FunctionNameRefs.empty(); });
  // Assume that the source has the same reservoir size as the destination to
  // avoid needing to record it in the indexed profile format.
  bool IsDestSampled =
      (TemporalProfTraceStreamSize > TemporalProfTraceReservoirSize);
  bool IsSrcSampled = (SrcStreamSize > TemporalProfTraceReservoirSize);
  if (!IsDestSampled && IsSrcSampled) {
    // If only one of the two streams is sampled, ensure that it is Dest.
    std::swap(TemporalProfTraces, SrcTraces);
    std::swap(TemporalProfTraceStreamSize, SrcStreamSize);
    std::swap(IsDestSampled, IsSrcSampled);
  }
  if (!IsSrcSampled) {
    // If the source stream is not sampled, we add each source trace normally.
    for (auto &Trace : SrcTraces)
      addTemporalProfileTrace(std::move(Trace));
    return;
  }
  // Otherwise, we find the traces that would have been removed if we added
  // the whole source stream.
  SmallSetVector<uint64_t, 8> IndicesToReplace;
  for (uint64_t I = 0; I < SrcStreamSize; I++) {
    std::uniform_int_distribution<uint64_t> Distribution(
        0, TemporalProfTraceStreamSize);
    uint64_t RandomIndex = Distribution(RNG);
    if (RandomIndex < TemporalProfTraces.size())
      IndicesToReplace.insert(RandomIndex);
    ++TemporalProfTraceStreamSize;
  }
  // Then we insert a random sample of the source traces.
  llvm::shuffle(SrcTraces.begin(), SrcTraces.end(), RNG);
  for (const auto &[Index, Trace] : llvm::zip(IndicesToReplace, SrcTraces))
    TemporalProfTraces[Index] = std::move(Trace);
}

void InstrProfWriter::mergeRecordsFromWriter(InstrProfWriter &&IPW,
                                             function_ref<void(Error)> Warn) {
  for (auto &I : IPW.FunctionData)
    for (auto &Func : I.getValue())
      addRecord(I.getKey(), Func.first, std::move(Func.second), 1, Warn);

  BinaryIds.reserve(BinaryIds.size() + IPW.BinaryIds.size());
  for (auto &I : IPW.BinaryIds)
    addBinaryIds(I);

  addTemporalProfileTraces(IPW.TemporalProfTraces,
                           IPW.TemporalProfTraceStreamSize);

  MemProfData.Frames.reserve(IPW.MemProfData.Frames.size());
  for (auto &[FrameId, Frame] : IPW.MemProfData.Frames) {
    // If we weren't able to add the frame mappings then it doesn't make sense
    // to try to merge the records from this profile.
    if (!addMemProfFrame(FrameId, Frame, Warn))
      return;
  }

  MemProfData.CallStacks.reserve(IPW.MemProfData.CallStacks.size());
  for (auto &[CSId, CallStack] : IPW.MemProfData.CallStacks) {
    if (!addMemProfCallStack(CSId, CallStack, Warn))
      return;
  }

  MemProfData.Records.reserve(IPW.MemProfData.Records.size());
  for (auto &[GUID, Record] : IPW.MemProfData.Records) {
    addMemProfRecord(GUID, Record);
  }
}

bool InstrProfWriter::shouldEncodeData(const ProfilingData &PD) {
  if (!Sparse)
    return true;
  for (const auto &Func : PD) {
    const InstrProfRecord &IPR = Func.second;
    if (llvm::any_of(IPR.Counts, [](uint64_t Count) { return Count > 0; }))
      return true;
    if (llvm::any_of(IPR.BitmapBytes, [](uint8_t Byte) { return Byte > 0; }))
      return true;
  }
  return false;
}

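// In other words, in sparse mode (Sparse == true) a function is dropped from
// the output only if every counter and every bitmap byte is zero, i.e. it was
// never executed in any of the merged runs.
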
static void setSummary(IndexedInstrProf::Summary *TheSummary,
                       ProfileSummary &PS) {
  using namespace IndexedInstrProf;

  const std::vector<ProfileSummaryEntry> &Res = PS.getDetailedSummary();
  TheSummary->NumSummaryFields = Summary::NumKinds;
  TheSummary->NumCutoffEntries = Res.size();
  TheSummary->set(Summary::MaxFunctionCount, PS.getMaxFunctionCount());
  TheSummary->set(Summary::MaxBlockCount, PS.getMaxCount());
  TheSummary->set(Summary::MaxInternalBlockCount, PS.getMaxInternalCount());
  TheSummary->set(Summary::TotalBlockCount, PS.getTotalCount());
  TheSummary->set(Summary::TotalNumBlocks, PS.getNumCounts());
  TheSummary->set(Summary::TotalNumFunctions, PS.getNumFunctions());
  for (unsigned I = 0; I < Res.size(); I++)
    TheSummary->setEntry(I, Res[I]);
}

// Serialize Schema.
static void writeMemProfSchema(ProfOStream &OS,
                               const memprof::MemProfSchema &Schema) {
  OS.write(static_cast<uint64_t>(Schema.size()));
  for (const auto Id : Schema)
    OS.write(static_cast<uint64_t>(Id));
}

// Serialize MemProfRecordData. Return RecordTableOffset.
static uint64_t writeMemProfRecords(
    ProfOStream &OS,
    llvm::MapVector<GlobalValue::GUID, memprof::IndexedMemProfRecord>
        &MemProfRecordData,
    memprof::MemProfSchema *Schema, memprof::IndexedVersion Version,
    llvm::DenseMap<memprof::CallStackId, memprof::LinearCallStackId>
        *MemProfCallStackIndexes = nullptr) {
  memprof::RecordWriterTrait RecordWriter(Schema, Version,
                                          MemProfCallStackIndexes);
  OnDiskChainedHashTableGenerator<memprof::RecordWriterTrait>
      RecordTableGenerator;
  for (auto &[GUID, Record] : MemProfRecordData) {
    // Insert the key (func hash) and value (memprof record).
    RecordTableGenerator.insert(GUID, Record, RecordWriter);
  }
  // Release the memory of this MapVector as it is no longer needed.
  MemProfRecordData.clear();

  // The call to Emit invokes RecordWriterTrait::EmitData which destructs
  // the memprof record copies owned by the RecordTableGenerator. This works
  // because the RecordTableGenerator is not used after this point.
  return RecordTableGenerator.Emit(OS.OS, RecordWriter);
}

// Serialize MemProfFrameData. Return FrameTableOffset.
static uint64_t writeMemProfFrames(
    ProfOStream &OS,
    llvm::MapVector<memprof::FrameId, memprof::Frame> &MemProfFrameData) {
  OnDiskChainedHashTableGenerator<memprof::FrameWriterTrait>
      FrameTableGenerator;
  for (auto &[FrameId, Frame] : MemProfFrameData) {
    // Insert the key (frame id) and value (frame contents).
    FrameTableGenerator.insert(FrameId, Frame);
  }
  // Release the memory of this MapVector as it is no longer needed.
  MemProfFrameData.clear();

  return FrameTableGenerator.Emit(OS.OS);
}

// Serialize MemProfFrameData. Return the mapping from FrameIds to their
// indexes within the frame array.
static llvm::DenseMap<memprof::FrameId, memprof::LinearFrameId>
writeMemProfFrameArray(
    ProfOStream &OS,
    llvm::MapVector<memprof::FrameId, memprof::Frame> &MemProfFrameData,
    llvm::DenseMap<memprof::FrameId, memprof::FrameStat> &FrameHistogram) {
  // Mappings from FrameIds to array indexes.
  llvm::DenseMap<memprof::FrameId, memprof::LinearFrameId> MemProfFrameIndexes;

  // Compute the order in which we serialize Frames. The order does not matter
  // in terms of correctness, but we still compute it for deserialization
  // performance. Specifically, if we serialize frequently used Frames one
  // after another, we have better cache utilization. For two Frames that
  // appear equally frequently, we break the tie by serializing the one that
  // tends to appear earlier in call stacks. We implement the tie-breaking
  // mechanism by computing the sum of indexes within call stacks for each
  // Frame. If we still have a tie, we fall back to comparing the FrameIds
  // themselves, purely for the stability of the output.
  std::vector<std::pair<memprof::FrameId, const memprof::Frame *>> FrameIdOrder;
  FrameIdOrder.reserve(MemProfFrameData.size());
  for (const auto &[Id, Frame] : MemProfFrameData)
    FrameIdOrder.emplace_back(Id, &Frame);
  assert(MemProfFrameData.size() == FrameIdOrder.size());
  llvm::sort(FrameIdOrder,
             [&](const std::pair<memprof::FrameId, const memprof::Frame *> &L,
                 const std::pair<memprof::FrameId, const memprof::Frame *> &R) {
               const auto &SL = FrameHistogram[L.first];
               const auto &SR = FrameHistogram[R.first];
               // Popular FrameIds should come first.
               if (SL.Count != SR.Count)
                 return SL.Count > SR.Count;
               // If they are equally popular, then the one that tends to
               // appear earlier in call stacks should come first.
               if (SL.PositionSum != SR.PositionSum)
                 return SL.PositionSum < SR.PositionSum;
               // Compare their FrameIds for sort stability.
               return L.first < R.first;
             });

  // Serialize all frames while creating mappings from FrameIds to their
  // linear indexes.
  uint64_t Index = 0;
  MemProfFrameIndexes.reserve(FrameIdOrder.size());
  for (const auto &[Id, F] : FrameIdOrder) {
    F->serialize(OS.OS);
    MemProfFrameIndexes.insert({Id, Index});
    ++Index;
  }
  assert(MemProfFrameData.size() == Index);
  assert(MemProfFrameData.size() == MemProfFrameIndexes.size());

  // Release the memory of this MapVector as it is no longer needed.
  MemProfFrameData.clear();

  return MemProfFrameIndexes;
}

static uint64_t writeMemProfCallStacks(
    ProfOStream &OS,
    llvm::MapVector<memprof::CallStackId, llvm::SmallVector<memprof::FrameId>>
        &MemProfCallStackData) {
  OnDiskChainedHashTableGenerator<memprof::CallStackWriterTrait>
      CallStackTableGenerator;
  for (auto &[CSId, CallStack] : MemProfCallStackData)
    CallStackTableGenerator.insert(CSId, CallStack);
  // Release the memory of this vector as it is no longer needed.
  MemProfCallStackData.clear();

  return CallStackTableGenerator.Emit(OS.OS);
}

static llvm::DenseMap<memprof::CallStackId, memprof::LinearCallStackId>
writeMemProfCallStackArray(
    ProfOStream &OS,
    llvm::MapVector<memprof::CallStackId, llvm::SmallVector<memprof::FrameId>>
        &MemProfCallStackData,
    llvm::DenseMap<memprof::FrameId, memprof::LinearFrameId>
        &MemProfFrameIndexes,
    llvm::DenseMap<memprof::FrameId, memprof::FrameStat> &FrameHistogram,
    unsigned &NumElements) {
  llvm::DenseMap<memprof::CallStackId, memprof::LinearCallStackId>
      MemProfCallStackIndexes;

  memprof::CallStackRadixTreeBuilder<memprof::FrameId> Builder;
  Builder.build(std::move(MemProfCallStackData), MemProfFrameIndexes,
                FrameHistogram);
  for (auto I : Builder.getRadixArray())
    OS.write32(I);
  NumElements = Builder.getRadixArray().size();
  MemProfCallStackIndexes = Builder.takeCallStackPos();

  // Release the memory of this vector as it is no longer needed.
  MemProfCallStackData.clear();

  return MemProfCallStackIndexes;
}

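// A note on what the radix-tree encoding above buys us: call stacks that
// share a common run of frames can share storage in the radix array, and each
// call stack is afterwards referred to by its 32-bit position in that array
// (LinearCallStackId) rather than by its 64-bit CallStackId hash.
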
// Write out MemProf Version1 as follows:
// uint64_t Version (NEW in V1)
// uint64_t RecordTableOffset = RecordTableGenerator.Emit
// uint64_t FramePayloadOffset = Offset for the frame payload
// uint64_t FrameTableOffset = FrameTableGenerator.Emit
// uint64_t Num schema entries
// uint64_t Schema entry 0
// uint64_t Schema entry 1
// ....
// uint64_t Schema entry N - 1
// OnDiskChainedHashTable MemProfRecordData
// OnDiskChainedHashTable MemProfFrameData
static Error writeMemProfV1(ProfOStream &OS,
                            memprof::IndexedMemProfData &MemProfData) {
  OS.write(memprof::Version1);
  uint64_t HeaderUpdatePos = OS.tell();
  OS.write(0ULL); // Reserve space for the memprof record table offset.
  OS.write(0ULL); // Reserve space for the memprof frame payload offset.
  OS.write(0ULL); // Reserve space for the memprof frame table offset.

  auto Schema = memprof::getFullSchema();
  writeMemProfSchema(OS, Schema);

  uint64_t RecordTableOffset =
      writeMemProfRecords(OS, MemProfData.Records, &Schema, memprof::Version1);

  uint64_t FramePayloadOffset = OS.tell();
  uint64_t FrameTableOffset = writeMemProfFrames(OS, MemProfData.Frames);

  uint64_t Header[] = {RecordTableOffset, FramePayloadOffset, FrameTableOffset};
  OS.patch({{HeaderUpdatePos, Header}});

  return Error::success();
}

// Write out MemProf Version2 as follows:
// uint64_t Version
// uint64_t RecordTableOffset = RecordTableGenerator.Emit
// uint64_t FramePayloadOffset = Offset for the frame payload
// uint64_t FrameTableOffset = FrameTableGenerator.Emit
// uint64_t CallStackPayloadOffset = Offset for the call stack payload (NEW V2)
// uint64_t CallStackTableOffset = CallStackTableGenerator.Emit (NEW in V2)
// uint64_t Num schema entries
// uint64_t Schema entry 0
// uint64_t Schema entry 1
// ....
// uint64_t Schema entry N - 1
// OnDiskChainedHashTable MemProfRecordData
// OnDiskChainedHashTable MemProfFrameData
// OnDiskChainedHashTable MemProfCallStackData (NEW in V2)
static Error writeMemProfV2(ProfOStream &OS,
                            memprof::IndexedMemProfData &MemProfData,
                            bool MemProfFullSchema) {
  OS.write(memprof::Version2);
  uint64_t HeaderUpdatePos = OS.tell();
  OS.write(0ULL); // Reserve space for the memprof record table offset.
  OS.write(0ULL); // Reserve space for the memprof frame payload offset.
  OS.write(0ULL); // Reserve space for the memprof frame table offset.
  OS.write(0ULL); // Reserve space for the memprof call stack payload offset.
  OS.write(0ULL); // Reserve space for the memprof call stack table offset.

  auto Schema = memprof::getHotColdSchema();
  if (MemProfFullSchema)
    Schema = memprof::getFullSchema();
  writeMemProfSchema(OS, Schema);

  uint64_t RecordTableOffset =
      writeMemProfRecords(OS, MemProfData.Records, &Schema, memprof::Version2);

  uint64_t FramePayloadOffset = OS.tell();
  uint64_t FrameTableOffset = writeMemProfFrames(OS, MemProfData.Frames);

  uint64_t CallStackPayloadOffset = OS.tell();
  uint64_t CallStackTableOffset =
      writeMemProfCallStacks(OS, MemProfData.CallStacks);

  uint64_t Header[] = {
      RecordTableOffset,      FramePayloadOffset,   FrameTableOffset,
      CallStackPayloadOffset, CallStackTableOffset,
  };
  OS.patch({{HeaderUpdatePos, Header}});

  return Error::success();
}

// Write out MemProf Version3 as follows:
// uint64_t Version
// uint64_t CallStackPayloadOffset = Offset for the call stack payload
// uint64_t RecordPayloadOffset = Offset for the record payload
// uint64_t RecordTableOffset = RecordTableGenerator.Emit
// uint64_t Num schema entries
// uint64_t Schema entry 0
// uint64_t Schema entry 1
// ....
// uint64_t Schema entry N - 1
// Frames serialized one after another
// Call stacks encoded as a radix tree
// OnDiskChainedHashTable MemProfRecordData
static Error writeMemProfV3(ProfOStream &OS,
                            memprof::IndexedMemProfData &MemProfData,
                            bool MemProfFullSchema) {
  OS.write(memprof::Version3);
  uint64_t HeaderUpdatePos = OS.tell();
  OS.write(0ULL); // Reserve space for the memprof call stack payload offset.
  OS.write(0ULL); // Reserve space for the memprof record payload offset.
  OS.write(0ULL); // Reserve space for the memprof record table offset.

  auto Schema = memprof::getHotColdSchema();
  if (MemProfFullSchema)
    Schema = memprof::getFullSchema();
  writeMemProfSchema(OS, Schema);

  llvm::DenseMap<memprof::FrameId, memprof::FrameStat> FrameHistogram =
      memprof::computeFrameHistogram(MemProfData.CallStacks);
  assert(MemProfData.Frames.size() == FrameHistogram.size());

  llvm::DenseMap<memprof::FrameId, memprof::LinearFrameId> MemProfFrameIndexes =
      writeMemProfFrameArray(OS, MemProfData.Frames, FrameHistogram);

  uint64_t CallStackPayloadOffset = OS.tell();
  // The number of elements in the call stack array.
  unsigned NumElements = 0;
  llvm::DenseMap<memprof::CallStackId, memprof::LinearCallStackId>
      MemProfCallStackIndexes =
          writeMemProfCallStackArray(OS, MemProfData.CallStacks,
                                     MemProfFrameIndexes, FrameHistogram,
                                     NumElements);

  uint64_t RecordPayloadOffset = OS.tell();
  uint64_t RecordTableOffset =
      writeMemProfRecords(OS, MemProfData.Records, &Schema, memprof::Version3,
                          &MemProfCallStackIndexes);

  // IndexedMemProfReader::deserializeV3 computes the number of elements in the
  // call stack array from the difference between CallStackPayloadOffset and
  // RecordPayloadOffset. Verify that the computation works.
  assert(CallStackPayloadOffset +
             NumElements * sizeof(memprof::LinearFrameId) ==
         RecordPayloadOffset);

  uint64_t Header[] = {
      CallStackPayloadOffset,
      RecordPayloadOffset,
      RecordTableOffset,
  };
  OS.patch({{HeaderUpdatePos, Header}});

  return Error::success();
}

// Write out the MemProf data in a requested version.
static Error writeMemProf(ProfOStream &OS,
                          memprof::IndexedMemProfData &MemProfData,
                          memprof::IndexedVersion MemProfVersionRequested,
                          bool MemProfFullSchema) {
  switch (MemProfVersionRequested) {
  case memprof::Version1:
    return writeMemProfV1(OS, MemProfData);
  case memprof::Version2:
    return writeMemProfV2(OS, MemProfData, MemProfFullSchema);
  case memprof::Version3:
    return writeMemProfV3(OS, MemProfData, MemProfFullSchema);
  }

  return make_error<InstrProfError>(
      instrprof_error::unsupported_version,
      formatv("MemProf version {} not supported; "
              "requires version between {} and {}, inclusive",
              MemProfVersionRequested, memprof::MinimumSupportedVersion,
              memprof::MaximumSupportedVersion));
}

uint64_t InstrProfWriter::writeHeader(const IndexedInstrProf::Header &Header,
                                      const bool WritePrevVersion,
                                      ProfOStream &OS) {
  // Only write out the first four fields.
  for (int I = 0; I < 4; I++)
    OS.write(reinterpret_cast<const uint64_t *>(&Header)[I]);

  // Remember the offset of the remaining fields to allow back patching later.
  auto BackPatchStartOffset = OS.tell();

  // Reserve the space for back patching later.
  OS.write(0); // HashOffset
  OS.write(0); // MemProfOffset
  OS.write(0); // BinaryIdOffset
  OS.write(0); // TemporalProfTracesOffset
  if (!WritePrevVersion)
    OS.write(0); // VTableNamesOffset

  return BackPatchStartOffset;
}

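// The offset returned above is fed back into ProfOStream::patch() at the end
// of writeImpl(), once the real section offsets (hash table, MemProf, binary
// IDs, temporal traces and, for the current version, vtable names) are known.
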
Error InstrProfWriter::writeVTableNames(ProfOStream &OS) {
  std::vector<std::string> VTableNameStrs;
  for (StringRef VTableName : VTableNames.keys())
    VTableNameStrs.push_back(VTableName.str());

  std::string CompressedVTableNames;
  if (!VTableNameStrs.empty())
    if (Error E = collectGlobalObjectNameStrings(
            VTableNameStrs, compression::zlib::isAvailable(),
            CompressedVTableNames))
      return E;

  const uint64_t CompressedStringLen = CompressedVTableNames.length();

  // Record the length of compressed string.
  OS.write(CompressedStringLen);

  // Write the chars in compressed strings.
  for (auto &c : CompressedVTableNames)
    OS.writeByte(static_cast<uint8_t>(c));

  // Pad up to a multiple of 8.
  // InstrProfReader could read bytes according to 'CompressedStringLen'.
  const uint64_t PaddedLength = alignTo(CompressedStringLen, 8);

  for (uint64_t K = CompressedStringLen; K < PaddedLength; K++)
    OS.writeByte(0);

  return Error::success();
}

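// Worked example of the padding rule above: a 13-byte compressed blob is
// emitted as the length 13, then 13 data bytes, then 3 zero bytes, so the
// following section starts on an 8-byte boundary.
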
Error InstrProfWriter::writeImpl(ProfOStream &OS) {
  using namespace IndexedInstrProf;
  using namespace support;

  OnDiskChainedHashTableGenerator<InstrProfRecordWriterTrait> Generator;

  InstrProfSummaryBuilder ISB(ProfileSummaryBuilder::DefaultCutoffs);
  InfoObj->SummaryBuilder = &ISB;
  InstrProfSummaryBuilder CSISB(ProfileSummaryBuilder::DefaultCutoffs);
  InfoObj->CSSummaryBuilder = &CSISB;

  // Populate the hash table generator.
  SmallVector<std::pair<StringRef, const ProfilingData *>> OrderedData;
  for (const auto &I : FunctionData)
    if (shouldEncodeData(I.getValue()))
      OrderedData.emplace_back((I.getKey()), &I.getValue());
  llvm::sort(OrderedData, less_first());
  for (const auto &I : OrderedData)
    Generator.insert(I.first, I.second);

  // Write the header.
  IndexedInstrProf::Header Header;
  Header.Version = WritePrevVersion
                       ? IndexedInstrProf::ProfVersion::Version11
                       : IndexedInstrProf::ProfVersion::CurrentVersion;
  // The WritePrevVersion handling will either need to be removed or updated
  // if the version is advanced beyond 12.
  static_assert(IndexedInstrProf::ProfVersion::CurrentVersion ==
                IndexedInstrProf::ProfVersion::Version12);
  if (static_cast<bool>(ProfileKind & InstrProfKind::IRInstrumentation))
    Header.Version |= VARIANT_MASK_IR_PROF;
  if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive))
    Header.Version |= VARIANT_MASK_CSIR_PROF;
  if (static_cast<bool>(ProfileKind &
                        InstrProfKind::FunctionEntryInstrumentation))
    Header.Version |= VARIANT_MASK_INSTR_ENTRY;
  if (static_cast<bool>(ProfileKind & InstrProfKind::SingleByteCoverage))
    Header.Version |= VARIANT_MASK_BYTE_COVERAGE;
  if (static_cast<bool>(ProfileKind & InstrProfKind::FunctionEntryOnly))
    Header.Version |= VARIANT_MASK_FUNCTION_ENTRY_ONLY;
  if (static_cast<bool>(ProfileKind & InstrProfKind::MemProf))
    Header.Version |= VARIANT_MASK_MEMPROF;
  if (static_cast<bool>(ProfileKind & InstrProfKind::TemporalProfile))
    Header.Version |= VARIANT_MASK_TEMPORAL_PROF;

  const uint64_t BackPatchStartOffset =
      writeHeader(Header, WritePrevVersion, OS);

  // Reserve space to write profile summary data.
  uint32_t NumEntries = ProfileSummaryBuilder::DefaultCutoffs.size();
  uint32_t SummarySize = Summary::getSize(Summary::NumKinds, NumEntries);
  // Remember the summary offset.
  uint64_t SummaryOffset = OS.tell();
  for (unsigned I = 0; I < SummarySize / sizeof(uint64_t); I++)
    OS.write(0);
  uint64_t CSSummaryOffset = 0;
  uint64_t CSSummarySize = 0;
  if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive)) {
    CSSummaryOffset = OS.tell();
    CSSummarySize = SummarySize / sizeof(uint64_t);
    for (unsigned I = 0; I < CSSummarySize; I++)
      OS.write(0);
  }

  // Write the hash table.
  uint64_t HashTableStart = Generator.Emit(OS.OS, *InfoObj);

  // Write the MemProf profile data if we have it.
  uint64_t MemProfSectionStart = 0;
  if (static_cast<bool>(ProfileKind & InstrProfKind::MemProf)) {
    MemProfSectionStart = OS.tell();
    if (auto E = writeMemProf(OS, MemProfData, MemProfVersionRequested,
                              MemProfFullSchema))
      return E;
  }

  // BinaryIdSection has two parts:
  // 1. uint64_t BinaryIdsSectionSize
  // 2. list of binary ids that consist of:
  //    a. uint64_t BinaryIdLength
  //    b. uint8_t  BinaryIdData
  //    c. uint8_t  Padding (if necessary)
  uint64_t BinaryIdSectionStart = OS.tell();
  // Calculate size of binary section.
  uint64_t BinaryIdsSectionSize = 0;

  // Remove duplicate binary ids.
  llvm::sort(BinaryIds);
  BinaryIds.erase(llvm::unique(BinaryIds), BinaryIds.end());

  for (const auto &BI : BinaryIds) {
    // Increment by binary id length data type size.
    BinaryIdsSectionSize += sizeof(uint64_t);
    // Increment by binary id data length, aligned to 8 bytes.
    BinaryIdsSectionSize += alignToPowerOf2(BI.size(), sizeof(uint64_t));
  }
  // Write binary ids section size.
  OS.write(BinaryIdsSectionSize);

  for (const auto &BI : BinaryIds) {
    uint64_t BILen = BI.size();
    // Write binary id length.
    OS.write(BILen);
    // Write binary id data.
    for (unsigned K = 0; K < BILen; K++)
      OS.writeByte(BI[K]);
    // Write padding if necessary.
    uint64_t PaddingSize = alignToPowerOf2(BILen, sizeof(uint64_t)) - BILen;
    for (unsigned K = 0; K < PaddingSize; K++)
      OS.writeByte(0);
  }

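  // For example, a 20-byte build ID contributes 8 (length field) + 24 (its 20
  // data bytes plus 4 padding bytes, rounded up to a multiple of 8) = 32
  // bytes to BinaryIdsSectionSize.
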
  uint64_t VTableNamesSectionStart = OS.tell();

  if (!WritePrevVersion)
    if (Error E = writeVTableNames(OS))
      return E;

  uint64_t TemporalProfTracesSectionStart = 0;
  if (static_cast<bool>(ProfileKind & InstrProfKind::TemporalProfile)) {
    TemporalProfTracesSectionStart = OS.tell();
    OS.write(TemporalProfTraces.size());
    OS.write(TemporalProfTraceStreamSize);
    for (auto &Trace : TemporalProfTraces) {
      OS.write(Trace.Weight);
      OS.write(Trace.FunctionNameRefs.size());
      for (auto &NameRef : Trace.FunctionNameRefs)
        OS.write(NameRef);
    }
  }

  // Allocate space for data to be serialized out.
  std::unique_ptr<IndexedInstrProf::Summary> TheSummary =
      IndexedInstrProf::allocSummary(SummarySize);
  // Compute the Summary and copy the data to the data
  // structure to be serialized out (to disk or buffer).
  std::unique_ptr<ProfileSummary> PS = ISB.getSummary();
  setSummary(TheSummary.get(), *PS);
  InfoObj->SummaryBuilder = nullptr;

  // For Context Sensitive summary.
  std::unique_ptr<IndexedInstrProf::Summary> TheCSSummary = nullptr;
  if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive)) {
    TheCSSummary = IndexedInstrProf::allocSummary(SummarySize);
    std::unique_ptr<ProfileSummary> CSPS = CSISB.getSummary();
    setSummary(TheCSSummary.get(), *CSPS);
  }
  InfoObj->CSSummaryBuilder = nullptr;

  SmallVector<uint64_t, 8> HeaderOffsets = {HashTableStart, MemProfSectionStart,
                                            BinaryIdSectionStart,
                                            TemporalProfTracesSectionStart};
  if (!WritePrevVersion)
    HeaderOffsets.push_back(VTableNamesSectionStart);

  PatchItem PatchItems[] = {
      // Patch the Header fields
      {BackPatchStartOffset, HeaderOffsets},
      // Patch the summary data.
      {SummaryOffset,
       ArrayRef<uint64_t>(reinterpret_cast<uint64_t *>(TheSummary.get()),
                          SummarySize / sizeof(uint64_t))},
      {CSSummaryOffset,
       ArrayRef<uint64_t>(reinterpret_cast<uint64_t *>(TheCSSummary.get()),
                          CSSummarySize)}};

  OS.patch(PatchItems);

  for (const auto &I : FunctionData)
    for (const auto &F : I.getValue())
      if (Error E = validateRecord(F.second))
        return E;

  return Error::success();
}

Error InstrProfWriter::write(raw_fd_ostream &OS) {
  // Write the hash table.
  ProfOStream POS(OS);
  return writeImpl(POS);
}

Error InstrProfWriter::write(raw_string_ostream &OS) {
  ProfOStream POS(OS);
  return writeImpl(POS);
}

std::unique_ptr<MemoryBuffer> InstrProfWriter::writeBuffer() {
  std::string Data;
  raw_string_ostream OS(Data);
  // Write the hash table.
  if (Error E = write(OS))
    return nullptr;
  // Return this in an aligned memory buffer.
  return MemoryBuffer::getMemBufferCopy(Data);
}

static const char *ValueProfKindStr[] = {
#define VALUE_PROF_KIND(Enumerator, Value, Descr) #Enumerator,
#include "llvm/ProfileData/InstrProfData.inc"
};

Error InstrProfWriter::validateRecord(const InstrProfRecord &Func) {
  for (uint32_t VK = 0; VK <= IPVK_Last; VK++) {
    if (VK == IPVK_IndirectCallTarget || VK == IPVK_VTableTarget)
      continue;
    uint32_t NS = Func.getNumValueSites(VK);
    for (uint32_t S = 0; S < NS; S++) {
      DenseSet<uint64_t> SeenValues;
      for (const auto &V : Func.getValueArrayForSite(VK, S))
        if (!SeenValues.insert(V.Value).second)
          return make_error<InstrProfError>(instrprof_error::invalid_prof);
    }
  }

  return Error::success();
}

void InstrProfWriter::writeRecordInText(StringRef Name, uint64_t Hash,
                                        const InstrProfRecord &Func,
                                        InstrProfSymtab &Symtab,
                                        raw_fd_ostream &OS) {
  OS << Name << "\n";
  OS << "# Func Hash:\n" << Hash << "\n";
  OS << "# Num Counters:\n" << Func.Counts.size() << "\n";
  OS << "# Counter Values:\n";
  for (uint64_t Count : Func.Counts)
    OS << Count << "\n";

  if (Func.BitmapBytes.size() > 0) {
    OS << "# Num Bitmap Bytes:\n$" << Func.BitmapBytes.size() << "\n";
    OS << "# Bitmap Byte Values:\n";
    for (uint8_t Byte : Func.BitmapBytes) {
      OS << "0x";
      OS.write_hex(Byte);
      OS << "\n";
    }
    OS << "\n";
  }

  uint32_t NumValueKinds = Func.getNumValueKinds();
  if (!NumValueKinds) {
    OS << "\n";
    return;
  }

  OS << "# Num Value Kinds:\n" << Func.getNumValueKinds() << "\n";
  for (uint32_t VK = 0; VK < IPVK_Last + 1; VK++) {
    uint32_t NS = Func.getNumValueSites(VK);
    if (!NS)
      continue;
    OS << "# ValueKind = " << ValueProfKindStr[VK] << ":\n" << VK << "\n";
    OS << "# NumValueSites:\n" << NS << "\n";
    for (uint32_t S = 0; S < NS; S++) {
      auto VD = Func.getValueArrayForSite(VK, S);
      OS << VD.size() << "\n";
      for (const auto &V : VD) {
        if (VK == IPVK_IndirectCallTarget || VK == IPVK_VTableTarget)
          OS << Symtab.getFuncOrVarNameIfDefined(V.Value) << ":" << V.Count
             << "\n";
        else
          OS << V.Value << ":" << V.Count << "\n";
      }
    }
  }

  OS << "\n";
}

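// For illustration only (the name and numbers below are invented), a record
// with two counters and no value profile data comes out as:
//   _Z3foov
//   # Func Hash:
//   1234
//   # Num Counters:
//   2
//   # Counter Values:
//   100
//   50
// followed by a blank line separating it from the next record.
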
Error InstrProfWriter::writeText(raw_fd_ostream &OS) {
  // Check CS first since it implies an IR level profile.
  if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive))
    OS << "# CSIR level Instrumentation Flag\n:csir\n";
  else if (static_cast<bool>(ProfileKind & InstrProfKind::IRInstrumentation))
    OS << "# IR level Instrumentation Flag\n:ir\n";

  if (static_cast<bool>(ProfileKind &
                        InstrProfKind::FunctionEntryInstrumentation))
    OS << "# Always instrument the function entry block\n:entry_first\n";
  if (static_cast<bool>(ProfileKind & InstrProfKind::SingleByteCoverage))
    OS << "# Instrument block coverage\n:single_byte_coverage\n";
  InstrProfSymtab Symtab;

  using FuncPair = detail::DenseMapPair<uint64_t, InstrProfRecord>;
  using RecordType = std::pair<StringRef, FuncPair>;
  SmallVector<RecordType, 4> OrderedFuncData;

  for (const auto &I : FunctionData) {
    if (shouldEncodeData(I.getValue())) {
      if (Error E = Symtab.addFuncName(I.getKey()))
        return E;
      for (const auto &Func : I.getValue())
        OrderedFuncData.push_back(std::make_pair(I.getKey(), Func));
    }
  }

  for (const auto &VTableName : VTableNames)
    if (Error E = Symtab.addVTableName(VTableName.getKey()))
      return E;

  if (static_cast<bool>(ProfileKind & InstrProfKind::TemporalProfile))
    writeTextTemporalProfTraceData(OS, Symtab);

  llvm::sort(OrderedFuncData, [](const RecordType &A, const RecordType &B) {
    return std::tie(A.first, A.second.first) <
           std::tie(B.first, B.second.first);
  });

  for (const auto &record : OrderedFuncData) {
    const StringRef &Name = record.first;
    const FuncPair &Func = record.second;
    writeRecordInText(Name, Func.first, Func.second, Symtab, OS);
  }

  for (const auto &record : OrderedFuncData) {
    const FuncPair &Func = record.second;
    if (Error E = validateRecord(Func.second))
      return E;
  }

  return Error::success();
}

void InstrProfWriter::writeTextTemporalProfTraceData(raw_fd_ostream &OS,
                                                     InstrProfSymtab &Symtab) {
  OS << ":temporal_prof_traces\n";
  OS << "# Num Temporal Profile Traces:\n" << TemporalProfTraces.size() << "\n";
  OS << "# Temporal Profile Trace Stream Size:\n"
     << TemporalProfTraceStreamSize << "\n";
  for (auto &Trace : TemporalProfTraces) {
    OS << "# Weight:\n" << Trace.Weight << "\n";
    for (auto &NameRef : Trace.FunctionNameRefs)
      OS << Symtab.getFuncOrVarName(NameRef) << ",";
    OS << "\n";
  }
  OS << "\n";
}