// compiler-rt/lib/memprof/memprof_rawprofile.cpp
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "memprof_rawprofile.h"
#include "profile/MemProfData.inc"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_array_ref.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stackdepotbase.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_vector.h"
namespace __memprof {
using ::__sanitizer::Vector;
using ::llvm::memprof::MemInfoBlock;
using SegmentEntry = ::llvm::memprof::SegmentEntry;
using Header = ::llvm::memprof::Header;

namespace {
// Copy a POD value into the buffer and return the pointer advanced past it.
template <class T> char *WriteBytes(const T &Pod, char *Buffer) {
  *(T *)Buffer = Pod;
  return Buffer + sizeof(T);
}

void RecordStackId(const uptr Key, UNUSED LockedMemInfoBlock *const &MIB,
                   void *Arg) {
  // No need to touch the MIB value here since we are only recording the key.
  auto *StackIds = reinterpret_cast<Vector<u64> *>(Arg);
  StackIds->PushBack(Key);
}
} // namespace
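
// A consumer parsing the raw profile can mirror WriteBytes with a matching
// helper. This is an illustrative sketch only (the name ReadBytes is
// hypothetical); nothing in this file uses it.
template <class T> const char *ReadBytes(T &Pod, const char *Buffer) {
  memcpy(&Pod, Buffer, sizeof(T));
  return Buffer + sizeof(T);
}
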
u64 SegmentSizeBytes(ArrayRef<LoadedModule> Modules) {
  u64 NumSegmentsToRecord = 0;
  for (const auto &Module : Modules) {
    for (const auto &Segment : Module.ranges()) {
      if (Segment.executable)
        NumSegmentsToRecord++;
    }
  }

  return sizeof(u64) // A header which stores the number of records.
         + sizeof(SegmentEntry) * NumSegmentsToRecord;
}
// The segment section uses the following format:
// ---------- Segment Info
// Num Entries
// ---------- Segment Entry
// Start
// End
// Offset
// UuidSize
// Uuid 32B
// ----------
// ...
void SerializeSegmentsToBuffer(ArrayRef<LoadedModule> Modules,
                               const u64 ExpectedNumBytes, char *&Buffer) {
  char *Ptr = Buffer;
  // Reserve space for the final count.
  Ptr += sizeof(u64);

  u64 NumSegmentsRecorded = 0;

  for (const auto &Module : Modules) {
    for (const auto &Segment : Module.ranges()) {
      if (Segment.executable) {
        SegmentEntry Entry(Segment.beg, Segment.end, Module.base_address());
        CHECK(Module.uuid_size() <= MEMPROF_BUILDID_MAX_SIZE);
        Entry.BuildIdSize = Module.uuid_size();
        memcpy(Entry.BuildId, Module.uuid(), Module.uuid_size());
        memcpy(Ptr, &Entry, sizeof(SegmentEntry));
        Ptr += sizeof(SegmentEntry);
        NumSegmentsRecorded++;
      }
    }
  }

  // Store the number of segments we recorded in the space we reserved.
  *((u64 *)Buffer) = NumSegmentsRecorded;
  CHECK(ExpectedNumBytes >= static_cast<u64>(Ptr - Buffer) &&
        "Expected num bytes != actual bytes written");
}
u64 StackSizeBytes(const Vector<u64> &StackIds) {
  u64 NumBytesToWrite = sizeof(u64);

  const u64 NumIds = StackIds.Size();
  for (unsigned k = 0; k < NumIds; ++k) {
    const u64 Id = StackIds[k];
    // One entry for the id and then one more for the number of stack pcs.
    NumBytesToWrite += 2 * sizeof(u64);
    const StackTrace St = StackDepotGet(Id);

    CHECK(St.trace != nullptr && St.size > 0 && "Empty stack trace");
    for (uptr i = 0; i < St.size && St.trace[i] != 0; i++) {
      NumBytesToWrite += sizeof(u64);
    }
  }
  return NumBytesToWrite;
}
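
// For example, two recorded stacks with 3 and 5 frames respectively require
//   8 (count) + 2 * 16 (id + frame count per stack) + (3 + 5) * 8 (PCs) = 104
// bytes, before the caller rounds the section up to an 8-byte multiple.
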
// The stack info section uses the following format:
//
// ---------- Stack Info
// Num Entries
// ---------- Stack Entry
// Num Stacks
// PC1
// PC2
// ...
// ----------
void SerializeStackToBuffer(const Vector<u64> &StackIds,
                            const u64 ExpectedNumBytes, char *&Buffer) {
  const u64 NumIds = StackIds.Size();
  char *Ptr = Buffer;
  Ptr = WriteBytes(static_cast<u64>(NumIds), Ptr);

  for (unsigned k = 0; k < NumIds; ++k) {
    const u64 Id = StackIds[k];
    Ptr = WriteBytes(Id, Ptr);
    Ptr += sizeof(u64); // Bump it by u64, we will fill this in later.
    u64 Count = 0;
    const StackTrace St = StackDepotGet(Id);
    for (uptr i = 0; i < St.size && St.trace[i] != 0; i++) {
      // PCs in stack traces are actually the return addresses, that is,
      // addresses of the next instructions after the call.
      uptr pc = StackTrace::GetPreviousInstructionPc(St.trace[i]);
      Ptr = WriteBytes(static_cast<u64>(pc), Ptr);
      ++Count;
    }
    // Store the count in the space we reserved earlier.
    *(u64 *)(Ptr - (Count + 1) * sizeof(u64)) = Count;
  }

  CHECK(ExpectedNumBytes >= static_cast<u64>(Ptr - Buffer) &&
        "Expected num bytes != actual bytes written");
}
// The MIB section has the following format:
// ---------- MIB Info
// Num Entries
// ---------- MIB Entry 0
// Alloc Count
// ...
// ---------- MIB Entry 1
// Alloc Count
// ...
// ----------
void SerializeMIBInfoToBuffer(MIBMapTy &MIBMap, const Vector<u64> &StackIds,
                              const u64 ExpectedNumBytes, char *&Buffer) {
  char *Ptr = Buffer;
  const u64 NumEntries = StackIds.Size();
  Ptr = WriteBytes(NumEntries, Ptr);

  for (u64 i = 0; i < NumEntries; i++) {
    const u64 Key = StackIds[i];
    MIBMapTy::Handle h(&MIBMap, Key, /*remove=*/true, /*create=*/false);
    CHECK(h.exists());
    Ptr = WriteBytes(Key, Ptr);
    Ptr = WriteBytes((*h)->mib, Ptr);
  }

  CHECK(ExpectedNumBytes >= static_cast<u64>(Ptr - Buffer) &&
        "Expected num bytes != actual bytes written");
}
// Format
// ---------- Header
// Magic
// Version
// Total Size
// Segment Offset
// MIB Info Offset
// Stack Offset
// ---------- Segment Info
// Num Entries
// ---------- Segment Entry
// Start
// End
// Offset
// BuildID 32B
// ----------
// ...
// ----------
// Optional Padding Bytes
// ---------- MIB Info
// Num Entries
// ---------- MIB Entry
// Alloc Count
// ...
// ----------
// Optional Padding Bytes
// ---------- Stack Info
// Num Entries
// ---------- Stack Entry
// Num Stacks
// PC1
// PC2
// ...
// ----------
// Optional Padding Bytes
// ...
u64 SerializeToRawProfile(MIBMapTy &MIBMap, ArrayRef<LoadedModule> Modules,
                          char *&Buffer) {
  // Each section size is rounded up to 8b since the first entry in each section
  // is a u64 which holds the number of entries in the section by convention.
  const u64 NumSegmentBytes = RoundUpTo(SegmentSizeBytes(Modules), 8);

  Vector<u64> StackIds;
  MIBMap.ForEach(RecordStackId, reinterpret_cast<void *>(&StackIds));
  // The first 8b are for the total number of MIB records. Each MIB record is
  // preceded by an 8b stack id which is associated with stack frames in the next
  // section.
  const u64 NumMIBInfoBytes = RoundUpTo(
      sizeof(u64) + StackIds.Size() * (sizeof(u64) + sizeof(MemInfoBlock)), 8);

  const u64 NumStackBytes = RoundUpTo(StackSizeBytes(StackIds), 8);

  // Ensure that the profile is 8b aligned. We allow for some optional padding
  // at the end so that any subsequent profile serialized to the same file does
  // not incur unaligned accesses.
  const u64 TotalSizeBytes = RoundUpTo(
      sizeof(Header) + NumSegmentBytes + NumStackBytes + NumMIBInfoBytes, 8);

  // Allocate the memory for the entire buffer incl. info blocks.
  Buffer = (char *)InternalAlloc(TotalSizeBytes);
  char *Ptr = Buffer;

  Header header{MEMPROF_RAW_MAGIC_64,
                MEMPROF_RAW_VERSION,
                static_cast<u64>(TotalSizeBytes),
                sizeof(Header),
                sizeof(Header) + NumSegmentBytes,
                sizeof(Header) + NumSegmentBytes + NumMIBInfoBytes};
  Ptr = WriteBytes(header, Ptr);

  SerializeSegmentsToBuffer(Modules, NumSegmentBytes, Ptr);
  Ptr += NumSegmentBytes;

  SerializeMIBInfoToBuffer(MIBMap, StackIds, NumMIBInfoBytes, Ptr);
  Ptr += NumMIBInfoBytes;

  SerializeStackToBuffer(StackIds, NumStackBytes, Ptr);

  return TotalSizeBytes;
}
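
// In the header written above, the segment section starts at sizeof(Header),
// the MIB section at sizeof(Header) + NumSegmentBytes, and the stack section
// at sizeof(Header) + NumSegmentBytes + NumMIBInfoBytes. Each section size is
// already rounded up to an 8-byte multiple, so a reader can seek directly to
// any section using the header offsets alone.
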
} // namespace __memprof