// llvm/unittests/ProfileData/MemProfTest.cpp

#include "llvm/ProfileData/MemProf.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/DebugInfo/DIContext.h"
#include "llvm/DebugInfo/Symbolize/SymbolizableModule.h"
#include "llvm/IR/Value.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/ProfileData/MemProfData.inc"
#include "llvm/ProfileData/RawMemProfReader.h"
#include "llvm/Support/raw_ostream.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"

#include <initializer_list>

namespace {

using ::llvm::DIGlobal;
using ::llvm::DIInliningInfo;
using ::llvm::DILineInfo;
using ::llvm::DILineInfoSpecifier;
using ::llvm::DILocal;
using ::llvm::StringRef;
using ::llvm::memprof::CallStackMap;
using ::llvm::memprof::Frame;
using ::llvm::memprof::FrameId;
using ::llvm::memprof::IndexedMemProfRecord;
using ::llvm::memprof::MemInfoBlock;
using ::llvm::memprof::MemProfReader;
using ::llvm::memprof::MemProfRecord;
using ::llvm::memprof::MemProfSchema;
using ::llvm::memprof::Meta;
using ::llvm::memprof::PortableMemInfoBlock;
using ::llvm::memprof::RawMemProfReader;
using ::llvm::memprof::SegmentEntry;
using ::llvm::object::SectionedAddress;
using ::llvm::symbolize::SymbolizableModule;
using ::testing::Return;

class MockSymbolizer : public SymbolizableModule {
public:
  MOCK_CONST_METHOD3(symbolizeInlinedCode,
                     DIInliningInfo(SectionedAddress, DILineInfoSpecifier,
                                    bool));
  // Most of the methods in the interface are unused. We only mock the
  // method that we expect to be called from the memprof reader.
  virtual DILineInfo symbolizeCode(SectionedAddress, DILineInfoSpecifier,
                                   bool) const {
    llvm_unreachable("unused");
  }
  virtual DIGlobal symbolizeData(SectionedAddress) const {
    llvm_unreachable("unused");
  }
  virtual std::vector<DILocal> symbolizeFrame(SectionedAddress) const {
    llvm_unreachable("unused");
  }
  virtual std::vector<SectionedAddress> findSymbol(StringRef Symbol,
                                                   uint64_t Offset) const {
    llvm_unreachable("unused");
  }
  virtual bool isWin32Module() const { llvm_unreachable("unused"); }
  virtual uint64_t getModulePreferredBase() const {
    llvm_unreachable("unused");
  }
};

struct MockInfo {
  std::string FunctionName;
  uint32_t Line;
  uint32_t StartLine;
  uint32_t Column;
  std::string FileName = "valid/path.cc";
};

DIInliningInfo makeInliningInfo(std::initializer_list<MockInfo> MockFrames) {
  DIInliningInfo Result;
  for (const auto &Item : MockFrames) {
    DILineInfo Frame;
    Frame.FunctionName = Item.FunctionName;
    Frame.Line = Item.Line;
    Frame.StartLine = Item.StartLine;
    Frame.Column = Item.Column;
    Frame.FileName = Item.FileName;
    Result.addFrame(Frame);
  }
  return Result;
}

llvm::SmallVector<SegmentEntry, 4> makeSegments() {
  llvm::SmallVector<SegmentEntry, 4> Result;
  // Mimic an entry for a non-position-independent executable.
  Result.emplace_back(0x0, 0x40000, 0x0);
  return Result;
}

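// All symbolizer calls in these tests use the specifier below. Requesting
// FunctionNameKind::LinkageName (mangled names) keeps symbol names consistent
// with the names GUIDs are computed from, and FileLineInfoKind::RawValue
// returns file names exactly as recorded in the debug info. (This note is an
// interpretation of the choice, not something the tests assert.)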
95 const DILineInfoSpecifier specifier() {
96 return DILineInfoSpecifier(
97 DILineInfoSpecifier::FileLineInfoKind::RawValue,
98 DILineInfoSpecifier::FunctionNameKind::LinkageName);
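// Matches a Frame against an expected function name, line offset, column, and
// inline bit. Note that LineOffset is relative to the start of the function,
// i.e. Line - StartLine: the mocked frame
// {"bar", /*Line=*/201, /*StartLine=*/150, /*Column=*/20} is expected below
// as FrameContains("bar", 51U, 20U, false).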
MATCHER_P4(FrameContains, FunctionName, LineOffset, Column, Inline, "") {
  const Frame &F = arg;

  const uint64_t ExpectedHash = IndexedMemProfRecord::getGUID(FunctionName);
  if (F.Function != ExpectedHash) {
    *result_listener << "Hash mismatch";
    return false;
  }
  if (F.SymbolName && *F.SymbolName != FunctionName) {
    *result_listener << "SymbolName mismatch\nWant: " << FunctionName
                     << "\nGot: " << *F.SymbolName;
    return false;
  }
  if (F.LineOffset == LineOffset && F.Column == Column &&
      F.IsInlineFrame == Inline) {
    return true;
  }
  *result_listener << "LineOffset, Column or Inline mismatch";
  return false;
}

MemProfSchema getFullSchema() {
  MemProfSchema Schema;
#define MIBEntryDef(NameTag, Name, Type) Schema.push_back(Meta::Name);
#include "llvm/ProfileData/MIBEntryDef.inc"
#undef MIBEntryDef
  return Schema;
}

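// For reference, the X-macro above expands to one push_back per MIB field,
// along the lines of (a sketch; the authoritative field list lives in
// MIBEntryDef.inc and may change between revisions):
//
//   Schema.push_back(Meta::AllocCount);
//   Schema.push_back(Meta::TotalAccessCount);
//   ... (one entry per field defined in MIBEntryDef.inc)
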
TEST(MemProf, FillsValue) {
  std::unique_ptr<MockSymbolizer> Symbolizer(new MockSymbolizer());

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x1000},
                                                specifier(), false))
      .Times(1) // Only once since we remember invalid PCs.
      .WillRepeatedly(Return(makeInliningInfo({
          {"new", 70, 57, 3, "memprof/memprof_new_delete.cpp"},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x2000},
                                                specifier(), false))
      .Times(1) // Only once since we cache the result for future lookups.
      .WillRepeatedly(Return(makeInliningInfo({
          {"foo", 10, 5, 30},
          {"bar", 201, 150, 20},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x3000},
                                                specifier(), false))
      .Times(1)
      .WillRepeatedly(Return(makeInliningInfo({
          {"xyz.llvm.123", 10, 5, 30},
          {"abc", 10, 5, 30},
      })));

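  // Raw call stacks are ordered leaf-first: 0x1000 symbolizes to the
  // allocator frame "new" (which the reader strips as a runtime frame, as
  // exercised by the SymbolizationFilter test below), 0x2000 to the inlined
  // pair foo -> bar, and 0x3000 to xyz -> abc, yielding the four-frame
  // stacks asserted below.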
  CallStackMap CSM;
  CSM[0x1] = {0x1000, 0x2000, 0x3000};

  llvm::MapVector<uint64_t, MemInfoBlock> Prof;
  Prof[0x1].AllocCount = 1;

  auto Seg = makeSegments();

  RawMemProfReader Reader(std::move(Symbolizer), Seg, Prof, CSM,
                          /*KeepName=*/true);

  llvm::DenseMap<llvm::GlobalValue::GUID, MemProfRecord> Records;
  for (const auto &Pair : Reader) {
    Records.insert({Pair.first, Pair.second});
  }

  // Mock program pseudocode and expected memprof record contents.
  //
  //                              AllocSite       CallSite
  // inline foo() { new(); }         Y               N
  // bar() { foo(); }                Y               Y
  // inline xyz() { bar(); }         N               Y
  // abc() { xyz(); }                N               Y

  // We expect 4 records. We attach alloc site data to foo and bar, i.e.
  // all frames bottom up until we find a non-inline frame. We attach call site
  // data to bar, xyz and abc.
  ASSERT_EQ(Records.size(), 4U);

  // Check the memprof record for foo.
  const llvm::GlobalValue::GUID FooId = IndexedMemProfRecord::getGUID("foo");
  ASSERT_EQ(Records.count(FooId), 1U);
  const MemProfRecord &Foo = Records[FooId];
  ASSERT_EQ(Foo.AllocSites.size(), 1U);
  EXPECT_EQ(Foo.AllocSites[0].Info.getAllocCount(), 1U);
  EXPECT_THAT(Foo.AllocSites[0].CallStack[0],
              FrameContains("foo", 5U, 30U, true));
  EXPECT_THAT(Foo.AllocSites[0].CallStack[1],
              FrameContains("bar", 51U, 20U, false));
  EXPECT_THAT(Foo.AllocSites[0].CallStack[2],
              FrameContains("xyz", 5U, 30U, true));
  EXPECT_THAT(Foo.AllocSites[0].CallStack[3],
              FrameContains("abc", 5U, 30U, false));
  EXPECT_TRUE(Foo.CallSites.empty());

  // Check the memprof record for bar.
  const llvm::GlobalValue::GUID BarId = IndexedMemProfRecord::getGUID("bar");
  ASSERT_EQ(Records.count(BarId), 1U);
  const MemProfRecord &Bar = Records[BarId];
  ASSERT_EQ(Bar.AllocSites.size(), 1U);
  EXPECT_EQ(Bar.AllocSites[0].Info.getAllocCount(), 1U);
  EXPECT_THAT(Bar.AllocSites[0].CallStack[0],
              FrameContains("foo", 5U, 30U, true));
  EXPECT_THAT(Bar.AllocSites[0].CallStack[1],
              FrameContains("bar", 51U, 20U, false));
  EXPECT_THAT(Bar.AllocSites[0].CallStack[2],
              FrameContains("xyz", 5U, 30U, true));
  EXPECT_THAT(Bar.AllocSites[0].CallStack[3],
              FrameContains("abc", 5U, 30U, false));

  ASSERT_EQ(Bar.CallSites.size(), 1U);
  ASSERT_EQ(Bar.CallSites[0].size(), 2U);
  EXPECT_THAT(Bar.CallSites[0][0], FrameContains("foo", 5U, 30U, true));
  EXPECT_THAT(Bar.CallSites[0][1], FrameContains("bar", 51U, 20U, false));

  // Check the memprof record for xyz.
  const llvm::GlobalValue::GUID XyzId = IndexedMemProfRecord::getGUID("xyz");
  ASSERT_EQ(Records.count(XyzId), 1U);
  const MemProfRecord &Xyz = Records[XyzId];
  ASSERT_EQ(Xyz.CallSites.size(), 1U);
  ASSERT_EQ(Xyz.CallSites[0].size(), 2U);
  // Expect the entire inlined call stack even though in practice we only
  // need the first entry here.
  EXPECT_THAT(Xyz.CallSites[0][0], FrameContains("xyz", 5U, 30U, true));
  EXPECT_THAT(Xyz.CallSites[0][1], FrameContains("abc", 5U, 30U, false));

  // Check the memprof record for abc.
  const llvm::GlobalValue::GUID AbcId = IndexedMemProfRecord::getGUID("abc");
  ASSERT_EQ(Records.count(AbcId), 1U);
  const MemProfRecord &Abc = Records[AbcId];
  EXPECT_TRUE(Abc.AllocSites.empty());
  ASSERT_EQ(Abc.CallSites.size(), 1U);
  ASSERT_EQ(Abc.CallSites[0].size(), 2U);
  EXPECT_THAT(Abc.CallSites[0][0], FrameContains("xyz", 5U, 30U, true));
  EXPECT_THAT(Abc.CallSites[0][1], FrameContains("abc", 5U, 30U, false));
}

TEST(MemProf, PortableWrapper) {
  MemInfoBlock Info(/*size=*/16, /*access_count=*/7, /*alloc_timestamp=*/1000,
                    /*dealloc_timestamp=*/2000, /*alloc_cpu=*/3,
                    /*dealloc_cpu=*/4);

  const auto Schema = getFullSchema();
  PortableMemInfoBlock WriteBlock(Info);

  std::string Buffer;
  llvm::raw_string_ostream OS(Buffer);
  WriteBlock.serialize(Schema, OS);
  OS.flush();

  PortableMemInfoBlock ReadBlock(
      Schema, reinterpret_cast<const unsigned char *>(Buffer.data()));

  EXPECT_EQ(ReadBlock, WriteBlock);
  // Here we compare directly with the actual counts instead of MemInfoBlock
  // members. Since the MemInfoBlock struct is packed and the EXPECT_EQ macros
  // take a reference to their parameters, comparing members directly would
  // result in unaligned accesses.
  EXPECT_EQ(1UL, ReadBlock.getAllocCount());
  EXPECT_EQ(7ULL, ReadBlock.getTotalAccessCount());
  EXPECT_EQ(3UL, ReadBlock.getAllocCpuId());
}

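// A sketch of the unaligned-access pitfall the comment above refers to
// (hypothetical code, not compiled here): EXPECT_EQ binds const references
// to its arguments, so something like
//
//   EXPECT_EQ(Info.AllocCount, ReadBlock.getAllocCount());
//
// would form a reference to a member of the packed MemInfoBlock; reading
// through a misaligned reference is undefined behavior on some targets.
// Comparing against plain integer literals sidesteps the problem.
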
TEST(MemProf, RecordSerializationRoundTrip) {
  const MemProfSchema Schema = getFullSchema();

  MemInfoBlock Info(/*size=*/16, /*access_count=*/7, /*alloc_timestamp=*/1000,
                    /*dealloc_timestamp=*/2000, /*alloc_cpu=*/3,
                    /*dealloc_cpu=*/4);

  llvm::SmallVector<llvm::SmallVector<FrameId>> AllocCallStacks = {
      {0x123, 0x345}, {0x123, 0x567}};

  llvm::SmallVector<llvm::SmallVector<FrameId>> CallSites = {{0x333, 0x777}};

  IndexedMemProfRecord Record;
  for (const auto &ACS : AllocCallStacks) {
    // Use the same info block for both allocation sites.
    Record.AllocSites.emplace_back(ACS, Info);
  }
  Record.CallSites.assign(CallSites);

  std::string Buffer;
  llvm::raw_string_ostream OS(Buffer);
  Record.serialize(Schema, OS);
  OS.flush();

  const IndexedMemProfRecord GotRecord = IndexedMemProfRecord::deserialize(
      Schema, reinterpret_cast<const unsigned char *>(Buffer.data()));

  EXPECT_EQ(Record, GotRecord);
}

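// Note that deserialize must be given the same schema that serialize was
// called with: the byte stream carries no per-field tags, so the schema is
// the only thing that says which fields are present and in what order. (In
// the on-disk indexed format the schema is recorded once up front; that is
// an assumption about the surrounding format, not exercised by this test.)
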
TEST(MemProf, SymbolizationFilter) {
  std::unique_ptr<MockSymbolizer> Symbolizer(new MockSymbolizer());

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x1000},
                                                specifier(), false))
      .Times(1) // once since we don't look up invalid PCs repeatedly.
      .WillRepeatedly(Return(makeInliningInfo({
          {"malloc", 70, 57, 3, "memprof/memprof_malloc_linux.cpp"},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x2000},
                                                specifier(), false))
      .Times(1) // once since we don't look up invalid PCs repeatedly.
      .WillRepeatedly(Return(makeInliningInfo({
          {"new", 70, 57, 3, "memprof/memprof_new_delete.cpp"},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x3000},
                                                specifier(), false))
      .Times(1) // once since we don't look up invalid PCs repeatedly.
      .WillRepeatedly(Return(makeInliningInfo({
          {DILineInfo::BadString, 0, 0, 0},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x4000},
                                                specifier(), false))
      .Times(1)
      .WillRepeatedly(Return(makeInliningInfo({
          {"foo", 10, 5, 30, "memprof/memprof_test_file.cpp"},
      })));

  EXPECT_CALL(*Symbolizer, symbolizeInlinedCode(SectionedAddress{0x5000},
                                                specifier(), false))
      .Times(1)
      .WillRepeatedly(Return(makeInliningInfo({
          // Depending on how the runtime was compiled, only the filename
          // may be present in the debug information.
          {"malloc", 70, 57, 3, "memprof_malloc_linux.cpp"},
      })));

  CallStackMap CSM;
  CSM[0x1] = {0x1000, 0x2000, 0x3000, 0x4000};
  // This entry should be dropped since all PCs are either not
  // symbolizable or belong to the runtime.
  CSM[0x2] = {0x1000, 0x2000, 0x5000};

  llvm::MapVector<uint64_t, MemInfoBlock> Prof;
  Prof[0x1].AllocCount = 1;
  Prof[0x2].AllocCount = 1;

  auto Seg = makeSegments();

  RawMemProfReader Reader(std::move(Symbolizer), Seg, Prof, CSM);

  llvm::SmallVector<MemProfRecord, 1> Records;
  for (const auto &KeyRecordPair : Reader) {
    Records.push_back(KeyRecordPair.second);
  }

  ASSERT_EQ(Records.size(), 1U);
  ASSERT_EQ(Records[0].AllocSites.size(), 1U);
  ASSERT_EQ(Records[0].AllocSites[0].CallStack.size(), 1U);
  EXPECT_THAT(Records[0].AllocSites[0].CallStack[0],
              FrameContains("foo", 5U, 30U, false));
}

TEST(MemProf, BaseMemProfReader) {
  llvm::DenseMap<FrameId, Frame> FrameIdMap;
  Frame F1(/*Hash=*/IndexedMemProfRecord::getGUID("foo"), /*LineOffset=*/20,
           /*Column=*/5, /*IsInlineFrame=*/true);
  Frame F2(/*Hash=*/IndexedMemProfRecord::getGUID("bar"), /*LineOffset=*/10,
           /*Column=*/2, /*IsInlineFrame=*/false);
  FrameIdMap.insert({F1.hash(), F1});
  FrameIdMap.insert({F2.hash(), F2});

  llvm::MapVector<llvm::GlobalValue::GUID, IndexedMemProfRecord> ProfData;
  IndexedMemProfRecord FakeRecord;
  MemInfoBlock Block;
  Block.AllocCount = 1U;
  Block.TotalAccessDensity = 4;
  Block.TotalLifetime = 200001;
  std::array<FrameId, 2> CallStack{F1.hash(), F2.hash()};
  FakeRecord.AllocSites.emplace_back(/*CS=*/CallStack, /*MB=*/Block);
  ProfData.insert({F1.hash(), FakeRecord});

  MemProfReader Reader(FrameIdMap, ProfData);

  llvm::SmallVector<MemProfRecord, 1> Records;
  for (const auto &KeyRecordPair : Reader) {
    Records.push_back(KeyRecordPair.second);
  }

  ASSERT_EQ(Records.size(), 1U);
  ASSERT_EQ(Records[0].AllocSites.size(), 1U);
  ASSERT_EQ(Records[0].AllocSites[0].CallStack.size(), 2U);
  EXPECT_THAT(Records[0].AllocSites[0].CallStack[0],
              FrameContains("foo", 20U, 5U, true));
  EXPECT_THAT(Records[0].AllocSites[0].CallStack[1],
              FrameContains("bar", 10U, 2U, false));
}

} // namespace