//===--- JITLinkMemoryManager.cpp - JITLinkMemoryManager implementation ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h"
#include "llvm/ExecutionEngine/JITLink/JITLink.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/Process.h"

#define DEBUG_TYPE "jitlink"

using namespace llvm;

namespace llvm {
namespace jitlink {

JITLinkMemoryManager::~JITLinkMemoryManager() = default;
JITLinkMemoryManager::InFlightAlloc::~InFlightAlloc() = default;

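// BasicLayout walks a LinkGraph and groups its allocatable blocks into
// segments keyed by (memory protection, memory lifetime). Content blocks and
// zero-fill blocks are tracked separately so that zero-fill bytes never need
// to be copied into working memory.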
BasicLayout::BasicLayout(LinkGraph &G) : G(G) {

  for (auto &Sec : G.sections()) {
    // Skip empty sections, and sections with NoAlloc lifetime policies.
    if (Sec.blocks().empty() ||
        Sec.getMemLifetime() == orc::MemLifetime::NoAlloc)
      continue;

    auto &Seg = Segments[{Sec.getMemProt(), Sec.getMemLifetime()}];
    for (auto *B : Sec.blocks())
      if (LLVM_LIKELY(!B->isZeroFill()))
        Seg.ContentBlocks.push_back(B);
      else
        Seg.ZeroFillBlocks.push_back(B);
  }

  // Build Segments map.
  auto CompareBlocks = [](const Block *LHS, const Block *RHS) {
    // Sort by section, address and size
    if (LHS->getSection().getOrdinal() != RHS->getSection().getOrdinal())
      return LHS->getSection().getOrdinal() < RHS->getSection().getOrdinal();
    if (LHS->getAddress() != RHS->getAddress())
      return LHS->getAddress() < RHS->getAddress();
    return LHS->getSize() < RHS->getSize();
  };

  LLVM_DEBUG(dbgs() << "Generated BasicLayout for " << G.getName() << ":\n");
  for (auto &KV : Segments) {
    auto &Seg = KV.second;

    llvm::sort(Seg.ContentBlocks, CompareBlocks);
    llvm::sort(Seg.ZeroFillBlocks, CompareBlocks);

    for (auto *B : Seg.ContentBlocks) {
      Seg.ContentSize = alignToBlock(Seg.ContentSize, *B);
      Seg.ContentSize += B->getSize();
      Seg.Alignment = std::max(Seg.Alignment, Align(B->getAlignment()));
    }

    uint64_t SegEndOffset = Seg.ContentSize;
    for (auto *B : Seg.ZeroFillBlocks) {
      SegEndOffset = alignToBlock(SegEndOffset, *B);
      SegEndOffset += B->getSize();
      Seg.Alignment = std::max(Seg.Alignment, Align(B->getAlignment()));
    }
    Seg.ZeroFillSize = SegEndOffset - Seg.ContentSize;

    LLVM_DEBUG({
      dbgs() << "  Seg " << KV.first
             << ": content-size=" << formatv("{0:x}", Seg.ContentSize)
             << ", zero-fill-size=" << formatv("{0:x}", Seg.ZeroFillSize)
             << ", align=" << formatv("{0:x}", Seg.Alignment.value()) << "\n";
    });
  }
}

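// Computes the page-aligned sizes needed to lay out all standard-lifetime and
// finalize-lifetime segments contiguously. Fails if any segment's alignment
// exceeds the page size.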
Expected<BasicLayout::ContiguousPageBasedLayoutSizes>
BasicLayout::getContiguousPageBasedLayoutSizes(uint64_t PageSize) {
  ContiguousPageBasedLayoutSizes SegsSizes;

  for (auto &KV : segments()) {
    auto &AG = KV.first;
    auto &Seg = KV.second;

    if (Seg.Alignment > PageSize)
      return make_error<StringError>("Segment alignment greater than page size",
                                     inconvertibleErrorCode());

    uint64_t SegSize = alignTo(Seg.ContentSize + Seg.ZeroFillSize, PageSize);
    if (AG.getMemLifetime() == orc::MemLifetime::Standard)
      SegsSizes.StandardSegs += SegSize;
    else
      SegsSizes.FinalizeSegs += SegSize;
  }

  return SegsSizes;
}

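// Assigns addresses to all blocks and copies content into working memory. The
// caller is expected to have set each segment's Addr and WorkingMem fields
// before calling apply().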
Error BasicLayout::apply() {
  for (auto &KV : Segments) {
    auto &Seg = KV.second;

    assert(!(Seg.ContentBlocks.empty() && Seg.ZeroFillBlocks.empty()) &&
           "Empty section recorded?");

    for (auto *B : Seg.ContentBlocks) {
      // Align addr and working-mem-offset.
      Seg.Addr = alignToBlock(Seg.Addr, *B);
      Seg.NextWorkingMemOffset = alignToBlock(Seg.NextWorkingMemOffset, *B);

      // Update block addr.
      B->setAddress(Seg.Addr);
      Seg.Addr += B->getSize();

      // Copy content to working memory, then update content to point at
      // working memory.
      memcpy(Seg.WorkingMem + Seg.NextWorkingMemOffset, B->getContent().data(),
             B->getSize());
      B->setMutableContent(
          {Seg.WorkingMem + Seg.NextWorkingMemOffset, B->getSize()});
      Seg.NextWorkingMemOffset += B->getSize();
    }

    for (auto *B : Seg.ZeroFillBlocks) {
      // Align addr.
      Seg.Addr = alignToBlock(Seg.Addr, *B);
      // Update block addr.
      B->setAddress(Seg.Addr);
      Seg.Addr += B->getSize();
    }

    Seg.ContentBlocks.clear();
    Seg.ZeroFillBlocks.clear();
  }

  return Error::success();
}

orc::shared::AllocActions &BasicLayout::graphAllocActions() {
  return G.allocActions();
}

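// Builds a throwaway LinkGraph containing one section per requested segment
// (named for the segment's protection and lifetime flags) and hands it to the
// memory manager's allocate() call. The result is delivered via OnCreated.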
void SimpleSegmentAlloc::Create(JITLinkMemoryManager &MemMgr,
                                const JITLinkDylib *JD, SegmentMap Segments,
                                OnCreatedFunction OnCreated) {

  static_assert(orc::AllocGroup::NumGroups == 32,
                "AllocGroup has changed. Section names below must be updated");
  StringRef AGSectionNames[] = {
      "__---.standard", "__R--.standard", "__-W-.standard", "__RW-.standard",
      "__--X.standard", "__R-X.standard", "__-WX.standard", "__RWX.standard",
      "__---.finalize", "__R--.finalize", "__-W-.finalize", "__RW-.finalize",
      "__--X.finalize", "__R-X.finalize", "__-WX.finalize", "__RWX.finalize"};

  auto G = std::make_unique<LinkGraph>("", Triple(), 0,
                                       llvm::endianness::native, nullptr);
  orc::AllocGroupSmallMap<Block *> ContentBlocks;

  orc::ExecutorAddr NextAddr(0x100000);
  for (auto &KV : Segments) {
    auto &AG = KV.first;
    auto &Seg = KV.second;

    assert(AG.getMemLifetime() != orc::MemLifetime::NoAlloc &&
           "NoAlloc segments are not supported by SimpleSegmentAlloc");

    auto AGSectionName =
        AGSectionNames[static_cast<unsigned>(AG.getMemProt()) |
                       static_cast<bool>(AG.getMemLifetime()) << 3];

    auto &Sec = G->createSection(AGSectionName, AG.getMemProt());
    Sec.setMemLifetime(AG.getMemLifetime());

    if (Seg.ContentSize != 0) {
      NextAddr =
          orc::ExecutorAddr(alignTo(NextAddr.getValue(), Seg.ContentAlign));
      auto &B =
          G->createMutableContentBlock(Sec, G->allocateBuffer(Seg.ContentSize),
                                       NextAddr, Seg.ContentAlign.value(), 0);
      ContentBlocks[AG] = &B;
      NextAddr += Seg.ContentSize;
    }
  }

  // GRef declared separately since order-of-argument-eval isn't specified.
  auto &GRef = *G;
  MemMgr.allocate(JD, GRef,
                  [G = std::move(G), ContentBlocks = std::move(ContentBlocks),
                   OnCreated = std::move(OnCreated)](
                      JITLinkMemoryManager::AllocResult Alloc) mutable {
                    if (!Alloc)
                      OnCreated(Alloc.takeError());
                    else
                      OnCreated(SimpleSegmentAlloc(std::move(G),
                                                   std::move(ContentBlocks),
                                                   std::move(*Alloc)));
                  });
}

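// Blocking convenience wrapper around the callback-based Create: waits on a
// promise/future pair for the result.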
Expected<SimpleSegmentAlloc>
SimpleSegmentAlloc::Create(JITLinkMemoryManager &MemMgr, const JITLinkDylib *JD,
                           SegmentMap Segments) {
  std::promise<MSVCPExpected<SimpleSegmentAlloc>> AllocP;
  auto AllocF = AllocP.get_future();
  Create(MemMgr, JD, std::move(Segments),
         [&](Expected<SimpleSegmentAlloc> Result) {
           AllocP.set_value(std::move(Result));
         });
  return AllocF.get();
}

SimpleSegmentAlloc::SimpleSegmentAlloc(SimpleSegmentAlloc &&) = default;
SimpleSegmentAlloc &
SimpleSegmentAlloc::operator=(SimpleSegmentAlloc &&) = default;
SimpleSegmentAlloc::~SimpleSegmentAlloc() = default;

SimpleSegmentAlloc::SegmentInfo
SimpleSegmentAlloc::getSegInfo(orc::AllocGroup AG) {
  auto I = ContentBlocks.find(AG);
  if (I != ContentBlocks.end()) {
    auto &B = *I->second;
    return {B.getAddress(), B.getAlreadyMutableContent()};
  }
  return {};
}

SimpleSegmentAlloc::SimpleSegmentAlloc(
    std::unique_ptr<LinkGraph> G,
    orc::AllocGroupSmallMap<Block *> ContentBlocks,
    std::unique_ptr<JITLinkMemoryManager::InFlightAlloc> Alloc)
    : G(std::move(G)), ContentBlocks(std::move(ContentBlocks)),
      Alloc(std::move(Alloc)) {}

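// In-flight allocation for InProcessMemoryManager. It owns the standard and
// finalization memory slabs until the allocation is either finalized (apply
// protections, run finalize actions, release the finalization slab) or
// abandoned (release both slabs).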
class InProcessMemoryManager::IPInFlightAlloc
    : public JITLinkMemoryManager::InFlightAlloc {
public:
  IPInFlightAlloc(InProcessMemoryManager &MemMgr, LinkGraph &G, BasicLayout BL,
                  sys::MemoryBlock StandardSegments,
                  sys::MemoryBlock FinalizationSegments)
      : MemMgr(MemMgr), G(&G), BL(std::move(BL)),
        StandardSegments(std::move(StandardSegments)),
        FinalizationSegments(std::move(FinalizationSegments)) {}

  ~IPInFlightAlloc() {
    assert(!G && "InFlight alloc neither abandoned nor finalized");
  }

  void finalize(OnFinalizedFunction OnFinalized) override {

    // Apply memory protections to all segments.
    if (auto Err = applyProtections()) {
      OnFinalized(std::move(Err));
      return;
    }

    // Run finalization actions.
    auto DeallocActions = runFinalizeActions(G->allocActions());
    if (!DeallocActions) {
      OnFinalized(DeallocActions.takeError());
      return;
    }

    // Release the finalize segments slab.
    if (auto EC = sys::Memory::releaseMappedMemory(FinalizationSegments)) {
      OnFinalized(errorCodeToError(EC));
      return;
    }

#ifndef NDEBUG
    // Set 'G' to null to flag that we've been successfully finalized.
    // This allows us to assert at destruction time that a call has been made
    // to either finalize or abandon.
    G = nullptr;
#endif

    // Continue with finalized allocation.
    OnFinalized(MemMgr.createFinalizedAlloc(std::move(StandardSegments),
                                            std::move(*DeallocActions)));
  }

  void abandon(OnAbandonedFunction OnAbandoned) override {
    Error Err = Error::success();
    if (auto EC = sys::Memory::releaseMappedMemory(FinalizationSegments))
      Err = joinErrors(std::move(Err), errorCodeToError(EC));
    if (auto EC = sys::Memory::releaseMappedMemory(StandardSegments))
      Err = joinErrors(std::move(Err), errorCodeToError(EC));

#ifndef NDEBUG
    // Set 'G' to null to flag that we've been successfully finalized.
    // This allows us to assert at destruction time that a call has been made
    // to either finalize or abandon.
    G = nullptr;
#endif

    OnAbandoned(std::move(Err));
  }

private:
  Error applyProtections() {
    for (auto &KV : BL.segments()) {
      const auto &AG = KV.first;
      auto &Seg = KV.second;

      auto Prot = toSysMemoryProtectionFlags(AG.getMemProt());

      uint64_t SegSize =
          alignTo(Seg.ContentSize + Seg.ZeroFillSize, MemMgr.PageSize);
      sys::MemoryBlock MB(Seg.WorkingMem, SegSize);
      if (auto EC = sys::Memory::protectMappedMemory(MB, Prot))
        return errorCodeToError(EC);
      if (Prot & sys::Memory::MF_EXEC)
        sys::Memory::InvalidateInstructionCache(MB.base(), MB.allocatedSize());
    }
    return Error::success();
  }

  InProcessMemoryManager &MemMgr;
  LinkGraph *G;
  BasicLayout BL;
  sys::MemoryBlock StandardSegments;
  sys::MemoryBlock FinalizationSegments;
};

Expected<std::unique_ptr<InProcessMemoryManager>>
InProcessMemoryManager::Create() {
  if (auto PageSize = sys::Process::getPageSize())
    return std::make_unique<InProcessMemoryManager>(*PageSize);
  else
    return PageSize.takeError();
}

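// Allocates a single read/write slab covering all segments, splits it into
// standard and finalize regions, assigns each segment's working memory and
// target address, then applies the layout and returns an IPInFlightAlloc.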
void InProcessMemoryManager::allocate(const JITLinkDylib *JD, LinkGraph &G,
                                      OnAllocatedFunction OnAllocated) {

  // FIXME: Just check this once on startup.
  if (!isPowerOf2_64((uint64_t)PageSize)) {
    OnAllocated(make_error<StringError>("Page size is not a power of 2",
                                        inconvertibleErrorCode()));
    return;
  }

  BasicLayout BL(G);

  /// Scan the request and calculate the group and total sizes.
  /// Check that segment size is no larger than a page.
  auto SegsSizes = BL.getContiguousPageBasedLayoutSizes(PageSize);
  if (!SegsSizes) {
    OnAllocated(SegsSizes.takeError());
    return;
  }

  /// Check that the total size requested (including zero fill) is not larger
  /// than a size_t.
  if (SegsSizes->total() > std::numeric_limits<size_t>::max()) {
    OnAllocated(make_error<JITLinkError>(
        "Total requested size " + formatv("{0:x}", SegsSizes->total()) +
        " for graph " + G.getName() + " exceeds address space"));
    return;
  }

  // Allocate one slab for the whole thing (to make sure everything is
  // in-range), then partition into standard and finalization blocks.
  //
  // FIXME: Make two separate allocations in the future to reduce
  // fragmentation: finalization segments will usually be a single page, and
  // standard segments are likely to be more than one page. Where multiple
  // allocations are in-flight at once (likely) the current approach will leave
  // a lot of single-page holes.
  sys::MemoryBlock Slab;
  sys::MemoryBlock StandardSegsMem;
  sys::MemoryBlock FinalizeSegsMem;
  {
    const sys::Memory::ProtectionFlags ReadWrite =
        static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
                                                  sys::Memory::MF_WRITE);

    std::error_code EC;
    Slab = sys::Memory::allocateMappedMemory(SegsSizes->total(), nullptr,
                                             ReadWrite, EC);

    if (EC) {
      OnAllocated(errorCodeToError(EC));
      return;
    }

    // Zero-fill the whole slab up-front.
    memset(Slab.base(), 0, Slab.allocatedSize());

    StandardSegsMem = {Slab.base(),
                       static_cast<size_t>(SegsSizes->StandardSegs)};
    FinalizeSegsMem = {(void *)((char *)Slab.base() + SegsSizes->StandardSegs),
                       static_cast<size_t>(SegsSizes->FinalizeSegs)};
  }

  auto NextStandardSegAddr = orc::ExecutorAddr::fromPtr(StandardSegsMem.base());
  auto NextFinalizeSegAddr = orc::ExecutorAddr::fromPtr(FinalizeSegsMem.base());

  LLVM_DEBUG({
    dbgs() << "InProcessMemoryManager allocated:\n";
    if (SegsSizes->StandardSegs)
      dbgs() << formatv("  [ {0:x16} -- {1:x16} ]", NextStandardSegAddr,
                        NextStandardSegAddr + StandardSegsMem.allocatedSize())
             << " to standard segs\n";
    else
      dbgs() << "  no standard segs\n";
    if (SegsSizes->FinalizeSegs)
      dbgs() << formatv("  [ {0:x16} -- {1:x16} ]", NextFinalizeSegAddr,
                        NextFinalizeSegAddr + FinalizeSegsMem.allocatedSize())
             << " to finalize segs\n";
    else
      dbgs() << "  no finalize segs\n";
  });

  // Build ProtMap, assign addresses.
  for (auto &KV : BL.segments()) {
    auto &AG = KV.first;
    auto &Seg = KV.second;

    auto &SegAddr = (AG.getMemLifetime() == orc::MemLifetime::Standard)
                        ? NextStandardSegAddr
                        : NextFinalizeSegAddr;

    Seg.WorkingMem = SegAddr.toPtr<char *>();
    Seg.Addr = SegAddr;

    SegAddr += alignTo(Seg.ContentSize + Seg.ZeroFillSize, PageSize);
  }

  if (auto Err = BL.apply()) {
    OnAllocated(std::move(Err));
    return;
  }

  OnAllocated(std::make_unique<IPInFlightAlloc>(*this, G, std::move(BL),
                                                std::move(StandardSegsMem),
                                                std::move(FinalizeSegsMem)));
}

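// Collects each allocation's dealloc actions and standard-segments slab under
// the lock, then runs the actions and releases the slabs outside of it.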
void InProcessMemoryManager::deallocate(std::vector<FinalizedAlloc> Allocs,
                                        OnDeallocatedFunction OnDeallocated) {
  std::vector<sys::MemoryBlock> StandardSegmentsList;
  std::vector<std::vector<orc::shared::WrapperFunctionCall>> DeallocActionsList;

  {
    std::lock_guard<std::mutex> Lock(FinalizedAllocsMutex);
    for (auto &Alloc : Allocs) {
      auto *FA = Alloc.release().toPtr<FinalizedAllocInfo *>();
      StandardSegmentsList.push_back(std::move(FA->StandardSegments));
      if (!FA->DeallocActions.empty())
        DeallocActionsList.push_back(std::move(FA->DeallocActions));
      FA->~FinalizedAllocInfo();
      FinalizedAllocInfos.Deallocate(FA);
    }
  }

  Error DeallocErr = Error::success();

  while (!DeallocActionsList.empty()) {
    auto &DeallocActions = DeallocActionsList.back();
    auto &StandardSegments = StandardSegmentsList.back();

    /// Run any deallocate calls.
    while (!DeallocActions.empty()) {
      if (auto Err = DeallocActions.back().runWithSPSRetErrorMerged())
        DeallocErr = joinErrors(std::move(DeallocErr), std::move(Err));
      DeallocActions.pop_back();
    }

    /// Release the standard segments slab.
    if (auto EC = sys::Memory::releaseMappedMemory(StandardSegments))
      DeallocErr = joinErrors(std::move(DeallocErr), errorCodeToError(EC));

    DeallocActionsList.pop_back();
    StandardSegmentsList.pop_back();
  }

  OnDeallocated(std::move(DeallocErr));
}

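// Records the standard segments and dealloc actions for a finalized
// allocation and returns the record's address as an opaque FinalizedAlloc
// handle.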
JITLinkMemoryManager::FinalizedAlloc
InProcessMemoryManager::createFinalizedAlloc(
    sys::MemoryBlock StandardSegments,
    std::vector<orc::shared::WrapperFunctionCall> DeallocActions) {
  std::lock_guard<std::mutex> Lock(FinalizedAllocsMutex);
  auto *FA = FinalizedAllocInfos.Allocate<FinalizedAllocInfo>();
  new (FA) FinalizedAllocInfo(
      {std::move(StandardSegments), std::move(DeallocActions)});
  return FinalizedAlloc(orc::ExecutorAddr::fromPtr(FA));
}

} // end namespace jitlink
} // end namespace llvm