/*
 * This file is from https://github.com/llvm/llvm-project/pull/71968
 * with minor modifications to avoid name clash and work with older
 * LLVM versions. The llvm::backport::SectionMemoryManager class is a
 * drop-in replacement for llvm::SectionMemoryManager, for use with
 * llvm::RuntimeDyld. It fixes a memory layout bug on large memory
 * ARM systems (see pull request for details). If the LLVM project
 * eventually commits the change, we may need to resynchronize our
 * copy with any further modifications, but they would be unlikely to
 * backport it into the LLVM versions that we target so we would still
 * need this copy.
 *
 * In the future we will switch to using JITLink instead of
 * RuntimeDyld where possible, and later remove this code (.cpp, .h,
 * .LICENSE) after all LLVM versions that we target allow it.
 *
 * This file is a modified copy of a part of the LLVM source code that
 * we would normally access from the LLVM library. It is therefore
 * covered by the license at https://llvm.org/LICENSE.txt, reproduced
 * verbatim in SectionMemoryManager.LICENSE in fulfillment of clause
 * 4a. The bugfix changes from the pull request are also covered, per
 * the same license.
 */
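
/*
 * Usage sketch (illustrative, not part of the upstream change): the class
 * is handed to RuntimeDyld-based JIT machinery anywhere
 * llvm::SectionMemoryManager would be, e.g.
 *
 *   llvm::backport::SectionMemoryManager MemMgr(nullptr, true);
 *   llvm::RuntimeDyld Dyld(MemMgr, Resolver);
 *
 * where Resolver is some llvm::JITSymbolResolver. Passing true requests
 * up-front reservation through reserveAllocationSpace() below, which is
 * the code path containing the bugfix.
 */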

//===- SectionMemoryManager.cpp - Memory manager for MCJIT/RtDyld *- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the section-based memory manager used by the MCJIT
// execution engine and RuntimeDyld.
//
//===----------------------------------------------------------------------===//

#include "jit/llvmjit_backport.h"

#ifdef USE_LLVM_BACKPORT_SECTION_MEMORY_MANAGER

#include "jit/SectionMemoryManager.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Process.h"

namespace llvm {
namespace backport {

bool SectionMemoryManager::hasSpace(const MemoryGroup &MemGroup,
                                    uintptr_t Size) const {
  for (const FreeMemBlock &FreeMB : MemGroup.FreeMem) {
    if (FreeMB.Free.allocatedSize() >= Size)
      return true;
  }
  return false;
}

#if LLVM_VERSION_MAJOR < 16
void SectionMemoryManager::reserveAllocationSpace(uintptr_t CodeSize,
                                                  uint32_t CodeAlign_i,
                                                  uintptr_t RODataSize,
                                                  uint32_t RODataAlign_i,
                                                  uintptr_t RWDataSize,
                                                  uint32_t RWDataAlign_i) {
  Align CodeAlign(CodeAlign_i);
  Align RODataAlign(RODataAlign_i);
  Align RWDataAlign(RWDataAlign_i);
#else
void SectionMemoryManager::reserveAllocationSpace(
    uintptr_t CodeSize, Align CodeAlign, uintptr_t RODataSize,
    Align RODataAlign, uintptr_t RWDataSize, Align RWDataAlign) {
#endif
  if (CodeSize == 0 && RODataSize == 0 && RWDataSize == 0)
    return;

  static const size_t PageSize = sys::Process::getPageSizeEstimate();

  // Code alignment needs to be at least the stub alignment - however, we
  // don't have an easy way to get that here so as a workaround, we assume
  // it's 8, which is the largest value I observed across all platforms.
  constexpr uint64_t StubAlign = 8;
  CodeAlign = Align(std::max(CodeAlign.value(), StubAlign));
  RODataAlign = Align(std::max(RODataAlign.value(), StubAlign));
  RWDataAlign = Align(std::max(RWDataAlign.value(), StubAlign));

  // Get space required for each section. Use the same calculation as
  // allocateSection because we need to be able to satisfy it.
  uint64_t RequiredCodeSize = alignTo(CodeSize, CodeAlign) + CodeAlign.value();
  uint64_t RequiredRODataSize =
      alignTo(RODataSize, RODataAlign) + RODataAlign.value();
  uint64_t RequiredRWDataSize =
      alignTo(RWDataSize, RWDataAlign) + RWDataAlign.value();
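  // For instance (illustrative numbers): with CodeSize = 1000 and
  // CodeAlign = 16, RequiredCodeSize = alignTo(1000, 16) + 16 = 1024,
  // the same bound allocateSection() computes below as
  // 16 * ((1000 + 15) / 16 + 1) = 1024.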

  if (hasSpace(CodeMem, RequiredCodeSize) &&
      hasSpace(RODataMem, RequiredRODataSize) &&
      hasSpace(RWDataMem, RequiredRWDataSize)) {
    // Sufficient space in contiguous block already available.
    return;
  }

  // MemoryManager does not have functions for releasing memory after it's
  // allocated. Normally it tries to use any excess blocks that were allocated
  // due to page alignment, but if we have insufficient free memory for the
  // request this can lead to allocating disparate memory that can violate the
  // ARM ABI. Clear free memory so only the new allocations are used, but do
  // not release allocated memory as it may still be in-use.
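  // (For example, AArch64 ADRP-based relocations can only reach pages within
  // +/-4 GiB of the referencing instruction, so widely scattered sections may
  // be impossible to relocate; see the pull request cited at the top of this
  // file for details.)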
  CodeMem.FreeMem.clear();
  RODataMem.FreeMem.clear();
  RWDataMem.FreeMem.clear();

  // Round up to the nearest page size. Blocks must be page-aligned.
  RequiredCodeSize = alignTo(RequiredCodeSize, PageSize);
  RequiredRODataSize = alignTo(RequiredRODataSize, PageSize);
  RequiredRWDataSize = alignTo(RequiredRWDataSize, PageSize);
  uint64_t RequiredSize =
      RequiredCodeSize + RequiredRODataSize + RequiredRWDataSize;

  std::error_code ec;
  sys::MemoryBlock MB = MMapper->allocateMappedMemory(
      AllocationPurpose::RWData, RequiredSize, nullptr,
      sys::Memory::MF_READ | sys::Memory::MF_WRITE, ec);
  if (ec) {
    return;
  }
  // CodeMem will arbitrarily own this MemoryBlock to handle cleanup.
  CodeMem.AllocatedMem.push_back(MB);
  uintptr_t Addr = (uintptr_t)MB.base();
  FreeMemBlock FreeMB;
  FreeMB.PendingPrefixIndex = (unsigned)-1;

  if (CodeSize > 0) {
    assert(isAddrAligned(CodeAlign, (void *)Addr));
    FreeMB.Free = sys::MemoryBlock((void *)Addr, RequiredCodeSize);
    CodeMem.FreeMem.push_back(FreeMB);
    Addr += RequiredCodeSize;
  }

  if (RODataSize > 0) {
    assert(isAddrAligned(RODataAlign, (void *)Addr));
    FreeMB.Free = sys::MemoryBlock((void *)Addr, RequiredRODataSize);
    RODataMem.FreeMem.push_back(FreeMB);
    Addr += RequiredRODataSize;
  }

  if (RWDataSize > 0) {
    assert(isAddrAligned(RWDataAlign, (void *)Addr));
    FreeMB.Free = sys::MemoryBlock((void *)Addr, RequiredRWDataSize);
    RWDataMem.FreeMem.push_back(FreeMB);
  }
}

uint8_t *SectionMemoryManager::allocateDataSection(uintptr_t Size,
                                                   unsigned Alignment,
                                                   unsigned SectionID,
                                                   StringRef SectionName,
                                                   bool IsReadOnly) {
  if (IsReadOnly)
    return allocateSection(SectionMemoryManager::AllocationPurpose::ROData,
                           Size, Alignment);
  return allocateSection(SectionMemoryManager::AllocationPurpose::RWData, Size,
                         Alignment);
}

uint8_t *SectionMemoryManager::allocateCodeSection(uintptr_t Size,
                                                   unsigned Alignment,
                                                   unsigned SectionID,
                                                   StringRef SectionName) {
  return allocateSection(SectionMemoryManager::AllocationPurpose::Code, Size,
                         Alignment);
}

uint8_t *SectionMemoryManager::allocateSection(
    SectionMemoryManager::AllocationPurpose Purpose, uintptr_t Size,
    unsigned Alignment) {
  if (!Alignment)
    Alignment = 16;

  assert(!(Alignment & (Alignment - 1)) && "Alignment must be a power of two.");
  uintptr_t RequiredSize = Alignment * ((Size + Alignment - 1) / Alignment + 1);
  uintptr_t Addr = 0;

  MemoryGroup &MemGroup = [&]() -> MemoryGroup & {
    switch (Purpose) {
    case AllocationPurpose::Code:
      return CodeMem;
    case AllocationPurpose::ROData:
      return RODataMem;
    case AllocationPurpose::RWData:
      return RWDataMem;
    }
    llvm_unreachable("Unknown SectionMemoryManager::AllocationPurpose");
  }();

  // Look in the list of free memory regions and use a block there if one
  // is available.
  for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
    if (FreeMB.Free.allocatedSize() >= RequiredSize) {
      Addr = (uintptr_t)FreeMB.Free.base();
      uintptr_t EndOfBlock = Addr + FreeMB.Free.allocatedSize();
      // Align the address.
      Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);
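      // (This rounds Addr up to the next Alignment boundary; e.g., with
      // Addr = 0x1003 and Alignment = 16, (0x1003 + 15) & ~15 = 0x1010.)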

      if (FreeMB.PendingPrefixIndex == (unsigned)-1) {
        // The part of the block we're giving out to the user is now pending
        MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));

        // Remember this pending block, such that future allocations can just
        // modify it rather than creating a new one
        FreeMB.PendingPrefixIndex = MemGroup.PendingMem.size() - 1;
      } else {
        sys::MemoryBlock &PendingMB =
            MemGroup.PendingMem[FreeMB.PendingPrefixIndex];
        PendingMB = sys::MemoryBlock(PendingMB.base(),
                                     Addr + Size - (uintptr_t)PendingMB.base());
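        // (The pending prefix simply grows to cover [PendingMB.base(),
        // Addr + Size); no second PendingMem entry is created for
        // allocations carved out of the same free block.)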
      }

      // Remember how much free space is now left in this block
      FreeMB.Free =
          sys::MemoryBlock((void *)(Addr + Size), EndOfBlock - Addr - Size);
      return (uint8_t *)Addr;
    }
  }

  // No pre-allocated free block was large enough. Allocate a new memory region.
  // Note that all sections get allocated as read-write. The permissions will
  // be updated later based on memory group.
  //
  // FIXME: It would be useful to define a default allocation size (or add
  // it as a constructor parameter) to minimize the number of allocations.
  //
  // FIXME: Initialize the Near member for each memory group to avoid
  // interleaving.
  std::error_code ec;
  sys::MemoryBlock MB = MMapper->allocateMappedMemory(
      Purpose, RequiredSize, &MemGroup.Near,
      sys::Memory::MF_READ | sys::Memory::MF_WRITE, ec);
  if (ec) {
    // FIXME: Add error propagation to the interface.
    return nullptr;
  }

  // Save this address as the basis for our next request
  MemGroup.Near = MB;

  // Copy the address to all the other groups, if they have not
  // allocated memory on their own yet.
  if (CodeMem.Near.base() == nullptr)
    CodeMem.Near = MB;
  if (RODataMem.Near.base() == nullptr)
    RODataMem.Near = MB;
  if (RWDataMem.Near.base() == nullptr)
    RWDataMem.Near = MB;

  // Remember that we allocated this memory
  MemGroup.AllocatedMem.push_back(MB);
  Addr = (uintptr_t)MB.base();
  uintptr_t EndOfBlock = Addr + MB.allocatedSize();

  // Align the address.
  Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);

  // The part of the block we're giving out to the user is now pending
  MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));

  // The allocateMappedMemory may allocate much more memory than we need. In
  // this case, we store the unused memory as a free memory block.
  unsigned FreeSize = EndOfBlock - Addr - Size;
  if (FreeSize > 16) {
    FreeMemBlock FreeMB;
    FreeMB.Free = sys::MemoryBlock((void *)(Addr + Size), FreeSize);
    FreeMB.PendingPrefixIndex = (unsigned)-1;
    MemGroup.FreeMem.push_back(FreeMB);
  }

  // Return aligned address
  return (uint8_t *)Addr;
}

bool SectionMemoryManager::finalizeMemory(std::string *ErrMsg) {
  // FIXME: Should in-progress permissions be reverted if an error occurs?
  std::error_code ec;

  // Make code memory executable.
  ec = applyMemoryGroupPermissions(CodeMem,
                                   sys::Memory::MF_READ | sys::Memory::MF_EXEC);
  if (ec) {
    if (ErrMsg) {
      *ErrMsg = ec.message();
    }
    return true;
  }

  // Make read-only data memory read-only.
  ec = applyMemoryGroupPermissions(RODataMem, sys::Memory::MF_READ);
  if (ec) {
    if (ErrMsg) {
      *ErrMsg = ec.message();
    }
    return true;
  }

  // Read-write data memory already has the correct permissions

  // Some platforms with separate data cache and instruction cache require
  // explicit cache flush, otherwise JIT code manipulations (like resolved
  // relocations) will get to the data cache but not to the instruction cache.
  invalidateInstructionCache();

  return false;
}

static sys::MemoryBlock trimBlockToPageSize(sys::MemoryBlock M) {
  static const size_t PageSize = sys::Process::getPageSizeEstimate();

  size_t StartOverlap =
      (PageSize - ((uintptr_t)M.base() % PageSize)) % PageSize;
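
  // Worked example with illustrative values (PageSize = 0x1000): a block at
  // 0x1100 of size 0x2100 has StartOverlap = 0xF00, so the trimmed size below
  // becomes 0x2100 - 0xF00 = 0x1200 and then 0x1200 - (0x1200 % 0x1000) =
  // 0x1000, leaving a trimmed block covering exactly [0x2000, 0x3000).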

  size_t TrimmedSize = M.allocatedSize();
  TrimmedSize -= StartOverlap;
  TrimmedSize -= TrimmedSize % PageSize;

  sys::MemoryBlock Trimmed((void *)((uintptr_t)M.base() + StartOverlap),
                           TrimmedSize);

  assert(((uintptr_t)Trimmed.base() % PageSize) == 0);
  assert((Trimmed.allocatedSize() % PageSize) == 0);
  assert(M.base() <= Trimmed.base() &&
         Trimmed.allocatedSize() <= M.allocatedSize());

  return Trimmed;
}

std::error_code
SectionMemoryManager::applyMemoryGroupPermissions(MemoryGroup &MemGroup,
                                                  unsigned Permissions) {
  for (sys::MemoryBlock &MB : MemGroup.PendingMem)
    if (std::error_code EC = MMapper->protectMappedMemory(MB, Permissions))
      return EC;

  MemGroup.PendingMem.clear();

  // Now go through free blocks and trim any of them that don't span the entire
  // page because one of the pending blocks may have overlapped it.
  for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
    FreeMB.Free = trimBlockToPageSize(FreeMB.Free);
    // We cleared the PendingMem list, so all these pointers are now invalid
    FreeMB.PendingPrefixIndex = (unsigned)-1;
  }

  // Remove all blocks which are now empty
  erase_if(MemGroup.FreeMem, [](FreeMemBlock &FreeMB) {
    return FreeMB.Free.allocatedSize() == 0;
  });

  return std::error_code();
}

void SectionMemoryManager::invalidateInstructionCache() {
  for (sys::MemoryBlock &Block : CodeMem.PendingMem)
    sys::Memory::InvalidateInstructionCache(Block.base(),
                                            Block.allocatedSize());
}

SectionMemoryManager::~SectionMemoryManager() {
  for (MemoryGroup *Group : {&CodeMem, &RWDataMem, &RODataMem}) {
    for (sys::MemoryBlock &Block : Group->AllocatedMem)
      MMapper->releaseMappedMemory(Block);
  }
}

SectionMemoryManager::MemoryMapper::~MemoryMapper() = default;

void SectionMemoryManager::anchor() {}

namespace {
// Trivial implementation of SectionMemoryManager::MemoryMapper that just calls
// into sys::Memory.
class DefaultMMapper final : public SectionMemoryManager::MemoryMapper {
public:
  sys::MemoryBlock
  allocateMappedMemory(SectionMemoryManager::AllocationPurpose Purpose,
                       size_t NumBytes, const sys::MemoryBlock *const NearBlock,
                       unsigned Flags, std::error_code &EC) override {
    return sys::Memory::allocateMappedMemory(NumBytes, NearBlock, Flags, EC);
  }

  std::error_code protectMappedMemory(const sys::MemoryBlock &Block,
                                      unsigned Flags) override {
    return sys::Memory::protectMappedMemory(Block, Flags);
  }

  std::error_code releaseMappedMemory(sys::MemoryBlock &M) override {
    return sys::Memory::releaseMappedMemory(M);
  }
};
} // namespace
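
// A custom MemoryMapper (hypothetically, one that tags mappings for a
// profiler or backs them with huge pages) could be passed to the constructor
// below instead; the default above simply forwards to sys::Memory.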

SectionMemoryManager::SectionMemoryManager(MemoryMapper *UnownedMM,
                                           bool ReserveAlloc)
    : MMapper(UnownedMM), OwnedMMapper(nullptr),
      ReserveAllocation(ReserveAlloc) {
  if (!MMapper) {
    OwnedMMapper = std::make_unique<DefaultMMapper>();
    MMapper = OwnedMMapper.get();
  }
}

} // namespace backport
} // namespace llvm

#endif /* USE_LLVM_BACKPORT_SECTION_MEMORY_MANAGER */