//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include <memory>
#include <memory_resource>

#if _LIBCPP_HAS_ATOMIC_HEADER
#  include <atomic>
#elif _LIBCPP_HAS_THREADS
#  include <mutex>
#  if defined(__ELF__) && defined(_LIBCPP_LINK_PTHREAD_LIB)
#    pragma comment(lib, "pthread")
#  endif
#endif
_LIBCPP_BEGIN_NAMESPACE_STD

namespace pmr {

// memory_resource

memory_resource::~memory_resource() = default;
// new_delete_resource()
#if !_LIBCPP_HAS_ALIGNED_ALLOCATION
static bool is_aligned_to(void* ptr, size_t align) {
  void* p2     = ptr;
  size_t space = 1;
  void* result = std::align(align, 1, p2, space);
  return (result == ptr);
}
#endif
class _LIBCPP_EXPORTED_FROM_ABI __new_delete_memory_resource_imp : public memory_resource {
  void* do_allocate(size_t bytes, size_t align) override {
#if _LIBCPP_HAS_ALIGNED_ALLOCATION
    return std::__libcpp_allocate<std::byte>(__element_count(bytes), align);
#else
    if (bytes == 0)
      bytes = 1;
    std::byte* result = std::__libcpp_allocate<std::byte>(__element_count(bytes), align);
    if (!is_aligned_to(result, align)) {
      std::__libcpp_deallocate<std::byte>(result, __element_count(bytes), align);
      __throw_bad_alloc();
    }
    return result;
#endif
  }

  void do_deallocate(void* p, size_t bytes, size_t align) override {
    std::__libcpp_deallocate<std::byte>(static_cast<std::byte*>(p), __element_count(bytes), align);
  }

  bool do_is_equal(const memory_resource& other) const noexcept override { return &other == this; }
};
// null_memory_resource()
class _LIBCPP_EXPORTED_FROM_ABI __null_memory_resource_imp : public memory_resource {
  void* do_allocate(size_t, size_t) override { __throw_bad_alloc(); }
  void do_deallocate(void*, size_t, size_t) override {}
  bool do_is_equal(const memory_resource& other) const noexcept override { return &other == this; }
};
namespace {

union ResourceInitHelper {
  struct {
    __new_delete_memory_resource_imp new_delete_res;
    __null_memory_resource_imp null_res;
  } resources;
  char dummy;
  constexpr ResourceInitHelper() : resources() {}
  ~ResourceInitHelper() {}
};

// Pretend we're inside a system header so the compiler doesn't flag the use of the init_priority
// attribute with a value that's reserved for the implementation (we're the implementation).
#include "memory_resource_init_helper.h"

} // end namespace
memory_resource* new_delete_resource() noexcept { return &res_init.resources.new_delete_res; }

memory_resource* null_memory_resource() noexcept { return &res_init.resources.null_res; }
// default_memory_resource()
static memory_resource* __default_memory_resource(bool set = false, memory_resource* new_res = nullptr) noexcept {
#if _LIBCPP_HAS_ATOMIC_HEADER
  static constinit atomic<memory_resource*> __res{&res_init.resources.new_delete_res};
  if (set) {
    new_res = new_res ? new_res : new_delete_resource();
    // TODO: Can a weaker ordering be used?
    return std::atomic_exchange_explicit(&__res, new_res, memory_order_acq_rel);
  } else {
    return std::atomic_load_explicit(&__res, memory_order_acquire);
  }
#elif _LIBCPP_HAS_THREADS
  static constinit memory_resource* res = &res_init.resources.new_delete_res;
  static mutex res_lock;
  if (set) {
    new_res = new_res ? new_res : new_delete_resource();
    lock_guard<mutex> guard(res_lock);
    memory_resource* old_res = res;
    res                      = new_res;
    return old_res;
  } else {
    lock_guard<mutex> guard(res_lock);
    return res;
  }
#else
  static constinit memory_resource* res = &res_init.resources.new_delete_res;
  if (set) {
    new_res = new_res ? new_res : new_delete_resource();
    memory_resource* old_res = res;
    res                      = new_res;
    return old_res;
  } else {
    return res;
  }
#endif
}
memory_resource* get_default_resource() noexcept { return __default_memory_resource(); }

memory_resource* set_default_resource(memory_resource* __new_res) noexcept {
  return __default_memory_resource(true, __new_res);
}
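
// Usage sketch (client code, not part of this file): set_default_resource
// returns the previously installed resource, so callers can restore it:
//
//   std::pmr::memory_resource* old = std::pmr::set_default_resource(std::pmr::null_memory_resource());
//   // ... code that must not allocate through the default resource ...
//   std::pmr::set_default_resource(old);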
// 23.12.5, mem.res.pool
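
// Rounds `count` up to the next multiple of `alignment`, which must be a power
// of two so that `~mask` clears the low bits: e.g. roundup(13, 8) == 16 and
// roundup(16, 8) == 16.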
static size_t roundup(size_t count, size_t alignment) {
  size_t mask = alignment - 1;
  return (count + mask) & ~mask;
}
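
// Each chunk handed out by the ad-hoc pool is allocated from upstream with
// extra room for a __chunk_footer at its end; the footers link all live chunks
// into a singly linked list. __allocation_size() recovers the size of the
// original upstream allocation: the distance from the chunk's start to its
// footer, plus the footer itself.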
struct unsynchronized_pool_resource::__adhoc_pool::__chunk_footer {
  __chunk_footer* __next_;
  char* __start_;
  size_t __align_;
  size_t __allocation_size() { return (reinterpret_cast<char*>(this) - __start_) + sizeof(*this); }
};
void unsynchronized_pool_resource::__adhoc_pool::__release_ptr(memory_resource* upstream) {
  while (__first_ != nullptr) {
    __chunk_footer* next = __first_->__next_;
    upstream->deallocate(__first_->__start_, __first_->__allocation_size(), __first_->__align_);
    __first_ = next;
  }
}
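
// Allocates [bytes ... padding ... footer] from upstream in a single request
// and links the new chunk at the head of the list; the pointer returned to the
// caller is the start of the allocation.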
void* unsynchronized_pool_resource::__adhoc_pool::__do_allocate(memory_resource* upstream, size_t bytes, size_t align) {
  const size_t footer_size  = sizeof(__chunk_footer);
  const size_t footer_align = alignof(__chunk_footer);

  if (align < footer_align)
    align = footer_align;

  size_t aligned_capacity = roundup(bytes, footer_align) + footer_size;

  void* result = upstream->allocate(aligned_capacity, align);

  __chunk_footer* h = (__chunk_footer*)((char*)result + aligned_capacity - footer_size);
  h->__next_        = __first_;
  h->__start_       = (char*)result;
  h->__align_       = align;
  __first_          = h;
  return result;
}
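
// Finds the chunk whose start is `p` by walking the footer list (O(n) in the
// number of live ad-hoc chunks), unlinks it, and returns it upstream.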
void unsynchronized_pool_resource::__adhoc_pool::__do_deallocate(
    memory_resource* upstream, void* p, size_t bytes, size_t align) {
  _LIBCPP_ASSERT_NON_NULL(__first_ != nullptr, "deallocating a block that was not allocated with this allocator");
  if (__first_->__start_ == p) {
    __chunk_footer* next = __first_->__next_;
    upstream->deallocate(p, __first_->__allocation_size(), __first_->__align_);
    __first_ = next;
  } else {
    for (__chunk_footer* h = __first_; h->__next_ != nullptr; h = h->__next_) {
      if (h->__next_->__start_ == p) {
        __chunk_footer* next = h->__next_->__next_;
        upstream->deallocate(p, h->__next_->__allocation_size(), h->__next_->__align_);
        h->__next_ = next;
        return;
      }
    }
    // The request to deallocate memory ends up being a no-op, likely resulting in a memory leak.
    _LIBCPP_ASSERT_VALID_DEALLOCATION(false, "deallocating a block that was not allocated with this allocator");
  }
}
class unsynchronized_pool_resource::__fixed_pool {
  struct __chunk_footer {
    __chunk_footer* __next_;
    char* __start_;
    size_t __align_;
    size_t __allocation_size() { return (reinterpret_cast<char*>(this) - __start_) + sizeof(*this); }
  };

  struct __vacancy_header {
    __vacancy_header* __next_vacancy_;
  };

  __chunk_footer* __first_chunk_     = nullptr;
  __vacancy_header* __first_vacancy_ = nullptr;

public:
  explicit __fixed_pool() = default;
  void __release_ptr(memory_resource* upstream) {
    __first_vacancy_ = nullptr;
    while (__first_chunk_ != nullptr) {
      __chunk_footer* next = __first_chunk_->__next_;
      upstream->deallocate(__first_chunk_->__start_, __first_chunk_->__allocation_size(), __first_chunk_->__align_);
      __first_chunk_ = next;
    }
  }
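
  // A vacancy is a free block inside a chunk; its first bytes are reused as a
  // __vacancy_header, making the free list intrusive: it costs no extra memory
  // and allocation is a simple free-list pop.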
  void* __try_allocate_from_vacancies() {
    if (__first_vacancy_ != nullptr) {
      void* result     = __first_vacancy_;
      __first_vacancy_ = __first_vacancy_->__next_vacancy_;
      return result;
    }
    return nullptr;
  }
  void* __allocate_in_new_chunk(memory_resource* upstream, size_t block_size, size_t chunk_size) {
    _LIBCPP_ASSERT_INTERNAL(chunk_size % block_size == 0, "");
    static_assert(__default_alignment >= alignof(std::max_align_t), "");
    static_assert(__default_alignment >= alignof(__chunk_footer), "");
    static_assert(__default_alignment >= alignof(__vacancy_header), "");

    const size_t footer_size  = sizeof(__chunk_footer);
    const size_t footer_align = alignof(__chunk_footer);

    size_t aligned_capacity = roundup(chunk_size, footer_align) + footer_size;

    void* result = upstream->allocate(aligned_capacity, __default_alignment);

    __chunk_footer* h = (__chunk_footer*)((char*)result + aligned_capacity - footer_size);
    h->__next_        = __first_chunk_;
    h->__start_       = (char*)result;
    h->__align_       = __default_alignment;
    __first_chunk_    = h;

    if (chunk_size > block_size) {
      __vacancy_header* last_vh = this->__first_vacancy_;
      for (size_t i = block_size; i != chunk_size; i += block_size) {
        __vacancy_header* vh = (__vacancy_header*)((char*)result + i);
        vh->__next_vacancy_  = last_vh;
        last_vh              = vh;
      }
      this->__first_vacancy_ = last_vh;
    }
    return result;
  }
  void __evacuate(void* p) {
    __vacancy_header* vh = (__vacancy_header*)(p);
    vh->__next_vacancy_  = __first_vacancy_;
    __first_vacancy_     = vh;
  }
  size_t __previous_chunk_size_in_bytes() const { return __first_chunk_ ? __first_chunk_->__allocation_size() : 0; }

  static const size_t __default_alignment = alignof(max_align_t);
};
size_t unsynchronized_pool_resource::__pool_block_size(int i) const { return size_t(1) << __log2_pool_block_size(i); }

int unsynchronized_pool_resource::__log2_pool_block_size(int i) const { return (i + __log2_smallest_block_size); }
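
// Maps a (bytes, align) request to the index of the smallest fixed pool whose
// block size fits it; oversized or over-aligned requests get index
// __num_fixed_pools_, which selects the ad-hoc pool. For example, assuming the
// 8-byte smallest block size from <memory_resource> (__log2_smallest_block_size
// == 3), a request of 24 bytes with 8-byte alignment maps to index 2, i.e. the
// 32-byte pool: (24 - 1) >> 3 == 2, which two right-shifts reduce to zero.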
int unsynchronized_pool_resource::__pool_index(size_t bytes, size_t align) const {
  if (align > alignof(std::max_align_t) || bytes > (size_t(1) << __num_fixed_pools_))
    return __num_fixed_pools_;
  else {
    int i = 0;
    bytes = (bytes > align) ? bytes : align;
    bytes -= 1;
    bytes >>= __log2_smallest_block_size;
    while (bytes != 0) {
      bytes >>= 1;
      i += 1;
    }
    return i;
  }
}
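
// The constructor clamps the user-supplied pool_options to the implementation's
// limits and derives the number of fixed pools from the (clamped) largest block
// size. The pools themselves are created lazily on first use in do_allocate().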
unsynchronized_pool_resource::unsynchronized_pool_resource(const pool_options& opts, memory_resource* upstream)
    : __res_(upstream), __fixed_pools_(nullptr) {
  size_t largest_block_size;
  if (opts.largest_required_pool_block == 0)
    largest_block_size = __default_largest_block_size;
  else if (opts.largest_required_pool_block < __smallest_block_size)
    largest_block_size = __smallest_block_size;
  else if (opts.largest_required_pool_block > __max_largest_block_size)
    largest_block_size = __max_largest_block_size;
  else
    largest_block_size = opts.largest_required_pool_block;

  if (opts.max_blocks_per_chunk == 0)
    __options_max_blocks_per_chunk_ = __max_blocks_per_chunk;
  else if (opts.max_blocks_per_chunk < __min_blocks_per_chunk)
    __options_max_blocks_per_chunk_ = __min_blocks_per_chunk;
  else if (opts.max_blocks_per_chunk > __max_blocks_per_chunk)
    __options_max_blocks_per_chunk_ = __max_blocks_per_chunk;
  else
    __options_max_blocks_per_chunk_ = opts.max_blocks_per_chunk;

  __num_fixed_pools_ = 1;
  size_t capacity    = __smallest_block_size;
  while (capacity < largest_block_size) {
    capacity <<= 1;
    __num_fixed_pools_ += 1;
  }
}
pool_options unsynchronized_pool_resource::options() const {
  pool_options p;
  p.max_blocks_per_chunk        = __options_max_blocks_per_chunk_;
  p.largest_required_pool_block = __pool_block_size(__num_fixed_pools_ - 1);
  return p;
}
void unsynchronized_pool_resource::release() {
  __adhoc_pool_.__release_ptr(__res_);
  if (__fixed_pools_ != nullptr) {
    const int n = __num_fixed_pools_;
    for (int i = 0; i < n; ++i)
      __fixed_pools_[i].__release_ptr(__res_);
    __res_->deallocate(__fixed_pools_, __num_fixed_pools_ * sizeof(__fixed_pool), alignof(__fixed_pool));
    __fixed_pools_ = nullptr;
  }
}
void* unsynchronized_pool_resource::do_allocate(size_t bytes, size_t align) {
  // A pointer to allocated storage (6.6.4.4.1) with a size of at least bytes.
  // The size and alignment of the allocated memory shall meet the requirements for
  // a class derived from memory_resource (23.12).
  // If the pool selected for a block of size bytes is unable to satisfy the memory request
  // from its own internal data structures, it will call upstream_resource()->allocate()
  // to obtain more memory. If bytes is larger than that which the largest pool can handle,
  // then memory will be allocated using upstream_resource()->allocate().

  int i = __pool_index(bytes, align);
  if (i == __num_fixed_pools_)
    return __adhoc_pool_.__do_allocate(__res_, bytes, align);
  else {
    if (__fixed_pools_ == nullptr) {
      __fixed_pools_ =
          (__fixed_pool*)__res_->allocate(__num_fixed_pools_ * sizeof(__fixed_pool), alignof(__fixed_pool));
      __fixed_pool* first = __fixed_pools_;
      __fixed_pool* last  = __fixed_pools_ + __num_fixed_pools_;
      for (__fixed_pool* pool = first; pool != last; ++pool)
        ::new ((void*)pool) __fixed_pool;
    }
    void* result = __fixed_pools_[i].__try_allocate_from_vacancies();
    if (result == nullptr) {
      auto min = [](size_t a, size_t b) { return a < b ? a : b; };
      auto max = [](size_t a, size_t b) { return a < b ? b : a; };

      size_t prev_chunk_size_in_bytes  = __fixed_pools_[i].__previous_chunk_size_in_bytes();
      size_t prev_chunk_size_in_blocks = prev_chunk_size_in_bytes >> __log2_pool_block_size(i);

      size_t chunk_size_in_blocks;

      if (prev_chunk_size_in_blocks == 0) {
        size_t min_blocks_per_chunk = max(__min_bytes_per_chunk >> __log2_pool_block_size(i), __min_blocks_per_chunk);
        chunk_size_in_blocks        = min_blocks_per_chunk;
      } else {
        static_assert(__max_bytes_per_chunk <= SIZE_MAX - (__max_bytes_per_chunk / 4), "unsigned overflow is possible");
        chunk_size_in_blocks = prev_chunk_size_in_blocks + (prev_chunk_size_in_blocks / 4);
      }

      size_t max_blocks_per_chunk =
          min((__max_bytes_per_chunk >> __log2_pool_block_size(i)),
              min(__max_blocks_per_chunk, __options_max_blocks_per_chunk_));
      if (chunk_size_in_blocks > max_blocks_per_chunk)
        chunk_size_in_blocks = max_blocks_per_chunk;

      size_t block_size = __pool_block_size(i);

      size_t chunk_size_in_bytes = (chunk_size_in_blocks << __log2_pool_block_size(i));
      result                     = __fixed_pools_[i].__allocate_in_new_chunk(__res_, block_size, chunk_size_in_bytes);
    }
    return result;
  }
}
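
// Usage sketch (client code, not part of this file); with the default options
// a 24-byte request is served by the 32-byte fixed pool:
//
//   std::pmr::unsynchronized_pool_resource pool; // upstream = get_default_resource()
//   void* p = pool.allocate(24, 8);
//   pool.deallocate(p, 24, 8); // pushed onto that pool's vacancy list
//   pool.release();            // every chunk goes back upstream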
void unsynchronized_pool_resource::do_deallocate(void* p, size_t bytes, size_t align) {
  // Returns the memory at p to the pool. It is unspecified if,
  // or under what circumstances, this operation will result in
  // a call to upstream_resource()->deallocate().

  int i = __pool_index(bytes, align);
  if (i == __num_fixed_pools_)
    return __adhoc_pool_.__do_deallocate(__res_, p, bytes, align);
  else {
    _LIBCPP_ASSERT_NON_NULL(
        __fixed_pools_ != nullptr, "deallocating a block that was not allocated with this allocator");
    __fixed_pools_[i].__evacuate(p);
  }
}
bool synchronized_pool_resource::do_is_equal(const memory_resource& other) const noexcept { return &other == this; }
// 23.12.6, mem.res.monotonic.buffer
constexpr size_t __default_growth_factor = 2;
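
// align_down carves `size` bytes off the top of the buffer [p1 - space, p1):
// it steps `ptr` down by `size`, then rounds down to `align` (a power of two),
// returning nullptr if the block would fall below the start of the buffer.
// E.g. with `ptr` at offset 100, space == 100, size == 10, align == 16, the
// block lands at offset 80 and `space` shrinks by 20.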
static void* align_down(size_t align, size_t size, void*& ptr, size_t& space) {
  if (size > space)
    return nullptr;

  char* p1      = static_cast<char*>(ptr);
  char* new_ptr = reinterpret_cast<char*>(reinterpret_cast<uintptr_t>(p1 - size) & ~(align - 1));

  if (new_ptr < (p1 - space))
    return nullptr;

  ptr = new_ptr;
  space -= p1 - new_ptr;

  return ptr;
}
template <bool is_initial, typename Chunk>
void* __try_allocate_from_chunk(Chunk& self, size_t bytes, size_t align) {
  if constexpr (is_initial) {
    // Only for __initial_descriptor: a null __cur_ means no initial buffer was
    // provided at construction, so there is nothing to allocate from here;
    // return nullptr and let the caller fall back to a new chunk.
    if (self.__cur_ == nullptr)
      return nullptr;
  }
  void* new_ptr       = static_cast<void*>(self.__cur_);
  size_t new_capacity = (self.__cur_ - self.__start_);
  void* aligned_ptr   = align_down(align, bytes, new_ptr, new_capacity);
  if (aligned_ptr != nullptr)
    self.__cur_ = static_cast<char*>(new_ptr);
  return aligned_ptr;
}
void* monotonic_buffer_resource::do_allocate(size_t bytes, size_t align) {
  const size_t footer_size  = sizeof(__chunk_footer);
  const size_t footer_align = alignof(__chunk_footer);

  auto previous_allocation_size = [&]() {
    if (__chunks_ != nullptr)
      return __chunks_->__allocation_size();

    size_t newsize = (__initial_.__start_ != nullptr) ? (__initial_.__end_ - __initial_.__start_) : __initial_.__size_;

    return roundup(newsize, footer_align) + footer_size;
  };

  if (void* result = __try_allocate_from_chunk<true, __initial_descriptor>(__initial_, bytes, align))
    return result;
  if (__chunks_ != nullptr) {
    if (void* result = __try_allocate_from_chunk<false, __chunk_footer>(*__chunks_, bytes, align))
      return result;
  }

  // Allocate a brand-new chunk.

  if (align < footer_align)
    align = footer_align;

  size_t aligned_capacity  = roundup(bytes, footer_align) + footer_size;
  size_t previous_capacity = previous_allocation_size();

  if (aligned_capacity <= previous_capacity) {
    size_t newsize   = __default_growth_factor * (previous_capacity - footer_size);
    aligned_capacity = roundup(newsize, footer_align) + footer_size;
  }

  char* start            = (char*)__res_->allocate(aligned_capacity, align);
  auto end               = start + aligned_capacity - footer_size;
  __chunk_footer* footer = (__chunk_footer*)(end);
  footer->__next_        = __chunks_;
  footer->__start_       = start;
  footer->__cur_         = end;
  footer->__align_       = align;
  __chunks_              = footer;

  return __try_allocate_from_chunk<false, __chunk_footer>(*__chunks_, bytes, align);
}
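
// Usage sketch (client code, not part of this file):
//
//   char buf[256];
//   std::pmr::monotonic_buffer_resource mono(buf, sizeof(buf));
//   void* a = mono.allocate(64, 8);  // carved (top-down) from buf
//   void* b = mono.allocate(512, 8); // too large for buf: a new upstream chunk is made
//   mono.release();                  // all upstream chunks are returned at once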
} // namespace pmr

_LIBCPP_END_NAMESPACE_STD