1 //===----------------------------------------------------------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
#include <memory_resource>

#ifndef _LIBCPP_HAS_NO_ATOMIC_HEADER
#  include <atomic>
#elif !defined(_LIBCPP_HAS_NO_THREADS)
#  include <mutex>
#  if defined(__ELF__) && defined(_LIBCPP_LINK_PTHREAD_LIB)
#    pragma comment(lib, "pthread")
#  endif
#endif
21 _LIBCPP_BEGIN_NAMESPACE_STD
27 memory_resource::~memory_resource() = default;
29 // new_delete_resource()
#ifdef _LIBCPP_HAS_NO_ALIGNED_ALLOCATION
// Returns true if `ptr` is aligned to at least `align` bytes.
// Implemented via std::align: aligning a 1-byte region starting at `ptr`
// yields `ptr` itself exactly when `ptr` is already suitably aligned
// (otherwise std::align returns nullptr because the 1-byte space is too small).
static bool is_aligned_to(void* ptr, size_t align) {
  void* p2     = ptr;
  size_t space = 1;
  void* result = std::align(align, 1, p2, space);
  return (result == ptr);
}
#endif
40 class _LIBCPP_EXPORTED_FROM_ABI __new_delete_memory_resource_imp
: public memory_resource
{
41 void* do_allocate(size_t bytes
, size_t align
) override
{
42 #ifndef _LIBCPP_HAS_NO_ALIGNED_ALLOCATION
43 return std::__libcpp_allocate(bytes
, align
);
47 void* result
= std::__libcpp_allocate(bytes
, align
);
48 if (!is_aligned_to(result
, align
)) {
49 std::__libcpp_deallocate(result
, bytes
, align
);
56 void do_deallocate(void* p
, size_t bytes
, size_t align
) override
{ std::__libcpp_deallocate(p
, bytes
, align
); }
58 bool do_is_equal(const memory_resource
& other
) const noexcept override
{ return &other
== this; }
61 // null_memory_resource()
63 class _LIBCPP_EXPORTED_FROM_ABI __null_memory_resource_imp
: public memory_resource
{
64 void* do_allocate(size_t, size_t) override
{ __throw_bad_alloc(); }
65 void do_deallocate(void*, size_t, size_t) override
{}
66 bool do_is_equal(const memory_resource
& other
) const noexcept override
{ return &other
== this; }
71 union ResourceInitHelper
{
73 __new_delete_memory_resource_imp new_delete_res
;
74 __null_memory_resource_imp null_res
;
77 constexpr ResourceInitHelper() : resources() {}
78 ~ResourceInitHelper() {}
81 // Pretend we're inside a system header so the compiler doesn't flag the use of the init_priority
82 // attribute with a value that's reserved for the implementation (we're the implementation).
83 #include "memory_resource_init_helper.h"
87 memory_resource
* new_delete_resource() noexcept
{ return &res_init
.resources
.new_delete_res
; }
89 memory_resource
* null_memory_resource() noexcept
{ return &res_init
.resources
.null_res
; }
91 // default_memory_resource()
93 static memory_resource
* __default_memory_resource(bool set
= false, memory_resource
* new_res
= nullptr) noexcept
{
94 #ifndef _LIBCPP_HAS_NO_ATOMIC_HEADER
95 static constinit atomic
<memory_resource
*> __res
{&res_init
.resources
.new_delete_res
};
97 new_res
= new_res
? new_res
: new_delete_resource();
98 // TODO: Can a weaker ordering be used?
99 return std::atomic_exchange_explicit(&__res
, new_res
, memory_order_acq_rel
);
101 return std::atomic_load_explicit(&__res
, memory_order_acquire
);
103 #elif !defined(_LIBCPP_HAS_NO_THREADS)
104 static constinit memory_resource
* res
= &res_init
.resources
.new_delete_res
;
105 static mutex res_lock
;
107 new_res
= new_res
? new_res
: new_delete_resource();
108 lock_guard
<mutex
> guard(res_lock
);
109 memory_resource
* old_res
= res
;
113 lock_guard
<mutex
> guard(res_lock
);
117 static constinit memory_resource
* res
= &res_init
.resources
.new_delete_res
;
119 new_res
= new_res
? new_res
: new_delete_resource();
120 memory_resource
* old_res
= res
;
129 memory_resource
* get_default_resource() noexcept
{ return __default_memory_resource(); }
131 memory_resource
* set_default_resource(memory_resource
* __new_res
) noexcept
{
132 return __default_memory_resource(true, __new_res
);
135 // 23.12.5, mem.res.pool
// Rounds `count` up to the nearest multiple of `alignment`.
// Precondition: `alignment` is a power of two (callers pass alignof results),
// so `alignment - 1` is a valid low-bit mask.
static size_t roundup(size_t count, size_t alignment) {
  size_t mask = alignment - 1;
  return (count + mask) & ~mask;
}
142 struct unsynchronized_pool_resource::__adhoc_pool::__chunk_footer
{
143 __chunk_footer
* __next_
;
146 size_t __allocation_size() { return (reinterpret_cast<char*>(this) - __start_
) + sizeof(*this); }
149 void unsynchronized_pool_resource::__adhoc_pool::__release_ptr(memory_resource
* upstream
) {
150 while (__first_
!= nullptr) {
151 __chunk_footer
* next
= __first_
->__next_
;
152 upstream
->deallocate(__first_
->__start_
, __first_
->__allocation_size(), __first_
->__align_
);
157 void* unsynchronized_pool_resource::__adhoc_pool::__do_allocate(memory_resource
* upstream
, size_t bytes
, size_t align
) {
158 const size_t footer_size
= sizeof(__chunk_footer
);
159 const size_t footer_align
= alignof(__chunk_footer
);
161 if (align
< footer_align
)
162 align
= footer_align
;
164 size_t aligned_capacity
= roundup(bytes
, footer_align
) + footer_size
;
166 void* result
= upstream
->allocate(aligned_capacity
, align
);
168 __chunk_footer
* h
= (__chunk_footer
*)((char*)result
+ aligned_capacity
- footer_size
);
169 h
->__next_
= __first_
;
170 h
->__start_
= (char*)result
;
176 void unsynchronized_pool_resource::__adhoc_pool::__do_deallocate(
177 memory_resource
* upstream
, void* p
, size_t bytes
, size_t align
) {
178 _LIBCPP_ASSERT_UNCATEGORIZED(__first_
!= nullptr, "deallocating a block that was not allocated with this allocator");
179 if (__first_
->__start_
== p
) {
180 __chunk_footer
* next
= __first_
->__next_
;
181 upstream
->deallocate(p
, __first_
->__allocation_size(), __first_
->__align_
);
184 for (__chunk_footer
* h
= __first_
; h
->__next_
!= nullptr; h
= h
->__next_
) {
185 if (h
->__next_
->__start_
== p
) {
186 __chunk_footer
* next
= h
->__next_
->__next_
;
187 upstream
->deallocate(p
, h
->__next_
->__allocation_size(), h
->__next_
->__align_
);
192 _LIBCPP_ASSERT_UNCATEGORIZED(false, "deallocating a block that was not allocated with this allocator");
196 class unsynchronized_pool_resource::__fixed_pool
{
197 struct __chunk_footer
{
198 __chunk_footer
* __next_
;
201 size_t __allocation_size() { return (reinterpret_cast<char*>(this) - __start_
) + sizeof(*this); }
204 struct __vacancy_header
{
205 __vacancy_header
* __next_vacancy_
;
208 __chunk_footer
* __first_chunk_
= nullptr;
209 __vacancy_header
* __first_vacancy_
= nullptr;
212 explicit __fixed_pool() = default;
214 void __release_ptr(memory_resource
* upstream
) {
215 __first_vacancy_
= nullptr;
216 while (__first_chunk_
!= nullptr) {
217 __chunk_footer
* next
= __first_chunk_
->__next_
;
218 upstream
->deallocate(__first_chunk_
->__start_
, __first_chunk_
->__allocation_size(), __first_chunk_
->__align_
);
219 __first_chunk_
= next
;
223 void* __try_allocate_from_vacancies() {
224 if (__first_vacancy_
!= nullptr) {
225 void* result
= __first_vacancy_
;
226 __first_vacancy_
= __first_vacancy_
->__next_vacancy_
;
232 void* __allocate_in_new_chunk(memory_resource
* upstream
, size_t block_size
, size_t chunk_size
) {
233 _LIBCPP_ASSERT_UNCATEGORIZED(chunk_size
% block_size
== 0, "");
234 static_assert(__default_alignment
>= alignof(std::max_align_t
), "");
235 static_assert(__default_alignment
>= alignof(__chunk_footer
), "");
236 static_assert(__default_alignment
>= alignof(__vacancy_header
), "");
238 const size_t footer_size
= sizeof(__chunk_footer
);
239 const size_t footer_align
= alignof(__chunk_footer
);
241 size_t aligned_capacity
= roundup(chunk_size
, footer_align
) + footer_size
;
243 void* result
= upstream
->allocate(aligned_capacity
, __default_alignment
);
245 __chunk_footer
* h
= (__chunk_footer
*)((char*)result
+ aligned_capacity
- footer_size
);
246 h
->__next_
= __first_chunk_
;
247 h
->__start_
= (char*)result
;
248 h
->__align_
= __default_alignment
;
251 if (chunk_size
> block_size
) {
252 __vacancy_header
* last_vh
= this->__first_vacancy_
;
253 for (size_t i
= block_size
; i
!= chunk_size
; i
+= block_size
) {
254 __vacancy_header
* vh
= (__vacancy_header
*)((char*)result
+ i
);
255 vh
->__next_vacancy_
= last_vh
;
258 this->__first_vacancy_
= last_vh
;
263 void __evacuate(void* p
) {
264 __vacancy_header
* vh
= (__vacancy_header
*)(p
);
265 vh
->__next_vacancy_
= __first_vacancy_
;
266 __first_vacancy_
= vh
;
269 size_t __previous_chunk_size_in_bytes() const { return __first_chunk_
? __first_chunk_
->__allocation_size() : 0; }
271 static const size_t __default_alignment
= alignof(max_align_t
);
274 size_t unsynchronized_pool_resource::__pool_block_size(int i
) const { return size_t(1) << __log2_pool_block_size(i
); }
276 int unsynchronized_pool_resource::__log2_pool_block_size(int i
) const { return (i
+ __log2_smallest_block_size
); }
278 int unsynchronized_pool_resource::__pool_index(size_t bytes
, size_t align
) const {
279 if (align
> alignof(std::max_align_t
) || bytes
> (size_t(1) << __num_fixed_pools_
))
280 return __num_fixed_pools_
;
283 bytes
= (bytes
> align
) ? bytes
: align
;
285 bytes
>>= __log2_smallest_block_size
;
294 unsynchronized_pool_resource::unsynchronized_pool_resource(const pool_options
& opts
, memory_resource
* upstream
)
295 : __res_(upstream
), __fixed_pools_(nullptr) {
296 size_t largest_block_size
;
297 if (opts
.largest_required_pool_block
== 0)
298 largest_block_size
= __default_largest_block_size
;
299 else if (opts
.largest_required_pool_block
< __smallest_block_size
)
300 largest_block_size
= __smallest_block_size
;
301 else if (opts
.largest_required_pool_block
> __max_largest_block_size
)
302 largest_block_size
= __max_largest_block_size
;
304 largest_block_size
= opts
.largest_required_pool_block
;
306 if (opts
.max_blocks_per_chunk
== 0)
307 __options_max_blocks_per_chunk_
= __max_blocks_per_chunk
;
308 else if (opts
.max_blocks_per_chunk
< __min_blocks_per_chunk
)
309 __options_max_blocks_per_chunk_
= __min_blocks_per_chunk
;
310 else if (opts
.max_blocks_per_chunk
> __max_blocks_per_chunk
)
311 __options_max_blocks_per_chunk_
= __max_blocks_per_chunk
;
313 __options_max_blocks_per_chunk_
= opts
.max_blocks_per_chunk
;
315 __num_fixed_pools_
= 1;
316 size_t capacity
= __smallest_block_size
;
317 while (capacity
< largest_block_size
) {
319 __num_fixed_pools_
+= 1;
323 pool_options
unsynchronized_pool_resource::options() const {
325 p
.max_blocks_per_chunk
= __options_max_blocks_per_chunk_
;
326 p
.largest_required_pool_block
= __pool_block_size(__num_fixed_pools_
- 1);
330 void unsynchronized_pool_resource::release() {
331 __adhoc_pool_
.__release_ptr(__res_
);
332 if (__fixed_pools_
!= nullptr) {
333 const int n
= __num_fixed_pools_
;
334 for (int i
= 0; i
< n
; ++i
)
335 __fixed_pools_
[i
].__release_ptr(__res_
);
336 __res_
->deallocate(__fixed_pools_
, __num_fixed_pools_
* sizeof(__fixed_pool
), alignof(__fixed_pool
));
337 __fixed_pools_
= nullptr;
341 void* unsynchronized_pool_resource::do_allocate(size_t bytes
, size_t align
) {
342 // A pointer to allocated storage (6.6.4.4.1) with a size of at least bytes.
343 // The size and alignment of the allocated memory shall meet the requirements for
344 // a class derived from memory_resource (23.12).
345 // If the pool selected for a block of size bytes is unable to satisfy the memory request
346 // from its own internal data structures, it will call upstream_resource()->allocate()
347 // to obtain more memory. If bytes is larger than that which the largest pool can handle,
348 // then memory will be allocated using upstream_resource()->allocate().
350 int i
= __pool_index(bytes
, align
);
351 if (i
== __num_fixed_pools_
)
352 return __adhoc_pool_
.__do_allocate(__res_
, bytes
, align
);
354 if (__fixed_pools_
== nullptr) {
356 (__fixed_pool
*)__res_
->allocate(__num_fixed_pools_
* sizeof(__fixed_pool
), alignof(__fixed_pool
));
357 __fixed_pool
* first
= __fixed_pools_
;
358 __fixed_pool
* last
= __fixed_pools_
+ __num_fixed_pools_
;
359 for (__fixed_pool
* pool
= first
; pool
!= last
; ++pool
)
360 ::new ((void*)pool
) __fixed_pool
;
362 void* result
= __fixed_pools_
[i
].__try_allocate_from_vacancies();
363 if (result
== nullptr) {
364 auto min
= [](size_t a
, size_t b
) { return a
< b
? a
: b
; };
365 auto max
= [](size_t a
, size_t b
) { return a
< b
? b
: a
; };
367 size_t prev_chunk_size_in_bytes
= __fixed_pools_
[i
].__previous_chunk_size_in_bytes();
368 size_t prev_chunk_size_in_blocks
= prev_chunk_size_in_bytes
>> __log2_pool_block_size(i
);
370 size_t chunk_size_in_blocks
;
372 if (prev_chunk_size_in_blocks
== 0) {
373 size_t min_blocks_per_chunk
= max(__min_bytes_per_chunk
>> __log2_pool_block_size(i
), __min_blocks_per_chunk
);
374 chunk_size_in_blocks
= min_blocks_per_chunk
;
376 static_assert(__max_bytes_per_chunk
<= SIZE_MAX
- (__max_bytes_per_chunk
/ 4), "unsigned overflow is possible");
377 chunk_size_in_blocks
= prev_chunk_size_in_blocks
+ (prev_chunk_size_in_blocks
/ 4);
380 size_t max_blocks_per_chunk
=
381 min((__max_bytes_per_chunk
>> __log2_pool_block_size(i
)),
382 min(__max_blocks_per_chunk
, __options_max_blocks_per_chunk_
));
383 if (chunk_size_in_blocks
> max_blocks_per_chunk
)
384 chunk_size_in_blocks
= max_blocks_per_chunk
;
386 size_t block_size
= __pool_block_size(i
);
388 size_t chunk_size_in_bytes
= (chunk_size_in_blocks
<< __log2_pool_block_size(i
));
389 result
= __fixed_pools_
[i
].__allocate_in_new_chunk(__res_
, block_size
, chunk_size_in_bytes
);
395 void unsynchronized_pool_resource::do_deallocate(void* p
, size_t bytes
, size_t align
) {
396 // Returns the memory at p to the pool. It is unspecified if,
397 // or under what circumstances, this operation will result in
398 // a call to upstream_resource()->deallocate().
400 int i
= __pool_index(bytes
, align
);
401 if (i
== __num_fixed_pools_
)
402 return __adhoc_pool_
.__do_deallocate(__res_
, p
, bytes
, align
);
404 _LIBCPP_ASSERT_UNCATEGORIZED(
405 __fixed_pools_
!= nullptr, "deallocating a block that was not allocated with this allocator");
406 __fixed_pools_
[i
].__evacuate(p
);
410 bool synchronized_pool_resource::do_is_equal(const memory_resource
& other
) const noexcept
{ return &other
== this; }
412 // 23.12.6, mem.res.monotonic.buffer
// Mirror of std::align that carves `size` bytes from the TOP of the buffer:
// given a one-past-the-end pointer `ptr` with `space` bytes available below
// it, aligns (ptr - size) DOWN to `align`. On success updates `ptr` to the
// aligned address, reduces `space` by the bytes consumed above it, and
// returns the aligned pointer; returns nullptr (inputs untouched) when the
// request does not fit. Precondition: `align` is a power of two.
static void* align_down(size_t align, size_t size, void*& ptr, size_t& space) {
  if (size > space)
    return nullptr;

  char* p1      = static_cast<char*>(ptr);
  char* new_ptr = reinterpret_cast<char*>(reinterpret_cast<uintptr_t>(p1 - size) & ~(align - 1));

  // Rounding down may have pushed the block below the available region.
  if (new_ptr < (p1 - space))
    return nullptr;

  ptr = new_ptr;
  space -= p1 - new_ptr;

  return ptr;
}
430 void* monotonic_buffer_resource::__initial_descriptor::__try_allocate_from_chunk(size_t bytes
, size_t align
) {
433 void* new_ptr
= static_cast<void*>(__cur_
);
434 size_t new_capacity
= (__cur_
- __start_
);
435 void* aligned_ptr
= align_down(align
, bytes
, new_ptr
, new_capacity
);
436 if (aligned_ptr
!= nullptr)
437 __cur_
= static_cast<char*>(new_ptr
);
441 void* monotonic_buffer_resource::__chunk_footer::__try_allocate_from_chunk(size_t bytes
, size_t align
) {
442 void* new_ptr
= static_cast<void*>(__cur_
);
443 size_t new_capacity
= (__cur_
- __start_
);
444 void* aligned_ptr
= align_down(align
, bytes
, new_ptr
, new_capacity
);
445 if (aligned_ptr
!= nullptr)
446 __cur_
= static_cast<char*>(new_ptr
);
450 void* monotonic_buffer_resource::do_allocate(size_t bytes
, size_t align
) {
451 const size_t footer_size
= sizeof(__chunk_footer
);
452 const size_t footer_align
= alignof(__chunk_footer
);
454 auto previous_allocation_size
= [&]() {
455 if (__chunks_
!= nullptr)
456 return __chunks_
->__allocation_size();
458 size_t newsize
= (__initial_
.__start_
!= nullptr) ? (__initial_
.__end_
- __initial_
.__start_
) : __initial_
.__size_
;
460 return roundup(newsize
, footer_align
) + footer_size
;
463 if (void* result
= __initial_
.__try_allocate_from_chunk(bytes
, align
))
465 if (__chunks_
!= nullptr) {
466 if (void* result
= __chunks_
->__try_allocate_from_chunk(bytes
, align
))
470 // Allocate a brand-new chunk.
472 if (align
< footer_align
)
473 align
= footer_align
;
475 size_t aligned_capacity
= roundup(bytes
, footer_align
) + footer_size
;
476 size_t previous_capacity
= previous_allocation_size();
478 if (aligned_capacity
<= previous_capacity
) {
479 size_t newsize
= 2 * (previous_capacity
- footer_size
);
480 aligned_capacity
= roundup(newsize
, footer_align
) + footer_size
;
483 char* start
= (char*)__res_
->allocate(aligned_capacity
, align
);
484 auto end
= start
+ aligned_capacity
- footer_size
;
485 __chunk_footer
* footer
= (__chunk_footer
*)(end
);
486 footer
->__next_
= __chunks_
;
487 footer
->__start_
= start
;
488 footer
->__cur_
= end
;
489 footer
->__align_
= align
;
492 return __chunks_
->__try_allocate_from_chunk(bytes
, align
);
497 _LIBCPP_END_NAMESPACE_STD