//===-- Implementation header for a block of memory -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIBC_SRC___SUPPORT_BLOCK_H
#define LLVM_LIBC_SRC___SUPPORT_BLOCK_H

#include "src/__support/CPP/algorithm.h"
#include "src/__support/CPP/cstddef.h"
#include "src/__support/CPP/limits.h"
#include "src/__support/CPP/new.h"
#include "src/__support/CPP/optional.h"
#include "src/__support/CPP/span.h"
#include "src/__support/CPP/type_traits.h"
#include "src/__support/libc_assert.h"
#include "src/__support/macros/config.h"

#include <stdint.h>

namespace LIBC_NAMESPACE_DECL {

namespace internal {
// Types of corrupted blocks, and functions to crash with an error message
// corresponding to each type.
enum class BlockStatus {
  VALID,
  MISALIGNED,
  PREV_MISMATCHED,
  NEXT_MISMATCHED,
};
} // namespace internal

/// Returns the value rounded down to the nearest multiple of alignment.
LIBC_INLINE constexpr size_t align_down(size_t value, size_t alignment) {
  // Note this shouldn't overflow since the result will always be <= value.
  return (value / alignment) * alignment;
}

/// Returns the value rounded down to the nearest multiple of alignment.
template <typename T>
LIBC_INLINE constexpr T *align_down(T *value, size_t alignment) {
  return reinterpret_cast<T *>(
      align_down(reinterpret_cast<size_t>(value), alignment));
}

/// Returns the value rounded up to the nearest multiple of alignment.
LIBC_INLINE constexpr size_t align_up(size_t value, size_t alignment) {
  __builtin_add_overflow(value, alignment - 1, &value);
  return align_down(value, alignment);
}
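
// A quick worked example of the helpers above (illustrative only, not part of
// the original header): with an alignment of 8,
//   align_down(13, 8) == 8
//   align_up(13, 8)   == 16   // 13 + 7 == 20, rounded down to 16
//   align_up(16, 8)   == 16   // already-aligned values are unchanged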

/// Returns the value rounded up to the nearest multiple of alignment.
template <typename T>
LIBC_INLINE constexpr T *align_up(T *value, size_t alignment) {
  return reinterpret_cast<T *>(
      align_up(reinterpret_cast<size_t>(value), alignment));
}

using ByteSpan = cpp::span<LIBC_NAMESPACE::cpp::byte>;
using cpp::optional;

/// Memory region with links to adjacent blocks.
///
/// The blocks store their offsets to the previous and next blocks. The latter
/// is also the block's size.
///
/// The `ALIGNMENT` constant provided by the derived block is typically the
/// minimum value of `alignof(OffsetType)`. Blocks will always be aligned to a
/// `ALIGNMENT` boundary. Block sizes will always be rounded up to a multiple of
/// `ALIGNMENT`.
///
/// As an example, the diagram below represents two contiguous
/// `Block<uint32_t, 8>`s. The indices indicate byte offsets:
///
/// +---------------------+--------------+
/// | Header              | Usable space |
/// +----------+----------+--------------+
/// | prev     | next     |              |
/// | 0......3 | 4......7 | 8........227 |
/// | 00000000 | 00000230 |  <app data>  |
/// +----------+----------+--------------+
///
/// +---------------------+--------------+
/// | Header              | Usable space |
/// +----------+----------+--------------+
/// | prev     | next     |              |
/// | 0......3 | 4......7 | 8........827 |
/// | 00000230 | 00000830 | f7f7....f7f7 |
/// +----------+----------+--------------+
///
/// As a space optimization, when a block is allocated, it consumes the prev
/// field of the following block:
///
/// +---------------------+--------------+
/// | Header              | Usable space |
/// +----------+----------+--------------+
/// | prev     | next     |              |
/// | 0......3 | 4......7 | 8........230 |
/// | 00000000 | 00000230 |  <app data>  |
/// +----------+----------+--------------+
///
/// +---------------------+--------------+
/// | B1       | Header   | Usable space |
/// +----------+----------+--------------+
/// |          | next     |              |
/// | 0......3 | 4......7 | 8........827 |
/// | xxxxxxxx | 00000830 | f7f7....f7f7 |
/// +----------+----------+--------------+
///
/// The next offset of a block matches the previous offset of its next block.
/// The first block in a list is denoted by having a previous offset of `0`.
///
/// @tparam OffsetType Unsigned integral type used to encode offsets. Larger
///                    types can address more memory, but consume greater
///                    overhead.
/// @tparam kAlign     Sets the overall alignment for blocks. Minimum is
///                    `alignof(OffsetType)`, but the default is max_align_t,
///                    since the usable space will then already be
///                    aligned to max_align_t if the size of OffsetType is no
///                    less than half of max_align_t. Larger values cause
///                    greater overhead.
template <typename OffsetType = uintptr_t, size_t kAlign = alignof(max_align_t)>
class Block {
  // Masks for the contents of the next_ field.
  static constexpr size_t PREV_FREE_MASK = 1 << 0;
  static constexpr size_t LAST_MASK = 1 << 1;
  static constexpr size_t SIZE_MASK = ~(PREV_FREE_MASK | LAST_MASK);

public:
  using offset_type = OffsetType;
  static_assert(cpp::is_unsigned_v<offset_type>,
                "offset type must be unsigned");
  static constexpr size_t ALIGNMENT =
      cpp::max(cpp::max(kAlign, alignof(offset_type)), size_t{4});
  static constexpr size_t BLOCK_OVERHEAD = align_up(sizeof(Block), ALIGNMENT);

  Block(const Block &other) = delete;
  Block &operator=(const Block &other) = delete;

  /// Creates the first block for a given memory region, followed by a sentinel
  /// last block. Returns the first block.
  static optional<Block *> init(ByteSpan region);

  /// @returns A pointer to a `Block`, given a pointer to the start of the
  /// usable space inside the block.
  ///
  /// This is the inverse of `usable_space()`.
  ///
  /// @warning This method does not do any checking; passing a random
  /// pointer will return a non-null pointer.
  static Block *from_usable_space(void *usable_space) {
    auto *bytes = reinterpret_cast<cpp::byte *>(usable_space);
    return reinterpret_cast<Block *>(bytes - BLOCK_OVERHEAD);
  }
  static const Block *from_usable_space(const void *usable_space) {
    const auto *bytes = reinterpret_cast<const cpp::byte *>(usable_space);
    return reinterpret_cast<const Block *>(bytes - BLOCK_OVERHEAD);
  }

  /// @returns The total size of the block in bytes, including the header.
  size_t outer_size() const { return next_ & SIZE_MASK; }

  static size_t outer_size(size_t inner_size) {
    // The usable region includes the prev_ field of the next block.
    return inner_size - sizeof(prev_) + BLOCK_OVERHEAD;
  }

  /// @returns The number of usable bytes inside the block.
  size_t inner_size() const {
    if (!next())
      return 0;
    return inner_size(outer_size());
  }

  static size_t inner_size(size_t outer_size) {
    // The usable region includes the prev_ field of the next block.
    return outer_size - BLOCK_OVERHEAD + sizeof(prev_);
  }
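
  // Worked example for the size helpers above (illustrative only, not part of
  // the original header): for a Block<uint32_t, 8>, sizeof(prev_) == 4 and
  // BLOCK_OVERHEAD == 8, so a block whose outer_size() is 0x230 bytes has
  //   inner_size()      == 0x230 - 8 + 4 == 0x22C
  // and the static helpers round-trip:
  //   outer_size(0x22C) == 0x22C - 4 + 8 == 0x230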

  /// @returns A pointer to the usable space inside this block.
  cpp::byte *usable_space() {
    return reinterpret_cast<cpp::byte *>(this) + BLOCK_OVERHEAD;
  }
  const cpp::byte *usable_space() const {
    return reinterpret_cast<const cpp::byte *>(this) + BLOCK_OVERHEAD;
  }

  /// @returns The region of memory the block manages, including the header.
  ByteSpan region() {
    return {reinterpret_cast<cpp::byte *>(this), outer_size()};
  }

  /// Attempts to split this block.
  ///
  /// If successful, the block will have an inner size of `new_inner_size`,
  /// rounded to ensure that the split point is on an ALIGNMENT boundary. The
  /// remaining space will be returned as a new block. Note that the prev_
  /// field of the next block counts as part of the inner size of the returned
  /// block.
  ///
  /// This method may fail if the remaining space is too small to hold a new
  /// block. If this method fails for any reason, the original block is
  /// unmodified.
  optional<Block *> split(size_t new_inner_size);

  /// Merges this block with the one that comes after it.
  bool merge_next();

  /// @returns The block immediately after this one, or a null pointer if this
  /// is the last block.
  Block *next() const;

  /// @returns The free block immediately before this one, otherwise nullptr.
  Block *prev_free() const;

  /// @returns Whether the block is unavailable for allocation.
  bool used() const { return !next() || !next()->prev_free(); }

  /// Marks this block as in use.
  void mark_used() {
    LIBC_ASSERT(next() && "last block is always considered used");
    next()->next_ &= ~PREV_FREE_MASK;
  }

  /// Marks this block as free.
  void mark_free() {
    LIBC_ASSERT(next() && "last block is always considered used");
    next()->next_ |= PREV_FREE_MASK;
    // The next block's prev_ field becomes alive, as it is no longer part of
    // this block's used space.
    *new (&next()->prev_) offset_type = outer_size();
  }

  /// Marks this block as the last one in the chain. Makes next() return
  /// nullptr.
  void mark_last() { next_ |= LAST_MASK; }

  constexpr Block(size_t outer_size);

  bool is_usable_space_aligned(size_t alignment) const {
    return reinterpret_cast<uintptr_t>(usable_space()) % alignment == 0;
  }

  /// @returns The new inner size of this block that would give the usable
  /// space of the next block the given alignment.
  size_t padding_for_alignment(size_t alignment) const {
    if (is_usable_space_aligned(alignment))
      return 0;

    // We need to ensure we can always split this block into a "padding" block
    // and the aligned block. To do this, we need enough extra space for at
    // least one additional block.
    //
    // |block   |usable_space                          |
    // |........|......................................|
    //                                 ^
    //                                 Alignment requirement
    //
    // |block   |space   |block   |usable_space        |
    // |........|........|........|....................|
    //                                 ^
    //                                 Alignment requirement
    //
    alignment = cpp::max(alignment, ALIGNMENT);
    uintptr_t start = reinterpret_cast<uintptr_t>(usable_space());
    uintptr_t next_usable_space = align_up(start + BLOCK_OVERHEAD, alignment);
    uintptr_t next_block = next_usable_space - BLOCK_OVERHEAD;
    return next_block - start + sizeof(prev_);
  }
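
  // Worked example for padding_for_alignment (illustrative only, not part of
  // the original header): assume a Block<uint32_t, 8> (BLOCK_OVERHEAD == 8,
  // sizeof(prev_) == 4) whose usable_space() sits at address 0x1008, and a
  // requested alignment of 64:
  //   next_usable_space = align_up(0x1008 + 8, 64) = 0x1040
  //   next_block        = 0x1040 - 8               = 0x1038
  //   padding           = 0x1038 - 0x1008 + 4      = 52
  // i.e. shrinking this block to an inner size of 52 bytes would place the
  // usable space of the block that follows it on a 64-byte boundary.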

  // Check that we can `allocate` a block with a given alignment and size from
  // this existing block.
  bool can_allocate(size_t alignment, size_t size) const;

  // This is the return type for `allocate` which can split one block into up
  // to three blocks.
  struct BlockInfo {
    // This is the newly aligned block. It will have the alignment requested by
    // a call to `allocate` and at most `size`.
    Block *block;

    // If the usable_space in the new block was not aligned according to the
    // `alignment` parameter, we will need to split into this block and the
    // `block` to ensure `block` is properly aligned. In this case, `prev` will
    // be a pointer to this new "padding" block. `prev` will be nullptr if no
    // new block was created or we were able to merge the block before the
    // original block with the "padding" block.
    Block *prev;

    // This is the remainder of the next block after splitting the `block`
    // according to `size`. This can happen if there's enough space after the
    // requested allocation.
    Block *next;
  };

  // Divide a block into up to 3 blocks according to `BlockInfo`. This should
  // only be called if `can_allocate` returns true.
  static BlockInfo allocate(Block *block, size_t alignment, size_t size);

private:
  /// Construct a block to represent a span of bytes. Overwrites only enough
  /// memory for the block header; the rest of the span is left alone.
  static Block *as_block(ByteSpan bytes);

  /// Like `split`, but assumes the caller has already checked the parameters
  /// to ensure the split will succeed.
  Block *split_impl(size_t new_inner_size);

  /// Offset from this block to the previous block. 0 if this is the first
  /// block. This field is only alive when the previous block is free;
  /// otherwise, its memory is reused as part of the previous block's usable
  /// space.
  offset_type prev_ = 0;

  /// Offset from this block to the next block. Valid even if this is the last
  /// block, since it equals the size of the block.
  offset_type next_ = 0;

  /// Information about the current state of the block is stored in the two low
  /// order bits of the next_ value. These are guaranteed free by a minimum
  /// alignment (and thus, alignment of the size) of 4. The lowest bit is the
  /// `prev_free` flag, and the other bit is the `last` flag.
  ///
  /// * If the `prev_free` flag is set, the block isn't the first and the
  ///   previous block is free.
  /// * If the `last` flag is set, the block is the sentinel last block. It is
  ///   summarily considered used and has no next block.
} __attribute__((packed, aligned(cpp::max(kAlign, size_t{4}))));
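
// Illustrative usage sketch (not part of the original header; the buffer and
// variable names below are hypothetical):
//
//   alignas(max_align_t) static cpp::byte buffer[1024];
//   auto maybe_block = Block<>::init(buffer);   // first block + sentinel last
//   if (maybe_block) {
//     Block<> *block = *maybe_block;
//     if (block->can_allocate(alignof(max_align_t), 128)) {
//       auto info = Block<>::allocate(block, alignof(max_align_t), 128);
//       info.block->mark_used();
//       void *ptr = info.block->usable_space();
//       // ... later, recover the block from the pointer and release it:
//       Block<>::from_usable_space(ptr)->mark_free();
//     }
//   }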

// Public template method implementations.

LIBC_INLINE ByteSpan get_aligned_subspan(ByteSpan bytes, size_t alignment) {
  if (bytes.data() == nullptr)
    return ByteSpan();

  auto unaligned_start = reinterpret_cast<uintptr_t>(bytes.data());
  auto aligned_start = align_up(unaligned_start, alignment);
  auto unaligned_end = unaligned_start + bytes.size();
  auto aligned_end = align_down(unaligned_end, alignment);

  if (aligned_end <= aligned_start)
    return ByteSpan();

  return bytes.subspan(aligned_start - unaligned_start,
                       aligned_end - aligned_start);
}
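
// Worked example for get_aligned_subspan (illustrative only, not part of the
// original header): for a span starting at address 0x1003 with size 0x20 and
// an alignment of 8,
//   aligned_start = align_up(0x1003, 8)          = 0x1008
//   aligned_end   = align_down(0x1003 + 0x20, 8) = 0x1020
// so the returned subspan starts 5 bytes into the original span and is
// 0x18 bytes long.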

template <typename OffsetType, size_t kAlign>
optional<Block<OffsetType, kAlign> *>
Block<OffsetType, kAlign>::init(ByteSpan region) {
  optional<ByteSpan> result = get_aligned_subspan(region, ALIGNMENT);
  if (!result)
    return {};

  region = result.value();
  // Two blocks are allocated: a free block and a sentinel last block.
  if (region.size() < 2 * BLOCK_OVERHEAD)
    return {};

  if (cpp::numeric_limits<OffsetType>::max() < region.size())
    return {};

  Block *block = as_block(region.first(region.size() - BLOCK_OVERHEAD));
  Block *last = as_block(region.last(BLOCK_OVERHEAD));
  block->mark_free();
  last->mark_last();
  return block;
}

template <typename OffsetType, size_t kAlign>
bool Block<OffsetType, kAlign>::can_allocate(size_t alignment,
                                             size_t size) const {
  if (inner_size() < size)
    return false;
  if (is_usable_space_aligned(alignment))
    return true;

  // Alignment isn't met, so a padding block is needed. Determine amount of
  // inner_size() consumed by the padding block.
  size_t padding_size = padding_for_alignment(alignment) - sizeof(prev_);

  // Check that there is room for the allocation in the following aligned block.
  size_t aligned_inner_size = inner_size() - padding_size - BLOCK_OVERHEAD;
  return size <= aligned_inner_size;
}

template <typename OffsetType, size_t kAlign>
typename Block<OffsetType, kAlign>::BlockInfo
Block<OffsetType, kAlign>::allocate(Block *block, size_t alignment,
                                    size_t size) {
  LIBC_ASSERT(
      block->can_allocate(alignment, size) &&
      "Calls to this function for a given alignment and size should only be "
      "done if `can_allocate` for these parameters returns true.");

  BlockInfo info{block, /*prev=*/nullptr, /*next=*/nullptr};

  if (!info.block->is_usable_space_aligned(alignment)) {
    Block *original = info.block;
    optional<Block *> maybe_aligned_block =
        original->split(info.block->padding_for_alignment(alignment));
    LIBC_ASSERT(maybe_aligned_block.has_value() &&
                "This split should always result in a new block. The check in "
                "`can_allocate` ensures that we have enough space here to make "
                "one.");

    if (Block *prev = original->prev_free()) {
      // If there is a free block before this, we can merge the current one with
      // the newly created one.
      prev->merge_next();
    } else {
      info.prev = original;
    }

    Block *aligned_block = *maybe_aligned_block;
    LIBC_ASSERT(aligned_block->is_usable_space_aligned(alignment) &&
                "The aligned block isn't aligned somehow.");
    info.block = aligned_block;
  }

  // Now get a block for the requested size.
  if (optional<Block *> next = info.block->split(size))
    info.next = *next;

  return info;
}

template <typename OffsetType, size_t kAlign>
optional<Block<OffsetType, kAlign> *>
Block<OffsetType, kAlign>::split(size_t new_inner_size) {
  if (used())
    return {};
  // The prev_ field of the next block is always available, so there is a
  // minimum size to a block created through splitting.
  if (new_inner_size < sizeof(prev_))
    return {};

  size_t old_inner_size = inner_size();
  new_inner_size =
      align_up(new_inner_size - sizeof(prev_), ALIGNMENT) + sizeof(prev_);
  if (old_inner_size < new_inner_size)
    return {};

  if (old_inner_size - new_inner_size < BLOCK_OVERHEAD)
    return {};

  return split_impl(new_inner_size);
}
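
// Worked example of the rounding in split (illustrative only, not part of the
// original header): with ALIGNMENT == 8 and sizeof(prev_) == 4, a request for
// new_inner_size == 30 is rounded to
//   align_up(30 - 4, 8) + 4 == 32 + 4 == 36
// so the split succeeds only if the old inner size is at least 36 bytes plus
// BLOCK_OVERHEAD for the remainder block.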

template <typename OffsetType, size_t kAlign>
Block<OffsetType, kAlign> *
Block<OffsetType, kAlign>::split_impl(size_t new_inner_size) {
  size_t outer_size1 = outer_size(new_inner_size);
  LIBC_ASSERT(outer_size1 % ALIGNMENT == 0 && "new size must be aligned");
  ByteSpan new_region = region().subspan(outer_size1);
  next_ &= ~SIZE_MASK;
  next_ |= outer_size1;

  Block *new_block = as_block(new_region);
  mark_free(); // Free status for this block is now stored in new_block.
  new_block->next()->prev_ = new_region.size();
  return new_block;
}

template <typename OffsetType, size_t kAlign>
bool Block<OffsetType, kAlign>::merge_next() {
  if (used() || next()->used())
    return false;
  size_t new_size = outer_size() + next()->outer_size();
  next_ &= ~SIZE_MASK;
  next_ |= new_size;
  next()->prev_ = new_size;
  return true;
}

template <typename OffsetType, size_t kAlign>
Block<OffsetType, kAlign> *Block<OffsetType, kAlign>::next() const {
  if (next_ & LAST_MASK)
    return nullptr;
  return reinterpret_cast<Block *>(reinterpret_cast<uintptr_t>(this) +
                                   outer_size());
}

template <typename OffsetType, size_t kAlign>
Block<OffsetType, kAlign> *Block<OffsetType, kAlign>::prev_free() const {
  if (!(next_ & PREV_FREE_MASK))
    return nullptr;
  return reinterpret_cast<Block *>(reinterpret_cast<uintptr_t>(this) - prev_);
}

// Private template method implementations.

template <typename OffsetType, size_t kAlign>
constexpr Block<OffsetType, kAlign>::Block(size_t outer_size)
    : next_(outer_size) {
  LIBC_ASSERT(outer_size % ALIGNMENT == 0 && "block sizes must be aligned");
}

template <typename OffsetType, size_t kAlign>
Block<OffsetType, kAlign> *Block<OffsetType, kAlign>::as_block(ByteSpan bytes) {
  return ::new (bytes.data()) Block(bytes.size());
}

} // namespace LIBC_NAMESPACE_DECL

#endif // LLVM_LIBC_SRC___SUPPORT_BLOCK_H