/* wmem_allocator_block.c
 * Wireshark Memory Manager Large-Block Allocator (version 3)
 * Copyright 2013, Evan Huus <eapache@gmail.com>
 *
 * Wireshark - Network traffic analyzer
 * By Gerald Combs <gerald@wireshark.org>
 * Copyright 1998 Gerald Combs
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include <stdint.h>
#include <stdbool.h>
#include <string.h>

#include <glib.h>

#include "wmem_core.h"
#include "wmem_allocator.h"
#include "wmem_allocator_block.h"

/* This has turned into a very interesting exercise in algorithms and data
 * structures.
 *
 * Version 1 of this allocator was embedded in the original emem framework. It
 * didn't have to handle realloc or free, so it was very simple: it just grabbed
 * a block from the OS and served allocations sequentially out of that until it
 * ran out, then allocated a new block. The old block was never revisited, so
 * it generally had a bit of wasted space at the end, but the waste was
 * small enough that it was simply ignored. This allocator provided very fast
 * constant-time allocation for any request that didn't require a new block from
 * the OS, and that cost could be amortized away.
 *
 * Version 2 of this allocator was prompted by the need to support realloc and
 * free in wmem. The original version simply didn't save enough metadata to do
 * this, so I added a layer on top to make it possible. The primary principle
 * was the same (allocate sequentially out of big blocks) with a bit of extra
 * magic. Allocations were still fast constant-time, and frees were as well.
 * Large parts of that design are still present in this one, but for more
 * details see older versions of this file from git or svn.
 *
 * Version 3 of this allocator was written to address some issues that
 * eventually showed up with version 2 under real-world usage. Specifically,
 * version 2 dealt very poorly with memory fragmentation, almost never reusing
 * freed blocks and choosing to just keep allocating from the master block
 * instead. This led to particularly poor behaviour under the tick-tock loads
 * (alloc/free/alloc/free or alloc/alloc/free/alloc/alloc/free or ...) that
 * showed up in a couple of different protocol dissectors (TCP, Kafka).
 *
 * As in previous versions, allocations typically happen sequentially out of
 * large OS-level blocks. Each block has a short embedded header used to
 * maintain a doubly-linked list of all blocks (used or not) currently owned by
 * the allocator. Each block is divided into chunks, which represent allocations
 * and free sections (a block is initialized with one large, free, chunk). Each
 * chunk is prefixed with a wmem_block_chunk_t structure, which is a short
 * metadata header (8 bytes, regardless of 32 or 64-bit architecture unless
 * alignment requires it to be padded) that contains the length of the chunk,
 * the length of the previous chunk, a flag marking the chunk as free or used,
 * and a flag marking the last chunk in a block. This serves to implement an
 * inline sequential doubly-linked list of all the chunks in each block. A block
 * with three chunks might look something like this:
 *
 *  0                    _________________________
 *  ^  ___________      /        ______________   \        __________
 * ||---||--|-----/-----------||--------/--------------||--\-----/----------||
 * ||hdr|| prv | len |  body  || prv | len |  body     || prv | len | body  ||
 * ||---||--------------------||--/--------------------||-------------------||
 *            \______________________/
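 *
 * For example, a freshly initialized block holds a single free chunk whose
 * header reads prev == 0 and last == true, with
 * len == WMEM_BLOCK_SIZE - WMEM_BLOCK_HEADER_SIZE; carving allocations out of
 * that chunk is what produces a multi-chunk picture like the one above.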
 *
 * When allocating, a free chunk is found (more on that later) and split into
 * two chunks: the first of the requested size and the second containing any
 * remaining free space. The first is marked used and returned to the caller.
 *
 * When freeing, the chunk in question is marked as free. Its neighbouring
 * chunks are then checked; if either of them is free, the consecutive free
 * chunks are merged into a single larger free chunk. Induction can show that
 * applying this operation consistently prevents us from ever having consecutive
 * free chunks.
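 *
 * For example, freeing a used chunk that sits between two free chunks
 * collapses all three into a single free chunk whose len is the sum of the
 * three, so a walk along the block never encounters two free chunks in a row.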
 *
 * Free chunks (because they are not being used for anything else) each store an
 * additional pair of pointers (see the wmem_block_free_t structure) that form
 * the backbone of the data structures used to track free chunks.
 *
 * The extra pair of pointers in free chunks is used to build two doubly-linked
 * lists: the master and the recycler. The recycler is circular, the master is
 * a stack.
 *
 * The master stack is only populated by chunks from new OS-level blocks,
 * so every chunk in this list is guaranteed to be able to serve any allocation
 * request (the allocator will not serve requests larger than its block size).
 * The chunk at the head of the master list shrinks as it serves requests. When
 * it is too small to serve the current request, it is popped and inserted into
 * the recycler. If the master list is empty, a new OS-level block is allocated,
 * and its chunk is pushed onto the master stack.
 *
 * The recycler is populated by 'leftovers' from the master, as well as any
 * chunks that were returned to the allocator via a call to free(). Although the
 * recycler is circular, we will refer to the element referenced from the
 * allocator as the 'head' of the list for convenience. The primary operation on
 * the recycler is called cycling it. In this operation, the head is compared
 * with its clockwise neighbour. If the neighbour is as large or larger, it
 * becomes the head (the list rotates counter-clockwise). If the neighbour is
 * smaller, then it is removed from its location and inserted as the counter-
 * clockwise neighbour of the head (the list still rotates counter-clockwise,
 * but the head element is held fixed while the rest of the list spins). This
 * operation has the following properties:
 *      - fast constant time
 *      - once the largest chunk is at the head, it remains at the head
 *      - more iterations increase the probability that the largest chunk will
 *        be the head (for a list with n items, n iterations guarantee that the
 *        largest chunk will be the head).
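 *
 * For example, cycling a recycler holding chunks of sizes 5, 3 and 8 (listed
 * clockwise from the head) first tucks the smaller 3 behind the head, giving
 * 5, 8, 3; the next cycle sees 8 >= 5 and makes 8 the head, giving 8, 3, 5;
 * from then on 8 stays at the head however often the recycler is cycled.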
 *
 * When an allocation request is received, the allocator first attempts to
 * satisfy it with the chunk at the head of the recycler. If that does not
 * succeed, the request is satisfied by the master list instead. Regardless of
 * which chunk satisfied the request, the recycler is always cycled.
 */

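/* In code terms (see wmem_block_alloc() below): a request for `size` bytes is
 * served from allocator->recycler_head when that chunk's data length is at
 * least `size`; otherwise it is served from allocator->master_head, popping an
 * undersized master head into the recycler first and allocating a fresh
 * OS-level block if the master list is empty. */
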
/* https://mail.gnome.org/archives/gtk-devel-list/2004-December/msg00091.html
 * The 2*sizeof(size_t) alignment here is borrowed from GNU libc, so it should
 * be good most everywhere. It is more conservative than is needed on some
 * 64-bit platforms, but ia64 does require a 16-byte alignment. The SIMD
 * extensions for x86 and ppc32 would want a larger alignment than this, but
 * we don't need to do better than malloc.
 */
#define WMEM_ALIGN_AMOUNT (2 * sizeof (size_t))
#define WMEM_ALIGN_SIZE(SIZE) ((~(WMEM_ALIGN_AMOUNT-1)) & \
        ((SIZE) + (WMEM_ALIGN_AMOUNT-1)))

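/* For example, on an LP64 platform WMEM_ALIGN_AMOUNT is 16, so
 * WMEM_ALIGN_SIZE(5) == 16, WMEM_ALIGN_SIZE(16) == 16 and
 * WMEM_ALIGN_SIZE(17) == 32. */
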
/* When required, allocate more memory from the OS in chunks of this size.
 * 8MB is a pretty arbitrary value - it's big enough that it should last a while
 * and small enough that a mostly-unused one doesn't waste *too* much. It's
 * also a nice power of two, of course. */
#define WMEM_BLOCK_SIZE (8 * 1024 * 1024)

/* The header for an entire OS-level 'block' of memory */
typedef struct _wmem_block_hdr_t {
    struct _wmem_block_hdr_t *prev, *next;
} wmem_block_hdr_t;

/* The header for a single 'chunk' of memory as returned from alloc/realloc.
 * The 'jumbo' flag indicates an allocation larger than a normal-sized block
 * would be capable of serving. If this is set, it is the only chunk in the
 * block and the other chunk header fields are irrelevant.
 */
typedef struct _wmem_block_chunk_t {
    uint32_t prev;

    /* flags */
    uint32_t last:1;
    uint32_t used:1;
    uint32_t jumbo:1;

    uint32_t len:29;
} wmem_block_chunk_t;

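/* Note: the prev count plus the three flag bits and the 29-bit len pack into
 * two 32-bit words, giving the 8-byte header promised in the design notes
 * above; WMEM_CHUNK_HEADER_SIZE below rounds that up to the alignment amount
 * when necessary. */
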
/* Handy macros for navigating the chunks in a block as if they were a
 * doubly-linked list. */
#define WMEM_CHUNK_PREV(CHUNK) ((CHUNK)->prev \
        ? ((wmem_block_chunk_t*)(((uint8_t*)(CHUNK)) - (CHUNK)->prev)) \
        : NULL)

#define WMEM_CHUNK_NEXT(CHUNK) ((CHUNK)->last \
        ? NULL \
        : ((wmem_block_chunk_t*)(((uint8_t*)(CHUNK)) + (CHUNK)->len)))

#define WMEM_CHUNK_HEADER_SIZE WMEM_ALIGN_SIZE(sizeof(wmem_block_chunk_t))

#define WMEM_BLOCK_MAX_ALLOC_SIZE (WMEM_BLOCK_SIZE - \
        (WMEM_BLOCK_HEADER_SIZE + WMEM_CHUNK_HEADER_SIZE))

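/* On a typical LP64 platform both header sizes above align to 16 bytes, so
 * the largest non-jumbo request is 8MB minus 32 bytes; anything larger is
 * handled by the jumbo-allocation path further down. */
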
/* other handy chunk macros */
#define WMEM_CHUNK_TO_DATA(CHUNK) ((void*)((uint8_t*)(CHUNK) + WMEM_CHUNK_HEADER_SIZE))
#define WMEM_DATA_TO_CHUNK(DATA) ((wmem_block_chunk_t*)((uint8_t*)(DATA) - WMEM_CHUNK_HEADER_SIZE))
#define WMEM_CHUNK_DATA_LEN(CHUNK) ((CHUNK)->len - WMEM_CHUNK_HEADER_SIZE)

/* some handy block macros */
#define WMEM_BLOCK_HEADER_SIZE WMEM_ALIGN_SIZE(sizeof(wmem_block_hdr_t))
#define WMEM_BLOCK_TO_CHUNK(BLOCK) ((wmem_block_chunk_t*)((uint8_t*)(BLOCK) + WMEM_BLOCK_HEADER_SIZE))
#define WMEM_CHUNK_TO_BLOCK(CHUNK) ((wmem_block_hdr_t*)((uint8_t*)(CHUNK) - WMEM_BLOCK_HEADER_SIZE))

/* This is what the 'data' section of a chunk contains if it is free. */
typedef struct _wmem_block_free_t {
    wmem_block_chunk_t *prev, *next;
} wmem_block_free_t;

/* Handy macro for accessing the free-header of a chunk */
#define WMEM_GET_FREE(CHUNK) ((wmem_block_free_t*)WMEM_CHUNK_TO_DATA(CHUNK))

typedef struct _wmem_block_allocator_t {
    wmem_block_hdr_t   *block_list;
    wmem_block_chunk_t *master_head;
    wmem_block_chunk_t *recycler_head;
} wmem_block_allocator_t;

static int
wmem_block_verify_block(wmem_block_hdr_t *block)
{
    int                 total_free_space = 0;
    uint32_t            total_len;
    wmem_block_chunk_t *chunk;

    chunk     = WMEM_BLOCK_TO_CHUNK(block);
    total_len = WMEM_BLOCK_HEADER_SIZE;

    if (chunk->jumbo) {
        /* We can tell nothing else about jumbo chunks except that they are
         * always used. */
        return 0;
    }

    g_assert_true(chunk->prev == 0);

    do {
        total_len += chunk->len;

        g_assert_true(chunk->len >= WMEM_CHUNK_HEADER_SIZE);
        g_assert_true(!chunk->jumbo);

        if (WMEM_CHUNK_NEXT(chunk)) {
            g_assert_true(chunk->len == WMEM_CHUNK_NEXT(chunk)->prev);
        }

        if (!chunk->used &&
                WMEM_CHUNK_DATA_LEN(chunk) >= sizeof(wmem_block_free_t)) {

            total_free_space += chunk->len;

            if (!chunk->last) {
                g_assert_true(WMEM_GET_FREE(chunk)->next);
                g_assert_true(WMEM_GET_FREE(chunk)->prev);
            }
        }

        chunk = WMEM_CHUNK_NEXT(chunk);
    } while (chunk);

    g_assert_true(total_len == WMEM_BLOCK_SIZE);

    return total_free_space;
}

static int
wmem_block_verify_master_list(wmem_block_allocator_t *allocator)
{
    wmem_block_chunk_t *cur;
    wmem_block_free_t  *cur_free;
    int                 free_space = 0;

    cur = allocator->master_head;
    if (!cur) {
        return 0;
    }

    g_assert_true(WMEM_GET_FREE(cur)->prev == NULL);

    while (cur) {
        free_space += cur->len;

        cur_free = WMEM_GET_FREE(cur);

        g_assert_true(! cur->used);

        if (cur_free->next) {
            g_assert_true(WMEM_GET_FREE(cur_free->next)->prev == cur);
        }

        if (cur != allocator->master_head) {
            g_assert_true(cur->len == WMEM_BLOCK_SIZE - WMEM_BLOCK_HEADER_SIZE);
        }

        cur = cur_free->next;
    }

    return free_space;
}

static int
wmem_block_verify_recycler(wmem_block_allocator_t *allocator)
{
    wmem_block_chunk_t *cur;
    wmem_block_free_t  *cur_free;
    int                 free_space = 0;

    cur = allocator->recycler_head;
    if (!cur) {
        return 0;
    }

    do {
        free_space += cur->len;

        cur_free = WMEM_GET_FREE(cur);

        g_assert_true(! cur->used);

        g_assert_true(cur_free->prev);
        g_assert_true(cur_free->next);

        g_assert_true(WMEM_GET_FREE(cur_free->prev)->next == cur);
        g_assert_true(WMEM_GET_FREE(cur_free->next)->prev == cur);

        cur = cur_free->next;
    } while (cur != allocator->recycler_head);

    return free_space;
}

void
wmem_block_verify(wmem_allocator_t *allocator)
{
    wmem_block_hdr_t       *cur;
    wmem_block_allocator_t *private_allocator;
    int master_free, recycler_free, chunk_free = 0;

    /* Normally it would be bad for an allocator helper function to depend
     * on receiving the right type of allocator, but this is for testing only
     * and is not part of any real API. */
    g_assert_true(allocator->type == WMEM_ALLOCATOR_BLOCK);

    private_allocator = (wmem_block_allocator_t*) allocator->private_data;

    if (private_allocator->block_list == NULL) {
        g_assert_true(! private_allocator->master_head);
        g_assert_true(! private_allocator->recycler_head);
        return;
    }

    master_free   = wmem_block_verify_master_list(private_allocator);
    recycler_free = wmem_block_verify_recycler(private_allocator);

    cur = private_allocator->block_list;
    g_assert_true(cur->prev == NULL);
    while (cur) {
        if (cur->next) {
            g_assert_true(cur->next->prev == cur);
        }
        chunk_free += wmem_block_verify_block(cur);
        cur = cur->next;
    }

    g_assert_true(chunk_free == master_free + recycler_free);
}

/* MASTER/RECYCLER HELPERS */

/* Cycles the recycler. See the design notes at the top of this file for more
 * details. */
static void
wmem_block_cycle_recycler(wmem_block_allocator_t *allocator)
{
    wmem_block_chunk_t *chunk;
    wmem_block_free_t  *free_chunk;

    chunk = allocator->recycler_head;

    if (chunk == NULL) {
        return;
    }

    free_chunk = WMEM_GET_FREE(chunk);

    if (free_chunk->next->len < chunk->len) {
        /* Hold the current head fixed during rotation. */
        WMEM_GET_FREE(free_chunk->next)->prev = free_chunk->prev;
        WMEM_GET_FREE(free_chunk->prev)->next = free_chunk->next;

        free_chunk->prev = free_chunk->next;
        free_chunk->next = WMEM_GET_FREE(free_chunk->next)->next;

        WMEM_GET_FREE(free_chunk->next)->prev = chunk;
        WMEM_GET_FREE(free_chunk->prev)->next = chunk;
    }
    else {
        /* Just rotate everything. */
        allocator->recycler_head = free_chunk->next;
    }
}

/* Adds a chunk to the recycler. */
static void
wmem_block_add_to_recycler(wmem_block_allocator_t *allocator,
                           wmem_block_chunk_t *chunk)
{
    wmem_block_free_t *free_chunk;

    if (WMEM_CHUNK_DATA_LEN(chunk) < sizeof(wmem_block_free_t)) {
        return;
    }

    free_chunk = WMEM_GET_FREE(chunk);

    if (! allocator->recycler_head) {
        /* this is the only chunk in the recycler, so it points to itself */
        free_chunk->next         = chunk;
        free_chunk->prev         = chunk;
        allocator->recycler_head = chunk;
    }
    else {
        free_chunk->next = allocator->recycler_head;
        free_chunk->prev = WMEM_GET_FREE(allocator->recycler_head)->prev;

        WMEM_GET_FREE(free_chunk->next)->prev = chunk;
        WMEM_GET_FREE(free_chunk->prev)->next = chunk;

        if (chunk->len > allocator->recycler_head->len) {
            allocator->recycler_head = chunk;
        }
    }
}

/* Removes a chunk from the recycler. */
static void
wmem_block_remove_from_recycler(wmem_block_allocator_t *allocator,
                                wmem_block_chunk_t *chunk)
{
    wmem_block_free_t *free_chunk;

    free_chunk = WMEM_GET_FREE(chunk);

    if (free_chunk->prev == chunk && free_chunk->next == chunk) {
        /* Only one item in recycler, just empty it. */
        allocator->recycler_head = NULL;
    }
    else {
        /* Two or more items, usual doubly-linked-list removal. It's circular
         * so we don't need to worry about null-checking anything, which is
         * nice. */
        WMEM_GET_FREE(free_chunk->prev)->next = free_chunk->next;
        WMEM_GET_FREE(free_chunk->next)->prev = free_chunk->prev;
        if (allocator->recycler_head == chunk) {
            allocator->recycler_head = free_chunk->next;
        }
    }
}

/* Pushes a chunk onto the master stack. */
static void
wmem_block_push_master(wmem_block_allocator_t *allocator,
                       wmem_block_chunk_t *chunk)
{
    wmem_block_free_t *free_chunk;

    free_chunk = WMEM_GET_FREE(chunk);
    free_chunk->prev = NULL;
    free_chunk->next = allocator->master_head;
    if (free_chunk->next) {
        WMEM_GET_FREE(free_chunk->next)->prev = chunk;
    }
    allocator->master_head = chunk;
}

/* Removes the top chunk from the master stack. */
static void
wmem_block_pop_master(wmem_block_allocator_t *allocator)
{
    wmem_block_chunk_t *chunk;
    wmem_block_free_t  *free_chunk;

    chunk = allocator->master_head;

    free_chunk = WMEM_GET_FREE(chunk);

    allocator->master_head = free_chunk->next;
    if (free_chunk->next) {
        WMEM_GET_FREE(free_chunk->next)->prev = NULL;
    }
}

/* Takes a free chunk and checks the chunks to its immediate right and left in
 * the block. If they are also free, the contiguous free chunks are merged into
 * a single free chunk. The resulting chunk ends up in either the master list or
 * the recycler, depending on where the merged chunks were originally. */
static void
wmem_block_merge_free(wmem_block_allocator_t *allocator,
                      wmem_block_chunk_t *chunk)
{
    wmem_block_chunk_t *tmp;
    wmem_block_chunk_t *left_free  = NULL;
    wmem_block_chunk_t *right_free = NULL;

    /* Check the chunk to our right. If it is free, merge it into our current
     * chunk. If it is big enough to hold a free-header, save it for later (we
     * need to know about the left chunk before we decide what goes where). */
    tmp = WMEM_CHUNK_NEXT(chunk);
    if (tmp && !tmp->used) {
        if (WMEM_CHUNK_DATA_LEN(tmp) >= sizeof(wmem_block_free_t)) {
            right_free = tmp;
        }
        chunk->len += tmp->len;
        chunk->last = tmp->last;
    }

    /* Check the chunk to our left. If it is free, merge our current chunk into
     * it (thus chunk = tmp). As before, save it if it has enough space to
     * hold a free-header. */
    tmp = WMEM_CHUNK_PREV(chunk);
    if (tmp && !tmp->used) {
        if (WMEM_CHUNK_DATA_LEN(tmp) >= sizeof(wmem_block_free_t)) {
            left_free = tmp;
        }
        tmp->len += chunk->len;
        tmp->last = chunk->last;
        chunk = tmp;
    }

    /* The length of our chunk may have changed. If we have a chunk following,
     * update its 'prev' count. */
    if (WMEM_CHUNK_NEXT(chunk)) {
        WMEM_CHUNK_NEXT(chunk)->prev = chunk->len;
    }

    /* Now that the chunk headers are merged and consistent, we need to figure
     * out what goes where in which free list. */
    if (right_free && right_free == allocator->master_head) {
        /* If we merged right, and that chunk was the head of the master list,
         * then we leave the resulting chunk at the head of the master list. */
        wmem_block_free_t *moved;

        if (left_free) {
            wmem_block_remove_from_recycler(allocator, left_free);
        }

        moved = WMEM_GET_FREE(chunk);
        moved->prev = NULL;
        moved->next = WMEM_GET_FREE(right_free)->next;
        allocator->master_head = chunk;
        if (moved->next) {
            WMEM_GET_FREE(moved->next)->prev = chunk;
        }
    }
    else {
        /* Otherwise, we remove the right-merged chunk (if there was one) from
         * the recycler. Then, if we merged left we have nothing to do, since
         * that recycler entry is still valid. If not, we add the chunk. */
        if (right_free) {
            wmem_block_remove_from_recycler(allocator, right_free);
        }
        if (!left_free) {
            wmem_block_add_to_recycler(allocator, chunk);
        }
    }
}

/* Takes an unused chunk and a size, and splits it into two chunks if possible.
 * The first chunk (at the same address as the input chunk) is guaranteed to
 * hold at least `size` bytes of data, and to not be in either the master or
 * recycler lists.
 *
 * The second chunk gets whatever data is left over. It is marked unused and
 * replaces the input chunk in whichever list it originally inhabited. */
static void
wmem_block_split_free_chunk(wmem_block_allocator_t *allocator,
                            wmem_block_chunk_t *chunk,
                            const size_t size)
{
    wmem_block_chunk_t *extra;
    wmem_block_free_t  *old_blk, *new_blk;
    size_t aligned_size, available;
    bool   last;

    aligned_size = WMEM_ALIGN_SIZE(size) + WMEM_CHUNK_HEADER_SIZE;

    if (WMEM_CHUNK_DATA_LEN(chunk) < aligned_size + sizeof(wmem_block_free_t)) {
        /* If the available space is not enough to store all of
         * (hdr + requested size + alignment padding + hdr + free-header) then
         * just remove the current chunk from the free list and return, since we
         * can't usefully split it. */
        if (chunk == allocator->master_head) {
            wmem_block_pop_master(allocator);
        }
        else if (WMEM_CHUNK_DATA_LEN(chunk) >= sizeof(wmem_block_free_t)) {
            wmem_block_remove_from_recycler(allocator, chunk);
        }
        return;
    }

    /* preserve a few values from chunk that we'll need to manipulate */
    last      = chunk->last;
    available = chunk->len - aligned_size;

    /* set new values for chunk */
    chunk->len  = (uint32_t) aligned_size;
    chunk->last = false;

    /* with chunk's values set, we can use the standard macro to calculate
     * the location and size of the new free chunk */
    extra = WMEM_CHUNK_NEXT(chunk);

    /* Now we move the free chunk's address without changing its location
     * in whichever list it is in.
     *
     * Note that the new chunk header 'extra' may overlap the old free header,
     * so we have to copy the free header before we write anything to extra.
     */
    old_blk = WMEM_GET_FREE(chunk);
    new_blk = WMEM_GET_FREE(extra);

    if (allocator->master_head == chunk) {
        new_blk->prev = old_blk->prev;
        new_blk->next = old_blk->next;

        if (old_blk->next) {
            WMEM_GET_FREE(old_blk->next)->prev = extra;
        }

        allocator->master_head = extra;
    }
    else {
        if (old_blk->prev == chunk) {
            new_blk->prev = extra;
            new_blk->next = extra;
        }
        else {
            new_blk->prev = old_blk->prev;
            new_blk->next = old_blk->next;

            WMEM_GET_FREE(old_blk->prev)->next = extra;
            WMEM_GET_FREE(old_blk->next)->prev = extra;
        }

        if (allocator->recycler_head == chunk) {
            allocator->recycler_head = extra;
        }
    }

    /* Now that we've copied over the free-list stuff (which may have overlapped
     * with our new chunk header) we can safely write our new chunk header. */
    extra->len   = (uint32_t) available;
    extra->last  = last;
    extra->prev  = chunk->len;
    extra->used  = false;
    extra->jumbo = false;

    /* Correctly update the following chunk's back-pointer */
    if (!last) {
        WMEM_CHUNK_NEXT(extra)->prev = extra->len;
    }
}

/* Takes a used chunk and a size, and splits it into two chunks if possible.
 * The first chunk can hold at least `size` bytes of data, while the second gets
 * whatever's left over. The second is marked as unused and is added to the
 * recycler. */
static void
wmem_block_split_used_chunk(wmem_block_allocator_t *allocator,
                            wmem_block_chunk_t *chunk,
                            const size_t size)
{
    wmem_block_chunk_t *extra;
    size_t aligned_size, available;
    bool   last;

    aligned_size = WMEM_ALIGN_SIZE(size) + WMEM_CHUNK_HEADER_SIZE;

    if (aligned_size > WMEM_CHUNK_DATA_LEN(chunk)) {
        /* in this case we don't have enough space to really split it, so
         * it's basically a no-op */
        return;
    }
    /* otherwise, we have room to split it, though the remaining free chunk
     * may still not be usefully large */

    /* preserve a few values from chunk that we'll need to manipulate */
    last      = chunk->last;
    available = chunk->len - aligned_size;

    /* set new values for chunk */
    chunk->len  = (uint32_t) aligned_size;
    chunk->last = false;

    /* with chunk's values set, we can use the standard macro to calculate
     * the location and size of the new free chunk */
    extra = WMEM_CHUNK_NEXT(chunk);

    /* set the new values for the chunk */
    extra->len   = (uint32_t) available;
    extra->last  = last;
    extra->prev  = chunk->len;
    extra->used  = false;
    extra->jumbo = false;

    /* Correctly update the following chunk's back-pointer */
    if (!last) {
        WMEM_CHUNK_NEXT(extra)->prev = extra->len;
    }

    /* Merge it to its right if possible (it can't be merged left, obviously).
     * This also adds it to the recycler. */
    wmem_block_merge_free(allocator, extra);
}

/* Add a block to the allocator's embedded doubly-linked list of OS-level blocks
 * that it owns. */
static void
wmem_block_add_to_block_list(wmem_block_allocator_t *allocator,
                             wmem_block_hdr_t *block)
{
    block->prev = NULL;
    block->next = allocator->block_list;
    if (block->next) {
        block->next->prev = block;
    }
    allocator->block_list = block;
}

/* Remove a block from the allocator's embedded doubly-linked list of OS-level
 * blocks that it owns. */
static void
wmem_block_remove_from_block_list(wmem_block_allocator_t *allocator,
                                  wmem_block_hdr_t *block)
{
    if (block->prev) {
        block->prev->next = block->next;
    }
    else {
        allocator->block_list = block->next;
    }

    if (block->next) {
        block->next->prev = block->prev;
    }
}

/* Initializes a single unused chunk at the beginning of the block, and
 * adds that chunk to the free list. */
static void
wmem_block_init_block(wmem_block_allocator_t *allocator,
                      wmem_block_hdr_t *block)
{
    wmem_block_chunk_t *chunk;

    /* a new block contains one chunk, right at the beginning */
    chunk = WMEM_BLOCK_TO_CHUNK(block);

    chunk->used  = false;
    chunk->jumbo = false;
    chunk->last  = true;
    chunk->prev  = 0;
    chunk->len   = WMEM_BLOCK_SIZE - WMEM_BLOCK_HEADER_SIZE;

    /* now push that chunk onto the master list */
    wmem_block_push_master(allocator, chunk);
}

/* Creates a new block, and initializes it. */
static void
wmem_block_new_block(wmem_block_allocator_t *allocator)
{
    wmem_block_hdr_t *block;

    /* allocate the new block and add it to the block list */
    block = (wmem_block_hdr_t *)wmem_alloc(NULL, WMEM_BLOCK_SIZE);
    wmem_block_add_to_block_list(allocator, block);

    /* initialize it */
    wmem_block_init_block(allocator, block);
}

/* JUMBO ALLOCATIONS */

/* Allocates special 'jumbo' blocks for sizes that won't fit normally. */
static void *
wmem_block_alloc_jumbo(wmem_block_allocator_t *allocator, const size_t size)
{
    wmem_block_hdr_t   *block;
    wmem_block_chunk_t *chunk;

    /* allocate a new block of exactly the right size */
    block = (wmem_block_hdr_t *) wmem_alloc(NULL, size
            + WMEM_BLOCK_HEADER_SIZE
            + WMEM_CHUNK_HEADER_SIZE);

    /* add it to the block list */
    wmem_block_add_to_block_list(allocator, block);

    /* the new block contains a single jumbo chunk */
    chunk = WMEM_BLOCK_TO_CHUNK(block);
    chunk->last  = true;
    chunk->used  = true;
    chunk->jumbo = true;
    chunk->len   = 0;
    chunk->prev  = 0;

    /* and return the data pointer */
    return WMEM_CHUNK_TO_DATA(chunk);
}

/* Frees special 'jumbo' blocks of sizes that won't fit normally. */
static void
wmem_block_free_jumbo(wmem_block_allocator_t *allocator,
                      wmem_block_chunk_t *chunk)
{
    wmem_block_hdr_t *block;

    block = WMEM_CHUNK_TO_BLOCK(chunk);

    wmem_block_remove_from_block_list(allocator, block);

    wmem_free(NULL, block);
}

/* Reallocs special 'jumbo' blocks of sizes that won't fit normally. */
static void *
wmem_block_realloc_jumbo(wmem_block_allocator_t *allocator,
                         wmem_block_chunk_t *chunk,
                         const size_t size)
{
    wmem_block_hdr_t *block;

    block = WMEM_CHUNK_TO_BLOCK(chunk);

    block = (wmem_block_hdr_t *) wmem_realloc(NULL, block, size
            + WMEM_BLOCK_HEADER_SIZE
            + WMEM_CHUNK_HEADER_SIZE);

    /* the block may have moved, so fix up its neighbours in the block list */
    if (block->next) {
        block->next->prev = block;
    }

    if (block->prev) {
        block->prev->next = block;
    }
    else {
        allocator->block_list = block;
    }

    return WMEM_CHUNK_TO_DATA(WMEM_BLOCK_TO_CHUNK(block));
}

static void *
wmem_block_alloc(void *private_data, const size_t size)
{
    wmem_block_allocator_t *allocator = (wmem_block_allocator_t*) private_data;
    wmem_block_chunk_t     *chunk;

    if (size > WMEM_BLOCK_MAX_ALLOC_SIZE) {
        return wmem_block_alloc_jumbo(allocator, size);
    }

    if (allocator->recycler_head &&
            WMEM_CHUNK_DATA_LEN(allocator->recycler_head) >= size) {

        /* If we can serve it from the recycler, do so. */
        chunk = allocator->recycler_head;
    }
    else {
        if (allocator->master_head &&
                WMEM_CHUNK_DATA_LEN(allocator->master_head) < size) {

            /* Recycle the head of the master list if necessary. */
            chunk = allocator->master_head;
            wmem_block_pop_master(allocator);
            wmem_block_add_to_recycler(allocator, chunk);
        }

        if (!allocator->master_head) {
            /* Allocate a new block if necessary. */
            wmem_block_new_block(allocator);
        }

        chunk = allocator->master_head;
    }

    /* Split our chunk into two to preserve any trailing free space */
    wmem_block_split_free_chunk(allocator, chunk, size);

    /* Now cycle the recycler */
    wmem_block_cycle_recycler(allocator);

    /* mark it as used */
    chunk->used = true;

    /* and return the user's pointer */
    return WMEM_CHUNK_TO_DATA(chunk);
}

static void
wmem_block_free(void *private_data, void *ptr)
{
    wmem_block_allocator_t *allocator = (wmem_block_allocator_t*) private_data;
    wmem_block_chunk_t     *chunk;

    chunk = WMEM_DATA_TO_CHUNK(ptr);

    if (chunk->jumbo) {
        wmem_block_free_jumbo(allocator, chunk);
        return;
    }

    /* mark it as unused */
    chunk->used = false;

    /* merge it with any other free chunks adjacent to it, so that contiguous
     * free space doesn't get fragmented */
    wmem_block_merge_free(allocator, chunk);

    /* Now cycle the recycler */
    wmem_block_cycle_recycler(allocator);
}

static void *
wmem_block_realloc(void *private_data, void *ptr, const size_t size)
{
    wmem_block_allocator_t *allocator = (wmem_block_allocator_t*) private_data;
    wmem_block_chunk_t     *chunk;

    chunk = WMEM_DATA_TO_CHUNK(ptr);

    if (chunk->jumbo) {
        return wmem_block_realloc_jumbo(allocator, chunk, size);
    }

    if (size > WMEM_CHUNK_DATA_LEN(chunk)) {
        /* grow */
        wmem_block_chunk_t *tmp;

        tmp = WMEM_CHUNK_NEXT(chunk);

        if (tmp && (!tmp->used) &&
                (size < WMEM_CHUNK_DATA_LEN(chunk) + tmp->len)) {
            /* the next chunk is free and has enough extra, so just grab
             * from that */
            size_t split_size;

            /* we ask for the next chunk to be split, but we don't end up
             * using the split chunk header (it just gets merged into this one),
             * so we want the split to be of (size - curdatalen - header_size).
             * However, this can underflow by header_size, so we do a quick
             * check here and floor the value to 0. */
            split_size = size - WMEM_CHUNK_DATA_LEN(chunk);

            if (split_size < WMEM_CHUNK_HEADER_SIZE) {
                split_size = 0;
            }
            else {
                split_size -= WMEM_CHUNK_HEADER_SIZE;
            }

            wmem_block_split_free_chunk(allocator, tmp, split_size);

            /* Now do a 'quickie' merge between the current block and the left-
             * hand side of the split. Simply calling wmem_block_merge_free
             * might confuse things, since we may temporarily have two blocks
             * to our right that are both free (and it isn't guaranteed to
             * handle that case). Update our 'next' count and last flag, and
             * our (new) successor's 'prev' count */
            chunk->len += tmp->len;
            chunk->last = tmp->last;
            tmp = WMEM_CHUNK_NEXT(chunk);
            if (tmp) {
                tmp->prev = chunk->len;
            }

            /* Now cycle the recycler */
            wmem_block_cycle_recycler(allocator);

            /* And return the same old pointer */
            return ptr;
        }
        else {
            /* no room to grow, need to alloc, copy, free */
            void *newptr;

            newptr = wmem_block_alloc(private_data, size);
            memcpy(newptr, ptr, WMEM_CHUNK_DATA_LEN(chunk));
            wmem_block_free(private_data, ptr);

            /* No need to cycle the recycler, alloc and free both did that
             * already */

            return newptr;
        }
    }
    else if (size < WMEM_CHUNK_DATA_LEN(chunk)) {
        /* shrink */
        wmem_block_split_used_chunk(allocator, chunk, size);

        /* Now cycle the recycler */
        wmem_block_cycle_recycler(allocator);

        return ptr;
    }

    /* no-op */
    return ptr;
}

static void
wmem_block_free_all(void *private_data)
{
    wmem_block_allocator_t *allocator = (wmem_block_allocator_t*) private_data;
    wmem_block_hdr_t       *cur;
    wmem_block_chunk_t     *chunk;

    /* the existing free lists are entirely irrelevant */
    allocator->master_head   = NULL;
    allocator->recycler_head = NULL;

    /* iterate through the blocks, reinitializing each one */
    cur = allocator->block_list;

    while (cur) {
        chunk = WMEM_BLOCK_TO_CHUNK(cur);
        if (chunk->jumbo) {
            /* jumbo blocks are returned to the OS outright */
            wmem_block_remove_from_block_list(allocator, cur);
            cur = cur->next;
            wmem_free(NULL, WMEM_CHUNK_TO_BLOCK(chunk));
        }
        else {
            wmem_block_init_block(allocator, cur);
            cur = cur->next;
        }
    }
}

static void
wmem_block_gc(void *private_data)
{
    wmem_block_allocator_t *allocator = (wmem_block_allocator_t*) private_data;
    wmem_block_hdr_t   *cur, *next;
    wmem_block_chunk_t *chunk;
    wmem_block_free_t  *free_chunk;

    /* Walk through the blocks, adding used blocks to the new list and
     * completely destroying unused blocks. */
    cur = allocator->block_list;
    allocator->block_list = NULL;

    while (cur) {
        chunk = WMEM_BLOCK_TO_CHUNK(cur);
        next  = cur->next;

        if (!chunk->jumbo && !chunk->used && chunk->last) {
            /* If the first chunk is also the last, and is unused, then
             * the block as a whole is entirely unused, so return it to
             * the OS and remove it from whatever lists it is in. */
            free_chunk = WMEM_GET_FREE(chunk);
            if (free_chunk->next) {
                WMEM_GET_FREE(free_chunk->next)->prev = free_chunk->prev;
            }
            if (free_chunk->prev) {
                WMEM_GET_FREE(free_chunk->prev)->next = free_chunk->next;
            }
            if (allocator->recycler_head == chunk) {
                if (free_chunk->next == chunk) {
                    allocator->recycler_head = NULL;
                }
                else {
                    allocator->recycler_head = free_chunk->next;
                }
            }
            else if (allocator->master_head == chunk) {
                allocator->master_head = free_chunk->next;
            }
            wmem_free(NULL, cur);
        }
        else {
            /* part of this block is used, so add it to the new block list */
            wmem_block_add_to_block_list(allocator, cur);
        }

        cur = next;
    }
}

static void
wmem_block_allocator_cleanup(void *private_data)
{
    /* wmem guarantees that free_all() is called directly before this, so
     * calling gc will return all our blocks to the OS automatically */
    wmem_block_gc(private_data);

    /* then just free the allocator structs */
    wmem_free(NULL, private_data);
}

void
wmem_block_allocator_init(wmem_allocator_t *allocator)
{
    wmem_block_allocator_t *block_allocator;

    block_allocator = wmem_new(NULL, wmem_block_allocator_t);

    allocator->walloc   = &wmem_block_alloc;
    allocator->wrealloc = &wmem_block_realloc;
    allocator->wfree    = &wmem_block_free;

    allocator->free_all = &wmem_block_free_all;
    allocator->gc       = &wmem_block_gc;
    allocator->cleanup  = &wmem_block_allocator_cleanup;

    allocator->private_data = (void*) block_allocator;

    block_allocator->block_list    = NULL;
    block_allocator->master_head   = NULL;
    block_allocator->recycler_head = NULL;
}

/*
 * Editor modelines  -  https://www.wireshark.org/tools/modelines.html
 *
 * Local variables:
 * c-basic-offset: 4
 * tab-width: 8
 * indent-tabs-mode: nil
 * End:
 *
 * vi: set shiftwidth=4 tabstop=8 expandtab:
 * :indentSize=4:tabSize=8:noTabs=true:
 */