/* wmem_allocator_block.c
 * Wireshark Memory Manager Large-Block Allocator (version 3)
 * Copyright 2013, Evan Huus <eapache@gmail.com>
 *
 * Wireshark - Network traffic analyzer
 * By Gerald Combs <gerald@wireshark.org>
 * Copyright 1998 Gerald Combs
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include <stdio.h>
#include <string.h>

#include <glib.h>

#include "wmem_core.h"
#include "wmem_allocator.h"
#include "wmem_allocator_block.h"

/* This has turned into a very interesting exercise in algorithms and data
 * structures.
 *
 * HISTORY
 *
 * Version 1 of this allocator was embedded in the original emem framework. It
 * didn't have to handle realloc or free, so it was very simple: it just grabbed
 * a block from the OS and served allocations sequentially out of that until it
 * ran out, then allocated a new block. The old block was never revisited, so
 * it generally had a bit of wasted space at the end, but the waste was
 * small enough that it was simply ignored. This allocator provided very fast
 * constant-time allocation for any request that didn't require a new block from
 * the OS, and that cost could be amortized away.
 *
 * Version 2 of this allocator was prompted by the need to support realloc and
 * free in wmem. The original version simply didn't save enough metadata to do
 * this, so I added a layer on top to make it possible. The primary principle
 * was the same (allocate sequentially out of big blocks) with a bit of extra
 * magic. Allocations were still fast constant-time, and frees were as well.
 * Large parts of that design are still present in this one, but for more
 * details see older versions of this file from git or svn.
 *
 * Version 3 of this allocator was written to address some issues that
 * eventually showed up with version 2 under real-world usage. Specifically,
 * version 2 dealt very poorly with memory fragmentation, almost never reusing
 * freed blocks and choosing to just keep allocating from the master block
 * instead. This led to particularly poor behaviour under the tick-tock loads
 * (alloc/free/alloc/free or alloc/alloc/free/alloc/alloc/free/ or ...) that
 * showed up in a couple of different protocol dissectors (TCP, Kafka).
 *
 * BLOCKS AND CHUNKS
 *
 * As in previous versions, allocations typically happen sequentially out of
 * large OS-level blocks. Each block has a short embedded header used to
 * maintain a doubly-linked list of all blocks (used or not) currently owned by
 * the allocator. Each block is divided into chunks, which represent allocations
 * and free sections (a block is initialized with one large, free, chunk). Each
 * chunk is prefixed with a wmem_block_chunk_t structure, which is a short
 * metadata header (8 bytes, regardless of 32 or 64-bit architecture unless
 * alignment requires it to be padded) that contains the length of the chunk,
 * the length of the previous chunk, a flag marking the chunk as free or used,
 * and a flag marking the last chunk in a block. This serves to implement an
 * inline sequential doubly-linked list of all the chunks in each block. A block
 * with three chunks might look something like this:
 *
 *      ||---||-----+-----+--------||-----+-----+--------||-----+-----+--------||
 *      ||hdr|| prv | len |  body  || prv | len |  body  || prv | len |  body  ||
 *      ||---||-----+-----+--------||-----+-----+--------||-----+-----+--------||
 *
 * where 'hdr' is the block header, each chunk's 'len' is the offset forward to
 * the next chunk's header, and each chunk's 'prv' is the offset back to the
 * previous chunk's header ('prv' is 0 for the first chunk in a block).
 *
 * When allocating, a free chunk is found (more on that later) and split into
 * two chunks: the first of the requested size and the second containing any
 * remaining free space. The first is marked used and returned to the caller.
 *
 * When freeing, the chunk in question is marked as free. Its neighbouring
 * chunks are then checked; if either of them are free, the consecutive free
 * chunks are merged into a single larger free chunk. Induction can show that
 * applying this operation consistently prevents us ever having consecutive
 * free chunks.
 *
 * Free chunks (because they are not being used for anything else) each store an
 * additional pair of pointers (see the wmem_block_free_t structure) that form
 * the backbone of the data structures used to track free chunks.
 *
 * MASTER AND RECYCLER
 *
 * The extra pair of pointers in free chunks are used to build two doubly-linked
 * lists: the master and the recycler. The recycler is circular, the master is
 * a stack.
 *
 * The master stack is only populated by chunks from new OS-level blocks,
 * so every chunk in this list is guaranteed to be able to serve any allocation
 * request (the allocator will not serve requests larger than its block size).
 * The chunk at the head of the master list shrinks as it serves requests. When
 * it is too small to serve the current request, it is popped and inserted into
 * the recycler. If the master list is empty, a new OS-level block is allocated,
 * and its chunk is pushed onto the master stack.
 *
 * The recycler is populated by 'leftovers' from the master, as well as any
 * chunks that were returned to the allocator via a call to free(). Although the
 * recycler is circular, we will refer to the element referenced from the
 * allocator as the 'head' of the list for convenience. The primary operation on
 * the recycler is called cycling it. In this operation, the head is compared
 * with its clockwise neighbour. If the neighbour is as large or larger, it
 * becomes the head (the list rotates counter-clockwise). If the neighbour is
 * smaller, then it is removed from its location and inserted as the counter-
 * clockwise neighbour of the head (the list still rotates counter-clockwise,
 * but the head element is held fixed while the rest of the list spins). This
 * operation has the following properties:
 *   - fast constant time
 *   - once the largest chunk is at the head, it remains at the head
 *   - more iterations increases the probability that the largest chunk will be
 *     the head (for a list with n items, n iterations guarantees that the
 *     largest chunk will be the head).
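 *
 * As an illustrative example (hypothetical sizes, in bytes), suppose the
 * recycler holds chunks of sizes [64, 256, 32, 128], listed clockwise
 * starting from the head:
 *   - cycle: 256 >= 64, so 256 becomes the new head       -> [256, 32, 128, 64]
 *   - cycle: 32 < 256, so 32 is tucked in behind the head -> [256, 128, 64, 32]
 *   - cycle: 128 < 256, likewise                          -> [256, 64, 32, 128]
 * Once the largest chunk (256) reaches the head it stays there, as described
 * above.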
 *
 * ALLOCATING
 *
 * When an allocation request is received, the allocator first attempts to
 * satisfy it with the chunk at the head of the recycler. If that does not
 * succeed, the request is satisfied by the master list instead. Regardless of
 * which chunk satisfied the request, the recycler is always cycled.
 */

/* https://mail.gnome.org/archives/gtk-devel-list/2004-December/msg00091.html
 * The 2*sizeof(size_t) alignment here is borrowed from GNU libc, so it should
 * be good most everywhere. It is more conservative than is needed on some
 * 64-bit platforms, but ia64 does require a 16-byte alignment. The SIMD
 * extensions for x86 and ppc32 would want a larger alignment than this, but
 * we don't need to do better than malloc.
 */
#define WMEM_ALIGN_AMOUNT (2 * sizeof (size_t))
#define WMEM_ALIGN_SIZE(SIZE) ((~(WMEM_ALIGN_AMOUNT-1)) & \
        ((SIZE) + (WMEM_ALIGN_AMOUNT-1)))

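/* For example, on an LP64 platform where sizeof(size_t) == 8 (an assumption
 * used for illustration only), WMEM_ALIGN_AMOUNT is 16 and so:
 *     WMEM_ALIGN_SIZE(1)  == 16
 *     WMEM_ALIGN_SIZE(16) == 16
 *     WMEM_ALIGN_SIZE(17) == 32
 * i.e. sizes are rounded up to the next multiple of the alignment amount. */
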
/* When required, allocate more memory from the OS in chunks of this size.
 * 8MB is a pretty arbitrary value - it's big enough that it should last a while
 * and small enough that a mostly-unused one doesn't waste *too* much. It's
 * also a nice power of two, of course. */
#define WMEM_BLOCK_SIZE (8 * 1024 * 1024)

/* The header for an entire OS-level 'block' of memory */
typedef struct _wmem_block_hdr_t {
    struct _wmem_block_hdr_t *prev, *next;
} wmem_block_hdr_t;

/* The header for a single 'chunk' of memory as returned from alloc/realloc.
 * The 'jumbo' flag indicates an allocation larger than a normal-sized block
 * would be capable of serving. If this is set, it is the only chunk in the
 * block and the other chunk header fields are irrelevant.
 */
typedef struct _wmem_block_chunk_t {
    uint32_t prev;

    /* flags */
    uint32_t last:1;
    uint32_t used:1;
    uint32_t jumbo:1;

    uint32_t len:29;
} wmem_block_chunk_t;

/* Handy macros for navigating the chunks in a block as if they were a
 * doubly-linked list. */
#define WMEM_CHUNK_PREV(CHUNK) ((CHUNK)->prev \
        ? ((wmem_block_chunk_t*)(((uint8_t*)(CHUNK)) - (CHUNK)->prev)) \
        : NULL)

#define WMEM_CHUNK_NEXT(CHUNK) ((CHUNK)->last \
        ? NULL \
        : ((wmem_block_chunk_t*)(((uint8_t*)(CHUNK)) + (CHUNK)->len)))

#define WMEM_CHUNK_HEADER_SIZE WMEM_ALIGN_SIZE(sizeof(wmem_block_chunk_t))

#define WMEM_BLOCK_MAX_ALLOC_SIZE (WMEM_BLOCK_SIZE - \
        (WMEM_BLOCK_HEADER_SIZE + WMEM_CHUNK_HEADER_SIZE))

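/* Worked example, using the illustrative LP64 numbers from above: both header
 * macros align up to 16 bytes (sizeof(wmem_block_hdr_t) == 16 and
 * sizeof(wmem_block_chunk_t) == 8), so WMEM_BLOCK_MAX_ALLOC_SIZE comes out to
 * 8 MiB - 32 bytes = 8388576 bytes. Requests larger than that take the jumbo
 * path below. */
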
/* other handy chunk macros */
#define WMEM_CHUNK_TO_DATA(CHUNK)  ((void*)((uint8_t*)(CHUNK) + WMEM_CHUNK_HEADER_SIZE))
#define WMEM_DATA_TO_CHUNK(DATA)   ((wmem_block_chunk_t*)((uint8_t*)(DATA) - WMEM_CHUNK_HEADER_SIZE))
#define WMEM_CHUNK_DATA_LEN(CHUNK) ((CHUNK)->len - WMEM_CHUNK_HEADER_SIZE)

/* some handy block macros */
#define WMEM_BLOCK_HEADER_SIZE WMEM_ALIGN_SIZE(sizeof(wmem_block_hdr_t))
#define WMEM_BLOCK_TO_CHUNK(BLOCK) ((wmem_block_chunk_t*)((uint8_t*)(BLOCK) + WMEM_BLOCK_HEADER_SIZE))
#define WMEM_CHUNK_TO_BLOCK(CHUNK) ((wmem_block_hdr_t*)((uint8_t*)(CHUNK) - WMEM_BLOCK_HEADER_SIZE))

/* This is what the 'data' section of a chunk contains if it is free. */
typedef struct _wmem_block_free_t {
    wmem_block_chunk_t *prev, *next;
} wmem_block_free_t;

/* Handy macro for accessing the free-header of a chunk */
#define WMEM_GET_FREE(CHUNK) ((wmem_block_free_t*)WMEM_CHUNK_TO_DATA(CHUNK))

typedef struct _wmem_block_allocator_t {
    wmem_block_hdr_t   *block_list;
    wmem_block_chunk_t *master_head;
    wmem_block_chunk_t *recycler_head;
} wmem_block_allocator_t;

/* DEBUG AND TEST */
static int
wmem_block_verify_block(wmem_block_hdr_t *block)
{
    int                 total_free_space = 0;
    uint32_t            total_len;
    wmem_block_chunk_t *chunk;

    chunk     = WMEM_BLOCK_TO_CHUNK(block);
    total_len = WMEM_BLOCK_HEADER_SIZE;

    if (chunk->jumbo) {
        /* We can tell nothing else about jumbo chunks except that they are
         * always used. */
        return 0;
    }

    g_assert_true(chunk->prev == 0);

    do {
        total_len += chunk->len;

        g_assert_true(chunk->len >= WMEM_CHUNK_HEADER_SIZE);
        g_assert_true(!chunk->jumbo);

        if (WMEM_CHUNK_NEXT(chunk)) {
            g_assert_true(chunk->len == WMEM_CHUNK_NEXT(chunk)->prev);
        }

        if (!chunk->used &&
                WMEM_CHUNK_DATA_LEN(chunk) >= sizeof(wmem_block_free_t)) {

            total_free_space += chunk->len;

            if (!chunk->last) {
                g_assert_true(WMEM_GET_FREE(chunk)->next);
                g_assert_true(WMEM_GET_FREE(chunk)->prev);
            }
        }

        chunk = WMEM_CHUNK_NEXT(chunk);
    } while (chunk);

    g_assert_true(total_len == WMEM_BLOCK_SIZE);

    return total_free_space;
}

static int
wmem_block_verify_master_list(wmem_block_allocator_t *allocator)
{
    wmem_block_chunk_t *cur;
    wmem_block_free_t  *cur_free;
    int                 free_space = 0;

    cur = allocator->master_head;
    if (!cur) {
        return 0;
    }

    g_assert_true(WMEM_GET_FREE(cur)->prev == NULL);

    while (cur) {
        free_space += cur->len;

        cur_free = WMEM_GET_FREE(cur);

        g_assert_true(! cur->used);

        if (cur_free->next) {
            g_assert_true(WMEM_GET_FREE(cur_free->next)->prev == cur);
        }

        if (cur != allocator->master_head) {
            g_assert_true(cur->len == WMEM_BLOCK_SIZE);
        }

        cur = cur_free->next;
    }

    return free_space;
}

static int
wmem_block_verify_recycler(wmem_block_allocator_t *allocator)
{
    wmem_block_chunk_t *cur;
    wmem_block_free_t  *cur_free;
    int                 free_space = 0;

    cur = allocator->recycler_head;
    if (!cur) {
        return 0;
    }

    do {
        free_space += cur->len;

        cur_free = WMEM_GET_FREE(cur);

        g_assert_true(! cur->used);

        g_assert_true(cur_free->prev);
        g_assert_true(cur_free->next);

        g_assert_true(WMEM_GET_FREE(cur_free->prev)->next == cur);
        g_assert_true(WMEM_GET_FREE(cur_free->next)->prev == cur);

        cur = cur_free->next;
    } while (cur != allocator->recycler_head);

    return free_space;
}

void
wmem_block_verify(wmem_allocator_t *allocator)
{
    wmem_block_hdr_t       *cur;
    wmem_block_allocator_t *private_allocator;
    int master_free, recycler_free, chunk_free = 0;

    /* Normally it would be bad for an allocator helper function to depend
     * on receiving the right type of allocator, but this is for testing only
     * and is not part of any real API. */
    g_assert_true(allocator->type == WMEM_ALLOCATOR_BLOCK);

    private_allocator = (wmem_block_allocator_t*) allocator->private_data;

    if (private_allocator->block_list == NULL) {
        g_assert_true(! private_allocator->master_head);
        g_assert_true(! private_allocator->recycler_head);
        return;
    }

    master_free   = wmem_block_verify_master_list(private_allocator);
    recycler_free = wmem_block_verify_recycler(private_allocator);

    cur = private_allocator->block_list;
    g_assert_true(cur->prev == NULL);
    while (cur) {
        if (cur->next) {
            g_assert_true(cur->next->prev == cur);
        }
        chunk_free += wmem_block_verify_block(cur);
        cur = cur->next;
    }

    g_assert_true(chunk_free == master_free + recycler_free);
}

/* MASTER/RECYCLER HELPERS */

/* Cycles the recycler. See the design notes at the top of this file for more
 * details. */
static void
wmem_block_cycle_recycler(wmem_block_allocator_t *allocator)
{
    wmem_block_chunk_t *chunk;
    wmem_block_free_t  *free_chunk;

    chunk = allocator->recycler_head;

    if (chunk == NULL) {
        return;
    }

    free_chunk = WMEM_GET_FREE(chunk);

    if (free_chunk->next->len < chunk->len) {
        /* Hold the current head fixed during rotation. */
        WMEM_GET_FREE(free_chunk->next)->prev = free_chunk->prev;
        WMEM_GET_FREE(free_chunk->prev)->next = free_chunk->next;

        free_chunk->prev = free_chunk->next;
        free_chunk->next = WMEM_GET_FREE(free_chunk->next)->next;

        WMEM_GET_FREE(free_chunk->next)->prev = chunk;
        WMEM_GET_FREE(free_chunk->prev)->next = chunk;
    }
    else {
        /* Just rotate everything. */
        allocator->recycler_head = free_chunk->next;
    }
}

/* Adds a chunk to the recycler. */
static void
wmem_block_add_to_recycler(wmem_block_allocator_t *allocator,
                           wmem_block_chunk_t *chunk)
{
    wmem_block_free_t *free_chunk;

    if (WMEM_CHUNK_DATA_LEN(chunk) < sizeof(wmem_block_free_t)) {
        return;
    }

    free_chunk = WMEM_GET_FREE(chunk);

    if (! allocator->recycler_head) {
        /* First one */
        free_chunk->next         = chunk;
        free_chunk->prev         = chunk;
        allocator->recycler_head = chunk;
    }
    else {
        free_chunk->next = allocator->recycler_head;
        free_chunk->prev = WMEM_GET_FREE(allocator->recycler_head)->prev;

        WMEM_GET_FREE(free_chunk->next)->prev = chunk;
        WMEM_GET_FREE(free_chunk->prev)->next = chunk;

        if (chunk->len > allocator->recycler_head->len) {
            allocator->recycler_head = chunk;
        }
    }
}

/* Removes a chunk from the recycler. */
static void
wmem_block_remove_from_recycler(wmem_block_allocator_t *allocator,
                                wmem_block_chunk_t *chunk)
{
    wmem_block_free_t *free_chunk;

    free_chunk = WMEM_GET_FREE(chunk);

    if (free_chunk->prev == chunk && free_chunk->next == chunk) {
        /* Only one item in recycler, just empty it. */
        allocator->recycler_head = NULL;
    }
    else {
        /* Two or more items, usual doubly-linked-list removal. It's circular
         * so we don't need to worry about null-checking anything, which is
         * nice. */
        WMEM_GET_FREE(free_chunk->prev)->next = free_chunk->next;
        WMEM_GET_FREE(free_chunk->next)->prev = free_chunk->prev;
        if (allocator->recycler_head == chunk) {
            allocator->recycler_head = free_chunk->next;
        }
    }
}

/* Pushes a chunk onto the master stack. */
static void
wmem_block_push_master(wmem_block_allocator_t *allocator,
                       wmem_block_chunk_t *chunk)
{
    wmem_block_free_t *free_chunk;

    free_chunk = WMEM_GET_FREE(chunk);
    free_chunk->prev = NULL;
    free_chunk->next = allocator->master_head;
    if (free_chunk->next) {
        WMEM_GET_FREE(free_chunk->next)->prev = chunk;
    }
    allocator->master_head = chunk;
}

/* Removes the top chunk from the master stack. */
static void
wmem_block_pop_master(wmem_block_allocator_t *allocator)
{
    wmem_block_chunk_t *chunk;
    wmem_block_free_t  *free_chunk;

    chunk = allocator->master_head;

    free_chunk = WMEM_GET_FREE(chunk);

    allocator->master_head = free_chunk->next;
    if (free_chunk->next) {
        WMEM_GET_FREE(free_chunk->next)->prev = NULL;
    }
}

/* CHUNK HELPERS */

/* Takes a free chunk and checks the chunks to its immediate right and left in
 * the block. If they are also free, the contiguous free chunks are merged into
 * a single free chunk. The resulting chunk ends up in either the master list or
 * the recycler, depending on where the merged chunks were originally.
 */
static void
wmem_block_merge_free(wmem_block_allocator_t *allocator,
                      wmem_block_chunk_t *chunk)
{
    wmem_block_chunk_t *tmp;
    wmem_block_chunk_t *left_free  = NULL;
    wmem_block_chunk_t *right_free = NULL;

    /* Check the chunk to our right. If it is free, merge it into our current
     * chunk. If it is big enough to hold a free-header, save it for later (we
     * need to know about the left chunk before we decide what goes where). */
    tmp = WMEM_CHUNK_NEXT(chunk);
    if (tmp && !tmp->used) {
        if (WMEM_CHUNK_DATA_LEN(tmp) >= sizeof(wmem_block_free_t)) {
            right_free = tmp;
        }
        chunk->len += tmp->len;
        chunk->last = tmp->last;
    }

    /* Check the chunk to our left. If it is free, merge our current chunk into
     * it (thus chunk = tmp). As before, save it if it has enough space to
     * hold a free-header. */
    tmp = WMEM_CHUNK_PREV(chunk);
    if (tmp && !tmp->used) {
        if (WMEM_CHUNK_DATA_LEN(tmp) >= sizeof(wmem_block_free_t)) {
            left_free = tmp;
        }
        tmp->len += chunk->len;
        tmp->last = chunk->last;
        chunk = tmp;
    }

    /* The length of our chunk may have changed. If we have a chunk following,
     * update its 'prev' count. */
    if (!chunk->last) {
        WMEM_CHUNK_NEXT(chunk)->prev = chunk->len;
    }

    /* Now that the chunk headers are merged and consistent, we need to figure
     * out what goes where in which free list. */
    if (right_free && right_free == allocator->master_head) {
        /* If we merged right, and that chunk was the head of the master list,
         * then we leave the resulting chunk at the head of the master list. */
        wmem_block_free_t *moved;
        if (left_free) {
            wmem_block_remove_from_recycler(allocator, left_free);
        }
        moved = WMEM_GET_FREE(chunk);
        moved->prev = NULL;
        moved->next = WMEM_GET_FREE(right_free)->next;
        allocator->master_head = chunk;
        if (moved->next) {
            WMEM_GET_FREE(moved->next)->prev = chunk;
        }
    }
    else {
        /* Otherwise, we remove the right-merged chunk (if there was one) from
         * the recycler. Then, if we merged left we have nothing to do, since
         * that recycler entry is still valid. If not, we add the chunk. */
        if (right_free) {
            wmem_block_remove_from_recycler(allocator, right_free);
        }
        if (!left_free) {
            wmem_block_add_to_recycler(allocator, chunk);
        }
    }
}

/* Takes an unused chunk and a size, and splits it into two chunks if possible.
 * The first chunk (at the same address as the input chunk) is guaranteed to
 * hold at least `size` bytes of data, and to not be in either the master or
 * recycler lists.
 *
 * The second chunk gets whatever data is left over. It is marked unused and
 * replaces the input chunk in whichever list it originally inhabited. */
static void
wmem_block_split_free_chunk(wmem_block_allocator_t *allocator,
                            wmem_block_chunk_t *chunk,
                            const size_t size)
{
    wmem_block_chunk_t *extra;
    wmem_block_free_t  *old_blk, *new_blk;
    size_t aligned_size, available;
    bool   last;

    aligned_size = WMEM_ALIGN_SIZE(size) + WMEM_CHUNK_HEADER_SIZE;

    if (WMEM_CHUNK_DATA_LEN(chunk) < aligned_size + sizeof(wmem_block_free_t)) {
        /* If the available space is not enough to store all of
         * (hdr + requested size + alignment padding + hdr + free-header) then
         * just remove the current chunk from the free list and return, since we
         * can't usefully split it. */
        if (chunk == allocator->master_head) {
            wmem_block_pop_master(allocator);
        }
        else if (WMEM_CHUNK_DATA_LEN(chunk) >= sizeof(wmem_block_free_t)) {
            wmem_block_remove_from_recycler(allocator, chunk);
        }
        return;
    }

    /* preserve a few values from chunk that we'll need to manipulate */
    last      = chunk->last;
    available = chunk->len - aligned_size;

    /* set new values for chunk */
    chunk->len  = (uint32_t) aligned_size;
    chunk->last = false;

    /* with chunk's values set, we can use the standard macro to calculate
     * the location and size of the new free chunk */
    extra = WMEM_CHUNK_NEXT(chunk);

    /* Now we move the free chunk's address without changing its location
     * in whichever list it is in.
     *
     * Note that the new chunk header 'extra' may overlap the old free header,
     * so we have to copy the free header before we write anything to extra.
     */
    old_blk = WMEM_GET_FREE(chunk);
    new_blk = WMEM_GET_FREE(extra);

    if (allocator->master_head == chunk) {
        new_blk->prev = old_blk->prev;
        new_blk->next = old_blk->next;

        if (old_blk->next) {
            WMEM_GET_FREE(old_blk->next)->prev = extra;
        }

        allocator->master_head = extra;
    }
    else {
        if (old_blk->prev == chunk) {
            new_blk->prev = extra;
            new_blk->next = extra;
        }
        else {
            new_blk->prev = old_blk->prev;
            new_blk->next = old_blk->next;

            WMEM_GET_FREE(old_blk->prev)->next = extra;
            WMEM_GET_FREE(old_blk->next)->prev = extra;
        }

        if (allocator->recycler_head == chunk) {
            allocator->recycler_head = extra;
        }
    }

    /* Now that we've copied over the free-list stuff (which may have overlapped
     * with our new chunk header) we can safely write our new chunk header. */
    extra->len   = (uint32_t) available;
    extra->last  = last;
    extra->prev  = chunk->len;
    extra->used  = false;
    extra->jumbo = false;

    /* Correctly update the following chunk's back-pointer */
    if (!last) {
        WMEM_CHUNK_NEXT(extra)->prev = extra->len;
    }
}

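/* For instance, with the illustrative LP64 alignment numbers used earlier:
 * splitting a free chunk of len == 1024 for a 100-byte request gives
 * aligned_size = 112 + 16 = 128, so the first chunk keeps len == 128 (112
 * usable data bytes) and the leftover chunk gets len == 896 and takes the
 * original chunk's place in whichever free list it was in. */
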
/* Takes a used chunk and a size, and splits it into two chunks if possible.
 * The first chunk can hold at least `size` bytes of data, while the second gets
 * whatever's left over. The second is marked as unused and is added to the
 * recycler. */
static void
wmem_block_split_used_chunk(wmem_block_allocator_t *allocator,
                            wmem_block_chunk_t *chunk,
                            const size_t size)
{
    wmem_block_chunk_t *extra;
    size_t aligned_size, available;
    bool   last;

    aligned_size = WMEM_ALIGN_SIZE(size) + WMEM_CHUNK_HEADER_SIZE;

    if (aligned_size > WMEM_CHUNK_DATA_LEN(chunk)) {
        /* in this case we don't have enough space to really split it, so
         * it's basically a no-op */
        return;
    }
    /* otherwise, we have room to split it, though the remaining free chunk
     * may still not be usefully large */

    /* preserve a few values from chunk that we'll need to manipulate */
    last      = chunk->last;
    available = chunk->len - aligned_size;

    /* set new values for chunk */
    chunk->len  = (uint32_t) aligned_size;
    chunk->last = false;

    /* with chunk's values set, we can use the standard macro to calculate
     * the location and size of the new free chunk */
    extra = WMEM_CHUNK_NEXT(chunk);

    /* set the new values for the chunk */
    extra->len   = (uint32_t) available;
    extra->last  = last;
    extra->prev  = chunk->len;
    extra->used  = false;
    extra->jumbo = false;

    /* Correctly update the following chunk's back-pointer */
    if (!last) {
        WMEM_CHUNK_NEXT(extra)->prev = extra->len;
    }

    /* Merge it to its right if possible (it can't be merged left, obviously).
     * This also adds it to the recycler. */
    wmem_block_merge_free(allocator, extra);
}

/* BLOCK HELPERS */

/* Add a block to the allocator's embedded doubly-linked list of OS-level blocks
 * that it owns. */
static void
wmem_block_add_to_block_list(wmem_block_allocator_t *allocator,
                             wmem_block_hdr_t *block)
{
    block->prev = NULL;
    block->next = allocator->block_list;
    if (block->next) {
        block->next->prev = block;
    }
    allocator->block_list = block;
}

/* Remove a block from the allocator's embedded doubly-linked list of OS-level
 * blocks that it owns. */
static void
wmem_block_remove_from_block_list(wmem_block_allocator_t *allocator,
                                  wmem_block_hdr_t *block)
{
    if (block->prev) {
        block->prev->next = block->next;
    }
    else {
        allocator->block_list = block->next;
    }

    if (block->next) {
        block->next->prev = block->prev;
    }
}

/* Initializes a single unused chunk at the beginning of the block, and
 * adds that chunk to the free list. */
static void
wmem_block_init_block(wmem_block_allocator_t *allocator,
                      wmem_block_hdr_t *block)
{
    wmem_block_chunk_t *chunk;

    /* a new block contains one chunk, right at the beginning */
    chunk = WMEM_BLOCK_TO_CHUNK(block);

    chunk->used  = false;
    chunk->jumbo = false;
    chunk->last  = true;
    chunk->prev  = 0;
    chunk->len   = WMEM_BLOCK_SIZE - WMEM_BLOCK_HEADER_SIZE;

    /* now push that chunk onto the master list */
    wmem_block_push_master(allocator, chunk);
}

/* Creates a new block, and initializes it. */
static void
wmem_block_new_block(wmem_block_allocator_t *allocator)
{
    wmem_block_hdr_t *block;

    /* allocate the new block and add it to the block list */
    block = (wmem_block_hdr_t *)wmem_alloc(NULL, WMEM_BLOCK_SIZE);
    wmem_block_add_to_block_list(allocator, block);

    /* initialize it */
    wmem_block_init_block(allocator, block);
}

/* JUMBO ALLOCATIONS */

/* Allocates special 'jumbo' blocks for sizes that won't fit normally. */
static void *
wmem_block_alloc_jumbo(wmem_block_allocator_t *allocator, const size_t size)
{
    wmem_block_hdr_t   *block;
    wmem_block_chunk_t *chunk;

    /* allocate a new block of exactly the right size */
    block = (wmem_block_hdr_t *) wmem_alloc(NULL, size
            + WMEM_BLOCK_HEADER_SIZE
            + WMEM_CHUNK_HEADER_SIZE);

    /* add it to the block list */
    wmem_block_add_to_block_list(allocator, block);

    /* the new block contains a single jumbo chunk */
    chunk = WMEM_BLOCK_TO_CHUNK(block);
    chunk->last  = true;
    chunk->used  = true;
    chunk->jumbo = true;
    chunk->len   = 0;
    chunk->prev  = 0;

    /* and return the data pointer */
    return WMEM_CHUNK_TO_DATA(chunk);
}

/* Frees special 'jumbo' blocks of sizes that won't fit normally. */
static void
wmem_block_free_jumbo(wmem_block_allocator_t *allocator,
                      wmem_block_chunk_t *chunk)
{
    wmem_block_hdr_t *block;

    block = WMEM_CHUNK_TO_BLOCK(chunk);

    wmem_block_remove_from_block_list(allocator, block);

    wmem_free(NULL, block);
}

/* Reallocs special 'jumbo' blocks of sizes that won't fit normally. */
static void *
wmem_block_realloc_jumbo(wmem_block_allocator_t *allocator,
                         wmem_block_chunk_t *chunk,
                         const size_t size)
{
    wmem_block_hdr_t *block;

    block = WMEM_CHUNK_TO_BLOCK(chunk);

    block = (wmem_block_hdr_t *) wmem_realloc(NULL, block, size
            + WMEM_BLOCK_HEADER_SIZE
            + WMEM_CHUNK_HEADER_SIZE);

    if (block->next) {
        block->next->prev = block;
    }

    if (block->prev) {
        block->prev->next = block;
    }
    else {
        allocator->block_list = block;
    }

    return WMEM_CHUNK_TO_DATA(WMEM_BLOCK_TO_CHUNK(block));
}

/* API */

static void *
wmem_block_alloc(void *private_data, const size_t size)
{
    wmem_block_allocator_t *allocator = (wmem_block_allocator_t*) private_data;
    wmem_block_chunk_t     *chunk;

    if (size > WMEM_BLOCK_MAX_ALLOC_SIZE) {
        return wmem_block_alloc_jumbo(allocator, size);
    }

    if (allocator->recycler_head &&
            WMEM_CHUNK_DATA_LEN(allocator->recycler_head) >= size) {

        /* If we can serve it from the recycler, do so. */
        chunk = allocator->recycler_head;
    }
    else {
        if (allocator->master_head &&
                WMEM_CHUNK_DATA_LEN(allocator->master_head) < size) {

            /* Recycle the head of the master list if necessary. */
            chunk = allocator->master_head;
            wmem_block_pop_master(allocator);
            wmem_block_add_to_recycler(allocator, chunk);
        }

        if (!allocator->master_head) {
            /* Allocate a new block if necessary. */
            wmem_block_new_block(allocator);
        }

        chunk = allocator->master_head;
    }

    /* Split our chunk into two to preserve any trailing free space */
    wmem_block_split_free_chunk(allocator, chunk, size);

    /* Now cycle the recycler */
    wmem_block_cycle_recycler(allocator);

    /* mark it as used */
    chunk->used = true;

    /* and return the user's pointer */
    return WMEM_CHUNK_TO_DATA(chunk);
}

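/* To make the flow above concrete (hypothetical request, LP64 numbers again):
 * with a completely empty allocator, a 100-byte allocation finds nothing in
 * the recycler or the master list, grabs a fresh 8 MiB block whose single free
 * chunk becomes the master head, splits a 128-byte chunk (16-byte header plus
 * 112 aligned data bytes) off the front of it, marks that chunk used and
 * returns its data pointer. */
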
static void
wmem_block_free(void *private_data, void *ptr)
{
    wmem_block_allocator_t *allocator = (wmem_block_allocator_t*) private_data;
    wmem_block_chunk_t     *chunk;

    chunk = WMEM_DATA_TO_CHUNK(ptr);

    if (chunk->jumbo) {
        wmem_block_free_jumbo(allocator, chunk);
        return;
    }

    /* mark it as unused */
    chunk->used = false;

    /* merge it with any other free chunks adjacent to it, so that contiguous
     * free space doesn't get fragmented */
    wmem_block_merge_free(allocator, chunk);

    /* Now cycle the recycler */
    wmem_block_cycle_recycler(allocator);
}

static void *
wmem_block_realloc(void *private_data, void *ptr, const size_t size)
{
    wmem_block_allocator_t *allocator = (wmem_block_allocator_t*) private_data;
    wmem_block_chunk_t     *chunk;

    chunk = WMEM_DATA_TO_CHUNK(ptr);

    if (chunk->jumbo) {
        return wmem_block_realloc_jumbo(allocator, chunk, size);
    }

    if (size > WMEM_CHUNK_DATA_LEN(chunk)) {
        /* grow */
        wmem_block_chunk_t *tmp;

        tmp = WMEM_CHUNK_NEXT(chunk);

        if (tmp && (!tmp->used) &&
                (size < WMEM_CHUNK_DATA_LEN(chunk) + tmp->len)) {
            /* the next chunk is free and has enough extra, so just grab
             * from that */
            size_t split_size;

            /* we ask for the next chunk to be split, but we don't end up
             * using the split chunk header (it just gets merged into this one),
             * so we want the split to be of (size - curdatalen - header_size).
             * However, this can underflow by header_size, so we do a quick
             * check here and floor the value to 0. */
            split_size = size - WMEM_CHUNK_DATA_LEN(chunk);

            if (split_size < WMEM_CHUNK_HEADER_SIZE) {
                split_size = 0;
            }
            else {
                split_size -= WMEM_CHUNK_HEADER_SIZE;
            }

            wmem_block_split_free_chunk(allocator, tmp, split_size);

            /* Now do a 'quickie' merge between the current block and the left-
             * hand side of the split. Simply calling wmem_block_merge_free
             * might confuse things, since we may temporarily have two blocks
             * to our right that are both free (and it isn't guaranteed to
             * handle that case). Update our 'next' count and last flag, and
             * our (new) successor's 'prev' count */
            chunk->len += tmp->len;
            chunk->last = tmp->last;
            tmp = WMEM_CHUNK_NEXT(chunk);
            if (tmp) {
                tmp->prev = chunk->len;
            }

            /* Now cycle the recycler */
            wmem_block_cycle_recycler(allocator);

            /* And return the same old pointer */
            return ptr;
        }
        else {
            /* no room to grow, need to alloc, copy, free */
            void *newptr;

            newptr = wmem_block_alloc(private_data, size);
            memcpy(newptr, ptr, WMEM_CHUNK_DATA_LEN(chunk));
            wmem_block_free(private_data, ptr);

            /* No need to cycle the recycler, alloc and free both did that
             * already */

            return newptr;
        }
    }
    else if (size < WMEM_CHUNK_DATA_LEN(chunk)) {
        /* shrink */
        wmem_block_split_used_chunk(allocator, chunk, size);

        /* Now cycle the recycler */
        wmem_block_cycle_recycler(allocator);

        return ptr;
    }

    /* no-op */
    return ptr;
}

static void
wmem_block_free_all(void *private_data)
{
    wmem_block_allocator_t *allocator = (wmem_block_allocator_t*) private_data;
    wmem_block_hdr_t   *cur;
    wmem_block_chunk_t *chunk;

    /* the existing free lists are entirely irrelevant */
    allocator->master_head   = NULL;
    allocator->recycler_head = NULL;

    /* iterate through the blocks, reinitializing each one */
    cur = allocator->block_list;

    while (cur) {
        chunk = WMEM_BLOCK_TO_CHUNK(cur);
        if (chunk->jumbo) {
            wmem_block_remove_from_block_list(allocator, cur);
            cur = cur->next;
            wmem_free(NULL, WMEM_CHUNK_TO_BLOCK(chunk));
        }
        else {
            wmem_block_init_block(allocator, cur);
            cur = cur->next;
        }
    }
}

static void
wmem_block_gc(void *private_data)
{
    wmem_block_allocator_t *allocator = (wmem_block_allocator_t*) private_data;
    wmem_block_hdr_t   *cur, *next;
    wmem_block_chunk_t *chunk;
    wmem_block_free_t  *free_chunk;

    /* Walk through the blocks, adding used blocks to the new list and
     * completely destroying unused blocks. */
    cur = allocator->block_list;
    allocator->block_list = NULL;

    while (cur) {
        chunk = WMEM_BLOCK_TO_CHUNK(cur);
        next  = cur->next;

        if (!chunk->jumbo && !chunk->used && chunk->last) {
            /* If the first chunk is also the last, and is unused, then
             * the block as a whole is entirely unused, so return it to
             * the OS and remove it from whatever lists it is in. */
            free_chunk = WMEM_GET_FREE(chunk);
            if (free_chunk->next) {
                WMEM_GET_FREE(free_chunk->next)->prev = free_chunk->prev;
            }
            if (free_chunk->prev) {
                WMEM_GET_FREE(free_chunk->prev)->next = free_chunk->next;
            }
            if (allocator->recycler_head == chunk) {
                if (free_chunk->next == chunk) {
                    allocator->recycler_head = NULL;
                }
                else {
                    allocator->recycler_head = free_chunk->next;
                }
            }
            else if (allocator->master_head == chunk) {
                allocator->master_head = free_chunk->next;
            }
            wmem_free(NULL, cur);
        }
        else {
            /* part of this block is used, so add it to the new block list */
            wmem_block_add_to_block_list(allocator, cur);
        }

        cur = next;
    }
}

static void
wmem_block_allocator_cleanup(void *private_data)
{
    /* wmem guarantees that free_all() is called directly before this, so
     * calling gc will return all our blocks to the OS automatically */
    wmem_block_gc(private_data);

    /* then just free the allocator structs */
    wmem_free(NULL, private_data);
}

void
wmem_block_allocator_init(wmem_allocator_t *allocator)
{
    wmem_block_allocator_t *block_allocator;

    block_allocator = wmem_new(NULL, wmem_block_allocator_t);

    allocator->walloc   = &wmem_block_alloc;
    allocator->wrealloc = &wmem_block_realloc;
    allocator->wfree    = &wmem_block_free;

    allocator->free_all = &wmem_block_free_all;
    allocator->gc       = &wmem_block_gc;
    allocator->cleanup  = &wmem_block_allocator_cleanup;

    allocator->private_data = (void*) block_allocator;

    block_allocator->block_list    = NULL;
    block_allocator->master_head   = NULL;
    block_allocator->recycler_head = NULL;
}

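/* Example use (a sketch only, not part of this file): the block allocator is
 * normally reached through the public wmem API rather than by calling the
 * static functions above directly, e.g.:
 *
 *     wmem_allocator_t *pool = wmem_allocator_new(WMEM_ALLOCATOR_BLOCK);
 *     char *buf = (char *)wmem_alloc(pool, 512);
 *     buf = (char *)wmem_realloc(pool, buf, 2048);
 *     wmem_free(pool, buf);
 *     wmem_destroy_allocator(pool);
 */
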
/*
 * Editor modelines - https://www.wireshark.org/tools/modelines.html
 *
 * Local variables:
 * c-basic-offset: 4
 * tab-width: 8
 * indent-tabs-mode: nil
 * End:
 *
 * vi: set shiftwidth=4 tabstop=8 expandtab:
 * :indentSize=4:tabSize=8:noTabs=true:
 */