/* GLIB sliced memory - fast concurrent memory chunk allocator
 * Copyright (C) 2005 Tim Janik
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#if defined HAVE_POSIX_MEMALIGN && defined POSIX_MEMALIGN_WITH_COMPLIANT_ALLOCS
#  define HAVE_COMPLIANT_POSIX_MEMALIGN 1
#endif

#ifdef HAVE_COMPLIANT_POSIX_MEMALIGN
#define _XOPEN_SOURCE 600       /* posix_memalign() */
#endif
#include <stdlib.h>             /* posix_memalign() */
#include <string.h>             /* memset() */
#include <errno.h>
#include "gmem.h"               /* gslice.h */
#include "gthreadprivate.h"
#include <unistd.h>             /* sysconf() */

/* the GSlice allocator is split up into 4 layers, roughly modelled after the slab
 * allocator and magazine extensions as outlined in:
 * + [Bonwick94] Jeff Bonwick, The slab allocator: An object-caching kernel
 *   memory allocator. USENIX 1994, http://citeseer.ist.psu.edu/bonwick94slab.html
 * + [Bonwick01] Bonwick and Jonathan Adams, Magazines and vmem: Extending the
 *   slab allocator to many cpu's and arbitrary resources.
 *   USENIX 2001, http://citeseer.ist.psu.edu/bonwick01magazines.html
 * the layers are:
 * - the thread magazines. for each (aligned) chunk size, a magazine (a list)
 *   of recently freed and soon to be allocated chunks is maintained per thread.
 *   this way, most alloc/free requests can be quickly satisfied from per-thread
 *   free lists which only require one g_private_get() call to retrieve the
 *   thread handle.
 * - the magazine cache. allocating and freeing chunks to/from threads only
 *   occurs at magazine sizes from a global depot of magazines. the depot
 *   maintains a 15 second working set of allocated magazines, so full
 *   magazines are not allocated and released too often.
 *   the chunk size dependent magazine sizes automatically adapt (within limits,
 *   see [3]) to lock contention to properly scale performance across a variety
 *   of machines.
 * - the slab allocator. this allocator allocates slabs (blocks of memory) close
 *   to the system page size or multiples thereof which have to be page aligned.
 *   the blocks are divided into smaller chunks which are used to satisfy
 *   allocations from the upper layers. the space provided by the remainder of
 *   the chunk size division is used for cache colorization (random distribution
 *   of chunk addresses) to improve processor cache utilization. multiple slabs
 *   with the same chunk size are kept in a partially sorted ring to allow O(1)
 *   freeing and allocation of chunks (as long as the allocation of an entirely
 *   new slab can be avoided).
 * - the page allocator. on most modern systems, posix_memalign(3) or
 *   memalign(3) should be available, so this is used to allocate blocks with
 *   system page size based alignments and sizes or multiples thereof.
 *   if no memalign variant is provided, valloc() is used instead and
 *   block sizes are limited to the system page size (no multiples thereof).
 *   as a fallback, on systems without even valloc(), a malloc(3)-based page
 *   allocator with alloc-only behaviour is used.
 *
 * NOTES:
 * [1] some systems' memalign(3) implementations may rely on boundary tagging for
 *     the handed out memory chunks. to avoid excessive page-wise fragmentation,
 *     we reserve 2 * sizeof (void*) per block size for the system's memalign(3),
 *     specified in NATIVE_MALLOC_PADDING.
 * [2] using the slab allocator alone already provides for a fast and efficient
 *     allocator, it doesn't properly scale beyond single-threaded uses though.
 *     also, the slab allocator implements eager free(3)-ing, i.e. does not
 *     provide any form of caching or working set maintenance. so if used alone,
 *     it's vulnerable to thrashing for sequences of balanced (alloc, free) pairs
 *     at certain thresholds.
 * [3] magazine sizes are bound by an implementation specific minimum size and
 *     a chunk size specific maximum to limit magazine storage sizes to roughly
 *     16KB.
 * [4] allocating ca. 8 chunks per block/page keeps a good balance between
 *     external and internal fragmentation (<= 12.5%). [Bonwick94]
 */

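/* A minimal usage sketch of the public API these layers serve (illustrative
 * only, not part of this file; g_slice_new0/g_slice_free are the gslice.h
 * convenience macros wrapping g_slice_alloc0() and g_slice_free1()):
 *
 *   typedef struct { double x, y; } Point;
 *   Point *p = g_slice_new0 (Point);   // zero-filled chunk from a thread magazine
 *   p->x = p->y = 1.0;
 *   g_slice_free (Point, p);           // chunk returns to the thread magazine
 *
 * all allocations of equal (aligned) size share the same chunk size class and
 * therefore the same magazines and slabs.
 */
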
/* --- macros and constants --- */
#define LARGEALIGNMENT          (256)
#define P2ALIGNMENT             (2 * sizeof (gsize))                            /* fits 2 pointers (assumed to be 2 * GLIB_SIZEOF_SIZE_T below) */
#define ALIGN(size, base)       ((base) * (gsize) (((size) + (base) - 1) / (base)))
#define NATIVE_MALLOC_PADDING   P2ALIGNMENT                                     /* per-page padding left for native malloc(3) see [1] */
#define SLAB_INFO_SIZE          P2ALIGN (sizeof (SlabInfo) + NATIVE_MALLOC_PADDING)
#define MAX_MAGAZINE_SIZE       (256)                                           /* see [3] and allocator_get_magazine_threshold() for this */
#define MIN_MAGAZINE_SIZE       (4)
#define MAX_STAMP_COUNTER       (7)                                             /* distributes the load of gettimeofday() */
#define MAX_SLAB_CHUNK_SIZE(al) (((al)->max_page_size - SLAB_INFO_SIZE) / 8)    /* we want at least 8 chunks per page, see [4] */
#define MAX_SLAB_INDEX(al)      (SLAB_INDEX (al, MAX_SLAB_CHUNK_SIZE (al)) + 1)
#define SLAB_INDEX(al, asize)   ((asize) / P2ALIGNMENT - 1)                     /* asize must be P2ALIGNMENT aligned */
#define SLAB_CHUNK_SIZE(al, ix) (((ix) + 1) * P2ALIGNMENT)
#define SLAB_BPAGE_SIZE(al,csz) (8 * (csz) + SLAB_INFO_SIZE)

/* optimized version of ALIGN (size, P2ALIGNMENT) */
#if     GLIB_SIZEOF_SIZE_T * 2 == 8  /* P2ALIGNMENT */
#define P2ALIGN(size)   (((size) + 0x7) & ~(gsize) 0x7)
#elif   GLIB_SIZEOF_SIZE_T * 2 == 16 /* P2ALIGNMENT */
#define P2ALIGN(size)   (((size) + 0xf) & ~(gsize) 0xf)
#else
#define P2ALIGN(size)   ALIGN (size, P2ALIGNMENT)
#endif

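/* worked example (assuming a 64-bit platform, i.e. GLIB_SIZEOF_SIZE_T == 8):
 * P2ALIGNMENT is 16, so P2ALIGN (10) == (10 + 0xf) & ~0xf == 16 and
 * P2ALIGN (17) == 32; SLAB_INDEX() then maps the aligned sizes 16, 32, 48, ...
 * onto the consecutive size class indices 0, 1, 2, ...
 */
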
125 /* special helpers to avoid gmessage.c dependency */
126 static void mem_error (const char *format
, ...) G_GNUC_PRINTF (1,2);
127 #define mem_assert(cond) do { if (G_LIKELY (cond)) ; else mem_error ("assertion failed: %s", #cond); } while (0)
/* --- structures --- */
typedef struct _ChunkLink      ChunkLink;
typedef struct _SlabInfo       SlabInfo;
typedef struct _CachedMagazine CachedMagazine;
struct _ChunkLink {
  ChunkLink *next;
  ChunkLink *data;
};
struct _SlabInfo {
  ChunkLink *chunks;
  guint n_allocated;
  SlabInfo *next, *prev;
};
typedef struct {
  ChunkLink *chunks;
  gsize      count;                     /* approximate chunks list length */
} Magazine;
typedef struct {
  Magazine   *magazine1;                /* array of MAX_SLAB_INDEX (allocator) */
  Magazine   *magazine2;                /* array of MAX_SLAB_INDEX (allocator) */
} ThreadMemory;
typedef struct {
  gboolean always_malloc;
  gboolean bypass_magazines;
  gsize    working_set_msecs;
  guint    color_increment;
} SliceConfig;
typedef struct {
  /* const after initialization */
  gsize         min_page_size, max_page_size;
  SliceConfig   config;
  gsize         max_slab_chunk_size_for_magazine_cache;
  /* magazine cache */
  GMutex       *magazine_mutex;
  ChunkLink   **magazines;              /* array of MAX_SLAB_INDEX (allocator) */
  guint        *contention_counters;    /* array of MAX_SLAB_INDEX (allocator) */
  gint          mutex_counter;
  guint         stamp_counter;
  guint         last_stamp;
  /* slab allocator */
  GMutex       *slab_mutex;
  SlabInfo    **slab_stack;             /* array of MAX_SLAB_INDEX (allocator) */
  guint         color_accu;
} Allocator;

/* --- prototypes --- */
static gpointer     slab_allocator_alloc_chunk       (gsize      chunk_size);
static void         slab_allocator_free_chunk        (gsize      chunk_size,
                                                      gpointer   mem);
static void         private_thread_memory_cleanup    (gpointer   data);
static gpointer     allocator_memalign               (gsize      alignment,
                                                      gsize      memsize);
static void         allocator_memfree                (gsize      memsize,
                                                      gpointer   mem);
static inline void  magazine_cache_update_stamp      (void);
static inline gsize allocator_get_magazine_threshold (Allocator *allocator,
                                                      guint      ix);

/* --- variables --- */
static GPrivate   *private_thread_memory = NULL;
static gsize       sys_page_size = 0;
static Allocator   allocator[1] = { { 0, }, };
static SliceConfig slice_config = {
  FALSE,        /* always_malloc */
  FALSE,        /* bypass_magazines */
  15 * 1000,    /* working_set_msecs */
  1,            /* color increment, alt: 0x7fffffff */
};

/* --- auxiliary functions --- */
void
g_slice_set_config (GSliceConfig ckey,
                    gint64       value)
{
  g_return_if_fail (sys_page_size == 0);
  switch (ckey)
    {
    case G_SLICE_CONFIG_ALWAYS_MALLOC:
      slice_config.always_malloc = value != 0;
      break;
    case G_SLICE_CONFIG_BYPASS_MAGAZINES:
      slice_config.bypass_magazines = value != 0;
      break;
    case G_SLICE_CONFIG_WORKING_SET_MSECS:
      slice_config.working_set_msecs = value;
      break;
    case G_SLICE_CONFIG_COLOR_INCREMENT:
      slice_config.color_increment = value;
      break;
    default: ;
    }
}

gint64
g_slice_get_config (GSliceConfig ckey)
{
  switch (ckey)
    {
    case G_SLICE_CONFIG_ALWAYS_MALLOC:
      return slice_config.always_malloc;
    case G_SLICE_CONFIG_BYPASS_MAGAZINES:
      return slice_config.bypass_magazines;
    case G_SLICE_CONFIG_WORKING_SET_MSECS:
      return slice_config.working_set_msecs;
    case G_SLICE_CONFIG_CHUNK_SIZES:
      return MAX_SLAB_INDEX (allocator);
    case G_SLICE_CONFIG_COLOR_INCREMENT:
      return slice_config.color_increment;
    default:
      return 0;
    }
}

gint64*
g_slice_get_config_state (GSliceConfig ckey,
                          gint64       address,
                          guint       *n_values)
{
  guint i = 0;
  g_return_val_if_fail (n_values != NULL, NULL);
  switch (ckey)
    {
      gint64 array[64];
    case G_SLICE_CONFIG_CONTENTION_COUNTER:
      array[i++] = SLAB_CHUNK_SIZE (allocator, address);
      array[i++] = allocator->contention_counters[address];
      array[i++] = allocator_get_magazine_threshold (allocator, address);
      *n_values = i;
      return g_memdup (array, sizeof (array[0]) * *n_values);
    default:
      return NULL;
    }
}

static void
slice_config_init (SliceConfig *config)
{
  /* don't use g_malloc/g_message here */
  gchar buffer[1024];
  const gchar *val = _g_getenv_nomalloc ("G_SLICE", buffer);
  static const GDebugKey keys[] = {
    { "always-malloc", 1 << 0 },
  };
  gint flags = !val ? 0 : g_parse_debug_string (val, keys, G_N_ELEMENTS (keys));
  *config = slice_config;
  if (flags & (1 << 0))         /* always-malloc */
    config->always_malloc = TRUE;
}

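/* e.g. running a program as
 *
 *   G_SLICE=always-malloc ./program
 *
 * makes slice_config_init() set always_malloc, which routes every
 * g_slice_alloc()/g_slice_free1() pair straight through g_malloc()/g_free();
 * useful under memory checkers like valgrind.
 */
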
static void
g_slice_init_nomessage (void)
{
  /* we may not use g_error() or friends here */
  mem_assert (sys_page_size == 0);
  mem_assert (MIN_MAGAZINE_SIZE >= 4);
#ifdef G_OS_WIN32
  {
    SYSTEM_INFO system_info;
    GetSystemInfo (&system_info);
    sys_page_size = system_info.dwPageSize;
  }
#else
  sys_page_size = sysconf (_SC_PAGESIZE); /* = sysconf (_SC_PAGE_SIZE); = getpagesize(); */
#endif
  mem_assert (sys_page_size >= 2 * LARGEALIGNMENT);
  mem_assert ((sys_page_size & (sys_page_size - 1)) == 0);
  slice_config_init (&allocator->config);
  allocator->min_page_size = sys_page_size;
#if HAVE_COMPLIANT_POSIX_MEMALIGN || HAVE_MEMALIGN
  /* allow allocation of pages up to 8KB (with 8KB alignment).
   * this is useful because many medium to large sized structures
   * fit less than 8 times (see [4]) into 4KB pages.
   * we allow very small page sizes here, to reduce wastage in
   * threads if only small allocations are required (this does
   * bear the risk of increasing allocation times and fragmentation
   * though).
   */
  allocator->min_page_size = MAX (allocator->min_page_size, 4096);
  allocator->max_page_size = MAX (allocator->min_page_size, 8192);
  allocator->min_page_size = MIN (allocator->min_page_size, 128);
#else
  /* we can only align to system page size */
  allocator->max_page_size = sys_page_size;
#endif
  allocator->magazine_mutex = NULL;     /* _g_slice_thread_init_nomessage() */
  allocator->magazines = g_new0 (ChunkLink*, MAX_SLAB_INDEX (allocator));
  allocator->contention_counters = g_new0 (guint, MAX_SLAB_INDEX (allocator));
  allocator->mutex_counter = 0;
  allocator->stamp_counter = MAX_STAMP_COUNTER; /* force initial update */
  allocator->last_stamp = 0;
  allocator->slab_mutex = NULL;         /* _g_slice_thread_init_nomessage() */
  allocator->slab_stack = g_new0 (SlabInfo*, MAX_SLAB_INDEX (allocator));
  allocator->color_accu = 0;
  magazine_cache_update_stamp();
  /* values cached for performance reasons */
  allocator->max_slab_chunk_size_for_magazine_cache = MAX_SLAB_CHUNK_SIZE (allocator);
  if (allocator->config.always_malloc || allocator->config.bypass_magazines)
    allocator->max_slab_chunk_size_for_magazine_cache = 0;      /* non-optimized cases */
  /* at this point, g_mem_gc_friendly() should be initialized, this
   * should have been accomplished by the above g_malloc/g_new calls
   */
}

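/* resulting size arithmetic, assuming a 64-bit platform with 4KB system
 * pages and a memalign variant (so max_page_size == 8192): with
 * sizeof (SlabInfo) == 32, SLAB_INFO_SIZE is P2ALIGN (32 + 16) == 48 and
 * MAX_SLAB_CHUNK_SIZE (allocator) == (8192 - 48) / 8 == 1018, i.e. chunks up
 * to roughly 1KB are served from slabs/magazines, larger ones from g_malloc().
 */
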
static inline guint
allocator_categorize (gsize aligned_chunk_size)
{
  /* speed up the likely path */
  if (G_LIKELY (aligned_chunk_size && aligned_chunk_size <= allocator->max_slab_chunk_size_for_magazine_cache))
    return 1;           /* use magazine cache */

  /* the above will fail (max_slab_chunk_size_for_magazine_cache == 0) if the
   * allocator is still uninitialized, or if we are not configured to use the
   * magazine cache.
   */
  if (!sys_page_size)
    g_slice_init_nomessage ();
  if (!allocator->config.always_malloc &&
      aligned_chunk_size &&
      aligned_chunk_size <= MAX_SLAB_CHUNK_SIZE (allocator))
    {
      if (allocator->config.bypass_magazines)
        return 2;       /* use slab allocator, see [2] */
      return 1;         /* use magazine cache */
    }
  return 0;             /* use malloc() */
}

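/* example categorization (same 64-bit assumptions as above, default config):
 * g_slice_alloc (100) yields P2ALIGN (100) == 112 <= 1018, category 1, served
 * from a thread magazine; g_slice_alloc (5000) yields P2ALIGN (5000) == 5008
 * > MAX_SLAB_CHUNK_SIZE, category 0, delegated to g_malloc().
 */
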
void
_g_slice_thread_init_nomessage (void)
{
  /* we may not use g_error() or friends here */
  if (!sys_page_size)
    g_slice_init_nomessage();
  private_thread_memory = g_private_new (private_thread_memory_cleanup);
  allocator->magazine_mutex = g_mutex_new();
  allocator->slab_mutex = g_mutex_new();
}

static void
g_mutex_lock_a (GMutex *mutex,
                guint  *contention_counter)
{
  gboolean contention = FALSE;
  if (!g_mutex_trylock (mutex))
    {
      g_mutex_lock (mutex);
      contention = TRUE;
    }
  if (contention)
    {
      allocator->mutex_counter++;
      if (allocator->mutex_counter >= 1)        /* quickly adapt to contention */
        {
          allocator->mutex_counter = 0;
          *contention_counter = MIN (*contention_counter + 1, MAX_MAGAZINE_SIZE);
        }
    }
  else /* !contention */
    {
      allocator->mutex_counter--;
      if (allocator->mutex_counter < -11)       /* moderately recover magazine sizes */
        {
          allocator->mutex_counter = 0;
          *contention_counter = MAX (*contention_counter, 1) - 1;
        }
    }
}

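/* net effect of the counter logic above: every contended acquisition bumps
 * the per-size contention counter right away (enlarging the magazine
 * threshold, see allocator_get_magazine_threshold()), while it takes about a
 * dozen uncontended acquisitions in a row to shrink it again by one.
 */
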
static inline ThreadMemory*
thread_memory_from_self (void)
{
  ThreadMemory *tmem = g_private_get (private_thread_memory);
  if (G_UNLIKELY (!tmem))
    {
      const guint n_magazines = MAX_SLAB_INDEX (allocator);
      tmem = g_malloc0 (sizeof (ThreadMemory) + sizeof (Magazine) * 2 * n_magazines);
      tmem->magazine1 = (Magazine*) (tmem + 1);
      tmem->magazine2 = &tmem->magazine1[n_magazines];
      g_private_set (private_thread_memory, tmem);
    }
  return tmem;
}

static inline ChunkLink*
magazine_chain_pop_head (ChunkLink **magazine_chunks)
{
  /* magazine chains are linked via ChunkLink->next.
   * each ChunkLink->data of the toplevel chain may point to a subchain,
   * linked via ChunkLink->next. ChunkLink->data of the subchains just
   * contains uninitialized junk.
   */
  ChunkLink *chunk = (*magazine_chunks)->data;
  if (G_UNLIKELY (chunk))
    {
      /* allocating from freed list */
      (*magazine_chunks)->data = chunk->next;
    }
  else
    {
      chunk = *magazine_chunks;
      *magazine_chunks = chunk->next;
    }
  return chunk;
}

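/* illustration (not from the original sources) of the two-level chain that
 * magazine_chain_pop_head() walks; the head's data pointer may carry a
 * subchain of previously freed chunks, which is drained first:
 *
 *   *magazine_chunks -> [data|next] -> [data|next] -> ...
 *                         |
 *                         v
 *                       [junk|next] -> [junk|next] -> NULL
 */
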
#if 0 /* useful for debugging */
static guint
magazine_count (ChunkLink *head)
{
  guint count = 0;
  if (!head)
    return 0;
  while (head)
    {
      ChunkLink *child = head->data;
      count += 1;
      for (child = head->data; child; child = child->next)
        count += 1;
      head = head->next;
    }
  return count;
}
#endif

static inline gsize
allocator_get_magazine_threshold (Allocator *allocator,
                                  guint      ix)
{
  /* the magazine size calculated here has a lower bound of MIN_MAGAZINE_SIZE,
   * which is required by the implementation. also, for moderately sized chunks
   * (say >= 64 bytes), magazine sizes shouldn't be much smaller than the number
   * of chunks available per page/2 to avoid excessive traffic in the magazine
   * cache for small to medium sized structures.
   * the upper bound of the magazine size is effectively provided by
   * MAX_MAGAZINE_SIZE. for larger chunks, this number is scaled down so that
   * the content of a single magazine doesn't exceed ca. 16KB.
   */
  gsize chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
  guint threshold = MAX (MIN_MAGAZINE_SIZE, allocator->max_page_size / MAX (5 * chunk_size, 5 * 32));
  guint contention_counter = allocator->contention_counters[ix];
  if (G_UNLIKELY (contention_counter))  /* single CPU bias */
    {
      /* adapt contention counter thresholds to chunk sizes */
      contention_counter = contention_counter * 64 / chunk_size;
      threshold = MAX (threshold, contention_counter);
    }
  return threshold;
}

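/* example (assuming max_page_size == 8192): for 64-byte chunks the base
 * threshold is MAX (4, 8192 / MAX (5 * 64, 5 * 32)) == 25 chunks per
 * magazine; a contention counter of 100 scales to 100 * 64 / 64 == 100 and
 * raises the threshold to 100 (the counter itself is capped at
 * MAX_MAGAZINE_SIZE by g_mutex_lock_a()).
 */
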
/* --- magazine cache --- */
static inline void
magazine_cache_update_stamp (void)
{
  if (allocator->stamp_counter >= MAX_STAMP_COUNTER)
    {
      GTimeVal tv;
      g_get_current_time (&tv);
      allocator->last_stamp = tv.tv_sec * 1000 + tv.tv_usec / 1000; /* milliseconds */
      allocator->stamp_counter = 0;
    }
  else
    allocator->stamp_counter++;
}

static inline ChunkLink*
magazine_chain_prepare_fields (ChunkLink *magazine_chunks)
{
  ChunkLink *chunk1;
  ChunkLink *chunk2;
  ChunkLink *chunk3;
  ChunkLink *chunk4;
  /* checked upon initialization: mem_assert (MIN_MAGAZINE_SIZE >= 4); */
  /* ensure a magazine with at least 4 unused data pointers */
  chunk1 = magazine_chain_pop_head (&magazine_chunks);
  chunk2 = magazine_chain_pop_head (&magazine_chunks);
  chunk3 = magazine_chain_pop_head (&magazine_chunks);
  chunk4 = magazine_chain_pop_head (&magazine_chunks);
  chunk4->next = magazine_chunks;
  chunk3->next = chunk4;
  chunk2->next = chunk3;
  chunk1->next = chunk2;
  return chunk1;
}

/* access the first 4 fields of a specially prepared magazine chain */
#define magazine_chain_prev(mc)         ((mc)->data)
#define magazine_chain_stamp(mc)        ((mc)->next->data)
#define magazine_chain_uint_stamp(mc)   GPOINTER_TO_UINT ((mc)->next->data)
#define magazine_chain_next(mc)         ((mc)->next->next->data)
#define magazine_chain_count(mc)        ((mc)->next->next->next->data)

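/* illustration (not from the original sources): after
 * magazine_chain_prepare_fields(), the data pointers of the first four links
 * double as the magazine's bookkeeping fields:
 *
 *   link1->data : prev pointer in the depot ring   (magazine_chain_prev)
 *   link2->data : insertion time stamp in msecs    (magazine_chain_stamp)
 *   link3->data : next pointer in the depot ring   (magazine_chain_next)
 *   link4->data : chunk count of this magazine     (magazine_chain_count)
 */
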
static void
magazine_cache_trim (Allocator *allocator,
                     guint      ix,
                     guint      stamp)
{
  /* g_mutex_lock (allocator->mutex); done by caller */
  /* trim magazine cache from tail */
  ChunkLink *current = magazine_chain_prev (allocator->magazines[ix]);
  ChunkLink *trash = NULL;
  while (ABS (stamp - magazine_chain_uint_stamp (current)) >= allocator->config.working_set_msecs)
    {
      /* unlink */
      ChunkLink *prev = magazine_chain_prev (current);
      ChunkLink *next = magazine_chain_next (current);
      magazine_chain_next (prev) = next;
      magazine_chain_prev (next) = prev;
      /* clear special fields, put on trash stack */
      magazine_chain_next (current) = NULL;
      magazine_chain_count (current) = NULL;
      magazine_chain_stamp (current) = NULL;
      magazine_chain_prev (current) = trash;
      trash = current;
      /* fixup list head if required */
      if (current == allocator->magazines[ix])
        {
          allocator->magazines[ix] = NULL;
          break;
        }
      current = prev;
    }
  g_mutex_unlock (allocator->magazine_mutex);
  /* free trash */
  if (trash)
    {
      const gsize chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
      g_mutex_lock (allocator->slab_mutex);
      while (trash)
        {
          current = trash;
          trash = magazine_chain_prev (current);
          magazine_chain_prev (current) = NULL; /* clear special field */
          while (current)
            {
              ChunkLink *chunk = magazine_chain_pop_head (&current);
              slab_allocator_free_chunk (chunk_size, chunk);
            }
        }
      g_mutex_unlock (allocator->slab_mutex);
    }
}

static void
magazine_cache_push_magazine (guint      ix,
                              ChunkLink *magazine_chunks,
                              gsize      count) /* must be >= MIN_MAGAZINE_SIZE */
{
  ChunkLink *current = magazine_chain_prepare_fields (magazine_chunks);
  ChunkLink *next, *prev;
  g_mutex_lock (allocator->magazine_mutex);
  /* add magazine at head */
  next = allocator->magazines[ix];
  if (next)
    prev = magazine_chain_prev (next);
  else
    next = prev = current;
  magazine_chain_next (prev) = current;
  magazine_chain_prev (next) = current;
  magazine_chain_prev (current) = prev;
  magazine_chain_next (current) = next;
  magazine_chain_count (current) = (gpointer) count;
  /* stamp magazine */
  magazine_cache_update_stamp();
  magazine_chain_stamp (current) = GUINT_TO_POINTER (allocator->last_stamp);
  allocator->magazines[ix] = current;
  /* free old magazines beyond a certain threshold */
  magazine_cache_trim (allocator, ix, allocator->last_stamp);
  /* g_mutex_unlock (allocator->mutex); was done by magazine_cache_trim() */
}

static ChunkLink*
magazine_cache_pop_magazine (guint  ix,
                             gsize *countp)
{
  g_mutex_lock_a (allocator->magazine_mutex, &allocator->contention_counters[ix]);
  if (!allocator->magazines[ix])
    {
      guint magazine_threshold = allocator_get_magazine_threshold (allocator, ix);
      gsize i, chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
      ChunkLink *chunk, *head;
      g_mutex_unlock (allocator->magazine_mutex);
      g_mutex_lock (allocator->slab_mutex);
      head = slab_allocator_alloc_chunk (chunk_size);
      head->data = NULL;
      chunk = head;
      for (i = 1; i < magazine_threshold; i++)
        {
          chunk->next = slab_allocator_alloc_chunk (chunk_size);
          chunk = chunk->next;
          chunk->data = NULL;
        }
      chunk->next = NULL;
      g_mutex_unlock (allocator->slab_mutex);
      *countp = i;
      return head;
    }
  else
    {
      ChunkLink *current = allocator->magazines[ix];
      ChunkLink *prev = magazine_chain_prev (current);
      ChunkLink *next = magazine_chain_next (current);
      /* unlink */
      magazine_chain_next (prev) = next;
      magazine_chain_prev (next) = prev;
      allocator->magazines[ix] = next == current ? NULL : next;
      g_mutex_unlock (allocator->magazine_mutex);
      /* clear special fields and hand out */
      *countp = (gsize) magazine_chain_count (current);
      magazine_chain_prev (current) = NULL;
      magazine_chain_next (current) = NULL;
      magazine_chain_count (current) = NULL;
      magazine_chain_stamp (current) = NULL;
      return current;
    }
}

/* --- thread magazines --- */
static void
private_thread_memory_cleanup (gpointer data)
{
  ThreadMemory *tmem = data;
  const guint n_magazines = MAX_SLAB_INDEX (allocator);
  guint ix;
  for (ix = 0; ix < n_magazines; ix++)
    {
      Magazine *mags[2];
      guint j;
      mags[0] = &tmem->magazine1[ix];
      mags[1] = &tmem->magazine2[ix];
      for (j = 0; j < 2; j++)
        {
          Magazine *mag = mags[j];
          if (mag->count >= MIN_MAGAZINE_SIZE) /* release through magazine cache */
            magazine_cache_push_magazine (ix, mag->chunks, mag->count);
          else /* release alone */
            {
              const gsize chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
              g_mutex_lock (allocator->slab_mutex);
              while (mag->chunks)
                {
                  ChunkLink *chunk = magazine_chain_pop_head (&mag->chunks);
                  slab_allocator_free_chunk (chunk_size, chunk);
                }
              g_mutex_unlock (allocator->slab_mutex);
            }
        }
    }
  g_free (tmem);
}

static void
thread_memory_magazine1_reload (ThreadMemory *tmem,
                                guint         ix)
{
  Magazine *mag = &tmem->magazine1[ix];
  mem_assert (mag->chunks == NULL); /* ensure that we may reset mag->count */
  mag->count = 0;
  mag->chunks = magazine_cache_pop_magazine (ix, &mag->count);
}

static void
thread_memory_magazine2_unload (ThreadMemory *tmem,
                                guint         ix)
{
  Magazine *mag = &tmem->magazine2[ix];
  magazine_cache_push_magazine (ix, mag->chunks, mag->count);
  mag->chunks = NULL;
  mag->count = 0;
}

static inline void
thread_memory_swap_magazines (ThreadMemory *tmem,
                              guint         ix)
{
  Magazine xmag = tmem->magazine1[ix];
  tmem->magazine1[ix] = tmem->magazine2[ix];
  tmem->magazine2[ix] = xmag;
}

static inline gboolean
thread_memory_magazine1_is_empty (ThreadMemory *tmem,
                                  guint         ix)
{
  return tmem->magazine1[ix].chunks == NULL;
}

static inline gboolean
thread_memory_magazine2_is_full (ThreadMemory *tmem,
                                 guint         ix)
{
  return tmem->magazine2[ix].count >= allocator_get_magazine_threshold (allocator, ix);
}

static inline gpointer
thread_memory_magazine1_alloc (ThreadMemory *tmem,
                               guint         ix)
{
  Magazine *mag = &tmem->magazine1[ix];
  ChunkLink *chunk = magazine_chain_pop_head (&mag->chunks);
  if (G_LIKELY (mag->count > 0))
    mag->count--;
  return chunk;
}

static inline void
thread_memory_magazine2_free (ThreadMemory *tmem,
                              guint         ix,
                              gpointer      mem)
{
  Magazine *mag = &tmem->magazine2[ix];
  ChunkLink *chunk = mem;
  chunk->data = NULL;
  chunk->next = mag->chunks;
  mag->chunks = chunk;
  mag->count++;
}

/* --- API functions --- */
gpointer
g_slice_alloc (gsize mem_size)
{
  gsize chunk_size;
  gpointer mem;
  guint acat;
  chunk_size = P2ALIGN (mem_size);
  acat = allocator_categorize (chunk_size);
  if (G_LIKELY (acat == 1))     /* allocate through magazine layer */
    {
      ThreadMemory *tmem = thread_memory_from_self();
      guint ix = SLAB_INDEX (allocator, chunk_size);
      if (G_UNLIKELY (thread_memory_magazine1_is_empty (tmem, ix)))
        {
          thread_memory_swap_magazines (tmem, ix);
          if (G_UNLIKELY (thread_memory_magazine1_is_empty (tmem, ix)))
            thread_memory_magazine1_reload (tmem, ix);
        }
      mem = thread_memory_magazine1_alloc (tmem, ix);
    }
  else if (acat == 2)           /* allocate through slab allocator */
    {
      g_mutex_lock (allocator->slab_mutex);
      mem = slab_allocator_alloc_chunk (chunk_size);
      g_mutex_unlock (allocator->slab_mutex);
    }
  else                          /* delegate to system malloc */
    mem = g_malloc (mem_size);
  return mem;
}

gpointer
g_slice_alloc0 (gsize mem_size)
{
  gpointer mem = g_slice_alloc (mem_size);
  if (mem)
    memset (mem, 0, mem_size);
  return mem;
}

void
g_slice_free1 (gsize    mem_size,
               gpointer mem_block)
{
  gsize chunk_size = P2ALIGN (mem_size);
  guint acat = allocator_categorize (chunk_size);
  if (G_UNLIKELY (!mem_block))
    return;
  if (G_LIKELY (acat == 1))             /* free through magazine layer */
    {
      ThreadMemory *tmem = thread_memory_from_self();
      guint ix = SLAB_INDEX (allocator, chunk_size);
      if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
        {
          thread_memory_swap_magazines (tmem, ix);
          if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
            thread_memory_magazine2_unload (tmem, ix);
        }
      if (G_UNLIKELY (g_mem_gc_friendly))
        memset (mem_block, 0, chunk_size);
      thread_memory_magazine2_free (tmem, ix, mem_block);
    }
  else if (acat == 2)                   /* free through slab allocator */
    {
      if (G_UNLIKELY (g_mem_gc_friendly))
        memset (mem_block, 0, chunk_size);
      g_mutex_lock (allocator->slab_mutex);
      slab_allocator_free_chunk (chunk_size, mem_block);
      g_mutex_unlock (allocator->slab_mutex);
    }
  else                                  /* delegate to system malloc */
    {
      if (G_UNLIKELY (g_mem_gc_friendly))
        memset (mem_block, 0, mem_size);
      g_free (mem_block);
    }
}

void
g_slice_free_chain_with_offset (gsize    mem_size,
                                gpointer mem_chain,
                                gsize    next_offset)
{
  gpointer slice = mem_chain;
  /* while the thread magazines and the magazine cache are implemented so that
   * they can easily be extended to allow for free lists containing more free
   * lists for the first level nodes, which would allow O(1) freeing in this
   * function, the benefit of such an extension is questionable, because:
   * - the magazine size counts will become mere lower bounds which confuses
   *   the code adapting to lock contention;
   * - freeing a single node to the thread magazines is very fast, so this
   *   O(list_length) operation is multiplied by a fairly small factor;
   * - memory usage histograms on larger applications seem to indicate that
   *   the amount of released multi node lists is negligible in comparison
   *   to single node releases.
   * - the major performance bottleneck, namely g_private_get() or
   *   g_mutex_lock()/g_mutex_unlock(), has already been moved out of the
   *   inner loop for freeing chained slices.
   */
  gsize chunk_size = P2ALIGN (mem_size);
  guint acat = allocator_categorize (chunk_size);
  if (G_LIKELY (acat == 1))             /* free through magazine layer */
    {
      ThreadMemory *tmem = thread_memory_from_self();
      guint ix = SLAB_INDEX (allocator, chunk_size);
      while (slice)
        {
          guint8 *current = slice;
          slice = *(gpointer*) (current + next_offset);
          if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
            {
              thread_memory_swap_magazines (tmem, ix);
              if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
                thread_memory_magazine2_unload (tmem, ix);
            }
          if (G_UNLIKELY (g_mem_gc_friendly))
            memset (current, 0, chunk_size);
          thread_memory_magazine2_free (tmem, ix, current);
        }
    }
  else if (acat == 2)                   /* free through slab allocator */
    {
      g_mutex_lock (allocator->slab_mutex);
      while (slice)
        {
          guint8 *current = slice;
          slice = *(gpointer*) (current + next_offset);
          if (G_UNLIKELY (g_mem_gc_friendly))
            memset (current, 0, chunk_size);
          slab_allocator_free_chunk (chunk_size, current);
        }
      g_mutex_unlock (allocator->slab_mutex);
    }
  else                                  /* delegate to system malloc */
    while (slice)
      {
        guint8 *current = slice;
        slice = *(gpointer*) (current + next_offset);
        if (G_UNLIKELY (g_mem_gc_friendly))
          memset (current, 0, mem_size);
        g_free (current);
      }
}

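/* usage sketch (illustrative): the gslice.h convenience macro
 * g_slice_free_chain expands to this function, e.g. releasing a whole GSList
 * spine in one call:
 *
 *   g_slice_free_chain (GSList, list, next);
 *   // == g_slice_free_chain_with_offset (sizeof (GSList), list,
 *   //                                    G_STRUCT_OFFSET (GSList, next))
 */
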
/* --- single page allocator --- */
static void
allocator_slab_stack_push (Allocator *allocator,
                           guint      ix,
                           SlabInfo  *sinfo)
{
  /* insert slab at slab ring head */
  if (!allocator->slab_stack[ix])
    {
      sinfo->next = sinfo;
      sinfo->prev = sinfo;
    }
  else
    {
      SlabInfo *next = allocator->slab_stack[ix], *prev = next->prev;
      next->prev = sinfo;
      prev->next = sinfo;
      sinfo->next = next;
      sinfo->prev = prev;
    }
  allocator->slab_stack[ix] = sinfo;
}

static gsize
allocator_aligned_page_size (Allocator *allocator,
                             gsize      n_bytes)
{
  gsize val = 1 << g_bit_storage (n_bytes - 1);
  val = MAX (val, allocator->min_page_size);
  return val;
}

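/* example: SLAB_BPAGE_SIZE() for 640-byte chunks is 8 * 640 + SLAB_INFO_SIZE
 * == 5168 (64-bit assumptions as above); g_bit_storage (5167) == 13, so the
 * slab gets rounded up to a 1 << 13 == 8192 byte page.
 */
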
static void
allocator_add_slab (Allocator *allocator,
                    guint      ix,
                    gsize      chunk_size)
{
  ChunkLink *chunk;
  SlabInfo *sinfo;
  gsize addr, padding, n_chunks, color = 0;
  gsize page_size = allocator_aligned_page_size (allocator, SLAB_BPAGE_SIZE (allocator, chunk_size));
  /* allocate 1 page for the chunks and the slab */
  gpointer aligned_memory = allocator_memalign (page_size, page_size - NATIVE_MALLOC_PADDING);
  guint8 *mem = aligned_memory;
  guint i;
  if (!mem)
    {
      const gchar *syserr = "unknown error";
#if HAVE_STRERROR
      syserr = strerror (errno);
#endif
      mem_error ("failed to allocate %u bytes (alignment: %u): %s\n",
                 (guint) (page_size - NATIVE_MALLOC_PADDING), (guint) page_size, syserr);
    }
  /* mask page address */
  addr = ((gsize) mem / page_size) * page_size;
  /* assert alignment */
  mem_assert (aligned_memory == (gpointer) addr);
  /* basic slab info setup */
  sinfo = (SlabInfo*) (mem + page_size - SLAB_INFO_SIZE);
  sinfo->n_allocated = 0;
  sinfo->chunks = NULL;
  /* figure cache colorization */
  n_chunks = ((guint8*) sinfo - mem) / chunk_size;
  padding = ((guint8*) sinfo - mem) - n_chunks * chunk_size;
  if (padding)
    {
      color = (allocator->color_accu * P2ALIGNMENT) % padding;
      allocator->color_accu += allocator->config.color_increment;
    }
  /* add chunks to free list */
  chunk = (ChunkLink*) (mem + color);
  sinfo->chunks = chunk;
  for (i = 0; i < n_chunks - 1; i++)
    {
      chunk->next = (ChunkLink*) ((guint8*) chunk + chunk_size);
      chunk = chunk->next;
    }
  chunk->next = NULL;   /* last chunk */
  /* add slab to slab ring */
  allocator_slab_stack_push (allocator, ix, sinfo);
}

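/* colorization example (64-bit assumptions as above): an 8192-byte slab for
 * 96-byte chunks leaves (8192 - 48) - 84 * 96 == 80 bytes of padding, so with
 * color_increment == 1 successive slabs place their first chunk at offsets
 * 0, 16, 32, 48, 64, 0, ... spreading chunk addresses across cache lines.
 */
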
static gpointer
slab_allocator_alloc_chunk (gsize chunk_size)
{
  ChunkLink *chunk;
  guint ix = SLAB_INDEX (allocator, chunk_size);
  /* ensure non-empty slab */
  if (!allocator->slab_stack[ix] || !allocator->slab_stack[ix]->chunks)
    allocator_add_slab (allocator, ix, chunk_size);
  /* allocate chunk */
  chunk = allocator->slab_stack[ix]->chunks;
  allocator->slab_stack[ix]->chunks = chunk->next;
  allocator->slab_stack[ix]->n_allocated++;
  /* rotate empty slabs */
  if (!allocator->slab_stack[ix]->chunks)
    allocator->slab_stack[ix] = allocator->slab_stack[ix]->next;
  return chunk;
}

static void
slab_allocator_free_chunk (gsize    chunk_size,
                           gpointer mem)
{
  ChunkLink *chunk;
  gboolean was_empty;
  guint ix = SLAB_INDEX (allocator, chunk_size);
  gsize page_size = allocator_aligned_page_size (allocator, SLAB_BPAGE_SIZE (allocator, chunk_size));
  gsize addr = ((gsize) mem / page_size) * page_size;
  /* mask page address */
  guint8 *page = (guint8*) addr;
  SlabInfo *sinfo = (SlabInfo*) (page + page_size - SLAB_INFO_SIZE);
  /* assert valid chunk count */
  mem_assert (sinfo->n_allocated > 0);
  /* add chunk to free list */
  was_empty = sinfo->chunks == NULL;
  chunk = (ChunkLink*) mem;
  chunk->next = sinfo->chunks;
  sinfo->chunks = chunk;
  sinfo->n_allocated--;
  /* keep slab ring partially sorted, empty slabs at end */
  if (was_empty)
    {
      /* unlink slab */
      SlabInfo *next = sinfo->next, *prev = sinfo->prev;
      next->prev = prev;
      prev->next = next;
      if (allocator->slab_stack[ix] == sinfo)
        allocator->slab_stack[ix] = next == sinfo ? NULL : next;
      /* insert slab at head */
      allocator_slab_stack_push (allocator, ix, sinfo);
    }
  /* eagerly free complete unused slabs */
  if (!sinfo->n_allocated)
    {
      /* unlink slab */
      SlabInfo *next = sinfo->next, *prev = sinfo->prev;
      next->prev = prev;
      prev->next = next;
      if (allocator->slab_stack[ix] == sinfo)
        allocator->slab_stack[ix] = next == sinfo ? NULL : next;
      /* free slab */
      allocator_memfree (page_size, page);
    }
}

/* --- memalign implementation --- */
#ifdef HAVE_MALLOC_H
#include <malloc.h>             /* memalign() */
#endif

/* from config.h:
 * define HAVE_POSIX_MEMALIGN           1 // if free(posix_memalign(3)) works, <stdlib.h>
 * define HAVE_COMPLIANT_POSIX_MEMALIGN 1 // if free(posix_memalign(3)) works for sizes != 2^n, <stdlib.h>
 * define HAVE_MEMALIGN                 1 // if free(memalign(3)) works, <malloc.h>
 * define HAVE_VALLOC                   1 // if free(valloc(3)) works, <stdlib.h> or <malloc.h>
 * if none is provided, we implement malloc(3)-based alloc-only page alignment
 */

#if !(HAVE_COMPLIANT_POSIX_MEMALIGN || HAVE_MEMALIGN || HAVE_VALLOC)
static GTrashStack *compat_valloc_trash = NULL;
#endif

static gpointer
allocator_memalign (gsize alignment,
                    gsize memsize)
{
  gpointer aligned_memory = NULL;
  gint err = ENOMEM;
#if     HAVE_COMPLIANT_POSIX_MEMALIGN
  err = posix_memalign (&aligned_memory, alignment, memsize);
#elif   HAVE_MEMALIGN
  errno = 0;
  aligned_memory = memalign (alignment, memsize);
  err = errno;
#elif   HAVE_VALLOC
  errno = 0;
  aligned_memory = valloc (memsize);
  err = errno;
#else
  /* simplistic non-freeing page allocator */
  mem_assert (alignment == sys_page_size);
  mem_assert (memsize <= sys_page_size);
  if (!compat_valloc_trash)
    {
      const guint n_pages = 16;
      guint8 *mem = malloc (n_pages * sys_page_size);
      err = errno;
      if (mem)
        {
          gint i = n_pages;
          guint8 *amem = (guint8*) ALIGN ((gsize) mem, sys_page_size);
          if (amem != mem)
            i--;        /* mem wasn't page aligned */
          while (--i >= 0)
            g_trash_stack_push (&compat_valloc_trash, amem + i * sys_page_size);
        }
    }
  aligned_memory = g_trash_stack_pop (&compat_valloc_trash);
#endif
  if (!aligned_memory)
    errno = err;
  return aligned_memory;
}

static void
allocator_memfree (gsize    memsize,
                   gpointer mem)
{
#if     HAVE_COMPLIANT_POSIX_MEMALIGN || HAVE_MEMALIGN || HAVE_VALLOC
  free (mem);
#else
  mem_assert (memsize <= sys_page_size);
  g_trash_stack_push (&compat_valloc_trash, mem);
#endif
}

static void
mem_error (const char *format,
           ...)
{
  const char *pname;
  va_list args;
  /* at least, put out "MEMORY-ERROR", in case we segfault during the rest of the function */
  fputs ("\n***MEMORY-ERROR***: ", stderr);
  pname = g_get_prgname();
  fprintf (stderr, "%s[%u]: GSlice: ", pname ? pname : "", getpid());
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  fputs ("\n", stderr);
  abort();
  _exit (1);
}

#define __G_SLICE_C__
#include "galiasdef.c"