/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is 4 bytes on 32-bit and 8 bytes on 64-bit, though it
 * could be as low as 2 if the compiler alignment requirements allow.
 *
 * The slob heap is a linked list of pages from __get_free_page, and
 * within each page, there is a singly-linked list of free blocks (slob_t).
 * The heap is grown on demand and allocation from the heap is currently
 * first-fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are 4-byte aligned and prepended with a 4-byte header.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * __get_free_pages directly so that it can return page-aligned blocks
 * and keeps a linked list of such pages and their orders. These
 * objects are detected in kfree() by their page alignment.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with the
 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 * case the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page-size or greater are allocated by
 * calling __get_free_pages. As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead.
 */
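
/*
 * Illustrative layout of a small kmalloc() object (a sketch, not part of
 * the allocator itself): the pointer handed back to the caller sits one
 * SLOB_UNIT past the start of the underlying slob block, and the header
 * unit records the requested size so kfree() and ksize() can recover it:
 *
 *      [ slob_t header: units = size ][ payload returned to caller ... ]
 *      ^ m                            ^ (void *)(m + 1)
 */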

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <asm/atomic.h>

/* SLOB_MIN_ALIGN == sizeof(long) */
#if BITS_PER_LONG == 32
#define SLOB_MIN_ALIGN 4
#else
#define SLOB_MIN_ALIGN 8
#endif

/*
 * slob_block has a field 'units', which indicates the size of the block
 * if positive, or the offset of the next free block if negative (both in
 * SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
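
/*
 * Worked example (illustrative only): a free block of 3 units at blk,
 * whose next free block starts 40 SLOB_UNITs into the same page, is
 * encoded as
 *
 *      blk[0].units = 3;       (size, positive)
 *      blk[1].units = 40;      (offset of next free block)
 *
 * while a 1-unit free block stores only the negated offset:
 *
 *      blk[0].units = -40;
 */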

#if PAGE_SIZE <= (32767 * SLOB_MIN_ALIGN)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

/*
 * Align struct slob_block to long for now, though some embedded
 * architectures might be able to get away with less.
 */
struct slob_block {
        slobidx_t units;
} __attribute__((aligned(SLOB_MIN_ALIGN)));
typedef struct slob_block slob_t;

/*
 * We use struct page fields to manage some slob allocation aspects,
 * but to avoid the horrible mess in include/linux/mm_types.h, we'll
 * just define our own struct page type variant here.
 */
struct slob_page {
        union {
                struct {
                        unsigned long flags;    /* mandatory */
                        atomic_t _count;        /* mandatory */
                        slobidx_t units;        /* free units left in page */
                        unsigned long pad[2];
                        slob_t *free;           /* first free slob_t in page */
                        struct list_head list;  /* linked list of free pages */
                };
                struct page page;
        };
};

static inline void struct_slob_page_wrong_size(void)
{ BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); }

/*
 * free_slob_page: call before a slob_page is returned to the page allocator.
 */
static inline void free_slob_page(struct slob_page *sp)
{
        reset_page_mapcount(&sp->page);
        sp->page.mapping = NULL;
}

/*
 * All (partially) free slob pages go on this list.
 */
static LIST_HEAD(free_slob_pages);
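
/*
 * slob borrows two struct page flag bits for its own bookkeeping:
 * PG_active marks a page as managed by slob at all, and PG_private
 * marks a slob page that is currently on the free_slob_pages list.
 */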

/*
 * slob_page: true for all slob pages (false for bigblock pages).
 */
static inline int slob_page(struct slob_page *sp)
{
        return test_bit(PG_active, &sp->flags);
}

static inline void set_slob_page(struct slob_page *sp)
{
        __set_bit(PG_active, &sp->flags);
}

static inline void clear_slob_page(struct slob_page *sp)
{
        __clear_bit(PG_active, &sp->flags);
}

/*
 * slob_page_free: true for pages on the free_slob_pages list.
 */
static inline int slob_page_free(struct slob_page *sp)
{
        return test_bit(PG_private, &sp->flags);
}

static inline void set_slob_page_free(struct slob_page *sp)
{
        list_add(&sp->list, &free_slob_pages);
        __set_bit(PG_private, &sp->flags);
}

static inline void clear_slob_page_free(struct slob_page *sp)
{
        list_del(&sp->list);
        __clear_bit(PG_private, &sp->flags);
}

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES
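
/*
 * For example, on a 32-bit build where SLOB_UNIT is 4 bytes,
 * SLOB_UNITS(10) == 3: a 10-byte request is rounded up to 3 units
 * (12 bytes). SLOB_UNITS(PAGE_SIZE) is the number of units in a whole
 * page.
 */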

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
        struct rcu_head head;
        int size;
};

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
        slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
        slobidx_t offset = next - base;

        if (size > 1) {
                s[0].units = size;
                s[1].units = offset;
        } else
                s[0].units = -offset;
}

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
        if (s->units > 0)
                return s->units;
        return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
        slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
        slobidx_t next;

        if (s[0].units < 0)
                next = -s[0].units;
        else
                next = s[1].units;
        return base + next;
}

/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
        return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}

/*
 * Allocate a slob block within a given slob_page sp.
 */
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
        slob_t *prev, *cur, *aligned = NULL;
        int delta = 0, units = SLOB_UNITS(size);

        for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
                slobidx_t avail = slob_units(cur);

                if (align) {
                        aligned = (slob_t *)ALIGN((unsigned long)cur, align);
                        delta = aligned - cur;
                }
                if (avail >= units + delta) { /* room enough? */
                        slob_t *next;

                        if (delta) { /* need to fragment head to align? */
                                next = slob_next(cur);
                                set_slob(aligned, avail - delta, next);
                                set_slob(cur, delta, aligned);
                                prev = cur;
                                cur = aligned;
                                avail = slob_units(cur);
                        }

                        next = slob_next(cur);
                        if (avail == units) { /* exact fit? unlink. */
                                if (prev)
                                        set_slob(prev, slob_units(prev), next);
                                else
                                        sp->free = next;
                        } else { /* fragment */
                                if (prev)
                                        set_slob(prev, slob_units(prev), cur + units);
                                else
                                        sp->free = cur + units;
                                set_slob(cur + units, avail - units, next);
                        }

                        sp->units -= units;
                        if (!sp->units)
                                clear_slob_page_free(sp);
                        return cur;
                }
                if (slob_last(cur))
                        return NULL;
        }
}

/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align)
{
        struct slob_page *sp;
        slob_t *b = NULL;
        unsigned long flags;

        spin_lock_irqsave(&slob_lock, flags);
        /* Iterate through each partially free page, try to find room */
        list_for_each_entry(sp, &free_slob_pages, list) {
                if (sp->units >= SLOB_UNITS(size)) {
                        b = slob_page_alloc(sp, size, align);
                        if (b)
                                break;
                }
        }
        spin_unlock_irqrestore(&slob_lock, flags);

        if (!b) {
                /* Not enough space: must allocate a new page */
                b = (slob_t *)__get_free_page(gfp);
                if (!b)
                        return NULL;

                sp = (struct slob_page *)virt_to_page(b);
                set_slob_page(sp);

                spin_lock_irqsave(&slob_lock, flags);
                sp->units = SLOB_UNITS(PAGE_SIZE);
                sp->free = b;
                INIT_LIST_HEAD(&sp->list);
                set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
                set_slob_page_free(sp);
                b = slob_page_alloc(sp, size, align);
                spin_unlock_irqrestore(&slob_lock, flags);
        }
        return b;
}

/*
 * slob_free: entry point into the slob allocator.
 */
static void slob_free(void *block, int size)
{
        struct slob_page *sp;
        slob_t *prev, *next, *b = (slob_t *)block;
        slobidx_t units;
        unsigned long flags;

        if (!block)
                return;

        sp = (struct slob_page *)virt_to_page(block);
        units = SLOB_UNITS(size);

        spin_lock_irqsave(&slob_lock, flags);

        if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
                /* Go directly to page allocator. Do not pass slob allocator */
                if (slob_page_free(sp))
                        clear_slob_page_free(sp);
                clear_slob_page(sp);
                free_slob_page(sp);
                free_page((unsigned long)b);
                goto out;
        }

        if (!slob_page_free(sp)) {
                /* This slob page is about to become partially free. Easy! */
                sp->units = units;
                sp->free = b;
                set_slob(b, units,
                        (void *)((unsigned long)(b +
                                        SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
                set_slob_page_free(sp);
                goto out;
        }

        /*
         * Otherwise the page is already partially free, so find reinsertion
         * point.
         */
        sp->units += units;

        if (b < sp->free) {
                set_slob(b, units, sp->free);
                sp->free = b;
        } else {
                prev = sp->free;
                next = slob_next(prev);
                while (b > next) {
                        prev = next;
                        next = slob_next(prev);
                }

                if (!slob_last(prev) && b + units == next) {
                        /* coalesce with the following free block */
                        units += slob_units(next);
                        set_slob(b, units, slob_next(next));
                } else
                        set_slob(b, units, next);

                if (prev + slob_units(prev) == b) {
                        /* coalesce with the preceding free block */
                        units = slob_units(b) + slob_units(prev);
                        set_slob(prev, units, slob_next(b));
                } else
                        set_slob(prev, slob_units(prev), b);
        }
out:
        spin_unlock_irqrestore(&slob_lock, flags);
}

/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

struct bigblock {
        int order;
        void *pages;
        struct bigblock *next;
};
typedef struct bigblock bigblock_t;

static bigblock_t *bigblocks;

static DEFINE_SPINLOCK(block_lock);
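
/*
 * kmalloc frontend: requests smaller than a page (minus the header) are
 * served from the slob heap with a one-unit header recording the request
 * size; anything larger goes straight to __get_free_pages and is tracked
 * on the bigblocks list so kfree() can later find its order.
 */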
void *__kmalloc(size_t size, gfp_t gfp)
{
        slob_t *m;
        bigblock_t *bb;
        unsigned long flags;

        if (size < PAGE_SIZE - SLOB_UNIT) {
                m = slob_alloc(size + SLOB_UNIT, gfp, 0);
                if (m)
                        m->units = size;        /* record size in the header unit */
                return m ? (void *)(m + 1) : NULL;
        }

        bb = slob_alloc(sizeof(bigblock_t), gfp, 0);
        if (!bb)
                return NULL;

        bb->order = get_order(size);
        bb->pages = (void *)__get_free_pages(gfp, bb->order);

        if (bb->pages) {
                spin_lock_irqsave(&block_lock, flags);
                bb->next = bigblocks;
                bigblocks = bb;
                spin_unlock_irqrestore(&block_lock, flags);
                return bb->pages;
        }

        slob_free(bb, sizeof(bigblock_t));
        return NULL;
}
EXPORT_SYMBOL(__kmalloc);

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
        void *ret;

        if (unlikely(!p))
                return kmalloc_track_caller(new_size, flags);

        if (unlikely(!new_size)) {
                kfree(p);
                return NULL;
        }

        ret = kmalloc_track_caller(new_size, flags);
        if (ret) {
                memcpy(ret, p, min(new_size, ksize(p)));
                kfree(p);
        }
        return ret;
}
EXPORT_SYMBOL(krealloc);
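
/*
 * kfree: a page that fails the slob_page() test must belong to a bigblock
 * allocation, so it is looked up (and unlinked) on the bigblocks list;
 * everything else carries a slob_t size header immediately before the
 * payload, which gives slob_free() the size to return.
 */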
void kfree(const void *block)
{
        struct slob_page *sp;
        slob_t *m;
        bigblock_t *bb, **last = &bigblocks;
        unsigned long flags;

        if (!block)
                return;

        sp = (struct slob_page *)virt_to_page(block);
        if (!slob_page(sp)) {
                /* on the big block list */
                spin_lock_irqsave(&block_lock, flags);
                for (bb = bigblocks; bb; last = &bb->next, bb = bb->next) {
                        if (bb->pages == block) {
                                *last = bb->next;
                                spin_unlock_irqrestore(&block_lock, flags);
                                free_pages((unsigned long)block, bb->order);
                                slob_free(bb, sizeof(bigblock_t));
                                return;
                        }
                }
                spin_unlock_irqrestore(&block_lock, flags);
                return;
        }

        m = (slob_t *)block - 1;
        slob_free(m, m->units + SLOB_UNIT);
}
EXPORT_SYMBOL(kfree);
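
/*
 * ksize mirrors kfree(): bigblock allocations report PAGE_SIZE << order,
 * while small allocations derive their size from the slob_t header just
 * before the block.
 */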
size_t ksize(const void *block)
{
        struct slob_page *sp;
        bigblock_t *bb;
        unsigned long flags;

        if (!block)
                return 0;

        sp = (struct slob_page *)virt_to_page(block);
        if (!slob_page(sp)) {
                spin_lock_irqsave(&block_lock, flags);
                for (bb = bigblocks; bb; bb = bb->next)
                        if (bb->pages == block) {
                                spin_unlock_irqrestore(&block_lock, flags);
                                return PAGE_SIZE << bb->order;
                        }
                spin_unlock_irqrestore(&block_lock, flags);
        }

        return ((slob_t *)block - 1)->units + SLOB_UNIT;
}
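
/*
 * Minimal SLAB emulation: a kmem_cache just records the object size,
 * alignment, flags and constructor; the objects themselves come straight
 * from slob_alloc() or, for page-sized objects, from the page allocator.
 */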
struct kmem_cache {
        unsigned int size, align;
        unsigned long flags;
        const char *name;
        void (*ctor)(void *, struct kmem_cache *, unsigned long);
};

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
        size_t align, unsigned long flags,
        void (*ctor)(void *, struct kmem_cache *, unsigned long),
        void (*dtor)(void *, struct kmem_cache *, unsigned long))
{
        struct kmem_cache *c;

        c = slob_alloc(sizeof(struct kmem_cache), flags, 0);

        if (c) {
                c->name = name;
                c->size = size;
                if (flags & SLAB_DESTROY_BY_RCU) {
                        /* leave room for rcu footer at the end of object */
                        c->size += sizeof(struct slob_rcu);
                }
                c->flags = flags;
                c->ctor = ctor;
                /* ignore alignment unless it's forced */
                c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
                if (c->align < align)
                        c->align = align;
        } else if (flags & SLAB_PANIC)
                panic("Cannot create slab cache %s\n", name);

        return c;
}
EXPORT_SYMBOL(kmem_cache_create);
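
/*
 * Typical use of the cache API (illustrative sketch only; struct foo and
 * foo_cache are hypothetical):
 *
 *      struct kmem_cache *foo_cache =
 *              kmem_cache_create("foo", sizeof(struct foo), 0, 0, NULL, NULL);
 *      struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *      ...
 *      kmem_cache_free(foo_cache, f);
 *      kmem_cache_destroy(foo_cache);
 */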

void kmem_cache_destroy(struct kmem_cache *c)
{
        slob_free(c, sizeof(struct kmem_cache));
}
EXPORT_SYMBOL(kmem_cache_destroy);

void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags)
{
        void *b;

        if (c->size < PAGE_SIZE)
                b = slob_alloc(c->size, flags, c->align);
        else
                b = (void *)__get_free_pages(flags, get_order(c->size));

        if (c->ctor)
                c->ctor(b, c, 0);

        return b;
}
EXPORT_SYMBOL(kmem_cache_alloc);

void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
{
        void *ret = kmem_cache_alloc(c, flags);

        if (ret)
                memset(ret, 0, c->size);

        return ret;
}
EXPORT_SYMBOL(kmem_cache_zalloc);

static void __kmem_cache_free(void *b, int size)
{
        if (size < PAGE_SIZE)
                slob_free(b, size);
        else
                free_pages((unsigned long)b, get_order(size));
}
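
/*
 * For SLAB_DESTROY_BY_RCU caches, the struct slob_rcu footer lives in the
 * extra space reserved at the tail of each object (see kmem_cache_create).
 * kmem_rcu_free() walks back from that footer to the start of the object
 * and releases it once the grace period has elapsed.
 */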
static void kmem_rcu_free(struct rcu_head *head)
{
        struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
        void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

        __kmem_cache_free(b, slob_rcu->size);
}

void kmem_cache_free(struct kmem_cache *c, void *b)
{
        if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
                struct slob_rcu *slob_rcu;
                slob_rcu = b + (c->size - sizeof(struct slob_rcu));
                INIT_RCU_HEAD(&slob_rcu->head);
                slob_rcu->size = c->size;
                call_rcu(&slob_rcu->head, kmem_rcu_free);
        } else {
                __kmem_cache_free(b, c->size);
        }
}
EXPORT_SYMBOL(kmem_cache_free);

unsigned int kmem_cache_size(struct kmem_cache *c)
{
        return c->size;
}
EXPORT_SYMBOL(kmem_cache_size);

const char *kmem_cache_name(struct kmem_cache *c)
{
        return c->name;
}
EXPORT_SYMBOL(kmem_cache_name);

int kmem_cache_shrink(struct kmem_cache *d)
{
        /* nothing to shrink: slob returns free pages immediately */
        return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);

int kmem_ptr_validate(struct kmem_cache *a, const void *b)
{
        return 0;
}

void __init kmem_cache_init(void)