2 * (C) Copyright 2007-2010 Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
4 * This file is released under the GPLv2. See the COPYING file for more
13 static struct slab *generic[7];
16          * Create slab caches for 16, 32, 64, 128, 256, 512, and 1024 byte allocations
20         generic[0] = create_slab(16, 4);
24         generic[1] = create_slab(32, 4);
28         generic[2] = create_slab(64, 4);
32         generic[3] = create_slab(128, 4);
36         generic[4] = create_slab(256, 8);
40         generic[5] = create_slab(512, 8);
44         generic[6] = create_slab(1024, 8);
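
The seven generic caches cover the power-of-two object sizes 16 through 1024
bytes (the lines between the calls are elided here). malloc() at the bottom of
the file hands a request to the smallest cache whose object size can hold it,
which amounts to the following sketch; generic_index() is hypothetical and
exists only to illustrate the mapping:

        /* illustrative only: map a request size to a generic[] index */
        static int generic_index(int size)
        {
                int i;

                for (i = 0; i < 7; i++)
                        if (size <= (16 << i))  /* 16 << i is generic[i]'s object size */
                                return i;
                return -1;      /* requests over 1024 bytes are not served by these caches */
        }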
51         free_slab(generic[0]);
52         free_slab(generic[1]);
53         free_slab(generic[2]);
54         free_slab(generic[3]);
55         free_slab(generic[4]);
56         free_slab(generic[5]);
57         free_slab(generic[6]);

63 * create_slab - Create a new slab
64 * @objsize: object size in bytes
65 * @align: object alignment in bytes (must be a power of two)
67 struct slab *create_slab(u16 objsize, u8 align)
72         if (!objsize || !align)
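
The only validation visible here is the rejection of zero values; the
power-of-two requirement from the comment above is what makes the mask trick on
line 87 work, and no explicit check for it appears in the lines shown. If one
were wanted, it could look like this (is_pow2() is hypothetical, not part of
this file):

        /* illustrative only: a power of two has exactly one bit set */
        static inline int is_pow2(u8 align)
        {
                return align && !(align & (align - 1));
        }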
75         page = alloc_pages(0, ZONE_NORMAL);

79         slab = page_to_addr(page);
80         memset(slab, 0, PAGE_SIZE);

82         slab->magic = SLAB_MAGIC;
83         slab->lock = SPIN_LOCK_UNLOCKED;
84         INIT_LIST_HEAD(&slab->slab_pages);
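
The struct slab header sits at the start of the page it manages: page_to_addr()
returns the page's address, the whole page is zeroed, and the header fields are
then filled in. The fields referenced in this file suggest roughly the layout
below; the actual field order and widths are not visible in this excerpt, so
treat it purely as a reading aid:

        /* reconstruction for illustration only, not the real definition */
        struct slab {
                u32 magic;                      /* SLAB_MAGIC or SLAB_CONT_MAGIC */
                spinlock_t lock;                /* taken via ->first on every page */
                struct list_head slab_pages;    /* continuation pages of this cache */
                struct slab *first;             /* back-pointer to the first page */
                u16 objsize;                    /* rounded-up object size */
                u16 startoff;                   /* offset of the first object */
                u16 count;                      /* objects per page */
                u16 used;                       /* objects currently allocated */
                u8 bitmap[0];                   /* one bit per object; set = in use */
        };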
87         align--; /* turn into a mask */

89         /* actual object size */
91         objsize += align + 1 - (objsize & align);
92         slab->objsize = objsize;
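
With align turned into a mask, line 91 rounds objsize up to the next alignment
boundary. Note that a size that is already aligned still gets bumped by a full
step: for create_slab(16, 4) the mask is 3, 16 & 3 is 0, and objsize becomes
16 + 4 = 20; for create_slab(256, 8) it becomes 264. Whether that extra slot is
deliberate padding is not clear from this excerpt, but the figures below follow
the formula as written.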
94         /* number of objects in a page */
95         slab->count = 8 * (PAGE_SIZE - sizeof(struct slab)) / (8 * objsize + 1);
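
The count formula budgets bits rather than bytes: after the header there are
8 * (PAGE_SIZE - sizeof(struct slab)) bits left in the page, and each object
needs 8 * objsize bits of storage plus one bitmap bit, hence the
8 * objsize + 1 divisor. Assuming a 4096-byte PAGE_SIZE and, say, a 64-byte
struct slab (the real size is not visible here), the 20-byte objects from the
example above give count = 8 * (4096 - 64) / 161 = 200.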
97         /* offset of the first object */
98         slab->startoff = sizeof(struct slab) + (slab->count + 4) / 8;
99         if (slab->startoff & align) {
102                 slab->startoff += align + 1 - (slab->startoff & align);
105          * TODO: there's got to be a better way to ensure that we
106          * fit into a single page
108         tmp = slab->startoff + slab->count * slab->objsize;
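
Continuing the same assumed figures: startoff = 64 + (200 + 4) / 8 = 64 + 25 =
89, which is not 4-byte aligned, so line 102 bumps it to 92, and the fit check
computes tmp = 92 + 200 * 20 = 4092, which is within the 4096-byte page. Note
in passing that (count + 4) / 8 is not the usual (count + 7) / 8 round-up of
count bits to whole bytes.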

118 void free_slab(struct slab *passed_slab)

125         BUG_ON(passed_slab->magic != SLAB_MAGIC);
126         BUG_ON(passed_slab->used != 0);
128         list_for_each_entry(slab, &passed_slab->slab_pages, slab_pages) {
129                 BUG_ON(slab->magic != SLAB_CONT_MAGIC);
130                 BUG_ON(slab->used != 0);
133                  * Theoretically, we should remove the page from the list,
134                  * but no one _really_ cares

140         free_pages(passed_slab, 0);

143 static inline void *__alloc_slab_obj_newpage(struct slab *slab, int type)

148         page = alloc_pages(0, type);
150                 return ERR_PTR(-ENOMEM);

152         new = page_to_addr(page);

154         memset(new, 0, PAGE_SIZE);
155         new->magic = SLAB_CONT_MAGIC;

157         new->objsize = slab->objsize;
158         new->startoff = slab->startoff;
159         new->count = slab->count;

161         /* add it to the current slab */
162         list_add_tail(&new->slab_pages, &slab->slab_pages);
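
A continuation page is stamped with SLAB_CONT_MAGIC rather than SLAB_MAGIC and
simply copies its geometry (objsize, startoff, count) from the first page, so
the allocation and free arithmetic works the same way on every page in the
list. On failure the function returns ERR_PTR(-ENOMEM), so the caller is
presumably expected to test the result along these lines before using it (the
actual check falls on lines not shown):

        /* illustrative only: consuming the ERR_PTR-style return value */
        slab = __alloc_slab_obj_newpage(passed_slab, type);
        if (IS_ERR(slab))
                return NULL;    /* or propagate the error */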

167 static void *alloc_slab_obj(struct slab *passed_slab, int type)

169         struct slab *slab = passed_slab;

174         unsigned long int_mask;

179         BUG_ON(passed_slab->magic != SLAB_MAGIC);

181         spin_lock_intsave(&passed_slab->lock, &int_mask);
184          * Does the first slab page have an unused object _AND_ is in the
187         if (slab->used < slab->count && ZONE_TYPE(addr_to_page(slab)) == type)
191          * No. Find the first slab page that has unused objects
193         list_for_each_entry(slab, &passed_slab->slab_pages, slab_pages)
194                 if (slab->used < slab->count &&
195                     ZONE_TYPE(addr_to_page(slab)) == type)
199          * None of the pages have an unused object. Let's allocate another
203         slab = __alloc_slab_obj_newpage(passed_slab, type);

205         FIXME("if we tried to get a ZONE_NORMAL and failed, "
206               "shouldn't we retry with ZONE_LOW?");
212         for (objidx = 0; objidx < slab->count; objidx++) {
213                 bits = slab->bitmap + (objidx / 8);

215                 mask = 1 << (7 - (objidx % 8));
223                 obj = ((u8 *) slab) + slab->startoff + slab->objsize * objidx;
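
The loop scans the per-page bitmap, one bit per object with the most
significant bit first within each byte, looking for a free slot; the lines
elided between 215 and 223 presumably test the bit, skip objects that are
already taken, and otherwise mark the slot used. The find-and-claim step
amounts to the sketch below; claim_obj() is a made-up name and only the
arithmetic mirrors the code above:

        /* illustrative only: claim the first free object on one slab page */
        static void *claim_obj(struct slab *slab)
        {
                u16 objidx;
                u8 *bits, mask;

                for (objidx = 0; objidx < slab->count; objidx++) {
                        bits = slab->bitmap + (objidx / 8);
                        mask = 1 << (7 - (objidx % 8));

                        if (*bits & mask)
                                continue;       /* already allocated */

                        *bits |= mask;
                        slab->used++;
                        return ((u8 *) slab) + slab->startoff +
                               slab->objsize * objidx;
                }

                return NULL;                    /* this page is full */
        }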

228         spin_unlock_intrestore(&passed_slab->lock, int_mask);

233 static void free_slab_obj(void *ptr)

238         unsigned long int_mask;
240         /* get the slab object ptr */
241         slab = (struct slab *) (((u64) ptr) & ~0xfff);

243         spin_lock_intsave(&slab->first->lock, &int_mask);
245         /* calculate the object number */
246         objidx = (((u64) ptr) - ((u64) slab) - slab->startoff) / slab->objsize;
248         /* update the bitmap */
249         bits = slab->bitmap + (objidx / 8);
250         *bits &= ~(1 << (7 - (objidx % 8)));

253         FIXME("free the page?");

256         spin_unlock_intrestore(&slab->first->lock, int_mask);

259 int allocsize(void *ptr)
263         /* get the slab object ptr */
264         slab = (struct slab *) (((u64) ptr) & ~0xfff);

266         return slab->objsize;
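
allocsize() reports the cache's rounded-up object size, not the size originally
requested: a malloc(100, ...) call is served by the 128-byte cache and, after
the rounding on line 91 with 4-byte alignment, would be reported as 132 bytes.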

269 void *malloc(int size, int type)
274                 return alloc_slab_obj(generic[0], type);
276                 return alloc_slab_obj(generic[1], type);
278                 return alloc_slab_obj(generic[2], type);
280                 return alloc_slab_obj(generic[3], type);
282                 return alloc_slab_obj(generic[4], type);
284                 return alloc_slab_obj(generic[5], type);
286                 return alloc_slab_obj(generic[6], type);