/*
 * (C) Copyright 2007-2010 Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
 *
 * This file is released under the GPLv2. See the COPYING file for more
 * details.
 */

#include <magic.h>
#include <buddy.h>
#include <slab.h>
#include <page.h>

static struct slab *generic[7];

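/*
 * generic[i] backs malloc() requests of up to (16 << i) bytes; these are
 * the seven general-purpose caches created by init_slab() below.
 */
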
/*
 * Create slab caches for 16, 32, 64, 128, 256, 512, and 1024 byte
 * allocations
 */
int init_slab(void)
{
	generic[0] = create_slab(16, 4);
	if (!generic[0])
		goto out_err;

	generic[1] = create_slab(32, 4);
	if (!generic[1])
		goto out_err;

	generic[2] = create_slab(64, 4);
	if (!generic[2])
		goto out_err;

	generic[3] = create_slab(128, 4);
	if (!generic[3])
		goto out_err;

	generic[4] = create_slab(256, 8);
	if (!generic[4])
		goto out_err;

	generic[5] = create_slab(512, 8);
	if (!generic[5])
		goto out_err;

	generic[6] = create_slab(1024, 8);
	if (!generic[6])
		goto out_err;

	return 0;

out_err:
	/* free_slab(NULL) is a no-op, so freeing every entry is safe */
	free_slab(generic[0]);
	free_slab(generic[1]);
	free_slab(generic[2]);
	free_slab(generic[3]);
	free_slab(generic[4]);
	free_slab(generic[5]);
	free_slab(generic[6]);

	return -ENOMEM;
}

/**
 * create_slab - Create a new slab
 * @objsize: object size in bytes
 * @align: object alignment in bytes (must be a power of two)
 */
struct slab *create_slab(u16 objsize, u8 align)
{
	struct page *page;
	struct slab *slab;

	if (!objsize || !align)
		return NULL;

	page = alloc_pages(0, ZONE_NORMAL);
	if (!page)
		return NULL;

	slab = page_to_addr(page);
	memset(slab, 0, PAGE_SIZE);

	slab->magic = SLAB_MAGIC;
	slab->lock = SPIN_LOCK_UNLOCKED;
	INIT_LIST_HEAD(&slab->slab_pages);
	slab->first = slab;

	align--; /* turn into a mask */

	/* actual object size: round up to the requested alignment */
	if (objsize & align)
		objsize += align + 1 - (objsize & align);
	slab->objsize = objsize;

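	/*
	 * For example, an 18-byte object with 4-byte alignment (mask 3) is
	 * padded to 20 bytes; sizes already a multiple of the alignment are
	 * left unchanged.
	 */
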
	/* number of objects in a page */
	slab->count = 8 * (PAGE_SIZE - sizeof(struct slab)) / (8 * objsize + 1);

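	/*
	 * Each object consumes 8 * objsize bits of storage plus one bitmap
	 * bit, while 8 * (PAGE_SIZE - sizeof(struct slab)) bits are available
	 * after the header. For 16-byte objects on a 4 KB page that comes to
	 * roughly 250 objects (the exact count depends on
	 * sizeof(struct slab)).
	 */
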
	/*
	 * offset of the first object: the header followed by the allocation
	 * bitmap, rounded up to whole bytes
	 */
	slab->startoff = sizeof(struct slab) + (slab->count + 7) / 8;
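
	/*
	 * Resulting page layout:
	 *
	 *	[struct slab][bitmap][padding to align][objects...]
	 */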
	if (slab->startoff & align) {
		u16 tmp;

		slab->startoff += align + 1 - (slab->startoff & align);

		/*
		 * TODO: there's got to be a better way to ensure that we
		 * fit into a single page
		 */
		tmp = slab->startoff + slab->count * slab->objsize;
		if (tmp > PAGE_SIZE)
			slab->count--;
	}

	slab->used = 0;

	return slab;
}

void free_slab(struct slab *passed_slab)
{
	struct slab *slab;

	if (!passed_slab)
		return;

	BUG_ON(passed_slab->magic != SLAB_MAGIC);
	BUG_ON(passed_slab->used != 0);

	list_for_each_entry(slab, &passed_slab->slab_pages, slab_pages) {
		BUG_ON(slab->magic != SLAB_CONT_MAGIC);
		BUG_ON(slab->used != 0);

		/*
		 * Theoretically, we should remove the page from the list,
		 * but no one _really_ cares
		 */

		free_pages(slab, 0);
	}

	free_pages(passed_slab, 0);
}

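/*
 * When the head page of a slab runs out of objects, additional pages are
 * allocated and chained onto its slab_pages list. These continuation pages
 * carry SLAB_CONT_MAGIC and point back at the head page via ->first, but
 * otherwise share its geometry (objsize, startoff, count).
 */
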
static inline void *__alloc_slab_obj_newpage(struct slab *slab, int type)
{
	struct page *page;
	struct slab *new;

	page = alloc_pages(0, type);
	if (!page)
		return ERR_PTR(-ENOMEM);

	new = page_to_addr(page);

	memset(new, 0, PAGE_SIZE);
	new->magic = SLAB_CONT_MAGIC;
	new->first = slab;
	new->objsize = slab->objsize;
	new->startoff = slab->startoff;
	new->count = slab->count;

	/* add it to the current slab */
	list_add_tail(&new->slab_pages, &slab->slab_pages);

	return new;
}

static void *alloc_slab_obj(struct slab *passed_slab, int type)
{
	struct slab *slab = passed_slab;
	void *obj = NULL;
	int objidx;
	u8 *bits;
	u8 mask;
	unsigned long int_mask;

	if (!slab)
		return NULL;

	BUG_ON(passed_slab->magic != SLAB_MAGIC);

	spin_lock_intsave(&passed_slab->lock, &int_mask);

	/*
	 * Does the first slab page have an unused object _AND_ is it in the
	 * right zone?
	 */
	if (slab->used < slab->count && ZONE_TYPE(addr_to_page(slab)) == type)
		goto alloc;

	/*
	 * No. Find the first slab page that has unused objects
	 */
	list_for_each_entry(slab, &passed_slab->slab_pages, slab_pages)
		if (slab->used < slab->count &&
		    ZONE_TYPE(addr_to_page(slab)) == type)
			goto alloc;

	/*
	 * None of the pages have an unused object. Let's allocate another
	 * page
	 */

	slab = __alloc_slab_obj_newpage(passed_slab, type);
	if (IS_ERR(slab)) {
		FIXME("if we tried to get a ZONE_NORMAL and failed, "
		      "shouldn't we retry with ZONE_LOW?");
		goto out;
	}

alloc:
	/* found a page with a free object; scan its bitmap */
	for (objidx = 0; objidx < slab->count; objidx++) {
		bits = slab->bitmap + (objidx / 8);

		mask = 1 << (7 - (objidx % 8));

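		/*
		 * The bitmap is MSB-first: object 0 is tracked by bit 7 of
		 * bitmap[0], object 7 by bit 0, object 8 by bit 7 of
		 * bitmap[1], and so on. A set bit means the object is in use.
		 */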
		if (*bits & mask)
			continue;

		/* claim the object: mark its bit and bump the use count */
		slab->used++;
		*bits |= mask;

		obj = ((u8 *) slab) + slab->startoff + slab->objsize * objidx;
		break;
	}

out:
	spin_unlock_intrestore(&passed_slab->lock, int_mask);

	return obj;
}

static void free_slab_obj(void *ptr)
{
	struct slab *slab;
	int objidx;
	u8 *bits;
	unsigned long int_mask;

	/* get the slab page header for this object */
	slab = (struct slab *) (((u64) ptr) & ~0xfff);

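	/*
	 * This works because objects never cross a page boundary and slab
	 * pages are page-aligned: clearing the low 12 bits (4 KB pages) of
	 * an object pointer yields the header of the slab page it lives in.
	 */
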
	spin_lock_intsave(&slab->first->lock, &int_mask);

	/* calculate the object number */
	objidx = (((u64) ptr) - ((u64) slab) - slab->startoff) / slab->objsize;

	/* update the bitmap */
	bits = slab->bitmap + (objidx / 8);
	*bits &= ~(1 << (7 - (objidx % 8)));

	if (!--slab->used) {
		FIXME("free the page?");
	}

	spin_unlock_intrestore(&slab->first->lock, int_mask);
}

int allocsize(void *ptr)
{
	struct slab *slab;

	/* get the slab page header for this object */
	slab = (struct slab *) (((u64) ptr) & ~0xfff);

	return slab->objsize;
}

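/*
 * malloc() below rounds every request up to one of the generic cache sizes,
 * so allocsize() reports that rounded-up object size rather than the size
 * the caller originally asked for.
 */
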
void *malloc(int size, int type)
{
	if (!size)
		return NULL;
	if (size <= 16)
		return alloc_slab_obj(generic[0], type);
	if (size <= 32)
		return alloc_slab_obj(generic[1], type);
	if (size <= 64)
		return alloc_slab_obj(generic[2], type);
	if (size <= 128)
		return alloc_slab_obj(generic[3], type);
	if (size <= 256)
		return alloc_slab_obj(generic[4], type);
	if (size <= 512)
		return alloc_slab_obj(generic[5], type);
	if (size <= 1024)
		return alloc_slab_obj(generic[6], type);
	return NULL;	/* no generic cache covers sizes over 1024 bytes */
}

void free(void *ptr)
{
	if (ptr)
		free_slab_obj(ptr);
}
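
/*
 * Usage sketch (illustrative only, not compiled as part of this file;
 * "struct foo" is a made-up caller-side type):
 *
 *	struct foo *f = malloc(sizeof(struct foo), ZONE_NORMAL);
 *
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	free(f);
 *
 * The request is served from the smallest generic cache that fits, and
 * free() returns the object to whichever slab page it came from.
 */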