/*
 * servers/vm/slaballoc.c - slab object allocator for the MINIX VM server.
 * (Imported from minix.git, blob bf9e4fe00d27cbf4cf0f98d2334518567cec22f3;
 * commit: "Make VM fix up memory for kernel that crosses region boundaries".)
 */
2 #define _SYSTEM 1
4 #include <minix/callnr.h>
5 #include <minix/com.h>
6 #include <minix/config.h>
7 #include <minix/const.h>
8 #include <minix/ds.h>
9 #include <minix/endpoint.h>
10 #include <minix/keymap.h>
11 #include <minix/minlib.h>
12 #include <minix/type.h>
13 #include <minix/ipc.h>
14 #include <minix/sysutil.h>
15 #include <minix/syslib.h>
16 #include <minix/bitmap.h>
17 #include <minix/debug.h>
19 #include <errno.h>
20 #include <string.h>
21 #include <env.h>
23 #include <memory.h>
25 #include "glo.h"
26 #include "proto.h"
27 #include "util.h"
28 #include "sanitycheck.h"
/* Number of distinct object sizes served, starting at MINSIZE bytes. */
#define SLABSIZES 60

/* How many objects of 'bytes' each fit in one slab page's data area. */
#define ITEMSPERPAGE(bytes) (DATABYTES / (bytes))

/* Bit bookkeeping over the per-page 'usebits' element array. */
#define ELBITS		(sizeof(element_t)*8)
#define BITPAT(b)	(1UL << ((b) % ELBITS))
#define BITEL(f, b)	(f)->sdh.usebits[(b)/ELBITS]

/* Assert a use-bit is currently off / on before flipping it. */
#define OFF(f, b) vm_assert(!GETBIT(f, b))
#define ON(f, b)  vm_assert(GETBIT(f, b))
#if SANITYCHECKS
/* Slab pages are kept read-only when SANITYCHECKS is on; these wrappers
 * briefly make a page writable (vm_pagelock(.., 0)), record who may write
 * it in sdh.writable, and re-protect it afterwards.
 */
#define SLABDATAWRITABLE(data, wr) do {			\
	vm_assert(data->sdh.writable == WRITABLE_NONE);	\
	vm_assert(wr != WRITABLE_NONE);			\
	vm_pagelock(data, 0);				\
	data->sdh.writable = wr;			\
} while(0)

#define SLABDATAUNWRITABLE(data) do {			\
	vm_assert(data->sdh.writable != WRITABLE_NONE);	\
	data->sdh.writable = WRITABLE_NONE;		\
	vm_pagelock(data, 1);				\
} while(0)

/* Run 'code' with the slab header writable, then re-protect the page. */
#define SLABDATAUSE(data, code) do {			\
	SLABDATAWRITABLE(data, WRITABLE_HEADER);	\
	code						\
	SLABDATAUNWRITABLE(data);			\
} while(0)

#else

#define SLABDATAWRITABLE(data, wr)
#define SLABDATAUNWRITABLE(data)
#define SLABDATAUSE(data, code) do { code } while(0)

#endif

/* Test / set / clear the use-bit for item b of slab page f, keeping the
 * nused count in sync; CLEARBIT also remembers b as a free-slot guess.
 */
#define GETBIT(f, b)	(BITEL(f,b) & BITPAT(b))
#define SETBIT(f, b)	{OFF(f,b); SLABDATAUSE(f, BITEL(f,b)|= BITPAT(b); (f)->sdh.nused++;); }
#define CLEARBIT(f, b)	{ON(f, b); SLABDATAUSE(f, BITEL(f,b)&=~BITPAT(b); (f)->sdh.nused--; (f)->sdh.freeguess = (b);); }
74 #define MINSIZE 8
75 #define MAXSIZE (SLABSIZES-1+MINSIZE)
76 #define USEELEMENTS (1+(VM_PAGE_SIZE/MINSIZE/8))
78 PRIVATE int pages = 0;
80 typedef u8_t element_t;
81 #define BITS_FULL (~(element_t)0)
82 typedef element_t elements_t[USEELEMENTS];
/* This file is too low-level to have global SANITYCHECKs everywhere,
 * as the (other) data structures are often necessarily in an
 * inconsistent state during a slaballoc() / slabfree(). So only do
 * our own sanity checks here, with SLABSANITYCHECK.
 */

/* Special writable values (sdh.writable): page not writable at all, or
 * writable for header bookkeeping; otherwise it holds a data item number.
 */
#define WRITABLE_NONE	-2
#define WRITABLE_HEADER	-1
95 struct sdh {
96 #if SANITYCHECKS
97 u32_t magic1;
98 #endif
99 u8_t list;
100 u16_t nused; /* Number of data items used in this slab. */
101 int freeguess;
102 struct slabdata *next, *prev;
103 elements_t usebits;
104 phys_bytes phys;
105 #if SANITYCHECKS
106 int writable; /* data item number or WRITABLE_* */
107 u32_t magic2;
108 #endif
111 #define DATABYTES (VM_PAGE_SIZE-sizeof(struct sdh))
113 #define MAGIC1 0x1f5b842f
114 #define MAGIC2 0x8bb5a420
115 #define JUNK 0xdeadbeef
116 #define NOJUNK 0xc0ffee
118 #define LIST_UNUSED 0
119 #define LIST_FREE 1
120 #define LIST_USED 2
121 #define LIST_FULL 3
122 #define LIST_NUMBER 4
124 PRIVATE struct slabheader {
125 struct slabdata {
126 struct sdh sdh;
127 u8_t data[DATABYTES];
128 } *list_head[LIST_NUMBER];
129 } slabs[SLABSIZES];
131 FORWARD _PROTOTYPE( int objstats, (void *, int, struct slabheader **, struct slabdata **, int *));
/* Map an object size in bytes to its slabheader in slabs[]. */
#define GETSLAB(b, s) {			\
	int i;				\
	vm_assert((b) >= MINSIZE);	\
	i = (b) - MINSIZE;		\
	vm_assert((i) < SLABSIZES);	\
	vm_assert((i) >= 0);		\
	s = &slabs[i];			\
}

/* Head of list number l in slabheader sl. */
#define LH(sl, l) (sl)->list_head[l]

/* move head of list l1 to list of l2 in slabheader sl. */
#define MOVEHEAD(sl, l1, l2) {		\
	struct slabdata *t;		\
	vm_assert(LH(sl,l1));		\
	REMOVEHEAD(sl, l1, t);		\
	ADDHEAD(t, sl, l2);		\
}

/* remove head of list 'list' in sl, assign it unlinked to 'to'. */
#define REMOVEHEAD(sl, list, to) {	\
	struct slabdata *dat;		\
	dat = (to) = LH(sl, list);	\
	vm_assert(dat);			\
	LH(sl, list) = dat->sdh.next;	\
	UNLINKNODE(dat);		\
}

/* move slabdata nw to slabheader sl under list number l. */
#define ADDHEAD(nw, sl, l) {			\
	SLABDATAUSE(nw,				\
		(nw)->sdh.next = LH(sl, l);	\
		(nw)->sdh.prev = NULL;		\
		(nw)->sdh.list = l;);		\
	LH(sl, l) = (nw);			\
	if((nw)->sdh.next) {			\
		SLABDATAUSE((nw)->sdh.next,	\
			(nw)->sdh.next->sdh.prev = (nw););	\
	}					\
}

/* Unlink node from its doubly-linked list (does not fix the list head). */
#define UNLINKNODE(node) {					\
	struct slabdata *next, *prev;				\
	prev = (node)->sdh.prev;				\
	next = (node)->sdh.next;				\
	if(prev) { SLABDATAUSE(prev, prev->sdh.next = next;); }	\
	if(next) { SLABDATAUSE(next, next->sdh.prev = prev;); }	\
}
182 struct slabdata *newslabdata(int list)
184 struct slabdata *n;
185 phys_bytes p;
187 vm_assert(sizeof(*n) == VM_PAGE_SIZE);
189 if(!(n = vm_allocpage(&p, VMP_SLAB))) {
190 printf("newslabdata: vm_allocpage failed\n");
191 return NULL;
193 memset(n->sdh.usebits, 0, sizeof(n->sdh.usebits));
194 pages++;
196 n->sdh.phys = p;
197 #if SANITYCHECKS
198 n->sdh.magic1 = MAGIC1;
199 n->sdh.magic2 = MAGIC2;
200 #endif
201 n->sdh.nused = 0;
202 n->sdh.freeguess = 0;
203 n->sdh.list = list;
205 #if SANITYCHECKS
206 n->sdh.writable = WRITABLE_HEADER;
207 SLABDATAUNWRITABLE(n);
208 #endif
210 return n;
213 #if SANITYCHECKS
215 /*===========================================================================*
216 * checklist *
217 *===========================================================================*/
218 PRIVATE int checklist(char *file, int line,
219 struct slabheader *s, int l, int bytes)
221 struct slabdata *n = s->list_head[l];
222 int ch = 0;
224 while(n) {
225 int count = 0, i;
226 MYASSERT(n->sdh.magic1 == MAGIC1);
227 MYASSERT(n->sdh.magic2 == MAGIC2);
228 MYASSERT(n->sdh.list == l);
229 MYASSERT(usedpages_add(n->sdh.phys, VM_PAGE_SIZE) == OK);
230 if(n->sdh.prev)
231 MYASSERT(n->sdh.prev->sdh.next == n);
232 else
233 MYASSERT(s->list_head[l] == n);
234 if(n->sdh.next) MYASSERT(n->sdh.next->sdh.prev == n);
235 for(i = 0; i < USEELEMENTS*8; i++)
236 if(i >= ITEMSPERPAGE(bytes))
237 MYASSERT(!GETBIT(n, i));
238 else
239 if(GETBIT(n,i))
240 count++;
241 MYASSERT(count == n->sdh.nused);
242 ch += count;
243 n = n->sdh.next;
246 return ch;
249 /*===========================================================================*
250 * void slab_sanitycheck *
251 *===========================================================================*/
252 PUBLIC void slab_sanitycheck(char *file, int line)
254 int s;
255 for(s = 0; s < SLABSIZES; s++) {
256 int l;
257 for(l = 0; l < LIST_NUMBER; l++) {
258 checklist(file, line, &slabs[s], l, s + MINSIZE);
263 /*===========================================================================*
264 * int slabsane *
265 *===========================================================================*/
266 PUBLIC int slabsane_f(char *file, int line, void *mem, int bytes)
268 struct slabheader *s;
269 struct slabdata *f;
270 int i;
272 return (objstats(mem, bytes, &s, &f, &i) == OK);
274 #endif
276 static int nojunkwarning = 0;
278 /*===========================================================================*
279 * void *slaballoc *
280 *===========================================================================*/
281 PUBLIC void *slaballoc(int bytes)
283 int i;
284 int count = 0;
285 struct slabheader *s;
286 struct slabdata *firstused;
288 SLABSANITYCHECK(SCL_FUNCTIONS);
290 /* Retrieve entry in slabs[]. */
291 GETSLAB(bytes, s);
292 vm_assert(s);
294 /* To make the common case more common, make space in the 'used'
295 * queue first.
297 if(!LH(s, LIST_USED)) {
298 /* Make sure there is something on the freelist. */
299 SLABSANITYCHECK(SCL_DETAIL);
300 if(!LH(s, LIST_FREE)) {
301 struct slabdata *nd = newslabdata(LIST_FREE);
302 SLABSANITYCHECK(SCL_DETAIL);
303 if(!nd) return NULL;
304 ADDHEAD(nd, s, LIST_FREE);
305 SLABSANITYCHECK(SCL_DETAIL);
309 SLABSANITYCHECK(SCL_DETAIL);
310 MOVEHEAD(s, LIST_FREE, LIST_USED);
311 SLABSANITYCHECK(SCL_DETAIL);
314 SLABSANITYCHECK(SCL_DETAIL);
316 vm_assert(s);
317 firstused = LH(s, LIST_USED);
318 vm_assert(firstused);
319 vm_assert(firstused->sdh.magic1 == MAGIC1);
320 vm_assert(firstused->sdh.magic2 == MAGIC2);
321 vm_assert(firstused->sdh.nused < ITEMSPERPAGE(bytes));
323 for(i = firstused->sdh.freeguess;
324 count < ITEMSPERPAGE(bytes); count++, i++) {
325 SLABSANITYCHECK(SCL_DETAIL);
326 i = i % ITEMSPERPAGE(bytes);
328 if(!GETBIT(firstused, i)) {
329 struct slabdata *f;
330 char *ret;
331 SETBIT(firstused, i);
332 SLABSANITYCHECK(SCL_DETAIL);
333 if(firstused->sdh.nused == ITEMSPERPAGE(bytes)) {
334 SLABSANITYCHECK(SCL_DETAIL);
335 MOVEHEAD(s, LIST_USED, LIST_FULL);
336 SLABSANITYCHECK(SCL_DETAIL);
338 SLABSANITYCHECK(SCL_DETAIL);
339 ret = ((char *) firstused->data) + i*bytes;
341 #if SANITYCHECKS
342 nojunkwarning++;
343 slabunlock(ret, bytes);
344 nojunkwarning--;
345 vm_assert(!nojunkwarning);
346 *(u32_t *) ret = NOJUNK;
347 slablock(ret, bytes);
348 #endif
349 SLABSANITYCHECK(SCL_FUNCTIONS);
350 SLABDATAUSE(firstused, firstused->sdh.freeguess = i+1;);
352 #if SANITYCHECKS
353 if(bytes >= SLABSIZES+MINSIZE) {
354 printf("slaballoc: odd, bytes %d?\n", bytes);
356 if(!slabsane_f(__FILE__, __LINE__, ret, bytes))
357 vm_panic("slaballoc: slabsane failed", NO_NUM);
358 #endif
360 return ret;
363 SLABSANITYCHECK(SCL_DETAIL);
366 SLABSANITYCHECK(SCL_FUNCTIONS);
368 vm_panic("slaballoc: no space in 'used' slabdata", NO_NUM);
370 /* Not reached. */
371 return NULL;
374 /*===========================================================================*
375 * int objstats *
376 *===========================================================================*/
377 PRIVATE int objstats(void *mem, int bytes,
378 struct slabheader **sp, struct slabdata **fp, int *ip)
380 #if SANITYCHECKS
381 #define OBJSTATSCHECK(cond) \
382 if(!(cond)) { \
383 printf("VM: objstats: %s failed for ptr 0x%p, %d bytes\n", \
384 #cond, mem, bytes); \
385 return EINVAL; \
387 #else
388 #define OBJSTATSCHECK(cond)
389 #endif
391 struct slabheader *s;
392 struct slabdata *f;
393 int i;
395 OBJSTATSCHECK((char *) mem >= (char *) VM_PAGE_SIZE);
397 #if SANITYCHECKS
398 if(*(u32_t *) mem == JUNK && !nojunkwarning) {
399 util_stacktrace();
400 printf("VM: WARNING: JUNK seen in slab object\n");
402 #endif
403 /* Retrieve entry in slabs[]. */
404 GETSLAB(bytes, s);
406 /* Round address down to VM_PAGE_SIZE boundary to get header. */
407 f = (struct slabdata *) ((char *) mem - (vir_bytes) mem % VM_PAGE_SIZE);
409 OBJSTATSCHECK(f->sdh.magic1 == MAGIC1);
410 OBJSTATSCHECK(f->sdh.magic2 == MAGIC2);
411 OBJSTATSCHECK(f->sdh.list == LIST_USED || f->sdh.list == LIST_FULL);
413 /* Make sure it's in range. */
414 OBJSTATSCHECK((char *) mem >= (char *) f->data);
415 OBJSTATSCHECK((char *) mem < (char *) f->data + sizeof(f->data));
417 /* Get position. */
418 i = (char *) mem - (char *) f->data;
419 OBJSTATSCHECK(!(i % bytes));
420 i = i / bytes;
422 /* Make sure it is marked as allocated. */
423 OBJSTATSCHECK(GETBIT(f, i));
425 /* return values */
426 *ip = i;
427 *fp = f;
428 *sp = s;
430 return OK;
433 /*===========================================================================*
434 * void *slabfree *
435 *===========================================================================*/
436 PUBLIC void slabfree(void *mem, int bytes)
438 int i;
439 struct slabheader *s;
440 struct slabdata *f;
442 SLABSANITYCHECK(SCL_FUNCTIONS);
444 if(objstats(mem, bytes, &s, &f, &i) != OK) {
445 vm_panic("slabfree objstats failed", NO_NUM);
448 #if SANITYCHECKS
449 if(*(u32_t *) mem == JUNK) {
450 printf("VM: WARNING: likely double free, JUNK seen\n");
453 slabunlock(mem, bytes);
454 *(u32_t *) mem = JUNK;
455 nojunkwarning++;
456 slablock(mem, bytes);
457 nojunkwarning--;
458 vm_assert(!nojunkwarning);
459 #endif
461 /* Free this data. */
462 CLEARBIT(f, i);
464 /* Check if this slab changes lists. */
465 if(f->sdh.nused == 0) {
466 /* Now become FREE; must've been USED */
467 vm_assert(f->sdh.list == LIST_USED);
468 UNLINKNODE(f);
469 if(f == LH(s, LIST_USED))
470 LH(s, LIST_USED) = f->sdh.next;
471 ADDHEAD(f, s, LIST_FREE);
472 SLABSANITYCHECK(SCL_DETAIL);
473 } else if(f->sdh.nused == ITEMSPERPAGE(bytes)-1) {
474 /* Now become USED; must've been FULL */
475 vm_assert(f->sdh.list == LIST_FULL);
476 UNLINKNODE(f);
477 if(f == LH(s, LIST_FULL))
478 LH(s, LIST_FULL) = f->sdh.next;
479 ADDHEAD(f, s, LIST_USED);
480 SLABSANITYCHECK(SCL_DETAIL);
481 } else {
482 /* Stay USED */
483 vm_assert(f->sdh.list == LIST_USED);
486 SLABSANITYCHECK(SCL_FUNCTIONS);
488 return;
491 /*===========================================================================*
492 * void *slablock *
493 *===========================================================================*/
494 PUBLIC void slablock(void *mem, int bytes)
496 int i;
497 struct slabheader *s;
498 struct slabdata *f;
500 if(objstats(mem, bytes, &s, &f, &i) != OK)
501 vm_panic("slablock objstats failed", NO_NUM);
503 SLABDATAUNWRITABLE(f);
505 FIXME("verify new contents");
507 return;
510 /*===========================================================================*
511 * void *slabunlock *
512 *===========================================================================*/
513 PUBLIC void slabunlock(void *mem, int bytes)
515 int i;
516 struct slabheader *s;
517 struct slabdata *f;
519 if(objstats(mem, bytes, &s, &f, &i) != OK)
520 vm_panic("slablock objstats failed", NO_NUM);
522 SLABDATAWRITABLE(f, i);
524 return;
#if SANITYCHECKS
/*===========================================================================*
 *				void slabstats				     *
 *===========================================================================*/
/* Print slab usage statistics. Rate-limited: only every 1000th call
 * actually reports (also runs checklist() over every list as a side
 * effect of counting).
 */
PUBLIC void slabstats(void)
{
	int s, totalbytes = 0;
	static int n;
	n++;
	if(n%1000) return;
	for(s = 0; s < SLABSIZES; s++) {
		int l;
		for(l = 0; l < LIST_NUMBER; l++) {
			int b, t;
			b = s + MINSIZE;
			t = checklist(__FILE__, __LINE__, &slabs[s], l, b);

			if(t > 0) {
				int bytes = t * b;
				printf("VMSTATS: %2d slabs: %d (%dkB)\n", b, t, bytes/1024);
				totalbytes += bytes;
			}
		}
	}

	if(pages > 0) {
		printf("VMSTATS: %dK net used in slab objects in %d pages (%dkB): %d%% utilization\n",
			totalbytes/1024, pages, pages*VM_PAGE_SIZE/1024,
			100 * totalbytes / (pages*VM_PAGE_SIZE));
	}
}
#endif