#include <minix/callnr.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/endpoint.h>
#include <minix/keymap.h>
#include <minix/minlib.h>
#include <minix/type.h>
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/bitmap.h>
#include <minix/debug.h>

#include <errno.h>
#include <string.h>

#include "glo.h"
#include "proto.h"
#include "util.h"
#include "sanitycheck.h"

/* Number of distinct object sizes this allocator serves. */
#define SLABSIZES 60
#define ITEMSPERPAGE(bytes) (DATABYTES / (bytes))

#define ELBITS		(sizeof(element_t)*8)
#define BITPAT(b)	(1UL << ((b) % ELBITS))
#define BITEL(f, b)	(f)->sdh.usebits[(b)/ELBITS]

#define OFF(f, b)	vm_assert(!GETBIT(f, b))
#define ON(f, b)	vm_assert(GETBIT(f, b))
#if SANITYCHECKS
#define SLABDATAWRITABLE(data, wr) do { \
        vm_assert(data->sdh.writable == WRITABLE_NONE); \
        vm_assert(wr != WRITABLE_NONE); \
        vm_pagelock(data, 0); \
        data->sdh.writable = wr; \
} while(0)

#define SLABDATAUNWRITABLE(data) do { \
        vm_assert(data->sdh.writable != WRITABLE_NONE); \
        data->sdh.writable = WRITABLE_NONE; \
        vm_pagelock(data, 1); \
} while(0)

#define SLABDATAUSE(data, code) do { \
        SLABDATAWRITABLE(data, WRITABLE_HEADER); \
        code \
        SLABDATAUNWRITABLE(data); \
} while(0)

#else

#define SLABDATAWRITABLE(data, wr)
#define SLABDATAUNWRITABLE(data)
#define SLABDATAUSE(data, code) do { code } while(0)

#endif
#define GETBIT(f, b)	(BITEL(f,b) & BITPAT(b))
#define SETBIT(f, b)	{ OFF(f,b); SLABDATAUSE(f, BITEL(f,b) |= BITPAT(b); (f)->sdh.nused++;); }
#define CLEARBIT(f, b)	{ ON(f, b); SLABDATAUSE(f, BITEL(f,b) &= ~BITPAT(b); (f)->sdh.nused--; (f)->sdh.freeguess = (b);); }
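
/* Illustrative note (not in the original source): each slabdata page
 * tracks its items with one bit per item in sdh.usebits. For item
 * index b, BITEL() selects the element_t that holds the bit and
 * BITPAT() the bit within it; e.g. with 8-bit elements, item 11 lives
 * in usebits[1] under mask 1<<3. SETBIT/CLEARBIT also keep sdh.nused
 * and the sdh.freeguess hint consistent, and go through SLABDATAUSE()
 * so the header is only writable while it is actually being updated.
 */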
#define MINSIZE 8
#define MAXSIZE (SLABSIZES-1+MINSIZE)
#define USEELEMENTS (1+(VM_PAGE_SIZE/MINSIZE/8))
PRIVATE int pages = 0;

typedef u8_t element_t;
#define BITS_FULL (~(element_t)0)
typedef element_t elements_t[USEELEMENTS];
/* This file is too low-level to have global SANITYCHECKs everywhere,
 * as the (other) data structures are often necessarily in an
 * inconsistent state during a slaballoc() / slabfree(). So only do
 * our own sanity checks here, with SLABSANITYCHECK.
 */
/* Special writable values. */
#define WRITABLE_NONE	-2
#define WRITABLE_HEADER	-1
struct sdh {
        u32_t magic1;
        u8_t list;
        u16_t nused;	/* Number of data items used in this slab. */
        int freeguess;
        struct slabdata *next, *prev;
        elements_t usebits;
        phys_bytes phys;
        int writable;	/* data item number or WRITABLE_* */
        u32_t magic2;
};
#define DATABYTES	(VM_PAGE_SIZE-sizeof(struct sdh))

#define MAGIC1	0x1f5b842f
#define MAGIC2	0x8bb5a420
#define JUNK	0xdeadbeef
#define NOJUNK	0xc0ffee
#define LIST_UNUSED	0
#define LIST_FREE	1
#define LIST_USED	2
#define LIST_FULL	3
#define LIST_NUMBER	4
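
/* Illustrative note (not in the original source): every slabdata page
 * sits on exactly one list per object size. A fresh page starts on
 * LIST_FREE, moves to LIST_USED when slaballoc() starts handing out
 * its items, and to LIST_FULL once every item is taken; slabfree()
 * moves it back down as items are released. slaballoc() only ever
 * looks at the head of LIST_USED, which keeps the common case cheap.
 */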
PRIVATE struct slabheader {
        struct slabdata {
                struct sdh sdh;
                u8_t data[DATABYTES];
        } *list_head[LIST_NUMBER];
} slabs[SLABSIZES];
FORWARD _PROTOTYPE( int objstats, (void *, int,
	struct slabheader **, struct slabdata **, int *));
#define GETSLAB(b, s) { \
        int i; \
        vm_assert((b) >= MINSIZE); \
        i = (b) - MINSIZE; \
        vm_assert((i) < SLABSIZES); \
        vm_assert((i) >= 0); \
        s = &slabs[i]; \
}
#define LH(sl, l) (sl)->list_head[l]
/* Move the head of list l1 to list l2 in slabheader sl. */
#define MOVEHEAD(sl, l1, l2) { \
        struct slabdata *t; \
        vm_assert(LH(sl,l1)); \
        REMOVEHEAD(sl, l1, t); \
        ADDHEAD(t, sl, l2); \
}
/* Remove the head of list 'list' in sl and assign it, unlinked, to 'to'. */
#define REMOVEHEAD(sl, list, to) { \
        struct slabdata *dat; \
        dat = (to) = LH(sl, list); \
        vm_assert(dat); \
        LH(sl, list) = dat->sdh.next; \
        UNLINKNODE(dat); \
}
/* Add slabdata nw to slabheader sl under list number l. */
#define ADDHEAD(nw, sl, l) { \
        SLABDATAUSE(nw, \
                (nw)->sdh.next = LH(sl, l); \
                (nw)->sdh.prev = NULL; \
                (nw)->sdh.list = l;); \
        LH(sl, l) = (nw); \
        if((nw)->sdh.next) { \
                SLABDATAUSE((nw)->sdh.next, \
                        (nw)->sdh.next->sdh.prev = (nw);); \
        } \
}
#define UNLINKNODE(node) { \
        struct slabdata *next, *prev; \
        prev = (node)->sdh.prev; \
        next = (node)->sdh.next; \
        if(prev) { SLABDATAUSE(prev, prev->sdh.next = next;); } \
        if(next) { SLABDATAUSE(next, next->sdh.prev = prev;); } \
}
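
/* Illustrative note (not in the original source): the list macros
 * above only touch slabdata headers through SLABDATAUSE(), so with
 * SANITYCHECKS enabled a header page is briefly unprotected via
 * vm_pagelock(), updated, and write-protected again. A stray write
 * into a slab page header then faults instead of silently corrupting
 * the allocator's bookkeeping.
 */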
struct slabdata *newslabdata(int list)
{
        struct slabdata *n;
        phys_bytes p;

        vm_assert(sizeof(*n) == VM_PAGE_SIZE);

        if(!(n = vm_allocpage(&p, VMP_SLAB))) {
                printf("newslabdata: vm_allocpage failed\n");
                return NULL;
        }
        memset(n->sdh.usebits, 0, sizeof(n->sdh.usebits));
        pages++;

        n->sdh.phys = p;
#if SANITYCHECKS
        n->sdh.magic1 = MAGIC1;
        n->sdh.magic2 = MAGIC2;
#endif
        n->sdh.nused = 0;
        n->sdh.freeguess = 0;
        n->sdh.list = list;

#if SANITYCHECKS
        n->sdh.writable = WRITABLE_HEADER;
        SLABDATAUNWRITABLE(n);
#endif

        return n;
}
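
/* Illustrative example (not in the original source): a slabdata page
 * is exactly VM_PAGE_SIZE bytes, the first sizeof(struct sdh) of which
 * are the header; the rest is item payload. With 4kB pages and, say, a
 * 128-byte header, DATABYTES is 3968, so ITEMSPERPAGE(8) == 496 and
 * ITEMSPERPAGE(64) == 62. These numbers are assumptions for the sake
 * of the example; the real values depend on VM_PAGE_SIZE and the
 * compiler's packing of struct sdh.
 */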
/*===========================================================================*
 *				checklist				     *
 *===========================================================================*/
PRIVATE int checklist(char *file, int line,
	struct slabheader *s, int l, int bytes)
{
        struct slabdata *n = s->list_head[l];
        int ch = 0;

        while(n) {
                int count = 0, i;

                MYASSERT(n->sdh.magic1 == MAGIC1);
                MYASSERT(n->sdh.magic2 == MAGIC2);
                MYASSERT(n->sdh.list == l);
                MYASSERT(usedpages_add(n->sdh.phys, VM_PAGE_SIZE) == OK);
                if(n->sdh.prev)
                        MYASSERT(n->sdh.prev->sdh.next == n);
                else
                        MYASSERT(s->list_head[l] == n);
                if(n->sdh.next) MYASSERT(n->sdh.next->sdh.prev == n);
                for(i = 0; i < USEELEMENTS*8; i++)
                        if(i >= ITEMSPERPAGE(bytes))
                                MYASSERT(!GETBIT(n, i));
                        else
                                if(GETBIT(n, i))
                                        count++;
                MYASSERT(count == n->sdh.nused);
                ch += count;
                n = n->sdh.next;
        }

        return ch;
}
/*===========================================================================*
 *			void slab_sanitycheck				     *
 *===========================================================================*/
PUBLIC void slab_sanitycheck(char *file, int line)
{
        int s;
        for(s = 0; s < SLABSIZES; s++) {
                int l;
                for(l = 0; l < LIST_NUMBER; l++) {
                        checklist(file, line, &slabs[s], l, s + MINSIZE);
                }
        }
}
/*===========================================================================*
 *				int slabsane_f				     *
 *===========================================================================*/
PUBLIC int slabsane_f(char *file, int line, void *mem, int bytes)
{
        struct slabheader *s;
        struct slabdata *f;
        int i;

        return (objstats(mem, bytes, &s, &f, &i) == OK);
}
static int nojunkwarning = 0;
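
/* Illustrative note (not in the original source): freed objects are
 * filled with the JUNK pattern, and objstats() warns when it sees JUNK
 * in a supposedly live object. nojunkwarning is raised temporarily by
 * slaballoc()/slabfree() while they legitimately touch JUNK-filled
 * memory themselves, so only outside callers trigger the warning.
 */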
/*===========================================================================*
 *				void *slaballoc				     *
 *===========================================================================*/
PUBLIC void *slaballoc(int bytes)
{
        int i;
        int count = 0;
        struct slabheader *s;
        struct slabdata *firstused;

        SLABSANITYCHECK(SCL_FUNCTIONS);

        /* Retrieve entry in slabs[]. */
        GETSLAB(bytes, s);
        vm_assert(s);

        /* To make the common case more common, make space in the 'used'
         * list first.
         */
        if(!LH(s, LIST_USED)) {
                /* Make sure there is something on the freelist. */
                SLABSANITYCHECK(SCL_DETAIL);
                if(!LH(s, LIST_FREE)) {
                        struct slabdata *nd = newslabdata(LIST_FREE);
                        SLABSANITYCHECK(SCL_DETAIL);
                        if(!nd) return NULL;
                        ADDHEAD(nd, s, LIST_FREE);
                        SLABSANITYCHECK(SCL_DETAIL);
                }

                SLABSANITYCHECK(SCL_DETAIL);
                MOVEHEAD(s, LIST_FREE, LIST_USED);
                SLABSANITYCHECK(SCL_DETAIL);
        }
        SLABSANITYCHECK(SCL_DETAIL);

        vm_assert(s);
        firstused = LH(s, LIST_USED);
        vm_assert(firstused);
        vm_assert(firstused->sdh.magic1 == MAGIC1);
        vm_assert(firstused->sdh.magic2 == MAGIC2);
        vm_assert(firstused->sdh.nused < ITEMSPERPAGE(bytes));

        for(i = firstused->sdh.freeguess;
                count < ITEMSPERPAGE(bytes); count++, i++) {
                SLABSANITYCHECK(SCL_DETAIL);
                i = i % ITEMSPERPAGE(bytes);

                if(!GETBIT(firstused, i)) {
                        char *ret;

                        /* Found a free item; mark it used. */
                        SETBIT(firstused, i);
                        SLABSANITYCHECK(SCL_DETAIL);
                        if(firstused->sdh.nused == ITEMSPERPAGE(bytes)) {
                                SLABSANITYCHECK(SCL_DETAIL);
                                MOVEHEAD(s, LIST_USED, LIST_FULL);
                                SLABSANITYCHECK(SCL_DETAIL);
                        }
                        SLABSANITYCHECK(SCL_DETAIL);
                        ret = ((char *) firstused->data) + i*bytes;

#if SANITYCHECKS
                        nojunkwarning++;
                        slabunlock(ret, bytes);
                        nojunkwarning--;
                        vm_assert(!nojunkwarning);
                        *(u32_t *) ret = NOJUNK;
                        slablock(ret, bytes);
#endif
                        SLABSANITYCHECK(SCL_FUNCTIONS);
                        SLABDATAUSE(firstused, firstused->sdh.freeguess = i+1;);

#if SANITYCHECKS
                        if(bytes >= SLABSIZES+MINSIZE) {
                                printf("slaballoc: odd, bytes %d?\n", bytes);
                        }
                        if(!slabsane_f(__FILE__, __LINE__, ret, bytes))
                                vm_panic("slaballoc: slabsane failed", NO_NUM);
#endif

                        return ret;
                }

                SLABSANITYCHECK(SCL_DETAIL);
        }
        SLABSANITYCHECK(SCL_FUNCTIONS);

        vm_panic("slaballoc: no space in 'used' slabdata", NO_NUM);

        /* Not reached. */
        return NULL;
}
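
/* Usage sketch (illustrative, not part of the original source; the
 * 'struct phys_region' type stands in for any fixed-size VM object):
 *
 *	struct phys_region *pr;
 *	if(!(pr = slaballoc(sizeof(*pr))))
 *		vm_panic("out of memory", NO_NUM);
 *	... use *pr ...
 *	slabfree(pr, sizeof(*pr));
 *
 * The same 'bytes' value must be passed to slabfree() that was passed
 * to slaballoc(), since it selects the slabs[] entry the object lives in.
 */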
/*===========================================================================*
 *				int objstats				     *
 *===========================================================================*/
PRIVATE int objstats(void *mem, int bytes,
	struct slabheader **sp, struct slabdata **fp, int *ip)
{
#if SANITYCHECKS
#define OBJSTATSCHECK(cond) \
        if(!(cond)) { \
                printf("VM: objstats: %s failed for ptr 0x%p, %d bytes\n", \
                        #cond, mem, bytes); \
                return EINVAL; \
        }
#else
#define OBJSTATSCHECK(cond)
#endif

        struct slabheader *s;
        struct slabdata *f;
        int i;

        OBJSTATSCHECK((char *) mem >= (char *) VM_PAGE_SIZE);

#if SANITYCHECKS
        if(*(u32_t *) mem == JUNK && !nojunkwarning) {
                printf("VM: WARNING: JUNK seen in slab object\n");
        }
#endif

        /* Retrieve entry in slabs[]. */
        GETSLAB(bytes, s);

        /* Round address down to VM_PAGE_SIZE boundary to get header. */
        f = (struct slabdata *) ((char *) mem - (vir_bytes) mem % VM_PAGE_SIZE);

        OBJSTATSCHECK(f->sdh.magic1 == MAGIC1);
        OBJSTATSCHECK(f->sdh.magic2 == MAGIC2);
        OBJSTATSCHECK(f->sdh.list == LIST_USED || f->sdh.list == LIST_FULL);

        /* Make sure it's in range. */
        OBJSTATSCHECK((char *) mem >= (char *) f->data);
        OBJSTATSCHECK((char *) mem < (char *) f->data + sizeof(f->data));

        /* Get the item number. */
        i = (char *) mem - (char *) f->data;
        OBJSTATSCHECK(!(i % bytes));
        i = i / bytes;

        /* Make sure it is marked as allocated. */
        OBJSTATSCHECK(GETBIT(f, i));

        /* Return parameters. */
        *ip = i;
        *fp = f;
        *sp = s;

        return OK;
}
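
/* Illustrative note (not in the original source): because every
 * slabdata page is page-aligned, objstats() can recover the header of
 * any object simply by rounding the object's address down to a
 * VM_PAGE_SIZE boundary; the item index then follows from the byte
 * offset into f->data divided by the object size.
 */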
/*===========================================================================*
 *				void slabfree				     *
 *===========================================================================*/
PUBLIC void slabfree(void *mem, int bytes)
{
        int i;
        struct slabheader *s;
        struct slabdata *f;

        SLABSANITYCHECK(SCL_FUNCTIONS);

        if(objstats(mem, bytes, &s, &f, &i) != OK) {
                vm_panic("slabfree objstats failed", NO_NUM);
        }

#if SANITYCHECKS
        if(*(u32_t *) mem == JUNK) {
                printf("VM: WARNING: likely double free, JUNK seen\n");
        }

        slabunlock(mem, bytes);
        *(u32_t *) mem = JUNK;
        nojunkwarning++;
        slablock(mem, bytes);
        nojunkwarning--;
        vm_assert(!nojunkwarning);
#endif

        /* Free this data. */
        CLEARBIT(f, i);

        /* Check if this slab changes lists. */
        if(f->sdh.nused == 0) {
                /* Became FREE; must have been USED. */
                vm_assert(f->sdh.list == LIST_USED);
                UNLINKNODE(f);
                if(f == LH(s, LIST_USED))
                        LH(s, LIST_USED) = f->sdh.next;
                ADDHEAD(f, s, LIST_FREE);
                SLABSANITYCHECK(SCL_DETAIL);
        } else if(f->sdh.nused == ITEMSPERPAGE(bytes)-1) {
                /* Became USED; must have been FULL. */
                vm_assert(f->sdh.list == LIST_FULL);
                UNLINKNODE(f);
                if(f == LH(s, LIST_FULL))
                        LH(s, LIST_FULL) = f->sdh.next;
                ADDHEAD(f, s, LIST_USED);
                SLABSANITYCHECK(SCL_DETAIL);
        } else {
                /* Stays USED. */
                vm_assert(f->sdh.list == LIST_USED);
        }

        SLABSANITYCHECK(SCL_FUNCTIONS);

        return;
}
/*===========================================================================*
 *				void slablock				     *
 *===========================================================================*/
PUBLIC void slablock(void *mem, int bytes)
{
        int i;
        struct slabheader *s;
        struct slabdata *f;

        if(objstats(mem, bytes, &s, &f, &i) != OK)
                vm_panic("slablock objstats failed", NO_NUM);

        SLABDATAUNWRITABLE(f);

        FIXME("verify new contents");

        return;
}
/*===========================================================================*
 *				void slabunlock				     *
 *===========================================================================*/
PUBLIC void slabunlock(void *mem, int bytes)
{
        int i;
        struct slabheader *s;
        struct slabdata *f;

        if(objstats(mem, bytes, &s, &f, &i) != OK)
                vm_panic("slabunlock objstats failed", NO_NUM);

        SLABDATAWRITABLE(f, i);

        return;
}
/*===========================================================================*
 *				void slabstats				     *
 *===========================================================================*/
PUBLIC void slabstats(void)
{
        int s, total = 0, totalbytes = 0;

        for(s = 0; s < SLABSIZES; s++) {
                int l;
                for(l = 0; l < LIST_NUMBER; l++) {
                        int b, t;
                        b = s + MINSIZE;
                        t = checklist(__FILE__, __LINE__, &slabs[s], l, b);

                        if(t > 0) {
                                int bytes = t * b;
                                printf("VMSTATS: %2d slabs: %d (%dkB)\n", b, t, bytes/1024);
                                totalbytes += bytes;
                                total += t;
                        }
                }
        }

        if(pages > 0) {
                printf("VMSTATS: %dK net used in slab objects in %d pages (%dkB): %d%% utilization\n",
                        totalbytes/1024, pages, pages*VM_PAGE_SIZE/1024,
                        100 * totalbytes / (pages*VM_PAGE_SIZE));
        }
}