/*	$NetBSD: kern_malloc.c,v 1.126 2009/01/07 21:06:31 pooka Exp $	*/
/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.4 (Berkeley) 5/20/95
 */
/*
 * Copyright (c) 1996 Christopher G. Demetriou.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.4 (Berkeley) 5/20/95
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_malloc.c,v 1.126 2009/01/07 21:06:31 pooka Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/debug.h>
#include <sys/mutex.h>
#include <sys/lockdebug.h>

#include <uvm/uvm_extern.h>
static struct vm_map_kernel kmem_map_store;
struct vm_map *kmem_map = NULL;
#include "opt_kmempages.h"

#ifdef NKMEMCLUSTERS
#error NKMEMCLUSTERS is obsolete; remove it from your kernel config file and use NKMEMPAGES instead or let the kernel auto-size
#endif
/*
 * Default number of pages in kmem_map.  We attempt to calculate this
 * at run-time, but allow it to be either patched or set in the kernel
 * config file.
 */
#ifndef NKMEMPAGES
#define	NKMEMPAGES	0
#endif
int	nkmempages = NKMEMPAGES;
/*
 * Defaults for lower- and upper-bounds for the kmem_map page count.
 * Can be overridden by kernel config options.
 */
#ifndef NKMEMPAGES_MIN
#define	NKMEMPAGES_MIN	NKMEMPAGES_MIN_DEFAULT
#endif

#ifndef NKMEMPAGES_MAX
#define	NKMEMPAGES_MAX	NKMEMPAGES_MAX_DEFAULT
#endif
#include "opt_kmemstats.h"
#include "opt_malloclog.h"
#include "opt_malloc_debug.h"
#define	MINALLOCSIZE	(1 << MINBUCKET)
#define	BUCKETINDX(size) \
	((size) <= (MINALLOCSIZE * 128) \
		? (size) <= (MINALLOCSIZE * 8) \
			? (size) <= (MINALLOCSIZE * 2) \
				? (size) <= (MINALLOCSIZE * 1) \
					? (MINBUCKET + 0) \
					: (MINBUCKET + 1) \
				: (size) <= (MINALLOCSIZE * 4) \
					? (MINBUCKET + 2) \
					: (MINBUCKET + 3) \
			: (size) <= (MINALLOCSIZE* 32) \
				? (size) <= (MINALLOCSIZE * 16) \
					? (MINBUCKET + 4) \
					: (MINBUCKET + 5) \
				: (size) <= (MINALLOCSIZE * 64) \
					? (MINBUCKET + 6) \
					: (MINBUCKET + 7) \
		: (size) <= (MINALLOCSIZE * 2048) \
			? (size) <= (MINALLOCSIZE * 512) \
				? (size) <= (MINALLOCSIZE * 256) \
					? (MINBUCKET + 8) \
					: (MINBUCKET + 9) \
				: (size) <= (MINALLOCSIZE * 1024) \
					? (MINBUCKET + 10) \
					: (MINBUCKET + 11) \
			: (size) <= (MINALLOCSIZE * 8192) \
				? (size) <= (MINALLOCSIZE * 4096) \
					? (MINBUCKET + 12) \
					: (MINBUCKET + 13) \
				: (size) <= (MINALLOCSIZE * 16384) \
					? (MINBUCKET + 14) \
					: (MINBUCKET + 15))
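/*
 * Worked example of BUCKETINDX (illustrative; assumes MINBUCKET == 4,
 * so MINALLOCSIZE == 16):
 *
 *	BUCKETINDX(1) ... BUCKETINDX(16)  == 4	(16-byte bucket)
 *	BUCKETINDX(17) ... BUCKETINDX(32) == 5	(32-byte bucket)
 *	BUCKETINDX(100)                   == 7	(128-byte bucket)
 *
 * That is, BUCKETINDX(size) is the smallest n >= MINBUCKET with
 * (1 << n) >= size; the nested conditionals above are just a binary
 * search over the sixteen buckets.
 */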
/*
 * Array of descriptors that describe the contents of each page
 */
struct kmemusage {
	short ku_indx;		/* bucket index */
	union {
		u_short freecnt;/* for small allocations, free pieces in page */
		u_short pagecnt;/* for large allocations, pages alloced */
	} ku_un;
};
#define	ku_freecnt ku_un.freecnt
#define	ku_pagecnt ku_un.pagecnt
struct kmembuckets kmembuckets[MINBUCKET + 16];
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;

static void *malloc_freecheck;
/*
 * Turn virtual addresses into kmem map indices.
 */
#define	btokup(addr)	(&kmemusage[((char *)(addr) - kmembase) >> PGSHIFT])
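/*
 * Example (hypothetical numbers; assumes PGSHIFT == 12, i.e. 4 KB
 * pages): for an addr equal to kmembase + 0x5234, btokup(addr) yields
 * &kmemusage[5], the descriptor of the kmem_map page holding that
 * address.
 */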
struct malloc_type *kmemstatistics;
#ifdef MALLOCLOG
#ifndef MALLOCLOGSIZE
#define	MALLOCLOGSIZE	100000
#endif

struct malloclog {
	void *addr;
	long size;
	struct malloc_type *type;
	int action;
	const char *file;
	long line;
} malloclog[MALLOCLOGSIZE];

long	malloclogptr;
/*
 * Fuzz factor for neighbour address match; this must be a mask of the
 * lower bits we wish to ignore when comparing addresses.
 */
__uintptr_t malloclog_fuzz = 0x7FL;
static void
domlog(void *a, long size, struct malloc_type *type, int action,
    const char *file, long line)
{

	malloclog[malloclogptr].addr = a;
	malloclog[malloclogptr].size = size;
	malloclog[malloclogptr].type = type;
	malloclog[malloclogptr].action = action;
	malloclog[malloclogptr].file = file;
	malloclog[malloclogptr].line = line;
	malloclogptr++;
	if (malloclogptr >= MALLOCLOGSIZE)
		malloclogptr = 0;
}
static void
hitmlog(void *a)
{
	struct malloclog *lp;
	long l;

#define	PRT do { \
	lp = &malloclog[l]; \
	if (lp->addr == a && lp->action) { \
		printf("malloc log entry %ld:\n", l); \
		printf("\taddr = %p\n", lp->addr); \
		printf("\tsize = %ld\n", lp->size); \
		printf("\ttype = %s\n", lp->type->ks_shortdesc); \
		printf("\taction = %s\n", lp->action == 1 ? "alloc" : "free"); \
		printf("\tfile = %s\n", lp->file); \
		printf("\tline = %ld\n", lp->line); \
	} \
} while (/* CONSTCOND */0)
/*
 * Print fuzzy matched "neighbour" - look for the memory block that has
 * been allocated below the address we are interested in.  We look for a
 * base address + size that is within malloclog_fuzz of our target
 * address.  If the base address and target address are the same then it is
 * likely we have found a free (size is 0 in this case) so we won't report
 * those, they will get reported by PRT anyway.
 */
#define	NPRT do { \
	__uintptr_t fuzz_mask = ~(malloclog_fuzz); \
	lp = &malloclog[l]; \
	if ((__uintptr_t)lp->addr != (__uintptr_t)a && \
	    (((__uintptr_t)lp->addr + lp->size + malloclog_fuzz) & fuzz_mask) \
	    == ((__uintptr_t)a & fuzz_mask) && lp->action) { \
		printf("neighbour malloc log entry %ld:\n", l); \
		printf("\taddr = %p\n", lp->addr); \
		printf("\tsize = %ld\n", lp->size); \
		printf("\ttype = %s\n", lp->type->ks_shortdesc); \
		printf("\taction = %s\n", lp->action == 1 ? "alloc" : "free"); \
		printf("\tfile = %s\n", lp->file); \
		printf("\tline = %ld\n", lp->line); \
	} \
} while (/* CONSTCOND */0)
	for (l = malloclogptr; l < MALLOCLOGSIZE; l++) {
		PRT;
		NPRT;
	}

	for (l = 0; l < malloclogptr; l++) {
		PRT;
		NPRT;
	}
}
#endif /* MALLOCLOG */
#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
const long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};
/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define	WEIRD_ADDR	((uint32_t) 0xdeadbeef)
#define	MAX_COPY	PAGE_SIZE
/*
 * Normally the freelist structure is used only to hold the list pointer
 * for free objects.  However, when running with diagnostics, the first
 * 8/16 bytes of the structure is unused except for diagnostic information,
 * and the free list pointer is at offset 8/16 in the structure.  Since the
 * first 8 bytes is the portion of the structure most often modified, this
 * helps to detect memory reuse problems and avoid free list corruption.
 */
#ifdef DIAGNOSTIC
struct freelist {
	uint32_t spare0;
#ifdef _LP64
	uint32_t spare1;		/* explicit padding */
#endif
	struct malloc_type *type;
	void *	next;
};
#else /* !DIAGNOSTIC */
struct freelist {
	void *	next;
};
#endif /* DIAGNOSTIC */
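/*
 * Layout sketch of the diagnostic variant (my reading of the struct
 * above, not authoritative): on an _LP64 kernel, spare0/spare1 fill
 * bytes 0-7, type occupies bytes 8-15, and next sits at offset 16; on
 * a 32-bit kernel, spare0 is bytes 0-3, type bytes 4-7, and next sits
 * at offset 8, matching the "offset 8/16" remark.  Without DIAGNOSTIC
 * the object's first word is the next pointer itself.
 */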
kmutex_t malloc_lock;
/*
 * Allocate a block of memory
 */
#ifdef MALLOCLOG
void *
_kern_malloc(unsigned long size, struct malloc_type *ksp, int flags,
    const char *file, long line)
#else
void *
kern_malloc(unsigned long size, struct malloc_type *ksp, int flags)
#endif /* MALLOCLOG */
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct freelist *freep;
	long indx, npg, allocsize;
	char *va, *cp, *savedlist;
#ifdef DIAGNOSTIC
	uint32_t *end, *lp;
	int copysize;
#endif
	if ((flags & M_NOWAIT) == 0) {
		ASSERT_SLEEPABLE();
	}
#ifdef MALLOC_DEBUG
	if (debug_malloc(size, ksp, flags, (void *) &va)) {
		if (va != 0)
			FREECHECK_OUT(&malloc_freecheck, (void *)va);
		return ((void *) va);
	}
#endif
	indx = BUCKETINDX(size);
	kbp = &kmembuckets[indx];
	mutex_spin_enter(&malloc_lock);
#ifdef KMEMSTATS
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			mutex_spin_exit(&malloc_lock);
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		mtsleep((void *)ksp, PSWP+2, ksp->ks_shortdesc, 0,
		    &malloc_lock);
	}
	ksp->ks_size |= 1 << indx;
#endif
#ifdef DIAGNOSTIC
	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
#endif
	if (kbp->kb_next == NULL) {
		kbp->kb_last = NULL;
		if (size > MAXALLOCSAVE)
			allocsize = round_page(size);
		else
			allocsize = 1 << indx;
		npg = btoc(allocsize);
		mutex_spin_exit(&malloc_lock);
		va = (void *) uvm_km_alloc(kmem_map,
		    (vsize_t)ctob(npg), 0,
		    ((flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0) |
		    ((flags & M_CANFAIL) ? UVM_KMF_CANFAIL : 0) |
		    UVM_KMF_WIRED);
		if (__predict_false(va == NULL)) {
			/*
			 * Kmem_malloc() can return NULL, even if it can
			 * wait, if there is no map space available, because
			 * it can't fix that problem.  Neither can we,
			 * right now.  (We should release pages which
			 * are completely free and which are in kmembuckets
			 * with too many free elements.)
			 */
			if ((flags & (M_NOWAIT|M_CANFAIL)) == 0)
				panic("malloc: out of space in kmem_map");
			return (NULL);
		}
		mutex_spin_enter(&malloc_lock);
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * kmembucket, don't assume the list is still empty.
		 */
		savedlist = kbp->kb_next;
		kbp->kb_next = cp = va + (npg << PAGE_SHIFT) - allocsize;
		for (;;) {
			freep = (struct freelist *)cp;
#ifdef DIAGNOSTIC
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			end = (uint32_t *)&cp[copysize];
			for (lp = (uint32_t *)cp; lp < end; lp++)
				*lp = WEIRD_ADDR;
			freep->type = M_FREE;
#endif /* DIAGNOSTIC */
			if (cp <= va)
				break;
			cp -= allocsize;
			freep->next = cp;
		}
		freep->next = savedlist;
		if (savedlist == NULL)
			kbp->kb_last = (void *)freep;
	}
	va = kbp->kb_next;
	kbp->kb_next = ((struct freelist *)va)->next;
#ifdef DIAGNOSTIC
	freep = (struct freelist *)va;
	/* XXX potential to get garbage pointer here. */
	if (kbp->kb_next) {
		int rv;
		vaddr_t addr = (vaddr_t)kbp->kb_next;

		vm_map_lock(kmem_map);
		rv = uvm_map_checkprot(kmem_map, addr,
		    addr + sizeof(struct freelist), VM_PROT_WRITE);
		vm_map_unlock(kmem_map);

		if (__predict_false(rv == 0)) {
			printf("Data modified on freelist: "
			    "word %ld of object %p size %ld previous type %s "
			    "(invalid addr %p)\n",
			    (long)((int32_t *)&kbp->kb_next - (int32_t *)kbp),
			    va, size, "foo", kbp->kb_next);
#ifdef MALLOCLOG
			hitmlog(va);
#endif
			kbp->kb_next = NULL;
		}
	}
	/* Fill the fields that we've used with WEIRD_ADDR */
#ifdef _LP64
	freep->type = (struct malloc_type *)
	    (WEIRD_ADDR | (((u_long) WEIRD_ADDR) << 32));
#else
	freep->type = (struct malloc_type *) WEIRD_ADDR;
#endif
	end = (uint32_t *)&freep->next +
	    (sizeof(freep->next) / sizeof(int32_t));
	for (lp = (uint32_t *)&freep->next; lp < end; lp++)
		*lp = WEIRD_ADDR;
	/* and check that the data hasn't been modified. */
	end = (uint32_t *)&va[copysize];
	for (lp = (uint32_t *)va; lp < end; lp++) {
		if (__predict_true(*lp == WEIRD_ADDR))
			continue;
		printf("Data modified on freelist: "
		    "word %ld of object %p size %ld previous type %s "
		    "(0x%x != 0x%x)\n",
		    (long)(lp - (uint32_t *)va), va, size,
		    "bar", *lp, WEIRD_ADDR);
#ifdef MALLOCLOG
		hitmlog(va);
#endif
		break;
	}

	freep->spare0 = 0;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
#ifdef MALLOCLOG
	domlog(va, size, ksp, 1, file, line);
#endif
	mutex_spin_exit(&malloc_lock);
	if ((flags & M_ZERO) != 0)
		memset(va, 0, size);
	FREECHECK_OUT(&malloc_freecheck, (void *)va);
	return ((void *) va);
}
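/*
 * Typical call sketch (M_TEMP is a stock NetBSD malloc type; "struct
 * foo" is a placeholder):
 *
 *	struct foo *p = malloc(sizeof(*p), M_TEMP, M_WAITOK | M_ZERO);
 *	...
 *	free(p, M_TEMP);
 *
 * M_NOWAIT callers must be prepared for a NULL return; M_WAITOK
 * callers may instead sleep above until the type's ks_limit permits
 * the allocation.
 */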
/*
 * Free a block of memory allocated by malloc.
 */
#ifdef MALLOCLOG
void
_kern_free(void *addr, struct malloc_type *ksp, const char *file, long line)
#else
void
kern_free(void *addr, struct malloc_type *ksp)
#endif /* MALLOCLOG */
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct freelist *freep;
	long size;
#ifdef DIAGNOSTIC
	void *cp;
	int32_t *end, *lp;
	long alloc, copysize;
#endif
	FREECHECK_IN(&malloc_freecheck, addr);
#ifdef MALLOC_DEBUG
	if (debug_free(addr, ksp))
		return;
#endif
#ifdef DIAGNOSTIC
	/*
	 * Ensure that we're free'ing something that we could
	 * have allocated in the first place.  That is, check
	 * to see that the address is within kmem_map.
	 */
	if (__predict_false((vaddr_t)addr < vm_map_min(kmem_map) ||
	    (vaddr_t)addr >= vm_map_max(kmem_map)))
		panic("free: addr %p not within kmem_map", addr);
#endif
	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &kmembuckets[kup->ku_indx];
	LOCKDEBUG_MEM_CHECK(addr,
	    size <= MAXALLOCSAVE ? size : ctob(kup->ku_pagecnt));
	mutex_spin_enter(&malloc_lock);
#ifdef MALLOCLOG
	domlog(addr, 0, ksp, 2, file, line);
#endif
#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
		    addr, size, ksp->ks_shortdesc, alloc);
#endif /* DIAGNOSTIC */
	if (size > MAXALLOCSAVE) {
		uvm_km_free(kmem_map, (vaddr_t)addr, ctob(kup->ku_pagecnt),
		    UVM_KMF_WIRED);
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((void *)ksp);
		if (ksp->ks_inuse == 0)
			panic("free 1: inuse 0, probable double free");
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		mutex_spin_exit(&malloc_lock);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees.  Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (__predict_false(freep->spare0 == WEIRD_ADDR)) {
		for (cp = kbp->kb_next; cp;
		    cp = ((struct freelist *)cp)->next) {
			if (addr != cp)
				continue;
			printf("multiply freed item %p\n", addr);
#ifdef MALLOCLOG
			hitmlog(addr);
#endif
			panic("free: duplicated free");
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free.  Also, save the type being freed
	 * so we can list likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (int32_t *)&((char *)addr)[copysize];
	for (lp = (int32_t *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = ksp;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	}
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((void *)ksp);
	if (ksp->ks_inuse == 0)
		panic("free 2: inuse 0, probable double free");
	ksp->ks_inuse--;
#endif
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
	mutex_spin_exit(&malloc_lock);
}
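/*
 * What the DIAGNOSTIC poisoning above buys us, sketched with
 * illustrative values: after free(p, type), the first copysize bytes
 * of the object read 0xdeadbeef and freep->type remembers who freed
 * it.  If anyone keeps writing through the stale pointer, the next
 * malloc() from that bucket finds a word that is no longer WEIRD_ADDR
 * and prints "Data modified on freelist", naming the word offset
 * within the object.
 */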
/*
 * Change the size of a block of memory.
 */
void *
kern_realloc(void *curaddr, unsigned long newsize, struct malloc_type *ksp,
    int flags)
{
	struct kmemusage *kup;
	unsigned long cursize;
	void *newaddr;
#ifdef DIAGNOSTIC
	long alloc;
#endif
	/*
	 * realloc() with a NULL pointer is the same as malloc().
	 */
	if (curaddr == NULL)
		return (malloc(newsize, ksp, flags));
	/*
	 * realloc() with zero size is the same as free().
	 */
	if (newsize == 0) {
		free(curaddr, ksp);
		return (NULL);
	}
	if ((flags & M_NOWAIT) == 0) {
		ASSERT_SLEEPABLE();
	}
	/*
	 * Find out how large the old allocation was (and do some
	 * sanity checking).
	 */
	kup = btokup(curaddr);
	cursize = 1 << kup->ku_indx;
#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (cursize > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)curaddr & alloc) != 0)
		panic("realloc: "
		    "unaligned addr %p, size %ld, type %s, mask %ld\n",
		    curaddr, cursize, ksp->ks_shortdesc, alloc);
#endif /* DIAGNOSTIC */
	if (cursize > MAXALLOCSAVE)
		cursize = ctob(kup->ku_pagecnt);
	/*
	 * If we already actually have as much as they want, we're done.
	 */
	if (newsize <= cursize)
		return (curaddr);
	/*
	 * Can't satisfy the allocation with the existing block.
	 * Allocate a new one and copy the data.
	 */
	newaddr = malloc(newsize, ksp, flags);
	if (__predict_false(newaddr == NULL)) {
		/*
		 * malloc() failed, because flags included M_NOWAIT.
		 * Return NULL to indicate that failure.  The old
		 * pointer is still valid.
		 */
		return (NULL);
	}
	memcpy(newaddr, curaddr, cursize);
	/*
	 * We were successful: free the old allocation and return
	 * the new one.
	 */
	free(curaddr, ksp);
	return (newaddr);
}
/*
 * Round up size to the actual allocation size.
 */
unsigned long
malloc_roundup(unsigned long size)
{
	if (size > MAXALLOCSAVE)
		return (roundup(size, PAGE_SIZE));
	else
		return (1 << BUCKETINDX(size));
}
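/*
 * Example (assumes MINBUCKET == 4 and PAGE_SIZE == 4096):
 * malloc_roundup(24) == 32 and malloc_roundup(100) == 128, since small
 * requests are padded out to their power-of-two bucket; a request
 * larger than MAXALLOCSAVE is instead padded to the next whole page
 * boundary.
 */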
/*
 * Add a malloc type to the system.
 */
void
malloc_type_attach(struct malloc_type *type)
{

	if (nkmempages == 0)
		panic("malloc_type_attach: nkmempages == 0");

	if (type->ks_magic != M_MAGIC)
		panic("malloc_type_attach: bad magic");
#ifdef DIAGNOSTIC
	{
		struct malloc_type *ksp;
		for (ksp = kmemstatistics; ksp != NULL; ksp = ksp->ks_next) {
			if (ksp == type)
				panic("malloc_type_attach: already on list");
		}
	}
#endif
#ifdef KMEMSTATS
	if (type->ks_limit == 0)
		type->ks_limit = ((u_long)nkmempages << PAGE_SHIFT) * 6U / 10U;
#else
	type->ks_limit = 0;
#endif
	type->ks_next = kmemstatistics;
	kmemstatistics = type;
}
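/*
 * Usage sketch: types are normally declared with MALLOC_DEFINE and
 * attached automatically through the malloc_types link set walked in
 * kmeminit() below, e.g.
 *
 *	MALLOC_DEFINE(M_FOOBAR, "foobar", "example foobar structures");
 *
 * (M_FOOBAR and "foobar" are made-up names.)  The default ks_limit
 * computed above is 60% of the arena: with a 128 MB kmem_map a single
 * type may consume at most ~76.8 MB before M_WAITOK callers start
 * sleeping in malloc().
 */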
/*
 * Remove a malloc type from the system.
 */
void
malloc_type_detach(struct malloc_type *type)
{
	struct malloc_type *ksp;
#ifdef DIAGNOSTIC
	if (type->ks_magic != M_MAGIC)
		panic("malloc_type_detach: bad magic");
#endif
	if (type == kmemstatistics)
		kmemstatistics = type->ks_next;
	else {
		for (ksp = kmemstatistics; ksp->ks_next != NULL;
		    ksp = ksp->ks_next) {
			if (ksp->ks_next == type) {
				ksp->ks_next = type->ks_next;
				break;
			}
		}
#ifdef DIAGNOSTIC
		if (ksp->ks_next == NULL)
			panic("malloc_type_detach: not on list");
#endif
	}
	type->ks_next = NULL;
}
/*
 * Set the limit on a malloc type.
 */
void
malloc_type_setlimit(struct malloc_type *type, u_long limit)
{
#ifdef KMEMSTATS
	mutex_spin_enter(&malloc_lock);
	type->ks_limit = limit;
	mutex_spin_exit(&malloc_lock);
#endif
}
/*
 * Compute the number of pages that kmem_map will map, that is,
 * the size of the kernel malloc arena.
 */
void
kmeminit_nkmempages(void)
{
	int npages;
	if (nkmempages != 0) {
		/*
		 * It's already been set (by us being here before, or
		 * by patching or kernel config options), bail out now.
		 */
		return;
	}
	if (npages > NKMEMPAGES_MAX)
		npages = NKMEMPAGES_MAX;

	if (npages < NKMEMPAGES_MIN)
		npages = NKMEMPAGES_MIN;

	nkmempages = npages;
}
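/*
 * Worked example with hypothetical platform values: if NKMEMPAGES_MIN
 * covers 8 MB (2048 4 KB pages) and NKMEMPAGES_MAX covers 128 MB
 * (32768 pages), then a computed npages of 50000 is clamped down to
 * 32768 and one of 1000 is raised to 2048; anything in between is
 * used unchanged.
 */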
/*
 * Initialize the kernel memory allocator
 */
void
kmeminit(void)
{
	__link_set_decl(malloc_types, struct malloc_type);
	struct malloc_type * const *ksp;
	vaddr_t kmb, kml;
#ifdef KMEMSTATS
	long indx;
#endif
#if ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
		ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
#endif
#if (MAXALLOCSAVE > MINALLOCSIZE * 32768)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_big
#endif
#if (MAXALLOCSAVE < NBPG)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_small
#endif
	if (sizeof(struct freelist) > (1 << MINBUCKET))
		panic("minbucket too small/struct freelist too big");

	mutex_init(&malloc_lock, MUTEX_DEFAULT, IPL_VM);
	/*
	 * Compute the number of kmem_map pages, if we have not
	 * done so already.
	 */
	kmeminit_nkmempages();
	kmemusage = (struct kmemusage *) uvm_km_alloc(kernel_map,
	    (vsize_t)(nkmempages * sizeof(struct kmemusage)), 0,
	    UVM_KMF_WIRED|UVM_KMF_ZERO);
	kmem_map = uvm_km_suballoc(kernel_map, &kmb,
	    &kml, ((vsize_t)nkmempages << PAGE_SHIFT),
	    VM_MAP_INTRSAFE, false, &kmem_map_store);
	uvm_km_vacache_init(kmem_map, "kvakmem", 0);
	kmembase = (char *)kmb;
	kmemlimit = (char *)kml;
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= PAGE_SIZE)
			kmembuckets[indx].kb_elmpercl = 1;
		else
			kmembuckets[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
		kmembuckets[indx].kb_highwat =
		    5 * kmembuckets[indx].kb_elmpercl;
	}
#endif
	/* Attach all of the statically-linked malloc types. */
	__link_set_foreach(ksp, malloc_types)
		malloc_type_attach(*ksp);
}
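/*
 * Example of the bucket setup above (assumes PAGE_SIZE == 4096): the
 * 16-byte bucket gets kb_elmpercl == 256 (elements per page) and
 * kb_highwat == 1280; the 2048-byte bucket gets 2 and 10; buckets of
 * PAGE_SIZE and above hold one element per allocation, so 1 and 5.
 */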
#ifdef DDB
#include <ddb/db_output.h>

/*
 * Dump kmem statistics from ddb.
 *
 * usage: call dump_kmemstats
 */
void	dump_kmemstats(void);
void
dump_kmemstats(void)
{
#ifdef KMEMSTATS
	struct malloc_type *ksp;

	for (ksp = kmemstatistics; ksp != NULL; ksp = ksp->ks_next) {
		if (ksp->ks_memuse == 0)
			continue;
		db_printf("%s%.*s %ld\n", ksp->ks_shortdesc,
		    (int)(20 - strlen(ksp->ks_shortdesc)),
		    "                    ",
		    ksp->ks_memuse);
	}
#else
	db_printf("Kmem stats are not being collected.\n");
#endif /* KMEMSTATS */
}
#endif /* DDB */
/*
 * Diagnostic messages about "Data modified on
 * freelist" indicate a memory corruption, but
 * they do not help tracking it down.
 * This function can be called at various places
 * to sanity check malloc's freelist and discover
 * where the corruption takes place.
 */
static int
freelist_sanitycheck(void) {
	int i, j, rv;
	struct kmembuckets *kbp;
	struct freelist *freep;
	for (i = MINBUCKET; i <= MINBUCKET + 15; i++) {
		kbp = &kmembuckets[i];
		freep = (struct freelist *)kbp->kb_next;
		j = 0;
		while (freep) {
			vm_map_lock(kmem_map);
			rv = uvm_map_checkprot(kmem_map, (vaddr_t)freep,
			    (vaddr_t)freep + sizeof(struct freelist),
			    VM_PROT_WRITE);
			vm_map_unlock(kmem_map);
			if ((rv == 0) || (*(int *)freep != WEIRD_ADDR)) {
				printf("bucket %i, chunk %d at %p modified\n",
				    i, j, freep);
				return 1;
			}
			freep = (struct freelist *)freep->next;
			j++;
		}
	}

	return 0;
}