/*	$NetBSD: kern_malloc_debug.c,v 1.21 2009/09/13 18:45:11 pooka Exp $	*/

/*
 * Copyright (c) 1999, 2000 Artur Grabowski <art@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * OpenBSD: kern_malloc_debug.c,v 1.10 2001/07/26 13:33:52 art Exp
 */
/*
 * This really belongs in kern/kern_malloc.c, but it was too much pollution.
 */

/*
 * It's only possible to debug one type/size at a time. The question is
 * whether this is a limitation or a feature. We never want to run this as
 * the default malloc because we'll run out of memory really fast. Adding
 * more types would also add to the complexity of the code.
 *
 * This is really simple. Every malloc() allocates two virtual pages;
 * the second page is left unmapped, and the value returned is aligned
 * so that it ends at (or very close to) the page boundary to catch overflows.
 * Every free() unmaps the first page so that we can catch any dangling
 * accesses to it.
 * To minimize the risk of writes to recycled chunks we keep an LRU of the
 * most recently freed chunks. Its length is controlled by
 * MALLOC_DEBUG_CHUNKS.
 *
 * Don't expect any performance.
 *
 * TODO:
 *  - support for size >= PAGE_SIZE
 *  - add support to the fault handler to give better diagnostics if we fail.
 */
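/*
 * Illustrative layout example (assuming PAGE_SIZE == 4096 and
 * sizeof(long) == 8): for malloc(100), debug_malloc() returns
 *
 *	md_va + 4096 - roundup(100, 8) = md_va + 0xf98,
 *
 * so the 100 bytes end at md_va + 0xffc. The 4 slack bytes up to the
 * page boundary go unchecked, but any access at md_va + 0x1000 or
 * beyond lands on the unmapped second page and faults immediately.
 */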
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_malloc_debug.c,v 1.21 2009/09/13 18:45:11 pooka Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/pool.h>

#include <uvm/uvm.h>
/*
 * debug_malloc_type and debug_malloc_size define the type and size of
 * memory to be debugged. Use 0 for a wildcard. debug_malloc_size_lo
 * is the lower limit and debug_malloc_size_hi the upper limit of sizes
 * being debugged; 0 will not work as a wildcard for the upper limit.
 * For any debugging to take place, type must be != NULL, size must be >= 0,
 * and if the limits are being used, size must be set to 0.
 * See /usr/src/sys/sys/malloc.h and malloc(9) for a list of types.
 *
 * Although these are variables, it's a really bad idea to change the type
 * while any memory chunks of that type are in use. It's fine to change the
 * size at runtime.
 */
struct malloc_type *debug_malloc_type = NULL;
int debug_malloc_size = -1;
int debug_malloc_size_lo = -1;
int debug_malloc_size_hi = -1;
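/*
 * Illustrative settings (M_TEMP stands in for whatever type is being
 * chased; set these from the debugger or early in boot):
 *
 *	debug_malloc_type = M_TEMP;	debug all M_TEMP allocations
 *	debug_malloc_size = 0;		of any size, or
 *
 *	debug_malloc_size = 0;		use the limits instead:
 *	debug_malloc_size_lo = 64;	only sizes in [64, 128]
 *	debug_malloc_size_hi = 128;
 */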
/*
 * MALLOC_DEBUG_CHUNKS is the number of memory chunks we require on the
 * freelist before we reuse them.
 */
#define MALLOC_DEBUG_CHUNKS 16
void debug_malloc_allocate_free(int);
struct debug_malloc_entry {
	TAILQ_ENTRY(debug_malloc_entry) md_list;
	vaddr_t md_va;
	paddr_t md_pa;
	size_t md_size;
	struct malloc_type *md_type;
};

TAILQ_HEAD(,debug_malloc_entry) debug_malloc_freelist =
    TAILQ_HEAD_INITIALIZER(debug_malloc_freelist);
TAILQ_HEAD(,debug_malloc_entry) debug_malloc_usedlist =
    TAILQ_HEAD_INITIALIZER(debug_malloc_usedlist);
int debug_malloc_allocs;
int debug_malloc_frees;
int debug_malloc_pages;
int debug_malloc_chunks_on_freelist;

static struct pool debug_malloc_pool;
void
debug_malloc_init(void)
{

	pool_init(&debug_malloc_pool, sizeof(struct debug_malloc_entry),
	    0, 0, 0, "mdbepl", NULL, IPL_VM);
}
int
debug_malloc(unsigned long size, struct malloc_type *type, int flags,
    void **addr)
{
	struct debug_malloc_entry *md = NULL;
	int s, wait = !(flags & M_NOWAIT);

	/* Careful not to compare unsigned long to int -1 */
	if ((type != debug_malloc_type && debug_malloc_type != 0) ||
	    (size != debug_malloc_size && debug_malloc_size != 0) ||
	    (debug_malloc_size_lo != -1 && size < debug_malloc_size_lo) ||
	    (debug_malloc_size_hi != -1 && size > debug_malloc_size_hi))
		return (0);

	/* XXX - fix later */
	if (size > PAGE_SIZE)
		return (0);

	s = splvm();
	if (debug_malloc_chunks_on_freelist < MALLOC_DEBUG_CHUNKS)
		debug_malloc_allocate_free(wait);

	md = TAILQ_FIRST(&debug_malloc_freelist);
	if (md == NULL) {
		splx(s);
		return (0);
	}
	TAILQ_REMOVE(&debug_malloc_freelist, md, md_list);
	debug_malloc_chunks_on_freelist--;

	TAILQ_INSERT_HEAD(&debug_malloc_usedlist, md, md_list);
	debug_malloc_allocs++;
	splx(s);

	pmap_kenter_pa(md->md_va, md->md_pa,
	    VM_PROT_READ|VM_PROT_WRITE|PMAP_KMPAGE, 0);
	pmap_update(pmap_kernel());

	md->md_size = size;
	md->md_type = type;

	/*
	 * Align the returned addr so that it ends where the first page
	 * ends. roundup to get decent alignment.
	 */
	*addr = (void *)(md->md_va + PAGE_SIZE - roundup(size, sizeof(long)));
	if (*addr != NULL && (flags & M_ZERO))
		memset(*addr, 0, size);
	return (1);
}
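/*
 * Caller contract, sketched (this is roughly what the MALLOC_DEBUG hook
 * in kern_malloc.c is expected to look like, not a verbatim copy):
 *
 *	if (debug_malloc(size, type, flags, &va))
 *		return ((void *)va);
 *	... otherwise fall through to the normal allocator ...
 *
 * A return of 1 means we took over the allocation; 0 means the request
 * didn't match the debugged type/size and malloc() must service it
 * itself. debug_free() below mirrors this for free().
 */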
int
debug_free(void *addr, struct malloc_type *type)
{
	struct debug_malloc_entry *md;
	vaddr_t va;
	int s;

	if (type != debug_malloc_type && debug_malloc_type != 0)
		return (0);

	/*
	 * trunc_page to get the address of the page.
	 */
	va = trunc_page((vaddr_t)addr);

	s = splvm();
	TAILQ_FOREACH(md, &debug_malloc_usedlist, md_list)
		if (md->md_va == va)
			break;

	/*
	 * If we are not responsible for this entry, let the normal free
	 * handle it.
	 */
	if (md == NULL) {
		/*
		 * Sanity check: catch multiple frees.
		 */
		TAILQ_FOREACH(md, &debug_malloc_freelist, md_list)
			if (md->md_va == va)
				panic("debug_free: already free");
		splx(s);
		return (0);
	}

	debug_malloc_frees++;
	TAILQ_REMOVE(&debug_malloc_usedlist, md, md_list);

	TAILQ_INSERT_TAIL(&debug_malloc_freelist, md, md_list);
	debug_malloc_chunks_on_freelist++;
	/*
	 * Unmap the page.
	 */
	pmap_kremove(md->md_va, PAGE_SIZE);
	pmap_update(pmap_kernel());
	splx(s);

	return (1);
}
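/*
 * Note: once a chunk is freed above, its page is unmapped, so a dangling
 * read or write faults instead of silently corrupting recycled memory.
 * The chunk isn't remapped and handed out again until it has worked its
 * way through the MALLOC_DEBUG_CHUNKS-deep freelist.
 */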
/*
 * Add one chunk to the freelist.
 *
 * Called at splvm.
 */
void
debug_malloc_allocate_free(int wait)
{
	vaddr_t va, offset;
	struct vm_page *pg;
	struct debug_malloc_entry *md;

	md = pool_get(&debug_malloc_pool, wait ? PR_WAITOK : PR_NOWAIT);
	if (md == NULL)
		return;

	va = uvm_km_alloc(kmem_map, PAGE_SIZE * 2, 0,
	    UVM_KMF_VAONLY | (wait ? 0 : UVM_KMF_NOWAIT));
	if (va == 0) {
		pool_put(&debug_malloc_pool, md);
		return;
	}

	offset = va - vm_map_min(kernel_map);
	for (;;) {
		pg = uvm_pagealloc(NULL, offset, NULL, 0);
		if (pg) {
			pg->flags &= ~PG_BUSY;	/* new page */
			UVM_PAGE_OWN(pg, NULL);
		}

		if (pg)
			break;

		if (wait == 0) {
			uvm_km_free(kmem_map, va, PAGE_SIZE * 2,
			    UVM_KMF_VAONLY);
			pool_put(&debug_malloc_pool, md);
			return;
		}
		uvm_wait("debug_malloc");
	}

	md->md_va = va;
	md->md_pa = VM_PAGE_TO_PHYS(pg);

	debug_malloc_pages++;
	TAILQ_INSERT_HEAD(&debug_malloc_freelist, md, md_list);
	debug_malloc_chunks_on_freelist++;
}
void
debug_malloc_print(void)
{

	debug_malloc_printit(printf, 0);
}
void
debug_malloc_printit(void (*pr)(const char *, ...), vaddr_t addr)
{
	struct debug_malloc_entry *md;

	if (addr) {
		TAILQ_FOREACH(md, &debug_malloc_freelist, md_list) {
			if (addr >= md->md_va &&
			    addr < md->md_va + 2 * PAGE_SIZE) {
				(*pr)("Memory at address 0x%x is in a freed "
				    "area. type %s, size: %d\n",
				    addr, md->md_type->ks_shortdesc,
				    md->md_size);
				return;
			}
		}
		TAILQ_FOREACH(md, &debug_malloc_usedlist, md_list) {
			if (addr >= md->md_va + PAGE_SIZE &&
			    addr < md->md_va + 2 * PAGE_SIZE) {
				(*pr)("Memory at address 0x%x is just outside "
				    "an allocated area. type %s, size: %d\n",
				    addr, md->md_type->ks_shortdesc,
				    md->md_size);
				return;
			}
		}
		(*pr)("Memory at address 0x%x is outside debugged malloc.\n",
		    addr);
		return;
	}

	(*pr)("allocs: %d\n", debug_malloc_allocs);
	(*pr)("frees: %d\n", debug_malloc_frees);
	(*pr)("pages used: %d\n", debug_malloc_pages);
	(*pr)("chunks on freelist: %d\n", debug_malloc_chunks_on_freelist);

	(*pr)("\taddr:\tsize:\n");
	(*pr)("free chunks:\n");
	TAILQ_FOREACH(md, &debug_malloc_freelist, md_list)
		(*pr)("\t0x%x\t0x%x\t%s\n", md->md_va, md->md_size,
		    md->md_type->ks_shortdesc);
	(*pr)("used chunks:\n");
	TAILQ_FOREACH(md, &debug_malloc_usedlist, md_list)
		(*pr)("\t0x%x\t0x%x\t%s\n", md->md_va, md->md_size,
		    md->md_type->ks_shortdesc);
}
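/*
 * Usage sketch: from ddb, "call debug_malloc_print" dumps the counters
 * and both chunk lists; debug_malloc_printit() can also be handed
 * db_printf and a faulting address to classify that address against the
 * freed and used lists.
 */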