/*
 * Copyright (c) 2006-2007 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/byteorder.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/debug.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>

#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#ifdef KMEM_DEBUG
#include <sys/queue.h>
#include <sys/stack.h>
#endif

#ifdef _KERNEL
MALLOC_DEFINE(M_SOLARIS, "solaris", "Solaris");
#else
#define malloc(size, type, flags)       malloc(size)
#define free(addr, type)                free(addr)
#endif

#ifdef KMEM_DEBUG
struct kmem_item {
        struct stack    stack;
        LIST_ENTRY(kmem_item) next;
};
static LIST_HEAD(, kmem_item) kmem_items;
static struct mtx kmem_items_mtx;
MTX_SYSINIT(kmem_items_mtx, &kmem_items_mtx, "kmem_items", MTX_DEF);
#endif  /* KMEM_DEBUG */
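
/*
 * Solaris-style allocation backend: a thin wrapper around FreeBSD's
 * malloc(9) with the M_SOLARIS malloc type.  With KMEM_DEBUG, every
 * allocation is prefixed by a struct kmem_item that records the
 * caller's stack and is linked onto kmem_items so leaks can be
 * reported at unload time.
 */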
void *
zfs_kmem_alloc(size_t size, int kmflags)
{
        void *p;
#ifdef KMEM_DEBUG
        struct kmem_item *i;

        size += sizeof (struct kmem_item);
#endif
        p = malloc(MAX(size, 16), M_SOLARIS, kmflags);
#ifndef _KERNEL
        if (kmflags & KM_SLEEP)
                assert(p != NULL);
#endif
#ifdef KMEM_DEBUG
        if (p != NULL) {
                i = p;
                p = (uint8_t *)p + sizeof (struct kmem_item);
                stack_save(&i->stack);
                mtx_lock(&kmem_items_mtx);
                LIST_INSERT_HEAD(&kmem_items, i, next);
                mtx_unlock(&kmem_items_mtx);
        }
#endif
        return (p);
}
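
/*
 * Release memory obtained from zfs_kmem_alloc().  With KMEM_DEBUG, the
 * tracking header is located and unlinked first, and the buffer is
 * poisoned with 0xDC to help catch use-after-free.
 */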
void
zfs_kmem_free(void *buf, size_t size __unused)
{
#ifdef KMEM_DEBUG
        if (buf == NULL) {
                printf("%s: attempt to free NULL\n", __func__);
                return;
        }
        struct kmem_item *i;

        buf = (uint8_t *)buf - sizeof (struct kmem_item);
        mtx_lock(&kmem_items_mtx);
        LIST_FOREACH(i, &kmem_items, next) {
                if (i == buf)
                        break;
        }
        ASSERT3P(i, !=, NULL);
        LIST_REMOVE(i, next);
        mtx_unlock(&kmem_items_mtx);
        memset(buf, 0xDC, MAX(size, 16));
#endif
        free(buf, M_SOLARIS);
}
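
/*
 * Remember the usable memory size once at boot: physical memory
 * (v_page_count * PAGE_SIZE), clamped to the kernel's kmem arena size.
 */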
static uint64_t kmem_size_val;

static void
kmem_size_init(void *unused __unused)
{

        kmem_size_val = (uint64_t)vm_cnt.v_page_count * PAGE_SIZE;
        if (kmem_size_val > vm_kmem_size)
                kmem_size_val = vm_kmem_size;
}
SYSINIT(kmem_size_init, SI_SUB_KMEM, SI_ORDER_ANY, kmem_size_init, NULL);

uint64_t
kmem_size(void)
{

        return (kmem_size_val);
}
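
/*
 * Adapt Solaris-style cache constructor/destructor callbacks to the
 * ctor/dtor signatures expected by UMA; the kmem_cache pointer is
 * passed through as the per-zone argument.
 */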
static int
kmem_std_constructor(void *mem, int size __unused, void *private, int flags)
{
        struct kmem_cache *cache = private;

        return (cache->kc_constructor(mem, cache->kc_private, flags));
}

static void
kmem_std_destructor(void *mem, int size __unused, void *private)
{
        struct kmem_cache *cache = private;

        cache->kc_destructor(mem, cache->kc_private);
}
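
/*
 * Create a Solaris kmem cache.  In kernel builds without KMEM_DEBUG the
 * cache is backed by a UMA zone; otherwise objects are carved out of
 * plain kmem_alloc() allocations of kc_size bytes.
 */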
kmem_cache_t *
kmem_cache_create(const char *name, size_t bufsize, size_t align,
    int (*constructor)(void *, void *, int), void (*destructor)(void *, void *),
    void (*reclaim)(void *) __unused, void *private, vmem_t *vmp, int cflags)
{
        kmem_cache_t *cache;

        ASSERT3P(vmp, ==, NULL);

        cache = kmem_alloc(sizeof (*cache), KM_SLEEP);
        strlcpy(cache->kc_name, name, sizeof (cache->kc_name));
        cache->kc_constructor = constructor;
        cache->kc_destructor = destructor;
        cache->kc_private = private;
#if defined(_KERNEL) && !defined(KMEM_DEBUG)
        cache->kc_zone = uma_zcreate(cache->kc_name, bufsize,
            constructor != NULL ? kmem_std_constructor : NULL,
            destructor != NULL ? kmem_std_destructor : NULL,
            NULL, NULL, align > 0 ? align - 1 : 0, cflags);
#else
        cache->kc_size = bufsize;
#endif

        return (cache);
}
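
/*
 * Tear down a cache created above, destroying the backing UMA zone when
 * one was created.
 */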
void
kmem_cache_destroy(kmem_cache_t *cache)
{
#if defined(_KERNEL) && !defined(KMEM_DEBUG)
        uma_zdestroy(cache->kc_zone);
#endif
        kmem_free(cache, sizeof (*cache));
}
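
/*
 * Allocate one object from the cache: through uma_zalloc_arg() when UMA
 * backs the cache, otherwise through kmem_alloc() plus an explicit
 * constructor call.
 */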
void *
kmem_cache_alloc(kmem_cache_t *cache, int flags)
{
#if defined(_KERNEL) && !defined(KMEM_DEBUG)
        return (uma_zalloc_arg(cache->kc_zone, cache, flags));
#else
        void *p;

        p = kmem_alloc(cache->kc_size, flags);
        if (p != NULL && cache->kc_constructor != NULL)
                kmem_std_constructor(p, cache->kc_size, cache, flags);
        return (p);
#endif
}
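
/*
 * Return one object to the cache, running the destructor ourselves in
 * the non-UMA case.
 */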
void
kmem_cache_free(kmem_cache_t *cache, void *buf)
{
#if defined(_KERNEL) && !defined(KMEM_DEBUG)
        uma_zfree_arg(cache->kc_zone, buf, cache);
#else
        if (cache->kc_destructor != NULL)
                kmem_std_destructor(buf, cache->kc_size, cache);
        kmem_free(buf, cache->kc_size);
#endif
}
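
/*
 * Typical consumer usage of the cache shim above looks roughly like the
 * sketch below (the object type and callbacks are illustrative, not
 * part of this file):
 *
 *      kmem_cache_t *zc = kmem_cache_create("example_cache",
 *          sizeof (struct example_obj), 0, example_ctor, example_dtor,
 *          NULL, NULL, NULL, 0);
 *      struct example_obj *o = kmem_cache_alloc(zc, KM_SLEEP);
 *      ...
 *      kmem_cache_free(zc, o);
 *      kmem_cache_destroy(zc);
 */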

/*
 * Allow our caller to determine if there are running reaps.
 *
 * This call is very conservative and may return B_TRUE even when
 * reaping activity isn't active.  If it returns B_FALSE, then reaping
 * activity is definitely inactive.
 */
boolean_t
kmem_cache_reap_active(void)
{
        return (B_FALSE);
}

/*
 * Reap (almost) everything soon.
 *
 * Note: this does not wait for the reap-tasks to complete.  Caller
 * should use kmem_cache_reap_active() (above) and/or moderation to
 * avoid scheduling too many reap-tasks.
 */
#ifdef _KERNEL
void
kmem_cache_reap_soon(kmem_cache_t *cache)
{
#ifndef KMEM_DEBUG
        uma_zone_reclaim(cache->kc_zone, UMA_RECLAIM_DRAIN);
#endif
}

void
kmem_reap(void)
{
        uma_reclaim(UMA_RECLAIM_TRIM);
}
#else
void
kmem_cache_reap_soon(kmem_cache_t *cache __unused)
{
}

void
kmem_reap(void)
{
}
#endif
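
/*
 * Minimal calloc() for consumers that expect the libc-style interface;
 * it maps onto a zeroed, non-sleeping kmem allocation.
 */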
void *
calloc(size_t n, size_t s)
{
        return (kmem_zalloc(n * s, KM_NOSLEEP));
}
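
/*
 * vasprintf()-style formatting into a kmem_alloc()ed buffer: size the
 * result with a NULL/0 vsnprintf() pass, then format into the buffer
 * using a saved copy of the argument list.
 */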
char *
kmem_vasprintf(const char *fmt, va_list adx)
{
        char *msg;
        va_list adx2;

        va_copy(adx2, adx);
        msg = kmem_alloc(vsnprintf(NULL, 0, fmt, adx) + 1, KM_SLEEP);
        (void) vsprintf(msg, fmt, adx2);
        va_end(adx2);

        return (msg);
}

#include <vm/uma_int.h>
#ifdef KMEM_DEBUG
#error "KMEM_DEBUG not currently supported"
#endif
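
/*
 * Expose the backing UMA zone's current object count and per-entry size
 * (reading uz_size is why <vm/uma_int.h> is included above).
 */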
uint64_t
spl_kmem_cache_inuse(kmem_cache_t *cache)
{
        return (uma_zone_get_cur(cache->kc_zone));
}

uint64_t
spl_kmem_cache_entry_size(kmem_cache_t *cache)
{
        return (cache->kc_zone->uz_size);
}

/*
 * Register a move callback for cache defragmentation.
 * XXX: Unimplemented but harmless to stub out for now.
 */
void
spl_kmem_cache_set_move(kmem_cache_t *skc,
    kmem_cbrc_t (move)(void *, void *, size_t, void *))
{
        ASSERT3P(move, !=, NULL);
}
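
/*
 * KMEM_DEBUG leak report: at module unload, walk kmem_items and print
 * the address and saved allocation stack of every item that was never
 * freed.
 */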
#ifdef KMEM_DEBUG
void kmem_show(void *);
void
kmem_show(void *dummy __unused)
{
        struct kmem_item *i;

        mtx_lock(&kmem_items_mtx);
        if (LIST_EMPTY(&kmem_items))
                printf("KMEM_DEBUG: No leaked elements.\n");
        else {
                printf("KMEM_DEBUG: Leaked elements:\n\n");
                LIST_FOREACH(i, &kmem_items, next) {
                        printf("address=%p\n", i);
                        stack_print_ddb(&i->stack);
                }
        }
        mtx_unlock(&kmem_items_mtx);
}

SYSUNINIT(sol_kmem, SI_SUB_CPU, SI_ORDER_FIRST, kmem_show, NULL);
#endif  /* KMEM_DEBUG */