/*-
 * Copyright (c) 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2006 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * allocator of kernel wired memory.
 *
 * TODO:
 * -	is an "intrsafe" version worth having?  maybe..
 */

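/*
 * Editor's sketch (derived from the code below, not part of the original
 * sources): the rough call chain is
 *
 *	kmem_alloc()/kmem_free()
 *	  -> per-size pool caches (kmem_cache[]) for mid-sized requests
 *	  -> vmem arena "kmem" (quantum caches up to KMEM_QCACHE_MAX,
 *	     direct arena allocations for everything else)
 *	  -> kmem_backend_alloc()/kmem_backend_free()
 *	  -> uvm_km_alloc()/uvm_km_free() on kernel_map (wired, page-sized)
 */
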
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD$");

#include <sys/param.h>
#include <sys/callback.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/debug.h>
#include <sys/lockdebug.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_map.h>
#include <uvm/uvm_kmguard.h>

#include <lib/libkern/libkern.h>

#define	KMEM_QUANTUM_SIZE	(ALIGNBYTES + 1)
#define	KMEM_QCACHE_MAX		(KMEM_QUANTUM_SIZE * 32)
#define	KMEM_CACHE_COUNT	16

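/*
 * Illustrative arithmetic (editor's example, values depend on the port):
 * with ALIGNBYTES == 7, KMEM_QUANTUM_SIZE is 8 and the vmem quantum caches
 * cover requests up to KMEM_QCACHE_MAX = 8 * 32 = 256 bytes; with
 * ALIGNBYTES == 3 the corresponding values are 4 and 128.
 */
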
typedef struct kmem_cache {
	pool_cache_t		kc_cache;
	struct pool_allocator	kc_pa;
	char			kc_name[12];	/* "kmem-<size>" */
} kmem_cache_t;

static vmem_t *kmem_arena;
static struct callback_entry kmem_kva_reclaim_entry;

static kmem_cache_t kmem_cache[KMEM_CACHE_COUNT + 1];
static size_t kmem_cache_max;
static size_t kmem_cache_min;
static size_t kmem_cache_mask;
static int kmem_cache_shift;

#if defined(DEBUG)
int kmem_guard_depth;
size_t kmem_guard_size;
static struct uvm_kmguard kmem_guard;
static void *kmem_freecheck;
#define	KMEM_POISON
#define	KMEM_REDZONE
#define	KMEM_SIZE
#define	KMEM_GUARD
#endif /* defined(DEBUG) */

#if defined(KMEM_POISON)
static void kmem_poison_fill(void *, size_t);
static void kmem_poison_check(void *, size_t);
#else /* defined(KMEM_POISON) */
#define	kmem_poison_fill(p, sz)		/* nothing */
#define	kmem_poison_check(p, sz)	/* nothing */
#endif /* defined(KMEM_POISON) */

#if defined(KMEM_REDZONE)
#define	REDZONE_SIZE	1
#else /* defined(KMEM_REDZONE) */
#define	REDZONE_SIZE	0
#endif /* defined(KMEM_REDZONE) */

#if defined(KMEM_SIZE)
#define	SIZE_SIZE	(max(KMEM_QUANTUM_SIZE, sizeof(size_t)))
static void kmem_size_set(void *, size_t);
static void kmem_size_check(const void *, size_t);
#else /* defined(KMEM_SIZE) */
#define	SIZE_SIZE	0
#define	kmem_size_set(p, sz)	/* nothing */
#define	kmem_size_check(p, sz)	/* nothing */
#endif /* defined(KMEM_SIZE) */

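/*
 * Editor's note (layout sketch, derived from the code below): with
 * KMEM_SIZE and KMEM_REDZONE enabled, an allocation looks roughly like
 *
 *	[ SIZE_SIZE size header | caller's data | 1-byte redzone | poison ]
 *	                        ^
 *	                        pointer returned by kmem_alloc()
 *
 * kmem_alloc() records the internal (header + data + redzone) size in the
 * header and returns the address just past it; kmem_free() steps back by
 * SIZE_SIZE and cross-checks that header via kmem_size_check().
 */
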
static vmem_addr_t kmem_backend_alloc(vmem_t *, vmem_size_t, vmem_size_t *,
    vm_flag_t);
static void kmem_backend_free(vmem_t *, vmem_addr_t, vmem_size_t);
static int kmem_kva_reclaim_callback(struct callback_entry *, void *, void *);

static inline vm_flag_t
kmf_to_vmf(km_flag_t kmflags)
{
	vm_flag_t vmflags;

	KASSERT((kmflags & (KM_SLEEP|KM_NOSLEEP)) != 0);
	KASSERT((~kmflags & (KM_SLEEP|KM_NOSLEEP)) != 0);

	vmflags = 0;
	if ((kmflags & KM_SLEEP) != 0) {
		vmflags |= VM_SLEEP;
	}
	if ((kmflags & KM_NOSLEEP) != 0) {
		vmflags |= VM_NOSLEEP;
	}

	return vmflags;
}

static void *
kmem_poolpage_alloc(struct pool *pool, int prflags)
{

	CTASSERT(KM_SLEEP == PR_WAITOK);
	CTASSERT(KM_NOSLEEP == PR_NOWAIT);

	return (void *)vmem_alloc(kmem_arena, pool->pr_alloc->pa_pagesz,
	    kmf_to_vmf(prflags) | VM_INSTANTFIT);
}

static void
kmem_poolpage_free(struct pool *pool, void *addr)
{

	vmem_free(kmem_arena, (vmem_addr_t)addr, pool->pr_alloc->pa_pagesz);
}

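/*
 * Editor's note: kmem_poolpage_alloc()/kmem_poolpage_free() are the
 * pool_allocator hooks used by the per-size pool caches created in
 * kmem_init(); they take their pages from the same "kmem" vmem arena
 * that serves direct allocations, so all kmem memory ultimately comes
 * out of kmem_arena.
 */
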
/*
 * kmem_alloc: allocate wired memory.
 *
 * => must not be called from interrupt context.
 */

void *
kmem_alloc(size_t size, km_flag_t kmflags)
{
	kmem_cache_t *kc;
	uint8_t *p;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

#if defined(KMEM_GUARD)
	if (size <= kmem_guard_size) {
		return uvm_kmguard_alloc(&kmem_guard, size,
		    (kmflags & KM_SLEEP) != 0);
	}
#endif /* defined(KMEM_GUARD) */

	size += REDZONE_SIZE + SIZE_SIZE;
	if (size >= kmem_cache_min && size <= kmem_cache_max) {
		kc = &kmem_cache[(size + kmem_cache_mask) >> kmem_cache_shift];
		KASSERT(size <= kc->kc_pa.pa_pagesz);
		CTASSERT(KM_SLEEP == PR_WAITOK);
		CTASSERT(KM_NOSLEEP == PR_NOWAIT);
		kmflags &= (KM_SLEEP | KM_NOSLEEP);
		p = pool_cache_get(kc->kc_cache, kmflags);
	} else {
		p = (void *)vmem_alloc(kmem_arena, size,
		    kmf_to_vmf(kmflags) | VM_INSTANTFIT);
	}
	if (__predict_true(p != NULL)) {
		kmem_poison_check(p, kmem_roundup_size(size));
		FREECHECK_OUT(&kmem_freecheck, p);
		kmem_size_set(p, size);
		p = (uint8_t *)p + SIZE_SIZE;
	}
	return p;
}

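/*
 * Illustrative usage (editor's example, not from the original sources):
 * a caller must pass kmem_free() the same size it passed to kmem_alloc(),
 * e.g. for a hypothetical structure:
 *
 *	struct frobnitz *f;
 *
 *	f = kmem_alloc(sizeof(*f), KM_SLEEP);
 *	if (f == NULL)
 *		return ENOMEM;
 *	...
 *	kmem_free(f, sizeof(*f));
 *
 * A mismatched size is caught by kmem_size_check() when KMEM_SIZE is
 * enabled.
 */
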
/*
 * kmem_zalloc: allocate zeroed wired memory.
 *
 * => must not be called from interrupt context.
 */

void *
kmem_zalloc(size_t size, km_flag_t kmflags)
{
	void *p;

	p = kmem_alloc(size, kmflags);
	if (p != NULL) {
		memset(p, 0, size);
	}
	return p;
}

/*
 * kmem_free: free wired memory allocated by kmem_alloc.
 *
 * => must not be called from interrupt context.
 */

void
kmem_free(void *p, size_t size)
{
	kmem_cache_t *kc;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

#if defined(KMEM_GUARD)
	if (size <= kmem_guard_size) {
		uvm_kmguard_free(&kmem_guard, size, p);
		return;
	}
#endif /* defined(KMEM_GUARD) */

	size += SIZE_SIZE;
	p = (uint8_t *)p - SIZE_SIZE;
	kmem_size_check(p, size + REDZONE_SIZE);
	FREECHECK_IN(&kmem_freecheck, p);
	LOCKDEBUG_MEM_CHECK(p, size);
	kmem_poison_check((char *)p + size,
	    kmem_roundup_size(size + REDZONE_SIZE) - size);
	kmem_poison_fill(p, size);
	size += REDZONE_SIZE;
	if (size >= kmem_cache_min && size <= kmem_cache_max) {
		kc = &kmem_cache[(size + kmem_cache_mask) >> kmem_cache_shift];
		KASSERT(size <= kc->kc_pa.pa_pagesz);
		pool_cache_put(kc->kc_cache, p);
	} else {
		vmem_free(kmem_arena, (vmem_addr_t)p, size);
	}
}

void
kmem_init(void)
{
	kmem_cache_t *kc;
	size_t sz;
	int i;

#if defined(KMEM_GUARD)
	uvm_kmguard_init(&kmem_guard, &kmem_guard_depth, &kmem_guard_size,
	    kernel_map);
#endif /* defined(KMEM_GUARD) */

	kmem_arena = vmem_create("kmem", 0, 0, KMEM_QUANTUM_SIZE,
	    kmem_backend_alloc, kmem_backend_free, NULL, KMEM_QCACHE_MAX,
	    VM_SLEEP, IPL_NONE);
	callback_register(&vm_map_to_kernel(kernel_map)->vmk_reclaim_callback,
	    &kmem_kva_reclaim_entry, kmem_arena, kmem_kva_reclaim_callback);

	/*
	 * kmem caches start at twice the size of the largest vmem qcache
	 * and end at PAGE_SIZE or earlier.  assert that KMEM_QCACHE_MAX
	 * is a power of two.
	 */
	KASSERT(ffs(KMEM_QCACHE_MAX) != 0);
	KASSERT(KMEM_QCACHE_MAX - (1 << (ffs(KMEM_QCACHE_MAX) - 1)) == 0);
	kmem_cache_shift = ffs(KMEM_QCACHE_MAX);
	kmem_cache_min = 1 << kmem_cache_shift;
	kmem_cache_mask = kmem_cache_min - 1;
	for (i = 1; i <= KMEM_CACHE_COUNT; i++) {
		sz = i << kmem_cache_shift;
		if (sz > PAGE_SIZE) {
			break;
		}
		kmem_cache_max = sz;
		kc = &kmem_cache[i];
		kc->kc_pa.pa_pagesz = sz;
		kc->kc_pa.pa_alloc = kmem_poolpage_alloc;
		kc->kc_pa.pa_free = kmem_poolpage_free;
		sprintf(kc->kc_name, "kmem-%zu", sz);
		kc->kc_cache = pool_cache_init(sz,
		    KMEM_QUANTUM_SIZE, 0, PR_NOALIGN | PR_NOTOUCH,
		    kc->kc_name, &kc->kc_pa, IPL_NONE,
		    NULL, NULL, NULL);
		KASSERT(kc->kc_cache != NULL);
	}
}

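/*
 * Editor's worked example (illustrative; assumes KMEM_QUANTUM_SIZE == 8
 * and PAGE_SIZE == 4096): KMEM_QCACHE_MAX is 256 and ffs(256) is 9, so
 * kmem_cache_shift is 9 and kmem_cache_min is 512 -- twice the largest
 * qcache size, as the comment above says.  The loop then creates pool
 * caches "kmem-512", "kmem-1024", ... in 512-byte steps up to
 * "kmem-4096"; anything larger than kmem_cache_max is allocated straight
 * from the vmem arena.
 */
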
size_t
kmem_roundup_size(size_t size)
{

	return vmem_roundup_size(kmem_arena, size);
}

static vmem_addr_t
kmem_backend_alloc(vmem_t *dummy, vmem_size_t size, vmem_size_t *resultsize,
    vm_flag_t vmflags)
{
	uvm_flag_t uflags;
	vaddr_t va;

	KASSERT(dummy == NULL);
	KASSERT(size != 0);
	KASSERT((vmflags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~vmflags & (VM_SLEEP|VM_NOSLEEP)) != 0);

	if ((vmflags & VM_NOSLEEP) != 0) {
		uflags = UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT;
	} else {
		uflags = UVM_KMF_WAITVA;
	}
	*resultsize = size = round_page(size);
	va = uvm_km_alloc(kernel_map, size, 0,
	    uflags | UVM_KMF_WIRED | UVM_KMF_CANFAIL);
	if (va != 0) {
		kmem_poison_fill((void *)va, size);
	}
	return (vmem_addr_t)va;
}

static void
kmem_backend_free(vmem_t *dummy, vmem_addr_t addr, vmem_size_t size)
{

	KASSERT(dummy == NULL);
	KASSERT(size == round_page(size));

	kmem_poison_check((void *)addr, size);
	uvm_km_free(kernel_map, (vaddr_t)addr, size, UVM_KMF_WIRED);
}

static int
kmem_kva_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
{
	vmem_t *vm = obj;

	vmem_reap(vm);
	return CALLBACK_CHAIN_CONTINUE;
}

#if defined(KMEM_POISON)

#if defined(_LP64)
#define	PRIME	0x9e37fffffffc0001UL
#else /* defined(_LP64) */
#define	PRIME	0x9e3779b1
#endif /* defined(_LP64) */

static inline uint8_t
kmem_poison_pattern(const void *p)
{

	return (uint8_t)((((uintptr_t)p) * PRIME)
	    >> ((sizeof(uintptr_t) - sizeof(uint8_t))) * CHAR_BIT);
}

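/*
 * Editor's note: each byte's expected poison value is derived from its
 * own address (the top byte of address * PRIME), so corruption that
 * copies or shifts poisoned bytes to a different address is still
 * detected.  Illustrative value, assuming a 32-bit uintptr_t: for
 * cp == (void *)0x1000 the pattern is
 * (uint8_t)((0x1000 * 0x9e3779b1) >> 24) == 0x77.
 */
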
static void
kmem_poison_fill(void *p, size_t sz)
{
	uint8_t *cp;
	const uint8_t *ep;

	cp = p;
	ep = cp + sz;
	while (cp < ep) {
		*cp = kmem_poison_pattern(cp);
		cp++;
	}
}

static void
kmem_poison_check(void *p, size_t sz)
{
	uint8_t *cp;
	const uint8_t *ep;

	cp = p;
	ep = cp + sz;
	while (cp < ep) {
		const uint8_t expected = kmem_poison_pattern(cp);

		if (*cp != expected) {
			panic("%s: %p: 0x%02x != 0x%02x\n",
			    __func__, cp, *cp, expected);
		}
		cp++;
	}
}

#endif /* defined(KMEM_POISON) */

#if defined(KMEM_SIZE)
static void
kmem_size_set(void *p, size_t sz)
{

	memcpy(p, &sz, sizeof(sz));
}

static void
kmem_size_check(const void *p, size_t sz)
{
	size_t psz;

	memcpy(&psz, p, sizeof(psz));
	if (psz != sz) {
		panic("kmem_free(%p, %zu) != allocated size %zu",
		    (const uint8_t *)p + SIZE_SIZE, sz - SIZE_SIZE, psz);
	}
}
#endif /* defined(KMEM_SIZE) */