/*	$NetBSD: memalloc.c,v 1.2 2009/11/27 13:45:15 pooka Exp $	*/

/*
 * Copyright (c) 2009 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: memalloc.c,v 1.2 2009/11/27 13:45:15 pooka Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/systm.h>

#include <rump/rumpuser.h>
/*
 * Allocator "implementations" which relegate tasks to the host
 * allocator.
 */
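/*
 * Three kernel-facing interfaces are backed this way: malloc(9),
 * kmem(9) and pool(9)/pool_cache(9).  Every allocation is a direct
 * hypercall to the host allocator and no caching or accounting is
 * done on the rump side.
 */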
void
malloc_type_attach(struct malloc_type *type)
{

	return;
}

void
malloc_type_detach(struct malloc_type *type)
{

	return;
}
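/*
 * malloc(9): the boolean second argument to rumpuser_malloc() is derived
 * from M_CANFAIL|M_NOWAIT, i.e. presumably "is this allocation allowed
 * to fail?".  The host allocator does not zero memory, so M_ZERO is
 * handled here.
 */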
void *
kern_malloc(unsigned long size, struct malloc_type *type, int flags)
{
	void *rv;

	rv = rumpuser_malloc(size, (flags & (M_CANFAIL | M_NOWAIT)) != 0);
	if (rv && flags & M_ZERO)
		memset(rv, 0, size);

	return rv;
}
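/* realloc and free are likewise passed straight through to the host. */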
void *
kern_realloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
{

	return rumpuser_realloc(ptr, size, (flags & (M_CANFAIL|M_NOWAIT)) != 0);
}
void
kern_free(void *ptr, struct malloc_type *type)
{

	rumpuser_free(ptr);
}
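/*
 * The rest of the file provides the kmem(9) and pool(9)/pool_cache(9)
 * interfaces on top of the host allocator.  It is compiled only when
 * RUMP_USE_UNREAL_ALLOCATORS is defined; presumably the regular kernel
 * implementations of those interfaces are used otherwise.
 */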
#ifdef RUMP_USE_UNREAL_ALLOCATORS
void *
kmem_alloc(size_t size, km_flag_t kmflag)
{

	return rumpuser_malloc(size, kmflag == KM_NOSLEEP);
}
void *
kmem_zalloc(size_t size, km_flag_t kmflag)
{
	void *rv;

	rv = kmem_alloc(size, kmflag);
	if (rv)
		memset(rv, 0, size);

	return rv;
}
void
kmem_free(void *p, size_t size)
{

	rumpuser_free(p);
}
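/*
 * Dummy instances of pools that other kernel code expects to find,
 * e.g. pnbuf_cache/pnbuf_pool for pathname buffers and the default
 * pool_allocator_nointr.  They only need to exist; all actual memory
 * comes from the host.
 */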
struct pool_cache pnbuf_cache;
struct pool pnbuf_pool;
struct pool_allocator pool_allocator_nointr;
void
pool_subsystem_init(void)
{

	/* nothing to do: pools carry no global state here */
}
void
pool_init(struct pool *pp, size_t size, u_int align, u_int align_offset,
    int flags, const char *wchan, struct pool_allocator *palloc, int ipl)
{

	/* the item size is all pool_get() needs later on */
	pp->pr_size = size;
}
void
pool_destroy(struct pool *pp)
{

	return;
}
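/*
 * pool_cache(9) emulation: a pool_cache here is just the embedded pool
 * plus the constructor/destructor and their argument.  get/put run the
 * ctor/dtor around a plain pool_get()/pool_put(); there is no per-CPU
 * caching.
 */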
pool_cache_t
pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
    const char *wchan, struct pool_allocator *palloc, int ipl,
    int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
    void *arg)
{
	pool_cache_t pc;

	pc = rumpuser_malloc(sizeof(*pc), 0);
	pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
	    palloc, ipl, ctor, dtor, arg);

	return pc;
}
void
pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
    u_int align_offset, u_int flags, const char *wchan,
    struct pool_allocator *palloc, int ipl,
    int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
    void *arg)
{

	pool_init(&pc->pc_pool, size, align, align_offset, flags,
	    wchan, palloc, ipl);
	pc->pc_ctor = ctor;
	pc->pc_dtor = dtor;
	pc->pc_arg = arg;
}
void
pool_cache_destroy(pool_cache_t pc)
{

	pool_destroy(&pc->pc_pool);
	rumpuser_free(pc);
}
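/*
 * Physical addresses are not meaningful for these pools, so the _paddr
 * variants always report POOL_PADDR_INVALID and ignore the pa argument.
 */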
void *
pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
{
	void *item;

	item = pool_get(&pc->pc_pool, 0);
	if (item && pc->pc_ctor)
		pc->pc_ctor(pc->pc_arg, item, flags);
	if (pap)
		*pap = POOL_PADDR_INVALID;

	return item;
}
void
pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
{

	if (pc->pc_dtor)
		pc->pc_dtor(pc->pc_arg, object);
	pool_put(&pc->pc_pool, object);
}
bool
pool_cache_reclaim(pool_cache_t pc)
{

	/* nothing is cached, so there is nothing to reclaim */
	return true;
}
void
pool_cache_cpu_init(struct cpu_info *ci)
{

	return;
}
void *
pool_get(struct pool *pp, int flags)
{
	void *rv;

	if (pp->pr_size == 0)
		panic("%s: pool unit size 0.  not initialized?", __func__);

	/* every item is a fresh host allocation of the pool's item size */
	rv = rumpuser_malloc(pp->pr_size, 1);
	if (rv == NULL && (flags & PR_WAITOK && (flags & PR_LIMITFAIL) == 0))
		panic("%s: out of memory and PR_WAITOK", __func__);

	return rv;
}
void
pool_put(struct pool *pp, void *item)
{

	rumpuser_free(item);
}
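/*
 * The various tuning knobs below (watermarks, hard limits, priming)
 * make no practical difference when the host services every allocation
 * directly, so they are accepted and ignored.
 */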
void
pool_sethiwat(struct pool *pp, int n)
{

	return;
}
void
pool_setlowat(struct pool *pp, int n)
{

	return;
}
void
pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess,
    int ratecap)
{

	return;
}
void
pool_cache_setlowat(pool_cache_t pc, int n)
{

	return;
}
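/*
 * The drain hook is recorded in the embedded pool, but since nothing
 * here reacts to host memory pressure it is presumably never invoked.
 */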
void
pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
{

	pc->pc_pool.pr_drain_hook = fn;
	pc->pc_pool.pr_drain_hook_arg = arg;
}
int
pool_prime(struct pool *pp, int nitems)
{

	/* preallocation is pointless here */
	return 0;
}
/* XXX: for tmpfs, shouldn't be here */
void *pool_page_alloc_nointr(struct pool *, int);
void pool_page_free_nointr(struct pool *, void *);
void *
pool_page_alloc_nointr(struct pool *pp, int flags)
{

	return pool_get(pp, flags);
}
void
pool_page_free_nointr(struct pool *pp, void *item)
{

	pool_put(pp, item);
}
#endif /* RUMP_USE_UNREAL_ALLOCATORS */