Expand PMF_FN_* macros.
[netbsd-mini2440.git] / sys / rump / librump / rumpkern / memalloc.c
blob3a6030fb180f022b3d0c967e9ca37e86404de175
/*	$NetBSD: memalloc.c,v 1.2 2009/11/27 13:45:15 pooka Exp $	*/

/*
 * Copyright (c) 2009 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD");
31 #include <sys/param.h>
32 #include <sys/kmem.h>
33 #include <sys/malloc.h>
34 #include <sys/pool.h>
35 #include <sys/vmem.h>
37 #include <rump/rumpuser.h>
/*
 * Allocator "implementations" which relegate tasks to the host
 * libc malloc.
 *
 * Supported:
 *   + malloc
 *   + kmem
 *   + pool
 *   + pool_cache
 */

/*
 * malloc
 */
/*
 * Attach a malloc type.  The rump allocator keeps no per-type
 * statistics, so there is nothing to set up.
 */
void
malloc_type_attach(struct malloc_type *type)
{

	/* nothing to do */
}
/*
 * Detach a malloc type.  No per-type state exists, so this is a no-op.
 */
void
malloc_type_detach(struct malloc_type *type)
{

	/* nothing to do */
}
68 void *
69 kern_malloc(unsigned long size, struct malloc_type *type, int flags)
71 void *rv;
73 rv = rumpuser_malloc(size, (flags & (M_CANFAIL | M_NOWAIT)) != 0);
74 if (rv && flags & M_ZERO)
75 memset(rv, 0, size);
77 return rv;
80 void *
81 kern_realloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
84 return rumpuser_realloc(ptr, size, (flags & (M_CANFAIL|M_NOWAIT)) != 0);
/*
 * free(9) front-end: hand the pointer back to the host.
 */
void
kern_free(void *ptr, struct malloc_type *type)
{

	rumpuser_free(ptr);
}
/*
 * Kmem
 */
98 #ifdef RUMP_USE_UNREAL_ALLOCATORS
/*
 * Initialize the kmem(9) subsystem.  The rump variant needs no setup.
 *
 * Fix: use "(void)" instead of "()" — an empty parameter list in a
 * definition declares an unprototyped function in C (pre-C23).
 */
void
kmem_init(void)
{

	/* nothing to do */
}
106 void *
107 kmem_alloc(size_t size, km_flag_t kmflag)
110 return rumpuser_malloc(size, kmflag == KM_NOSLEEP);
113 void *
114 kmem_zalloc(size_t size, km_flag_t kmflag)
116 void *rv;
118 rv = kmem_alloc(size, kmflag);
119 if (rv)
120 memset(rv, 0, size);
122 return rv;
/*
 * kmem_free(9): release memory from kmem_alloc()/kmem_zalloc().
 * The host allocator tracks sizes itself, so "size" is unused.
 */
void
kmem_free(void *p, size_t size)
{

	rumpuser_free(p);
}
/*
 * pool & pool_cache
 */
136 struct pool_cache pnbuf_cache;
137 struct pool pnbuf_pool;
138 struct pool_allocator pool_allocator_nointr;
/*
 * Initialize the pool(9) subsystem.  Pools here are thin wrappers
 * around host malloc, so there is nothing to do.
 *
 * Fix: use "(void)" instead of "()" — an empty parameter list in a
 * definition declares an unprototyped function in C (pre-C23).
 */
void
pool_subsystem_init(void)
{

	/* nada */
}
147 void
148 pool_init(struct pool *pp, size_t size, u_int align, u_int align_offset,
149 int flags, const char *wchan, struct pool_allocator *palloc, int ipl)
152 pp->pr_size = size;
/*
 * pool_destroy(9): pools own no resources here, so nothing to tear down.
 */
void
pool_destroy(struct pool *pp)
{

	/* nothing to do */
}
162 pool_cache_t
163 pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
164 const char *wchan, struct pool_allocator *palloc, int ipl,
165 int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
166 void *arg)
168 pool_cache_t pc;
170 pc = rumpuser_malloc(sizeof(*pc), 0);
171 pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
172 palloc, ipl, ctor, dtor, arg);
173 return pc;
176 void
177 pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
178 u_int align_offset, u_int flags, const char *wchan,
179 struct pool_allocator *palloc, int ipl,
180 int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
181 void *arg)
184 pool_init(&pc->pc_pool, size, align, align_offset, flags,
185 wchan, palloc, ipl);
186 pc->pc_ctor = ctor;
187 pc->pc_dtor = dtor;
188 pc->pc_arg = arg;
191 void
192 pool_cache_destroy(pool_cache_t pc)
195 pool_destroy(&pc->pc_pool);
196 rumpuser_free(pc);
199 void *
200 pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
202 void *item;
204 item = pool_get(&pc->pc_pool, 0);
205 if (pc->pc_ctor)
206 pc->pc_ctor(pc->pc_arg, item, flags);
207 if (pap)
208 *pap = POOL_PADDR_INVALID;
210 return item;
213 void
214 pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
217 if (pc->pc_dtor)
218 pc->pc_dtor(pc->pc_arg, object);
219 pool_put(&pc->pc_pool, object);
222 bool
223 pool_cache_reclaim(pool_cache_t pc)
226 return true;
/*
 * pool_cache_cpu_init(9): no per-CPU caches exist here; no-op.
 */
void
pool_cache_cpu_init(struct cpu_info *ci)
{

	/* nothing to do */
}
236 void *
237 pool_get(struct pool *pp, int flags)
239 void *rv;
241 #ifdef DIAGNOSTIC
242 if (pp->pr_size == 0)
243 panic("%s: pool unit size 0. not initialized?", __func__);
244 #endif
246 rv = rumpuser_malloc(pp->pr_size, 1);
247 if (rv == NULL && (flags & PR_WAITOK && (flags & PR_LIMITFAIL) == 0))
248 panic("%s: out of memory and PR_WAITOK", __func__);
250 return rv;
/*
 * pool_put(9): hand the item back to the host allocator.
 */
void
pool_put(struct pool *pp, void *item)
{

	rumpuser_free(item);
}
/*
 * pool_sethiwat(9): watermarks are meaningless without caching; no-op.
 */
void
pool_sethiwat(struct pool *pp, int n)
{

	/* nothing to do */
}
/*
 * pool_setlowat(9): watermarks are meaningless without caching; no-op.
 */
void
pool_setlowat(struct pool *pp, int n)
{

	/* nothing to do */
}
274 void
275 pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess,
276 int ratecap)
279 return;
282 void
283 pool_cache_setlowat(pool_cache_t pc, int n)
286 return;
289 void
290 pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
293 /* XXX: notused */
294 pc->pc_pool.pr_drain_hook = fn;
295 pc->pc_pool.pr_drain_hook_arg = arg;
/*
 * pool_prime(9): preallocation is pointless when pool_get() mallocs
 * each item on demand; report success.
 *
 * Fix: restore the "int" return type, which was lost in extraction —
 * the function returns 0 and pool_prime(9) returns int.
 */
int
pool_prime(struct pool *pp, int nitems)
{

	return 0;
}
305 /* XXX: for tmpfs, shouldn't be here */
306 void *pool_page_alloc_nointr(struct pool *, int);
307 void pool_page_free_nointr(struct pool *, void *);
308 void *
309 pool_page_alloc_nointr(struct pool *pp, int flags)
312 return pool_get(pp, flags);
/*
 * Back-end page free used by tmpfs: just another pool_put().
 *
 * Fix: "return pool_put(...)" returned a void expression from a void
 * function, which violates a C constraint (C99 6.8.6.4); call and
 * return separately.
 */
void
pool_page_free_nointr(struct pool *pp, void *item)
{

	pool_put(pp, item);
}
/*
 * vmem(9) hash-rehashing thread start: no vmem arenas exist in this
 * configuration, so there is nothing to start.
 *
 * Fix: use "(void)" instead of "()" — an empty parameter list in a
 * definition declares an unprototyped function in C (pre-C23).
 */
void
vmem_rehash_start(void)
{

	/* nothing to do */
}
328 #endif /* RUMP_USE_UNREAL_ALLOCATORS */