/*
 * sys/kern/subr_kobj.c — FreeBSD kernel object (kobj) support:
 * class compilation, method lookup and object lifecycle.
 */
/*-
 * Copyright (c) 2000,2003 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#ifndef TEST
#include <sys/systm.h>
#endif

#ifdef TEST
#include "usertest.h"
#endif
45 static MALLOC_DEFINE(M_KOBJ, "kobj", "Kernel object structures");
47 #ifdef KOBJ_STATS
49 u_int kobj_lookup_hits;
50 u_int kobj_lookup_misses;
52 SYSCTL_UINT(_kern, OID_AUTO, kobj_hits, CTLFLAG_RD,
53 &kobj_lookup_hits, 0, "");
54 SYSCTL_UINT(_kern, OID_AUTO, kobj_misses, CTLFLAG_RD,
55 &kobj_lookup_misses, 0, "");
57 #endif
59 static struct mtx kobj_mtx;
60 static int kobj_mutex_inited;
61 static int kobj_next_id = 1;
63 SYSCTL_UINT(_kern, OID_AUTO, kobj_methodcount, CTLFLAG_RD,
64 &kobj_next_id, 0, "");
66 static void
67 kobj_init_mutex(void *arg)
69 if (!kobj_mutex_inited) {
70 mtx_init(&kobj_mtx, "kobj", NULL, MTX_DEF);
71 kobj_mutex_inited = 1;
75 SYSINIT(kobj, SI_SUB_LOCK, SI_ORDER_ANY, kobj_init_mutex, NULL);
77 void
78 kobj_machdep_init(void)
80 kobj_init_mutex(NULL);
84 * This method structure is used to initialise new caches. Since the
85 * desc pointer is NULL, it is guaranteed never to match any read
86 * descriptors.
88 static struct kobj_method null_method = {
89 0, 0,
92 int
93 kobj_error_method(void)
96 return ENXIO;
99 static void
100 kobj_register_method(struct kobjop_desc *desc)
103 mtx_assert(&kobj_mtx, MA_OWNED);
104 if (desc->id == 0) {
105 desc->id = kobj_next_id++;
/*
 * Method ids are never reclaimed, so unregistration is a no-op; this
 * stub keeps the register/unregister call sites symmetric.
 */
static void
kobj_unregister_method(struct kobjop_desc *desc)
{
}
114 static void
115 kobj_class_compile_common(kobj_class_t cls, kobj_ops_t ops)
117 kobj_method_t *m;
118 int i;
120 mtx_assert(&kobj_mtx, MA_OWNED);
123 * Don't do anything if we are already compiled.
125 if (cls->ops)
126 return;
129 * First register any methods which need it.
131 for (i = 0, m = cls->methods; m->desc; i++, m++)
132 kobj_register_method(m->desc);
135 * Then initialise the ops table.
137 for (i = 0; i < KOBJ_CACHE_SIZE; i++)
138 ops->cache[i] = &null_method;
139 ops->cls = cls;
140 cls->ops = ops;
143 void
144 kobj_class_compile(kobj_class_t cls)
146 kobj_ops_t ops;
148 mtx_assert(&kobj_mtx, MA_NOTOWNED);
151 * Allocate space for the compiled ops table.
153 ops = malloc(sizeof(struct kobj_ops), M_KOBJ, M_NOWAIT);
154 if (!ops)
155 panic("kobj_compile_methods: out of memory");
157 mtx_lock(&kobj_mtx);
160 * We may have lost a race for kobj_class_compile here - check
161 * to make sure someone else hasn't already compiled this
162 * class.
164 if (cls->ops) {
165 mtx_unlock(&kobj_mtx);
166 free(ops, M_KOBJ);
167 return;
170 kobj_class_compile_common(cls, ops);
171 mtx_unlock(&kobj_mtx);
174 void
175 kobj_class_compile_static(kobj_class_t cls, kobj_ops_t ops)
178 mtx_assert(&kobj_mtx, MA_NOTOWNED);
181 * Increment refs to make sure that the ops table is not freed.
183 mtx_lock(&kobj_mtx);
184 cls->refs++;
185 kobj_class_compile_common(cls, ops);
186 mtx_unlock(&kobj_mtx);
189 static kobj_method_t*
190 kobj_lookup_method_class(kobj_class_t cls, kobjop_desc_t desc)
192 kobj_method_t *methods = cls->methods;
193 kobj_method_t *ce;
195 for (ce = methods; ce && ce->desc; ce++) {
196 if (ce->desc == desc) {
197 return ce;
201 return 0;
204 static kobj_method_t*
205 kobj_lookup_method_mi(kobj_class_t cls,
206 kobjop_desc_t desc)
208 kobj_method_t *ce;
209 kobj_class_t *basep;
211 ce = kobj_lookup_method_class(cls, desc);
212 if (ce)
213 return ce;
215 basep = cls->baseclasses;
216 if (basep) {
217 for (; *basep; basep++) {
218 ce = kobj_lookup_method_mi(*basep, desc);
219 if (ce)
220 return ce;
224 return 0;
227 kobj_method_t*
228 kobj_lookup_method(kobj_class_t cls,
229 kobj_method_t **cep,
230 kobjop_desc_t desc)
232 kobj_method_t *ce;
234 #ifdef KOBJ_STATS
236 * Correct for the 'hit' assumption in KOBJOPLOOKUP and record
237 * a 'miss'.
239 kobj_lookup_hits--;
240 kobj_lookup_misses++;
241 #endif
243 ce = kobj_lookup_method_mi(cls, desc);
244 if (!ce)
245 ce = desc->deflt;
246 *cep = ce;
247 return ce;
250 void
251 kobj_class_free(kobj_class_t cls)
253 int i;
254 kobj_method_t *m;
255 void* ops = 0;
257 mtx_assert(&kobj_mtx, MA_NOTOWNED);
258 mtx_lock(&kobj_mtx);
261 * Protect against a race between kobj_create and
262 * kobj_delete.
264 if (cls->refs == 0) {
266 * Unregister any methods which are no longer used.
268 for (i = 0, m = cls->methods; m->desc; i++, m++)
269 kobj_unregister_method(m->desc);
272 * Free memory and clean up.
274 ops = cls->ops;
275 cls->ops = 0;
278 mtx_unlock(&kobj_mtx);
280 if (ops)
281 free(ops, M_KOBJ);
284 kobj_t
285 kobj_create(kobj_class_t cls,
286 struct malloc_type *mtype,
287 int mflags)
289 kobj_t obj;
292 * Allocate and initialise the new object.
294 obj = malloc(cls->size, mtype, mflags | M_ZERO);
295 if (!obj)
296 return 0;
297 kobj_init(obj, cls);
299 return obj;
302 void
303 kobj_init(kobj_t obj, kobj_class_t cls)
305 mtx_assert(&kobj_mtx, MA_NOTOWNED);
306 retry:
307 mtx_lock(&kobj_mtx);
310 * Consider compiling the class' method table.
312 if (!cls->ops) {
314 * kobj_class_compile doesn't want the lock held
315 * because of the call to malloc - we drop the lock
316 * and re-try.
318 mtx_unlock(&kobj_mtx);
319 kobj_class_compile(cls);
320 goto retry;
323 obj->ops = cls->ops;
324 cls->refs++;
326 mtx_unlock(&kobj_mtx);
329 void
330 kobj_delete(kobj_t obj, struct malloc_type *mtype)
332 kobj_class_t cls = obj->ops->cls;
333 int refs;
336 * Consider freeing the compiled method table for the class
337 * after its last instance is deleted. As an optimisation, we
338 * should defer this for a short while to avoid thrashing.
340 mtx_assert(&kobj_mtx, MA_NOTOWNED);
341 mtx_lock(&kobj_mtx);
342 cls->refs--;
343 refs = cls->refs;
344 mtx_unlock(&kobj_mtx);
346 if (!refs)
347 kobj_class_free(cls);
349 obj->ops = 0;
350 if (mtype)
351 free(obj, mtype);