/*-
 * Copyright (c) 2000,2003 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
45 static MALLOC_DEFINE(M_KOBJ
, "kobj", "Kernel object structures");
49 u_int kobj_lookup_hits
;
50 u_int kobj_lookup_misses
;
52 SYSCTL_UINT(_kern
, OID_AUTO
, kobj_hits
, CTLFLAG_RD
,
53 &kobj_lookup_hits
, 0, "");
54 SYSCTL_UINT(_kern
, OID_AUTO
, kobj_misses
, CTLFLAG_RD
,
55 &kobj_lookup_misses
, 0, "");
59 static struct mtx kobj_mtx
;
60 static int kobj_mutex_inited
;
61 static int kobj_next_id
= 1;
63 SYSCTL_UINT(_kern
, OID_AUTO
, kobj_methodcount
, CTLFLAG_RD
,
64 &kobj_next_id
, 0, "");
67 kobj_init_mutex(void *arg
)
69 if (!kobj_mutex_inited
) {
70 mtx_init(&kobj_mtx
, "kobj", NULL
, MTX_DEF
);
71 kobj_mutex_inited
= 1;
/* Arrange for the mutex to be initialised during the lock SYSINIT pass. */
SYSINIT(kobj, SI_SUB_LOCK, SI_ORDER_ANY, kobj_init_mutex, NULL);
78 kobj_machdep_init(void)
80 kobj_init_mutex(NULL
);
84 * This method structure is used to initialise new caches. Since the
85 * desc pointer is NULL, it is guaranteed never to match any read
88 static struct kobj_method null_method
= {
/*
 * Default error method: installed where no real method is found so a
 * dispatch always has something callable.  Returns ENXIO ("device not
 * configured") unconditionally.
 */
int
kobj_error_method(void)
{

	return (ENXIO);
}
100 kobj_register_method(struct kobjop_desc
*desc
)
103 mtx_assert(&kobj_mtx
, MA_OWNED
);
105 desc
->id
= kobj_next_id
++;
/*
 * Counterpart of kobj_register_method().  Ids are never reclaimed, so
 * this is intentionally a no-op; it exists to keep the register/
 * unregister pairing explicit at the call sites.
 */
static void
kobj_unregister_method(struct kobjop_desc *desc)
{
}
115 kobj_class_compile_common(kobj_class_t cls
, kobj_ops_t ops
)
120 mtx_assert(&kobj_mtx
, MA_OWNED
);
123 * Don't do anything if we are already compiled.
129 * First register any methods which need it.
131 for (i
= 0, m
= cls
->methods
; m
->desc
; i
++, m
++)
132 kobj_register_method(m
->desc
);
135 * Then initialise the ops table.
137 for (i
= 0; i
< KOBJ_CACHE_SIZE
; i
++)
138 ops
->cache
[i
] = &null_method
;
144 kobj_class_compile(kobj_class_t cls
)
148 mtx_assert(&kobj_mtx
, MA_NOTOWNED
);
151 * Allocate space for the compiled ops table.
153 ops
= malloc(sizeof(struct kobj_ops
), M_KOBJ
, M_NOWAIT
);
155 panic("kobj_compile_methods: out of memory");
160 * We may have lost a race for kobj_class_compile here - check
161 * to make sure someone else hasn't already compiled this
165 mtx_unlock(&kobj_mtx
);
170 kobj_class_compile_common(cls
, ops
);
171 mtx_unlock(&kobj_mtx
);
175 kobj_class_compile_static(kobj_class_t cls
, kobj_ops_t ops
)
178 mtx_assert(&kobj_mtx
, MA_NOTOWNED
);
181 * Increment refs to make sure that the ops table is not freed.
185 kobj_class_compile_common(cls
, ops
);
186 mtx_unlock(&kobj_mtx
);
189 static kobj_method_t
*
190 kobj_lookup_method_class(kobj_class_t cls
, kobjop_desc_t desc
)
192 kobj_method_t
*methods
= cls
->methods
;
195 for (ce
= methods
; ce
&& ce
->desc
; ce
++) {
196 if (ce
->desc
== desc
) {
204 static kobj_method_t
*
205 kobj_lookup_method_mi(kobj_class_t cls
,
211 ce
= kobj_lookup_method_class(cls
, desc
);
215 basep
= cls
->baseclasses
;
217 for (; *basep
; basep
++) {
218 ce
= kobj_lookup_method_mi(*basep
, desc
);
228 kobj_lookup_method(kobj_class_t cls
,
236 * Correct for the 'hit' assumption in KOBJOPLOOKUP and record
240 kobj_lookup_misses
++;
243 ce
= kobj_lookup_method_mi(cls
, desc
);
251 kobj_class_free(kobj_class_t cls
)
257 mtx_assert(&kobj_mtx
, MA_NOTOWNED
);
261 * Protect against a race between kobj_create and
264 if (cls
->refs
== 0) {
266 * Unregister any methods which are no longer used.
268 for (i
= 0, m
= cls
->methods
; m
->desc
; i
++, m
++)
269 kobj_unregister_method(m
->desc
);
272 * Free memory and clean up.
278 mtx_unlock(&kobj_mtx
);
285 kobj_create(kobj_class_t cls
,
286 struct malloc_type
*mtype
,
292 * Allocate and initialise the new object.
294 obj
= malloc(cls
->size
, mtype
, mflags
| M_ZERO
);
303 kobj_init(kobj_t obj
, kobj_class_t cls
)
305 mtx_assert(&kobj_mtx
, MA_NOTOWNED
);
310 * Consider compiling the class' method table.
314 * kobj_class_compile doesn't want the lock held
315 * because of the call to malloc - we drop the lock
318 mtx_unlock(&kobj_mtx
);
319 kobj_class_compile(cls
);
326 mtx_unlock(&kobj_mtx
);
330 kobj_delete(kobj_t obj
, struct malloc_type
*mtype
)
332 kobj_class_t cls
= obj
->ops
->cls
;
336 * Consider freeing the compiled method table for the class
337 * after its last instance is deleted. As an optimisation, we
338 * should defer this for a short while to avoid thrashing.
340 mtx_assert(&kobj_mtx
, MA_NOTOWNED
);
344 mtx_unlock(&kobj_mtx
);
347 kobj_class_free(cls
);