1 /* $NetBSD: uipc_sem.c,v 1.28 2008/11/14 13:35:25 ad Exp $ */
4 * Copyright (c) 2003, 2007, 2008 The NetBSD Foundation, Inc.
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of Wasabi Systems, Inc, and by Andrew Doran.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
33 * Copyright (c) 2002 Alfred Perlstein <alfred@FreeBSD.org>
34 * All rights reserved.
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
45 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
49 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 #include <sys/cdefs.h>
59 __KERNEL_RCSID(0, "$NetBSD: uipc_sem.c,v 1.28 2008/11/14 13:35:25 ad Exp $");
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <sys/kernel.h>
66 #include <sys/syscall.h>
69 #include <sys/fcntl.h>
70 #include <sys/kauth.h>
71 #include <sys/module.h>
72 #include <sys/mount.h>
73 #include <sys/syscall.h>
74 #include <sys/syscallargs.h>
75 #include <sys/syscallvar.h>
/*
 * Tunables and helper macros for the ksem implementation.
 * SEM_MAX_NAMELEN bounds the user-visible semaphore name (excluding NUL);
 * SEM_VALUE_MAX is the largest count a semaphore may hold (all-ones);
 * SEM_HASHTBL_SIZE is the (prime) size of the id hash table.
 */
77 #define SEM_MAX_NAMELEN 14
78 #define SEM_VALUE_MAX (~0U)
79 #define SEM_HASHTBL_SIZE 13
/* Map a ksem to its user-visible identifier, and an id to its hash bucket. */
81 #define SEM_TO_ID(x) (((x)->ks_id))
82 #define SEM_HASH(id) ((id) % SEM_HASHTBL_SIZE)
/* Declare this file as a miscellaneous-class loadable kernel module "ksem". */
84 MODULE(MODULE_CLASS_MISC
, ksem
, NULL
);
/*
 * Table of the _ksem_* system calls this module provides; registered and
 * unregistered as a unit via syscall_establish()/syscall_disestablish().
 * NOTE(review): the table's closing/terminator entry is outside this view.
 */
86 static const struct syscall_package ksem_syscalls
[] = {
87 { SYS__ksem_init
, 0, (sy_call_t
*)sys__ksem_init
},
88 { SYS__ksem_open
, 0, (sy_call_t
*)sys__ksem_open
},
89 { SYS__ksem_unlink
, 0, (sy_call_t
*)sys__ksem_unlink
},
90 { SYS__ksem_close
, 0, (sy_call_t
*)sys__ksem_close
},
91 { SYS__ksem_post
, 0, (sy_call_t
*)sys__ksem_post
},
92 { SYS__ksem_wait
, 0, (sy_call_t
*)sys__ksem_wait
},
93 { SYS__ksem_trywait
, 0, (sy_call_t
*)sys__ksem_trywait
},
94 { SYS__ksem_getvalue
, 0, (sy_call_t
*)sys__ksem_getvalue
},
95 { SYS__ksem_destroy
, 0, (sy_call_t
*)sys__ksem_destroy
},
/*
 * Per-semaphore state.  NOTE(review): the "struct ksem {" opening line is
 * outside this view.
 */
100 * Note: to read the ks_name member, you need either the ks_interlock
101 * or the ksem_mutex. To write the ks_name member, you need both. Make
102 * sure the order is ksem_mutex -> ks_interlock.
105 LIST_ENTRY(ksem
) ks_entry
; /* global list entry */
106 LIST_ENTRY(ksem
) ks_hash
; /* hash list entry */
107 kmutex_t ks_interlock
; /* lock on this ksem */
108 kcondvar_t ks_cv
; /* condition variable */
109 unsigned int ks_ref
; /* number of references */
110 char *ks_name
; /* if named, this is the name */
111 size_t ks_namelen
; /* length of name */
112 mode_t ks_mode
; /* protection bits */
113 uid_t ks_uid
; /* creator uid */
114 gid_t ks_gid
; /* creator gid */
115 unsigned int ks_value
; /* current value */
116 unsigned int ks_waiters
; /* number of waiters */
117 intptr_t ks_id
; /* unique identifier */
/*
 * A per-process reference to a semaphore: links one ksem onto a process's
 * kp_ksems list.  NOTE(review): the struct opening/closing lines are outside
 * this view.
 */
121 LIST_ENTRY(ksem_ref
) ksr_list
;
122 struct ksem
*ksr_ksem
;
/* Per-process ksem state: the list of references held (guarded by kp_lock). */
127 LIST_HEAD(, ksem_ref
) kp_ksems
;
/* List-head type used for both the global name list and the id hash chains. */
130 LIST_HEAD(ksem_list
, ksem
);
133 * ksem_mutex protects ksem_head and nsems. Only named semaphores go
/* Global lock; lock order is ksem_mutex -> ks_interlock (see struct note). */
136 static kmutex_t ksem_mutex
;
/* All currently linked NAMED semaphores, for lookup-by-name. */
137 static struct ksem_list ksem_head
= LIST_HEAD_INITIALIZER(&ksem_head
);
/* Hash table over ks_id, for lookup-by-id (buckets chosen by SEM_HASH). */
138 static struct ksem_list ksem_hash
[SEM_HASHTBL_SIZE
];
/* Count of live semaphores, checked against ksem_max in ksem_create(). */
139 static int nsems
= 0;
142 * ksem_counter is the last assigned intptr_t. It needs to be COMPAT_NETBSD32
143 * friendly, even though intptr_t itself is defined as uintptr_t.
/* Next candidate id; 0 is reserved (special value for libpthread). */
145 static uint32_t ksem_counter
= 1;
/* Key under which each process's ksem_proc is stashed in proc specificdata. */
147 static specificdata_key_t ksem_specificdata_key
;
/* Opaque handles returned by exechook_establish()/forkhook_establish(). */
148 static void *ksem_ehook
;
149 static void *ksem_fhook
;
/*
 * ksem_free: tear down a semaphore whose last reference has gone away.
 * Called with ks->ks_interlock held; always releases it.  Anonymous (or
 * unlinked) semaphores — ks_name == NULL — are destroyed and their memory
 * returned; NOTE(review): the named-semaphore path at the end is only
 * partially visible here.
 */
152 ksem_free(struct ksem
*ks
)
155 KASSERT(mutex_owned(&ks
->ks_interlock
));
158 * If the ksem is anonymous (or has been unlinked), then
159 * this is the end of its life.
161 if (ks
->ks_name
== NULL
) {
162 mutex_exit(&ks
->ks_interlock
);
163 mutex_destroy(&ks
->ks_interlock
);
164 cv_destroy(&ks
->ks_cv
);
/* Unhash under the global lock before freeing. */
166 mutex_enter(&ksem_mutex
);
168 LIST_REMOVE(ks
, ks_hash
);
169 mutex_exit(&ksem_mutex
);
171 kmem_free(ks
, sizeof(*ks
));
/* Still-named semaphore: just drop the interlock. */
174 mutex_exit(&ks
->ks_interlock
);
/*
 * ksem_addref: take an additional reference on a semaphore.
 * Caller must hold ks->ks_interlock; the reference count must already be
 * non-zero (a zero count means the ksem is being destroyed).
 */
178 ksem_addref(struct ksem
*ks
)
181 KASSERT(mutex_owned(&ks
->ks_interlock
));
183 KASSERT(ks
->ks_ref
!= 0);
/*
 * ksem_delref: drop a reference on a semaphore.  Caller must hold
 * ks->ks_interlock.  When the last reference is dropped the ksem is
 * released (ksem_free consumes the interlock on that path — see gap);
 * otherwise the interlock is released here.
 */
187 ksem_delref(struct ksem
*ks
)
190 KASSERT(mutex_owned(&ks
->ks_interlock
));
191 KASSERT(ks
->ks_ref
!= 0);
192 if (--ks
->ks_ref
== 0) {
196 mutex_exit(&ks
->ks_interlock
);
/*
 * ksem_proc_alloc: allocate and initialize a fresh per-process ksem_proc
 * (rwlock + empty reference list).  KM_SLEEP: may block, never returns NULL.
 */
199 static struct ksem_proc
*
200 ksem_proc_alloc(void)
202 struct ksem_proc
*kp
;
204 kp
= kmem_alloc(sizeof(*kp
), KM_SLEEP
);
205 rw_init(&kp
->kp_lock
);
206 LIST_INIT(&kp
->kp_ksems
);
/*
 * ksem_proc_dtor: specificdata destructor for a process's ksem state.
 * Drops every semaphore reference the process still holds, then destroys
 * the lock and frees the ksem_proc itself.
 */
212 ksem_proc_dtor(void *arg
)
214 struct ksem_proc
*kp
= arg
;
215 struct ksem_ref
*ksr
;
217 rw_enter(&kp
->kp_lock
, RW_WRITER
);
/* Unhook and release each reference; ksem_delref consumes the interlock. */
219 while ((ksr
= LIST_FIRST(&kp
->kp_ksems
)) != NULL
) {
220 LIST_REMOVE(ksr
, ksr_list
);
221 mutex_enter(&ksr
->ksr_ksem
->ks_interlock
);
222 ksem_delref(ksr
->ksr_ksem
);
223 kmem_free(ksr
, sizeof(*ksr
));
226 rw_exit(&kp
->kp_lock
);
227 rw_destroy(&kp
->kp_lock
);
228 kmem_free(kp
, sizeof(*kp
));
/*
 * ksem_add_proc: record that process p holds a reference to ks, creating
 * the per-process ksem_proc on first use.  The caller is assumed to have
 * already accounted for the ksem reference itself — TODO confirm, the
 * ksr_ksem assignment line is missing from this view.
 */
232 ksem_add_proc(struct proc
*p
, struct ksem
*ks
)
234 struct ksem_proc
*kp
;
235 struct ksem_ref
*ksr
;
237 kp
= proc_getspecific(p
, ksem_specificdata_key
);
/* First semaphore for this process: allocate and attach its ksem_proc. */
239 kp
= ksem_proc_alloc();
240 proc_setspecific(p
, ksem_specificdata_key
, kp
);
243 ksr
= kmem_alloc(sizeof(*ksr
), KM_SLEEP
);
/* Publish the new reference under the write lock. */
246 rw_enter(&kp
->kp_lock
, RW_WRITER
);
247 LIST_INSERT_HEAD(&kp
->kp_ksems
, ksr
, ksr_list
);
248 rw_exit(&kp
->kp_lock
);
/*
 * ksem_drop_proc: remove the ksem_ref linking kp to ks and return it for
 * the caller to free.  Caller must hold kp->kp_lock as writer and
 * ks->ks_interlock; panics if no reference exists (would indicate
 * reference-count corruption).
 */
251 /* We MUST have a write lock on the ksem_proc list! */
252 static struct ksem_ref
*
253 ksem_drop_proc(struct ksem_proc
*kp
, struct ksem
*ks
)
255 struct ksem_ref
*ksr
;
257 KASSERT(mutex_owned(&ks
->ks_interlock
));
258 LIST_FOREACH(ksr
, &kp
->kp_ksems
, ksr_list
) {
259 if (ksr
->ksr_ksem
== ks
) {
261 LIST_REMOVE(ksr
, ksr_list
);
/* Not found: the process claimed a reference it does not hold. */
266 panic("ksem_drop_proc: ksem_proc %p ksem %p", kp
, ks
);
/*
 * ksem_perm: check whether the calling lwp may access semaphore ks.
 * Access is granted to a matching owner/group with the corresponding
 * write bit set, to anyone if other-write is set, or to the superuser
 * (kauth_authorize_generic).  Caller must hold ks->ks_interlock.
 * NOTE(review): the declaration of credential `uc` and the return
 * statements are outside this view.
 */
272 ksem_perm(struct lwp
*l
, struct ksem
*ks
)
276 KASSERT(mutex_owned(&ks
->ks_interlock
));
278 if ((kauth_cred_geteuid(uc
) == ks
->ks_uid
&& (ks
->ks_mode
& S_IWUSR
) != 0) ||
279 (kauth_cred_getegid(uc
) == ks
->ks_gid
&& (ks
->ks_mode
& S_IWGRP
) != 0) ||
280 (ks
->ks_mode
& S_IWOTH
) != 0 ||
281 kauth_authorize_generic(uc
, KAUTH_GENERIC_ISSUSER
, NULL
) == 0)
/*
 * ksem_lookup_byid: find a semaphore by its identifier by walking the
 * appropriate hash chain.  Caller must hold ksem_mutex.
 */
287 ksem_lookup_byid(intptr_t id
)
291 KASSERT(mutex_owned(&ksem_mutex
));
292 LIST_FOREACH(ks
, &ksem_hash
[SEM_HASH(id
)], ks_hash
) {
/*
 * ksem_lookup_byname: find a NAMED semaphore on the global list.
 * Caller must hold ksem_mutex; on a hit the ksem is returned with its
 * interlock held (the ksem_mutex -> ks_interlock lock order).
 */
300 ksem_lookup_byname(const char *name
)
304 KASSERT(mutex_owned(&ksem_mutex
));
305 LIST_FOREACH(ks
, &ksem_head
, ks_entry
) {
306 if (strcmp(ks
->ks_name
, name
) == 0) {
307 mutex_enter(&ks
->ks_interlock
);
/*
 * ksem_create: allocate and initialize a semaphore with initial count
 * `value`.  A NULL name creates an anonymous semaphore; otherwise the
 * name is validated (length, leading '/', no interior '/') and copied.
 * On success the new ksem — already inserted into the id hash — is
 * returned via *ksret.  Fails with ENAMETOOLONG for oversize names and
 * enforces the global ksem_max limit against nsems.
 * NOTE(review): `len`/`uc` declarations, mode/ref initialization, the
 * counter increment and several returns are outside this view.
 */
315 ksem_create(struct lwp
*l
, const char *name
, struct ksem
**ksret
,
316 mode_t mode
, unsigned int value
)
/* Reject an initial count the semaphore could not represent. */
323 if (value
> SEM_VALUE_MAX
)
325 ret
= kmem_zalloc(sizeof(*ret
), KM_SLEEP
);
328 if (len
> SEM_MAX_NAMELEN
) {
329 kmem_free(ret
, sizeof(*ret
));
330 return (ENAMETOOLONG
);
332 /* name must start with a '/' and contain no further '/'. */
333 if (*name
!= '/' || len
< 2 || strchr(name
+ 1, '/') != NULL
) {
334 kmem_free(ret
, sizeof(*ret
));
337 ret
->ks_namelen
= len
+ 1;
338 ret
->ks_name
= kmem_alloc(ret
->ks_namelen
, KM_SLEEP
);
339 strlcpy(ret
->ks_name
, name
, len
+ 1);
343 ret
->ks_value
= value
;
/* Creator credentials stamp ownership for later ksem_perm() checks. */
346 ret
->ks_uid
= kauth_cred_geteuid(uc
);
347 ret
->ks_gid
= kauth_cred_getegid(uc
);
348 mutex_init(&ret
->ks_interlock
, MUTEX_DEFAULT
, IPL_NONE
);
349 cv_init(&ret
->ks_cv
, "psem");
/* Global limit check and id assignment happen under ksem_mutex. */
351 mutex_enter(&ksem_mutex
);
352 if (nsems
>= ksem_max
) {
353 mutex_exit(&ksem_mutex
);
354 if (ret
->ks_name
!= NULL
)
355 kmem_free(ret
->ks_name
, ret
->ks_namelen
);
356 kmem_free(ret
, sizeof(*ret
));
/* Skip ids already in use (and the reserved id 0). */
360 while (ksem_lookup_byid(ksem_counter
) != NULL
) {
362 /* 0 is a special value for libpthread */
363 if (ksem_counter
== 0)
366 ret
->ks_id
= ksem_counter
;
367 LIST_INSERT_HEAD(&ksem_hash
[SEM_HASH(ret
->ks_id
)], ret
, ks_hash
);
368 mutex_exit(&ksem_mutex
);
/*
 * sys__ksem_init: _ksem_init(2) entry point — create an anonymous
 * semaphore.  Thin wrapper that forwards to do_ksem_init() with the
 * native copyout routine.
 */
375 sys__ksem_init(struct lwp
*l
, const struct sys__ksem_init_args
*uap
, register_t
*retval
)
382 return do_ksem_init(l
, SCARG(uap
, value
), SCARG(uap
, idp
), copyout
);
/*
 * do_ksem_init: create an anonymous semaphore with the given initial
 * value, copy its id out to userland via `docopyout` (parameterized so
 * compat code can supply its own), and record the reference against the
 * calling process.  NOTE(review): error-handling branches between the
 * visible statements are missing from this view.
 */
386 do_ksem_init(struct lwp
*l
, unsigned int value
, intptr_t *idp
,
393 /* Note the mode does not matter for anonymous semaphores. */
394 error
= ksem_create(l
, NULL
, &ks
, 0, value
);
398 error
= (*docopyout
)(&id
, idp
, sizeof(id
));
400 mutex_enter(&ks
->ks_interlock
);
405 ksem_add_proc(l
->l_proc
, ks
);
/*
 * sys__ksem_open: _ksem_open(2) entry point — open/create a named
 * semaphore.  Thin wrapper forwarding all arguments to do_ksem_open()
 * with the native copyout routine.
 */
411 sys__ksem_open(struct lwp
*l
, const struct sys__ksem_open_args
*uap
, register_t
*retval
)
421 return do_ksem_open(l
, SCARG(uap
, name
), SCARG(uap
, oflag
),
422 SCARG(uap
, mode
), SCARG(uap
, value
), SCARG(uap
, idp
), copyout
);
/*
 * do_ksem_open: open or create a named semaphore.
 *
 * Copies the name in from userland, then looks it up under ksem_mutex:
 *  - exists + O_EXCL           -> fail (EEXIST path, return outside view);
 *  - exists                    -> ksem_perm() check, copy id out, take a
 *                                 process reference;
 *  - absent, no O_CREAT        -> fail;
 *  - absent, O_CREAT           -> drop the lock (creation may sleep),
 *                                 ksem_create(), then re-lookup to resolve
 *                                 the creation race before linking the new
 *                                 semaphore onto ksem_head.
 * NOTE(review): many intermediate declarations, error returns and the
 * race-loser cleanup between the visible statements are missing from
 * this view.
 */
426 do_ksem_open(struct lwp
*l
, const char *semname
, int oflag
, mode_t mode
,
427 unsigned int value
, intptr_t *idp
, copyout_t docopyout
)
429 char name
[SEM_MAX_NAMELEN
+ 1];
432 struct ksem
*ksnew
, *ks
;
435 error
= copyinstr(semname
, name
, sizeof(name
), &done
);
440 mutex_enter(&ksem_mutex
);
441 ks
= ksem_lookup_byname(name
);
445 /* Check for exclusive create. */
446 if (oflag
& O_EXCL
) {
447 mutex_exit(&ks
->ks_interlock
);
448 mutex_exit(&ksem_mutex
);
453 * Verify permissions. If we can access it, add
454 * this process's reference.
456 KASSERT(mutex_owned(&ks
->ks_interlock
));
457 error
= ksem_perm(l
, ks
);
460 mutex_exit(&ks
->ks_interlock
);
461 mutex_exit(&ksem_mutex
);
/* Hand the existing semaphore's id back to the caller. */
466 error
= (*docopyout
)(&id
, idp
, sizeof(id
));
468 mutex_enter(&ks
->ks_interlock
);
473 ksem_add_proc(l
->l_proc
, ks
);
479 * Semaphore does not exist and the caller didn't ask for
 * creation: error.
481 if ((oflag
& O_CREAT
) == 0) {
482 mutex_exit(&ksem_mutex
);
487 * We may block during creation, so drop the lock.
489 mutex_exit(&ksem_mutex
);
490 error
= ksem_create(l
, name
, &ksnew
, mode
, value
);
494 id
= SEM_TO_ID(ksnew
);
495 error
= (*docopyout
)(&id
, idp
, sizeof(id
));
/* copyout failed: strip the name so ksem_free treats it as anonymous. */
497 kmem_free(ksnew
->ks_name
, ksnew
->ks_namelen
);
498 ksnew
->ks_name
= NULL
;
500 mutex_enter(&ksnew
->ks_interlock
);
506 * We need to make sure we haven't lost a race while
507 * allocating during creation.
509 mutex_enter(&ksem_mutex
);
510 if ((ks
= ksem_lookup_byname(name
)) != NULL
) {
/* Lost the race; another thread created the name meanwhile. */
511 if (oflag
& O_EXCL
) {
512 mutex_exit(&ks
->ks_interlock
);
513 mutex_exit(&ksem_mutex
);
515 kmem_free(ksnew
->ks_name
, ksnew
->ks_namelen
);
516 ksnew
->ks_name
= NULL
;
518 mutex_enter(&ksnew
->ks_interlock
);
524 /* ksnew already has its initial reference. */
525 LIST_INSERT_HEAD(&ksem_head
, ksnew
, ks_entry
);
526 mutex_exit(&ksem_mutex
);
528 ksem_add_proc(l
->l_proc
, ksnew
);
/*
 * ksem_lookup_proc: resolve a user-visible id against the calling
 * process's reference list.  On a hit the ksem is returned with its
 * interlock held.  Caller must hold kp->kp_lock (reader suffices).
 */
533 /* We must have a read lock on the ksem_proc list! */
535 ksem_lookup_proc(struct ksem_proc
*kp
, intptr_t id
)
537 struct ksem_ref
*ksr
;
539 LIST_FOREACH(ksr
, &kp
->kp_ksems
, ksr_list
) {
540 if (id
== SEM_TO_ID(ksr
->ksr_ksem
)) {
541 mutex_enter(&ksr
->ksr_ksem
->ks_interlock
);
542 return (ksr
->ksr_ksem
);
/*
 * sys__ksem_unlink: _ksem_unlink(2) — remove a named semaphore from the
 * namespace.  The semaphore is taken off the global name list; its
 * storage persists until the last reference is dropped (the ks_name
 * teardown between the visible statements is outside this view).
 */
550 sys__ksem_unlink(struct lwp
*l
, const struct sys__ksem_unlink_args
*uap
, register_t
*retval
)
555 char name
[SEM_MAX_NAMELEN
+ 1], *cp
;
560 error
= copyinstr(SCARG(uap
, name
), name
, sizeof(name
), &done
);
564 mutex_enter(&ksem_mutex
);
565 ks
= ksem_lookup_byname(name
);
/* Not found: release the global lock (ENOENT path outside view). */
567 mutex_exit(&ksem_mutex
);
/* Found: interlock is held courtesy of ksem_lookup_byname(). */
571 KASSERT(mutex_owned(&ks
->ks_interlock
));
573 LIST_REMOVE(ks
, ks_entry
);
575 len
= ks
->ks_namelen
;
578 mutex_exit(&ksem_mutex
);
583 mutex_exit(&ks
->ks_interlock
);
/*
 * sys__ksem_close: _ksem_close(2) — drop this process's reference to a
 * NAMED semaphore.  Anonymous semaphores (ks_name == NULL) are rejected:
 * they must be destroyed with _ksem_destroy(2) instead.  The actual
 * ksem_delref() between the visible statements is outside this view.
 */
591 sys__ksem_close(struct lwp
*l
, const struct sys__ksem_close_args
*uap
, register_t
*retval
)
596 struct ksem_proc
*kp
;
597 struct ksem_ref
*ksr
;
600 kp
= proc_getspecific(l
->l_proc
, ksem_specificdata_key
);
/* Writer lock: we may unhook a reference from kp_ksems below. */
604 rw_enter(&kp
->kp_lock
, RW_WRITER
);
606 ks
= ksem_lookup_proc(kp
, SCARG(uap
, id
));
608 rw_exit(&kp
->kp_lock
);
612 KASSERT(mutex_owned(&ks
->ks_interlock
));
613 if (ks
->ks_name
== NULL
) {
/* Anonymous semaphore: close is not the right operation. */
614 mutex_exit(&ks
->ks_interlock
);
615 rw_exit(&kp
->kp_lock
);
619 ksr
= ksem_drop_proc(kp
, ks
);
620 rw_exit(&kp
->kp_lock
);
621 kmem_free(ksr
, sizeof(*ksr
));
/*
 * sys__ksem_post: _ksem_post(2) — increment the semaphore and wake all
 * waiters.  Overflow of ks_value beyond SEM_VALUE_MAX is refused (the
 * increment and error returns between the visible statements are outside
 * this view).
 */
627 sys__ksem_post(struct lwp
*l
, const struct sys__ksem_post_args
*uap
, register_t
*retval
)
632 struct ksem_proc
*kp
;
636 kp
= proc_getspecific(l
->l_proc
, ksem_specificdata_key
);
640 rw_enter(&kp
->kp_lock
, RW_READER
);
641 ks
= ksem_lookup_proc(kp
, SCARG(uap
, id
));
642 rw_exit(&kp
->kp_lock
);
646 KASSERT(mutex_owned(&ks
->ks_interlock
));
/* Refuse to overflow the counter. */
647 if (ks
->ks_value
== SEM_VALUE_MAX
) {
/* Wake every thread blocked in ksem_wait(). */
653 cv_broadcast(&ks
->ks_cv
);
656 mutex_exit(&ks
->ks_interlock
);
/*
 * ksem_wait: common body of _ksem_wait(2) and _ksem_trywait(2).
 * Looks the id up in the caller's reference list, then decrements the
 * count, sleeping interruptibly on ks_cv while it is zero.  `tryflag`
 * selects the non-blocking variant — presumably returning EAGAIN instead
 * of sleeping; that branch is outside this view, TODO confirm.
 */
661 ksem_wait(struct lwp
*l
, intptr_t id
, int tryflag
)
663 struct ksem_proc
*kp
;
667 kp
= proc_getspecific(l
->l_proc
, ksem_specificdata_key
);
671 rw_enter(&kp
->kp_lock
, RW_READER
);
672 ks
= ksem_lookup_proc(kp
, id
);
673 rw_exit(&kp
->kp_lock
);
677 KASSERT(mutex_owned(&ks
->ks_interlock
));
/* Sleep until a post makes the count non-zero; signals abort the wait. */
679 while (ks
->ks_value
== 0) {
684 error
= cv_wait_sig(&ks
->ks_cv
, &ks
->ks_interlock
);
/*
 * sys__ksem_wait: _ksem_wait(2) — blocking decrement; defers to
 * ksem_wait() with tryflag clear.
 */
697 sys__ksem_wait(struct lwp
*l
, const struct sys__ksem_wait_args
*uap
, register_t
*retval
)
703 return ksem_wait(l
, SCARG(uap
, id
), 0);
/*
 * sys__ksem_trywait: _ksem_trywait(2) — non-blocking decrement; defers
 * to ksem_wait() with tryflag set.
 */
707 sys__ksem_trywait(struct lwp
*l
, const struct sys__ksem_trywait_args
*uap
, register_t
*retval
)
713 return ksem_wait(l
, SCARG(uap
, id
), 1);
/*
 * sys__ksem_getvalue: _ksem_getvalue(2) — read the current count into
 * `val` (the snapshot assignment between the visible statements is
 * outside this view) and copy it out to the user-supplied pointer.
 */
717 sys__ksem_getvalue(struct lwp
*l
, const struct sys__ksem_getvalue_args
*uap
, register_t
*retval
)
723 struct ksem_proc
*kp
;
727 kp
= proc_getspecific(l
->l_proc
, ksem_specificdata_key
);
731 rw_enter(&kp
->kp_lock
, RW_READER
);
732 ks
= ksem_lookup_proc(kp
, SCARG(uap
, id
));
733 rw_exit(&kp
->kp_lock
);
737 KASSERT(mutex_owned(&ks
->ks_interlock
));
739 mutex_exit(&ks
->ks_interlock
);
/* Copy the snapshot out after dropping the interlock (copyout may fault). */
741 return (copyout(&val
, SCARG(uap
, value
), sizeof(val
)));
/*
 * sys__ksem_destroy: _ksem_destroy(2) — destroy an ANONYMOUS semaphore.
 * Named semaphores (ks_name != NULL) and semaphores with waiters are
 * refused.  On success the process reference is unhooked and released
 * (the final ksem_delref between the visible statements is outside this
 * view).
 */
745 sys__ksem_destroy(struct lwp
*l
, const struct sys__ksem_destroy_args
*uap
, register_t
*retval
)
750 struct ksem_proc
*kp
;
751 struct ksem_ref
*ksr
;
754 kp
= proc_getspecific(l
->l_proc
, ksem_specificdata_key
);
758 rw_enter(&kp
->kp_lock
, RW_WRITER
);
760 ks
= ksem_lookup_proc(kp
, SCARG(uap
, id
));
762 rw_exit(&kp
->kp_lock
);
766 KASSERT(mutex_owned(&ks
->ks_interlock
));
769 * XXX This misses named semaphores which have been unlink'd,
770 * XXX but since behavior of destroying a named semaphore is
771 * XXX undefined, this is technically allowed.
773 if (ks
->ks_name
!= NULL
) {
774 mutex_exit(&ks
->ks_interlock
);
775 rw_exit(&kp
->kp_lock
);
/* Destroying a semaphore that threads are blocked on is an error. */
779 if (ks
->ks_waiters
) {
780 mutex_exit(&ks
->ks_interlock
);
781 rw_exit(&kp
->kp_lock
);
785 ksr
= ksem_drop_proc(kp
, ks
);
786 rw_exit(&kp
->kp_lock
);
787 kmem_free(ksr
, sizeof(*ksr
));
/*
 * ksem_forkhook: fork(2) hook — the child (p2) inherits every semaphore
 * reference the parent (p1) holds.  Each inherited ksem's refcount is
 * bumped under its interlock, and the copied reference list is attached
 * to the child via specificdata.
 */
793 ksem_forkhook(struct proc
*p2
, struct proc
*p1
)
795 struct ksem_proc
*kp1
, *kp2
;
796 struct ksem_ref
*ksr
, *ksr1
;
798 kp1
= proc_getspecific(p1
, ksem_specificdata_key
);
802 kp2
= ksem_proc_alloc();
804 rw_enter(&kp1
->kp_lock
, RW_READER
);
806 if (!LIST_EMPTY(&kp1
->kp_ksems
)) {
807 LIST_FOREACH(ksr
, &kp1
->kp_ksems
, ksr_list
) {
/* Duplicate the reference record and account the extra reference. */
808 ksr1
= kmem_alloc(sizeof(*ksr
), KM_SLEEP
);
809 ksr1
->ksr_ksem
= ksr
->ksr_ksem
;
810 mutex_enter(&ksr
->ksr_ksem
->ks_interlock
);
811 ksem_addref(ksr
->ksr_ksem
);
812 mutex_exit(&ksr
->ksr_ksem
->ks_interlock
);
813 LIST_INSERT_HEAD(&kp2
->kp_ksems
, ksr1
, ksr_list
);
817 rw_exit(&kp1
->kp_lock
);
818 proc_setspecific(p2
, ksem_specificdata_key
, kp2
);
/*
 * ksem_exechook: exec(2) hook — semaphore references do not survive
 * exec, so detach the process's ksem state (the specificdata destructor,
 * ksem_proc_dtor, releases the references).
 */
822 ksem_exechook(struct proc
*p
, void *arg
)
824 struct ksem_proc
*kp
;
826 kp
= proc_getspecific(p
, ksem_specificdata_key
);
828 proc_setspecific(p
, ksem_specificdata_key
, NULL
);
/*
 * ksem_fini: module teardown.  Unregisters the syscalls; if that fails
 * under the `interface` (user-initiated unload) path, re-registers them
 * and bails — presumably to leave the module usable; TODO confirm, the
 * branching between the visible statements is outside this view.
 * Otherwise removes the exec/fork hooks, deletes the specificdata key,
 * and destroys the global mutex.
 */
834 ksem_fini(bool interface
)
839 error
= syscall_disestablish(NULL
, ksem_syscalls
);
/* Roll the syscall table back in on a failed disestablish. */
844 error
= syscall_establish(NULL
, ksem_syscalls
);
849 exechook_disestablish(ksem_ehook
);
850 forkhook_disestablish(ksem_fhook
);
851 proc_specific_key_delete(ksem_specificdata_key
);
852 mutex_destroy(&ksem_mutex
);
/*
 * Module initialization body (the enclosing function's header is outside
 * this view — presumably ksem_init; TODO confirm).  Sets up the global
 * mutex and hash buckets, creates the per-process specificdata key,
 * installs the exec/fork hooks, and registers the syscall table; on
 * registration failure the partial setup is unwound via ksem_fini(false).
 */
861 mutex_init(&ksem_mutex
, MUTEX_DEFAULT
, IPL_NONE
);
862 for (i
= 0; i
< SEM_HASHTBL_SIZE
; i
++)
863 LIST_INIT(&ksem_hash
[i
]);
864 error
= proc_specific_key_create(&ksem_specificdata_key
,
/* Key creation failed: undo the mutex before returning. */
867 mutex_destroy(&ksem_mutex
);
870 ksem_ehook
= exechook_establish(ksem_exechook
, NULL
);
871 ksem_fhook
= forkhook_establish(ksem_forkhook
);
872 error
= syscall_establish(NULL
, ksem_syscalls
);
/* Syscall registration failed: tear everything back down. */
874 (void)ksem_fini(false);
/*
 * ksem_modcmd: module(9) command dispatcher — INIT sets the module up
 * (body outside this view); FINI tears it down via ksem_fini(true).
 */
880 ksem_modcmd(modcmd_t cmd
, void *arg
)
884 case MODULE_CMD_INIT
:
887 case MODULE_CMD_FINI
:
888 return ksem_fini(true);