Expand PMF_FN_* macros.
[netbsd-mini2440.git] / sys / kern / uipc_sem.c
blob4a969cf2579246b26ed95965c60e4b4bf1ddc336
1 /* $NetBSD: uipc_sem.c,v 1.28 2008/11/14 13:35:25 ad Exp $ */
3 /*-
4 * Copyright (c) 2003, 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved.
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of Wasabi Systems, Inc, and by Andrew Doran.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
33 * Copyright (c) 2002 Alfred Perlstein <alfred@FreeBSD.org>
34 * All rights reserved.
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
45 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
49 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55 * SUCH DAMAGE.
58 #include <sys/cdefs.h>
59 __KERNEL_RCSID(0, "$NetBSD: uipc_sem.c,v 1.28 2008/11/14 13:35:25 ad Exp $");
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <sys/kernel.h>
64 #include <sys/proc.h>
65 #include <sys/ksem.h>
66 #include <sys/syscall.h>
67 #include <sys/stat.h>
68 #include <sys/kmem.h>
69 #include <sys/fcntl.h>
70 #include <sys/kauth.h>
71 #include <sys/module.h>
72 #include <sys/mount.h>
73 #include <sys/syscall.h>
74 #include <sys/syscallargs.h>
75 #include <sys/syscallvar.h>
/* Limits and hashing parameters for kernel semaphores. */
#define	SEM_MAX_NAMELEN		14
#define	SEM_VALUE_MAX		(~0U)
#define	SEM_HASHTBL_SIZE	13

#define	SEM_TO_ID(x)	(((x)->ks_id))
#define	SEM_HASH(id)	((id) % SEM_HASHTBL_SIZE)

MODULE(MODULE_CLASS_MISC, ksem, NULL);
86 static const struct syscall_package ksem_syscalls[] = {
87 { SYS__ksem_init, 0, (sy_call_t *)sys__ksem_init },
88 { SYS__ksem_open, 0, (sy_call_t *)sys__ksem_open },
89 { SYS__ksem_unlink, 0, (sy_call_t *)sys__ksem_unlink },
90 { SYS__ksem_close, 0, (sy_call_t *)sys__ksem_close },
91 { SYS__ksem_post, 0, (sy_call_t *)sys__ksem_post },
92 { SYS__ksem_wait, 0, (sy_call_t *)sys__ksem_wait },
93 { SYS__ksem_trywait, 0, (sy_call_t *)sys__ksem_trywait },
94 { SYS__ksem_getvalue, 0, (sy_call_t *)sys__ksem_getvalue },
95 { SYS__ksem_destroy, 0, (sy_call_t *)sys__ksem_destroy },
96 { 0, 0, NULL },
100 * Note: to read the ks_name member, you need either the ks_interlock
101 * or the ksem_mutex. To write the ks_name member, you need both. Make
102 * sure the order is ksem_mutex -> ks_interlock.
104 struct ksem {
105 LIST_ENTRY(ksem) ks_entry; /* global list entry */
106 LIST_ENTRY(ksem) ks_hash; /* hash list entry */
107 kmutex_t ks_interlock; /* lock on this ksem */
108 kcondvar_t ks_cv; /* condition variable */
109 unsigned int ks_ref; /* number of references */
110 char *ks_name; /* if named, this is the name */
111 size_t ks_namelen; /* length of name */
112 mode_t ks_mode; /* protection bits */
113 uid_t ks_uid; /* creator uid */
114 gid_t ks_gid; /* creator gid */
115 unsigned int ks_value; /* current value */
116 unsigned int ks_waiters; /* number of waiters */
117 intptr_t ks_id; /* unique identifier */
120 struct ksem_ref {
121 LIST_ENTRY(ksem_ref) ksr_list;
122 struct ksem *ksr_ksem;
125 struct ksem_proc {
126 krwlock_t kp_lock;
127 LIST_HEAD(, ksem_ref) kp_ksems;
130 LIST_HEAD(ksem_list, ksem);
133 * ksem_mutex protects ksem_head and nsems. Only named semaphores go
134 * onto ksem_head.
136 static kmutex_t ksem_mutex;
137 static struct ksem_list ksem_head = LIST_HEAD_INITIALIZER(&ksem_head);
138 static struct ksem_list ksem_hash[SEM_HASHTBL_SIZE];
139 static int nsems = 0;
142 * ksem_counter is the last assigned intptr_t. It needs to be COMPAT_NETBSD32
143 * friendly, even though intptr_t itself is defined as uintptr_t.
145 static uint32_t ksem_counter = 1;
147 static specificdata_key_t ksem_specificdata_key;
148 static void *ksem_ehook;
149 static void *ksem_fhook;
151 static void
152 ksem_free(struct ksem *ks)
155 KASSERT(mutex_owned(&ks->ks_interlock));
158 * If the ksem is anonymous (or has been unlinked), then
159 * this is the end if its life.
161 if (ks->ks_name == NULL) {
162 mutex_exit(&ks->ks_interlock);
163 mutex_destroy(&ks->ks_interlock);
164 cv_destroy(&ks->ks_cv);
166 mutex_enter(&ksem_mutex);
167 nsems--;
168 LIST_REMOVE(ks, ks_hash);
169 mutex_exit(&ksem_mutex);
171 kmem_free(ks, sizeof(*ks));
172 return;
174 mutex_exit(&ks->ks_interlock);
177 static inline void
178 ksem_addref(struct ksem *ks)
181 KASSERT(mutex_owned(&ks->ks_interlock));
182 ks->ks_ref++;
183 KASSERT(ks->ks_ref != 0);
186 static inline void
187 ksem_delref(struct ksem *ks)
190 KASSERT(mutex_owned(&ks->ks_interlock));
191 KASSERT(ks->ks_ref != 0);
192 if (--ks->ks_ref == 0) {
193 ksem_free(ks);
194 return;
196 mutex_exit(&ks->ks_interlock);
199 static struct ksem_proc *
200 ksem_proc_alloc(void)
202 struct ksem_proc *kp;
204 kp = kmem_alloc(sizeof(*kp), KM_SLEEP);
205 rw_init(&kp->kp_lock);
206 LIST_INIT(&kp->kp_ksems);
208 return (kp);
211 static void
212 ksem_proc_dtor(void *arg)
214 struct ksem_proc *kp = arg;
215 struct ksem_ref *ksr;
217 rw_enter(&kp->kp_lock, RW_WRITER);
219 while ((ksr = LIST_FIRST(&kp->kp_ksems)) != NULL) {
220 LIST_REMOVE(ksr, ksr_list);
221 mutex_enter(&ksr->ksr_ksem->ks_interlock);
222 ksem_delref(ksr->ksr_ksem);
223 kmem_free(ksr, sizeof(*ksr));
226 rw_exit(&kp->kp_lock);
227 rw_destroy(&kp->kp_lock);
228 kmem_free(kp, sizeof(*kp));
231 static void
232 ksem_add_proc(struct proc *p, struct ksem *ks)
234 struct ksem_proc *kp;
235 struct ksem_ref *ksr;
237 kp = proc_getspecific(p, ksem_specificdata_key);
238 if (kp == NULL) {
239 kp = ksem_proc_alloc();
240 proc_setspecific(p, ksem_specificdata_key, kp);
243 ksr = kmem_alloc(sizeof(*ksr), KM_SLEEP);
244 ksr->ksr_ksem = ks;
246 rw_enter(&kp->kp_lock, RW_WRITER);
247 LIST_INSERT_HEAD(&kp->kp_ksems, ksr, ksr_list);
248 rw_exit(&kp->kp_lock);
251 /* We MUST have a write lock on the ksem_proc list! */
252 static struct ksem_ref *
253 ksem_drop_proc(struct ksem_proc *kp, struct ksem *ks)
255 struct ksem_ref *ksr;
257 KASSERT(mutex_owned(&ks->ks_interlock));
258 LIST_FOREACH(ksr, &kp->kp_ksems, ksr_list) {
259 if (ksr->ksr_ksem == ks) {
260 ksem_delref(ks);
261 LIST_REMOVE(ksr, ksr_list);
262 return (ksr);
265 #ifdef DIAGNOSTIC
266 panic("ksem_drop_proc: ksem_proc %p ksem %p", kp, ks);
267 #endif
268 return (NULL);
271 static int
272 ksem_perm(struct lwp *l, struct ksem *ks)
274 kauth_cred_t uc;
276 KASSERT(mutex_owned(&ks->ks_interlock));
277 uc = l->l_cred;
278 if ((kauth_cred_geteuid(uc) == ks->ks_uid && (ks->ks_mode & S_IWUSR) != 0) ||
279 (kauth_cred_getegid(uc) == ks->ks_gid && (ks->ks_mode & S_IWGRP) != 0) ||
280 (ks->ks_mode & S_IWOTH) != 0 ||
281 kauth_authorize_generic(uc, KAUTH_GENERIC_ISSUSER, NULL) == 0)
282 return (0);
283 return (EPERM);
286 static struct ksem *
287 ksem_lookup_byid(intptr_t id)
289 struct ksem *ks;
291 KASSERT(mutex_owned(&ksem_mutex));
292 LIST_FOREACH(ks, &ksem_hash[SEM_HASH(id)], ks_hash) {
293 if (ks->ks_id == id)
294 return ks;
296 return NULL;
299 static struct ksem *
300 ksem_lookup_byname(const char *name)
302 struct ksem *ks;
304 KASSERT(mutex_owned(&ksem_mutex));
305 LIST_FOREACH(ks, &ksem_head, ks_entry) {
306 if (strcmp(ks->ks_name, name) == 0) {
307 mutex_enter(&ks->ks_interlock);
308 return (ks);
311 return (NULL);
314 static int
315 ksem_create(struct lwp *l, const char *name, struct ksem **ksret,
316 mode_t mode, unsigned int value)
318 struct ksem *ret;
319 kauth_cred_t uc;
320 size_t len;
322 uc = l->l_cred;
323 if (value > SEM_VALUE_MAX)
324 return (EINVAL);
325 ret = kmem_zalloc(sizeof(*ret), KM_SLEEP);
326 if (name != NULL) {
327 len = strlen(name);
328 if (len > SEM_MAX_NAMELEN) {
329 kmem_free(ret, sizeof(*ret));
330 return (ENAMETOOLONG);
332 /* name must start with a '/' but not contain one. */
333 if (*name != '/' || len < 2 || strchr(name + 1, '/') != NULL) {
334 kmem_free(ret, sizeof(*ret));
335 return (EINVAL);
337 ret->ks_namelen = len + 1;
338 ret->ks_name = kmem_alloc(ret->ks_namelen, KM_SLEEP);
339 strlcpy(ret->ks_name, name, len + 1);
340 } else
341 ret->ks_name = NULL;
342 ret->ks_mode = mode;
343 ret->ks_value = value;
344 ret->ks_ref = 1;
345 ret->ks_waiters = 0;
346 ret->ks_uid = kauth_cred_geteuid(uc);
347 ret->ks_gid = kauth_cred_getegid(uc);
348 mutex_init(&ret->ks_interlock, MUTEX_DEFAULT, IPL_NONE);
349 cv_init(&ret->ks_cv, "psem");
351 mutex_enter(&ksem_mutex);
352 if (nsems >= ksem_max) {
353 mutex_exit(&ksem_mutex);
354 if (ret->ks_name != NULL)
355 kmem_free(ret->ks_name, ret->ks_namelen);
356 kmem_free(ret, sizeof(*ret));
357 return (ENFILE);
359 nsems++;
360 while (ksem_lookup_byid(ksem_counter) != NULL) {
361 ksem_counter++;
362 /* 0 is a special value for libpthread */
363 if (ksem_counter == 0)
364 ksem_counter++;
366 ret->ks_id = ksem_counter;
367 LIST_INSERT_HEAD(&ksem_hash[SEM_HASH(ret->ks_id)], ret, ks_hash);
368 mutex_exit(&ksem_mutex);
370 *ksret = ret;
371 return (0);
375 sys__ksem_init(struct lwp *l, const struct sys__ksem_init_args *uap, register_t *retval)
377 /* {
378 unsigned int value;
379 intptr_t *idp;
380 } */
382 return do_ksem_init(l, SCARG(uap, value), SCARG(uap, idp), copyout);
386 do_ksem_init(struct lwp *l, unsigned int value, intptr_t *idp,
387 copyout_t docopyout)
389 struct ksem *ks;
390 intptr_t id;
391 int error;
393 /* Note the mode does not matter for anonymous semaphores. */
394 error = ksem_create(l, NULL, &ks, 0, value);
395 if (error)
396 return (error);
397 id = SEM_TO_ID(ks);
398 error = (*docopyout)(&id, idp, sizeof(id));
399 if (error) {
400 mutex_enter(&ks->ks_interlock);
401 ksem_delref(ks);
402 return (error);
405 ksem_add_proc(l->l_proc, ks);
407 return (0);
411 sys__ksem_open(struct lwp *l, const struct sys__ksem_open_args *uap, register_t *retval)
413 /* {
414 const char *name;
415 int oflag;
416 mode_t mode;
417 unsigned int value;
418 intptr_t *idp;
419 } */
421 return do_ksem_open(l, SCARG(uap, name), SCARG(uap, oflag),
422 SCARG(uap, mode), SCARG(uap, value), SCARG(uap, idp), copyout);
426 do_ksem_open(struct lwp *l, const char *semname, int oflag, mode_t mode,
427 unsigned int value, intptr_t *idp, copyout_t docopyout)
429 char name[SEM_MAX_NAMELEN + 1];
430 size_t done;
431 int error;
432 struct ksem *ksnew, *ks;
433 intptr_t id;
435 error = copyinstr(semname, name, sizeof(name), &done);
436 if (error)
437 return (error);
439 ksnew = NULL;
440 mutex_enter(&ksem_mutex);
441 ks = ksem_lookup_byname(name);
443 /* Found one? */
444 if (ks != NULL) {
445 /* Check for exclusive create. */
446 if (oflag & O_EXCL) {
447 mutex_exit(&ks->ks_interlock);
448 mutex_exit(&ksem_mutex);
449 return (EEXIST);
451 found_one:
453 * Verify permissions. If we can access it, add
454 * this process's reference.
456 KASSERT(mutex_owned(&ks->ks_interlock));
457 error = ksem_perm(l, ks);
458 if (error == 0)
459 ksem_addref(ks);
460 mutex_exit(&ks->ks_interlock);
461 mutex_exit(&ksem_mutex);
462 if (error)
463 return (error);
465 id = SEM_TO_ID(ks);
466 error = (*docopyout)(&id, idp, sizeof(id));
467 if (error) {
468 mutex_enter(&ks->ks_interlock);
469 ksem_delref(ks);
470 return (error);
473 ksem_add_proc(l->l_proc, ks);
475 return (0);
479 * didn't ask for creation? error.
481 if ((oflag & O_CREAT) == 0) {
482 mutex_exit(&ksem_mutex);
483 return (ENOENT);
487 * We may block during creation, so drop the lock.
489 mutex_exit(&ksem_mutex);
490 error = ksem_create(l, name, &ksnew, mode, value);
491 if (error != 0)
492 return (error);
494 id = SEM_TO_ID(ksnew);
495 error = (*docopyout)(&id, idp, sizeof(id));
496 if (error) {
497 kmem_free(ksnew->ks_name, ksnew->ks_namelen);
498 ksnew->ks_name = NULL;
500 mutex_enter(&ksnew->ks_interlock);
501 ksem_delref(ksnew);
502 return (error);
506 * We need to make sure we haven't lost a race while
507 * allocating during creation.
509 mutex_enter(&ksem_mutex);
510 if ((ks = ksem_lookup_byname(name)) != NULL) {
511 if (oflag & O_EXCL) {
512 mutex_exit(&ks->ks_interlock);
513 mutex_exit(&ksem_mutex);
515 kmem_free(ksnew->ks_name, ksnew->ks_namelen);
516 ksnew->ks_name = NULL;
518 mutex_enter(&ksnew->ks_interlock);
519 ksem_delref(ksnew);
520 return (EEXIST);
522 goto found_one;
523 } else {
524 /* ksnew already has its initial reference. */
525 LIST_INSERT_HEAD(&ksem_head, ksnew, ks_entry);
526 mutex_exit(&ksem_mutex);
528 ksem_add_proc(l->l_proc, ksnew);
530 return (error);
533 /* We must have a read lock on the ksem_proc list! */
534 static struct ksem *
535 ksem_lookup_proc(struct ksem_proc *kp, intptr_t id)
537 struct ksem_ref *ksr;
539 LIST_FOREACH(ksr, &kp->kp_ksems, ksr_list) {
540 if (id == SEM_TO_ID(ksr->ksr_ksem)) {
541 mutex_enter(&ksr->ksr_ksem->ks_interlock);
542 return (ksr->ksr_ksem);
546 return (NULL);
550 sys__ksem_unlink(struct lwp *l, const struct sys__ksem_unlink_args *uap, register_t *retval)
552 /* {
553 const char *name;
554 } */
555 char name[SEM_MAX_NAMELEN + 1], *cp;
556 size_t done, len;
557 struct ksem *ks;
558 int error;
560 error = copyinstr(SCARG(uap, name), name, sizeof(name), &done);
561 if (error)
562 return error;
564 mutex_enter(&ksem_mutex);
565 ks = ksem_lookup_byname(name);
566 if (ks == NULL) {
567 mutex_exit(&ksem_mutex);
568 return (ENOENT);
571 KASSERT(mutex_owned(&ks->ks_interlock));
573 LIST_REMOVE(ks, ks_entry);
574 cp = ks->ks_name;
575 len = ks->ks_namelen;
576 ks->ks_name = NULL;
578 mutex_exit(&ksem_mutex);
580 if (ks->ks_ref == 0)
581 ksem_free(ks);
582 else
583 mutex_exit(&ks->ks_interlock);
585 kmem_free(cp, len);
587 return (0);
591 sys__ksem_close(struct lwp *l, const struct sys__ksem_close_args *uap, register_t *retval)
593 /* {
594 intptr_t id;
595 } */
596 struct ksem_proc *kp;
597 struct ksem_ref *ksr;
598 struct ksem *ks;
600 kp = proc_getspecific(l->l_proc, ksem_specificdata_key);
601 if (kp == NULL)
602 return (EINVAL);
604 rw_enter(&kp->kp_lock, RW_WRITER);
606 ks = ksem_lookup_proc(kp, SCARG(uap, id));
607 if (ks == NULL) {
608 rw_exit(&kp->kp_lock);
609 return (EINVAL);
612 KASSERT(mutex_owned(&ks->ks_interlock));
613 if (ks->ks_name == NULL) {
614 mutex_exit(&ks->ks_interlock);
615 rw_exit(&kp->kp_lock);
616 return (EINVAL);
619 ksr = ksem_drop_proc(kp, ks);
620 rw_exit(&kp->kp_lock);
621 kmem_free(ksr, sizeof(*ksr));
623 return (0);
627 sys__ksem_post(struct lwp *l, const struct sys__ksem_post_args *uap, register_t *retval)
629 /* {
630 intptr_t id;
631 } */
632 struct ksem_proc *kp;
633 struct ksem *ks;
634 int error;
636 kp = proc_getspecific(l->l_proc, ksem_specificdata_key);
637 if (kp == NULL)
638 return (EINVAL);
640 rw_enter(&kp->kp_lock, RW_READER);
641 ks = ksem_lookup_proc(kp, SCARG(uap, id));
642 rw_exit(&kp->kp_lock);
643 if (ks == NULL)
644 return (EINVAL);
646 KASSERT(mutex_owned(&ks->ks_interlock));
647 if (ks->ks_value == SEM_VALUE_MAX) {
648 error = EOVERFLOW;
649 goto out;
651 ++ks->ks_value;
652 if (ks->ks_waiters)
653 cv_broadcast(&ks->ks_cv);
654 error = 0;
655 out:
656 mutex_exit(&ks->ks_interlock);
657 return (error);
660 static int
661 ksem_wait(struct lwp *l, intptr_t id, int tryflag)
663 struct ksem_proc *kp;
664 struct ksem *ks;
665 int error;
667 kp = proc_getspecific(l->l_proc, ksem_specificdata_key);
668 if (kp == NULL)
669 return (EINVAL);
671 rw_enter(&kp->kp_lock, RW_READER);
672 ks = ksem_lookup_proc(kp, id);
673 rw_exit(&kp->kp_lock);
674 if (ks == NULL)
675 return (EINVAL);
677 KASSERT(mutex_owned(&ks->ks_interlock));
678 ksem_addref(ks);
679 while (ks->ks_value == 0) {
680 ks->ks_waiters++;
681 if (tryflag)
682 error = EAGAIN;
683 else
684 error = cv_wait_sig(&ks->ks_cv, &ks->ks_interlock);
685 ks->ks_waiters--;
686 if (error)
687 goto out;
689 ks->ks_value--;
690 error = 0;
691 out:
692 ksem_delref(ks);
693 return (error);
697 sys__ksem_wait(struct lwp *l, const struct sys__ksem_wait_args *uap, register_t *retval)
699 /* {
700 intptr_t id;
701 } */
703 return ksem_wait(l, SCARG(uap, id), 0);
707 sys__ksem_trywait(struct lwp *l, const struct sys__ksem_trywait_args *uap, register_t *retval)
709 /* {
710 intptr_t id;
711 } */
713 return ksem_wait(l, SCARG(uap, id), 1);
717 sys__ksem_getvalue(struct lwp *l, const struct sys__ksem_getvalue_args *uap, register_t *retval)
719 /* {
720 intptr_t id;
721 unsigned int *value;
722 } */
723 struct ksem_proc *kp;
724 struct ksem *ks;
725 unsigned int val;
727 kp = proc_getspecific(l->l_proc, ksem_specificdata_key);
728 if (kp == NULL)
729 return (EINVAL);
731 rw_enter(&kp->kp_lock, RW_READER);
732 ks = ksem_lookup_proc(kp, SCARG(uap, id));
733 rw_exit(&kp->kp_lock);
734 if (ks == NULL)
735 return (EINVAL);
737 KASSERT(mutex_owned(&ks->ks_interlock));
738 val = ks->ks_value;
739 mutex_exit(&ks->ks_interlock);
741 return (copyout(&val, SCARG(uap, value), sizeof(val)));
745 sys__ksem_destroy(struct lwp *l, const struct sys__ksem_destroy_args *uap, register_t *retval)
747 /* {
748 intptr_t id;
749 } */
750 struct ksem_proc *kp;
751 struct ksem_ref *ksr;
752 struct ksem *ks;
754 kp = proc_getspecific(l->l_proc, ksem_specificdata_key);
755 if (kp == NULL)
756 return (EINVAL);
758 rw_enter(&kp->kp_lock, RW_WRITER);
760 ks = ksem_lookup_proc(kp, SCARG(uap, id));
761 if (ks == NULL) {
762 rw_exit(&kp->kp_lock);
763 return (EINVAL);
766 KASSERT(mutex_owned(&ks->ks_interlock));
769 * XXX This misses named semaphores which have been unlink'd,
770 * XXX but since behavior of destroying a named semaphore is
771 * XXX undefined, this is technically allowed.
773 if (ks->ks_name != NULL) {
774 mutex_exit(&ks->ks_interlock);
775 rw_exit(&kp->kp_lock);
776 return (EINVAL);
779 if (ks->ks_waiters) {
780 mutex_exit(&ks->ks_interlock);
781 rw_exit(&kp->kp_lock);
782 return (EBUSY);
785 ksr = ksem_drop_proc(kp, ks);
786 rw_exit(&kp->kp_lock);
787 kmem_free(ksr, sizeof(*ksr));
789 return (0);
792 static void
793 ksem_forkhook(struct proc *p2, struct proc *p1)
795 struct ksem_proc *kp1, *kp2;
796 struct ksem_ref *ksr, *ksr1;
798 kp1 = proc_getspecific(p1, ksem_specificdata_key);
799 if (kp1 == NULL)
800 return;
802 kp2 = ksem_proc_alloc();
804 rw_enter(&kp1->kp_lock, RW_READER);
806 if (!LIST_EMPTY(&kp1->kp_ksems)) {
807 LIST_FOREACH(ksr, &kp1->kp_ksems, ksr_list) {
808 ksr1 = kmem_alloc(sizeof(*ksr), KM_SLEEP);
809 ksr1->ksr_ksem = ksr->ksr_ksem;
810 mutex_enter(&ksr->ksr_ksem->ks_interlock);
811 ksem_addref(ksr->ksr_ksem);
812 mutex_exit(&ksr->ksr_ksem->ks_interlock);
813 LIST_INSERT_HEAD(&kp2->kp_ksems, ksr1, ksr_list);
817 rw_exit(&kp1->kp_lock);
818 proc_setspecific(p2, ksem_specificdata_key, kp2);
821 static void
822 ksem_exechook(struct proc *p, void *arg)
824 struct ksem_proc *kp;
826 kp = proc_getspecific(p, ksem_specificdata_key);
827 if (kp != NULL) {
828 proc_setspecific(p, ksem_specificdata_key, NULL);
829 ksem_proc_dtor(kp);
833 static int
834 ksem_fini(bool interface)
836 int error;
838 if (interface) {
839 error = syscall_disestablish(NULL, ksem_syscalls);
840 if (error != 0) {
841 return error;
843 if (nsems != 0) {
844 error = syscall_establish(NULL, ksem_syscalls);
845 KASSERT(error == 0);
846 return EBUSY;
849 exechook_disestablish(ksem_ehook);
850 forkhook_disestablish(ksem_fhook);
851 proc_specific_key_delete(ksem_specificdata_key);
852 mutex_destroy(&ksem_mutex);
853 return 0;
856 static int
857 ksem_init(void)
859 int error, i;
861 mutex_init(&ksem_mutex, MUTEX_DEFAULT, IPL_NONE);
862 for (i = 0; i < SEM_HASHTBL_SIZE; i++)
863 LIST_INIT(&ksem_hash[i]);
864 error = proc_specific_key_create(&ksem_specificdata_key,
865 ksem_proc_dtor);
866 if (error != 0) {
867 mutex_destroy(&ksem_mutex);
868 return error;
870 ksem_ehook = exechook_establish(ksem_exechook, NULL);
871 ksem_fhook = forkhook_establish(ksem_forkhook);
872 error = syscall_establish(NULL, ksem_syscalls);
873 if (error != 0) {
874 (void)ksem_fini(false);
876 return error;
879 static int
880 ksem_modcmd(modcmd_t cmd, void *arg)
883 switch (cmd) {
884 case MODULE_CMD_INIT:
885 return ksem_init();
887 case MODULE_CMD_FINI:
888 return ksem_fini(true);
890 default:
891 return ENOTTY;