/*	$NetBSD: irix_usema.c,v 1.32 2008/05/09 20:49:14 tnn Exp $ */

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Emmanuel Dreyfus.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: irix_usema.c,v 1.32 2008/05/09 20:49:14 tnn Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/select.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/rwlock.h>
#include <sys/device.h>
#include <sys/vnode.h>
#include <sys/vnode_if.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/queue.h>
#include <sys/conf.h>

#include <miscfs/genfs/genfs.h>

#include <ufs/ufs/inode.h>
#include <ufs/ufs/dir.h>
#include <ufs/ufs/dinode.h>
#include <ufs/ufs/ufs_extern.h>

#include <compat/irix/irix_types.h>
#include <compat/irix/irix_signal.h>
#include <compat/irix/irix_usema.h>
#include <compat/irix/irix_ioctl.h>
#include <compat/irix/irix_syscallargs.h>

const struct cdevsw irix_usema_cdevsw = {
        nullopen, nullclose, noread, nowrite,
        noioctl, nostop, notty, nopoll, nommap, nokqfilter,
};
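
/*
 * Note that every entry in the character device switch above is a no-op
 * stub.  The real work for the usemaclone device is done through the
 * custom vnode operation vector defined below (irix_usema_ioctl,
 * irix_usema_poll, irix_usema_close, ...), reached through the vnode
 * duplicated by the IRIX open(2) emulation rather than through the cdevsw.
 */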

/*
 * semaphore list, and operations on the list
 */
static LIST_HEAD(irix_usema_reclist, irix_usema_rec) irix_usema_reclist;
static krwlock_t irix_usema_reclist_lock;

static struct irix_usema_rec *iur_lookup_by_vn(struct vnode *);
static struct irix_usema_rec *iur_lookup_by_sem(struct irix_semaphore *);
static struct irix_usema_rec *iur_insert
    (struct irix_semaphore *, struct vnode *, struct proc *);
static void iur_remove(struct irix_usema_rec *);
static struct irix_waiting_proc_rec *iur_proc_queue
    (struct irix_usema_rec *, struct proc *);
static void iur_proc_dequeue
    (struct irix_usema_rec *, struct irix_waiting_proc_rec *);
static void iur_proc_release
    (struct irix_usema_rec *, struct irix_waiting_proc_rec *);
static int iur_proc_isreleased(struct irix_usema_rec *, struct proc *);
static struct irix_waiting_proc_rec *iur_proc_getfirst
    (struct irix_usema_rec *);
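
/*
 * As used below, each irix_usema_rec (declared in irix_usema.h) ties a
 * userland struct irix_semaphore, and optionally the usemaclone vnode it
 * was registered through, to the list of processes currently blocked on
 * it (iur_waiting_p) and the list of processes already released by an
 * unblock operation (iur_released_p).  Records are created by
 * iur_insert(), from irix_usema_ioctl() (IRIX_UIOCIDADDR) or
 * irix_sys_usync_cntl() (*_BLOCK commands), and torn down by iur_remove().
 */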

/*
 * In order to define a custom vnode operation vector for the usemaclone
 * driver, we need to define a dummy filesystem, featuring just a null
 * init function and the vnode operation vector. This is defined by
 * irix_usema_dummy_vfsops, and registered to the kernel using vfs_attach
 * at driver attach time, in irix_usemaattach().
 */

struct vfsops irix_usema_dummy_vfsops = {
        "usema_dummy", 0,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, irix_usema_dummy_vfs_init, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        irix_usema_vnodeopv_descs,
};

void irix_usema_dummy_vfs_init(void) { return; } /* Do nothing */

const struct vnodeopv_desc * const irix_usema_vnodeopv_descs[] = {
        &irix_usema_opv_desc,
        NULL,
};

const struct vnodeopv_desc irix_usema_opv_desc =
        { &irix_usema_vnodeop_p, irix_usema_vnodeop_entries };

int (**irix_usema_vnodeop_p)(void *);
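
/*
 * A rough sketch of how the pieces above fit together: vfs_attach()
 * (called from irix_usemaattach() below) walks irix_usema_vnodeopv_descs
 * and, for irix_usema_opv_desc, allocates and fills the operations vector
 * that irix_usema_vnodeop_p points to, using the irix_usema_vnodeop_entries
 * table.  Vnodes created for the usemaclone device then use
 * irix_usema_vnodeop_p as their v_op vector, so that VOP_IOCTL(),
 * VOP_POLL() and friends resolve to the irix_usema_* handlers below
 * instead of the underlying filesystem's operations.
 */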

/*
 * Vnode operations on the usemaclone device
 */
const struct vnodeopv_entry_desc irix_usema_vnodeop_entries[] = {
        { &vop_default_desc, vn_default_error },
        { &vop_lookup_desc, genfs_nullop },
        { &vop_open_desc, genfs_nullop },
        { &vop_close_desc, irix_usema_close },
        { &vop_access_desc, irix_usema_access },
        { &vop_getattr_desc, irix_usema_getattr },
        { &vop_setattr_desc, irix_usema_setattr },
        { &vop_ioctl_desc, irix_usema_ioctl },
        { &vop_fcntl_desc, irix_usema_fcntl },
        { &vop_poll_desc, irix_usema_poll },
        { &vop_abortop_desc, genfs_abortop },
        { &vop_lock_desc, genfs_lock },
        { &vop_unlock_desc, genfs_unlock },
        { &vop_islocked_desc, genfs_islocked },
        { &vop_advlock_desc, genfs_nullop },
        { &vop_fsync_desc, genfs_nullop },
        { &vop_reclaim_desc, genfs_nullop },
        { &vop_revoke_desc, genfs_revoke },
        { &vop_inactive_desc, irix_usema_inactive },
        { NULL, NULL },
};

struct irix_usema_softc {
        struct device irix_usema_dev;
};

/*
 * Initialize the usema driver: prepare the chained lists
 * and attach the dummy filesystem we need to use custom vnode operations.
 */
void
irix_usemaattach(struct device *parent, struct device *self, void *aux)
{
        int error;

        rw_init(&irix_usema_reclist_lock);
        LIST_INIT(&irix_usema_reclist);

        if ((error = vfs_attach(&irix_usema_dummy_vfsops)) != 0)
                panic("irix_usemaattach: vfs_attach() failed");

        return;
}

/*
 * vnode operations on the device
 */
int
irix_usema_ioctl(void *v)
{
        struct vop_ioctl_args /* {
                struct vnode *a_vp;
                u_long a_command;
                void *a_data;
                int a_fflag;
                kauth_cred_t a_cred;
        } */ *ap = v;
        u_long cmd = ap->a_command;
        struct irix_ioctl_usrdata *iiu = ap->a_data;
        struct vnode *vp = ap->a_vp;
        struct irix_usema_rec *iur;
        struct irix_waiting_proc_rec *iwpr;
        void *data;
        register_t *retval;
        int error;

        /*
         * Some ioctl commands need to set the ioctl return value. In
         * irix_sys_ioctl(), we copy the return value address and the
         * original data argument to a struct irix_ioctl_usrdata.
         * The address of this structure is passed as the data argument
         * to the vnode layer. We therefore need to read this structure
         * to get the real data argument and the retval address.
         */
        data = iiu->iiu_data;
        retval = iiu->iiu_retval;
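
        /*
         * From the two accesses above, struct irix_ioctl_usrdata (declared
         * in irix_ioctl.h) is expected to look roughly like this minimal
         * sketch; the authoritative declaration lives in the header:
         *
         *	struct irix_ioctl_usrdata {
         *		void *iiu_data;		(original ioctl data argument)
         *		register_t *iiu_retval;	(where to store the retval)
         *	};
         */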

#ifdef DEBUG_IRIX
        printf("irix_usema_ioctl(): vp = %p, cmd = %lx, data = %p\n",
            vp, cmd, data);
#endif

        switch (cmd) {
        case IRIX_UIOCABLOCKQ:	/* semaphore has been blocked */
                if ((iur = iur_lookup_by_vn(vp)) == NULL)
                        return EBADF;

                iwpr = iur_proc_queue(iur, curlwp->l_proc);
                break;

        case IRIX_UIOCAUNBLOCKQ: /* semaphore has been unblocked */
                if ((iur = iur_lookup_by_vn(vp)) == NULL)
                        return EBADF;

                if ((iwpr = iur_proc_getfirst(iur)) != NULL) {
                        iur_proc_release(iur, iwpr);
                        rw_enter(&iur->iur_lock, RW_READER);
                        selnotify(&iur->iur_si, 0, 0);
                        rw_exit(&iur->iur_lock);
                }
                break;

        case IRIX_UIOCGETCOUNT:	/* get semaphore value */
                if ((iur = iur_lookup_by_vn(vp)) == NULL)
                        return EBADF;

                *retval = -iur->iur_waiting_count;
                break;

        case IRIX_UIOCIDADDR: {	/* register address of sem. owner field */
                struct irix_usema_idaddr iui;
                struct irix_semaphore *isp;

                if ((error = copyin(data, &iui, sizeof(iui))) != 0)
                        return error;

                /*
                 * iui.iui_oidp points to the is_oid field of struct
                 * irix_semaphore. We want the structure address itself.
                 */
                isp = NULL;
                isp = (struct irix_semaphore *)((u_long)(isp) -
                    (u_long)(&isp->is_oid) + (u_long)iui.iui_oidp);
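
                /*
                 * Since isp is NULL when &isp->is_oid is taken,
                 * (u_long)(&isp->is_oid) is just the byte offset of is_oid
                 * within struct irix_semaphore, so the computation above is
                 * equivalent to:
                 *
                 *	isp = (struct irix_semaphore *)
                 *	    ((char *)iui.iui_oidp -
                 *	    offsetof(struct irix_semaphore, is_oid));
                 */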

                if ((iur_insert(isp, vp, curlwp->l_proc)) == NULL)
                        return EFAULT;
                break;
        }

        default:
                printf("Warning: unimplemented IRIX usema ioctl command %ld\n",
                    (cmd & 0xff));
                break;
        }

        return 0;
}

int
irix_usema_poll(void *v)
{
        struct vop_poll_args /* {
                struct vnode *a_vp;
                int a_events;
        } */ *ap = v;
        int events = ap->a_events;
        struct vnode *vp = ap->a_vp;
        struct irix_usema_rec *iur;
        int check = POLLIN|POLLRDNORM|POLLRDBAND|POLLPRI;

#ifdef DEBUG_IRIX
        printf("irix_usema_poll() vn = %p, events = 0x%x\n", vp, events);
#endif
        if ((events & check) == 0)
                return 0;

        if ((iur = iur_lookup_by_vn(vp)) == NULL)
                return 0;

        rw_enter(&iur->iur_lock, RW_READER);
        if (iur_proc_isreleased(iur, curlwp->l_proc) == 0) {
                selrecord(curlwp, &iur->iur_si);
                rw_exit(&iur->iur_lock);
                return 0;
        }
        rw_exit(&iur->iur_lock);

        return (events & check);
}

int
irix_usema_close(void *v)
{
        struct vop_close_args /* {
                struct vnode *a_vp;
                int a_fflag;
                kauth_cred_t a_cred;
        } */ *ap = v;
        struct vnode *vp = ap->a_vp;
        struct vnode *rvp;
        struct irix_usema_rec *iur;
        int error;

#ifdef DEBUG_IRIX
        printf("irix_usema_close() vn = %p\n", vp);
#endif

        mutex_enter(&vp->v_interlock);

        /* vp is a vnode duplicated from rvp. eventually also close rvp */
        rvp = (struct vnode *)(vp->v_data);
        vrele(rvp); /* for vref() in irix_sys_open() */
        vp->v_data = NULL;

        if (ap->a_fflag & FWRITE)
                rvp->v_writecount--;
        vn_lock(rvp, LK_EXCLUSIVE | LK_RETRY);
        error = VOP_CLOSE(rvp, ap->a_fflag, ap->a_cred);
        vput(rvp);

        if ((iur = iur_lookup_by_vn(vp)) != NULL)
                iur_remove(iur);

        mutex_exit(&vp->v_interlock);

        return error;
}

/*
 * Try to apply setattr to the original vnode, not the duplicated one,
 * but still return 0 in case of failure (IRIX libc relies on this).
 */
int
irix_usema_setattr(void *v)
{
        struct vop_setattr_args /* {
                struct vnode *a_vp;
                struct vattr *a_vap;
                kauth_cred_t a_cred;
        } */ *ap = v;
        struct vnode *vp = (struct vnode *)(ap->a_vp->v_data);
        int error;

#ifdef DEBUG_IRIX
        printf("irix_usema_setattr()\n");
#endif
        error = VOP_SETATTR(vp, ap->a_vap, ap->a_cred);

        /* Silently ignore any error */
        return 0;
}

int
irix_usema_inactive(void *v)
{
        struct vop_inactive_args /* {
                struct vnode *a_vp;
        } */ *ap = v;

        VOP_UNLOCK(ap->a_vp, 0);
        vrecycle(ap->a_vp, NULL, curlwp);

        return 0;
}

/*
 * For fcntl, access and getattr vnode operations, we want to do the
 * operation on the original vnode, not the duplicated one.
 */
#define ___CONCAT(x,y)	__CONCAT(x,y)
#define __CONCAT3(x,y,z)	___CONCAT(__CONCAT(x,y),z)

#define IRIX_USEMA_VNOP_WRAP(op)					\
int									\
__CONCAT(irix_usema_,op)(v)						\
	void *v;							\
{									\
	struct __CONCAT3(vop_,op,_args) *ap = v;			\
	struct vnode *vp = (struct vnode *)(ap->a_vp->v_data);		\
	struct __CONCAT3(vop_,op,_args) a;				\
									\
	(void)memcpy(&a, ap, sizeof(a));				\
	a.a_vp = vp;							\
									\
	return VCALL(vp,VOFFSET(__CONCAT(vop_,op)),&a);			\
}

IRIX_USEMA_VNOP_WRAP(access)
IRIX_USEMA_VNOP_WRAP(getattr)
IRIX_USEMA_VNOP_WRAP(fcntl)
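
/*
 * As a rough illustration (modulo the old-style parameter declaration),
 * IRIX_USEMA_VNOP_WRAP(access) expands to approximately:
 *
 *	int
 *	irix_usema_access(void *v)
 *	{
 *		struct vop_access_args *ap = v;
 *		struct vnode *vp = (struct vnode *)(ap->a_vp->v_data);
 *		struct vop_access_args a;
 *
 *		(void)memcpy(&a, ap, sizeof(a));
 *		a.a_vp = vp;
 *		return VCALL(vp, VOFFSET(vop_access), &a);
 *	}
 *
 * i.e. the argument block is copied, a_vp is swapped for the original
 * vnode stashed in v_data, and the call is forwarded to that vnode's own
 * operation vector.
 */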

/*
 * The usync_cntl system call is not part of the usema driver,
 * but it is closely related to it.
 */
int
irix_sys_usync_cntl(struct lwp *l, const struct irix_sys_usync_cntl_args *uap, register_t *retval)
{
        /* {
                syscallarg(int) cmd;
                syscallarg(void *) arg;
        } */
        struct proc *p = l->l_proc;
        int error;
        struct irix_usync_arg iua;
        struct irix_usema_rec *iur;
        struct irix_waiting_proc_rec *iwpr;

        switch (SCARG(uap, cmd)) {
        case IRIX_USYNC_BLOCK:
                if ((error = copyin(SCARG(uap, arg), &iua, sizeof(iua))) != 0)
                        return error;

                if ((iur = iur_insert(iua.iua_sem, NULL, p)) == NULL)
                        return EFAULT;

                iwpr = iur_proc_queue(iur, p);
                (void)tsleep(iwpr, PZERO, "irix_usema", 0);
                break;

        case IRIX_USYNC_INTR_BLOCK:
                if ((error = copyin(SCARG(uap, arg), &iua, sizeof(iua))) != 0)
                        return error;

                if ((iur = iur_insert(iua.iua_sem, NULL, p)) == NULL)
                        return EFAULT;

                iwpr = iur_proc_queue(iur, p);
                (void)tsleep(iwpr, PZERO|PCATCH, "irix_usema", 0);
                break;
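
        /*
         * The only difference between the two *_BLOCK cases above is the
         * PCATCH flag passed to tsleep(): with PCATCH the sleep is
         * interruptible by signals, so IRIX_USYNC_INTR_BLOCK can return
         * early on signal delivery, while IRIX_USYNC_BLOCK waits for an
         * explicit wakeup() from one of the unblock operations below.
         */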

        case IRIX_USYNC_UNBLOCK_ALL:
                if ((error = copyin(SCARG(uap, arg), &iua, sizeof(iua))) != 0)
                        return error;

                if ((iur = iur_lookup_by_sem(iua.iua_sem)) == NULL)
                        return EINVAL;

                rw_enter(&iur->iur_lock, RW_READER);
                TAILQ_FOREACH(iwpr, &iur->iur_waiting_p, iwpr_list) {
                        wakeup((void *)iwpr);
                        iur_proc_dequeue(iur, iwpr);
                }
                iur_remove(iur);
                rw_exit(&iur->iur_lock);
                break;

        case IRIX_USYNC_UNBLOCK:
                if ((error = copyin(SCARG(uap, arg), &iua, sizeof(iua))) != 0)
                        return error;

                if ((iur = iur_lookup_by_sem(iua.iua_sem)) == NULL)
                        return EINVAL;

                if ((iwpr = iur_proc_getfirst(iur)) != NULL) {
                        wakeup((void *)iwpr);
                        iur_proc_dequeue(iur, iwpr);
                }

                if ((iwpr = iur_proc_getfirst(iur)) == NULL)
                        iur_remove(iur);
                break;

        case IRIX_USYNC_GET_STATE:
                if ((error = copyin(SCARG(uap, arg), &iua, sizeof(iua))) != 0)
                        return error;

                if ((iur = iur_lookup_by_sem(iua.iua_sem)) == NULL)
                        return 0; /* Not blocked, return 0 */

                *retval = -iur->iur_waiting_count;
                break;

        default:
                printf("Warning: unimplemented IRIX usync_cntl command %d\n",
                    SCARG(uap, cmd));
                return EINVAL;
        }

        return 0;
}

/* Operations on irix_usema_reclist */
static struct irix_usema_rec *
iur_lookup_by_vn(struct vnode *vp)
{
        struct irix_usema_rec *iur;

        rw_enter(&irix_usema_reclist_lock, RW_READER);
        LIST_FOREACH(iur, &irix_usema_reclist, iur_list)
                if (iur->iur_vn == vp)
                        break;
        rw_exit(&irix_usema_reclist_lock);
        return iur;
}

static struct irix_usema_rec *
iur_lookup_by_sem(struct irix_semaphore *sem)
{
        struct irix_usema_rec *iur;
        struct irix_semaphore is;
        int error;

        if ((error = copyin(sem, &is, sizeof(is))) != 0)
                return NULL;

        rw_enter(&irix_usema_reclist_lock, RW_READER);
        LIST_FOREACH(iur, &irix_usema_reclist, iur_list)
                if (iur->iur_sem == sem && iur->iur_shid == is.is_shid)
                        break;
        rw_exit(&irix_usema_reclist_lock);

        return iur;
}

static struct irix_usema_rec *
iur_insert(struct irix_semaphore *sem, struct vnode *vp, struct proc *p)
{
        struct irix_usema_rec *iur;
        struct irix_semaphore is;
        int error;

        if ((iur = iur_lookup_by_sem(sem)) != NULL)
                return iur;

        if ((error = copyin(sem, &is, sizeof(is))) != 0)
                return NULL;

        iur = malloc(sizeof(struct irix_usema_rec), M_DEVBUF, M_WAITOK);
        iur->iur_sem = sem;
        iur->iur_vn = vp;
        iur->iur_shid = is.is_shid;
        iur->iur_p = p;
        iur->iur_waiting_count = 0;
        rw_init(&iur->iur_lock);
        selinit(&iur->iur_si);
        TAILQ_INIT(&iur->iur_waiting_p);
        TAILQ_INIT(&iur->iur_released_p);
        rw_enter(&irix_usema_reclist_lock, RW_WRITER);
        LIST_INSERT_HEAD(&irix_usema_reclist, iur, iur_list);
        rw_exit(&irix_usema_reclist_lock);
        return iur;
}

static void
iur_remove(struct irix_usema_rec *iur)
{
        struct irix_waiting_proc_rec *iwpr;

        rw_enter(&iur->iur_lock, RW_WRITER);
waiting_restart:
        TAILQ_FOREACH(iwpr, &iur->iur_waiting_p, iwpr_list) {
                TAILQ_REMOVE(&iur->iur_waiting_p, iwpr, iwpr_list);
                free(iwpr, M_DEVBUF);
                /* iwpr is now invalid, restart */
                goto waiting_restart;
        }

released_restart:
        TAILQ_FOREACH(iwpr, &iur->iur_released_p, iwpr_list) {
                TAILQ_REMOVE(&iur->iur_released_p, iwpr, iwpr_list);
                free(iwpr, M_DEVBUF);
                /* iwpr is now invalid, restart */
                goto released_restart;
        }

        rw_enter(&irix_usema_reclist_lock, RW_WRITER);
        LIST_REMOVE(iur, iur_list);
        rw_exit(&irix_usema_reclist_lock);

        seldestroy(&iur->iur_si);
        rw_exit(&iur->iur_lock);
        rw_destroy(&iur->iur_lock);
        free(iur, M_DEVBUF);
        return;
}

static struct irix_waiting_proc_rec *
iur_proc_queue(struct irix_usema_rec *iur, struct proc *p)
{
        struct irix_waiting_proc_rec *iwpr;

        /* Do we have this iwpr on the released list? If we do, reuse it */
        rw_enter(&iur->iur_lock, RW_WRITER);
        TAILQ_FOREACH(iwpr, &iur->iur_released_p, iwpr_list) {
                if (iwpr->iwpr_p == p) {
                        TAILQ_REMOVE(&iur->iur_released_p, iwpr, iwpr_list);
                        goto got_iwpr;
                }
        }

        /* Otherwise, create a new one */
        iwpr = malloc(sizeof(struct irix_waiting_proc_rec), M_DEVBUF, M_WAITOK);
        iwpr->iwpr_p = p;
got_iwpr:
        TAILQ_INSERT_TAIL(&iur->iur_waiting_p, iwpr, iwpr_list);
        iur->iur_waiting_count++;
        rw_exit(&iur->iur_lock);
        return iwpr;
}

static void
iur_proc_dequeue(struct irix_usema_rec *iur, struct irix_waiting_proc_rec *iwpr)
{
        rw_enter(&iur->iur_lock, RW_WRITER);
        iur->iur_waiting_count--;
        TAILQ_REMOVE(&iur->iur_waiting_p, iwpr, iwpr_list);
        rw_exit(&iur->iur_lock);
        free(iwpr, M_DEVBUF);
        return;
}

static void
iur_proc_release(struct irix_usema_rec *iur, struct irix_waiting_proc_rec *iwpr)
{
        rw_enter(&iur->iur_lock, RW_WRITER);
        iur->iur_waiting_count--;
        TAILQ_REMOVE(&iur->iur_waiting_p, iwpr, iwpr_list);
        TAILQ_INSERT_TAIL(&iur->iur_released_p, iwpr, iwpr_list);
        rw_exit(&iur->iur_lock);
        return;
}

/* Should be called with iur_lock RW_READER held */
static int
iur_proc_isreleased(struct irix_usema_rec *iur, struct proc *p)
{
        struct irix_waiting_proc_rec *iwpr;
        int res = 0;

        TAILQ_FOREACH(iwpr, &iur->iur_released_p, iwpr_list) {
                if (iwpr->iwpr_p == p) {
                        res = 1;
                        break;
                }
        }
        return res;
}

static struct irix_waiting_proc_rec *
iur_proc_getfirst(struct irix_usema_rec *iur)
{
        struct irix_waiting_proc_rec *iwpr;

        rw_enter(&iur->iur_lock, RW_READER);
        iwpr = TAILQ_FIRST(&iur->iur_waiting_p);
        rw_exit(&iur->iur_lock);
        return iwpr;
}

/*
 * Clean up irix_sys_usync_cntl() allocations:
 * if new_p is NULL, free any structure allocated for process p,
 * otherwise change ownership of structures allocated for process p to new_p.
 */
void
irix_usema_exit_cleanup(struct proc *p, struct proc *new_p)
{
        struct irix_usema_rec *iur;

#ifdef DEBUG_IRIX
        printf("irix_usema_exit_cleanup(): p = %p, new_p = %p\n", p, new_p);
#endif
remove_restart:
        rw_enter(&irix_usema_reclist_lock, RW_WRITER);
        LIST_FOREACH(iur, &irix_usema_reclist, iur_list) {
                if (iur->iur_p != p)
                        continue;
                if (new_p == NULL) {
                        /*
                         * Release the lock now since iur_remove() needs to
                         * acquire an exclusive lock.
                         */
                        rw_exit(&irix_usema_reclist_lock);
                        iur_remove(iur);
                        /*
                         * iur is now invalid and we lost the lock, restart
                         */
                        goto remove_restart;
                } else {
                        iur->iur_p = new_p;
                }
        }
        rw_exit(&irix_usema_reclist_lock);

        return;
}

#ifdef DEBUG_IRIX
/*
 * This dumps all in-kernel information about processes waiting for
 * semaphores and processes that have been released by an operation
 * on a semaphore.
 */
void
irix_usema_debug(void)
{
        struct irix_usema_rec *iur;
        struct irix_waiting_proc_rec *iwpr;

        LIST_FOREACH(iur, &irix_usema_reclist, iur_list) {
                printf("iur %p\n", iur);
                printf("  iur->iur_vn = %p\n", iur->iur_vn);
                printf("  iur->iur_sem = %p\n", iur->iur_sem);
                printf("  iur->iur_shid = 0x%08x\n", iur->iur_shid);
                printf("  iur->iur_p = %p\n", iur->iur_p);
                printf("  iur->iur_waiting_count = %d\n",
                    iur->iur_waiting_count);
                printf("  Waiting processes\n");
                TAILQ_FOREACH(iwpr, &iur->iur_waiting_p, iwpr_list) {
                        printf("    iwpr %p: iwpr->iwpr_p = %p (pid %d)\n",
                            iwpr, iwpr->iwpr_p, iwpr->iwpr_p->p_pid);
                }
                printf("  Released processes\n");
                TAILQ_FOREACH(iwpr, &iur->iur_released_p, iwpr_list) {
                        printf("    iwpr %p: iwpr->iwpr_p = %p (pid %d)\n",
                            iwpr, iwpr->iwpr_p, iwpr->iwpr_p->p_pid);
                }
        }
}
#endif /* DEBUG_IRIX */