/* IPC-server process table, currently used for semaphores only. */
static struct iproc {
	struct sem_struct *ip_sem;	/* affected semaphore set, or NULL */
	struct sembuf *ip_sops;		/* pending operations (malloc'ed) */
	unsigned int ip_nsops;		/* number of pending operations */
	struct sembuf *ip_blkop;	/* pointer to operation that blocked */
	endpoint_t ip_endpt;		/* process endpoint */
	pid_t ip_pid;			/* process PID */
	TAILQ_ENTRY(iproc) ip_next;	/* next waiting process */
} iproc[NR_PROCS];

struct semaphore {
	unsigned short semval;		/* semaphore value */
	unsigned short semzcnt;		/* # waiting for zero */
	unsigned short semncnt;		/* # waiting for increase */
	pid_t sempid;			/* process that did last op */
};
/*
 * For the list of waiting processes, we use a doubly linked tail queue.  In
 * order to maintain a basic degree of fairness, we keep the pending processes
 * in FCFS (well, at least first-tested) order, which means we need to be able
 * to add new processes at the end of the list.  In order to remove waiting
 * processes in O(1) rather than O(n) time, we need a doubly linked list; in
 * the common case we do have the element's predecessor, but STAILQ_REMOVE is
 * O(n) anyway and NetBSD has no STAILQ_REMOVE_AFTER yet.
 *
 * We use one list per semaphore set: semop(2) affects only one semaphore set,
 * but it may involve operations on multiple semaphores within the set.  While
 * it is possible to recheck only semaphores that were affected by a particular
 * operation, and to associate waiting lists with individual semaphores, the
 * number of waiting processes is currently not expected to be high enough to
 * justify the extra complexity of such an implementation.
 */
struct sem_struct {
	struct semid_ds semid_ds;
	struct semaphore sems[SEMMSL];
	TAILQ_HEAD(waiters, iproc) waiters;
};

static struct sem_struct sem_list[SEMMNI];
static unsigned int sem_list_nr = 0;	/* highest in-use slot number plus one */
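
/*
 * For illustration only: a minimal, self-contained sketch of how the
 * <sys/queue.h> tail queue used above behaves.  The "node" type and the
 * demo() function are hypothetical and not part of this server.
 *
 *	struct node {
 *		int value;
 *		TAILQ_ENTRY(node) entries;
 *	};
 *	TAILQ_HEAD(nodelist, node);
 *
 *	static void
 *	demo(struct nodelist *list, struct node *a, struct node *b)
 *	{
 *		TAILQ_INIT(list);
 *		TAILQ_INSERT_TAIL(list, a, entries);	- FCFS: append at end
 *		TAILQ_INSERT_TAIL(list, b, entries);
 *		TAILQ_REMOVE(list, a, entries);		- O(1), even mid-list
 *	}
 */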
/*
 * Find a semaphore set by key.  The given key must not be IPC_PRIVATE.  Return
 * a pointer to the semaphore set if found, or NULL otherwise.
 */
static struct sem_struct *
sem_find_key(key_t key)
{
	unsigned int i;

	for (i = 0; i < sem_list_nr; i++) {
		if (!(sem_list[i].semid_ds.sem_perm.mode & SEM_ALLOC))
			continue;
		if (sem_list[i].semid_ds.sem_perm._key == key)
			return &sem_list[i];
	}

	return NULL;
}
/*
 * Find a semaphore set by identifier.  Return a pointer to the semaphore set
 * if found, or NULL otherwise.
 */
static struct sem_struct *
sem_find_id(int id)
{
	struct sem_struct *sem;
	unsigned int i;

	i = IPCID_TO_IX(id);
	if (i >= sem_list_nr)
		return NULL;

	sem = &sem_list[i];
	if (!(sem->semid_ds.sem_perm.mode & SEM_ALLOC))
		return NULL;
	if (sem->semid_ds.sem_perm._seq != IPCID_TO_SEQ(id))
		return NULL;

	return sem;
}
/*
 * Implementation of the semget(2) system call.
 */
int
do_semget(message * m)
{
	struct sem_struct *sem;
	unsigned int i, seq;
	key_t key;
	int nsems, flag;

	key = m->m_lc_ipc_semget.key;
	nsems = m->m_lc_ipc_semget.nr;
	flag = m->m_lc_ipc_semget.flag;

	if (key != IPC_PRIVATE && (sem = sem_find_key(key)) != NULL) {
		if ((flag & IPC_CREAT) && (flag & IPC_EXCL))
			return EEXIST;
		if (!check_perm(&sem->semid_ds.sem_perm, m->m_source, flag))
			return EACCES;
		if (nsems > sem->semid_ds.sem_nsems)
			return EINVAL;
		i = sem - sem_list;
	} else {
		if (key != IPC_PRIVATE && !(flag & IPC_CREAT))
			return ENOENT;
		if (nsems <= 0 || nsems > SEMMSL)
			return EINVAL;

		/* Find a free entry. */
		for (i = 0; i < __arraycount(sem_list); i++)
			if (!(sem_list[i].semid_ds.sem_perm.mode & SEM_ALLOC))
				break;
		if (i == __arraycount(sem_list))
			return ENOSPC;

		/* Initialize the entry. */
		sem = &sem_list[i];
		seq = sem->semid_ds.sem_perm._seq;
		memset(sem, 0, sizeof(*sem));
		sem->semid_ds.sem_perm._key = key;
		sem->semid_ds.sem_perm.cuid =
		    sem->semid_ds.sem_perm.uid = getnuid(m->m_source);
		sem->semid_ds.sem_perm.cgid =
		    sem->semid_ds.sem_perm.gid = getngid(m->m_source);
		sem->semid_ds.sem_perm.mode = SEM_ALLOC | (flag & ACCESSPERMS);
		sem->semid_ds.sem_perm._seq = (seq + 1) & 0x7fff;
		sem->semid_ds.sem_nsems = nsems;
		sem->semid_ds.sem_otime = 0;
		sem->semid_ds.sem_ctime = clock_time(NULL);
		TAILQ_INIT(&sem->waiters);

		assert(i <= sem_list_nr);
		if (i == sem_list_nr) {
			/*
			 * If no semaphore sets were allocated before,
			 * subscribe to process events now.
			 */
			if (sem_list_nr == 0)
				update_sem_sub(TRUE /*want_events*/);

			sem_list_nr++;
		}
	}

	m->m_lc_ipc_semget.retid = IXSEQ_TO_IPCID(i, sem->semid_ds.sem_perm);
	return OK;
}
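
/*
 * For illustration only (not part of this server): a typical userland call
 * sequence exercising do_semget() above.  Creating a private set, and
 * exclusively creating a named set:
 *
 *	int id1, id2;
 *
 *	id1 = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
 *	id2 = semget(0x1234, 2, IPC_CREAT | IPC_EXCL | 0600);
 *
 * If key 0x1234 already names a semaphore set, the second call fails with
 * EEXIST, because both IPC_CREAT and IPC_EXCL are given.
 */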
/*
 * Increase the proper suspension count (semncnt or semzcnt) of the semaphore
 * on which the given process is blocked.
 */
static void
inc_susp_count(struct iproc * ip)
{
	struct sembuf *blkop;
	struct semaphore *sp;

	blkop = ip->ip_blkop;
	sp = &ip->ip_sem->sems[blkop->sem_num];

	if (blkop->sem_op != 0) {
		assert(sp->semncnt < USHRT_MAX);
		sp->semncnt++;
	} else {
		assert(sp->semzcnt < USHRT_MAX);
		sp->semzcnt++;
	}
}
/*
 * Decrease the proper suspension count (semncnt or semzcnt) of the semaphore
 * on which the given process is blocked.
 */
static void
dec_susp_count(struct iproc * ip)
{
	struct sembuf *blkop;
	struct semaphore *sp;

	blkop = ip->ip_blkop;
	sp = &ip->ip_sem->sems[blkop->sem_num];

	if (blkop->sem_op != 0) {
		assert(sp->semncnt > 0);
		sp->semncnt--;
	} else {
		assert(sp->semzcnt > 0);
		sp->semzcnt--;
	}
}
/*
 * Send a reply for a semop(2) call suspended earlier, thus waking up the
 * process.
 */
static void
send_reply(endpoint_t who, int ret)
{
	message m;

	memset(&m, 0, sizeof(m));
	m.m_type = ret;

	ipc_sendnb(who, &m);
}
/*
 * Satisfy or cancel the semop(2) call on which the given process is blocked,
 * and send the given reply code (OK or a negative error code) to wake it up,
 * unless the given code is EDONTREPLY.
 */
static void
complete_semop(struct iproc * ip, int code)
{
	struct sem_struct *sem;

	sem = ip->ip_sem;
	assert(sem != NULL);

	TAILQ_REMOVE(&sem->waiters, ip, ip_next);

	dec_susp_count(ip);

	assert(ip->ip_sops != NULL);
	free(ip->ip_sops);

	ip->ip_sops = NULL;
	ip->ip_blkop = NULL;
	ip->ip_sem = NULL;

	if (code != EDONTREPLY)
		send_reply(ip->ip_endpt, code);
}
/*
 * Free up the given semaphore set.  This includes cancelling any blocking
 * semop(2) calls on any of its semaphores.
 */
static void
remove_set(struct sem_struct * sem)
{
	struct iproc *ip;

	/*
	 * Cancel all semop(2) operations on this semaphore set, with an EIDRM
	 * reply code.
	 */
	while (!TAILQ_EMPTY(&sem->waiters)) {
		ip = TAILQ_FIRST(&sem->waiters);

		complete_semop(ip, EIDRM);
	}

	/* Mark the entry as free. */
	sem->semid_ds.sem_perm.mode &= ~SEM_ALLOC;

	/*
	 * This may have been the last in-use slot in the list.  Ensure that
	 * sem_list_nr again equals the highest in-use slot number plus one.
	 */
	while (sem_list_nr > 0 &&
	    !(sem_list[sem_list_nr - 1].semid_ds.sem_perm.mode & SEM_ALLOC))
		sem_list_nr--;

	/*
	 * If this was our last semaphore set, unsubscribe from process events.
	 */
	if (sem_list_nr == 0)
		update_sem_sub(FALSE /*want_events*/);
}
/*
 * Try to perform a set of semaphore operations, as given by semop(2), on a
 * semaphore set.  The entire action must be atomic, i.e., either succeed in
 * its entirety or fail without making any changes.  Return OK on success, in
 * which case the PIDs of all affected semaphores will be updated to the given
 * 'pid' value, and the semaphore set's sem_otime will be updated as well.
 * Return SUSPEND if the call should be suspended, in which case 'blkop' will
 * be set to a pointer to the operation causing the call to block.  Return an
 * error code if the call failed altogether.
 */
static int
try_semop(struct sem_struct *sem, struct sembuf *sops, unsigned int nsops,
	pid_t pid, struct sembuf ** blkop)
{
	struct semaphore *sp;
	struct sembuf *op;
	unsigned int i;
	int r;

	/*
	 * The operation must be processed atomically.  However, it must also
	 * be processed "in array order," which we assume to mean that while
	 * processing one operation, the changes of the previous operations
	 * must be taken into account.  This is relevant for cases where the
	 * same semaphore is referenced by more than one operation, for example
	 * to perform an atomic increase-if-zero action on a single semaphore.
	 * As a result, we must optimistically modify semaphore values and roll
	 * back on suspension or failure afterwards.
	 */
	r = OK;
	op = NULL;
	for (i = 0; i < nsops; i++) {
		sp = &sem->sems[sops[i].sem_num];
		op = &sops[i];

		if (op->sem_op > 0) {
			if (SEMVMX - sp->semval < op->sem_op) {
				r = ERANGE;
				break;
			}
			sp->semval += op->sem_op;
		} else if (op->sem_op < 0) {
			/*
			 * No SEMVMX check; if the process wants to deadlock
			 * itself by supplying -SEMVMX it is free to do so..
			 */
			if ((int)sp->semval < -(int)op->sem_op) {
				r = (op->sem_flg & IPC_NOWAIT) ? EAGAIN :
				    SUSPEND;
				break;
			}
			sp->semval += op->sem_op;
		} else /* (op->sem_op == 0) */ {
			if (sp->semval != 0) {
				r = (op->sem_flg & IPC_NOWAIT) ? EAGAIN :
				    SUSPEND;
				break;
			}
		}
	}

	/*
	 * If we did not go through all the operations, then either an error
	 * occurred or the user process is to be suspended.  In that case we
	 * must roll back any progress we have made so far, and return the
	 * operation that caused the call to block.
	 */
	if (i < nsops) {
		assert(r != OK);

		/* Roll back all changes made so far. */
		while (i-- > 0)
			sem->sems[sops[i].sem_num].semval -= sops[i].sem_op;

		if (r == SUSPEND)
			*blkop = op;

		return r;
	}

	/*
	 * The operation has completed successfully.  Also update all affected
	 * semaphores' PID values, and the semaphore set's last-semop time.
	 * The caller must do everything else.
	 */
	for (i = 0; i < nsops; i++)
		sem->sems[sops[i].sem_num].sempid = pid;

	sem->semid_ds.sem_otime = clock_time(NULL);

	return OK;
}
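
/*
 * For illustration only (not part of this server): the "atomic
 * increase-if-zero" pattern mentioned above, as issued from userland.  Both
 * operations target semaphore 0 of the set; since the array is processed
 * atomically and in array order, the increment is applied only if the
 * wait-for-zero succeeds within the same call:
 *
 *	struct sembuf sops[2];
 *
 *	sops[0].sem_num = 0;
 *	sops[0].sem_op = 0;	- wait until the semaphore value is zero
 *	sops[0].sem_flg = 0;
 *	sops[1].sem_num = 0;
 *	sops[1].sem_op = 1;	- then increase it by one
 *	sops[1].sem_flg = 0;
 *
 *	if (semop(id, sops, 2) != 0)
 *		err(1, "semop");
 */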
/*
 * Check whether any blocked operations can now be satisfied on any of the
 * semaphores in the given semaphore set.  Do this repeatedly as necessary, as
 * any unblocked operation may in turn allow other operations to be resumed.
 */
static void
check_set(struct sem_struct * sem)
{
	struct iproc *ip, *nextip;
	struct sembuf *blkop;
	int r, woke_up;

	/*
	 * Go through all the waiting processes in FIFO order, which is our
	 * best attempt at providing at least some fairness.  Keep trying as
	 * long as we woke up at least one process, which means we made actual
	 * progress.
	 */
	do {
		woke_up = FALSE;

		TAILQ_FOREACH_SAFE(ip, &sem->waiters, ip_next, nextip) {
			/* Retry the entire semop(2) operation, atomically. */
			r = try_semop(ip->ip_sem, ip->ip_sops, ip->ip_nsops,
			    ip->ip_pid, &blkop);

			if (r != SUSPEND) {
				/* Success or failure. */
				complete_semop(ip, r);

				/* No changes are made on failure. */
				if (r == OK)
					woke_up = TRUE;
			} else if (blkop != ip->ip_blkop) {
				/*
				 * The process stays suspended, but it is now
				 * blocked on a different semaphore.  As a
				 * result, we need to adjust the semaphores'
				 * suspension counts.
				 */
				dec_susp_count(ip);

				ip->ip_blkop = blkop;

				inc_susp_count(ip);
			}
		}
	} while (woke_up);
}
/*
 * Fill a seminfo structure with actual information.  The information returned
 * depends on the given command, which may be either IPC_INFO or SEM_INFO.
 */
static void
fill_seminfo(struct seminfo * sinfo, int cmd)
{
	unsigned int i;

	assert(cmd == IPC_INFO || cmd == SEM_INFO);

	memset(sinfo, 0, sizeof(*sinfo));

	sinfo->semmap = SEMMNI;
	sinfo->semmni = SEMMNI;
	sinfo->semmns = SEMMNI * SEMMSL;
	sinfo->semmnu = 0; /* TODO: support for SEM_UNDO */
	sinfo->semmsl = SEMMSL;
	sinfo->semopm = SEMOPM;
	sinfo->semume = 0; /* TODO: support for SEM_UNDO */
	if (cmd == SEM_INFO) {
		/*
		 * For SEM_INFO the semusz field is expected to contain the
		 * number of semaphore sets currently in use.
		 */
		sinfo->semusz = sem_list_nr;
	} else
		sinfo->semusz = 0; /* TODO: support for SEM_UNDO */
	sinfo->semvmx = SEMVMX;
	if (cmd == SEM_INFO) {
		/*
		 * For SEM_INFO the semaem field is expected to contain
		 * the total number of allocated semaphores.
		 */
		for (i = 0; i < sem_list_nr; i++)
			sinfo->semaem += sem_list[i].semid_ds.sem_nsems;
	} else
		sinfo->semaem = 0; /* TODO: support for SEM_UNDO */
}
/*
 * Implementation of the semctl(2) system call.
 */
int
do_semctl(message * m)
{
	static unsigned short valbuf[SEMMSL];
	unsigned int i;
	vir_bytes opt;
	uid_t uid;
	int r, id, num, cmd, val;
	struct semid_ds tmp_ds;
	struct sem_struct *sem;
	struct seminfo sinfo;

	id = m->m_lc_ipc_semctl.id;
	num = m->m_lc_ipc_semctl.num;
	cmd = m->m_lc_ipc_semctl.cmd;
	opt = m->m_lc_ipc_semctl.opt;

	/*
	 * Look up the target semaphore set.  The IPC_INFO and SEM_INFO
	 * commands have no associated semaphore set.  The SEM_STAT command
	 * takes an array index into the semaphore set table.  For all other
	 * commands, look up the semaphore set by its given identifier.
	 */
	switch (cmd) {
	case IPC_INFO:
	case SEM_INFO:
		sem = NULL;
		break;
	case SEM_STAT:
		if (id < 0 || (unsigned int)id >= sem_list_nr)
			return EINVAL;
		sem = &sem_list[id];
		if (!(sem->semid_ds.sem_perm.mode & SEM_ALLOC))
			return EINVAL;
		break;
	default:
		if ((sem = sem_find_id(id)) == NULL)
			return EINVAL;
		break;
	}

	/*
	 * Check if the caller has the appropriate permissions on the target
	 * semaphore set.  SETVAL and SETALL require write permission.  IPC_SET
	 * and IPC_RMID require ownership permission, and return EPERM instead
	 * of EACCES on failure.  IPC_INFO and SEM_INFO are free for general
	 * use.  All other calls require read permission.
	 */
	switch (cmd) {
	case SETVAL:
	case SETALL:
		assert(sem != NULL);
		if (!check_perm(&sem->semid_ds.sem_perm, m->m_source, IPC_W))
			return EACCES;
		break;
	case IPC_SET:
	case IPC_RMID:
		assert(sem != NULL);
		uid = getnuid(m->m_source);
		if (uid != sem->semid_ds.sem_perm.cuid &&
		    uid != sem->semid_ds.sem_perm.uid && uid != 0)
			return EPERM;
		break;
	case IPC_INFO:
	case SEM_INFO:
		break;
	default:
		assert(sem != NULL);
		if (!check_perm(&sem->semid_ds.sem_perm, m->m_source, IPC_R))
			return EACCES;
	}

	switch (cmd) {
	case IPC_STAT:
	case SEM_STAT:
		if ((r = sys_datacopy(SELF, (vir_bytes)&sem->semid_ds,
		    m->m_source, opt, sizeof(sem->semid_ds))) != OK)
			return r;
		if (cmd == SEM_STAT)
			m->m_lc_ipc_semctl.ret =
			    IXSEQ_TO_IPCID(id, sem->semid_ds.sem_perm);
		break;
	case IPC_SET:
		if ((r = sys_datacopy(m->m_source, opt, SELF,
		    (vir_bytes)&tmp_ds, sizeof(tmp_ds))) != OK)
			return r;
		sem->semid_ds.sem_perm.uid = tmp_ds.sem_perm.uid;
		sem->semid_ds.sem_perm.gid = tmp_ds.sem_perm.gid;
		sem->semid_ds.sem_perm.mode &= ~ACCESSPERMS;
		sem->semid_ds.sem_perm.mode |=
		    tmp_ds.sem_perm.mode & ACCESSPERMS;
		sem->semid_ds.sem_ctime = clock_time(NULL);
		break;
	case IPC_RMID:
		/*
		 * Awaken all processes blocked in semop(2) on any semaphore in
		 * this set, and remove the semaphore set itself.
		 */
		remove_set(sem);
		break;
	case IPC_INFO:
	case SEM_INFO:
		fill_seminfo(&sinfo, cmd);

		if ((r = sys_datacopy(SELF, (vir_bytes)&sinfo, m->m_source,
		    opt, sizeof(sinfo))) != OK)
			return r;
		/* Return the highest in-use slot number if any, or zero. */
		if (sem_list_nr > 0)
			m->m_lc_ipc_semctl.ret = sem_list_nr - 1;
		else
			m->m_lc_ipc_semctl.ret = 0;
		break;
	case GETALL:
		assert(sem->semid_ds.sem_nsems <= __arraycount(valbuf));
		for (i = 0; i < sem->semid_ds.sem_nsems; i++)
			valbuf[i] = sem->sems[i].semval;
		r = sys_datacopy(SELF, (vir_bytes)valbuf, m->m_source,
		    opt, sizeof(unsigned short) * sem->semid_ds.sem_nsems);
		if (r != OK)
			return r;
		break;
	case GETNCNT:
		if (num < 0 || num >= sem->semid_ds.sem_nsems)
			return EINVAL;
		m->m_lc_ipc_semctl.ret = sem->sems[num].semncnt;
		break;
	case GETPID:
		if (num < 0 || num >= sem->semid_ds.sem_nsems)
			return EINVAL;
		m->m_lc_ipc_semctl.ret = sem->sems[num].sempid;
		break;
	case GETVAL:
		if (num < 0 || num >= sem->semid_ds.sem_nsems)
			return EINVAL;
		m->m_lc_ipc_semctl.ret = sem->sems[num].semval;
		break;
	case GETZCNT:
		if (num < 0 || num >= sem->semid_ds.sem_nsems)
			return EINVAL;
		m->m_lc_ipc_semctl.ret = sem->sems[num].semzcnt;
		break;
	case SETALL:
		assert(sem->semid_ds.sem_nsems <= __arraycount(valbuf));
		r = sys_datacopy(m->m_source, opt, SELF, (vir_bytes)valbuf,
		    sizeof(unsigned short) * sem->semid_ds.sem_nsems);
		if (r != OK)
			return r;
		for (i = 0; i < sem->semid_ds.sem_nsems; i++)
			if (valbuf[i] > SEMVMX)
				return ERANGE;
#ifdef DEBUG_SEM
		for (i = 0; i < sem->semid_ds.sem_nsems; i++)
			printf("SEMCTL: SETALL val: [%d] %d\n", i, valbuf[i]);
#endif
		for (i = 0; i < sem->semid_ds.sem_nsems; i++)
			sem->sems[i].semval = valbuf[i];
		sem->semid_ds.sem_ctime = clock_time(NULL);
		/* Awaken any waiting parties if now possible. */
		check_set(sem);
		break;
	case SETVAL:
		val = (int)opt;
		if (num < 0 || num >= sem->semid_ds.sem_nsems)
			return EINVAL;
		if (val < 0 || val > SEMVMX)
			return ERANGE;
		sem->sems[num].semval = val;
#ifdef DEBUG_SEM
		printf("SEMCTL: SETVAL: %d %d\n", num, val);
#endif
		sem->semid_ds.sem_ctime = clock_time(NULL);
		/* Awaken any waiting parties if now possible. */
		check_set(sem);
		break;
	default:
		return EINVAL;
	}

	return OK;
}
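
/*
 * For illustration only (not part of this server): typical userland use of
 * the semctl(2) commands handled above.
 *
 *	union semun arg;
 *	int val;
 *
 *	arg.val = 1;
 *	if (semctl(id, 0, SETVAL, arg) != 0)	- requires write permission
 *		err(1, "semctl");
 *
 *	val = semctl(id, 0, GETVAL);		- requires read permission
 *
 *	(void)semctl(id, 0, IPC_RMID);		- requires ownership; wakes up
 *						  all waiters with EIDRM
 */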
/*
 * Implementation of the semop(2) system call.
 */
int
do_semop(message * m)
{
	unsigned int i, mask, slot;
	int id, r;
	struct sembuf *sops, *blkop;
	unsigned int nsops;
	struct sem_struct *sem;
	struct iproc *ip;
	pid_t pid;

	id = m->m_lc_ipc_semop.id;
	nsops = m->m_lc_ipc_semop.size;

	if ((sem = sem_find_id(id)) == NULL)
		return EINVAL;

	if (nsops == 0)
		return OK; /* nothing to do */
	if (nsops > SEMOPM)
		return E2BIG;

	/* Get the array from the user process. */
	sops = malloc(sizeof(sops[0]) * nsops);
	if (sops == NULL)
		return ENOMEM;
	r = sys_datacopy(m->m_source, (vir_bytes)m->m_lc_ipc_semop.ops, SELF,
	    (vir_bytes)sops, sizeof(sops[0]) * nsops);
	if (r != OK)
		goto out_free;

#ifdef DEBUG_SEM
	for (i = 0; i < nsops; i++)
		printf("SEMOP: num:%d op:%d flg:%d\n",
		    sops[i].sem_num, sops[i].sem_op, sops[i].sem_flg);
#endif

	/*
	 * Check for permissions.  We do this only once, even though the call
	 * might suspend and the semaphore set's permissions might be changed
	 * before the call resumes.  The specification is not clear on this.
	 * Either way, perform the permission check before checking the
	 * validity of semaphore numbers, since obtaining the semaphore set
	 * size itself requires read permission (except through sysctl(2)..).
	 */
	mask = 0;
	for (i = 0; i < nsops; i++) {
		if (sops[i].sem_op != 0)
			mask |= IPC_W; /* check for write permission */
		else
			mask |= IPC_R; /* check for read permission */
	}
	r = EACCES;
	if (!check_perm(&sem->semid_ds.sem_perm, m->m_source, mask))
		goto out_free;

	/* Check that all given semaphore numbers are within range. */
	r = EFBIG;
	for (i = 0; i < nsops; i++)
		if (sops[i].sem_num >= sem->semid_ds.sem_nsems)
			goto out_free;

	/*
	 * Do not check if the same semaphore is referenced more than once
	 * (there was such a check here originally), because that is actually
	 * a valid case.  The result, however, is that it is possible to
	 * construct a semop(2) request that will never complete, and thus,
	 * care must be taken that such requests do not create potential
	 * deadlock situations etc.
	 */

	pid = getnpid(m->m_source);

	/*
	 * We do not yet support SEM_UNDO at all, so we better not give the
	 * caller the impression that we do.  For now, print a warning so that
	 * we know when an application actually fails for that reason.
	 */
	for (i = 0; i < nsops; i++) {
		if (sops[i].sem_flg & SEM_UNDO) {
			/* Print a warning only if this isn't the test set.. */
			if (sops[i].sem_flg != SHRT_MAX)
				printf("IPC: pid %d tried to use SEM_UNDO\n",
				    pid);
			r = EINVAL;
			goto out_free;
		}
	}

	/* Try to perform the operation now. */
	r = try_semop(sem, sops, nsops, pid, &blkop);

	if (r == SUSPEND) {
		/*
		 * The operation ended up blocking on a particular semaphore
		 * operation.  Save all details in the slot for the user
		 * process, and add it to the list of processes waiting for
		 * this semaphore set.
		 */
		slot = _ENDPOINT_P(m->m_source);
		assert(slot < __arraycount(iproc));

		ip = &iproc[slot];
		assert(ip->ip_sem == NULL); /* can't already be in use */

		ip->ip_endpt = m->m_source;
		ip->ip_pid = pid;
		ip->ip_sem = sem;
		ip->ip_sops = sops;
		ip->ip_nsops = nsops;
		ip->ip_blkop = blkop;

		TAILQ_INSERT_TAIL(&sem->waiters, ip, ip_next);

		inc_susp_count(ip);

		return SUSPEND;
	}

out_free:
	free(sops);

	/* Awaken any other waiting parties if now possible. */
	if (r == OK)
		check_set(sem);

	return r;
}
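
/*
 * For illustration only (not part of this server): a nonblocking decrement
 * as issued from userland.  With IPC_NOWAIT set, try_semop() yields EAGAIN
 * rather than SUSPEND when the operation cannot proceed, so the caller gets
 * an immediate error instead of being blocked:
 *
 *	struct sembuf op;
 *
 *	op.sem_num = 0;
 *	op.sem_op = -1;
 *	op.sem_flg = IPC_NOWAIT;
 *
 *	while (semop(id, &op, 1) != 0) {
 *		if (errno != EAGAIN)
 *			err(1, "semop");
 *		do_other_work();	- hypothetical fallback activity
 *	}
 */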
/*
 * Return semaphore information for a remote MIB call on the sysvipc_info node
 * in the kern.ipc subtree.  The particular semantics of this call are tightly
 * coupled to the implementation of the ipcs(1) userland utility.
 */
ssize_t
get_sem_mib_info(struct rmib_oldp * oldp)
{
	struct sem_sysctl_info semsi;
	struct semid_ds *semds;
	unsigned int i;
	ssize_t r, off;

	off = 0;

	fill_seminfo(&semsi.seminfo, IPC_INFO);

	/*
	 * As a hackish exception, the requested size may imply that just
	 * general information is to be returned, without throwing an ENOMEM
	 * error because there is no space for full output.
	 */
	if (rmib_getoldlen(oldp) == sizeof(semsi.seminfo))
		return rmib_copyout(oldp, 0, &semsi.seminfo,
		    sizeof(semsi.seminfo));

	/*
	 * ipcs(1) blindly expects the returned array to be of size
	 * seminfo.semmni, using the SEM_ALLOC mode flag to see whether each
	 * entry is valid.  If we return a smaller size, ipcs(1) will access
	 * memory beyond the array we copied out.
	 */
	assert(semsi.seminfo.semmni > 0);

	if (oldp == NULL)
		return sizeof(semsi) + sizeof(semsi.semids[0]) *
		    (semsi.seminfo.semmni - 1);

	/*
	 * Copy out entries one by one.  For the first entry, copy out the
	 * entire "semsi" structure.  For subsequent entries, reuse the single
	 * embedded 'semids' element of "semsi" and copy out only that element.
	 */
	for (i = 0; i < (unsigned int)semsi.seminfo.semmni; i++) {
		semds = &sem_list[i].semid_ds;

		memset(&semsi.semids[0], 0, sizeof(semsi.semids[0]));
		if (i < sem_list_nr && (semds->sem_perm.mode & SEM_ALLOC)) {
			prepare_mib_perm(&semsi.semids[0].sem_perm,
			    &semds->sem_perm);
			semsi.semids[0].sem_nsems = semds->sem_nsems;
			semsi.semids[0].sem_otime = semds->sem_otime;
			semsi.semids[0].sem_ctime = semds->sem_ctime;
		}

		if (off == 0)
			r = rmib_copyout(oldp, off, &semsi, sizeof(semsi));
		else
			r = rmib_copyout(oldp, off, &semsi.semids[0],
			    sizeof(semsi.semids[0]));

		if (r < 0)
			return r;
		off += r;
	}

	return off;
}
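
/*
 * For illustration only: the variable-length layout produced above, assuming
 * seminfo.semmni == 3.  The first copyout writes the whole "semsi" structure,
 * which already embeds semids[0]; each subsequent copyout appends one more
 * element:
 *
 *	offset 0:					seminfo + semids[0]
 *	offset sizeof(semsi):				semids[1]
 *	offset sizeof(semsi) + sizeof(semsi.semids[0]):	semids[2]
 *
 * This is also why the total size reported when 'oldp' is NULL is
 * sizeof(semsi) + sizeof(semsi.semids[0]) * (semmni - 1).
 */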
/*
 * Return TRUE iff no semaphore sets are allocated.
 */
int
is_sem_nil(void)
{

	return (sem_list_nr == 0);
}
/*
 * Check if the given endpoint is blocked on a semop(2) call.  If so, cancel
 * the call, because either it is interrupted by a signal or the process was
 * killed.  In the former case, unblock the process by replying with EINTR.
 */
void
sem_process_event(endpoint_t endpt, int has_exited)
{
	unsigned int slot;
	struct iproc *ip;

	slot = _ENDPOINT_P(endpt);
	assert(slot < __arraycount(iproc));

	ip = &iproc[slot];

	/* Was the process blocked on a semop(2) call at all? */
	if (ip->ip_sem == NULL)
		return;

	assert(ip->ip_endpt == endpt);

	/*
	 * It was; cancel the semop(2) call.  If the process is being removed
	 * because its call was interrupted by a signal, then we must wake it
	 * up with an EINTR reply.
	 */
	complete_semop(ip, has_exited ? EDONTREPLY : EINTR);
}