/*
 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * IMPLEMENTATION NOTES ON CODE REWRITE (Eric Schenk, January 1995):
 * This code underwent a massive rewrite in order to solve some problems
 * with the original code. In particular the original code failed to
 * wake up processes that were waiting for semval to go to 0 if the
 * value went to 0 and was then incremented rapidly enough. In solving
 * this problem I have also modified the implementation so that it
 * processes pending operations in a FIFO manner, thus giving a guarantee
 * that processes waiting for a lock on the semaphore won't starve
 * unless another locking process fails to unlock.
 * In addition the following two changes in behavior have been introduced:
 * - The original implementation of semop returned the value of the
 *   last semaphore element examined on success. This does not
 *   match the manual page specification, and effectively
 *   allows the user to read the semaphore even if they do not
 *   have read permissions. The implementation now returns 0
 *   on success as stated in the manual page.
 * - There is some confusion over whether the set of undo adjustments
 *   to be performed at exit should be done in an atomic manner.
 *   That is, if we are attempting to decrement the semval, should we queue
 *   up and wait until we can do so legally?
 *   The original implementation attempted to do this.
 *   The current implementation does not. This is because I don't
 *   think it is the right thing (TM) to do, and because I couldn't
 *   see a clean way to get the old behavior with the new design.
 *   The POSIX standard and SVID should be consulted to determine
 *   what behavior is mandated.
 *
 * Further notes on refinement (Christoph Rohland, December 1998):
 * - The POSIX standard says that the undo adjustments should simply be
 *   redone. So the current implementation is OK.
 * - The previous code had two flaws:
 *   1) It actively gave the semaphore to the next waiting process
 *      sleeping on the semaphore. Since this process did not have the
 *      cpu, this led to many unnecessary context switches and bad
 *      performance. Now we only check which process should be able to
 *      get the semaphore, and if this process wants to reduce some
 *      semaphore value we simply wake it up without performing the
 *      operation, so it has to try to get the semaphore again later.
 *      Thus e.g. the running process may reacquire the semaphore during
 *      the current time slice. If the woken process only waits for zero
 *      or increases the semaphore, we perform the operation in advance
 *      and wake it up.
 *   2) It did not wake up all processes waiting for zero. We try to do
 *      better, but only get the semops right which only wait for zero or
 *      increase. If there are decrement operations in the operations
 *      array, we do the same as before.
 *
 * With the advent of the O(1) scheduler, it became unnecessary to perform
 * the check/retry algorithm for waking up blocked processes, as the new
 * scheduler is better at handling thread switches than the old one.
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <manfreds@colorfullife.com>
 * Enforced range limit on SEM_UNDO
 * (c) 2001 Red Hat Inc <alan@redhat.com>
 * Lockless wakeup
 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/smp_lock.h>
#include <linux/security.h>
#include <asm/uaccess.h>

#define sem_lock(id)	((struct sem_array*)ipc_lock(&sem_ids,id))
#define sem_unlock(sma)	ipc_unlock(&(sma)->sem_perm)
#define sem_rmid(id)	((struct sem_array*)ipc_rmid(&sem_ids,id))
#define sem_checkid(sma, semid)	\
	ipc_checkid(&sem_ids,&sma->sem_perm,semid)
#define sem_buildid(id, seq) \
	ipc_buildid(&sem_ids, id, seq)

static struct ipc_ids sem_ids;

static int newary (key_t, int, int);
static void freeary (struct sem_array *sma, int id);
static int sysvipc_sem_read_proc(char *buffer, char **start, off_t offset,
				 int length, int *eof, void *data);

#define SEMMSL_FAST	256 /* 512 bytes on stack */
#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */

/*
 * linked list protection:
 *	sem_array.sem_pending{,last},
 *	sem_array.sem_undo: sem_lock() for read/write
 *	sem_undo.proc_next: only "current" is allowed to read/write that field.
 */

int sem_ctls[4] = {SEMMSL, SEMMNS, SEMOPM, SEMMNI};
#define sc_semmsl	(sem_ctls[0])
#define sc_semmns	(sem_ctls[1])
#define sc_semopm	(sem_ctls[2])
#define sc_semmni	(sem_ctls[3])

static int used_sems;

void __init sem_init (void)
{
	ipc_init_ids(&sem_ids, sc_semmni);

#ifdef CONFIG_PROC_FS
	create_proc_read_entry("sysvipc/sem", 0, NULL,
			       sysvipc_sem_read_proc, NULL);
#endif
}

/*
 * Lockless wakeup algorithm:
 * Without the check/retry algorithm a lockless wakeup is possible:
 * - queue.status is initialized to -EINTR before blocking.
 * - wakeup is performed by
 *	* unlinking the queue entry from sma->sem_pending
 *	* setting queue.status to IN_WAKEUP
 *	  This is the notification for the blocked thread that a
 *	  result value is imminent.
 *	* calling wake_up_process
 *	* setting queue.status to the final value.
 * - the previously blocked thread checks queue.status:
 *	* if it's IN_WAKEUP, then it must wait until the value changes
 *	* if it's not -EINTR, then the operation was completed by
 *	  update_queue. semtimedop can return queue.status without
 *	  performing any operation on the semaphore array.
 *	* otherwise it must acquire the spinlock and check what's up.
 *
 * The two-stage algorithm is necessary to protect against the following
 * races:
 * - if queue.status is set after wake_up_process, then the woken up idle
 *   thread could race forward and try (and fail) to acquire sma->lock
 *   before update_queue had a chance to set queue.status.
 * - if queue.status is written before wake_up_process and if the
 *   blocked process is woken up by a signal between writing
 *   queue.status and the wake_up_process, then the woken up
 *   process could return from semtimedop and die by calling
 *   sys_exit before wake_up_process is called. Then wake_up_process
 *   will oops, because the task structure is already invalid.
 *   (yes, this happened on s390 with sysv msg).
 */
#define IN_WAKEUP	1
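
/*
 * Condensed sketch of the handshake described above. This is an
 * illustration only, not the compiled code -- the real implementations
 * are in update_queue() and sys_semtimedop() below, and "final_error"
 * is a placeholder for the result computed by try_atomic_semop():
 *
 *	waker (holds sma->lock):
 *		remove_from_queue(sma, q);
 *		q->status = IN_WAKEUP;		// "result imminent"
 *		wake_up_process(q->sleeper);
 *		q->status = final_error;	// publish the result
 *
 *	waiter (lockless):
 *		error = queue.status;
 *		while (error == IN_WAKEUP)
 *			error = queue.status;	// spin, result imminent
 *		if (error != -EINTR)
 *			return error;		// completed by update_queue
 *		// otherwise: acquire sma->lock and re-check
 */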

static int newary (key_t key, int nsems, int semflg)
{
	int id;
	int retval;
	struct sem_array *sma;
	int size;

	if (used_sems + nsems > sc_semmns)
		return -ENOSPC;

	size = sizeof (*sma) + nsems * sizeof (struct sem);
	sma = ipc_rcu_alloc(size);
	if (!sma)
		return -ENOMEM;
	memset (sma, 0, size);

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(sma);
	if (retval) {
		ipc_rcu_putref(sma);
		return retval;
	}

	id = ipc_addid(&sem_ids, &sma->sem_perm, sc_semmni);
	if (id == -1) {
		security_sem_free(sma);
		ipc_rcu_putref(sma);
		return -ENOSPC;
	}
	used_sems += nsems;

	sma->sem_base = (struct sem *) &sma[1];
	/* sma->sem_pending = NULL; */
	sma->sem_pending_last = &sma->sem_pending;
	/* sma->undo = NULL; */
	sma->sem_nsems = nsems;
	sma->sem_ctime = get_seconds();
	sem_unlock(sma);

	return sem_buildid(id, sma->sem_perm.seq);
}

asmlinkage long sys_semget (key_t key, int nsems, int semflg)
{
	int id, err = -EINVAL;
	struct sem_array *sma;

	if (nsems < 0 || nsems > sc_semmsl)
		return -EINVAL;

	down(&sem_ids.sem);

	if (key == IPC_PRIVATE) {
		err = newary(key, nsems, semflg);
	} else if ((id = ipc_findkey(&sem_ids, key)) == -1) {  /* key not used */
		if (!(semflg & IPC_CREAT))
			err = -ENOENT;
		else
			err = newary(key, nsems, semflg);
	} else if (semflg & IPC_CREAT && semflg & IPC_EXCL) {
		err = -EEXIST;
	} else {
		sma = sem_lock(id);
		if (nsems > sma->sem_nsems)
			err = -EINVAL;
		else if (ipcperms(&sma->sem_perm, semflg))
			err = -EACCES;
		else {
			int semid = sem_buildid(id, sma->sem_perm.seq);
			err = security_sem_associate(sma, semflg);
			if (!err)
				err = semid;
		}
		sem_unlock(sma);
	}
	up(&sem_ids.sem);

	return err;
}

/* Manage the doubly linked list sma->sem_pending as a FIFO:
 * insert new queue elements at the tail sma->sem_pending_last.
 */
static inline void append_to_queue (struct sem_array * sma,
				    struct sem_queue * q)
{
	/* q->prev remembers the field that points at q (old tail link) */
	*(q->prev = sma->sem_pending_last) = q;
	/* the tail pointer now refers to q's next field, which ends the list */
	*(sma->sem_pending_last = &q->next) = NULL;
}

static inline void prepend_to_queue (struct sem_array * sma,
				     struct sem_queue * q)
{
	q->next = sma->sem_pending;
	*(q->prev = &sma->sem_pending) = q;
	if (q->next)
		q->next->prev = &q->next;
	else /* sma->sem_pending_last == &sma->sem_pending */
		sma->sem_pending_last = &q->next;
}

static inline void remove_from_queue (struct sem_array * sma,
				      struct sem_queue * q)
{
	*(q->prev) = q->next;
	if (q->next)
		q->next->prev = q->prev;
	else /* sma->sem_pending_last == &q->next */
		sma->sem_pending_last = q->prev;
	q->prev = NULL;	/* mark as removed */
}
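
/*
 * Explanatory note on the pointer games in the three helpers above:
 * q->prev always points at the location that points at q -- either the
 * previous element's ->next field or sma->sem_pending itself -- and
 * sma->sem_pending_last always points at the ->next field of the last
 * element (or at sma->sem_pending when the list is empty). This
 * invariant is why insertion and removal need no special case for an
 * empty list or for the head element.
 */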

/*
 * Determine whether a sequence of semaphore operations would succeed
 * all at once. Return 0 if yes, 1 if need to sleep, else return error code.
 */

static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops,
			     int nsops, struct sem_undo *un, int pid)
{
	int result, sem_op;
	struct sembuf *sop;
	struct sem * curr;

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = sma->sem_base + sop->sem_num;
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;
		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;
			/*
			 * Exceeding the undo range is an error.
			 */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
		}
		curr->semval = result;
	}

	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].sempid = pid;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] -= sop->sem_op;
		sop--;
	}

	sma->sem_otime = get_seconds();
	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].semval -= sop->sem_op;
		sop--;
	}

	return result;
}

/* Go through the pending queue for the indicated semaphore
 * looking for tasks that can be completed.
 */
static void update_queue (struct sem_array * sma)
{
	int error;
	struct sem_queue * q;

	q = sma->sem_pending;
	while (q) {
		error = try_atomic_semop(sma, q->sops, q->nsops,
					 q->undo, q->pid);

		/* Does q->sleeper still need to sleep? */
		if (error <= 0) {
			struct sem_queue *n;

			remove_from_queue(sma, q);
			n = q->next;
			q->status = IN_WAKEUP;
			wake_up_process(q->sleeper);
			/* hands-off: q will disappear immediately after
			 * writing q->status.
			 */
			q->status = error;
			q = n;
		} else {
			q = q->next;
		}
	}
}

/* The following counts are associated to each semaphore:
 *   semncnt        number of tasks waiting on semval being nonzero
 *   semzcnt        number of tasks waiting on semval being zero
 * This model assumes that a task waits on exactly one semaphore.
 * Since semaphore operations are to be performed atomically, tasks actually
 * wait on a whole sequence of semaphores simultaneously.
 * The counts we return here are a rough approximation, but still
 * warrant that semncnt+semzcnt>0 if the task is on the pending queue.
 */
static int count_semncnt (struct sem_array * sma, ushort semnum)
{
	int semncnt;
	struct sem_queue * q;

	semncnt = 0;
	for (q = sma->sem_pending; q; q = q->next) {
		struct sembuf * sops = q->sops;
		int nsops = q->nsops;
		int i;

		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op < 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semncnt++;
	}
	return semncnt;
}

static int count_semzcnt (struct sem_array * sma, ushort semnum)
{
	int semzcnt;
	struct sem_queue * q;

	semzcnt = 0;
	for (q = sma->sem_pending; q; q = q->next) {
		struct sembuf * sops = q->sops;
		int nsops = q->nsops;
		int i;

		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op == 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semzcnt++;
	}
	return semzcnt;
}

/* Free a semaphore set. freeary() is called with sem_ids.sem down and
 * the spinlock for this semaphore set held. sem_ids.sem remains locked
 * on exit.
 */
static void freeary (struct sem_array *sma, int id)
{
	struct sem_undo *un;
	struct sem_queue *q;
	int size;

	/* Invalidate the existing undo structures for this semaphore set.
	 * (They will be freed without any further action in exit_sem()
	 * or during the next semop.)
	 */
	for (un = sma->undo; un; un = un->id_next)
		un->semid = -1;

	/* Wake up all pending processes and let them fail with EIDRM. */
	q = sma->sem_pending;
	while (q) {
		struct sem_queue *n;
		/* lazy remove_from_queue: we are killing the whole queue */
		n = q->next;
		q->status = IN_WAKEUP;
		wake_up_process(q->sleeper); /* doesn't sleep */
		q->status = -EIDRM;	/* hands-off q */
		q = n;
	}

	/* Remove the semaphore set from the ID array */
	sma = sem_rmid(id);
	sem_unlock(sma);

	used_sems -= sma->sem_nsems;
	size = sizeof (*sma) + sma->sem_nsems * sizeof (struct sem);
	security_sem_free(sma);
	ipc_rcu_free(sma, size);
}

static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct semid_ds out;

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime	= in->sem_otime;
		out.sem_ctime	= in->sem_ctime;
		out.sem_nsems	= in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static int semctl_nolock(int semid, int semnum, int cmd, int version, union semun arg)
{
	int err = -EINVAL;
	struct sem_array *sma;

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	{
		struct seminfo seminfo;
		int max_id;

		err = security_sem_semctl(NULL, cmd);
		if (err)
			return err;
		memset(&seminfo, 0, sizeof(seminfo));
		seminfo.semmni = sc_semmni;
		seminfo.semmns = sc_semmns;
		seminfo.semmsl = sc_semmsl;
		seminfo.semopm = sc_semopm;
		seminfo.semvmx = SEMVMX;
		seminfo.semmnu = SEMMNU;
		seminfo.semmap = SEMMAP;
		seminfo.semume = SEMUME;
		down(&sem_ids.sem);
		if (cmd == SEM_INFO) {
			seminfo.semusz = sem_ids.in_use;
			seminfo.semaem = used_sems;
		} else {
			seminfo.semusz = SEMUSZ;
			seminfo.semaem = SEMAEM;
		}
		max_id = sem_ids.max_id;
		up(&sem_ids.sem);
		if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}
	case SEM_STAT:
	{
		struct semid64_ds tbuf;
		int id;

		if (semid >= sem_ids.size)
			return -EINVAL;

		memset(&tbuf, 0, sizeof(tbuf));

		sma = sem_lock(semid);
		if (sma == NULL)
			return -EINVAL;

		err = -EACCES;
		if (ipcperms (&sma->sem_perm, S_IRUGO))
			goto out_unlock;

		err = security_sem_semctl(sma, cmd);
		if (err)
			goto out_unlock;

		id = sem_buildid(semid, sma->sem_perm.seq);

		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime = sma->sem_otime;
		tbuf.sem_ctime = sma->sem_ctime;
		tbuf.sem_nsems = sma->sem_nsems;
		sem_unlock(sma);
		if (copy_semid_to_user (arg.buf, &tbuf, version))
			return -EFAULT;
		return id;
	}
	default:
		return -EINVAL;
	}
	return err;
out_unlock:
	sem_unlock(sma);
	return err;
}

static int semctl_main(int semid, int semnum, int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	struct sem *curr;
	int err;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort *sem_io = fast_sem_io;
	int nsems;

	sma = sem_lock(semid);
	if (sma == NULL)
		return -EINVAL;

	nsems = sma->sem_nsems;

	err = -EIDRM;
	if (sem_checkid(sma,semid))
		goto out_unlock;

	err = -EACCES;
	if (ipcperms (&sma->sem_perm, (cmd==SETVAL||cmd==SETALL)?S_IWUGO:S_IRUGO))
		goto out_unlock;

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock;

	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = arg.array;
		int i;

		if (nsems > SEMMSL_FAST) {
			ipc_rcu_getref(sma);
			sem_unlock(sma);

			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if (sem_io == NULL) {
				ipc_lock_by_ptr(&sma->sem_perm);
				ipc_rcu_putref(sma);
				sem_unlock(sma);
				return -ENOMEM;
			}

			ipc_lock_by_ptr(&sma->sem_perm);
			ipc_rcu_putref(sma);
			if (sma->sem_perm.deleted) {
				sem_unlock(sma);
				err = -EIDRM;
				goto out_free;
			}
		}

		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sem_base[i].semval;
		sem_unlock(sma);
		err = 0;
		if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		ipc_rcu_getref(sma);
		sem_unlock(sma);

		if (nsems > SEMMSL_FAST) {
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if (sem_io == NULL) {
				ipc_lock_by_ptr(&sma->sem_perm);
				ipc_rcu_putref(sma);
				sem_unlock(sma);
				return -ENOMEM;
			}
		}

		if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) {
			ipc_lock_by_ptr(&sma->sem_perm);
			ipc_rcu_putref(sma);
			sem_unlock(sma);
			err = -EFAULT;
			goto out_free;
		}

		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				ipc_lock_by_ptr(&sma->sem_perm);
				ipc_rcu_putref(sma);
				sem_unlock(sma);
				err = -ERANGE;
				goto out_free;
			}
		}
		ipc_lock_by_ptr(&sma->sem_perm);
		ipc_rcu_putref(sma);
		if (sma->sem_perm.deleted) {
			sem_unlock(sma);
			err = -EIDRM;
			goto out_free;
		}

		for (i = 0; i < nsems; i++)
			sma->sem_base[i].semval = sem_io[i];
		for (un = sma->undo; un; un = un->id_next)
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
		err = 0;
		goto out_unlock;
	}
	case IPC_STAT:
	{
		struct semid64_ds tbuf;

		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime = sma->sem_otime;
		tbuf.sem_ctime = sma->sem_ctime;
		tbuf.sem_nsems = sma->sem_nsems;
		sem_unlock(sma);
		if (copy_semid_to_user (arg.buf, &tbuf, version))
			return -EFAULT;
		return 0;
	}
	/* GETVAL, GETPID, GETNCNT, GETZCNT, SETVAL: fall-through */
	}
	err = -EINVAL;
	if (semnum < 0 || semnum >= nsems)
		goto out_unlock;

	curr = &sma->sem_base[semnum];

	switch (cmd) {
	case GETNCNT:
		err = count_semncnt(sma,semnum);
		goto out_unlock;
	case GETZCNT:
		err = count_semzcnt(sma,semnum);
		goto out_unlock;
	case SETVAL:
	{
		int val = arg.val;
		struct sem_undo *un;

		err = -ERANGE;
		if (val > SEMVMX || val < 0)
			goto out_unlock;

		for (un = sma->undo; un; un = un->id_next)
			un->semadj[semnum] = 0;
		curr->semval = val;
		curr->sempid = current->tgid;
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
		err = 0;
	}
	}
out_unlock:
	sem_unlock(sma);
out_free:
	if (sem_io != fast_sem_io)
		ipc_free(sem_io, sizeof(ushort)*nsems);
	return err;
}

static inline unsigned long copy_semid_from_user(struct sem_setbuf *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
	    {
		struct semid64_ds tbuf;

		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->uid	= tbuf.sem_perm.uid;
		out->gid	= tbuf.sem_perm.gid;
		out->mode	= tbuf.sem_perm.mode;

		return 0;
	    }
	case IPC_OLD:
	    {
		struct semid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid	= tbuf_old.sem_perm.uid;
		out->gid	= tbuf_old.sem_perm.gid;
		out->mode	= tbuf_old.sem_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static int semctl_down(int semid, int semnum, int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	int err;
	struct sem_setbuf setbuf;
	struct kern_ipc_perm *ipcp;

	if (cmd == IPC_SET) {
		if (copy_semid_from_user (&setbuf, arg.buf, version))
			return -EFAULT;
	}
	sma = sem_lock(semid);
	if (sma == NULL)
		return -EINVAL;

	if (sem_checkid(sma,semid)) {
		err = -EIDRM;
		goto out_unlock;
	}
	ipcp = &sma->sem_perm;

	if (current->euid != ipcp->cuid &&
	    current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN)) {
		err = -EPERM;
		goto out_unlock;
	}

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock;

	switch(cmd){
	case IPC_RMID:
		freeary(sma, semid);
		err = 0;
		break;
	case IPC_SET:
		ipcp->uid = setbuf.uid;
		ipcp->gid = setbuf.gid;
		ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
				| (setbuf.mode & S_IRWXUGO);
		sma->sem_ctime = get_seconds();
		sem_unlock(sma);
		err = 0;
		break;
	default:
		sem_unlock(sma);
		err = -EINVAL;
		break;
	}
	return err;

out_unlock:
	sem_unlock(sma);
	return err;
}

asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg)
{
	int err = -EINVAL;
	int version;

	if (semid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	case SEM_STAT:
		err = semctl_nolock(semid,semnum,cmd,version,arg);
		return err;
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case IPC_STAT:
	case SETVAL:
	case SETALL:
		err = semctl_main(semid,semnum,cmd,version,arg);
		return err;
	case IPC_RMID:
	case IPC_SET:
		down(&sem_ids.sem);
		err = semctl_down(semid,semnum,cmd,version,arg);
		up(&sem_ids.sem);
		return err;
	default:
		return -EINVAL;
	}
}

static inline void lock_semundo(void)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if ((undo_list != NULL) && (atomic_read(&undo_list->refcnt) != 1))
		spin_lock(&undo_list->lock);
}

/* This code has an interaction with copy_semundo().
 * Consider: two tasks are sharing the undo_list. task1
 * acquires the undo_list lock in lock_semundo(). If task2 now
 * exits before task1 releases the lock (by calling
 * unlock_semundo()), then task1 will never call spin_unlock().
 * This leaves the sem_undo_list in a locked state. If task1 now creates
 * task3 and once again shares the sem_undo_list, the sem_undo_list will
 * still be locked, and future SEM_UNDO operations will deadlock. This
 * case is dealt with in copy_semundo() by having it reinitialize the
 * spin lock when the refcnt goes from 1 to 2.
 */
static inline void unlock_semundo(void)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if ((undo_list != NULL) && (atomic_read(&undo_list->refcnt) != 1))
		spin_unlock(&undo_list->lock);
}

/* If the task doesn't already have an undo_list, then allocate one
 * here. We guarantee there is only one thread using this undo list,
 * and current is THE ONE.
 *
 * If this allocation and assignment succeeds, but later
 * portions of this code fail, there is no need to free the sem_undo_list.
 * Just let it stay associated with the task, and it'll be freed later
 * at exit time.
 *
 * This can block, so callers must hold no locks.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
	struct sem_undo_list *undo_list;
	int size;

	undo_list = current->sysvsem.undo_list;
	if (!undo_list) {
		size = sizeof(struct sem_undo_list);
		undo_list = (struct sem_undo_list *) kmalloc(size, GFP_KERNEL);
		if (undo_list == NULL)
			return -ENOMEM;
		memset(undo_list, 0, size);
		/* don't initialize undo_list->lock here. It's done
		 * in copy_semundo() instead.
		 */
		atomic_set(&undo_list->refcnt, 1);
		current->sysvsem.undo_list = undo_list;
	}
	*undo_listp = undo_list;
	return 0;
}

static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo **last, *un;

	last = &ulp->proc_list;
	un = *last;
	while (un != NULL) {
		if (un->semid == semid)
			break;
		if (un->semid == -1) {	/* invalidated entry: free it lazily */
			*last = un->proc_next;
			kfree(un);
		} else {
			last = &un->proc_next;
		}
		un = *last;
	}
	return un;
}

static struct sem_undo *find_undo(int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems;
	int error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	lock_semundo();
	un = lookup_undo(ulp, semid);
	unlock_semundo();
	if (likely(un!=NULL))
		goto out;

	/* no undo structure around - allocate one. */
	sma = sem_lock(semid);
	un = ERR_PTR(-EINVAL);
	if (sma == NULL)
		goto out;
	un = ERR_PTR(-EIDRM);
	if (sem_checkid(sma,semid)) {
		sem_unlock(sma);
		goto out;
	}
	nsems = sma->sem_nsems;
	ipc_rcu_getref(sma);
	sem_unlock(sma);

	new = (struct sem_undo *) kmalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
	if (!new) {
		ipc_lock_by_ptr(&sma->sem_perm);
		ipc_rcu_putref(sma);
		sem_unlock(sma);
		return ERR_PTR(-ENOMEM);
	}
	memset(new, 0, sizeof(struct sem_undo) + sizeof(short)*nsems);
	new->semadj = (short *) &new[1];
	new->semid = semid;

	lock_semundo();
	un = lookup_undo(ulp, semid);
	if (un) {
		unlock_semundo();
		kfree(new);
		ipc_lock_by_ptr(&sma->sem_perm);
		ipc_rcu_putref(sma);
		sem_unlock(sma);
		goto out;
	}
	ipc_lock_by_ptr(&sma->sem_perm);
	ipc_rcu_putref(sma);
	if (sma->sem_perm.deleted) {
		sem_unlock(sma);
		unlock_semundo();
		kfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	new->proc_next = ulp->proc_list;
	ulp->proc_list = new;
	new->id_next = sma->undo;
	sma->undo = new;
	sem_unlock(sma);
	un = new;
	unlock_semundo();
out:
	return un;
}

asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops,
			unsigned nsops, const struct timespec __user *timeout)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];
	struct sembuf *sops = fast_sops, *sop;
	struct sem_undo *un;
	int undos = 0, decrease = 0, alter = 0, max;
	struct sem_queue queue;
	unsigned long jiffies_left = 0;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > sc_semopm)
		return -E2BIG;
	if (nsops > SEMOPM_FAST) {
		sops = kmalloc(sizeof(*sops)*nsops, GFP_KERNEL);
		if (sops == NULL)
			return -ENOMEM;
	}
	if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) {
		error = -EFAULT;
		goto out_free;
	}
	if (timeout) {
		struct timespec _timeout;
		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
			error = -EFAULT;
			goto out_free;
		}
		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
		    _timeout.tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
		jiffies_left = timespec_to_jiffies(&_timeout);
	}
	max = 0;
	for (sop = sops; sop < sops + nsops; sop++) {
		if (sop->sem_num >= max)
			max = sop->sem_num;
		if (sop->sem_flg & SEM_UNDO)
			undos++;
		if (sop->sem_op < 0)
			decrease = 1;
		if (sop->sem_op > 0)
			alter = 1;
	}
	alter |= decrease;

retry_undos:
	if (undos) {
		un = find_undo(semid);
		if (IS_ERR(un)) {
			error = PTR_ERR(un);
			goto out_free;
		}
	} else
		un = NULL;

	sma = sem_lock(semid);
	error = -EINVAL;
	if (sma == NULL)
		goto out_free;
	error = -EIDRM;
	if (sem_checkid(sma,semid))
		goto out_unlock_free;
	/*
	 * semid identifiers are not unique - find_undo may have
	 * allocated an undo structure that was then invalidated by an RMID,
	 * while a new array has since received the same id. Check and retry.
	 */
	if (un && un->semid == -1) {
		sem_unlock(sma);
		goto retry_undos;
	}
	error = -EFBIG;
	if (max >= sma->sem_nsems)
		goto out_unlock_free;

	error = -EACCES;
	if (ipcperms(&sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
		goto out_unlock_free;

	error = security_sem_semop(sma, sops, nsops, alter);
	if (error)
		goto out_unlock_free;

	error = try_atomic_semop (sma, sops, nsops, un, current->tgid);
	if (error <= 0)
		goto update;

	/* We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 */

	queue.sma = sma;
	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = current->tgid;
	queue.id = semid;
	if (alter)
		append_to_queue(sma, &queue);
	else
		prepend_to_queue(sma, &queue);

	queue.status = -EINTR;
	queue.sleeper = current;
	current->state = TASK_INTERRUPTIBLE;
	sem_unlock(sma);

	if (timeout)
		jiffies_left = schedule_timeout(jiffies_left);
	else
		schedule();

	error = queue.status;
	while (unlikely(error == IN_WAKEUP)) {
		cpu_relax();
		error = queue.status;
	}

	if (error != -EINTR) {
		/* fast path: update_queue already obtained all requested
		 * resources */
		goto out_free;
	}

	sma = sem_lock(semid);
	if (sma == NULL) {
		if (queue.prev != NULL)
			BUG();
		error = -EIDRM;
		goto out_free;
	}

	/*
	 * If queue.status != -EINTR we are woken up by another process
	 */
	error = queue.status;
	if (error != -EINTR) {
		goto out_unlock_free;
	}

	/*
	 * If an interrupt occurred we have to clean up the queue
	 * if a timeout occurred.
	 */
	if (timeout && jiffies_left == 0)
		error = -EAGAIN;
	remove_from_queue(sma, &queue);
	goto out_unlock_free;

update:
	if (alter)
		update_queue(sma);
out_unlock_free:
	sem_unlock(sma);
out_free:
	if (sops != fast_sops)
		kfree(sops);
	return error;
}

asmlinkage long sys_semop (int semid, struct sembuf __user *tsops, unsigned nsops)
{
	return sys_semtimedop(semid, tsops, nsops, NULL);
}
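
/*
 * For reference, a minimal user-space sketch of the calls served here
 * (illustrative only; semid is assumed to come from a prior semget()):
 *
 *	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	semop(semid, &op, 1);             decrement; rolled back at exit
 *	semtimedop(semid, &op, 1, &ts);   the same, giving up after ~1s
 */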

/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
 * parent and child tasks.
 *
 * See the notes above unlock_semundo() regarding the spin_lock_init()
 * in this code. We initialize undo_list->lock here instead of in
 * get_undo_list() because of the reasoning in the comment above
 * unlock_semundo().
 */
int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	int error;

	if (clone_flags & CLONE_SYSVSEM) {
		error = get_undo_list(&undo_list);
		if (error)
			return error;
		if (atomic_read(&undo_list->refcnt) == 1)
			spin_lock_init(&undo_list->lock);
		atomic_inc(&undo_list->refcnt);
		tsk->sysvsem.undo_list = undo_list;
	} else
		tsk->sysvsem.undo_list = NULL;

	return 0;
}

/*
 * add semadj values to semaphores, free undo structures.
 * undo structures are not freed when semaphore arrays are destroyed
 * so some of them may be out of date.
 * IMPLEMENTATION NOTE: There is some confusion over whether the
 * set of adjustments that needs to be done should be done in an atomic
 * manner or not. That is, if we are attempting to decrement the semval
 * should we queue up and wait until we can do so legally?
 * The original implementation attempted to do this (queue and wait).
 * The current implementation does not do so. The POSIX standard
 * and SVID should be consulted to determine what behavior is mandated.
 */
void exit_sem(struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	struct sem_undo *u, **up;

	undo_list = tsk->sysvsem.undo_list;
	if (!undo_list)
		return;

	if (!atomic_dec_and_test(&undo_list->refcnt))
		return;

	/* There's no need to hold the semundo list lock, as current
	 * is the last task exiting for this undo list.
	 */
	for (up = &undo_list->proc_list; (u = *up); *up = u->proc_next, kfree(u)) {
		struct sem_array *sma;
		int nsems, i;
		struct sem_undo *un, **unp;
		int semid;

		semid = u->semid;
		if (semid == -1)
			continue;

		sma = sem_lock(semid);
		if (sma == NULL)
			continue;

		if (u->semid == -1)
			goto next_entry;

		BUG_ON(sem_checkid(sma,u->semid));

		/* remove u from the sma->undo list */
		for (unp = &sma->undo; (un = *unp); unp = &un->id_next) {
			if (u == un)
				goto found;
		}
		printk ("exit_sem undo list error id=%d\n", u->semid);
		goto next_entry;
found:
		*unp = un->id_next;

		/* perform adjustments registered in u */
		nsems = sma->sem_nsems;
		for (i = 0; i < nsems; i++) {
			struct sem * sem = &sma->sem_base[i];
			if (u->semadj[i]) {
				sem->semval += u->semadj[i];
				/*
				 * Range checks of the new semaphore value,
				 * not defined by SUS:
				 * - Some unices ignore the undo entirely
				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
				 * - some cap the value (e.g. FreeBSD caps
				 *   at 0, but doesn't enforce SEMVMX)
				 *
				 * Linux caps the semaphore value, both at 0
				 * and at SEMVMX.
				 *
				 *	Manfred <manfred@colorfullife.com>
				 */
				if (sem->semval < 0)
					sem->semval = 0;
				if (sem->semval > SEMVMX)
					sem->semval = SEMVMX;
				sem->sempid = current->tgid;
			}
		}
		sma->sem_otime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
next_entry:
		sem_unlock(sma);
	}
	kfree(undo_list);
}

#ifdef CONFIG_PROC_FS
static int sysvipc_sem_read_proc(char *buffer, char **start, off_t offset,
				 int length, int *eof, void *data)
{
	off_t pos = 0;
	off_t begin = 0;
	int i, len = 0;

	len += sprintf(buffer, "       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n");
	down(&sem_ids.sem);

	for (i = 0; i <= sem_ids.max_id; i++) {
		struct sem_array *sma;

		sma = sem_lock(i);
		if (sma) {
			len += sprintf(buffer + len,
				"%10d %10d %4o %10lu %5u %5u %5u %5u %10lu %10lu\n",
				sma->sem_perm.key,
				sem_buildid(i, sma->sem_perm.seq),
				sma->sem_perm.mode,
				sma->sem_nsems,
				sma->sem_perm.uid,
				sma->sem_perm.gid,
				sma->sem_perm.cuid,
				sma->sem_perm.cgid,
				sma->sem_otime,
				sma->sem_ctime);
			sem_unlock(sma);

			pos += len;
			if (pos < offset) {
				len = 0;
				begin = pos;
			}
			if (pos > offset + length)
				goto done;
		}
	}
	*eof = 1;
done:
	up(&sem_ids.sem);
	*start = buffer + (offset - begin);
	len -= (offset - begin);
	if (len > length)
		len = length;
	if (len < 0)
		len = 0;
	return len;
}
#endif