On Tue, Nov 06, 2007 at 02:33:53AM -0800, akpm@linux-foundation.org wrote:
[mmotm.git] ipc/sem.c (blob 82518d6a96a34583ae76f6ec85795f32aa70cb2e)
/*
 * linux/ipc/sem.c
 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * IMPLEMENTATION NOTES ON CODE REWRITE (Eric Schenk, January 1995):
 * This code underwent a massive rewrite in order to solve some problems
 * with the original code. In particular the original code failed to
 * wake up processes that were waiting for semval to go to 0 if the
 * value went to 0 and was then incremented rapidly enough. In solving
 * this problem I have also modified the implementation so that it
 * processes pending operations in a FIFO manner, thus giving a guarantee
 * that processes waiting for a lock on the semaphore won't starve
 * unless another locking process fails to unlock.
 * In addition the following two changes in behavior have been introduced:
 * - The original implementation of semop returned the value of the
 *   last semaphore element examined on success. This does not
 *   match the manual page specifications, and effectively
 *   allows the user to read the semaphore even if they do not
 *   have read permissions. The implementation now returns 0
 *   on success as stated in the manual page.
 * - There is some confusion over whether the set of undo adjustments
 *   to be performed at exit should be done in an atomic manner.
 *   That is, if we are attempting to decrement the semval should we queue
 *   up and wait until we can do so legally?
 *   The original implementation attempted to do this.
 *   The current implementation does not do so. This is because I don't
 *   think it is the right thing (TM) to do, and because I couldn't
 *   see a clean way to get the old behavior with the new design.
 *   The POSIX standard and SVID should be consulted to determine
 *   what behavior is mandated.
 *
 * Further notes on refinement (Christoph Rohland, December 1998):
 * - The POSIX standard says that the undo adjustments should simply
 *   be redone. So the current implementation is O.K.
 * - The previous code had two flaws:
 *   1) It actively gave the semaphore to the next waiting process
 *      sleeping on the semaphore. Since this process did not have the
 *      cpu, this led to many unnecessary context switches and bad
 *      performance. Now we only check which process should be able to
 *      get the semaphore and, if this process wants to reduce some
 *      semaphore value, we simply wake it up without doing the
 *      operation. So it has to try to get it later. Thus e.g. the
 *      running process may reacquire the semaphore during the current
 *      time slice. If it only waits for zero or increases the semaphore,
 *      we do the operation in advance and wake it up.
 *   2) It did not wake up all zero waiting processes. We try to do
 *      better but only get the semops right which only wait for zero or
 *      increase. If there are decrement operations in the operations
 *      array we do the same as before.
 *
 * With the arrival of the O(1) scheduler, it became unnecessary to perform
 * the check/retry algorithm for waking up blocked processes, as the new
 * scheduler handles thread switches better than the old one.
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 * Enforced range limit on SEM_UNDO
 * (c) 2001 Red Hat Inc
 * Lockless wakeup
 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>
#include "util.h"

#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])

#define sem_unlock(sma)	ipc_unlock(&(sma)->sem_perm)
#define sem_checkid(sma, semid)	ipc_checkid(&sma->sem_perm, semid)

static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif

#define SEMMSL_FAST	256 /* 512 bytes on stack */
#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */
/*
 * linked list protection:
 *	sem_undo.id_next,
 *	sem_array.sem_pending{,last},
 *	sem_array.sem_undo: sem_lock() for read/write
 *	sem_undo.proc_next: only "current" is allowed to read/write that field.
 */

#define sc_semmsl	sem_ctls[0]
#define sc_semmns	sem_ctls[1]
#define sc_semopm	sem_ctls[2]
#define sc_semmni	sem_ctls[3]
void sem_init_ns(struct ipc_namespace *ns)
{
	ns->sc_semmsl = SEMMSL;
	ns->sc_semmns = SEMMNS;
	ns->sc_semopm = SEMOPM;
	ns->sc_semmni = SEMMNI;
	ns->used_sems = 0;
	ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}

#ifdef CONFIG_IPC_NS
void sem_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &sem_ids(ns), freeary);
	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
}
#endif

void __init sem_init(void)
{
	sem_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/sem",
				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
				IPC_SEM_IDS, sysvipc_sem_proc_show);
}
/*
 * sem_lock_(check_) routines are called in the paths where the rw_mutex
 * is not held.
 */
static inline struct sem_array *sem_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct sem_array *)ipcp;

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct sem_array *)ipcp;

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline void sem_lock_and_putref(struct sem_array *sma)
{
	ipc_lock_by_ptr(&sma->sem_perm);
	ipc_rcu_putref(sma);
}

static inline void sem_getref_and_unlock(struct sem_array *sma)
{
	ipc_rcu_getref(sma);
	ipc_unlock(&(sma)->sem_perm);
}

static inline void sem_putref(struct sem_array *sma)
{
	ipc_lock_by_ptr(&sma->sem_perm);
	ipc_rcu_putref(sma);
	ipc_unlock(&(sma)->sem_perm);
}

static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
	ipc_rmid(&sem_ids(ns), &s->sem_perm);
}
/*
 * Lockless wakeup algorithm:
 * Without the check/retry algorithm a lockless wakeup is possible:
 * - queue.status is initialized to -EINTR before blocking.
 * - wakeup is performed by
 *	* unlinking the queue entry from sma->sem_pending
 *	* setting queue.status to IN_WAKEUP
 *	  This is the notification for the blocked thread that a
 *	  result value is imminent.
 *	* call wake_up_process
 *	* set queue.status to the final value.
 * - the previously blocked thread checks queue.status:
 *	* if it's IN_WAKEUP, then it must wait until the value changes
 *	* if it's not -EINTR, then the operation was completed by
 *	  update_queue. semtimedop can return queue.status without
 *	  performing any operation on the sem array.
 *	* otherwise it must acquire the spinlock and check what's up.
 *
 * The two-stage algorithm is necessary to protect against the following
 * races:
 * - if queue.status is set after wake_up_process, then the woken up idle
 *   thread could race forward and try (and fail) to acquire sma->lock
 *   before update_queue had a chance to set queue.status
 * - if queue.status is written before wake_up_process and if the
 *   blocked process is woken up by a signal between writing
 *   queue.status and the wake_up_process, then the woken up
 *   process could return from semtimedop and die by calling
 *   sys_exit before wake_up_process is called. Then wake_up_process
 *   will oops, because the task structure is already invalid.
 *   (yes, this happened on s390 with sysv msg).
 */
#define IN_WAKEUP	1
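
As an aside, the two-stage handshake above can be modelled in userspace. The sketch below is illustrative only and not kernel code: a C11 atomic stands in for queue.status, the second atomic_store() plays the role of update_queue() publishing the final value, and sched_yield() replaces cpu_relax().

	#include <pthread.h>
	#include <sched.h>
	#include <stdatomic.h>
	#include <stdio.h>

	#define ST_BLOCKED	(-4)	/* stands in for -EINTR */
	#define ST_IN_WAKEUP	1	/* result value is imminent */

	static atomic_int status = ST_BLOCKED;

	static void *waiter(void *arg)
	{
		int s;

		/* "schedule()": wait until a waker claims this sleeper. */
		while (atomic_load(&status) == ST_BLOCKED)
			sched_yield();
		/* Stage two: spin while the final value is not yet published. */
		while ((s = atomic_load(&status)) == ST_IN_WAKEUP)
			sched_yield();	/* the kernel uses cpu_relax() here */
		printf("woken, final status=%d\n", s);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, waiter, NULL);
		atomic_store(&status, ST_IN_WAKEUP);	/* claim the sleeper */
		/* wake_up_process(q->sleeper) would happen between the stores */
		atomic_store(&status, 0);		/* publish the final value */
		pthread_join(t, NULL);
		return 0;
	}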
/**
 * newary - Create a new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rw_mutex held (as a writer)
 */
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
	int id;
	int retval;
	struct sem_array *sma;
	int size;
	key_t key = params->key;
	int nsems = params->u.nsems;
	int semflg = params->flg;
	int i;

	if (!nsems)
		return -EINVAL;
	if (ns->used_sems + nsems > ns->sc_semmns)
		return -ENOSPC;

	size = sizeof(*sma) + nsems * sizeof(struct sem);
	sma = ipc_rcu_alloc(size);
	if (!sma) {
		return -ENOMEM;
	}
	memset(sma, 0, size);

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(sma);
	if (retval) {
		ipc_rcu_putref(sma);
		return retval;
	}

	id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
	if (id < 0) {
		security_sem_free(sma);
		ipc_rcu_putref(sma);
		return id;
	}
	ns->used_sems += nsems;

	sma->sem_base = (struct sem *) &sma[1];

	for (i = 0; i < nsems; i++)
		INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);

	sma->complex_count = 0;
	INIT_LIST_HEAD(&sma->sem_pending);
	INIT_LIST_HEAD(&sma->list_id);
	sma->sem_nsems = nsems;
	sma->sem_ctime = get_seconds();
	sem_unlock(sma);

	return sma->sem_perm.id;
}
/*
 * Called with sem_ids.rw_mutex and ipcp locked.
 */
static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	return security_sem_associate(sma, semflg);
}

/*
 * Called with sem_ids.rw_mutex and ipcp locked.
 */
static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	if (params->u.nsems > sma->sem_nsems)
		return -EINVAL;

	return 0;
}
SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops sem_ops;
	struct ipc_params sem_params;

	ns = current->nsproxy->ipc_ns;

	if (nsems < 0 || nsems > ns->sc_semmsl)
		return -EINVAL;

	sem_ops.getnew = newary;
	sem_ops.associate = sem_security;
	sem_ops.more_checks = sem_more_checks;

	sem_params.key = key;
	sem_params.flg = semflg;
	sem_params.u.nsems = nsems;

	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}
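
For reference, here is a minimal userspace sketch of the path this syscall implements; the example is hypothetical and not part of the file. IPC_PRIVATE always takes the newary() route, while an existing key would instead pass through sem_security() and sem_more_checks():

	#include <stdio.h>
	#include <sys/ipc.h>
	#include <sys/sem.h>

	int main(void)
	{
		/* one-semaphore set, created fresh via newary() */
		int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

		if (semid < 0) {
			perror("semget");
			return 1;
		}
		printf("created semaphore set %d\n", semid);
		semctl(semid, 0, IPC_RMID);	/* freeary() in the kernel */
		return 0;
	}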
/*
 * Determine whether a sequence of semaphore operations would succeed
 * all at once. Return 0 if yes, 1 if need to sleep, else return error code.
 */
static int try_atomic_semop(struct sem_array *sma, struct sembuf *sops,
			     int nsops, struct sem_undo *un, int pid)
{
	int result, sem_op;
	struct sembuf *sop;
	struct sem *curr;

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = sma->sem_base + sop->sem_num;
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;
		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;
			/*
			 * Exceeding the undo range is an error.
			 */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
		}
		curr->semval = result;
	}

	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].sempid = pid;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] -= sop->sem_op;
		sop--;
	}

	sma->sem_otime = get_seconds();
	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].semval -= sop->sem_op;
		sop--;
	}

	return result;
}
/*
 * Wake up a process waiting on the sem queue with a given error.
 * The queue is invalid (may not be accessed) after the function returns.
 */
static void wake_up_sem_queue(struct sem_queue *q, int error)
{
	/*
	 * Hold preempt off so that we don't get preempted and have the
	 * wakee busy-wait until we're scheduled back on. We're holding
	 * locks here so it may not strictly be needed, however if the
	 * locks become preemptible then this prevents such a problem.
	 */
	preempt_disable();
	q->status = IN_WAKEUP;
	wake_up_process(q->sleeper);
	/* hands-off: q can disappear immediately after writing q->status. */
	smp_wmb();
	q->status = error;
	preempt_enable();
}
static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
	list_del(&q->list);
	if (q->nsops == 1)
		list_del(&q->simple_list);
	else
		sma->complex_count--;
}
/**
 * update_queue(sma, semnum): Look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 *
 * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, then @semnum
 * must be set to -1.
 */
static void update_queue(struct sem_array *sma, int semnum)
{
	struct sem_queue *q;
	struct list_head *walk;
	struct list_head *pending_list;
	int offset;

	/* if there are complex operations around, then knowing the semaphore
	 * that was modified doesn't help us. Assume that multiple semaphores
	 * were modified.
	 */
	if (sma->complex_count)
		semnum = -1;

	if (semnum == -1) {
		pending_list = &sma->sem_pending;
		offset = offsetof(struct sem_queue, list);
	} else {
		pending_list = &sma->sem_base[semnum].sem_pending;
		offset = offsetof(struct sem_queue, simple_list);
	}

again:
	walk = pending_list->next;
	while (walk != pending_list) {
		int error, alter;

		q = (struct sem_queue *)((char *)walk - offset);
		walk = walk->next;

		/* If we are scanning the single-sop, per-semaphore list of
		 * one semaphore and that semaphore is 0, then it is not
		 * necessary to scan the "alter" entries: simple increments
		 * that affect only one entry succeed immediately and cannot
		 * be in the per-semaphore pending queue, and decrements
		 * cannot be successful if the value is already 0.
		 */
		if (semnum != -1 && sma->sem_base[semnum].semval == 0 &&
				q->alter)
			break;

		error = try_atomic_semop(sma, q->sops, q->nsops,
					 q->undo, q->pid);

		/* Does q->sleeper still need to sleep? */
		if (error > 0)
			continue;

		unlink_queue(sma, q);

		/*
		 * The next operation that must be checked depends on the type
		 * of the completed operation:
		 * - if the operation modified the array, then restart from the
		 *   head of the queue and check for threads that might be
		 *   waiting for the new semaphore values.
		 * - if the operation didn't modify the array, then just
		 *   continue.
		 */
		alter = q->alter;
		wake_up_sem_queue(q, error);
		if (alter && !error)
			goto again;
	}
}
/* The following counts are associated to each semaphore:
 *   semncnt        number of tasks waiting on semval being nonzero
 *   semzcnt        number of tasks waiting on semval being zero
 * This model assumes that a task waits on exactly one semaphore.
 * Since semaphore operations are to be performed atomically, tasks actually
 * wait on a whole sequence of semaphores simultaneously.
 * The counts we return here are a rough approximation, but still
 * warrant that semncnt+semzcnt>0 if the task is on the pending queue.
 */
static int count_semncnt(struct sem_array *sma, ushort semnum)
{
	int semncnt;
	struct sem_queue *q;

	semncnt = 0;
	list_for_each_entry(q, &sma->sem_pending, list) {
		struct sembuf *sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op < 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semncnt++;
	}
	return semncnt;
}

static int count_semzcnt(struct sem_array *sma, ushort semnum)
{
	int semzcnt;
	struct sem_queue *q;

	semzcnt = 0;
	list_for_each_entry(q, &sma->sem_pending, list) {
		struct sembuf *sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op == 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semzcnt++;
	}
	return semzcnt;
}
static void free_un(struct rcu_head *head)
{
	struct sem_undo *un = container_of(head, struct sem_undo, rcu);
	kfree(un);
}
/* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked
 * as a writer and the spinlock for this semaphore set held. sem_ids.rw_mutex
 * remains locked on exit.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct sem_undo *un, *tu;
	struct sem_queue *q, *tq;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);

	/* Free the existing undo structures for this semaphore set. */
	assert_spin_locked(&sma->sem_perm.lock);
	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
		list_del(&un->list_id);
		spin_lock(&un->ulp->lock);
		un->semid = -1;
		list_del_rcu(&un->list_proc);
		spin_unlock(&un->ulp->lock);
		call_rcu(&un->rcu, free_un);
	}

	/* Wake up all pending processes and let them fail with EIDRM. */
	list_for_each_entry_safe(q, tq, &sma->sem_pending, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue(q, -EIDRM);
	}

	/* Remove the semaphore set from the IDR */
	sem_rmid(ns, sma);
	sem_unlock(sma);

	ns->used_sems -= sma->sem_nsems;
	security_sem_free(sma);
	ipc_rcu_putref(sma);
}
static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct semid_ds out;

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime	= in->sem_otime;
		out.sem_ctime	= in->sem_ctime;
		out.sem_nsems	= in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
static int semctl_nolock(struct ipc_namespace *ns, int semid,
			 int cmd, int version, union semun arg)
{
	int err = -EINVAL;
	struct sem_array *sma;

	switch (cmd) {
	case IPC_INFO:
	case SEM_INFO:
	{
		struct seminfo seminfo;
		int max_id;

		err = security_sem_semctl(NULL, cmd);
		if (err)
			return err;

		memset(&seminfo, 0, sizeof(seminfo));
		seminfo.semmni = ns->sc_semmni;
		seminfo.semmns = ns->sc_semmns;
		seminfo.semmsl = ns->sc_semmsl;
		seminfo.semopm = ns->sc_semopm;
		seminfo.semvmx = SEMVMX;
		seminfo.semmnu = SEMMNU;
		seminfo.semmap = SEMMAP;
		seminfo.semume = SEMUME;
		down_read(&sem_ids(ns).rw_mutex);
		if (cmd == SEM_INFO) {
			seminfo.semusz = sem_ids(ns).in_use;
			seminfo.semaem = ns->used_sems;
		} else {
			seminfo.semusz = SEMUSZ;
			seminfo.semaem = SEMAEM;
		}
		max_id = ipc_get_maxid(&sem_ids(ns));
		up_read(&sem_ids(ns).rw_mutex);
		if (copy_to_user(arg.__buf, &seminfo, sizeof(struct seminfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}
	case IPC_STAT:
	case SEM_STAT:
	{
		struct semid64_ds tbuf;
		int id;

		if (cmd == SEM_STAT) {
			sma = sem_lock(ns, semid);
			if (IS_ERR(sma))
				return PTR_ERR(sma);
			id = sma->sem_perm.id;
		} else {
			sma = sem_lock_check(ns, semid);
			if (IS_ERR(sma))
				return PTR_ERR(sma);
			id = 0;
		}

		err = -EACCES;
		if (ipcperms(&sma->sem_perm, S_IRUGO))
			goto out_unlock;

		err = security_sem_semctl(sma, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));

		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime = sma->sem_otime;
		tbuf.sem_ctime = sma->sem_ctime;
		tbuf.sem_nsems = sma->sem_nsems;
		sem_unlock(sma);
		if (copy_semid_to_user(arg.buf, &tbuf, version))
			return -EFAULT;
		return id;
	}
	default:
		return -EINVAL;
	}
	return err;
out_unlock:
	sem_unlock(sma);
	return err;
}
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	struct sem *curr;
	int err;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort *sem_io = fast_sem_io;
	int nsems;

	sma = sem_lock_check(ns, semid);
	if (IS_ERR(sma))
		return PTR_ERR(sma);

	nsems = sma->sem_nsems;

	err = -EACCES;
	if (ipcperms(&sma->sem_perm, (cmd == SETVAL || cmd == SETALL) ? S_IWUGO : S_IRUGO))
		goto out_unlock;

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock;

	err = -EACCES;
	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = arg.array;
		int i;

		if (nsems > SEMMSL_FAST) {
			sem_getref_and_unlock(sma);

			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if (sem_io == NULL) {
				sem_putref(sma);
				return -ENOMEM;
			}

			sem_lock_and_putref(sma);
			if (sma->sem_perm.deleted) {
				sem_unlock(sma);
				err = -EIDRM;
				goto out_free;
			}
		}

		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sem_base[i].semval;
		sem_unlock(sma);
		err = 0;
		if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		sem_getref_and_unlock(sma);

		if (nsems > SEMMSL_FAST) {
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if (sem_io == NULL) {
				sem_putref(sma);
				return -ENOMEM;
			}
		}

		if (copy_from_user(sem_io, arg.array, nsems*sizeof(ushort))) {
			sem_putref(sma);
			err = -EFAULT;
			goto out_free;
		}

		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				sem_putref(sma);
				err = -ERANGE;
				goto out_free;
			}
		}
		sem_lock_and_putref(sma);
		if (sma->sem_perm.deleted) {
			sem_unlock(sma);
			err = -EIDRM;
			goto out_free;
		}

		for (i = 0; i < nsems; i++)
			sma->sem_base[i].semval = sem_io[i];

		assert_spin_locked(&sma->sem_perm.lock);
		list_for_each_entry(un, &sma->list_id, list_id) {
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		}
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma, -1);
		err = 0;
		goto out_unlock;
	}
	/* GETVAL, GETPID, GETNCNT, GETZCNT, SETVAL: fall-through */
	}
	err = -EINVAL;
	if (semnum < 0 || semnum >= nsems)
		goto out_unlock;

	curr = &sma->sem_base[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = curr->sempid;
		goto out_unlock;
	case GETNCNT:
		err = count_semncnt(sma, semnum);
		goto out_unlock;
	case GETZCNT:
		err = count_semzcnt(sma, semnum);
		goto out_unlock;
	case SETVAL:
	{
		int val = arg.val;
		struct sem_undo *un;

		err = -ERANGE;
		if (val > SEMVMX || val < 0)
			goto out_unlock;

		assert_spin_locked(&sma->sem_perm.lock);
		list_for_each_entry(un, &sma->list_id, list_id)
			un->semadj[semnum] = 0;

		curr->semval = val;
		curr->sempid = task_tgid_vnr(current);
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma, semnum);
		err = 0;
		goto out_unlock;
	}
	}
out_unlock:
	sem_unlock(sma);
out_free:
	if (sem_io != fast_sem_io)
		ipc_free(sem_io, sizeof(ushort)*nsems);
	return err;
}
static inline unsigned long
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct semid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
		out->sem_perm.mode	= tbuf_old.sem_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}
/*
 * This function handles some semctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int semctl_down(struct ipc_namespace *ns, int semid,
		       int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	int err;
	struct semid64_ds semid64;
	struct kern_ipc_perm *ipcp;

	if (cmd == IPC_SET) {
		if (copy_semid_from_user(&semid64, arg.buf, version))
			return -EFAULT;
	}

	ipcp = ipcctl_pre_down(&sem_ids(ns), semid, cmd, &semid64.sem_perm, 0);
	if (IS_ERR(ipcp))
		return PTR_ERR(ipcp);

	sma = container_of(ipcp, struct sem_array, sem_perm);

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock;

	switch (cmd) {
	case IPC_RMID:
		freeary(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_update_perm(&semid64.sem_perm, ipcp);
		sma->sem_ctime = get_seconds();
		break;
	default:
		err = -EINVAL;
	}

out_unlock:
	sem_unlock(sma);
out_up:
	up_write(&sem_ids(ns).rw_mutex);
	return err;
}
SYSCALL_DEFINE(semctl)(int semid, int semnum, int cmd, union semun arg)
{
	int err = -EINVAL;
	int version;
	struct ipc_namespace *ns;

	if (semid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SEM_INFO:
	case IPC_STAT:
	case SEM_STAT:
		err = semctl_nolock(ns, semid, cmd, version, arg);
		return err;
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case SETVAL:
	case SETALL:
		err = semctl_main(ns, semid, semnum, cmd, version, arg);
		return err;
	case IPC_RMID:
	case IPC_SET:
		err = semctl_down(ns, semid, cmd, version, arg);
		return err;
	default:
		return -EINVAL;
	}
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_semctl(int semid, int semnum, int cmd, union semun arg)
{
	return SYSC_semctl((int) semid, (int) semnum, (int) cmd, arg);
}
SYSCALL_ALIAS(sys_semctl, SyS_semctl);
#endif
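
Again as an illustrative userspace sketch (not part of the file), the semctl_main() branches above can be exercised with SETVAL and GETVAL; note that on Linux the caller must define union semun itself, see semctl(2):

	#include <stdio.h>
	#include <sys/ipc.h>
	#include <sys/sem.h>

	union semun {
		int val;
		struct semid_ds *buf;
		unsigned short *array;
	};

	int main(void)
	{
		int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
		union semun arg;

		if (semid < 0)
			return 1;
		arg.val = 3;
		semctl(semid, 0, SETVAL, arg);	/* SETVAL branch, resets semadj */
		printf("semval = %d\n", semctl(semid, 0, GETVAL));	/* prints 3 */
		semctl(semid, 0, IPC_RMID);	/* semctl_down() -> freeary() */
		return 0;
	}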
/* If the task doesn't already have an undo_list, then allocate one
 * here. We guarantee there is only one thread using this undo list,
 * and current is THE ONE.
 *
 * If this allocation and assignment succeeds, but later
 * portions of this code fail, there is no need to free the sem_undo_list.
 * Just let it stay associated with the task, and it'll be freed later
 * at exit time.
 *
 * This can block, so callers must hold no locks.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (!undo_list) {
		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
		if (undo_list == NULL)
			return -ENOMEM;
		spin_lock_init(&undo_list->lock);
		atomic_set(&undo_list->refcnt, 1);
		INIT_LIST_HEAD(&undo_list->list_proc);

		current->sysvsem.undo_list = undo_list;
	}
	*undo_listp = undo_list;
	return 0;
}
static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
		if (un->semid == semid)
			return un;
	}
	return NULL;
}

static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	assert_spin_locked(&ulp->lock);

	un = __lookup_undo(ulp, semid);
	if (un) {
		list_del_rcu(&un->list_proc);
		list_add_rcu(&un->list_proc, &ulp->list_proc);
	}
	return un;
}
/**
 * find_alloc_undo - Lookup (and if not present create) undo array
 * @ns: namespace
 * @semid: semaphore array id
 *
 * The function looks up (and if not present creates) the undo structure.
 * The size of the undo structure depends on the size of the semaphore
 * array, thus the alloc path is not that straightforward.
 * Lifetime-rules: sem_undo is rcu-protected, on success, the function
 * performs a rcu_read_lock().
 */
static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems;
	int error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	rcu_read_lock();
	spin_lock(&ulp->lock);
	un = lookup_undo(ulp, semid);
	spin_unlock(&ulp->lock);
	if (likely(un != NULL))
		goto out;
	rcu_read_unlock();

	/* no undo structure around - allocate one. */
	/* step 1: figure out the size of the semaphore array */
	sma = sem_lock_check(ns, semid);
	if (IS_ERR(sma))
		return ERR_PTR(PTR_ERR(sma));

	nsems = sma->sem_nsems;
	sem_getref_and_unlock(sma);

	/* step 2: allocate new undo structure */
	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
	if (!new) {
		sem_putref(sma);
		return ERR_PTR(-ENOMEM);
	}

	/* step 3: Acquire the lock on the semaphore array */
	sem_lock_and_putref(sma);
	if (sma->sem_perm.deleted) {
		sem_unlock(sma);
		kfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	spin_lock(&ulp->lock);

	/*
	 * step 4: check for races: did someone else allocate the undo struct?
	 */
	un = lookup_undo(ulp, semid);
	if (un) {
		kfree(new);
		goto success;
	}
	/* step 5: initialize & link new undo structure */
	new->semadj = (short *) &new[1];
	new->ulp = ulp;
	new->semid = semid;
	assert_spin_locked(&ulp->lock);
	list_add_rcu(&new->list_proc, &ulp->list_proc);
	assert_spin_locked(&sma->sem_perm.lock);
	list_add(&new->list_id, &sma->list_id);
	un = new;

success:
	spin_unlock(&ulp->lock);
	rcu_read_lock();
	sem_unlock(sma);
out:
	return un;
}
SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops, const struct timespec __user *, timeout)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];
	struct sembuf *sops = fast_sops, *sop;
	struct sem_undo *un;
	int undos = 0, alter = 0, max;
	struct sem_queue queue;
	unsigned long jiffies_left = 0;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > ns->sc_semopm)
		return -E2BIG;
	if (nsops > SEMOPM_FAST) {
		sops = kmalloc(sizeof(*sops)*nsops, GFP_KERNEL);
		if (sops == NULL)
			return -ENOMEM;
	}
	if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
		error = -EFAULT;
		goto out_free;
	}
	if (timeout) {
		struct timespec _timeout;
		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
			error = -EFAULT;
			goto out_free;
		}
		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
			_timeout.tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
		jiffies_left = timespec_to_jiffies(&_timeout);
	}
	max = 0;
	for (sop = sops; sop < sops + nsops; sop++) {
		if (sop->sem_num >= max)
			max = sop->sem_num;
		if (sop->sem_flg & SEM_UNDO)
			undos = 1;
		if (sop->sem_op != 0)
			alter = 1;
	}

	if (undos) {
		un = find_alloc_undo(ns, semid);
		if (IS_ERR(un)) {
			error = PTR_ERR(un);
			goto out_free;
		}
	} else
		un = NULL;

	sma = sem_lock_check(ns, semid);
	if (IS_ERR(sma)) {
		if (un)
			rcu_read_unlock();
		error = PTR_ERR(sma);
		goto out_free;
	}

	/*
	 * semid identifiers are not unique - find_alloc_undo may have
	 * allocated an undo structure that was then invalidated by an RMID,
	 * and a new array has now received the same id. Check and fail.
	 * This case can be detected by checking un->semid. The existence of
	 * "un" itself is guaranteed by rcu.
	 */
	error = -EIDRM;
	if (un) {
		if (un->semid == -1) {
			rcu_read_unlock();
			goto out_unlock_free;
		} else {
			/*
			 * rcu lock can be released, "un" cannot disappear:
			 * - sem_lock is acquired, thus IPC_RMID is
			 *   impossible.
			 * - exit_sem is impossible, it always operates on
			 *   current (or a dead task).
			 */
			rcu_read_unlock();
		}
	}

	error = -EFBIG;
	if (max >= sma->sem_nsems)
		goto out_unlock_free;

	error = -EACCES;
	if (ipcperms(&sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
		goto out_unlock_free;

	error = security_sem_semop(sma, sops, nsops, alter);
	if (error)
		goto out_unlock_free;

	error = try_atomic_semop(sma, sops, nsops, un, task_tgid_vnr(current));
	if (error <= 0) {
		if (alter && error == 0)
			update_queue(sma, (nsops == 1) ? sops[0].sem_num : -1);

		goto out_unlock_free;
	}

	/* We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 */

	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = task_tgid_vnr(current);
	queue.alter = alter;
	if (alter)
		list_add_tail(&queue.list, &sma->sem_pending);
	else
		list_add(&queue.list, &sma->sem_pending);

	if (nsops == 1) {
		struct sem *curr;
		curr = &sma->sem_base[sops->sem_num];

		if (alter)
			list_add_tail(&queue.simple_list, &curr->sem_pending);
		else
			list_add(&queue.simple_list, &curr->sem_pending);
	} else {
		INIT_LIST_HEAD(&queue.simple_list);
		sma->complex_count++;
	}

	queue.status = -EINTR;
	queue.sleeper = current;
	current->state = TASK_INTERRUPTIBLE;
	sem_unlock(sma);

	if (timeout)
		jiffies_left = schedule_timeout(jiffies_left);
	else
		schedule();

	error = queue.status;
	while (unlikely(error == IN_WAKEUP)) {
		cpu_relax();
		error = queue.status;
	}

	if (error != -EINTR) {
		/* fast path: update_queue already obtained all requested
		 * resources */
		goto out_free;
	}

	sma = sem_lock(ns, semid);
	if (IS_ERR(sma)) {
		error = -EIDRM;
		goto out_free;
	}

	/*
	 * If queue.status != -EINTR we are woken up by another process
	 */
	error = queue.status;
	if (error != -EINTR) {
		goto out_unlock_free;
	}

	/*
	 * If an interrupt occurred we have to clean up the queue
	 */
	if (timeout && jiffies_left == 0)
		error = -EAGAIN;
	unlink_queue(sma, &queue);

out_unlock_free:
	sem_unlock(sma);
out_free:
	if (sops != fast_sops)
		kfree(sops);
	return error;
}
SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops)
{
	return sys_semtimedop(semid, tsops, nsops, NULL);
}
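
One last illustrative userspace sketch (again not part of the file): a classic P/V pair on a one-semaphore set. SEM_UNDO makes exit_sem() below roll the adjustment back if the process dies while holding the semaphore; the decrement would instead sleep in the pending queue if semval were 0:

	#include <sys/ipc.h>
	#include <sys/sem.h>

	int main(void)
	{
		int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
		struct sembuf up   = { .sem_num = 0, .sem_op =  1, .sem_flg = SEM_UNDO };
		struct sembuf down = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };

		if (semid < 0)
			return 1;
		semop(semid, &up, 1);	/* succeeds at once in try_atomic_semop() */
		semop(semid, &down, 1);	/* semval is 1, so this succeeds too */
		semctl(semid, 0, IPC_RMID);
		return 0;
	}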
/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
 * parent and child tasks.
 */
int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	int error;

	if (clone_flags & CLONE_SYSVSEM) {
		error = get_undo_list(&undo_list);
		if (error)
			return error;
		atomic_inc(&undo_list->refcnt);
		tsk->sysvsem.undo_list = undo_list;
	} else
		tsk->sysvsem.undo_list = NULL;

	return 0;
}
/*
 * add semadj values to semaphores, free undo structures.
 * undo structures are not freed when semaphore arrays are destroyed
 * so some of them may be out of date.
 * IMPLEMENTATION NOTE: There is some confusion over whether the
 * set of adjustments that needs to be done should be done in an atomic
 * manner or not. That is, if we are attempting to decrement the semval
 * should we queue up and wait until we can do so legally?
 * The original implementation attempted to do this (queue and wait).
 * The current implementation does not do so. The POSIX standard
 * and SVID should be consulted to determine what behavior is mandated.
 */
void exit_sem(struct task_struct *tsk)
{
	struct sem_undo_list *ulp;

	ulp = tsk->sysvsem.undo_list;
	if (!ulp)
		return;
	tsk->sysvsem.undo_list = NULL;

	if (!atomic_dec_and_test(&ulp->refcnt))
		return;

	for (;;) {
		struct sem_array *sma;
		struct sem_undo *un;
		int semid;
		int i;

		rcu_read_lock();
		un = list_entry_rcu(ulp->list_proc.next,
				    struct sem_undo, list_proc);
		if (&un->list_proc == &ulp->list_proc)
			semid = -1;
		else
			semid = un->semid;
		rcu_read_unlock();

		if (semid == -1)
			break;

		sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);

		/* exit_sem raced with IPC_RMID, nothing to do */
		if (IS_ERR(sma))
			continue;

		un = __lookup_undo(ulp, semid);
		if (un == NULL) {
			/* exit_sem raced with IPC_RMID+semget() that created
			 * exactly the same semid. Nothing to do.
			 */
			sem_unlock(sma);
			continue;
		}

		/* remove un from the linked lists */
		assert_spin_locked(&sma->sem_perm.lock);
		list_del(&un->list_id);

		spin_lock(&ulp->lock);
		list_del_rcu(&un->list_proc);
		spin_unlock(&ulp->lock);

		/* perform adjustments registered in un */
		for (i = 0; i < sma->sem_nsems; i++) {
			struct sem *semaphore = &sma->sem_base[i];
			if (un->semadj[i]) {
				semaphore->semval += un->semadj[i];
				/*
				 * Range checks of the new semaphore value,
				 * not defined by sus:
				 * - Some unices ignore the undo entirely
				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
				 * - some cap the value (e.g. FreeBSD caps
				 *   at 0, but doesn't enforce SEMVMX)
				 *
				 * Linux caps the semaphore value, both at 0
				 * and at SEMVMX.
				 *
				 *	Manfred <manfred@colorfullife.com>
				 */
				if (semaphore->semval < 0)
					semaphore->semval = 0;
				if (semaphore->semval > SEMVMX)
					semaphore->semval = SEMVMX;
				semaphore->sempid = task_tgid_vnr(current);
			}
		}
		sma->sem_otime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma, -1);
		sem_unlock(sma);

		call_rcu(&un->rcu, free_un);
	}
	kfree(ulp);
}
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
	struct sem_array *sma = it;

	return seq_printf(s,
			  "%10d %10d  %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
			  sma->sem_perm.key,
			  sma->sem_perm.id,
			  sma->sem_perm.mode,
			  sma->sem_nsems,
			  sma->sem_perm.uid,
			  sma->sem_perm.gid,
			  sma->sem_perm.cuid,
			  sma->sem_perm.cgid,
			  sma->sem_otime,
			  sma->sem_ctime);
}
#endif