 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 * Enforced range limit on SEM_UNDO
 * (c) 2001 Red Hat Inc
 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
 * (c) 2016 Davidlohr Bueso <dave@stgolabs.net>
 * Further wakeup optimizations, documentation
 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * Pavel Emelianov <xemul@openvz.org>
 * Implementation notes: (May 2010)
 * This file implements System V semaphores.
 *
 * User space visible behavior:
 * - FIFO ordering for semop() operations (just FIFO, not starvation
 *   avoidance)
 * - multiple semaphore operations that alter the same semaphore in
 *   one semop() are handled.
 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
 *   SETALL calls.
 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
 * - undo adjustments at process exit are limited to 0..SEMVMX.
 * - namespaces are supported.
 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
 *   to /proc/sys/kernel/sem.
 * - statistics about the usage are reported in /proc/sysvipc/sem.
 *   (a short user-space sketch of this behavior follows this comment block)
 *
 * Internals:
 * - all global variables are read-mostly.
 * - semop() calls and semctl(RMID) are synchronized by RCU.
 * - most operations do write operations (actually: spin_lock calls) to
 *   the per-semaphore array structure.
 *   Thus: Perfect SMP scaling between independent semaphore arrays.
 *   If multiple semaphores in one array are used, then cache line
 *   thrashing on the semaphore array spinlock will limit the scaling.
 * - semncnt and semzcnt are calculated on demand in count_semcnt()
 * - the task that performs a successful semop() scans the list of all
 *   sleeping tasks and completes any pending operations that can be fulfilled.
 *   Semaphores are actively given to waiting tasks (necessary for FIFO).
 *   (see update_queue())
 * - To improve the scalability, the actual wake-up calls are performed after
 *   dropping all locks. (see wake_up_sem_queue_prepare())
 * - All work is done by the waker, the woken up task does not have to do
 *   anything - not even acquiring a lock or dropping a refcount.
 * - A woken up task may not even touch the semaphore array anymore, it may
 *   have been destroyed already by a semctl(RMID).
 * - UNDO values are stored in an array (one per process and per
 *   semaphore array, lazily allocated). For backwards compatibility, multiple
 *   modes for the UNDO variables are supported (per process, per thread)
 *   (see copy_semundo, CLONE_SYSVSEM)
 * - There are two lists of the pending operations: a per-array list
 *   and per-semaphore list (stored in the array). This allows to achieve FIFO
 *   ordering without always scanning all pending operations.
 *   The worst-case behavior is nevertheless O(N^2) for N wakeups.
 */
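/*
 * Illustrative user-space sketch of the behavior described above. It is not
 * part of this file and assumes only the standard <sys/ipc.h>/<sys/sem.h>
 * API; the identifier "sid" is made up for the example:
 *
 *	int sid = semget(IPC_PRIVATE, 2, IPC_CREAT | 0600);
 *	struct sembuf ops[2] = {
 *		{ .sem_num = 0, .sem_op = -1, .sem_flg = 0 },
 *		{ .sem_num = 1, .sem_op = +1, .sem_flg = SEM_UNDO },
 *	};
 *	semop(sid, ops, 2);
 *	semctl(sid, 0, IPC_RMID);
 *
 * The two operations in the single semop() call are applied (or rolled back)
 * as one unit, blocked callers are served in FIFO order, and the SEM_UNDO
 * adjustment recorded for semaphore 1 is reverted automatically at exit.
 */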
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>
#include <linux/sched/wake_q.h>

#include <linux/uaccess.h>
/* One queue for each sleeping process in the system. */
struct sem_queue {
	struct list_head	list;	  /* queue of pending operations */
	struct task_struct	*sleeper; /* this process */
	struct sem_undo		*undo;	  /* undo structure */
	int			pid;	  /* process id of requesting process */
	int			status;	  /* completion status of operation */
	struct sembuf		*sops;	  /* array of pending operations */
	struct sembuf		*blocking; /* the operation that blocked */
	int			nsops;	  /* number of operations */
	bool			alter;	  /* does *sops alter the array? */
	bool			dupsop;	  /* sops on more than one sem_num */
};
/* Each task has a list of undo requests. They are executed automatically
 * when the process exits.
 */
struct sem_undo {
	struct list_head	list_proc;	/* per-process list:
						 * all undos from one process */
	struct rcu_head		rcu;		/* rcu struct for sem_undo */
	struct sem_undo_list	*ulp;		/* back ptr to sem_undo_list */
	struct list_head	list_id;	/* per semaphore array list:
						 * all undos for one array */
	int			semid;		/* semaphore set identifier */
	short			*semadj;	/* array of adjustments */
						/* one per semaphore */
};
/* sem_undo_list controls shared access to the list of sem_undo structures
 * that may be shared among all tasks in a CLONE_SYSVSEM task group.
 */
struct sem_undo_list {
	refcount_t		refcnt;
	spinlock_t		lock;
	struct list_head	list_proc;
};
#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])

static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif

#define SEMMSL_FAST	256 /* 512 bytes on stack */
#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */
/*
 * Switching from the mode suitable for simple ops
 * to the mode for complex ops is costly. Therefore:
 * use some hysteresis
 */
#define USE_GLOBAL_LOCK_HYSTERESIS	10
/*
 * Locking:
 * a) global sem_lock() for read/write
 *	sem_array.complex_count,
 *	sem_array.pending{_alter,_const},
 *
 * b) global or semaphore sem_lock() for read/write:
 *	sem_array.sems[i].pending_{const,alter}:
 *
 * c) special:
 *	sem_undo_list.list_proc:
 *	* undo_list->lock for write
 *	* rcu for read
 *	use_global_lock:
 *	* global sem_lock() for write
 *	* either local or global sem_lock() for read.
 *
 * Memory ordering:
 * Most ordering is enforced by using spin_lock() and spin_unlock().
 * The special case is use_global_lock:
 * Setting it from non-zero to 0 is a RELEASE, this is ensured by
 * using smp_store_release().
 * Testing if it is non-zero is an ACQUIRE, this is ensured by using
 * smp_load_acquire().
 * Setting it from 0 to non-zero must be ordered with regard to
 * this smp_load_acquire(), this is guaranteed because the smp_load_acquire()
 * is inside a spin_lock() and after a write from 0 to non-zero a
 * spin_lock()+spin_unlock() is done.
 */
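/*
 * Minimal sketch of the pairing described above (illustration only; it
 * restates what complexmode_tryleave() and sem_lock() below already do):
 *
 *	writer (complex op, leaving global-lock mode):
 *		... update sma->pending_* and per-semaphore queues ...
 *		smp_store_release(&sma->use_global_lock, 0);	RELEASE
 *
 *	reader (simple op, fast path):
 *		spin_lock(&sem->lock);
 *		if (!smp_load_acquire(&sma->use_global_lock))	ACQUIRE
 *			... per-semaphore locking is sufficient ...
 *
 * A reader that observes use_global_lock == 0 is therefore guaranteed to
 * also observe every write the writer performed before the release.
 */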
#define sc_semmsl	sem_ctls[0]
#define sc_semmns	sem_ctls[1]
#define sc_semopm	sem_ctls[2]
#define sc_semmni	sem_ctls[3]
int sem_init_ns(struct ipc_namespace *ns)
{
	ns->sc_semmsl = SEMMSL;
	ns->sc_semmns = SEMMNS;
	ns->sc_semopm = SEMOPM;
	ns->sc_semmni = SEMMNI;

	return ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}

void sem_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &sem_ids(ns), freeary);
	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
	rhashtable_destroy(&ns->ids[IPC_SEM_IDS].key_ht);
}
int __init sem_init(void)
{
	const int err = sem_init_ns(&init_ipc_ns);

	ipc_init_proc_interface("sysvipc/sem",
				" key semid perms nsems uid gid cuid cgid otime ctime\n",
				IPC_SEM_IDS, sysvipc_sem_proc_show);
	return err;
}
/**
 * unmerge_queues - unmerge queues, if possible.
 * @sma: semaphore array
 *
 * The function unmerges the wait queues if complex_count is 0.
 * It must be called prior to dropping the global semaphore array lock.
 */
static void unmerge_queues(struct sem_array *sma)
{
	struct sem_queue *q, *tq;

	/* complex operations still around? */
	if (sma->complex_count)
		return;
	/*
	 * We will switch back to simple mode.
	 * Move all pending operations back into the per-semaphore
	 * queues.
	 */
	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		struct sem *curr;

		curr = &sma->sems[q->sops[0].sem_num];
		list_add_tail(&q->list, &curr->pending_alter);
	}
	INIT_LIST_HEAD(&sma->pending_alter);
}
/**
 * merge_queues - merge single semop queues into global queue
 * @sma: semaphore array
 *
 * This function merges all per-semaphore queues into the global queue.
 * It is necessary to achieve FIFO ordering for the pending single-sop
 * operations when a multi-semop operation must sleep.
 * Only the alter operations must be moved, the const operations can stay.
 */
static void merge_queues(struct sem_array *sma)
{
	int i;

	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = &sma->sems[i];

		list_splice_init(&sem->pending_alter, &sma->pending_alter);
	}
}
static void sem_rcu_free(struct rcu_head *head)
{
	struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
	struct sem_array *sma = container_of(p, struct sem_array, sem_perm);

	security_sem_free(sma);
	kvfree(sma);
}
/*
 * Enter the mode suitable for non-simple operations:
 * Caller must own sem_perm.lock.
 */
static void complexmode_enter(struct sem_array *sma)
{
	int i;
	struct sem *sem;

	if (sma->use_global_lock > 0) {
		/*
		 * We are already in global lock mode.
		 * Nothing to do, just reset the
		 * counter until we return to simple mode.
		 */
		sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
		return;
	}
	sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;

	for (i = 0; i < sma->sem_nsems; i++) {
		sem = &sma->sems[i];
		spin_lock(&sem->lock);
		spin_unlock(&sem->lock);
	}
}
/*
 * Try to leave the mode that disallows simple operations:
 * Caller must own sem_perm.lock.
 */
static void complexmode_tryleave(struct sem_array *sma)
{
	if (sma->complex_count) {
		/* Complex ops are sleeping.
		 * We must stay in complex mode
		 */
		return;
	}
	if (sma->use_global_lock == 1) {
		/*
		 * Immediately after setting use_global_lock to 0,
		 * a simple op can start. Thus: all memory writes
		 * performed by the current operation must be visible
		 * before we set use_global_lock to 0.
		 */
		smp_store_release(&sma->use_global_lock, 0);
	} else {
		sma->use_global_lock--;
	}
}
#define SEM_GLOBAL_LOCK	(-1)
/*
 * If the request contains only one semaphore operation, and there are
 * no complex transactions pending, lock only the semaphore involved.
 * Otherwise, lock the entire semaphore array, since we either have
 * multiple semaphores in our own semops, or we need to look at
 * semaphores from other pending complex operations.
 */
static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
			   int nsops)
{
	struct sem *sem;

	if (nsops != 1) {
		/* Complex operation - acquire a full lock */
		ipc_lock_object(&sma->sem_perm);

		/* Prevent parallel simple ops */
		complexmode_enter(sma);
		return SEM_GLOBAL_LOCK;
	}

	/*
	 * Only one semaphore affected - try to optimize locking.
	 * Optimized locking is possible if no complex operation
	 * is either enqueued or processed right now.
	 *
	 * Both facts are tracked by use_global_lock.
	 */
	sem = &sma->sems[sops->sem_num];

	/*
	 * Initial check for use_global_lock. Just an optimization,
	 * no locking, no memory barrier.
	 */
	if (!sma->use_global_lock) {
		/*
		 * It appears that no complex operation is around.
		 * Acquire the per-semaphore lock.
		 */
		spin_lock(&sem->lock);

		/* pairs with smp_store_release() */
		if (!smp_load_acquire(&sma->use_global_lock)) {
			/* fast path successful! */
			return sops->sem_num;
		}
		spin_unlock(&sem->lock);
	}

	/* slow path: acquire the full lock */
	ipc_lock_object(&sma->sem_perm);

	if (sma->use_global_lock == 0) {
		/*
		 * The use_global_lock mode ended while we waited for
		 * sma->sem_perm.lock. Thus we must switch to locking
		 * with sem->lock.
		 * Unlike in the fast path, there is no need to recheck
		 * sma->use_global_lock after we have acquired sem->lock:
		 * We own sma->sem_perm.lock, thus use_global_lock cannot
		 * change.
		 */
		spin_lock(&sem->lock);

		ipc_unlock_object(&sma->sem_perm);
		return sops->sem_num;
	} else {
		/*
		 * Not a false alarm, thus continue to use the global lock
		 * mode. No need for complexmode_enter(), this was done by
		 * the caller that has set use_global_lock to non-zero.
		 */
		return SEM_GLOBAL_LOCK;
	}
}
static inline void sem_unlock(struct sem_array *sma, int locknum)
{
	if (locknum == SEM_GLOBAL_LOCK) {
		unmerge_queues(sma);
		complexmode_tryleave(sma);
		ipc_unlock_object(&sma->sem_perm);
	} else {
		struct sem *sem = &sma->sems[locknum];
		spin_unlock(&sem->lock);
	}
}
/*
 * sem_lock_(check_) routines are called in the paths where the rwsem
 * is not held.
 *
 * The caller holds the RCU read lock.
 */
static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct sem_array, sem_perm);
}
static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
							int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct sem_array, sem_perm);
}
static inline void sem_lock_and_putref(struct sem_array *sma)
{
	sem_lock(sma, NULL, -1);
	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
}

static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
	ipc_rmid(&sem_ids(ns), &s->sem_perm);
}
static struct sem_array *sem_alloc(size_t nsems)
{
	struct sem_array *sma;
	size_t size;

	if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
		return NULL;

	size = sizeof(*sma) + nsems * sizeof(sma->sems[0]);
	sma = kvmalloc(size, GFP_KERNEL);
	if (unlikely(!sma))
		return NULL;

	memset(sma, 0, size);

	return sma;
}
/**
 * newary - Create a new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rwsem held (as a writer)
 */
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
	int retval;
	struct sem_array *sma;
	key_t key = params->key;
	int nsems = params->u.nsems;
	int semflg = params->flg;
	int i;

	if (!nsems)
		return -EINVAL;
	if (ns->used_sems + nsems > ns->sc_semmns)
		return -ENOSPC;

	sma = sem_alloc(nsems);
	if (!sma)
		return -ENOMEM;

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(sma);
	if (retval) {
		kvfree(sma);
		return retval;
	}

	for (i = 0; i < nsems; i++) {
		INIT_LIST_HEAD(&sma->sems[i].pending_alter);
		INIT_LIST_HEAD(&sma->sems[i].pending_const);
		spin_lock_init(&sma->sems[i].lock);
	}

	sma->complex_count = 0;
	sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
	INIT_LIST_HEAD(&sma->pending_alter);
	INIT_LIST_HEAD(&sma->pending_const);
	INIT_LIST_HEAD(&sma->list_id);
	sma->sem_nsems = nsems;
	sma->sem_ctime = get_seconds();

	retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
	if (retval < 0) {
		call_rcu(&sma->sem_perm.rcu, sem_rcu_free);
		return retval;
	}
	ns->used_sems += nsems;

	sem_unlock(sma, -1);
	rcu_read_unlock();

	return sma->sem_perm.id;
}
/*
 * Called with sem_ids.rwsem and ipcp locked.
 */
static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	return security_sem_associate(sma, semflg);
}
/*
 * Called with sem_ids.rwsem and ipcp locked.
 */
static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
				  struct ipc_params *params)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	if (params->u.nsems > sma->sem_nsems)
		return -EINVAL;

	return 0;
}
SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops sem_ops = {
		.getnew = newary,
		.associate = sem_security,
		.more_checks = sem_more_checks,
	};
	struct ipc_params sem_params;

	ns = current->nsproxy->ipc_ns;

	if (nsems < 0 || nsems > ns->sc_semmsl)
		return -EINVAL;

	sem_params.key = key;
	sem_params.flg = semflg;
	sem_params.u.nsems = nsems;

	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}
/**
 * perform_atomic_semop[_slow] - Attempt to perform semaphore
 *				 operations on a given array.
 * @sma: semaphore array
 * @q: struct sem_queue that describes the operation
 *
 * Whether the caller blocks depends on the value of each semaphore
 * operation (sem_op):
 *  (1) >0 never blocks.
 *  (2) 0 (wait-for-zero operation): blocks while semval is non-zero.
 *  (3) <0 blocks when attempting to decrement semval to a value smaller
 *      than zero.
 *
 * Returns 0 if the operation was possible.
 * Returns 1 if the operation is impossible, the caller must sleep.
 * Returns <0 for error codes.
 */
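/*
 * Worked example of the rules above (illustration only): with semval == 1,
 * a queue whose sops are { {sem_num=0, sem_op=-1}, {sem_num=0, sem_op=-1} }
 * cannot proceed, because the second decrement would take semval below zero;
 * the functions below return 1 (or -EAGAIN if IPC_NOWAIT was set on the
 * blocking operation) and leave no partial modification behind.
 */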
static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
{
	int result, sem_op, nsops, pid;
	struct sembuf *sop;
	struct sem *curr;
	struct sembuf *sops;
	struct sem_undo *un;

	sops = q->sops;
	nsops = q->nsops;
	un = q->undo;

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = &sma->sems[sop->sem_num];
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;
			/* Exceeding the undo range is an error. */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
			un->semadj[sop->sem_num] = undo;
		}

		curr->semval = result;
	}

	sop--;
	pid = q->pid;
	while (sop >= sops) {
		sma->sems[sop->sem_num].sempid = pid;
		sop--;
	}

	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	q->blocking = sop;

	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	sop--;
	while (sop >= sops) {
		sem_op = sop->sem_op;
		sma->sems[sop->sem_num].semval -= sem_op;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] += sem_op;
		sop--;
	}

	return result;
}
static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
{
	int result, sem_op, nsops;
	struct sembuf *sop;
	struct sem *curr;
	struct sembuf *sops;
	struct sem_undo *un;

	sops = q->sops;
	nsops = q->nsops;
	un = q->undo;

	if (unlikely(q->dupsop))
		return perform_atomic_semop_slow(sma, q);

	/*
	 * We scan the semaphore set twice, first to ensure that the entire
	 * operation can succeed, therefore avoiding any pointless writes
	 * to shared memory and having to undo such changes in order to block
	 * until the operations can go through.
	 */
	for (sop = sops; sop < sops + nsops; sop++) {
		curr = &sma->sems[sop->sem_num];
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block; /* wait-for-zero */

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			return -ERANGE;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			/* Exceeding the undo range is an error. */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				return -ERANGE;
		}
	}

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = &sma->sems[sop->sem_num];
		sem_op = sop->sem_op;
		result = curr->semval;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			un->semadj[sop->sem_num] = undo;
		}
		curr->semval += sem_op;
		curr->sempid = q->pid;
	}

	return 0;

would_block:
	q->blocking = sop;
	return sop->sem_flg & IPC_NOWAIT ? -EAGAIN : 1;
}
static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
					     struct wake_q_head *wake_q)
{
	wake_q_add(wake_q, q->sleeper);
	/*
	 * Rely on the above implicit barrier, such that we can
	 * ensure that we hold reference to the task before setting
	 * q->status. Otherwise we could race with do_exit if the
	 * task is awoken by an external event before calling
	 * wake_up_process().
	 */
	WRITE_ONCE(q->status, error);
}
static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
	list_del(&q->list);
	if (q->nsops > 1)
		sma->complex_count--;
}
/** check_restart(sma, q)
 * @sma: semaphore array
 * @q: the operation that just completed
 *
 * update_queue is O(N^2) when it restarts scanning the whole queue of
 * waiting operations. Therefore this function checks if the restart is
 * really necessary. It is called after a previously waiting operation
 * modified the array.
 * Note that wait-for-zero operations are handled without restart.
 */
static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
{
	/* pending complex alter operations are too difficult to analyse */
	if (!list_empty(&sma->pending_alter))
		return 1;

	/* we were a sleeping complex operation. Too difficult */
	if (q->nsops > 1)
		return 1;

	/* It is impossible that someone waits for the new value:
	 * - complex operations always restart.
	 * - wait-for-zero are handled separately.
	 * - q is a previously sleeping simple operation that
	 *   altered the array. It must be a decrement, because
	 *   simple increments never sleep.
	 * - If there are older (higher priority) decrements
	 *   in the queue, then they have observed the original
	 *   semval value and couldn't proceed. The operation
	 *   decreased the value further - thus they won't proceed either.
	 */
	return 0;
}
/**
 * wake_const_ops - wake up non-alter tasks
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @wake_q: lockless wake-queue head.
 *
 * wake_const_ops must be called after a semaphore in a semaphore array
 * was set to 0. If complex const operations are pending, wake_const_ops must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @wake_q. The return code
 * is stored in q->status.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int wake_const_ops(struct sem_array *sma, int semnum,
			  struct wake_q_head *wake_q)
{
	struct sem_queue *q, *tmp;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_const;
	else
		pending_list = &sma->sems[semnum].pending_const;

	list_for_each_entry_safe(q, tmp, pending_list, list) {
		int error = perform_atomic_semop(sma, q);

		if (error > 0)
			continue;
		/* operation completed, remove from queue & wakeup */
		unlink_queue(sma, q);

		wake_up_sem_queue_prepare(q, error, wake_q);
		if (error == 0)
			semop_completed = 1;
	}

	return semop_completed;
}
/**
 * do_smart_wakeup_zero - wakeup all wait for zero tasks
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @wake_q: lockless wake-queue head
 *
 * Checks all required queues for wait-for-zero operations, based
 * on the actual changes that were performed on the semaphore array.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
				int nsops, struct wake_q_head *wake_q)
{
	int i;
	int semop_completed = 0;
	int got_zero = 0;

	/* first: the per-semaphore queues, if known */
	if (sops) {
		for (i = 0; i < nsops; i++) {
			int num = sops[i].sem_num;

			if (sma->sems[num].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, num, wake_q);
			}
		}
	} else {
		/*
		 * No sops means modified semaphores not known.
		 * Assume all were changed.
		 */
		for (i = 0; i < sma->sem_nsems; i++) {
			if (sma->sems[i].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, i, wake_q);
			}
		}
	}
	/*
	 * If one of the modified semaphores got 0,
	 * then check the global queue, too.
	 */
	if (got_zero)
		semop_completed |= wake_const_ops(sma, -1, wake_q);

	return semop_completed;
}
/**
 * update_queue - look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @wake_q: lockless wake-queue head.
 *
 * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, update_queue must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @wake_q. The return code
 * is stored in q->status.
 * The function internally checks if const operations can now succeed.
 *
 * The function returns 1 if at least one semop was completed successfully.
 */
static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
{
	struct sem_queue *q, *tmp;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_alter;
	else
		pending_list = &sma->sems[semnum].pending_alter;

again:
	list_for_each_entry_safe(q, tmp, pending_list, list) {
		int error, restart;

		/* If we are scanning the single sop, per-semaphore list of
		 * one semaphore and that semaphore is 0, then it is not
		 * necessary to scan further: simple increments
		 * that affect only one entry succeed immediately and cannot
		 * be in the per semaphore pending queue, and decrements
		 * cannot be successful if the value is already 0.
		 */
		if (semnum != -1 && sma->sems[semnum].semval == 0)
			break;

		error = perform_atomic_semop(sma, q);

		/* Does q->sleeper still need to sleep? */
		if (error > 0)
			continue;

		unlink_queue(sma, q);

		if (error) {
			restart = 0;
		} else {
			semop_completed = 1;
			do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
			restart = check_restart(sma, q);
		}

		wake_up_sem_queue_prepare(q, error, wake_q);
		if (restart)
			goto again;
	}
	return semop_completed;
}
/**
 * set_semotime - set sem_otime
 * @sma: semaphore array
 * @sops: operations that modified the array, may be NULL
 *
 * sem_otime is replicated to avoid cache line thrashing.
 * This function sets one instance to the current time.
 */
static void set_semotime(struct sem_array *sma, struct sembuf *sops)
{
	if (sops == NULL) {
		sma->sems[0].sem_otime = get_seconds();
	} else {
		sma->sems[sops[0].sem_num].sem_otime =
						get_seconds();
	}
}
/**
 * do_smart_update - optimized update_queue
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @otime: force setting otime
 * @wake_q: lockless wake-queue head
 *
 * do_smart_update() does the required calls to update_queue and wakeup_zero,
 * based on the actual changes that were performed on the semaphore array.
 * Note that the function does not do the actual wake-up: the caller is
 * responsible for calling wake_up_q().
 * It is safe to perform this call after dropping all locks.
 */
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
			    int otime, struct wake_q_head *wake_q)
{
	int i;

	otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q);

	if (!list_empty(&sma->pending_alter)) {
		/* semaphore array uses the global queue - just process it. */
		otime |= update_queue(sma, -1, wake_q);
	} else {
		if (!sops) {
			/*
			 * No sops, thus the modified semaphores are not
			 * known. Check all.
			 */
			for (i = 0; i < sma->sem_nsems; i++)
				otime |= update_queue(sma, i, wake_q);
		} else {
			/*
			 * Check the semaphores that were increased:
			 * - No complex ops, thus all sleeping ops are
			 *   decrease.
			 * - if we decreased the value, then any sleeping
			 *   semaphore ops won't be able to run: If the
			 *   previous value was too small, then the new
			 *   value will be too small, too.
			 */
			for (i = 0; i < nsops; i++) {
				if (sops[i].sem_op > 0) {
					otime |= update_queue(sma,
							      sops[i].sem_num, wake_q);
				}
			}
		}
	}
	if (otime)
		set_semotime(sma, sops);
}
/*
 * check_qop: Test if a queued operation sleeps on the semaphore semnum
 */
static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
		     bool count_zero)
{
	struct sembuf *sop = q->blocking;

	/*
	 * Linux always (since 0.99.10) reported a task as sleeping on all
	 * semaphores. This violates SUS, therefore it was changed to the
	 * standard compliant behavior.
	 * Give the administrators a chance to notice that an application
	 * might misbehave because it relies on the Linux behavior.
	 */
	pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
		     "The task %s (%d) triggered the difference, watch for misbehavior.\n",
		     current->comm, task_pid_nr(current));

	if (sop->sem_num != semnum)
		return 0;

	if (count_zero && sop->sem_op == 0)
		return 1;
	if (!count_zero && sop->sem_op < 0)
		return 1;

	return 0;
}
/* The following counts are associated with each semaphore:
 *   semncnt        number of tasks waiting on semval being nonzero
 *   semzcnt        number of tasks waiting on semval being zero
 *
 * By definition, a task waits only on the semaphore of the first semop
 * that cannot proceed, even if additional operations would block, too.
 */
static int count_semcnt(struct sem_array *sma, ushort semnum,
			bool count_zero)
{
	struct list_head *l;
	struct sem_queue *q;
	int semcnt;

	semcnt = 0;
	/* First: check the simple operations. They are easy to evaluate */
	if (count_zero)
		l = &sma->sems[semnum].pending_const;
	else
		l = &sma->sems[semnum].pending_alter;

	list_for_each_entry(q, l, list) {
		/* all tasks on a per-semaphore list sleep on exactly
		 * that semaphore
		 */
		semcnt++;
	}

	/* Then: check the complex operations. */
	list_for_each_entry(q, &sma->pending_alter, list) {
		semcnt += check_qop(sma, semnum, q, count_zero);
	}
	if (count_zero) {
		list_for_each_entry(q, &sma->pending_const, list) {
			semcnt += check_qop(sma, semnum, q, count_zero);
		}
	}
	return semcnt;
}
/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
 * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
 * remains locked on exit.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct sem_undo *un, *tu;
	struct sem_queue *q, *tq;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	int i;
	DEFINE_WAKE_Q(wake_q);

	/* Free the existing undo structures for this semaphore set. */
	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
		list_del(&un->list_id);
		spin_lock(&un->ulp->lock);
		un->semid = -1;
		list_del_rcu(&un->list_proc);
		spin_unlock(&un->ulp->lock);
		kfree_rcu(un, rcu);
	}

	/* Wake up all pending processes and let them fail with EIDRM. */
	list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
	}
	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
	}
	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = &sma->sems[i];
		list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
		}
		list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
		}
	}

	/* Remove the semaphore set from the IDR */
	sem_rmid(ns, sma);
	sem_unlock(sma, -1);
	rcu_read_unlock();

	wake_up_q(&wake_q);
	ns->used_sems -= sma->sem_nsems;
	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
}
static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct semid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime	= in->sem_otime;
		out.sem_ctime	= in->sem_ctime;
		out.sem_nsems	= in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
static time_t get_semotime(struct sem_array *sma)
{
	int i;
	time_t res;

	res = sma->sems[0].sem_otime;
	for (i = 1; i < sma->sem_nsems; i++) {
		time_t to = sma->sems[i].sem_otime;

		if (to > res)
			res = to;
	}
	return res;
}
static int semctl_nolock(struct ipc_namespace *ns, int semid,
			 int cmd, int version, void __user *p)
{
	int err;
	struct sem_array *sma;

	switch (cmd) {
	case IPC_INFO:
	case SEM_INFO:
	{
		struct seminfo seminfo;
		int max_id;

		err = security_sem_semctl(NULL, cmd);
		if (err)
			return err;

		memset(&seminfo, 0, sizeof(seminfo));
		seminfo.semmni = ns->sc_semmni;
		seminfo.semmns = ns->sc_semmns;
		seminfo.semmsl = ns->sc_semmsl;
		seminfo.semopm = ns->sc_semopm;
		seminfo.semvmx = SEMVMX;
		seminfo.semmnu = SEMMNU;
		seminfo.semmap = SEMMAP;
		seminfo.semume = SEMUME;
		down_read(&sem_ids(ns).rwsem);
		if (cmd == SEM_INFO) {
			seminfo.semusz = sem_ids(ns).in_use;
			seminfo.semaem = ns->used_sems;
		} else {
			seminfo.semusz = SEMUSZ;
			seminfo.semaem = SEMAEM;
		}
		max_id = ipc_get_maxid(&sem_ids(ns));
		up_read(&sem_ids(ns).rwsem);
		if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}
	case IPC_STAT:
	case SEM_STAT:
	{
		struct semid64_ds tbuf;
		int id = 0;

		memset(&tbuf, 0, sizeof(tbuf));

		rcu_read_lock();
		if (cmd == SEM_STAT) {
			sma = sem_obtain_object(ns, semid);
			if (IS_ERR(sma)) {
				err = PTR_ERR(sma);
				goto out_unlock;
			}
			id = sma->sem_perm.id;
		} else {
			sma = sem_obtain_object_check(ns, semid);
			if (IS_ERR(sma)) {
				err = PTR_ERR(sma);
				goto out_unlock;
			}
		}

		err = -EACCES;
		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
			goto out_unlock;

		err = security_sem_semctl(sma, cmd);
		if (err)
			goto out_unlock;

		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime = get_semotime(sma);
		tbuf.sem_ctime = sma->sem_ctime;
		tbuf.sem_nsems = sma->sem_nsems;
		rcu_read_unlock();
		if (copy_semid_to_user(p, &tbuf, version))
			return -EFAULT;
		return id;
	}
	default:
		return -EINVAL;
	}

out_unlock:
	rcu_read_unlock();
	return err;
}
static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
			 unsigned long arg)
{
	struct sem_undo *un;
	struct sem_array *sma;
	struct sem *curr;
	int err, val;
	DEFINE_WAKE_Q(wake_q);

#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
	/* big-endian 64bit */
	val = arg >> 32;
#else
	/* 32bit or little-endian 64bit */
	val = arg;
#endif

	if (val > SEMVMX || val < 0)
		return -ERANGE;

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	if (semnum < 0 || semnum >= sma->sem_nsems) {
		rcu_read_unlock();
		return -EINVAL;
	}

	if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
		rcu_read_unlock();
		return -EACCES;
	}

	err = security_sem_semctl(sma, SETVAL);
	if (err) {
		rcu_read_unlock();
		return -EACCES;
	}

	sem_lock(sma, NULL, -1);

	if (!ipc_valid_object(&sma->sem_perm)) {
		sem_unlock(sma, -1);
		rcu_read_unlock();
		return -EIDRM;
	}

	curr = &sma->sems[semnum];

	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry(un, &sma->list_id, list_id)
		un->semadj[semnum] = 0;

	curr->semval = val;
	curr->sempid = task_tgid_vnr(current);
	sma->sem_ctime = get_seconds();
	/* maybe some queued-up processes were waiting for this */
	do_smart_update(sma, NULL, 0, 0, &wake_q);
	sem_unlock(sma, -1);
	rcu_read_unlock();
	wake_up_q(&wake_q);
	return 0;
}
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
		       int cmd, void __user *p)
{
	struct sem_array *sma;
	struct sem *curr;
	int err, nsems;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort *sem_io = fast_sem_io;
	DEFINE_WAKE_Q(wake_q);

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	nsems = sma->sem_nsems;

	err = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
		goto out_rcu_wakeup;

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_rcu_wakeup;

	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = p;
		int i;

		sem_lock(sma, NULL, -1);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}
		if (nsems > SEMMSL_FAST) {
			if (!ipc_rcu_getref(&sma->sem_perm)) {
				err = -EIDRM;
				goto out_unlock;
			}
			sem_unlock(sma, -1);
			rcu_read_unlock();
			sem_io = kvmalloc_array(nsems, sizeof(ushort),
						GFP_KERNEL);
			if (sem_io == NULL) {
				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
				return -ENOMEM;
			}

			rcu_read_lock();
			sem_lock_and_putref(sma);
			if (!ipc_valid_object(&sma->sem_perm)) {
				err = -EIDRM;
				goto out_unlock;
			}
		}
		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sems[i].semval;
		sem_unlock(sma, -1);
		rcu_read_unlock();
		err = 0;
		if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		if (!ipc_rcu_getref(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_rcu_wakeup;
		}
		rcu_read_unlock();

		if (nsems > SEMMSL_FAST) {
			sem_io = kvmalloc_array(nsems, sizeof(ushort),
						GFP_KERNEL);
			if (sem_io == NULL) {
				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
				return -ENOMEM;
			}
		}

		if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
			ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
			err = -EFAULT;
			goto out_free;
		}

		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
				err = -ERANGE;
				goto out_free;
			}
		}
		rcu_read_lock();
		sem_lock_and_putref(sma);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}

		for (i = 0; i < nsems; i++) {
			sma->sems[i].semval = sem_io[i];
			sma->sems[i].sempid = task_tgid_vnr(current);
		}

		ipc_assert_locked_object(&sma->sem_perm);
		list_for_each_entry(un, &sma->list_id, list_id) {
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		}
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 0, &wake_q);
		err = 0;
		goto out_unlock;
	}
	/* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
	}
	err = -EINVAL;
	if (semnum < 0 || semnum >= nsems)
		goto out_rcu_wakeup;

	sem_lock(sma, NULL, -1);
	if (!ipc_valid_object(&sma->sem_perm)) {
		err = -EIDRM;
		goto out_unlock;
	}
	curr = &sma->sems[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = curr->sempid;
		goto out_unlock;
	case GETNCNT:
		err = count_semcnt(sma, semnum, 0);
		goto out_unlock;
	case GETZCNT:
		err = count_semcnt(sma, semnum, 1);
		goto out_unlock;
	}

out_unlock:
	sem_unlock(sma, -1);
out_rcu_wakeup:
	rcu_read_unlock();
	wake_up_q(&wake_q);
out_free:
	if (sem_io != fast_sem_io)
		kvfree(sem_io);
	return err;
}
static inline unsigned long
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct semid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
		out->sem_perm.mode	= tbuf_old.sem_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}
/*
 * This function handles some semctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int semctl_down(struct ipc_namespace *ns, int semid,
		       int cmd, int version, void __user *p)
{
	struct sem_array *sma;
	int err;
	struct semid64_ds semid64;
	struct kern_ipc_perm *ipcp;

	if (cmd == IPC_SET) {
		if (copy_semid_from_user(&semid64, p, version))
			return -EFAULT;
	}

	down_write(&sem_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
				      &semid64.sem_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	sma = container_of(ipcp, struct sem_array, sem_perm);

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		sem_lock(sma, NULL, -1);
		/* freeary unlocks the ipc object and rcu */
		freeary(ns, ipcp);
		goto out_up;
	case IPC_SET:
		sem_lock(sma, NULL, -1);
		err = ipc_update_perm(&semid64.sem_perm, ipcp);
		if (err)
			goto out_unlock0;
		sma->sem_ctime = get_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	sem_unlock(sma, -1);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&sem_ids(ns).rwsem);
	return err;
}
SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
{
	int version;
	struct ipc_namespace *ns;
	void __user *p = (void __user *)arg;

	if (semid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SEM_INFO:
	case IPC_STAT:
	case SEM_STAT:
		return semctl_nolock(ns, semid, cmd, version, p);
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case SETALL:
		return semctl_main(ns, semid, semnum, cmd, p);
	case SETVAL:
		return semctl_setval(ns, semid, semnum, arg);
	case IPC_RMID:
	case IPC_SET:
		return semctl_down(ns, semid, cmd, version, p);
	default:
		return -EINVAL;
	}
}
/* If the task doesn't already have an undo_list, then allocate one
 * here. We guarantee there is only one thread using this undo list,
 * and current is THE ONE
 *
 * If this allocation and assignment succeeds, but later
 * portions of this code fail, there is no need to free the sem_undo_list.
 * Just let it stay associated with the task, and it'll be freed later
 * at exit time.
 *
 * This can block, so callers must hold no locks.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (!undo_list) {
		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
		if (undo_list == NULL)
			return -ENOMEM;
		spin_lock_init(&undo_list->lock);
		refcount_set(&undo_list->refcnt, 1);
		INIT_LIST_HEAD(&undo_list->list_proc);

		current->sysvsem.undo_list = undo_list;
	}
	*undo_listp = undo_list;
	return 0;
}
static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
		if (un->semid == semid)
			return un;
	}
	return NULL;
}

static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	assert_spin_locked(&ulp->lock);

	un = __lookup_undo(ulp, semid);
	if (un) {
		list_del_rcu(&un->list_proc);
		list_add_rcu(&un->list_proc, &ulp->list_proc);
	}
	return un;
}
/**
 * find_alloc_undo - lookup (and if not present create) undo array
 * @ns: namespace
 * @semid: semaphore array id
 *
 * The function looks up (and if not present creates) the undo structure.
 * The size of the undo structure depends on the size of the semaphore
 * array, thus the alloc path is not that straightforward.
 * Lifetime-rules: sem_undo is rcu-protected, on success, the function
 * performs a rcu_read_lock().
 */
static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems, error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	rcu_read_lock();
	spin_lock(&ulp->lock);
	un = lookup_undo(ulp, semid);
	spin_unlock(&ulp->lock);
	if (likely(un != NULL))
		goto out;

	/* no undo structure around - allocate one. */
	/* step 1: figure out the size of the semaphore array */
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return ERR_CAST(sma);
	}

	nsems = sma->sem_nsems;
	if (!ipc_rcu_getref(&sma->sem_perm)) {
		rcu_read_unlock();
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	rcu_read_unlock();

	/* step 2: allocate new undo structure */
	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
	if (!new) {
		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
		return ERR_PTR(-ENOMEM);
	}

	/* step 3: Acquire the lock on semaphore array */
	rcu_read_lock();
	sem_lock_and_putref(sma);
	if (!ipc_valid_object(&sma->sem_perm)) {
		sem_unlock(sma, -1);
		rcu_read_unlock();
		kfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	spin_lock(&ulp->lock);

	/*
	 * step 4: check for races: did someone else allocate the undo struct?
	 */
	un = lookup_undo(ulp, semid);
	if (un) {
		kfree(new);
		goto success;
	}
	/* step 5: initialize & link new undo structure */
	new->semadj = (short *) &new[1];
	new->ulp = ulp;
	new->semid = semid;
	assert_spin_locked(&ulp->lock);
	list_add_rcu(&new->list_proc, &ulp->list_proc);
	ipc_assert_locked_object(&sma->sem_perm);
	list_add(&new->list_id, &sma->list_id);
	un = new;

success:
	spin_unlock(&ulp->lock);
	sem_unlock(sma, -1);
out:
	return un;
}
SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops, const struct timespec __user *, timeout)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];
	struct sembuf *sops = fast_sops, *sop;
	struct sem_undo *un;
	int max, locknum;
	bool undos = false, alter = false, dupsop = false;
	struct sem_queue queue;
	unsigned long dup = 0, jiffies_left = 0;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > ns->sc_semopm)
		return -E2BIG;
	if (nsops > SEMOPM_FAST) {
		sops = kvmalloc(sizeof(*sops)*nsops, GFP_KERNEL);
		if (sops == NULL)
			return -ENOMEM;
	}

	if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
		error = -EFAULT;
		goto out_free;
	}

	if (timeout) {
		struct timespec _timeout;
		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
			error = -EFAULT;
			goto out_free;
		}
		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
			_timeout.tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
		jiffies_left = timespec_to_jiffies(&_timeout);
	}

	max = 0;
	for (sop = sops; sop < sops + nsops; sop++) {
		unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG);

		if (sop->sem_num >= max)
			max = sop->sem_num;
		if (sop->sem_flg & SEM_UNDO)
			undos = true;
		if (dup & mask) {
			/*
			 * There was a previous alter access that appears
			 * to have accessed the same semaphore, thus use
			 * the dupsop logic. "appears", because the detection
			 * can only check % BITS_PER_LONG.
			 */
			dupsop = true;
		}
		if (sop->sem_op != 0) {
			alter = true;
			dup |= mask;
		}
	}

	if (undos) {
		/* On success, find_alloc_undo takes the rcu_read_lock */
		un = find_alloc_undo(ns, semid);
		if (IS_ERR(un)) {
			error = PTR_ERR(un);
			goto out_free;
		}
	} else {
		un = NULL;
		rcu_read_lock();
	}

	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		error = PTR_ERR(sma);
		goto out_free;
	}

	error = -EFBIG;
	if (max >= sma->sem_nsems) {
		rcu_read_unlock();
		goto out_free;
	}

	error = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
		rcu_read_unlock();
		goto out_free;
	}

	error = security_sem_semop(sma, sops, nsops, alter);
	if (error) {
		rcu_read_unlock();
		goto out_free;
	}

	error = -EIDRM;
	locknum = sem_lock(sma, sops, nsops);
	/*
	 * We eventually might perform the following check in a lockless
	 * fashion, considering ipc_valid_object() locking constraints.
	 * If nsops == 1 and there is no contention for sem_perm.lock, then
	 * only a per-semaphore lock is held and it's OK to proceed with the
	 * check below. More details on the fine grained locking scheme
	 * entangled here and why it's RMID race safe on comments at sem_lock()
	 */
	if (!ipc_valid_object(&sma->sem_perm))
		goto out_unlock_free;
	/*
	 * semid identifiers are not unique - find_alloc_undo may have
	 * allocated an undo structure, it was invalidated by an RMID
	 * and now a new array has received the same id. Check and fail.
	 * This case can be detected checking un->semid. The existence of
	 * "un" itself is guaranteed by rcu.
	 */
	if (un && un->semid == -1)
		goto out_unlock_free;

	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = task_tgid_vnr(current);
	queue.alter = alter;
	queue.dupsop = dupsop;

	error = perform_atomic_semop(sma, &queue);
	if (error == 0) { /* non-blocking successful path */
		DEFINE_WAKE_Q(wake_q);

		/*
		 * If the operation was successful, then do
		 * the required updates.
		 */
		if (alter)
			do_smart_update(sma, sops, nsops, 1, &wake_q);
		else
			set_semotime(sma, sops);

		sem_unlock(sma, locknum);
		rcu_read_unlock();
		wake_up_q(&wake_q);

		goto out_free;
	}
	if (error < 0) /* non-blocking error path */
		goto out_unlock_free;

	/*
	 * We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 */
	if (nsops == 1) {
		struct sem *curr;

		curr = &sma->sems[sops->sem_num];

		if (alter) {
			if (sma->complex_count) {
				list_add_tail(&queue.list,
						&sma->pending_alter);
			} else {
				list_add_tail(&queue.list,
						&curr->pending_alter);
			}
		} else {
			list_add_tail(&queue.list, &curr->pending_const);
		}
	} else {
		if (!sma->complex_count)
			merge_queues(sma);

		if (alter)
			list_add_tail(&queue.list, &sma->pending_alter);
		else
			list_add_tail(&queue.list, &sma->pending_const);

		sma->complex_count++;
	}

	do {
		queue.status = -EINTR;
		queue.sleeper = current;

		__set_current_state(TASK_INTERRUPTIBLE);
		sem_unlock(sma, locknum);
		rcu_read_unlock();

		if (timeout)
			jiffies_left = schedule_timeout(jiffies_left);
		else
			schedule();

		/*
		 * fastpath: the semop has completed, either successfully or
		 * not, from the syscall pov, is quite irrelevant to us at this
		 * point; we're done.
		 *
		 * We _do_ care, nonetheless, about being awoken by a signal or
		 * spuriously. The queue.status is checked again in the
		 * slowpath (aka after taking sem_lock), such that we can detect
		 * scenarios where we were awakened externally, during the
		 * window between wake_q_add() and wake_up_q().
		 */
		error = READ_ONCE(queue.status);
		if (error != -EINTR) {
			/*
			 * User space could assume that semop() is a memory
			 * barrier: Without the mb(), the cpu could
			 * speculatively read in userspace stale data that was
			 * overwritten by the previous owner of the semaphore.
			 */
			smp_mb();
			goto out_free;
		}

		rcu_read_lock();
		locknum = sem_lock(sma, sops, nsops);

		if (!ipc_valid_object(&sma->sem_perm))
			goto out_unlock_free;

		error = READ_ONCE(queue.status);

		/*
		 * If queue.status != -EINTR we are woken up by another process.
		 * Leave without unlink_queue(), but with sem_unlock().
		 */
		if (error != -EINTR)
			goto out_unlock_free;

		/*
		 * If an interrupt occurred we have to clean up the queue.
		 */
		if (timeout && jiffies_left == 0)
			error = -EAGAIN;
	} while (error == -EINTR && !signal_pending(current)); /* spurious */

	unlink_queue(sma, &queue);

out_unlock_free:
	sem_unlock(sma, locknum);
	rcu_read_unlock();
out_free:
	if (sops != fast_sops)
		kvfree(sops);
	return error;
}
SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops)
{
	return sys_semtimedop(semid, tsops, nsops, NULL);
}
/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
 * parent and child tasks.
 */
int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	int error;

	if (clone_flags & CLONE_SYSVSEM) {
		error = get_undo_list(&undo_list);
		if (error)
			return error;
		refcount_inc(&undo_list->refcnt);
		tsk->sysvsem.undo_list = undo_list;
	} else
		tsk->sysvsem.undo_list = NULL;

	return 0;
}
/*
 * add semadj values to semaphores, free undo structures.
 * undo structures are not freed when semaphore arrays are destroyed
 * so some of them may be out of date.
 * IMPLEMENTATION NOTE: There is some confusion over whether the
 * set of adjustments that needs to be done should be done in an atomic
 * manner or not. That is, if we are attempting to decrement the semval
 * should we queue up and wait until we can do so legally?
 * The original implementation attempted to do this (queue and wait).
 * The current implementation does not do so. The POSIX standard
 * and SVID should be consulted to determine what behavior is mandated.
 */
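/*
 * Illustrative user-space sketch (not part of this file; assumes the standard
 * <sys/sem.h> API): a decrement performed with SEM_UNDO is reverted here when
 * the task exits without releasing the semaphore itself.
 *
 *	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };
 *	semop(semid, &op, 1);	(records semadj[0] = +1)
 *	_exit(0);		(exit_sem() below adds the +1 back to semval)
 */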
void exit_sem(struct task_struct *tsk)
{
	struct sem_undo_list *ulp;

	ulp = tsk->sysvsem.undo_list;
	if (!ulp)
		return;
	tsk->sysvsem.undo_list = NULL;

	if (!refcount_dec_and_test(&ulp->refcnt))
		return;

	for (;;) {
		struct sem_array *sma;
		struct sem_undo *un;
		int semid, i;
		DEFINE_WAKE_Q(wake_q);

		cond_resched();

		rcu_read_lock();
		un = list_entry_rcu(ulp->list_proc.next,
				    struct sem_undo, list_proc);
		if (&un->list_proc == &ulp->list_proc) {
			/*
			 * We must wait for freeary() before freeing this ulp,
			 * in case we raced with last sem_undo. There is a small
			 * possibility where we exit while freeary() didn't
			 * finish unlocking sem_undo_list.
			 */
			spin_lock(&ulp->lock);
			spin_unlock(&ulp->lock);
			rcu_read_unlock();
			break;
		}
		spin_lock(&ulp->lock);
		semid = un->semid;
		spin_unlock(&ulp->lock);

		/* exit_sem raced with IPC_RMID, nothing to do */
		if (semid == -1) {
			rcu_read_unlock();
			continue;
		}

		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
		/* exit_sem raced with IPC_RMID, nothing to do */
		if (IS_ERR(sma)) {
			rcu_read_unlock();
			continue;
		}

		sem_lock(sma, NULL, -1);
		/* exit_sem raced with IPC_RMID, nothing to do */
		if (!ipc_valid_object(&sma->sem_perm)) {
			sem_unlock(sma, -1);
			rcu_read_unlock();
			continue;
		}
		un = __lookup_undo(ulp, semid);
		if (un == NULL) {
			/* exit_sem raced with IPC_RMID+semget() that created
			 * exactly the same semid. Nothing to do.
			 */
			sem_unlock(sma, -1);
			rcu_read_unlock();
			continue;
		}

		/* remove un from the linked lists */
		ipc_assert_locked_object(&sma->sem_perm);
		list_del(&un->list_id);

		/* we are the last process using this ulp, acquiring ulp->lock
		 * isn't required. Besides that, we are also protected against
		 * IPC_RMID as we hold sma->sem_perm lock now
		 */
		list_del_rcu(&un->list_proc);

		/* perform adjustments registered in un */
		for (i = 0; i < sma->sem_nsems; i++) {
			struct sem *semaphore = &sma->sems[i];
			if (un->semadj[i]) {
				semaphore->semval += un->semadj[i];
				/*
				 * Range checks of the new semaphore value,
				 * not defined by SUS:
				 * - Some unices ignore the undo entirely
				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
				 * - some cap the value (e.g. FreeBSD caps
				 *   at 0, but doesn't enforce SEMVMX)
				 *
				 * Linux caps the semaphore value, both at 0
				 * and at SEMVMX.
				 *
				 *	Manfred <manfred@colorfullife.com>
				 */
				if (semaphore->semval < 0)
					semaphore->semval = 0;
				if (semaphore->semval > SEMVMX)
					semaphore->semval = SEMVMX;
				semaphore->sempid = task_tgid_vnr(current);
			}
		}
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 1, &wake_q);
		sem_unlock(sma, -1);
		rcu_read_unlock();
		wake_up_q(&wake_q);

		kfree_rcu(un, rcu);
	}
	kfree(ulp);
}
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct kern_ipc_perm *ipcp = it;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	time_t sem_otime;

	/*
	 * The proc interface isn't aware of sem_lock(), it calls
	 * ipc_lock_object() directly (in sysvipc_find_ipc).
	 * In order to stay compatible with sem_lock(), we must
	 * enter / leave complex_mode.
	 */
	complexmode_enter(sma);

	sem_otime = get_semotime(sma);

	seq_printf(s,
		   "%10d %10d %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
		   sma->sem_perm.key,
		   sma->sem_perm.id,
		   sma->sem_perm.mode,
		   sma->sem_nsems,
		   from_kuid_munged(user_ns, sma->sem_perm.uid),
		   from_kgid_munged(user_ns, sma->sem_perm.gid),
		   from_kuid_munged(user_ns, sma->sem_perm.cuid),
		   from_kgid_munged(user_ns, sma->sem_perm.cgid),
		   sem_otime,
		   sma->sem_ctime);

	complexmode_tryleave(sma);

	return 0;
}
#endif