// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 * Enforced range limit on SEM_UNDO
 * (c) 2001 Red Hat Inc
 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
 * (c) 2016 Davidlohr Bueso <dave@stgolabs.net>
 * Further wakeup optimizations, documentation
 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Implementation notes: (May 2010)
 * This file implements System V semaphores.
 *
 * User space visible behavior:
 * - FIFO ordering for semop() operations (just FIFO, not starvation
 *   avoidance)
 * - multiple semaphore operations that alter the same semaphore in
 *   one semop() are handled.
 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
 *   SETALL calls.
 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
 * - undo adjustments at process exit are limited to 0..SEMVMX.
 * - namespaces are supported.
 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
 *   to /proc/sys/kernel/sem.
 * - statistics about the usage are reported in /proc/sysvipc/sem.
 *
 * Internals:
 * - all global variables are read-mostly.
 * - semop() calls and semctl(RMID) are synchronized by RCU.
 * - most operations do write operations (actually: spin_lock calls) to
 *   the per-semaphore array structure.
 *   Thus: Perfect SMP scaling between independent semaphore arrays.
 *   If multiple semaphores in one array are used, then cache line
 *   thrashing on the semaphore array spinlock will limit the scaling.
 * - semncnt and semzcnt are calculated on demand in count_semcnt()
 * - the task that performs a successful semop() scans the list of all
 *   sleeping tasks and completes any pending operations that can be fulfilled.
 *   Semaphores are actively given to waiting tasks (necessary for FIFO).
 *   (see update_queue())
 * - To improve the scalability, the actual wake-up calls are performed after
 *   dropping all locks. (see wake_up_sem_queue_prepare())
 * - All work is done by the waker, the woken up task does not have to do
 *   anything - not even acquiring a lock or dropping a refcount.
 * - A woken up task may not even touch the semaphore array anymore, it may
 *   have been destroyed already by a semctl(RMID).
 * - UNDO values are stored in an array (one per process and per
 *   semaphore array, lazily allocated). For backwards compatibility, multiple
 *   modes for the UNDO variables are supported (per process, per thread).
 *   (see copy_semundo, CLONE_SYSVSEM)
 * - There are two lists of the pending operations: a per-array list
 *   and a per-semaphore list (stored in the array). This allows achieving FIFO
 *   ordering without always scanning all pending operations.
 *   The worst-case behavior is nevertheless O(N^2) for N wakeups.
 */
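/*
 * Illustration (user space, not part of this file): a minimal sketch of
 * the user-visible API documented above. All values are arbitrary and
 * error handling is omitted.
 *
 *	#include <sys/ipc.h>
 *	#include <sys/sem.h>
 *
 *	int main(void)
 *	{
 *		struct sembuf op = { .sem_num = 0, .sem_op = -1,
 *				     .sem_flg = SEM_UNDO };
 *		int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
 *
 *		semctl(id, 0, SETVAL, 1);	// updates sem_ctime
 *		semop(id, &op, 1);		// FIFO-ordered; undone at exit
 *		semctl(id, 0, IPC_RMID);
 *		return 0;
 *	}
 */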
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>
#include <linux/sched/wake_q.h>
#include <linux/nospec.h>
#include <linux/rhashtable.h>

#include <linux/uaccess.h>
#include "util.h"
/* One semaphore structure for each semaphore in the system. */
struct sem {
	int	semval;		/* current value */
	/*
	 * PID of the process that last modified the semaphore. For
	 * Linux, specifically these are:
	 *  - semop
	 *  - semctl, via SETVAL and SETALL.
	 *  - at task exit when performing undo adjustments (see exit_sem).
	 */
	struct pid *sempid;
	spinlock_t	lock;	/* spinlock for fine-grained semtimedop */
	struct list_head pending_alter; /* pending single-sop operations */
					/* that alter the semaphore */
	struct list_head pending_const; /* pending single-sop operations */
					/* that do not alter the semaphore*/
	time64_t	 sem_otime;	/* candidate for sem_otime */
} ____cacheline_aligned_in_smp;

/* One sem_array data structure for each set of semaphores in the system. */
struct sem_array {
	struct kern_ipc_perm	sem_perm;	/* permissions .. see ipc.h */
	time64_t		sem_ctime;	/* create/last semctl() time */
	struct list_head	pending_alter;	/* pending operations */
						/* that alter the array */
	struct list_head	pending_const;	/* pending complex operations */
						/* that do not alter semvals */
	struct list_head	list_id;	/* undo requests on this array */
	int			sem_nsems;	/* no. of semaphores in array */
	int			complex_count;	/* pending complex operations */
	unsigned int		use_global_lock;/* >0: global lock required */

	struct sem		sems[];
} __randomize_layout;
/* One queue for each sleeping process in the system. */
struct sem_queue {
	struct list_head	list;	 /* queue of pending operations */
	struct task_struct	*sleeper; /* this process */
	struct sem_undo		*undo;	 /* undo structure */
	struct pid		*pid;	 /* process id of requesting process */
	int			status;	 /* completion status of operation */
	struct sembuf		*sops;	 /* array of pending operations */
	struct sembuf		*blocking; /* the operation that blocked */
	int			nsops;	 /* number of operations */
	bool			alter;	 /* does *sops alter the array? */
	bool			dupsop;	 /* sops on more than one sem_num */
};
/* Each task has a list of undo requests. They are executed automatically
 * when the process exits.
 */
struct sem_undo {
	struct list_head	list_proc;	/* per-process list: *
						 * all undos from one process
						 * rcu protected */
	struct rcu_head		rcu;		/* rcu struct for sem_undo */
	struct sem_undo_list	*ulp;		/* back ptr to sem_undo_list */
	struct list_head	list_id;	/* per semaphore array list:
						 * all undos for one array */
	int			semid;		/* semaphore set identifier */
	short			*semadj;	/* array of adjustments */
						/* one per semaphore */
};

/* sem_undo_list controls shared access to the list of sem_undo structures
 * that may be shared among all tasks in a CLONE_SYSVSEM task group.
 */
struct sem_undo_list {
	refcount_t		refcnt;
	spinlock_t		lock;
	struct list_head	list_proc;
};
#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])

static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif

#define SEMMSL_FAST	256 /* 512 bytes on stack */
#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */

/*
 * Switching from the mode suitable for simple ops
 * to the mode for complex ops is costly. Therefore:
 * use some hysteresis
 */
#define USE_GLOBAL_LOCK_HYSTERESIS	10
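/*
 * Rough illustration of the hysteresis: complexmode_enter() sets
 * use_global_lock to USE_GLOBAL_LOCK_HYSTERESIS, and every unlock in
 * global mode decrements it. With the value 10, up to 9 subsequent
 * single-sop semop() calls still run under the global lock before
 * complexmode_tryleave() drops back to per-semaphore locking.
 */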
/*
 * Locking:
 * a) global sem_lock() for read/write
 *	sem_array.complex_count,
 *	sem_array.pending{_alter,_const}
 *
 * b) global or semaphore sem_lock() for read/write:
 *	sem_array.sems[i].pending_{const,alter}:
 *
 * c) special:
 *	sem_undo_list.list_proc:
 *	* undo_list->lock for write
 *	* rcu for read
 *	use_global_lock:
 *	* global sem_lock() for write
 *	* either local or global sem_lock() for read.
 *
 * Memory ordering:
 * Most ordering is enforced by using spin_lock() and spin_unlock().
 * Exceptions:
 * 1) use_global_lock: (SEM_BARRIER_1)
 *	Setting it from non-zero to 0 is a RELEASE, this is ensured by
 *	using smp_store_release(): Immediately after setting it to 0,
 *	a simple op can start.
 *	Testing if it is non-zero is an ACQUIRE, this is ensured by using
 *	smp_load_acquire().
 *	Setting it from 0 to non-zero must be ordered with regards to
 *	this smp_load_acquire(), this is guaranteed because the smp_load_acquire()
 *	is inside a spin_lock() and after a write from 0 to non-zero a
 *	spin_lock()+spin_unlock() is done.
 * 2) queue.status: (SEM_BARRIER_2)
 *	Initialization is done while holding sem_lock(), so no further barrier is
 *	required.
 *	Setting it to a result code is a RELEASE, this is ensured by both a
 *	smp_store_release() (for case a) and while holding sem_lock()
 *	(for case b).
 *	The ACQUIRE when reading the result code without holding sem_lock() is
 *	achieved by using READ_ONCE() + smp_acquire__after_ctrl_dep().
 *	Reading the result code while holding sem_lock() needs no further barriers,
 *	the locks inside sem_lock() enforce ordering (case b above).
 * 3) current->state:
 *	current->state is set to TASK_INTERRUPTIBLE while holding sem_lock().
 *	The wakeup is handled using the wake_q infrastructure. wake_q wakeups may
 *	happen immediately after calling wake_q_add. As wake_q_add_safe() is called
 *	when holding sem_lock(), no further barriers are required.
 *
 * See also ipc/mqueue.c for more details on the covered races.
 */
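/*
 * Condensed sketch of the SEM_BARRIER_1 pairing described above; the
 * real code lives in complexmode_tryleave() and sem_lock() below:
 *
 *	// writer, leaving complex mode:
 *	smp_store_release(&sma->use_global_lock, 0);
 *
 *	// reader, simple-op fast path (per-semaphore lock already held):
 *	if (!smp_load_acquire(&sma->use_global_lock))
 *		return sops->sem_num;	// per-semaphore locking is safe
 */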
#define sc_semmsl	sem_ctls[0]
#define sc_semmns	sem_ctls[1]
#define sc_semopm	sem_ctls[2]
#define sc_semmni	sem_ctls[3]
void sem_init_ns(struct ipc_namespace *ns)
{
	ns->sc_semmsl = SEMMSL;
	ns->sc_semmns = SEMMNS;
	ns->sc_semopm = SEMOPM;
	ns->sc_semmni = SEMMNI;

	ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}

void sem_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &sem_ids(ns), freeary);
	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
	rhashtable_destroy(&ns->ids[IPC_SEM_IDS].key_ht);
}
void __init sem_init(void)
{
	sem_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/sem",
				" key semid perms nsems uid gid cuid cgid otime ctime\n",
				IPC_SEM_IDS, sysvipc_sem_proc_show);
}
/**
 * unmerge_queues - unmerge queues, if possible.
 * @sma: semaphore array
 *
 * The function unmerges the wait queues if complex_count is 0.
 * It must be called prior to dropping the global semaphore array lock.
 */
static void unmerge_queues(struct sem_array *sma)
{
	struct sem_queue *q, *tq;

	/* complex operations still around? */
	if (sma->complex_count)
		return;
	/*
	 * We will switch back to simple mode.
	 * Move all pending operation back into the per-semaphore
	 * queues.
	 */
	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		struct sem *curr;

		curr = &sma->sems[q->sops[0].sem_num];

		list_add_tail(&q->list, &curr->pending_alter);
	}
	INIT_LIST_HEAD(&sma->pending_alter);
}
/**
 * merge_queues - merge single semop queues into global queue
 * @sma: semaphore array
 *
 * This function merges all per-semaphore queues into the global queue.
 * It is necessary to achieve FIFO ordering for the pending single-sop
 * operations when a multi-semop operation must sleep.
 * Only the alter operations must be moved, the const operations can stay.
 */
static void merge_queues(struct sem_array *sma)
{
	int i;

	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = &sma->sems[i];

		list_splice_init(&sem->pending_alter, &sma->pending_alter);
	}
}
static void sem_rcu_free(struct rcu_head *head)
{
	struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
	struct sem_array *sma = container_of(p, struct sem_array, sem_perm);

	security_sem_free(&sma->sem_perm);
	kvfree(sma);
}
/*
 * Enter the mode suitable for non-simple operations:
 * Caller must own sem_perm.lock.
 */
static void complexmode_enter(struct sem_array *sma)
{
	int i;
	struct sem *sem;

	if (sma->use_global_lock > 0) {
		/*
		 * We are already in global lock mode.
		 * Nothing to do, just reset the
		 * counter until we return to simple mode.
		 */
		sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
		return;
	}
	sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;

	for (i = 0; i < sma->sem_nsems; i++) {
		sem = &sma->sems[i];
		spin_lock(&sem->lock);
		spin_unlock(&sem->lock);
	}
}
/*
 * Try to leave the mode that disallows simple operations:
 * Caller must own sem_perm.lock.
 */
static void complexmode_tryleave(struct sem_array *sma)
{
	if (sma->complex_count) {
		/* Complex ops are sleeping.
		 * We must stay in complex mode
		 */
		return;
	}
	if (sma->use_global_lock == 1) {

		/* See SEM_BARRIER_1 for purpose/pairing */
		smp_store_release(&sma->use_global_lock, 0);
	} else {
		sma->use_global_lock--;
	}
}
#define SEM_GLOBAL_LOCK	(-1)
/*
 * If the request contains only one semaphore operation, and there are
 * no complex transactions pending, lock only the semaphore involved.
 * Otherwise, lock the entire semaphore array, since we either have
 * multiple semaphores in our own semops, or we need to look at
 * semaphores from other pending complex operations.
 */
static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
			   int nsops)
{
	struct sem *sem;
	int idx;

	if (nsops != 1) {
		/* Complex operation - acquire a full lock */
		ipc_lock_object(&sma->sem_perm);

		/* Prevent parallel simple ops */
		complexmode_enter(sma);
		return SEM_GLOBAL_LOCK;
	}

	/*
	 * Only one semaphore affected - try to optimize locking.
	 * Optimized locking is possible if no complex operation
	 * is either enqueued or processed right now.
	 *
	 * Both facts are tracked by use_global_lock.
	 */
	idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
	sem = &sma->sems[idx];

	/*
	 * Initial check for use_global_lock. Just an optimization,
	 * no locking, no memory barrier.
	 */
	if (!sma->use_global_lock) {
		/*
		 * It appears that no complex operation is around.
		 * Acquire the per-semaphore lock.
		 */
		spin_lock(&sem->lock);

		/* see SEM_BARRIER_1 for purpose/pairing */
		if (!smp_load_acquire(&sma->use_global_lock)) {
			/* fast path successful! */
			return sops->sem_num;
		}
		spin_unlock(&sem->lock);
	}

	/* slow path: acquire the full lock */
	ipc_lock_object(&sma->sem_perm);

	if (sma->use_global_lock == 0) {
		/*
		 * The use_global_lock mode ended while we waited for
		 * sma->sem_perm.lock. Thus we must switch to locking
		 * with sem->lock.
		 * Unlike in the fast path, there is no need to recheck
		 * sma->use_global_lock after we have acquired sem->lock:
		 * We own sma->sem_perm.lock, thus use_global_lock cannot
		 * change.
		 */
		spin_lock(&sem->lock);

		ipc_unlock_object(&sma->sem_perm);
		return sops->sem_num;
	} else {
		/*
		 * Not a false alarm, thus continue to use the global lock
		 * mode. No need for complexmode_enter(), this was done by
		 * the caller that has set use_global_lock to non-zero.
		 */
		return SEM_GLOBAL_LOCK;
	}
}
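/*
 * Usage sketch (illustrative, mirrors the callers in this file): the
 * value returned by sem_lock() must be passed back unchanged to
 * sem_unlock(), which is defined next.
 *
 *	int locknum = sem_lock(sma, sops, nsops);
 *	...			// operate on the array
 *	sem_unlock(sma, locknum);
 */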
static inline void sem_unlock(struct sem_array *sma, int locknum)
{
	if (locknum == SEM_GLOBAL_LOCK) {
		unmerge_queues(sma);
		complexmode_tryleave(sma);
		ipc_unlock_object(&sma->sem_perm);
	} else {
		struct sem *sem = &sma->sems[locknum];

		spin_unlock(&sem->lock);
	}
}
/*
 * sem_lock_(check_) routines are called in the paths where the rwsem
 * is not held.
 *
 * The caller holds the RCU read lock.
 */
static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
							int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct sem_array, sem_perm);
}
static inline void sem_lock_and_putref(struct sem_array *sma)
{
	sem_lock(sma, NULL, -1);
	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
}

static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
	ipc_rmid(&sem_ids(ns), &s->sem_perm);
}
static struct sem_array *sem_alloc(size_t nsems)
{
	struct sem_array *sma;

	if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
		return NULL;

	sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL);
	if (unlikely(!sma))
		return NULL;

	return sma;
}
/**
 * newary - Create a new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rwsem held (as a writer)
 */
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
	int retval;
	struct sem_array *sma;
	key_t key = params->key;
	int nsems = params->u.nsems;
	int semflg = params->flg;
	int i;

	if (!nsems)
		return -EINVAL;
	if (ns->used_sems + nsems > ns->sc_semmns)
		return -ENOSPC;

	sma = sem_alloc(nsems);
	if (!sma)
		return -ENOMEM;

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(&sma->sem_perm);
	if (retval) {
		kvfree(sma);
		return retval;
	}

	for (i = 0; i < nsems; i++) {
		INIT_LIST_HEAD(&sma->sems[i].pending_alter);
		INIT_LIST_HEAD(&sma->sems[i].pending_const);
		spin_lock_init(&sma->sems[i].lock);
	}

	sma->complex_count = 0;
	sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
	INIT_LIST_HEAD(&sma->pending_alter);
	INIT_LIST_HEAD(&sma->pending_const);
	INIT_LIST_HEAD(&sma->list_id);
	sma->sem_nsems = nsems;
	sma->sem_ctime = ktime_get_real_seconds();

	/* ipc_addid() locks sma upon success. */
	retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
	if (retval < 0) {
		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
		return retval;
	}
	ns->used_sems += nsems;

	sem_unlock(sma, -1);
	rcu_read_unlock();

	return sma->sem_perm.id;
}
/*
 * Called with sem_ids.rwsem and ipcp locked.
 */
static int sem_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	if (params->u.nsems > sma->sem_nsems)
		return -EINVAL;

	return 0;
}
long ksys_semget(key_t key, int nsems, int semflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops sem_ops = {
		.getnew = newary,
		.associate = security_sem_associate,
		.more_checks = sem_more_checks,
	};
	struct ipc_params sem_params;

	ns = current->nsproxy->ipc_ns;

	if (nsems < 0 || nsems > ns->sc_semmsl)
		return -EINVAL;

	sem_params.key = key;
	sem_params.flg = semflg;
	sem_params.u.nsems = nsems;

	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}

SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
	return ksys_semget(key, nsems, semflg);
}
/**
 * perform_atomic_semop[_slow] - Attempt to perform semaphore
 *                               operations on a given array.
 * @sma: semaphore array
 * @q: struct sem_queue that describes the operation
 *
 * Caller blocking behavior is as follows, based on the value
 * indicated by the semaphore operation (sem_op):
 *
 *  (1) >0 never blocks.
 *  (2)  0 (wait-for-zero operation): semval is non-zero.
 *  (3) <0 attempting to decrement semval to a value smaller than zero.
 *
 * Returns 0 if the operation was possible.
 * Returns 1 if the operation is impossible, the caller must sleep.
 * Returns <0 for error codes.
 */
static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
{
	int result, sem_op, nsops;
	struct pid *pid;
	struct sembuf *sop;
	struct sem *curr;
	struct sembuf *sops;
	struct sem_undo *un;

	sops = q->sops;
	nsops = q->nsops;
	un = q->undo;

	for (sop = sops; sop < sops + nsops; sop++) {
		int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);

		curr = &sma->sems[idx];
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;
			/* Exceeding the undo range is an error. */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
			un->semadj[sop->sem_num] = undo;
		}

		curr->semval = result;
	}

	sop--;
	pid = q->pid;
	while (sop >= sops) {
		ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid);
		sop--;
	}

	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	q->blocking = sop;

	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	sop--;
	while (sop >= sops) {
		sem_op = sop->sem_op;
		sma->sems[sop->sem_num].semval -= sem_op;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] += sem_op;
		sop--;
	}

	return result;
}
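/*
 * Worked example (illustrative): with semval == 1, a single operation
 * {.sem_num = 0, .sem_op = -2} computes result == -1, so the caller
 * must sleep (return value 1), or fails with -EAGAIN if IPC_NOWAIT is
 * set. A wait-for-zero operation (sem_op == 0) blocks for the opposite
 * reason: semval is still non-zero.
 */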
static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
{
	int result, sem_op, nsops;
	struct sembuf *sop;
	struct sem *curr;
	struct sembuf *sops;
	struct sem_undo *un;

	sops = q->sops;
	nsops = q->nsops;
	un = q->undo;

	if (unlikely(q->dupsop))
		return perform_atomic_semop_slow(sma, q);

	/*
	 * We scan the semaphore set twice, first to ensure that the entire
	 * operation can succeed, therefore avoiding any pointless writes
	 * to shared memory and having to undo such changes in order to block
	 * until the operations can go through.
	 */
	for (sop = sops; sop < sops + nsops; sop++) {
		int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);

		curr = &sma->sems[idx];
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block; /* wait-for-zero */

		result += sem_op;
		if (result < 0)
			goto would_block;

		if (result > SEMVMX)
			return -ERANGE;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			/* Exceeding the undo range is an error. */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				return -ERANGE;
		}
	}

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = &sma->sems[sop->sem_num];
		sem_op = sop->sem_op;
		result = curr->semval;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			un->semadj[sop->sem_num] = undo;
		}
		curr->semval += sem_op;
		ipc_update_pid(&curr->sempid, q->pid);
	}

	return 0;

would_block:
	q->blocking = sop;
	return sop->sem_flg & IPC_NOWAIT ? -EAGAIN : 1;
}
static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
					     struct wake_q_head *wake_q)
{
	get_task_struct(q->sleeper);

	/* see SEM_BARRIER_2 for purpose/pairing */
	smp_store_release(&q->status, error);

	wake_q_add_safe(wake_q, q->sleeper);
}

static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
	list_del(&q->list);
	if (q->nsops > 1)
		sma->complex_count--;
}
/** check_restart(sma, q)
 * @sma: semaphore array
 * @q: the operation that just completed
 *
 * update_queue is O(N^2) when it restarts scanning the whole queue of
 * waiting operations. Therefore this function checks if the restart is
 * really necessary. It is called after a previously waiting operation
 * modified the array.
 * Note that wait-for-zero operations are handled without restart.
 */
static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
{
	/* pending complex alter operations are too difficult to analyse */
	if (!list_empty(&sma->pending_alter))
		return 1;

	/* we were a sleeping complex operation. Too difficult */
	if (q->nsops > 1)
		return 1;

	/* It is impossible that someone waits for the new value:
	 * - complex operations always restart.
	 * - wait-for-zero are handled separately.
	 * - q is a previously sleeping simple operation that
	 *   altered the array. It must be a decrement, because
	 *   simple increments never sleep.
	 * - If there are older (higher priority) decrements
	 *   in the queue, then they have observed the original
	 *   semval value and couldn't proceed. The operation
	 *   decremented to value - thus they won't proceed either.
	 */
	return 0;
}
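/*
 * Worked example (illustrative): assume semval was 1 and q decremented
 * it to 0. Any older simple decrement still queued on this semaphore
 * already failed against semval == 1; the value only went down, so it
 * cannot succeed now either and no restart is needed.
 */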
/**
 * wake_const_ops - wake up non-alter tasks
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @wake_q: lockless wake-queue head.
 *
 * wake_const_ops must be called after a semaphore in a semaphore array
 * was set to 0. If complex const operations are pending, wake_const_ops must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @wake_q. The return code
 * is stored in q->status.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int wake_const_ops(struct sem_array *sma, int semnum,
			  struct wake_q_head *wake_q)
{
	struct sem_queue *q, *tmp;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_const;
	else
		pending_list = &sma->sems[semnum].pending_const;

	list_for_each_entry_safe(q, tmp, pending_list, list) {
		int error = perform_atomic_semop(sma, q);

		if (error > 0)
			continue;
		/* operation completed, remove from queue & wakeup */
		unlink_queue(sma, q);

		wake_up_sem_queue_prepare(q, error, wake_q);
		if (error == 0)
			semop_completed = 1;
	}

	return semop_completed;
}
/**
 * do_smart_wakeup_zero - wakeup all wait for zero tasks
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @wake_q: lockless wake-queue head
 *
 * Checks all required queues for wait-for-zero operations, based
 * on the actual changes that were performed on the semaphore array.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
				int nsops, struct wake_q_head *wake_q)
{
	int i;
	int semop_completed = 0;
	int got_zero = 0;

	/* first: the per-semaphore queues, if known */
	if (sops) {
		for (i = 0; i < nsops; i++) {
			int num = sops[i].sem_num;

			if (sma->sems[num].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, num, wake_q);
			}
		}
	} else {
		/*
		 * No sops means modified semaphores not known.
		 * Assume all were changed.
		 */
		for (i = 0; i < sma->sem_nsems; i++) {
			if (sma->sems[i].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, i, wake_q);
			}
		}
	}
	/*
	 * If one of the modified semaphores got 0,
	 * then check the global queue, too.
	 */
	if (got_zero)
		semop_completed |= wake_const_ops(sma, -1, wake_q);

	return semop_completed;
}
/**
 * update_queue - look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @wake_q: lockless wake-queue head.
 *
 * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, update_queue must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @wake_q. The return code
 * is stored in q->status.
 * The function internally checks if const operations can now succeed.
 *
 * The function returns 1 if at least one semop was completed successfully.
 */
static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
{
	struct sem_queue *q, *tmp;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_alter;
	else
		pending_list = &sma->sems[semnum].pending_alter;

again:
	list_for_each_entry_safe(q, tmp, pending_list, list) {
		int error, restart;

		/* If we are scanning the single sop, per-semaphore list of
		 * one semaphore and that semaphore is 0, then it is not
		 * necessary to scan further: simple increments
		 * that affect only one entry succeed immediately and cannot
		 * be in the per semaphore pending queue, and decrements
		 * cannot be successful if the value is already 0.
		 */
		if (semnum != -1 && sma->sems[semnum].semval == 0)
			break;

		error = perform_atomic_semop(sma, q);

		/* Does q->sleeper still need to sleep? */
		if (error > 0)
			continue;

		unlink_queue(sma, q);

		if (error) {
			restart = 0;
		} else {
			semop_completed = 1;
			do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
			restart = check_restart(sma, q);
		}

		wake_up_sem_queue_prepare(q, error, wake_q);
		if (restart)
			goto again;
	}
	return semop_completed;
}
/**
 * set_semotime - set sem_otime
 * @sma: semaphore array
 * @sops: operations that modified the array, may be NULL
 *
 * sem_otime is replicated to avoid cache line thrashing.
 * This function sets one instance to the current time.
 */
static void set_semotime(struct sem_array *sma, struct sembuf *sops)
{
	if (sops == NULL) {
		sma->sems[0].sem_otime = ktime_get_real_seconds();
	} else {
		sma->sems[sops[0].sem_num].sem_otime =
						ktime_get_real_seconds();
	}
}
/**
 * do_smart_update - optimized update_queue
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @otime: force setting otime
 * @wake_q: lockless wake-queue head
 *
 * do_smart_update() does the required calls to update_queue and wakeup_zero,
 * based on the actual changes that were performed on the semaphore array.
 * Note that the function does not do the actual wake-up: the caller is
 * responsible for calling wake_up_q().
 * It is safe to perform this call after dropping all locks.
 */
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
			    int otime, struct wake_q_head *wake_q)
{
	int i;

	otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q);

	if (!list_empty(&sma->pending_alter)) {
		/* semaphore array uses the global queue - just process it. */
		otime |= update_queue(sma, -1, wake_q);
	} else {
		if (!sops) {
			/*
			 * No sops, thus the modified semaphores are not
			 * known. Check all.
			 */
			for (i = 0; i < sma->sem_nsems; i++)
				otime |= update_queue(sma, i, wake_q);
		} else {
			/*
			 * Check the semaphores that were increased:
			 * - No complex ops, thus all sleeping ops are
			 *   decrease.
			 * - if we decreased the value, then any sleeping
			 *   semaphore ops won't be able to run: If the
			 *   previous value was too small, then the new
			 *   value will be too small, too.
			 */
			for (i = 0; i < nsops; i++) {
				if (sops[i].sem_op > 0) {
					otime |= update_queue(sma,
							      sops[i].sem_num, wake_q);
				}
			}
		}
	}
	if (otime)
		set_semotime(sma, sops);
}
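/*
 * Typical call pattern (illustrative, mirrors semctl_setval() below):
 *
 *	DEFINE_WAKE_Q(wake_q);
 *	sem_lock(sma, NULL, -1);
 *	...			// modify the semaphore values
 *	do_smart_update(sma, NULL, 0, 0, &wake_q);
 *	sem_unlock(sma, -1);
 *	rcu_read_unlock();
 *	wake_up_q(&wake_q);	// actual wakeups, after dropping all locks
 */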
/*
 * check_qop: Test if a queued operation sleeps on the semaphore semnum
 */
static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
		     bool count_zero)
{
	struct sembuf *sop = q->blocking;

	/*
	 * Linux always (since 0.99.10) reported a task as sleeping on all
	 * semaphores. This violates SUS, therefore it was changed to the
	 * standard compliant behavior.
	 * Give the administrators a chance to notice that an application
	 * might misbehave because it relies on the Linux behavior.
	 */
	pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
			"The task %s (%d) triggered the difference, watch for misbehavior.\n",
			current->comm, task_pid_nr(current));

	if (sop->sem_num != semnum)
		return 0;

	if (count_zero && sop->sem_op == 0)
		return 1;
	if (!count_zero && sop->sem_op < 0)
		return 1;

	return 0;
}
/* The following counts are associated to each semaphore:
 *   semncnt        number of tasks waiting on semval being nonzero
 *   semzcnt        number of tasks waiting on semval being zero
 *
 * By definition, a task waits only on the semaphore of the first semop
 * that cannot proceed, even if additional operation would block, too.
 */
static int count_semcnt(struct sem_array *sma, ushort semnum,
			bool count_zero)
{
	struct list_head *l;
	struct sem_queue *q;
	int semcnt;

	semcnt = 0;
	/* First: check the simple operations. They are easy to evaluate */
	if (count_zero)
		l = &sma->sems[semnum].pending_const;
	else
		l = &sma->sems[semnum].pending_alter;

	list_for_each_entry(q, l, list) {
		/* all tasks on a per-semaphore list sleep on exactly
		 * that semaphore
		 */
		semcnt++;
	}

	/* Then: check the complex operations. */
	list_for_each_entry(q, &sma->pending_alter, list) {
		semcnt += check_qop(sma, semnum, q, count_zero);
	}
	if (count_zero) {
		list_for_each_entry(q, &sma->pending_const, list) {
			semcnt += check_qop(sma, semnum, q, count_zero);
		}
	}
	return semcnt;
}
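/*
 * Example (user space, illustrative): with semval == 1, two tasks
 * blocked in semop() subtracting 2, and one task waiting for zero:
 *
 *	semctl(semid, 0, GETNCNT);	// returns 2
 *	semctl(semid, 0, GETZCNT);	// returns 1
 */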
/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
 * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
 * remains locked on exit.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct sem_undo *un, *tu;
	struct sem_queue *q, *tq;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	int i;
	DEFINE_WAKE_Q(wake_q);

	/* Free the existing undo structures for this semaphore set. */
	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
		list_del(&un->list_id);
		spin_lock(&un->ulp->lock);
		un->semid = -1;
		list_del_rcu(&un->list_proc);
		spin_unlock(&un->ulp->lock);
		kfree_rcu(un, rcu);
	}

	/* Wake up all pending processes and let them fail with EIDRM. */
	list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
	}

	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
	}
	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = &sma->sems[i];

		list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
		}
		list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
		}
		ipc_update_pid(&sem->sempid, NULL);
	}

	/* Remove the semaphore set from the IDR */
	sem_rmid(ns, sma);
	sem_unlock(sma, -1);
	rcu_read_unlock();

	wake_up_q(&wake_q);
	ns->used_sems -= sma->sem_nsems;
	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
}
static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct semid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime	= in->sem_otime;
		out.sem_ctime	= in->sem_ctime;
		out.sem_nsems	= in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
static time64_t get_semotime(struct sem_array *sma)
{
	int i;
	time64_t res;

	res = sma->sems[0].sem_otime;
	for (i = 1; i < sma->sem_nsems; i++) {
		time64_t to = sma->sems[i].sem_otime;

		if (to > res)
			res = to;
	}
	return res;
}
static int semctl_stat(struct ipc_namespace *ns, int semid,
		       int cmd, struct semid64_ds *semid64)
{
	struct sem_array *sma;
	time64_t semotime;
	int err;

	memset(semid64, 0, sizeof(*semid64));

	rcu_read_lock();
	if (cmd == SEM_STAT || cmd == SEM_STAT_ANY) {
		sma = sem_obtain_object(ns, semid);
		if (IS_ERR(sma)) {
			err = PTR_ERR(sma);
			goto out_unlock;
		}
	} else { /* IPC_STAT */
		sma = sem_obtain_object_check(ns, semid);
		if (IS_ERR(sma)) {
			err = PTR_ERR(sma);
			goto out_unlock;
		}
	}

	/* see comment for SHM_STAT_ANY */
	if (cmd == SEM_STAT_ANY)
		audit_ipc_obj(&sma->sem_perm);
	else {
		err = -EACCES;
		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
			goto out_unlock;
	}

	err = security_sem_semctl(&sma->sem_perm, cmd);
	if (err)
		goto out_unlock;

	ipc_lock_object(&sma->sem_perm);

	if (!ipc_valid_object(&sma->sem_perm)) {
		ipc_unlock_object(&sma->sem_perm);
		err = -EIDRM;
		goto out_unlock;
	}

	kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm);
	semotime = get_semotime(sma);
	semid64->sem_otime = semotime;
	semid64->sem_ctime = sma->sem_ctime;
#ifndef CONFIG_64BIT
	semid64->sem_otime_high = semotime >> 32;
	semid64->sem_ctime_high = sma->sem_ctime >> 32;
#endif
	semid64->sem_nsems = sma->sem_nsems;

	if (cmd == IPC_STAT) {
		/*
		 * As defined in SUS:
		 * Return 0 on success
		 */
		err = 0;
	} else {
		/*
		 * SEM_STAT and SEM_STAT_ANY (both Linux specific)
		 * Return the full id, including the sequence number
		 */
		err = sma->sem_perm.id;
	}
	ipc_unlock_object(&sma->sem_perm);
out_unlock:
	rcu_read_unlock();
	return err;
}
static int semctl_info(struct ipc_namespace *ns, int semid,
		       int cmd, void __user *p)
{
	struct seminfo seminfo;
	int max_idx;
	int err;

	err = security_sem_semctl(NULL, cmd);
	if (err)
		return err;

	memset(&seminfo, 0, sizeof(seminfo));
	seminfo.semmni = ns->sc_semmni;
	seminfo.semmns = ns->sc_semmns;
	seminfo.semmsl = ns->sc_semmsl;
	seminfo.semopm = ns->sc_semopm;
	seminfo.semvmx = SEMVMX;
	seminfo.semmnu = SEMMNU;
	seminfo.semmap = SEMMAP;
	seminfo.semume = SEMUME;
	down_read(&sem_ids(ns).rwsem);
	if (cmd == SEM_INFO) {
		seminfo.semusz = sem_ids(ns).in_use;
		seminfo.semaem = ns->used_sems;
	} else {
		seminfo.semusz = SEMUSZ;
		seminfo.semaem = SEMAEM;
	}
	max_idx = ipc_get_maxidx(&sem_ids(ns));
	up_read(&sem_ids(ns).rwsem);
	if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
		return -EFAULT;
	return (max_idx < 0) ? 0 : max_idx;
}
static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
		int val)
{
	struct sem_undo *un;
	struct sem_array *sma;
	struct sem *curr;
	int err;
	DEFINE_WAKE_Q(wake_q);

	if (val > SEMVMX || val < 0)
		return -ERANGE;

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	if (semnum < 0 || semnum >= sma->sem_nsems) {
		rcu_read_unlock();
		return -EINVAL;
	}


	if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
		rcu_read_unlock();
		return -EACCES;
	}

	err = security_sem_semctl(&sma->sem_perm, SETVAL);
	if (err) {
		rcu_read_unlock();
		return -EACCES;
	}

	sem_lock(sma, NULL, -1);

	if (!ipc_valid_object(&sma->sem_perm)) {
		sem_unlock(sma, -1);
		rcu_read_unlock();
		return -EIDRM;
	}

	semnum = array_index_nospec(semnum, sma->sem_nsems);
	curr = &sma->sems[semnum];

	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry(un, &sma->list_id, list_id)
		un->semadj[semnum] = 0;

	curr->semval = val;
	ipc_update_pid(&curr->sempid, task_tgid(current));
	sma->sem_ctime = ktime_get_real_seconds();
	/* maybe some queued-up processes were waiting for this */
	do_smart_update(sma, NULL, 0, 0, &wake_q);
	sem_unlock(sma, -1);
	rcu_read_unlock();
	wake_up_q(&wake_q);
	return 0;
}
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, void __user *p)
{
	struct sem_array *sma;
	struct sem *curr;
	int err, nsems;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort *sem_io = fast_sem_io;
	DEFINE_WAKE_Q(wake_q);

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	nsems = sma->sem_nsems;

	err = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
		goto out_rcu_wakeup;

	err = security_sem_semctl(&sma->sem_perm, cmd);
	if (err)
		goto out_rcu_wakeup;

	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = p;
		int i;

		sem_lock(sma, NULL, -1);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}
		if (nsems > SEMMSL_FAST) {
			if (!ipc_rcu_getref(&sma->sem_perm)) {
				err = -EIDRM;
				goto out_unlock;
			}
			sem_unlock(sma, -1);
			rcu_read_unlock();
			sem_io = kvmalloc_array(nsems, sizeof(ushort),
						GFP_KERNEL);
			if (sem_io == NULL) {
				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
				return -ENOMEM;
			}

			rcu_read_lock();
			sem_lock_and_putref(sma);
			if (!ipc_valid_object(&sma->sem_perm)) {
				err = -EIDRM;
				goto out_unlock;
			}
		}
		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sems[i].semval;
		sem_unlock(sma, -1);
		rcu_read_unlock();
		err = 0;
		if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		if (!ipc_rcu_getref(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_rcu_wakeup;
		}
		rcu_read_unlock();

		if (nsems > SEMMSL_FAST) {
			sem_io = kvmalloc_array(nsems, sizeof(ushort),
						GFP_KERNEL);
			if (sem_io == NULL) {
				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
				return -ENOMEM;
			}
		}

		if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
			ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
			err = -EFAULT;
			goto out_free;
		}

		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
				err = -ERANGE;
				goto out_free;
			}
		}
		rcu_read_lock();
		sem_lock_and_putref(sma);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}

		for (i = 0; i < nsems; i++) {
			sma->sems[i].semval = sem_io[i];
			ipc_update_pid(&sma->sems[i].sempid, task_tgid(current));
		}

		ipc_assert_locked_object(&sma->sem_perm);
		list_for_each_entry(un, &sma->list_id, list_id) {
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		}
		sma->sem_ctime = ktime_get_real_seconds();
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 0, &wake_q);
		err = 0;
		goto out_unlock;
	}
	/* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
	}
	err = -EINVAL;
	if (semnum < 0 || semnum >= nsems)
		goto out_rcu_wakeup;

	sem_lock(sma, NULL, -1);
	if (!ipc_valid_object(&sma->sem_perm)) {
		err = -EIDRM;
		goto out_unlock;
	}

	semnum = array_index_nospec(semnum, nsems);
	curr = &sma->sems[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = pid_vnr(curr->sempid);
		goto out_unlock;
	case GETNCNT:
		err = count_semcnt(sma, semnum, 0);
		goto out_unlock;
	case GETZCNT:
		err = count_semcnt(sma, semnum, 1);
		goto out_unlock;
	}

out_unlock:
	sem_unlock(sma, -1);
out_rcu_wakeup:
	rcu_read_unlock();
	wake_up_q(&wake_q);
out_free:
	if (sem_io != fast_sem_io)
		kvfree(sem_io);
	return err;
}
static inline unsigned long
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct semid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
		out->sem_perm.mode	= tbuf_old.sem_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}
/*
 * This function handles some semctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int semctl_down(struct ipc_namespace *ns, int semid,
		       int cmd, struct semid64_ds *semid64)
{
	struct sem_array *sma;
	int err;
	struct kern_ipc_perm *ipcp;

	down_write(&sem_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_obtain_check(ns, &sem_ids(ns), semid, cmd,
				   &semid64->sem_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	sma = container_of(ipcp, struct sem_array, sem_perm);

	err = security_sem_semctl(&sma->sem_perm, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		sem_lock(sma, NULL, -1);
		/* freeary unlocks the ipc object and rcu */
		freeary(ns, ipcp);
		goto out_up;
	case IPC_SET:
		sem_lock(sma, NULL, -1);
		err = ipc_update_perm(&semid64->sem_perm, ipcp);
		if (err)
			goto out_unlock0;
		sma->sem_ctime = ktime_get_real_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	sem_unlock(sma, -1);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&sem_ids(ns).rwsem);
	return err;
}
static long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg, int version)
{
	struct ipc_namespace *ns;
	void __user *p = (void __user *)arg;
	struct semid64_ds semid64;
	int err;

	if (semid < 0)
		return -EINVAL;

	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SEM_INFO:
		return semctl_info(ns, semid, cmd, p);
	case IPC_STAT:
	case SEM_STAT:
	case SEM_STAT_ANY:
		err = semctl_stat(ns, semid, cmd, &semid64);
		if (err < 0)
			return err;
		if (copy_semid_to_user(p, &semid64, version))
			err = -EFAULT;
		return err;
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case SETALL:
		return semctl_main(ns, semid, semnum, cmd, p);
	case SETVAL: {
		int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
		/* big-endian 64bit */
		val = arg >> 32;
#else
		/* 32bit or little-endian 64bit */
		val = arg;
#endif
		return semctl_setval(ns, semid, semnum, val);
	}
	case IPC_SET:
		if (copy_semid_from_user(&semid64, p, version))
			return -EFAULT;
		fallthrough;
	case IPC_RMID:
		return semctl_down(ns, semid, cmd, &semid64);
	default:
		return -EINVAL;
	}
}

SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
{
	return ksys_semctl(semid, semnum, cmd, arg, IPC_64);
}
#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg)
{
	int version = ipc_parse_version(&cmd);

	return ksys_semctl(semid, semnum, cmd, arg, version);
}

SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
{
	return ksys_old_semctl(semid, semnum, cmd, arg);
}
#endif
#ifdef CONFIG_COMPAT

struct compat_semid_ds {
	struct compat_ipc_perm sem_perm;
	old_time32_t sem_otime;
	old_time32_t sem_ctime;
	compat_uptr_t sem_base;
	compat_uptr_t sem_pending;
	compat_uptr_t sem_pending_last;
	compat_uptr_t undo;
	unsigned short sem_nsems;
};
static int copy_compat_semid_from_user(struct semid64_ds *out, void __user *buf,
					int version)
{
	memset(out, 0, sizeof(*out));
	if (version == IPC_64) {
		struct compat_semid64_ds __user *p = buf;

		return get_compat_ipc64_perm(&out->sem_perm, &p->sem_perm);
	} else {
		struct compat_semid_ds __user *p = buf;

		return get_compat_ipc_perm(&out->sem_perm, &p->sem_perm);
	}
}
static int copy_compat_semid_to_user(void __user *buf, struct semid64_ds *in,
					int version)
{
	if (version == IPC_64) {
		struct compat_semid64_ds v;

		memset(&v, 0, sizeof(v));
		to_compat_ipc64_perm(&v.sem_perm, &in->sem_perm);
		v.sem_otime	 = lower_32_bits(in->sem_otime);
		v.sem_otime_high = upper_32_bits(in->sem_otime);
		v.sem_ctime	 = lower_32_bits(in->sem_ctime);
		v.sem_ctime_high = upper_32_bits(in->sem_ctime);
		v.sem_nsems = in->sem_nsems;
		return copy_to_user(buf, &v, sizeof(v));
	} else {
		struct compat_semid_ds v;

		memset(&v, 0, sizeof(v));
		to_compat_ipc_perm(&v.sem_perm, &in->sem_perm);
		v.sem_otime = in->sem_otime;
		v.sem_ctime = in->sem_ctime;
		v.sem_nsems = in->sem_nsems;
		return copy_to_user(buf, &v, sizeof(v));
	}
}
static long compat_ksys_semctl(int semid, int semnum, int cmd, int arg, int version)
{
	void __user *p = compat_ptr(arg);
	struct ipc_namespace *ns;
	struct semid64_ds semid64;
	int err;

	ns = current->nsproxy->ipc_ns;

	if (semid < 0)
		return -EINVAL;

	switch (cmd & (~IPC_64)) {
	case IPC_INFO:
	case SEM_INFO:
		return semctl_info(ns, semid, cmd, p);
	case IPC_STAT:
	case SEM_STAT:
	case SEM_STAT_ANY:
		err = semctl_stat(ns, semid, cmd, &semid64);
		if (err < 0)
			return err;
		if (copy_compat_semid_to_user(p, &semid64, version))
			err = -EFAULT;
		return err;
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case GETALL:
	case SETALL:
		return semctl_main(ns, semid, semnum, cmd, p);
	case SETVAL:
		return semctl_setval(ns, semid, semnum, arg);
	case IPC_SET:
		if (copy_compat_semid_from_user(&semid64, p, version))
			return -EFAULT;
		fallthrough;
	case IPC_RMID:
		return semctl_down(ns, semid, cmd, &semid64);
	default:
		return -EINVAL;
	}
}

COMPAT_SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, int, arg)
{
	return compat_ksys_semctl(semid, semnum, cmd, arg, IPC_64);
}
#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
long compat_ksys_old_semctl(int semid, int semnum, int cmd, int arg)
{
	int version = compat_ipc_parse_version(&cmd);

	return compat_ksys_semctl(semid, semnum, cmd, arg, version);
}

COMPAT_SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, int, arg)
{
	return compat_ksys_old_semctl(semid, semnum, cmd, arg);
}
#endif /* CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION */
#endif /* CONFIG_COMPAT */
/* If the task doesn't already have an undo_list, then allocate one
 * here.  We guarantee there is only one thread using this undo list,
 * and current is THE ONE
 *
 * If this allocation and assignment succeeds, but later
 * portions of this code fail, there is no need to free the sem_undo_list.
 * Just let it stay associated with the task, and it'll be freed later
 * at exit time.
 *
 * This can block, so callers must hold no locks.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (!undo_list) {
		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
		if (undo_list == NULL)
			return -ENOMEM;
		spin_lock_init(&undo_list->lock);
		refcount_set(&undo_list->refcnt, 1);
		INIT_LIST_HEAD(&undo_list->list_proc);

		current->sysvsem.undo_list = undo_list;
	}
	*undo_listp = undo_list;
	return 0;
}
static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc,
				spin_is_locked(&ulp->lock)) {
		if (un->semid == semid)
			return un;
	}
	return NULL;
}

static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	assert_spin_locked(&ulp->lock);

	un = __lookup_undo(ulp, semid);
	if (un) {
		list_del_rcu(&un->list_proc);
		list_add_rcu(&un->list_proc, &ulp->list_proc);
	}
	return un;
}
/**
 * find_alloc_undo - lookup (and if not present create) undo array
 * @ns: namespace
 * @semid: semaphore array id
 *
 * The function looks up (and if not present creates) the undo structure.
 * The size of the undo structure depends on the size of the semaphore
 * array, thus the alloc path is not that straightforward.
 * Lifetime-rules: sem_undo is rcu-protected, on success, the function
 * performs a rcu_read_lock().
 */
static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems, error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	rcu_read_lock();
	spin_lock(&ulp->lock);
	un = lookup_undo(ulp, semid);
	spin_unlock(&ulp->lock);
	if (likely(un != NULL))
		goto out;

	/* no undo structure around - allocate one. */
	/* step 1: figure out the size of the semaphore array */
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return ERR_CAST(sma);
	}

	nsems = sma->sem_nsems;
	if (!ipc_rcu_getref(&sma->sem_perm)) {
		rcu_read_unlock();
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	rcu_read_unlock();

	/* step 2: allocate new undo structure */
	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
	if (!new) {
		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
		return ERR_PTR(-ENOMEM);
	}

	/* step 3: Acquire the lock on semaphore array */
	rcu_read_lock();
	sem_lock_and_putref(sma);
	if (!ipc_valid_object(&sma->sem_perm)) {
		sem_unlock(sma, -1);
		rcu_read_unlock();
		kfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	spin_lock(&ulp->lock);

	/*
	 * step 4: check for races: did someone else allocate the undo struct?
	 */
	un = lookup_undo(ulp, semid);
	if (un) {
		kfree(new);
		goto success;
	}
	/* step 5: initialize & link new undo structure */
	new->semadj = (short *) &new[1];
	new->ulp = ulp;
	new->semid = semid;
	assert_spin_locked(&ulp->lock);
	list_add_rcu(&new->list_proc, &ulp->list_proc);
	ipc_assert_locked_object(&sma->sem_perm);
	list_add(&new->list_id, &sma->list_id);
	un = new;

success:
	spin_unlock(&ulp->lock);
	sem_unlock(sma, -1);
out:
	return un;
}
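/*
 * Example (user space, illustrative): the machinery above backs
 * SEM_UNDO. After
 *
 *	struct sembuf op = { .sem_num = 0, .sem_op = 2,
 *			     .sem_flg = SEM_UNDO };
 *	semop(semid, &op, 1);
 *
 * the per-process semadj[0] becomes -2, and exit_sem() applies it at
 * process exit, returning the semaphore to its previous value (clamped
 * to 0..SEMVMX).
 */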
static long do_semtimedop(int semid, struct sembuf __user *tsops,
		unsigned nsops, const struct timespec64 *timeout)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];
	struct sembuf *sops = fast_sops, *sop;
	struct sem_undo *un;
	int max, locknum;
	bool undos = false, alter = false, dupsop = false;
	struct sem_queue queue;
	unsigned long dup = 0, jiffies_left = 0;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > ns->sc_semopm)
		return -E2BIG;
	if (nsops > SEMOPM_FAST) {
		sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
		if (sops == NULL)
			return -ENOMEM;
	}

	if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
		error = -EFAULT;
		goto out_free;
	}

	if (timeout) {
		if (timeout->tv_sec < 0 || timeout->tv_nsec < 0 ||
			timeout->tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
		jiffies_left = timespec64_to_jiffies(timeout);
	}

	max = 0;
	for (sop = sops; sop < sops + nsops; sop++) {
		unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG);

		if (sop->sem_num >= max)
			max = sop->sem_num;
		if (sop->sem_flg & SEM_UNDO)
			undos = true;
		if (dup & mask) {
			/*
			 * There was a previous alter access that appears
			 * to have accessed the same semaphore, thus use
			 * the dupsop logic. "appears", because the detection
			 * can only check % BITS_PER_LONG.
			 */
			dupsop = true;
		}
		if (sop->sem_op != 0) {
			alter = true;
			dup |= mask;
		}
	}

	if (undos) {
		/* On success, find_alloc_undo takes the rcu_read_lock */
		un = find_alloc_undo(ns, semid);
		if (IS_ERR(un)) {
			error = PTR_ERR(un);
			goto out_free;
		}
	} else {
		un = NULL;
		rcu_read_lock();
	}

	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		error = PTR_ERR(sma);
		goto out_free;
	}

	error = -EFBIG;
	if (max >= sma->sem_nsems) {
		rcu_read_unlock();
		goto out_free;
	}

	error = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
		rcu_read_unlock();
		goto out_free;
	}

	error = security_sem_semop(&sma->sem_perm, sops, nsops, alter);
	if (error) {
		rcu_read_unlock();
		goto out_free;
	}

	error = -EIDRM;
	locknum = sem_lock(sma, sops, nsops);
	/*
	 * We eventually might perform the following check in a lockless
	 * fashion, considering ipc_valid_object() locking constraints.
	 * If nsops == 1 and there is no contention for sem_perm.lock, then
	 * only a per-semaphore lock is held and it's OK to proceed with the
	 * check below. More details on the fine grained locking scheme
	 * entangled here and why it's RMID race safe on comments at sem_lock()
	 */
	if (!ipc_valid_object(&sma->sem_perm))
		goto out_unlock_free;
	/*
	 * semid identifiers are not unique - find_alloc_undo may have
	 * allocated an undo structure, it was invalidated by an RMID
	 * and now a new array received the same id. Check and fail.
	 * This case can be detected checking un->semid. The existence of
	 * "un" itself is guaranteed by rcu.
	 */
	if (un && un->semid == -1)
		goto out_unlock_free;

	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = task_tgid(current);
	queue.alter = alter;
	queue.dupsop = dupsop;

	error = perform_atomic_semop(sma, &queue);
	if (error == 0) { /* non-blocking successful path */
		DEFINE_WAKE_Q(wake_q);

		/*
		 * If the operation was successful, then do
		 * the required updates.
		 */
		if (alter)
			do_smart_update(sma, sops, nsops, 1, &wake_q);
		else
			set_semotime(sma, sops);

		sem_unlock(sma, locknum);
		rcu_read_unlock();
		wake_up_q(&wake_q);

		goto out_free;
	}
	if (error < 0) /* non-blocking error path */
		goto out_unlock_free;

	/*
	 * We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 */
	if (nsops == 1) {
		struct sem *curr;
		int idx = array_index_nospec(sops->sem_num, sma->sem_nsems);

		curr = &sma->sems[idx];

		if (alter) {
			if (sma->complex_count) {
				list_add_tail(&queue.list,
						&sma->pending_alter);
			} else {
				list_add_tail(&queue.list,
						&curr->pending_alter);
			}
		} else {
			list_add_tail(&queue.list, &curr->pending_const);
		}
	} else {
		if (!sma->complex_count)
			merge_queues(sma);

		if (alter)
			list_add_tail(&queue.list, &sma->pending_alter);
		else
			list_add_tail(&queue.list, &sma->pending_const);

		sma->complex_count++;
	}

	do {
		/* memory ordering ensured by the lock in sem_lock() */
		WRITE_ONCE(queue.status, -EINTR);
		queue.sleeper = current;

		/* memory ordering is ensured by the lock in sem_lock() */
		__set_current_state(TASK_INTERRUPTIBLE);
		sem_unlock(sma, locknum);
		rcu_read_unlock();

		if (timeout)
			jiffies_left = schedule_timeout(jiffies_left);
		else
			schedule();

		/*
		 * fastpath: the semop has completed, either successfully or
		 * not, from the syscall pov, is quite irrelevant to us at this
		 * point; we're done.
		 *
		 * We _do_ care, nonetheless, about being awoken by a signal or
		 * spuriously. The queue.status is checked again in the
		 * slowpath (aka after taking sem_lock), such that we can detect
		 * scenarios where we were awakened externally, during the
		 * window between wake_q_add() and wake_up_q().
		 */
		error = READ_ONCE(queue.status);
		if (error != -EINTR) {
			/* see SEM_BARRIER_2 for purpose/pairing */
			smp_acquire__after_ctrl_dep();
			goto out_free;
		}

		rcu_read_lock();
		locknum = sem_lock(sma, sops, nsops);

		if (!ipc_valid_object(&sma->sem_perm))
			goto out_unlock_free;

		/*
		 * No necessity for any barrier: We are protected by sem_lock()
		 */
		error = READ_ONCE(queue.status);

		/*
		 * If queue.status != -EINTR we are woken up by another process.
		 * Leave without unlink_queue(), but with sem_unlock().
		 */
		if (error != -EINTR)
			goto out_unlock_free;

		/*
		 * If an interrupt occurred we have to clean up the queue.
		 */
		if (timeout && jiffies_left == 0)
			error = -EAGAIN;
	} while (error == -EINTR && !signal_pending(current)); /* spurious */

	unlink_queue(sma, &queue);

out_unlock_free:
	sem_unlock(sma, locknum);
	rcu_read_unlock();
out_free:
	if (sops != fast_sops)
		kvfree(sops);
	return error;
}
long ksys_semtimedop(int semid, struct sembuf __user *tsops,
		     unsigned int nsops, const struct __kernel_timespec __user *timeout)
{
	if (timeout) {
		struct timespec64 ts;

		if (get_timespec64(&ts, timeout))
			return -EFAULT;
		return do_semtimedop(semid, tsops, nsops, &ts);
	}
	return do_semtimedop(semid, tsops, nsops, NULL);
}

SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
		unsigned int, nsops, const struct __kernel_timespec __user *, timeout)
{
	return ksys_semtimedop(semid, tsops, nsops, timeout);
}
#ifdef CONFIG_COMPAT_32BIT_TIME
long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
			    unsigned int nsops,
			    const struct old_timespec32 __user *timeout)
{
	if (timeout) {
		struct timespec64 ts;

		if (get_old_timespec32(&ts, timeout))
			return -EFAULT;
		return do_semtimedop(semid, tsems, nsops, &ts);
	}
	return do_semtimedop(semid, tsems, nsops, NULL);
}

SYSCALL_DEFINE4(semtimedop_time32, int, semid, struct sembuf __user *, tsems,
		       unsigned int, nsops,
		       const struct old_timespec32 __user *, timeout)
{
	return compat_ksys_semtimedop(semid, tsems, nsops, timeout);
}
#endif
SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops)
{
	return do_semtimedop(semid, tsops, nsops, NULL);
}
/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
 * parent and child tasks.
 */
int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	int error;

	if (clone_flags & CLONE_SYSVSEM) {
		error = get_undo_list(&undo_list);
		if (error)
			return error;
		refcount_inc(&undo_list->refcnt);
		tsk->sysvsem.undo_list = undo_list;
	} else
		tsk->sysvsem.undo_list = NULL;

	return 0;
}
/*
 * add semadj values to semaphores, free undo structures.
 * undo structures are not freed when semaphore arrays are destroyed
 * so some of them may be out of date.
 * IMPLEMENTATION NOTE: There is some confusion over whether the
 * set of adjustments that needs to be done should be done in an atomic
 * manner or not. That is, if we are attempting to decrement the semval
 * should we queue up and wait until we can do so legally?
 * The original implementation attempted to do this (queue and wait).
 * The current implementation does not do so. The POSIX standard
 * and SVID should be consulted to determine what behavior is mandated.
 */
void exit_sem(struct task_struct *tsk)
{
	struct sem_undo_list *ulp;

	ulp = tsk->sysvsem.undo_list;
	if (!ulp)
		return;
	tsk->sysvsem.undo_list = NULL;

	if (!refcount_dec_and_test(&ulp->refcnt))
		return;

	for (;;) {
		struct sem_array *sma;
		struct sem_undo *un;
		int semid, i;
		DEFINE_WAKE_Q(wake_q);

		cond_resched();

		rcu_read_lock();
		un = list_entry_rcu(ulp->list_proc.next,
				    struct sem_undo, list_proc);
		if (&un->list_proc == &ulp->list_proc) {
			/*
			 * We must wait for freeary() before freeing this ulp,
			 * in case we raced with last sem_undo. There is a small
			 * possibility where we exit while freeary() didn't
			 * finish unlocking sem_undo_list.
			 */
			spin_lock(&ulp->lock);
			spin_unlock(&ulp->lock);
			rcu_read_unlock();
			break;
		}
		spin_lock(&ulp->lock);
		semid = un->semid;
		spin_unlock(&ulp->lock);

		/* exit_sem raced with IPC_RMID, nothing to do */
		if (semid == -1) {
			rcu_read_unlock();
			continue;
		}

		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
		/* exit_sem raced with IPC_RMID, nothing to do */
		if (IS_ERR(sma)) {
			rcu_read_unlock();
			continue;
		}

		sem_lock(sma, NULL, -1);
		/* exit_sem raced with IPC_RMID, nothing to do */
		if (!ipc_valid_object(&sma->sem_perm)) {
			sem_unlock(sma, -1);
			rcu_read_unlock();
			continue;
		}
		un = __lookup_undo(ulp, semid);
		if (un == NULL) {
			/* exit_sem raced with IPC_RMID+semget() that created
			 * exactly the same semid. Nothing to do.
			 */
			sem_unlock(sma, -1);
			rcu_read_unlock();
			continue;
		}

		/* remove un from the linked lists */
		ipc_assert_locked_object(&sma->sem_perm);
		list_del(&un->list_id);

		spin_lock(&ulp->lock);
		list_del_rcu(&un->list_proc);
		spin_unlock(&ulp->lock);

		/* perform adjustments registered in un */
		for (i = 0; i < sma->sem_nsems; i++) {
			struct sem *semaphore = &sma->sems[i];

			if (un->semadj[i]) {
				semaphore->semval += un->semadj[i];
				/*
				 * Range checks of the new semaphore value,
				 * not defined by sus:
				 * - Some unices ignore the undo entirely
				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
				 * - some cap the value (e.g. FreeBSD caps
				 *   at 0, but doesn't enforce SEMVMX)
				 *
				 * Linux caps the semaphore value, both at 0
				 * and at SEMVMX.
				 *
				 *	Manfred <manfred@colorfullife.com>
				 */
				if (semaphore->semval < 0)
					semaphore->semval = 0;
				if (semaphore->semval > SEMVMX)
					semaphore->semval = SEMVMX;
				ipc_update_pid(&semaphore->sempid, task_tgid(current));
			}
		}
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 1, &wake_q);
		sem_unlock(sma, -1);
		rcu_read_unlock();
		wake_up_q(&wake_q);

		kfree_rcu(un, rcu);
	}
	kfree(ulp);
}
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct kern_ipc_perm *ipcp = it;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	time64_t sem_otime;

	/*
	 * The proc interface isn't aware of sem_lock(), it calls
	 * ipc_lock_object() directly (in sysvipc_find_ipc).
	 * In order to stay compatible with sem_lock(), we must
	 * enter / leave complex_mode.
	 */
	complexmode_enter(sma);

	sem_otime = get_semotime(sma);

	seq_printf(s,
		   "%10d %10d %4o %10u %5u %5u %5u %5u %10llu %10llu\n",
		   sma->sem_perm.key,
		   sma->sem_perm.id,
		   sma->sem_perm.mode,
		   sma->sem_nsems,
		   from_kuid_munged(user_ns, sma->sem_perm.uid),
		   from_kgid_munged(user_ns, sma->sem_perm.gid),
		   from_kuid_munged(user_ns, sma->sem_perm.cuid),
		   from_kgid_munged(user_ns, sma->sem_perm.cgid),
		   sem_otime,
		   sma->sem_ctime);

	complexmode_tryleave(sma);

	return 0;
}
#endif