// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 Nadia Yvette Chambers, IBM
 * (C) 2004 Nadia Yvette Chambers, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given ID's on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, which bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario when all but one out of 1 million PIDs possible are
 * allocated already: the scanning of 32 list entries and at most PAGE_SIZE
 * bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 */
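
/*
 * Illustrative sketch (hypothetical values, not part of the original
 * source): a task one level below the initial PID namespace is backed by
 * a single struct pid carrying one struct upid per namespace level:
 *
 *	pid->level      == 1
 *	pid->numbers[0] == { .nr = 12345, .ns = &init_pid_ns }	// global view
 *	pid->numbers[1] == { .nr = 2,     .ns = child_ns }	// in-ns view
 *
 * pid_nr_ns() below walks exactly this array to translate a struct pid
 * into the number a given namespace sees.
 */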

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/memblock.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_ns.h>
#include <linux/refcount.h>
#include <linux/anon_inodes.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/idr.h>
#include <net/sock.h>
#include <uapi/linux/pidfd.h>

struct pid init_struct_pid = {
	.count		= REFCOUNT_INIT(1),
	.tasks		= {
		{ .first = NULL },
		{ .first = NULL },
		{ .first = NULL },
	},
	.level		= 0,
	.numbers	= { {
		.nr		= 0,
		.ns		= &init_pid_ns,
	}, }
};

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales to up to 4 million PIDs, runtime.
 */
struct pid_namespace init_pid_ns = {
	.ns.count = REFCOUNT_INIT(2),
	.idr = IDR_INIT(init_pid_ns.idr),
	.pid_allocated = PIDNS_ADDING,
	.level = 0,
	.child_reaper = &init_task,
	.user_ns = &init_user_ns,
	.ns.inum = PROC_PID_INIT_INO,
#ifdef CONFIG_PID_NS
	.ns.ops = &pidns_operations,
#endif
};
EXPORT_SYMBOL_GPL(init_pid_ns);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
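
/*
 * Illustrative interleaving of the deadlock described above (a sketch,
 * not a trace from a real system):
 *
 *	CPU0 (detach_pid -> free_pid)	CPU1 (any pidmap_lock taker)
 *	write_lock_irq(&tasklist_lock);
 *					spin_lock(&pidmap_lock);
 *	spin_lock(&pidmap_lock);	<interrupt>
 *	  ...spins, CPU1 holds it	  read_lock(&tasklist_lock);
 *					  ...spins, CPU0 holds it
 *
 * Keeping interrupts off while pidmap_lock is held makes the interrupt
 * on CPU1 impossible and breaks the cycle.
 */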

void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if (refcount_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);
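
/*
 * Illustrative sketch of the usual reference pairing (assumed caller, not
 * part of this file): get_task_pid()/get_pid() take the reference that
 * put_pid() drops, keeping the struct pid valid even if the task exits in
 * between:
 *
 *	struct pid *pid = get_task_pid(task, PIDTYPE_PID);
 *	...
 *	put_pid(pid);	// frees the pid on the last put
 */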

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++) {
		struct upid *upid = pid->numbers + i;
		struct pid_namespace *ns = upid->ns;
		switch (--ns->pid_allocated) {
		case 2:
		case 1:
			/* When all that is left in the pid namespace
			 * is the reaper wake up the reaper.  The reaper
			 * may be sleeping in zap_pid_ns_processes().
			 */
			wake_up_process(ns->child_reaper);
			break;
		case PIDNS_ADDING:
			/* Handle a fork failure of the first process */
			WARN_ON(ns->child_reaper);
			ns->pid_allocated = 0;
			break;
		}

		idr_remove(&ns->idr, upid->nr);
	}
	spin_unlock_irqrestore(&pidmap_lock, flags);

	call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
		      size_t set_tid_size)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;
	int retval = -ENOMEM;

	/*
	 * set_tid_size contains the size of the set_tid array. Starting at
	 * the most nested currently active PID namespace it tells alloc_pid()
	 * which PID to set for a process in that most nested PID namespace
	 * up to set_tid_size PID namespaces. It does not have to set the PID
	 * for a process in all nested PID namespaces but set_tid_size must
	 * never be greater than the current ns->level + 1.
	 */
	if (set_tid_size > ns->level + 1)
		return ERR_PTR(-EINVAL);
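
	/*
	 * Illustrative layout (hypothetical values): for ns->level == 2 and
	 * set_tid_size == 3, the set_tid[ns->level - i] indexing used below
	 * consumes the array from the most nested namespace outwards:
	 *
	 *	set_tid[0]	PID in the level-2 (most nested) namespace
	 *	set_tid[1]	PID in the level-1 parent
	 *	set_tid[2]	PID in the level-0 initial namespace
	 */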

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		return ERR_PTR(retval);

	tmp = ns;
	pid->level = ns->level;

	for (i = ns->level; i >= 0; i--) {
		int tid = 0;

		if (set_tid_size) {
			tid = set_tid[ns->level - i];

			retval = -EINVAL;
			if (tid < 1 || tid >= pid_max)
				goto out_free;
			/*
			 * Also fail if a PID != 1 is requested and
			 * no PID 1 exists.
			 */
			if (tid != 1 && !tmp->child_reaper)
				goto out_free;
			retval = -EPERM;
			if (!checkpoint_restore_ns_capable(tmp->user_ns))
				goto out_free;
			set_tid_size--;
		}

		idr_preload(GFP_KERNEL);
		spin_lock_irq(&pidmap_lock);

		if (tid) {
			nr = idr_alloc(&tmp->idr, NULL, tid,
				       tid + 1, GFP_ATOMIC);
			/*
			 * If ENOSPC is returned it means that the PID is
			 * already in use. Return EEXIST in that case.
			 */
			if (nr == -ENOSPC)
				nr = -EEXIST;
		} else {
			int pid_min = 1;
			/*
			 * init really needs pid 1, but after reaching the
			 * maximum wrap back to RESERVED_PIDS
			 */
			if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS)
				pid_min = RESERVED_PIDS;

			/*
			 * Store a null pointer so find_pid_ns does not find
			 * a partially initialized PID (see below).
			 */
			nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
					      pid_max, GFP_ATOMIC);
		}
		spin_unlock_irq(&pidmap_lock);
		idr_preload_end();

		if (nr < 0) {
			retval = (nr == -ENOSPC) ? -EAGAIN : nr;
			goto out_free;
		}

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	/*
	 * ENOMEM is not the most obvious choice especially for the case
	 * where the child subreaper has already exited and the pid
	 * namespace denies the creation of any new processes. But ENOMEM
	 * is what we have exposed to userspace for a long time and it is
	 * documented behavior for pid namespaces. So we can't easily
	 * change it even if there were an error code better suited.
	 */
	retval = -ENOMEM;

	get_pid_ns(ns);
	refcount_set(&pid->count, 1);
	spin_lock_init(&pid->lock);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	init_waitqueue_head(&pid->wait_pidfd);
	INIT_HLIST_HEAD(&pid->inodes);

	upid = pid->numbers + ns->level;
	spin_lock_irq(&pidmap_lock);
	if (!(ns->pid_allocated & PIDNS_ADDING))
		goto out_unlock;
	for ( ; upid >= pid->numbers; --upid) {
		/* Make the PID visible to find_pid_ns. */
		idr_replace(&upid->ns->idr, pid, upid->nr);
		upid->ns->pid_allocated++;
	}
	spin_unlock_irq(&pidmap_lock);

	return pid;

out_unlock:
	spin_unlock_irq(&pidmap_lock);
	put_pid_ns(ns);

out_free:
	spin_lock_irq(&pidmap_lock);
	while (++i <= ns->level) {
		upid = pid->numbers + i;
		idr_remove(&upid->ns->idr, upid->nr);
	}

	/* On failure to allocate the first pid, reset the state */
	if (ns->pid_allocated == PIDNS_ADDING)
		idr_set_cursor(&ns->idr, 0);

	spin_unlock_irq(&pidmap_lock);

	kmem_cache_free(ns->pid_cachep, pid);
	return ERR_PTR(retval);
}
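
/*
 * Illustrative sketch of a caller (modeled on copy_process() in
 * kernel/fork.c; the surrounding variables are assumed, not defined here):
 *
 *	pid = alloc_pid(p->nsproxy->pid_ns_for_children, args->set_tid,
 *			args->set_tid_size);
 *	if (IS_ERR(pid)) {
 *		retval = PTR_ERR(pid);
 *		goto bad_fork_cleanup_thread;
 *	}
 *
 * A zero set_tid_size lets the IDR pick numbers cyclically; a non-zero
 * size requests specific numbers, as checkpoint/restore does via clone3().
 */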

void disable_pid_allocation(struct pid_namespace *ns)
{
	spin_lock_irq(&pidmap_lock);
	ns->pid_allocated &= ~PIDNS_ADDING;
	spin_unlock_irq(&pidmap_lock);
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	return idr_find(&ns->idr, nr);
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);
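
/*
 * Illustrative sketch (assumed caller): find_vpid() returns an
 * unreferenced pointer, so the result may only be used inside the RCU
 * read-side section, or pinned with get_pid() before rcu_read_unlock()
 * as find_get_pid() below does:
 *
 *	rcu_read_lock();
 *	pid = find_vpid(nr);
 *	...use pid only here...
 *	rcu_read_unlock();
 */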

static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type)
{
	return (type == PIDTYPE_PID) ?
		&task->thread_pid :
		&task->signal->pids[type];
}

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid = *task_pid_ptr(task, type);
	hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]);
}

static void __change_pid(struct task_struct *task, enum pid_type type,
			struct pid *new)
{
	struct pid **pid_ptr = task_pid_ptr(task, type);
	struct pid *pid;
	int tmp;

	pid = *pid_ptr;

	hlist_del_rcu(&task->pid_links[type]);
	*pid_ptr = new;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (pid_has_task(pid, tmp))
			return;

	free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
	__change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(task, type, pid);
	attach_pid(task, type);
}

void exchange_tids(struct task_struct *left, struct task_struct *right)
{
	struct pid *pid1 = left->thread_pid;
	struct pid *pid2 = right->thread_pid;
	struct hlist_head *head1 = &pid1->tasks[PIDTYPE_PID];
	struct hlist_head *head2 = &pid2->tasks[PIDTYPE_PID];

	/* Swap the single entry tid lists */
	hlists_swap_heads_rcu(head1, head2);

	/* Swap the per task_struct pid */
	rcu_assign_pointer(left->thread_pid, pid2);
	rcu_assign_pointer(right->thread_pid, pid1);

	/* Swap the cached value */
	WRITE_ONCE(left->pid, pid_nr(pid2));
	WRITE_ONCE(right->pid, pid_nr(pid1));
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
			enum pid_type type)
{
	if (type == PIDTYPE_PID)
		new->thread_pid = old->thread_pid;
	hlist_replace_rcu(&old->pid_links[type], &new->pid_links[type]);
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
					      lockdep_tasklist_lock_is_held());
		if (first)
			result = hlist_entry(first, struct task_struct, pid_links[(type)]);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "find_task_by_pid_ns() needs rcu_read_lock() protection");
	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}

struct task_struct *find_get_task_by_vpid(pid_t nr)
{
	struct task_struct *task;

	rcu_read_lock();
	task = find_task_by_vpid(nr);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	return task;
}

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	pid = get_pid(rcu_dereference(*task_pid_ptr(task, type)));
	rcu_read_unlock();
	return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);

pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);
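
/*
 * Illustrative translation (reusing the hypothetical level-1 task from
 * the sketch near the top of this file):
 *
 *	pid_nr_ns(pid, &init_pid_ns)	== 12345	// initial-ns view
 *	pid_nr_ns(pid, child_ns)	== 2		// view from inside
 *	pid_nr_ns(pid, deeper_ns)	== 0		// not visible there
 *
 * pid_vnr() simply picks the namespace the current task is in.
 */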

pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns)
{
	pid_t nr = 0;

	rcu_read_lock();
	if (!ns)
		ns = task_active_pid_ns(current);
	nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns);
	rcu_read_unlock();

	return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	return idr_get_next(&ns->idr, &nr);
}

struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags)
{
	struct fd f;
	struct pid *pid;

	f = fdget(fd);
	if (!f.file)
		return ERR_PTR(-EBADF);

	pid = pidfd_pid(f.file);
	if (!IS_ERR(pid)) {
		get_pid(pid);
		*flags = f.file->f_flags;
	}

	fdput(f);
	return pid;
}

/**
 * pidfd_create() - Create a new pid file descriptor.
 *
 * @pid:   struct pid that the pidfd will reference
 * @flags: flags to pass
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set.
 *
 * Note, that this function can only be called after the fd table has
 * been unshared to avoid leaking the pidfd to the new process.
 *
 * Return: On success, a cloexec pidfd is returned.
 *         On error, a negative errno number will be returned.
 */
static int pidfd_create(struct pid *pid, unsigned int flags)
{
	int fd;

	fd = anon_inode_getfd("[pidfd]", &pidfd_fops, get_pid(pid),
			      flags | O_RDWR | O_CLOEXEC);
	if (fd < 0)
		put_pid(pid);

	return fd;
}

/**
 * pidfd_open() - Open new pid file descriptor.
 *
 * @pid:   pid for which to retrieve a pidfd
 * @flags: flags to pass
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set for
 * the process identified by @pid. Currently, the process identified by
 * @pid must be a thread-group leader. This restriction currently exists
 * for all aspects of pidfds including pidfd creation (CLONE_PIDFD cannot
 * be used with CLONE_THREAD) and pidfd polling (only supports thread group
 * leaders).
 *
 * Return: On success, a cloexec pidfd is returned.
 *         On error, a negative errno number will be returned.
 */
SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
{
	int fd;
	struct pid *p;

	if (flags & ~PIDFD_NONBLOCK)
		return -EINVAL;

	if (pid <= 0)
		return -EINVAL;

	p = find_get_pid(pid);
	if (!p)
		return -ESRCH;

	if (pid_has_task(p, PIDTYPE_TGID))
		fd = pidfd_create(p, flags);
	else
		fd = -EINVAL;

	put_pid(p);
	return fd;
}
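
/*
 * Illustrative userspace sketch (hypothetical PID value; glibc provides
 * no wrapper for this syscall at the time of writing, hence raw
 * syscall(2)):
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int pidfd = syscall(SYS_pidfd_open, (pid_t)1234, 0);
 *	if (pidfd < 0)
 *		...error, see errno...
 *
 * The returned pidfd is O_CLOEXEC and can be polled for process exit or
 * used with pidfd_send_signal() and waitid(P_PIDFD, ...).
 */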

void __init pid_idr_init(void)
{
	/* Verify no one has done anything silly: */
	BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING);

	/* bump default and minimum pid_max based on number of cpus */
	pid_max = min(pid_max_max, max_t(int, pid_max,
				PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
	pid_max_min = max_t(int, pid_max_min,
				PIDS_PER_CPU_MIN * num_possible_cpus());
	pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);

	idr_init(&init_pid_ns.idr);

	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
}

static struct file *__pidfd_fget(struct task_struct *task, int fd)
{
	struct file *file;
	int ret;

	ret = down_read_killable(&task->signal->exec_update_lock);
	if (ret)
		return ERR_PTR(ret);

	if (ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS))
		file = fget_task(task, fd);
	else
		file = ERR_PTR(-EPERM);

	up_read(&task->signal->exec_update_lock);

	return file ?: ERR_PTR(-EBADF);
}

static int pidfd_getfd(struct pid *pid, int fd)
{
	struct task_struct *task;
	struct file *file;
	int ret;

	task = get_pid_task(pid, PIDTYPE_PID);
	if (!task)
		return -ESRCH;

	file = __pidfd_fget(task, fd);
	put_task_struct(task);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ret = receive_fd(file, O_CLOEXEC);
	fput(file);

	return ret;
}

/**
 * sys_pidfd_getfd() - Get a file descriptor from another process
 *
 * @pidfd:	the pidfd file descriptor of the process
 * @fd:		the file descriptor number to get
 * @flags:	flags on how to get the fd (reserved)
 *
 * This syscall gets a copy of a file descriptor from another process
 * based on the pidfd, and file descriptor number. It requires that
 * the calling process has the ability to ptrace the process represented
 * by the pidfd. The process which is having its file descriptor copied
 * is otherwise unaffected.
 *
 * Return: On success, a cloexec file descriptor is returned.
 *         On error, a negative errno number will be returned.
 */
SYSCALL_DEFINE3(pidfd_getfd, int, pidfd, int, fd,
		unsigned int, flags)
{
	struct pid *pid;
	struct fd f;
	int ret;

	/* flags is currently unused - make sure it's unset */
	if (flags)
		return -EINVAL;

	f = fdget(pidfd);
	if (!f.file)
		return -EBADF;

	pid = pidfd_pid(f.file);
	if (IS_ERR(pid))
		ret = PTR_ERR(pid);
	else
		ret = pidfd_getfd(pid, fd);

	fdput(f);
	return ret;
}
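
/*
 * Illustrative userspace sketch (hypothetical descriptor values, raw
 * syscall as above):
 *
 *	int localfd = syscall(SYS_pidfd_getfd, pidfd, remotefd, 0);
 *
 * On success, localfd is an O_CLOEXEC duplicate of the target process's
 * remotefd; the call fails with EPERM unless the caller may ptrace the
 * target (PTRACE_MODE_ATTACH_REALCREDS).
 */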