// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 Nadia Yvette Chambers, IBM
 * (C) 2004 Nadia Yvette Chambers, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given ID's on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, which bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario, when all but one of the 1 million possible PIDs
 * are already allocated, costs a scan of 32 list entries and at most
 * PAGE_SIZE bytes. The typical fastpath is a single successful setbit.
 * Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *    Many thanks to Oleg Nesterov for comments and help
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/memblock.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_ns.h>
#include <linux/refcount.h>
#include <linux/anon_inodes.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/idr.h>
struct pid init_struct_pid = {
	.count		= REFCOUNT_INIT(1),
	.tasks		= {
		{ .first = NULL },
		{ .first = NULL },
		{ .first = NULL },
	},
	.level		= 0,
	.numbers	= { {
		.nr		= 0,
		.ns		= &init_pid_ns,
	}, }
};
int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;
/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales up to 4 million PIDs at runtime.
 */
struct pid_namespace init_pid_ns = {
	.kref		= KREF_INIT(2),
	.idr		= IDR_INIT(init_pid_ns.idr),
	.pid_allocated	= PIDNS_ADDING,
	.level		= 0,
	.child_reaper	= &init_task,
	.user_ns	= &init_user_ns,
	.ns.inum	= PROC_PID_INIT_INO,
#ifdef CONFIG_PID_NS
	.ns.ops		= &pidns_operations,
#endif
};
EXPORT_SYMBOL_GPL(init_pid_ns);
/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
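
/*
 * A sketch of the two-CPU interleaving behind the deadlock described
 * above (illustrative, not code from this file):
 *
 *	CPU0				CPU1
 *	----				----
 *	spin_lock(&pidmap_lock);	write_lock_irq(&tasklist_lock);
 *	<interrupt>			detach_pid()
 *	  read_lock(&tasklist_lock);	  free_pid()
 *	  spins: CPU1 write-holds it	    spin_lock(&pidmap_lock);
 *					    spins: CPU0 holds it
 *
 * Disabling interrupts while pidmap_lock is held prevents CPU0 from
 * taking the interrupt, which breaks the cycle.
 */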
void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if (refcount_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);
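
/*
 * Reference-counting sketch (illustrative, not part of the original
 * file): every reference obtained via get_pid() or a get_*() helper
 * must be balanced by put_pid(), e.g.:
 *
 *	struct pid *pid = get_task_pid(task, PIDTYPE_PID);
 *	if (pid) {
 *		... use pid ...
 *		put_pid(pid);
 *	}
 */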
static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);

	put_pid(pid);
}
void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++) {
		struct upid *upid = pid->numbers + i;
		struct pid_namespace *ns = upid->ns;

		switch (--ns->pid_allocated) {
		case 2:
		case 1:
			/* When all that is left in the pid namespace
			 * is the reaper wake up the reaper. The reaper
			 * may be sleeping in zap_pid_ns_processes().
			 */
			wake_up_process(ns->child_reaper);
			break;
		case PIDNS_ADDING:
			/* Handle a fork failure of the first process */
			WARN_ON(ns->child_reaper);
			ns->pid_allocated = 0;
			/* fall through */
		case 0:
			schedule_work(&ns->proc_work);
			break;
		}

		idr_remove(&ns->idr, upid->nr);
	}
	spin_unlock_irqrestore(&pidmap_lock, flags);

	call_rcu(&pid->rcu, delayed_put_pid);
}
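
/*
 * Ordering note (explanatory, not from the original file): free_pid()
 * removes the pid from each namespace's IDR under pidmap_lock first,
 * so new lookups can no longer find it, but the allocation reference
 * is only dropped after an RCU grace period via delayed_put_pid().
 * A concurrent reader that already found the pid under rcu_read_lock()
 * can therefore keep using it until it leaves its read-side section.
 */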
struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;
	int retval = -ENOMEM;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		return ERR_PTR(retval);

	tmp = ns;
	pid->level = ns->level;

	for (i = ns->level; i >= 0; i--) {
		int pid_min = 1;

		idr_preload(GFP_KERNEL);
		spin_lock_irq(&pidmap_lock);

		/*
		 * init really needs pid 1, but after reaching the maximum
		 * wrap back to RESERVED_PIDS
		 */
		if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS)
			pid_min = RESERVED_PIDS;

		/*
		 * Store a null pointer so find_pid_ns does not find
		 * a partially initialized PID (see below).
		 */
		nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
				      pid_max, GFP_ATOMIC);
		spin_unlock_irq(&pidmap_lock);
		idr_preload_end();

		if (nr < 0) {
			retval = (nr == -ENOSPC) ? -EAGAIN : nr;
			goto out_free;
		}

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	if (unlikely(is_child_reaper(pid))) {
		if (pid_ns_prepare_proc(ns))
			goto out_free;
	}

	get_pid_ns(ns);
	refcount_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	init_waitqueue_head(&pid->wait_pidfd);

	upid = pid->numbers + ns->level;
	spin_lock_irq(&pidmap_lock);
	if (!(ns->pid_allocated & PIDNS_ADDING))
		goto out_unlock;
	for ( ; upid >= pid->numbers; --upid) {
		/* Make the PID visible to find_pid_ns. */
		idr_replace(&upid->ns->idr, pid, upid->nr);
		upid->ns->pid_allocated++;
	}
	spin_unlock_irq(&pidmap_lock);

	return pid;

out_unlock:
	spin_unlock_irq(&pidmap_lock);
	put_pid_ns(ns);

out_free:
	spin_lock_irq(&pidmap_lock);
	while (++i <= ns->level) {
		upid = pid->numbers + i;
		idr_remove(&upid->ns->idr, upid->nr);
	}

	/* On failure to allocate the first pid, reset the state */
	if (ns->pid_allocated == PIDNS_ADDING)
		idr_set_cursor(&ns->idr, 0);

	spin_unlock_irq(&pidmap_lock);

	kmem_cache_free(ns->pid_cachep, pid);
	return ERR_PTR(retval);
}
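
/*
 * Caller sketch (illustrative): alloc_pid() is invoked from
 * copy_process() during fork, roughly:
 *
 *	pid = alloc_pid(p->nsproxy->pid_ns_for_children);
 *	if (IS_ERR(pid)) {
 *		retval = PTR_ERR(pid);
 *		goto bad_fork_cleanup;
 *	}
 *
 * (the cleanup label here is illustrative). The -ENOSPC to -EAGAIN
 * translation above is what makes fork() return EAGAIN when the PID
 * space is exhausted.
 */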
void disable_pid_allocation(struct pid_namespace *ns)
{
	spin_lock_irq(&pidmap_lock);
	ns->pid_allocated &= ~PIDNS_ADDING;
	spin_unlock_irq(&pidmap_lock);
}
struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	return idr_find(&ns->idr, nr);
}
EXPORT_SYMBOL_GPL(find_pid_ns);
struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);
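
/*
 * Lookup idiom (illustrative, not part of the original file):
 * find_vpid()/find_pid_ns() return an unreferenced pid, so a caller
 * must either stay inside rcu_read_lock() for the whole use, or take
 * a reference before dropping it:
 *
 *	rcu_read_lock();
 *	task = pid_task(find_vpid(nr), PIDTYPE_PID);
 *	if (task)
 *		get_task_struct(task);
 *	rcu_read_unlock();
 */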
static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type)
{
	return (type == PIDTYPE_PID) ?
		&task->thread_pid :
		&task->signal->pids[type];
}
/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid = *task_pid_ptr(task, type);

	hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]);
}
static void __change_pid(struct task_struct *task, enum pid_type type,
			struct pid *new)
{
	struct pid **pid_ptr = task_pid_ptr(task, type);
	struct pid *pid;
	int tmp;

	pid = *pid_ptr;

	hlist_del_rcu(&task->pid_links[type]);
	*pid_ptr = new;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}
void detach_pid(struct task_struct *task, enum pid_type type)
{
	__change_pid(task, type, NULL);
}
void change_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(task, type, pid);
	attach_pid(task, type);
}
/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
			enum pid_type type)
{
	if (type == PIDTYPE_PID)
		new->thread_pid = old->thread_pid;
	hlist_replace_rcu(&old->pid_links[type], &new->pid_links[type]);
}
struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;

	if (pid) {
		struct hlist_node *first;

		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
					      lockdep_tasklist_lock_is_held());
		if (first)
			result = hlist_entry(first, struct task_struct, pid_links[(type)]);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);
/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "find_task_by_pid_ns() needs rcu_read_lock() protection");
	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}
struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}
struct task_struct *find_get_task_by_vpid(pid_t nr)
{
	struct task_struct *task;

	rcu_read_lock();
	task = find_task_by_vpid(nr);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	return task;
}
struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(rcu_dereference(*task_pid_ptr(task, type)));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);
struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;

	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();

	return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);
struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);
pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);
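
/*
 * Worked example (illustrative, hypothetical numbers): a task forked
 * inside a child pid namespace has pid->level == 1 and one upid per
 * level, say nr 1042 in init_pid_ns and nr 7 in the child. Then
 * pid_nr_ns(pid, &init_pid_ns) == 1042, pid_nr_ns(pid, child_ns) == 7,
 * and a sibling namespace at the same level gets 0 because the upid
 * at its level belongs to a different namespace.
 */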
pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns)
{
	pid_t nr = 0;

	rcu_read_lock();
	if (!ns)
		ns = task_active_pid_ns(current);
	if (likely(pid_alive(task)))
		nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns);
	rcu_read_unlock();

	return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);
struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);
/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	return idr_get_next(&ns->idr, &nr);
}
/**
 * pidfd_create() - Create a new pid file descriptor.
 *
 * @pid:  struct pid that the pidfd will reference
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set.
 *
 * Note that this function can only be called after the fd table has
 * been unshared to avoid leaking the pidfd to the new process.
 *
 * Return: On success, a cloexec pidfd is returned.
 *         On error, a negative errno number will be returned.
 */
static int pidfd_create(struct pid *pid)
{
	int fd;

	fd = anon_inode_getfd("[pidfd]", &pidfd_fops, get_pid(pid),
			      O_RDWR | O_CLOEXEC);
	if (fd < 0)
		put_pid(pid);

	return fd;
}
/**
 * pidfd_open() - Open new pid file descriptor.
 *
 * @pid:   pid for which to retrieve a pidfd
 * @flags: flags to pass
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set for
 * the process identified by @pid. Currently, the process identified by
 * @pid must be a thread-group leader. This restriction currently exists
 * for all aspects of pidfds including pidfd creation (CLONE_PIDFD cannot
 * be used with CLONE_THREAD) and pidfd polling (only supports thread group
 * leaders).
 *
 * Return: On success, a cloexec pidfd is returned.
 *         On error, a negative errno number will be returned.
 */
SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
{
	int fd, ret;
	struct pid *p;

	if (flags)
		return -EINVAL;

	if (pid <= 0)
		return -EINVAL;

	p = find_get_pid(pid);
	if (!p)
		return -ESRCH;

	ret = 0;
	rcu_read_lock();
	if (!pid_task(p, PIDTYPE_TGID))
		ret = -EINVAL;
	rcu_read_unlock();

	fd = ret ?: pidfd_create(p);
	put_pid(p);
	return fd;
}
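
/*
 * Userspace sketch (illustrative, not part of this file): libc may not
 * provide a wrapper, so callers typically use the raw syscall:
 *
 *	int pidfd = syscall(__NR_pidfd_open, pid, 0);
 *
 * The returned descriptor can be polled to detect exit of the thread
 * group or handed to pidfd_send_signal().
 */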
void __init pid_idr_init(void)
{
	/* Verify no one has done anything silly: */
	BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING);

	/* bump default and minimum pid_max based on number of cpus */
	pid_max = min(pid_max_max, max_t(int, pid_max,
				PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
	pid_max_min = max_t(int, pid_max_min,
				PIDS_PER_CPU_MIN * num_possible_cpus());
	pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);

	idr_init(&init_pid_ns.idr);

	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
}