/*
 * (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 * (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 */
#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/syscalls.h>
#include <linux/err.h>
#include <linux/acct.h>
#include <linux/slab.h>
#include <linux/proc_ns.h>
#include <linux/reboot.h>
#include <linux/export.h>
struct pid_cache {
	int nr_ids;
	char name[16];		/* holds "pid_%d" for the given nr_ids */
	struct kmem_cache *cachep;
	struct list_head list;
};
static LIST_HEAD(pid_caches_lh);
static DEFINE_MUTEX(pid_caches_mutex);
static struct kmem_cache *pid_ns_cachep;
/*
 * creates the kmem cache to allocate pids from.
 * @nr_ids: the number of numerical ids this pid will have to carry
 */
static struct kmem_cache *create_pid_cachep(int nr_ids)
{
	struct pid_cache *pcache;
	struct kmem_cache *cachep;

	mutex_lock(&pid_caches_mutex);
	/* Reuse an existing cache if one already serves this nr_ids. */
	list_for_each_entry(pcache, &pid_caches_lh, list)
		if (pcache->nr_ids == nr_ids)
			goto out;

	pcache = kmalloc(sizeof(struct pid_cache), GFP_KERNEL);
	if (pcache == NULL)
		goto err_alloc;

	snprintf(pcache->name, sizeof(pcache->name), "pid_%d", nr_ids);
	cachep = kmem_cache_create(pcache->name,
			sizeof(struct pid) + (nr_ids - 1) * sizeof(struct upid),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (cachep == NULL)
		goto err_cachep;

	pcache->nr_ids = nr_ids;
	pcache->cachep = cachep;
	list_add(&pcache->list, &pid_caches_lh);
out:
	mutex_unlock(&pid_caches_mutex);
	return pcache->cachep;

err_cachep:
	kfree(pcache);
err_alloc:
	mutex_unlock(&pid_caches_mutex);
	return NULL;
}
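/*
 * Sizing note (illustrative, derived from the allocation above): a pid in a
 * namespace at nesting level N carries N + 1 numerical ids, one struct upid
 * per namespace up to and including the initial one. A task in a level-1
 * namespace therefore allocates from the "pid_2" cache, whose objects hold
 * sizeof(struct pid) plus one extra struct upid.
 */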
static void proc_cleanup_work(struct work_struct *work)
{
	struct pid_namespace *ns = container_of(work, struct pid_namespace, proc_work);
	pid_ns_release_proc(ns);
}
/* MAX_PID_NS_LEVEL is needed for limiting size of 'struct pid' */
#define MAX_PID_NS_LEVEL 32
static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns,
	struct pid_namespace *parent_pid_ns)
{
	struct pid_namespace *ns;
	unsigned int level = parent_pid_ns->level + 1;
	int i;
	int err;

	if (level > MAX_PID_NS_LEVEL) {
		err = -EINVAL;
		goto out;
	}

	err = -ENOMEM;
	ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
	if (ns == NULL)
		goto out;

	ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!ns->pidmap[0].page)
		goto out_free;

	ns->pid_cachep = create_pid_cachep(level + 1);
	if (ns->pid_cachep == NULL)
		goto out_free_map;

	err = ns_alloc_inum(&ns->ns);
	if (err)
		goto out_free_map;
	ns->ns.ops = &pidns_operations;

	kref_init(&ns->kref);
	ns->level = level;
	ns->parent = get_pid_ns(parent_pid_ns);
	ns->user_ns = get_user_ns(user_ns);
	ns->nr_hashed = PIDNS_HASH_ADDING;
	INIT_WORK(&ns->proc_work, proc_cleanup_work);

	set_bit(0, ns->pidmap[0].page);
	atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1);

	for (i = 1; i < PIDMAP_ENTRIES; i++)
		atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE);

	return ns;

out_free_map:
	kfree(ns->pidmap[0].page);
out_free:
	kmem_cache_free(pid_ns_cachep, ns);
out:
	return ERR_PTR(err);
}
static void delayed_free_pidns(struct rcu_head *p)
{
	kmem_cache_free(pid_ns_cachep,
			container_of(p, struct pid_namespace, rcu));
}
static void destroy_pid_namespace(struct pid_namespace *ns)
{
	int i;

	ns_free_inum(&ns->ns);
	for (i = 0; i < PIDMAP_ENTRIES; i++)
		kfree(ns->pidmap[i].page);
	put_user_ns(ns->user_ns);
	call_rcu(&ns->rcu, delayed_free_pidns);
}
struct pid_namespace *copy_pid_ns(unsigned long flags,
	struct user_namespace *user_ns, struct pid_namespace *old_ns)
{
	if (!(flags & CLONE_NEWPID))
		return get_pid_ns(old_ns);
	if (task_active_pid_ns(current) != old_ns)
		return ERR_PTR(-EINVAL);
	return create_pid_namespace(user_ns, old_ns);
}
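/*
 * Illustrative userspace counterpart (not part of this file, assumes
 * CAP_SYS_ADMIN): a new pid namespace is normally requested with
 * clone(CLONE_NEWPID) or unshare(CLONE_NEWPID) followed by fork():
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		if (unshare(CLONE_NEWPID) != 0)
 *			return 1;
 *		pid_t child = fork();
 *		if (child == 0) {
 *			// First task in the new namespace: getpid() == 1.
 *			_exit(0);
 *		}
 *		waitpid(child, NULL, 0);
 *		return 0;
 *	}
 *
 * The unshare() caller itself stays in its original pid namespace; only the
 * children it creates afterwards are placed in the new one.
 */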
static void free_pid_ns(struct kref *kref)
{
	struct pid_namespace *ns;

	ns = container_of(kref, struct pid_namespace, kref);
	destroy_pid_namespace(ns);
}
void put_pid_ns(struct pid_namespace *ns)
{
	struct pid_namespace *parent;

	while (ns != &init_pid_ns) {
		parent = ns->parent;
		if (!kref_put(&ns->kref, free_pid_ns))
			break;
		ns = parent;
	}
}
EXPORT_SYMBOL_GPL(put_pid_ns);
void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
	int nr;
	int rc;
	struct task_struct *task, *me = current;
	int init_pids = thread_group_leader(me) ? 1 : 2;

	/* Don't allow any more processes into the pid namespace */
	disable_pid_allocation(pid_ns);

	/*
	 * Ignore SIGCHLD causing any terminated children to autoreap.
	 * This speeds up the namespace shutdown, plus see the comment
	 * below.
	 */
	spin_lock_irq(&me->sighand->siglock);
	me->sighand->action[SIGCHLD - 1].sa.sa_handler = SIG_IGN;
	spin_unlock_irq(&me->sighand->siglock);

	/*
	 * The last thread in the cgroup-init thread group is terminating.
	 * Find remaining pids in the namespace, signal and wait for them
	 * to exit.
	 *
	 * Note: This signals each thread in the namespace - even those that
	 *	 belong to the same thread group. To avoid this, we would have
	 *	 to walk the entire tasklist looking for processes in this
	 *	 namespace, but that could be unnecessarily expensive if the
	 *	 pid namespace has just a few processes. Or we need to
	 *	 maintain a tasklist for each pid namespace.
	 */
	read_lock(&tasklist_lock);
	nr = next_pidmap(pid_ns, 1);
	while (nr > 0) {
		rcu_read_lock();

		task = pid_task(find_vpid(nr), PIDTYPE_PID);
		if (task && !__fatal_signal_pending(task))
			send_sig_info(SIGKILL, SEND_SIG_FORCED, task);

		rcu_read_unlock();

		nr = next_pidmap(pid_ns, nr);
	}
	read_unlock(&tasklist_lock);

	/*
	 * Reap the EXIT_ZOMBIE children we had before we ignored SIGCHLD.
	 * sys_wait4() will also block until our children traced from the
	 * parent namespace are detached and become EXIT_DEAD.
	 */
	do {
		clear_thread_flag(TIF_SIGPENDING);
		rc = sys_wait4(-1, NULL, __WALL, NULL);
	} while (rc != -ECHILD);

	/*
	 * sys_wait4() above can't reap the EXIT_DEAD children but we do not
	 * really care, we could reparent them to the global init. We could
	 * exit and reap ->child_reaper even if it is not the last thread in
	 * this pid_ns, free_pid(nr_hashed == 0) calls proc_cleanup_work(),
	 * pid_ns can not go away until proc_kill_sb() drops the reference.
	 *
	 * But this ns can also have other tasks injected by setns()+fork().
	 * Again, ignoring the user visible semantics we do not really need
	 * to wait until they are all reaped, but they can be reparented to
	 * us and thus we need to ensure that pid->child_reaper stays valid
	 * until they all go away. See free_pid()->wake_up_process().
	 *
	 * We rely on ignored SIGCHLD, an injected zombie must be autoreaped
	 * if reparented.
	 */
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (pid_ns->nr_hashed == init_pids)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (pid_ns->reboot)
		current->signal->group_exit_code = pid_ns->reboot;

	acct_exit_ns(pid_ns);
	return;
}
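/*
 * Behavioural note (illustrative): this runs when the namespace's init task
 * (its child_reaper) exits. Every remaining task in the namespace receives
 * SIGKILL above, and with pid allocation disabled no new pid can be hashed,
 * so a later fork() into this namespace fails; the namespace is effectively
 * dead once its init is gone.
 */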
#ifdef CONFIG_CHECKPOINT_RESTORE
static int pid_ns_ctl_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(current);
	struct ctl_table tmp = *table;

	if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * Writing directly to ns' last_pid field is OK, since this field
	 * is volatile in a living namespace anyway and code writing to
	 * it should synchronize its usage with external means.
	 */

	tmp.data = &pid_ns->last_pid;
	return proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
}
static struct ctl_table pid_ns_ctl_table[] = {
	{
		.procname	= "ns_last_pid",
		.maxlen		= sizeof(int),
		.mode		= 0666, /* permissions are checked in the handler */
		.proc_handler	= pid_ns_ctl_handler,
	},
	{ }
};
static struct ctl_path kern_path[] = { { .procname = "kernel", }, { } };
#endif	/* CONFIG_CHECKPOINT_RESTORE */
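/*
 * Usage sketch (illustrative, not part of this file): the ns_last_pid sysctl
 * lets a sufficiently privileged task (CAP_SYS_ADMIN in the pid namespace's
 * owning user namespace) choose the next pid, which checkpoint/restore tools
 * such as CRIU rely on:
 *
 *	echo 9999 > /proc/sys/kernel/ns_last_pid
 *	# the next process forked in this pid namespace gets pid 10000,
 *	# provided that pid is still free
 */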
int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
{
	if (pid_ns == &init_pid_ns)
		return 0;

	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART2:
	case LINUX_REBOOT_CMD_RESTART:
		pid_ns->reboot = SIGHUP;
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
	case LINUX_REBOOT_CMD_HALT:
		pid_ns->reboot = SIGINT;
		break;
	default:
		return -EINVAL;
	}

	read_lock(&tasklist_lock);
	force_sig(SIGKILL, pid_ns->child_reaper);
	read_unlock(&tasklist_lock);

	do_exit(0);

	/* Not reached */
	return 0;
}
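/*
 * Semantics note (illustrative): reboot(2) called by a container's init does
 * not reboot the host. The value stashed in pid_ns->reboot above becomes the
 * namespace init's group_exit_code via zap_pid_ns_processes(), so the parent
 * namespace observes that init terminated by SIGHUP (restart) or SIGINT
 * (halt/power off) and can react accordingly.
 */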
static inline struct pid_namespace *to_pid_ns(struct ns_common *ns)
{
	return container_of(ns, struct pid_namespace, ns);
}
static struct ns_common *pidns_get(struct task_struct *task)
{
	struct pid_namespace *ns;

	rcu_read_lock();
	ns = task_active_pid_ns(task);
	if (ns)
		get_pid_ns(ns);
	rcu_read_unlock();

	return ns ? &ns->ns : NULL;
}
static void pidns_put(struct ns_common *ns)
{
	put_pid_ns(to_pid_ns(ns));
}
static int pidns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct pid_namespace *active = task_active_pid_ns(current);
	struct pid_namespace *ancestor, *new = to_pid_ns(ns);

	if (!ns_capable(new->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * Only allow entering the current active pid namespace
	 * or a child of the current active pid namespace.
	 *
	 * This is required for fork to return a usable pid value and
	 * this maintains the property that processes and their
	 * children can not escape their current pid namespace.
	 */
	if (new->level < active->level)
		return -EINVAL;

	ancestor = new;
	while (ancestor->level > active->level)
		ancestor = ancestor->parent;
	if (ancestor != active)
		return -EINVAL;

	put_pid_ns(nsproxy->pid_ns_for_children);
	nsproxy->pid_ns_for_children = get_pid_ns(new);
	return 0;
}
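/*
 * Usage note (illustrative): only nsproxy->pid_ns_for_children is switched
 * here, so setns(fd, CLONE_NEWPID) never migrates the calling task itself;
 * the caller keeps its existing pids and only children forked after the call
 * start out in the target namespace.
 */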
const struct proc_ns_operations pidns_operations = {
	.name		= "pid",
	.type		= CLONE_NEWPID,
	.get		= pidns_get,
	.put		= pidns_put,
	.install	= pidns_install,
};
static __init int pid_namespaces_init(void)
{
	pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);

#ifdef CONFIG_CHECKPOINT_RESTORE
	register_sysctl_paths(kern_path, pid_ns_ctl_table);
#endif
	return 0;
}

__initcall(pid_namespaces_init);