/*
 * Pid namespaces
 *
 * (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 * (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 */
#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/err.h>
#include <linux/acct.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/reboot.h>

#define BITS_PER_PAGE		(PAGE_SIZE * 8)
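/*
 * A struct pid carries one struct upid per namespace level it is visible
 * in, so pids allocated at different namespace depths have different
 * sizes.  Each depth therefore gets its own kmem cache, described by a
 * struct pid_cache and kept on pid_caches_lh.
 */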
struct pid_cache {
	int nr_ids;
	char name[16];
	struct kmem_cache *cachep;
	struct list_head list;
};
static LIST_HEAD(pid_caches_lh);
static DEFINE_MUTEX(pid_caches_mutex);
static struct kmem_cache *pid_ns_cachep;
/*
 * creates the kmem cache to allocate pids from.
 * @nr_ids: the number of numerical ids this pid will have to carry
 */
static struct kmem_cache *create_pid_cachep(int nr_ids)
{
	struct pid_cache *pcache;
	struct kmem_cache *cachep;

	mutex_lock(&pid_caches_mutex);
	list_for_each_entry(pcache, &pid_caches_lh, list)
		if (pcache->nr_ids == nr_ids)
			goto out;

	pcache = kmalloc(sizeof(struct pid_cache), GFP_KERNEL);
	if (pcache == NULL)
		goto err_alloc;

	snprintf(pcache->name, sizeof(pcache->name), "pid_%d", nr_ids);
	cachep = kmem_cache_create(pcache->name,
			sizeof(struct pid) + (nr_ids - 1) * sizeof(struct upid),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (cachep == NULL)
		goto err_cachep;

	pcache->nr_ids = nr_ids;
	pcache->cachep = cachep;
	list_add(&pcache->list, &pid_caches_lh);
out:
	mutex_unlock(&pid_caches_mutex);
	return pcache->cachep;

err_cachep:
	kfree(pcache);
err_alloc:
	mutex_unlock(&pid_caches_mutex);
	return NULL;
}
static struct pid_namespace *create_pid_namespace(struct pid_namespace *parent_pid_ns)
{
	struct pid_namespace *ns;
	unsigned int level = parent_pid_ns->level + 1;
	int i, err = -ENOMEM;

	ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
	if (ns == NULL)
		goto out;

	ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!ns->pidmap[0].page)
		goto out_free;

	ns->pid_cachep = create_pid_cachep(level + 1);
	if (ns->pid_cachep == NULL)
		goto out_free_map;

	kref_init(&ns->kref);
	ns->level = level;
	ns->parent = get_pid_ns(parent_pid_ns);

	set_bit(0, ns->pidmap[0].page);
	atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1);

	for (i = 1; i < PIDMAP_ENTRIES; i++)
		atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE);

	err = pid_ns_prepare_proc(ns);
	if (err)
		goto out_put_parent_pid_ns;

	return ns;

out_put_parent_pid_ns:
	put_pid_ns(parent_pid_ns);
out_free_map:
	kfree(ns->pidmap[0].page);
out_free:
	kmem_cache_free(pid_ns_cachep, ns);
out:
	return ERR_PTR(err);
}
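/* Free the pidmap pages and the namespace itself. */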
static void destroy_pid_namespace(struct pid_namespace *ns)
{
	int i;

	for (i = 0; i < PIDMAP_ENTRIES; i++)
		kfree(ns->pidmap[i].page);
	kmem_cache_free(pid_ns_cachep, ns);
}
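/*
 * copy_pid_ns() is called on the clone/fork path.  Without CLONE_NEWPID the
 * child simply shares (and pins) the parent's pid namespace; with it, a new
 * namespace is created one level deeper.  Illustrative userspace trigger
 * (not part of this file):
 *
 *	pid = clone(child_fn, stack_top, CLONE_NEWPID | SIGCHLD, arg);
 *
 * Combining CLONE_NEWPID with CLONE_THREAD or CLONE_PARENT is rejected,
 * as the code below shows.
 */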
struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *old_ns)
{
	if (!(flags & CLONE_NEWPID))
		return get_pid_ns(old_ns);
	if (flags & (CLONE_THREAD|CLONE_PARENT))
		return ERR_PTR(-EINVAL);
	return create_pid_namespace(old_ns);
}
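/*
 * kref release callback: runs when the last reference to the namespace is
 * dropped.  Destroys this namespace and releases the reference it held on
 * its parent.
 */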
void free_pid_ns(struct kref *kref)
{
	struct pid_namespace *ns, *parent;

	ns = container_of(kref, struct pid_namespace, kref);

	parent = ns->parent;
	destroy_pid_namespace(ns);

	if (parent != NULL)
		put_pid_ns(parent);
}
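/*
 * Called when the namespace's init (the child reaper) is exiting: SIGKILL
 * every remaining task in the namespace and reap them, so the namespace
 * can be torn down.
 */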
void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
	int nr;
	int rc;
	struct task_struct *task;

	/*
	 * The last thread in the cgroup-init thread group is terminating.
	 * Find the remaining pids in the namespace, signal them and wait for
	 * them to exit.
	 *
	 * Note: This signals each thread in the namespace - even those that
	 *	 belong to the same thread group.  To avoid this, we would have
	 *	 to walk the entire tasklist looking for processes in this
	 *	 namespace, but that could be unnecessarily expensive if the
	 *	 pid namespace has just a few processes.  Or we would need to
	 *	 maintain a tasklist for each pid namespace.
	 */
	read_lock(&tasklist_lock);
	nr = next_pidmap(pid_ns, 1);
	while (nr > 0) {
		rcu_read_lock();

		task = pid_task(find_vpid(nr), PIDTYPE_PID);
		if (task && !__fatal_signal_pending(task))
			send_sig_info(SIGKILL, SEND_SIG_FORCED, task);

		rcu_read_unlock();

		nr = next_pidmap(pid_ns, nr);
	}
	read_unlock(&tasklist_lock);

	do {
		clear_thread_flag(TIF_SIGPENDING);
		rc = sys_wait4(-1, NULL, __WALL, NULL);
	} while (rc != -ECHILD);

	if (pid_ns->reboot)
		current->signal->group_exit_code = pid_ns->reboot;

	acct_exit_ns(pid_ns);
	return;
}
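/*
 * Handler for the kernel.ns_last_pid sysctl.  It exposes the last pid
 * allocated in the caller's own pid namespace, which lets tools such as
 * checkpoint/restore steer which pid will be handed out next.  Writing
 * requires CAP_SYS_ADMIN.
 */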
static int pid_ns_ctl_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table tmp = *table;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * Writing directly to the ns' last_pid field is OK, since this field
	 * is volatile in a living namespace anyway, and code writing to it
	 * should synchronize its usage by external means.
	 */

	tmp.data = &current->nsproxy->pid_ns->last_pid;
	return proc_dointvec(&tmp, write, buffer, lenp, ppos);
}
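/*
 * Illustrative use from userspace (values are examples only):
 *
 *	# cat /proc/sys/kernel/ns_last_pid
 *	# echo 9999 > /proc/sys/kernel/ns_last_pid
 *
 * Assuming pid 10000 is free in the writer's namespace, the next fork()
 * there will allocate it.
 */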
static struct ctl_table pid_ns_ctl_table[] = {
	{
		.procname	= "ns_last_pid",
		.maxlen		= sizeof(int),
		.mode		= 0666, /* permissions are checked in the handler */
		.proc_handler	= pid_ns_ctl_handler,
	},
	{ }
};

static struct ctl_path kern_path[] = { { .procname = "kernel", }, { } };
int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
{
	if (pid_ns == &init_pid_ns)
		return 0;

	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART2:
	case LINUX_REBOOT_CMD_RESTART:
		pid_ns->reboot = SIGHUP;
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
	case LINUX_REBOOT_CMD_HALT:
		pid_ns->reboot = SIGINT;
		break;
	default:
		return -EINVAL;
	}

	read_lock(&tasklist_lock);
	force_sig(SIGKILL, pid_ns->child_reaper);
	read_unlock(&tasklist_lock);

	do_exit(0);

	/* Not reached */
	return 0;
}
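/* Boot-time setup: the pid_namespace slab cache and the ns_last_pid sysctl. */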
static __init int pid_namespaces_init(void)
{
	pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);
	register_sysctl_paths(kern_path, pid_ns_ctl_table);
	return 0;
}

__initcall(pid_namespaces_init);