// SPDX-License-Identifier: GPL-2.0-only
#include "cgroup-internal.h"

#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/sort.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delayacct.h>
#include <linux/pid_namespace.h>
#include <linux/cgroupstats.h>
#include <linux/fs_parser.h>

#include <trace/events/cgroup.h>
/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is avoiding frequent destruction in the middle of consecutive read calls.
 * Expiring in the middle is a performance problem, not a correctness one.
 * 1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY	HZ
/* Controllers blocked by the commandline in v1 */
static u16 cgroup_no_v1_mask;

/* disable named v1 mounts */
static bool cgroup_no_v1_named;
/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/* protects cgroup_subsys->release_agent_path */
static DEFINE_SPINLOCK(release_agent_path_lock);
bool cgroup1_ssid_disabled(int ssid)
{
	return cgroup_no_v1_mask & (1 << ssid);
}
/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroup_root *root;
	int retval = 0;

	mutex_lock(&cgroup_mutex);
	percpu_down_write(&cgroup_threadgroup_rwsem);
	for_each_root(root) {
		struct cgroup *from_cgrp;

		if (root == &cgrp_dfl_root)
			continue;

		spin_lock_irq(&css_set_lock);
		from_cgrp = task_cgroup_from_root(from, root);
		spin_unlock_irq(&css_set_lock);

		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;
	}
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
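/*
 * Illustrative usage (not code from this file): a hypothetical caller
 * that wants a helper task to share current's v1 cgroup memberships
 * would do
 *
 *	ret = cgroup_attach_task_all(current, helper_task);
 *
 * after which helper_task sits in current's cgroup in every mounted v1
 * hierarchy; the default hierarchy is skipped by the loop above.
 */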
/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that, if a task is forking while being migrated, the new child
 * is guaranteed to be either visible in the source cgroup after the
 * parent's migration is complete or put into the target cgroup.  No task
 * can slip out of migration through forking.
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
	DEFINE_CGROUP_MGCTX(mgctx);
	struct cgrp_cset_link *link;
	struct css_task_iter it;
	struct task_struct *task;
	int ret;

	if (cgroup_on_dfl(to))
		return -EINVAL;

	ret = cgroup_migrate_vet_dst(to);
	if (ret)
		return ret;

	mutex_lock(&cgroup_mutex);

	percpu_down_write(&cgroup_threadgroup_rwsem);

	/* all tasks in @from are being moved, all csets are source */
	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &from->cset_links, cset_link)
		cgroup_migrate_add_src(link->cset, to, &mgctx);
	spin_unlock_irq(&css_set_lock);

	ret = cgroup_migrate_prepare_dst(&mgctx);
	if (ret)
		goto out_err;

	/*
	 * Migrate tasks one-by-one until @from is empty.  This fails iff
	 * ->can_attach() fails.
	 */
	do {
		css_task_iter_start(&from->self, 0, &it);

		do {
			task = css_task_iter_next(&it);
		} while (task && (task->flags & PF_EXITING));

		if (task)
			get_task_struct(task);
		css_task_iter_end(&it);

		if (task) {
			ret = cgroup_migrate(task, false, &mgctx);
			if (!ret)
				TRACE_CGROUP_PATH(transfer_tasks, to, task, false);
			put_task_struct(task);
		}
	} while (task && !ret);

out_err:
	cgroup_migrate_finish(&mgctx);
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);
	return ret;
}
/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks.  So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};
/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks").  We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted.  doesn't change as long as
	 * this particular list stays in the list.
	 */
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of xids */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* for delayed destruction */
	struct delayed_work destroy_dwork;
};
/*
 * Used to destroy all pidlists lingering waiting for destroy timer.  None
 * should be left afterwards.
 */
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)
{
	struct cgroup_pidlist *l, *tmp_l;

	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
	mutex_unlock(&cgrp->pidlist_mutex);

	flush_workqueue(cgroup_pidlist_destroy_wq);
	BUG_ON(!list_empty(&cgrp->pidlists));
}
static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
						destroy_dwork);
	struct cgroup_pidlist *tofree = NULL;

	mutex_lock(&l->owner->pidlist_mutex);

	/*
	 * Destroy iff we didn't get queued again.  The state won't change
	 * as destroy_dwork can only be queued while locked.
	 */
	if (!delayed_work_pending(dwork)) {
		list_del(&l->links);
		kvfree(l->list);
		put_pid_ns(l->key.ns);
		tofree = l;
	}

	mutex_unlock(&l->owner->pidlist_mutex);
	kfree(tofree);
}
/*
 * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
 * Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
	int src, dest = 1;

	/*
	 * we presume the 0th element is unique, so i starts at 1.  trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	return dest;
}
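/*
 * Worked example for pidlist_uniq(): the sorted input {1, 1, 3, 3, 5}
 * with length == 5 is compacted in place to {1, 3, 5, 3, 5} and 3 is
 * returned; only the first three entries are meaningful afterwards.
 */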
/*
 * The two pid files - task and cgroup.procs - guaranteed that the result
 * is sorted, which forced this whole pidlist fiasco.  As pid order is
 * different per namespace, each namespace needs differently sorted list,
 * making it impossible to use, for example, single rbtree of member tasks
 * sorted by task pointer.  As pidlists can be fairly large, allocating one
 * per open file is dangerous, so cgroup had to implement shared pool of
 * pidlists keyed by cgroup and namespace.
 */
static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}
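/*
 * Note: comparing by subtraction is safe here because vnr pids are
 * non-negative and bounded well below INT_MAX (PID_MAX_LIMIT), so the
 * difference cannot overflow.
 */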
static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = task_active_pid_ns(current);

	lockdep_assert_held(&cgrp->pidlist_mutex);

	list_for_each_entry(l, &cgrp->pidlists, links)
		if (l->key.type == type && l->key.ns == ns)
			return l;
	return NULL;
}
/*
 * find the appropriate pidlist for our purpose (given procs vs tasks)
 * returns with the lock on that pidlist already held, and takes care
 * of the use count, or returns NULL with no locks held if we're out of
 * memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
						enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	l = cgroup_pidlist_find(cgrp, type);
	if (l)
		return l;

	/* entry not found; create a new one */
	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l)
		return l;

	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
	l->key.type = type;
	/* don't need task_nsproxy() if we're looking at ourself */
	l->key.ns = get_pid_ns(task_active_pid_ns(current));
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	return l;
}
/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct css_task_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough.  This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = kvmalloc_array(length, sizeof(pid_t), GFP_KERNEL);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	css_task_iter_end(&it);
	length = n;
	/* now sort & (if procs) strip out duplicates */
	sort(array, length, sizeof(pid_t), cmppid, NULL);
	if (type == CGROUP_FILE_PROCS)
		length = pidlist_uniq(array, length);

	l = cgroup_pidlist_find_create(cgrp, type);
	if (!l) {
		kvfree(array);
		return -ENOMEM;
	}

	/* store array, freeing old if necessary */
	kvfree(l->list);
	l->list = array;
	l->length = length;
	*lp = l;
	return 0;
}
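/*
 * Example of the procs/tasks split above: a process with tgid 100 that
 * has a second thread with pid 101 contributes a single "100" to a
 * CGROUP_FILE_PROCS list (tgids, de-duplicated by pidlist_uniq()) but
 * both "100" and "101" to a CGROUP_FILE_TASKS list.
 */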
/*
 * seq_file methods for the tasks/procs files.  The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start).  Use a binary-search to find the
	 * next pid to display, if any
	 */
	struct kernfs_open_file *of = s->private;
	struct cgroup *cgrp = seq_css(s)->cgroup;
	struct cgroup_pidlist *l;
	enum cgroup_filetype type = seq_cft(s)->private;
	int index = 0, pid = *pos;
	int *iter, ret;

	mutex_lock(&cgrp->pidlist_mutex);

	/*
	 * !NULL @of->priv indicates that this isn't the first start()
	 * after open.  If the matching pidlist is around, we can use that.
	 * Look for it.  Note that @of->priv can't be used directly.  It
	 * could already have been destroyed.
	 */
	if (of->priv)
		of->priv = cgroup_pidlist_find(cgrp, type);

	/*
	 * Either this is the first start() after open or the matching
	 * pidlist has been destroyed in between.  Create a new one.
	 */
	if (!of->priv) {
		ret = pidlist_array_load(cgrp, type,
					 (struct cgroup_pidlist **)&of->priv);
		if (ret)
			return ERR_PTR(ret);
	}
	l = of->priv;

	if (pid) {
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (l->list[mid] == pid) {
				index = mid;
				break;
			} else if (l->list[mid] <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = *iter;
	return iter;
}
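/*
 * Binary-search example for the loop above: with l->list == {3, 7, 9}
 * and a resume position of *pos == 8 (one past the last pid shown, 7),
 * the search settles on index 2, so iteration resumes at pid 9 and
 * *pos is updated to 9.
 */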
static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;

	if (l)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
				 CGROUP_PIDLIST_DESTROY_DELAY);
	mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}
static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;
	pid_t *p = v;
	pid_t *end = l->list + l->length;
	/*
	 * Advance to the next pid in the array.  If this goes off the
	 * end, we're done
	 */
	p++;
	if (p >= end) {
		(*pos)++;
		return NULL;
	} else {
		*pos = *p;
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *(int *)v);

	return 0;
}
static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
				     char *buf, size_t nbytes, loff_t off,
				     bool threadgroup)
{
	struct cgroup *cgrp;
	struct task_struct *task;
	const struct cred *cred, *tcred;
	ssize_t ret;
	bool locked;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	task = cgroup_procs_write_start(buf, threadgroup, &locked);
	ret = PTR_ERR_OR_ZERO(task);
	if (ret)
		goto out_unlock;

	/*
	 * Even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.
	 */
	cred = current_cred();
	tcred = get_task_cred(task);
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid))
		ret = -EACCES;
	put_cred(tcred);
	if (ret)
		goto out_finish;

	ret = cgroup_attach_task(cgrp, task, threadgroup);

out_finish:
	cgroup_procs_write_finish(task, locked);
out_unlock:
	cgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}
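/*
 * Example from userspace (illustrative paths): writing a pid into a v1
 * hierarchy migrates it, e.g.
 *
 *	echo 1234 > /sys/fs/cgroup/cpu/mygrp/cgroup.procs
 *
 * moves pid 1234's whole thread group (threadgroup == true), while the
 * same write to "tasks" moves only that single thread.
 */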
static ssize_t cgroup1_procs_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, true);
}

static ssize_t cgroup1_tasks_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, false);
}
static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
					  char *buf, size_t nbytes, loff_t off)
{
	struct cgroup *cgrp;

	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;
	spin_lock(&release_agent_path_lock);
	strlcpy(cgrp->root->release_agent_path, strstrip(buf),
		sizeof(cgrp->root->release_agent_path));
	spin_unlock(&release_agent_path_lock);
	cgroup_kn_unlock(of->kn);
	return nbytes;
}
static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	spin_lock(&release_agent_path_lock);
	seq_puts(seq, cgrp->root->release_agent_path);
	spin_unlock(&release_agent_path_lock);
	seq_putc(seq, '\n');
	return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "0\n");
	return 0;
}
static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					  struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	return 0;
}
/* cgroup core interface files for the legacy hierarchies */
struct cftype cgroup1_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup1_procs_write,
	},
	{
		.name = "cgroup.clone_children",
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_sane_behavior_show,
	},
	{
		.name = "tasks",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_TASKS,
		.write = cgroup1_tasks_write,
	},
	{
		.name = "notify_on_release",
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_release_agent_show,
		.write = cgroup_release_agent_write,
		.max_write_len = PATH_MAX - 1,
	},
	{ }	/* terminate */
};
/* Display information about each subsystem and each hierarchy */
int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * ideally we don't want subsystems moving around while we do this.
	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
	 * subsys/hierarchy state.
	 */
	mutex_lock(&cgroup_mutex);

	for_each_subsys(ss, i)
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->legacy_name, ss->root->hierarchy_id,
			   atomic_read(&ss->root->nr_cgrps),
			   cgroup_ssid_enabled(i));

	mutex_unlock(&cgroup_mutex);
	return 0;
}
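/*
 * Sample /proc/cgroups output produced by the function above (values
 * are illustrative):
 *
 *	#subsys_name	hierarchy	num_cgroups	enabled
 *	cpuset	2	1	1
 *	cpu	3	4	1
 *	memory	0	1	0
 */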
/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup *cgrp;
	struct css_task_iter it;
	struct task_struct *tsk;

	/* it should be kernfs_node belonging to cgroupfs and is a directory */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return -EINVAL;

	mutex_lock(&cgroup_mutex);

	/*
	 * We aren't being called from kernfs and there's no guarantee on
	 * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
	 * @kn->priv is RCU safe.  Let's do the RCU dancing.
	 */
	rcu_read_lock();
	cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
	if (!cgrp || cgroup_is_dead(cgrp)) {
		rcu_read_unlock();
		mutex_unlock(&cgroup_mutex);
		return -ENOENT;
	}
	rcu_read_unlock();

	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	css_task_iter_end(&it);

	mutex_unlock(&cgroup_mutex);
	return 0;
}
void cgroup1_check_for_release(struct cgroup *cgrp)
{
	if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
	    !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
		schedule_work(&cgrp->release_agent_work);
}
/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
void cgroup1_release_agent(struct work_struct *work)
{
	struct cgroup *cgrp =
		container_of(work, struct cgroup, release_agent_work);
	char *pathbuf, *agentbuf;
	char *argv[3], *envp[3];
	int ret;

	/* snoop agent path and exit early if empty */
	if (!cgrp->root->release_agent_path[0])
		return;

	/* prepare argument buffers */
	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	agentbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!pathbuf || !agentbuf)
		goto out_free;

	spin_lock(&release_agent_path_lock);
	strlcpy(agentbuf, cgrp->root->release_agent_path, PATH_MAX);
	spin_unlock(&release_agent_path_lock);
	if (!agentbuf[0])
		goto out_free;

	ret = cgroup_path_ns(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
	if (ret < 0 || ret >= PATH_MAX)
		goto out_free;

	argv[0] = agentbuf;
	argv[1] = pathbuf;
	argv[2] = NULL;

	/* minimal command environment */
	envp[0] = "HOME=/";
	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[2] = NULL;

	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
out_free:
	kfree(agentbuf);
	kfree(pathbuf);
}
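/*
 * Example: with release_agent set to "/sbin/cgroup-release" (a
 * hypothetical helper) and an emptied cgroup "/jobs/job1", the helper
 * is spawned roughly as
 *
 *	argv = { "/sbin/cgroup-release", "/jobs/job1", NULL }
 *	envp = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL }
 */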
/*
 * cgroup_rename - Only allow simple rename of directories in place.
 */
static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
			  const char *new_name_str)
{
	struct cgroup *cgrp = kn->priv;
	int ret;

	if (kernfs_type(kn) != KERNFS_DIR)
		return -ENOTDIR;
	if (kn->parent != new_parent)
		return -EIO;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  kernfs_rename() doesn't require active_ref
	 * protection.  Break them before grabbing cgroup_mutex.
	 */
	kernfs_break_active_protection(new_parent);
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	ret = kernfs_rename(kn, new_parent, new_name_str);
	if (!ret)
		TRACE_CGROUP_PATH(rename, cgrp);

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	kernfs_unbreak_active_protection(new_parent);
	return ret;
}
static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
{
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_subsys *ss;
	int ssid;

	for_each_subsys(ss, ssid)
		if (root->subsys_mask & (1 << ssid))
			seq_show_option(seq, ss->legacy_name, NULL);
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");
	if (root->flags & CGRP_ROOT_CPUSET_V2_MODE)
		seq_puts(seq, ",cpuset_v2_mode");

	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_show_option(seq, "release_agent",
				root->release_agent_path);
	spin_unlock(&release_agent_path_lock);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_show_option(seq, "name", root->name);
	return 0;
}
enum cgroup1_param {
	Opt_all,
	Opt_clone_children,
	Opt_cpuset_v2_mode,
	Opt_name,
	Opt_none,
	Opt_noprefix,
	Opt_release_agent,
	Opt_xattr,
};

const struct fs_parameter_spec cgroup1_fs_parameters[] = {
	fsparam_flag  ("all",		Opt_all),
	fsparam_flag  ("clone_children", Opt_clone_children),
	fsparam_flag  ("cpuset_v2_mode", Opt_cpuset_v2_mode),
	fsparam_string("name",		Opt_name),
	fsparam_flag  ("none",		Opt_none),
	fsparam_flag  ("noprefix",	Opt_noprefix),
	fsparam_string("release_agent",	Opt_release_agent),
	fsparam_flag  ("xattr",		Opt_xattr),
	{}
};
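/*
 * Example mount command exercising these parameters (names and paths
 * are illustrative):
 *
 *	mount -t cgroup -o cpu,cpuacct,name=mygrp cgroup /mnt/cg
 *
 * "cpu" and "cpuacct" are not in the table above; they fall through to
 * the -ENOPARAM subsystem-name matching in cgroup1_parse_param() below.
 */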
int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct cgroup_subsys *ss;
	struct fs_parse_result result;
	int opt, i;

	opt = fs_parse(fc, cgroup1_fs_parameters, param, &result);
	if (opt == -ENOPARAM) {
		if (strcmp(param->key, "source") == 0) {
			if (fc->source)
				return invalf(fc, "Multiple sources not supported");
			fc->source = param->string;
			param->string = NULL;
			return 0;
		}
		for_each_subsys(ss, i) {
			if (strcmp(param->key, ss->legacy_name))
				continue;
			ctx->subsys_mask |= (1 << i);
			return 0;
		}
		return invalfc(fc, "Unknown subsys name '%s'", param->key);
	}
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_none:
		/* Explicitly have no subsystems */
		ctx->none = true;
		break;
	case Opt_all:
		ctx->all_ss = true;
		break;
	case Opt_noprefix:
		ctx->flags |= CGRP_ROOT_NOPREFIX;
		break;
	case Opt_clone_children:
		ctx->cpuset_clone_children = true;
		break;
	case Opt_cpuset_v2_mode:
		ctx->flags |= CGRP_ROOT_CPUSET_V2_MODE;
		break;
	case Opt_xattr:
		ctx->flags |= CGRP_ROOT_XATTR;
		break;
	case Opt_release_agent:
		/* Specifying two release agents is forbidden */
		if (ctx->release_agent)
			return invalfc(fc, "release_agent respecified");
		ctx->release_agent = param->string;
		param->string = NULL;
		break;
	case Opt_name:
		/* blocked by boot param? */
		if (cgroup_no_v1_named)
			return -ENOENT;
		/* Can't specify an empty name */
		if (!param->size)
			return invalfc(fc, "Empty name");
		if (param->size > MAX_CGROUP_ROOT_NAMELEN - 1)
			return invalfc(fc, "Name too long");
		/* Must match [\w.-]+ */
		for (i = 0; i < param->size; i++) {
			char c = param->string[i];
			if (isalnum(c))
				continue;
			if ((c == '.') || (c == '-') || (c == '_'))
				continue;
			return invalfc(fc, "Invalid name");
		}
		/* Specifying two names is forbidden */
		if (ctx->name)
			return invalfc(fc, "name respecified");
		ctx->name = param->string;
		param->string = NULL;
		break;
	}
	return 0;
}
static int check_cgroupfs_options(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	u16 mask = U16_MAX;
	u16 enabled = 0;
	struct cgroup_subsys *ss;
	int i;

#ifdef CONFIG_CPUSETS
	mask = ~((u16)1 << cpuset_cgrp_id);
#endif
	for_each_subsys(ss, i)
		if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i))
			enabled |= 1 << i;

	ctx->subsys_mask &= enabled;

	/*
	 * In absence of 'none', 'name=' or subsystem name options,
	 * let's default to 'all'.
	 */
	if (!ctx->subsys_mask && !ctx->none && !ctx->name)
		ctx->all_ss = true;

	if (ctx->all_ss) {
		/* Mutually exclusive option 'all' + subsystem name */
		if (ctx->subsys_mask)
			return invalfc(fc, "subsys name conflicts with all");
		/* 'all' => select all the subsystems */
		ctx->subsys_mask = enabled;
	}

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!ctx->subsys_mask && !ctx->name)
		return invalfc(fc, "Need name or subsystem set");

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((ctx->flags & CGRP_ROOT_NOPREFIX) && (ctx->subsys_mask & mask))
		return invalfc(fc, "noprefix used incorrectly");

	/* Can't specify "none" and some subsystems */
	if (ctx->subsys_mask && ctx->none)
		return invalfc(fc, "none used incorrectly");

	return 0;
}
int cgroup1_reconfigure(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct kernfs_root *kf_root = kernfs_root_from_sb(fc->root->d_sb);
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	int ret = 0;
	u16 added_mask, removed_mask;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	/* See what subsystems are wanted */
	ret = check_cgroupfs_options(fc);
	if (ret)
		goto out_unlock;

	if (ctx->subsys_mask != root->subsys_mask || ctx->release_agent)
		pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
			task_tgid_nr(current), current->comm);

	added_mask = ctx->subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~ctx->subsys_mask;

	/* Don't allow flags or name to change at remount */
	if ((ctx->flags ^ root->flags) ||
	    (ctx->name && strcmp(ctx->name, root->name))) {
		errorfc(fc, "option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"",
			ctx->flags, ctx->name ?: "", root->flags, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (!list_empty(&root->cgrp.self.children)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask);
	if (ret)
		goto out_unlock;

	WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));

	if (ctx->release_agent) {
		spin_lock(&release_agent_path_lock);
		strcpy(root->release_agent_path, ctx->release_agent);
		spin_unlock(&release_agent_path_lock);
	}

	trace_cgroup_remount(root);

 out_unlock:
	mutex_unlock(&cgroup_mutex);
	return ret;
}
struct kernfs_syscall_ops cgroup1_kf_syscall_ops = {
	.rename			= cgroup1_rename,
	.show_options		= cgroup1_show_options,
	.mkdir			= cgroup_mkdir,
	.rmdir			= cgroup_rmdir,
	.show_path		= cgroup_show_path,
};
/*
 * The guts of cgroup1 mount - find or create cgroup_root to use.
 * Called with cgroup_mutex held; returns 0 on success, -E... on
 * error and positive - in case when the candidate is busy dying.
 * On success it stashes a reference to cgroup_root into given
 * cgroup_fs_context; that reference is *NOT* counting towards the
 * cgroup_root refcount.
 */
static int cgroup1_root_to_use(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct cgroup_root *root;
	struct cgroup_subsys *ss;
	int i, ret;

	/* First find the desired set of subsystems */
	ret = check_cgroupfs_options(fc);
	if (ret)
		return ret;

	/*
	 * Destruction of cgroup root is asynchronous, so subsystems may
	 * still be dying after the previous unmount.  Let's drain the
	 * dying subsystems.  We just need to ensure that the ones
	 * unmounted previously finish dying and don't care about new ones
	 * starting.  Testing ref liveness is good enough.
	 */
	for_each_subsys(ss, i) {
		if (!(ctx->subsys_mask & (1 << i)) ||
		    ss->root == &cgrp_dfl_root)
			continue;

		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt))
			return 1;	/* restart */
		cgroup_put(&ss->root->cgrp);
	}

	for_each_root(root) {
		bool name_match = false;

		if (root == &cgrp_dfl_root)
			continue;

		/*
		 * If we asked for a name then it must match.  Also, if
		 * name matches but subsys_mask doesn't, we should fail.
		 * Remember whether name matched.
		 */
		if (ctx->name) {
			if (strcmp(ctx->name, root->name))
				continue;
			name_match = true;
		}

		/*
		 * If we asked for subsystems (or explicitly for no
		 * subsystems) then they must match.
		 */
		if ((ctx->subsys_mask || ctx->none) &&
		    (ctx->subsys_mask != root->subsys_mask)) {
			if (!name_match)
				continue;
			return -EBUSY;
		}

		if (root->flags ^ ctx->flags)
			pr_warn("new mount options do not match the existing superblock, will be ignored\n");

		ctx->root = root;
		return 0;
	}

	/*
	 * No such thing, create a new one.  name= matching without subsys
	 * specification is allowed for already existing hierarchies but we
	 * can't create new one without subsys specification.
	 */
	if (!ctx->subsys_mask && !ctx->none)
		return invalfc(fc, "No subsys list or none specified");

	/* Hierarchies may only be created in the initial cgroup namespace. */
	if (ctx->ns != &init_cgroup_ns)
		return -EPERM;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return -ENOMEM;

	ctx->root = root;
	init_cgroup_root(ctx);

	ret = cgroup_setup_root(root, ctx->subsys_mask);
	if (ret)
		cgroup_free_root(root);
	return ret;
}
int cgroup1_get_tree(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	int ret;

	/* Check if the caller has permission to mount. */
	if (!ns_capable(ctx->ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	ret = cgroup1_root_to_use(fc);
	if (!ret && !percpu_ref_tryget_live(&ctx->root->cgrp.self.refcnt))
		ret = 1;	/* restart */

	mutex_unlock(&cgroup_mutex);

	if (!ret)
		ret = cgroup_do_get_tree(fc);

	if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
		struct super_block *sb = fc->root->d_sb;
		dput(fc->root);
		deactivate_locked_super(sb);
		ret = 1;
	}

	if (unlikely(ret > 0)) {
		msleep(10);
		return restart_syscall();
	}
	return ret;
}
static int __init cgroup1_wq_init(void)
{
	/*
	 * Used to destroy pidlists and separate to serve as flush domain.
	 * Cap @max_active to 1 too.
	 */
	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
						    0, 1);
	BUG_ON(!cgroup_pidlist_destroy_wq);
	return 0;
}
core_initcall(cgroup1_wq_init);
static int __init cgroup_no_v1(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		if (!strcmp(token, "all")) {
			cgroup_no_v1_mask = U16_MAX;
			continue;
		}

		if (!strcmp(token, "named")) {
			cgroup_no_v1_named = true;
			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name) &&
			    strcmp(token, ss->legacy_name))
				continue;

			cgroup_no_v1_mask |= 1 << i;
		}
	}
	return 1;
}
__setup("cgroup_no_v1=", cgroup_no_v1);
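/*
 * Example kernel command lines (illustrative):
 *
 *	cgroup_no_v1=memory,perf_event	- block these controllers in v1
 *	cgroup_no_v1=all		- block all v1 controllers
 *	cgroup_no_v1=named		- block named v1 hierarchies
 */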