#include "cgroup-internal.h"

#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/sort.h>
#include <linux/delay.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delayacct.h>
#include <linux/pid_namespace.h>
#include <linux/cgroupstats.h>

#include <trace/events/cgroup.h>

/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is avoiding frequent destruction in the middle of consecutive read calls.
 * Expiring in the middle is a performance problem not a correctness one.
 * 1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY	HZ

/* Controllers blocked by the commandline in v1 */
static u16 cgroup_no_v1_mask;

/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/*
 * Protects cgroup_root->release_agent_path.  Modifying it also requires
 * cgroup_mutex.  Reading requires either cgroup_mutex or this spinlock.
 */
static DEFINE_SPINLOCK(release_agent_path_lock);

bool cgroup1_ssid_disabled(int ssid)
{
	return cgroup_no_v1_mask & (1 << ssid);
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroup_root *root;
	int retval = 0;

	mutex_lock(&cgroup_mutex);
	percpu_down_write(&cgroup_threadgroup_rwsem);
	for_each_root(root) {
		struct cgroup *from_cgrp;

		if (root == &cgrp_dfl_root)
			continue;

		spin_lock_irq(&css_set_lock);
		from_cgrp = task_cgroup_from_root(from, root);
		spin_unlock_irq(&css_set_lock);

		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;
	}
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);

/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that, if a task is forking while being migrated, the new child
 * is either visible in the source cgroup after the parent's migration is
 * complete or put into the target cgroup.  No task can slip out of
 * migration through forking.
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
	DEFINE_CGROUP_MGCTX(mgctx);
	struct cgrp_cset_link *link;
	struct css_task_iter it;
	struct task_struct *task;
	int ret;

	if (cgroup_on_dfl(to))
		return -EINVAL;

	if (!cgroup_may_migrate_to(to))
		return -EBUSY;

	mutex_lock(&cgroup_mutex);

	percpu_down_write(&cgroup_threadgroup_rwsem);

	/* all tasks in @from are being moved, all csets are source */
	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &from->cset_links, cset_link)
		cgroup_migrate_add_src(link->cset, to, &mgctx);
	spin_unlock_irq(&css_set_lock);

	ret = cgroup_migrate_prepare_dst(&mgctx);
	if (ret)
		goto out_err;

	/*
	 * Migrate tasks one-by-one until @from is empty.  This fails iff
	 * ->can_attach() fails.
	 */
	do {
		css_task_iter_start(&from->self, &it);
		task = css_task_iter_next(&it);
		if (task)
			get_task_struct(task);
		css_task_iter_end(&it);

		if (task) {
			ret = cgroup_migrate(task, false, &mgctx);
			if (!ret)
				trace_cgroup_transfer_tasks(to, task, false);
			put_task_struct(task);
		}
	} while (task && !ret);
out_err:
	cgroup_migrate_finish(&mgctx);
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks.  So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks").  We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted.  doesn't change as long as
	 * this particular list stays in the list.
	 */
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of pids */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* for delayed destruction */
	struct delayed_work destroy_dwork;
};

/*
 * The following two functions "fix" the issue where there are more pids
 * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
 * TODO: replace with a kernel-wide solution to this problem
 */
#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
static void *pidlist_allocate(int count)
{
	if (PIDLIST_TOO_LARGE(count))
		return vmalloc(count * sizeof(pid_t));
	else
		return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
}
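
/*
 * Worked example (illustrative, assuming 4096-byte pages and a 4-byte
 * pid_t): the kmalloc/vmalloc cut-over above triggers once a pidlist
 * needs more than two pages, i.e. more than 4096 * 2 / 4 = 2048 pids;
 * smaller cgroups keep using kmalloc()ed memory.
 */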

static void pidlist_free(void *p)
{
	kvfree(p);
}

/*
 * Used to destroy all pidlists lingering waiting for destroy timer.  None
 * should be left afterwards.
 */
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)
{
	struct cgroup_pidlist *l, *tmp_l;

	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
	mutex_unlock(&cgrp->pidlist_mutex);

	flush_workqueue(cgroup_pidlist_destroy_wq);
	BUG_ON(!list_empty(&cgrp->pidlists));
}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
						destroy_dwork);
	struct cgroup_pidlist *tofree = NULL;

	mutex_lock(&l->owner->pidlist_mutex);

	/*
	 * Destroy iff we didn't get queued again.  The state won't change
	 * as destroy_dwork can only be queued while locked.
	 */
	if (!delayed_work_pending(dwork)) {
		list_del(&l->links);
		pidlist_free(l->list);
		put_pid_ns(l->key.ns);
		tofree = l;
	}

	mutex_unlock(&l->owner->pidlist_mutex);
	kfree(tofree);
}

/*
 * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
 * Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
	int src, dest = 1;

	/*
	 * we presume the 0th element is unique, so i starts at 1.  trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	return dest;
}
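
/*
 * Illustrative example of the in-place compaction above: a sorted input
 * of {1, 1, 5, 5, 5, 9} is rewritten to {1, 5, 9, ...} and pidlist_uniq()
 * returns 3; only the first 'dest' entries are meaningful afterwards.
 */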

/*
 * The two pid files - tasks and cgroup.procs - guaranteed that the result
 * is sorted, which forced this whole pidlist fiasco.  As pid order is
 * different per namespace, each namespace needs a differently sorted list,
 * making it impossible to use, for example, a single rbtree of member tasks
 * sorted by task pointer.  As pidlists can be fairly large, allocating one
 * per open file is dangerous, so cgroup had to implement a shared pool of
 * pidlists keyed by cgroup and namespace.
 */
static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}
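
/*
 * Example of why the ordering is per-namespace (illustrative pids): the
 * same task can be pid 1 inside its own pid namespace but, say, pid 4321
 * in the init namespace, so the sorted order of one and the same member
 * set differs between namespaces and a single shared ordering can't be
 * reused.
 */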

static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = task_active_pid_ns(current);

	lockdep_assert_held(&cgrp->pidlist_mutex);

	list_for_each_entry(l, &cgrp->pidlists, links)
		if (l->key.type == type && l->key.ns == ns)
			return l;
	return NULL;
}

/*
 * find the appropriate pidlist for our purpose (given procs vs tasks)
 * returns with the lock on that pidlist already held, and takes care
 * of the use count, or returns NULL with no locks held if we're out of
 * memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
						enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	l = cgroup_pidlist_find(cgrp, type);
	if (l)
		return l;

	/* entry not found; create a new one */
	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l)
		return l;

	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
	l->key.type = type;
	/* don't need task_nsproxy() if we're looking at ourself */
	l->key.ns = get_pid_ns(task_active_pid_ns(current));
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	return l;
}

/**
 * cgroup_task_count - count the number of tasks in a cgroup.
 * @cgrp: the cgroup in question
 */
int cgroup_task_count(const struct cgroup *cgrp)
{
	int count = 0;
	struct cgrp_cset_link *link;

	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &cgrp->cset_links, cset_link)
		count += link->cset->nr_tasks;
	spin_unlock_irq(&css_set_lock);
	return count;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct css_task_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough.  This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = pidlist_allocate(length);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	css_task_iter_start(&cgrp->self, &it);
	while ((tsk = css_task_iter_next(&it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	css_task_iter_end(&it);
	length = n;
	/* now sort & (if procs) strip out duplicates */
	sort(array, length, sizeof(pid_t), cmppid, NULL);
	if (type == CGROUP_FILE_PROCS)
		length = pidlist_uniq(array, length);

	l = cgroup_pidlist_find_create(cgrp, type);
	if (!l) {
		pidlist_free(array);
		return -ENOMEM;
	}

	/* store array, freeing old if necessary */
	pidlist_free(l->list);
	l->list = array;
	l->length = length;
	*lp = l;
	return 0;
}
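
/*
 * Illustrative difference between the two file types handled above: a
 * process with tgid 100 and threads 100, 101 and 102 contributes the
 * single entry 100 to "cgroup.procs" (tgids, deduplicated) but all of
 * 100, 101 and 102 to "tasks" (per-thread pids).
 */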

/*
 * seq_file methods for the tasks/procs files.  The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start).  Use a binary-search to find the
	 * next pid to display, if any.
	 */
	struct kernfs_open_file *of = s->private;
	struct cgroup *cgrp = seq_css(s)->cgroup;
	struct cgroup_pidlist *l;
	enum cgroup_filetype type = seq_cft(s)->private;
	int index = 0, pid = *pos;
	int *iter, ret;

	mutex_lock(&cgrp->pidlist_mutex);

	/*
	 * !NULL @of->priv indicates that this isn't the first start()
	 * after open.  If the matching pidlist is around, we can use that.
	 * Look for it.  Note that @of->priv can't be used directly.  It
	 * could already have been destroyed.
	 */
	if (of->priv)
		of->priv = cgroup_pidlist_find(cgrp, type);

	/*
	 * Either this is the first start() after open or the matching
	 * pidlist has been destroyed in between.  Create a new one.
	 */
	if (!of->priv) {
		ret = pidlist_array_load(cgrp, type,
					 (struct cgroup_pidlist **)&of->priv);
		if (ret)
			return ERR_PTR(ret);
	}
	l = of->priv;

	if (pid) {
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (l->list[mid] == pid) {
				index = mid;
				break;
			} else if (l->list[mid] <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = *iter;
	return iter;
}
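
/*
 * Illustrative walk-through of the restart logic above: with a pidlist of
 * {3, 7, 20}, a later read(2) enters start() with *pos holding the next
 * pid to show (say 20, or 8 if the pidlist was rebuilt and that pid is
 * gone).  The binary search picks the entry equal to *pos or, failing
 * that, the first larger one, so output resumes at pid 20 without
 * repeating entries that were already emitted.
 */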

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;

	if (l)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
				 CGROUP_PIDLIST_DESTROY_DELAY);
	mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;
	pid_t *p = v;
	pid_t *end = l->list + l->length;

	/*
	 * Advance to the next pid in the array.  If this goes off the
	 * end, we're done.
	 */
	p++;
	if (p >= end) {
		return NULL;
	} else {
		*pos = *p;
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *(int *)v);
	return 0;
}

static ssize_t cgroup_tasks_write(struct kernfs_open_file *of,
				  char *buf, size_t nbytes, loff_t off)
{
	return __cgroup_procs_write(of, buf, nbytes, off, false);
}

static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
					  char *buf, size_t nbytes, loff_t off)
{
	struct cgroup *cgrp;

	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;
	spin_lock(&release_agent_path_lock);
	strlcpy(cgrp->root->release_agent_path, strstrip(buf),
		sizeof(cgrp->root->release_agent_path));
	spin_unlock(&release_agent_path_lock);
	cgroup_kn_unlock(of->kn);
	return nbytes;
}

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	spin_lock(&release_agent_path_lock);
	seq_puts(seq, cgrp->root->release_agent_path);
	spin_unlock(&release_agent_path_lock);
	seq_putc(seq, '\n');
	return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "0\n");
	return 0;
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					   struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	return 0;
}

/* cgroup core interface files for the legacy hierarchies */
struct cftype cgroup1_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup_procs_write,
	},
	{
		.name = "cgroup.clone_children",
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_sane_behavior_show,
	},
	{
		.name = "tasks",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_TASKS,
		.write = cgroup_tasks_write,
	},
	{
		.name = "notify_on_release",
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_release_agent_show,
		.write = cgroup_release_agent_write,
		.max_write_len = PATH_MAX - 1,
	},
	{ }	/* terminate */
};

/* Display information about each subsystem and each hierarchy */
static int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * ideally we don't want subsystems moving around while we do this.
	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
	 * subsys/hierarchy state.
	 */
	mutex_lock(&cgroup_mutex);

	for_each_subsys(ss, i)
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->legacy_name, ss->root->hierarchy_id,
			   atomic_read(&ss->root->nr_cgrps),
			   cgroup_ssid_enabled(i));

	mutex_unlock(&cgroup_mutex);
	return 0;
}
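
/*
 * Example of the resulting /proc/cgroups contents (illustrative values):
 *
 *	#subsys_name	hierarchy	num_cgroups	enabled
 *	cpuset		2		14		1
 *	cpu		3		52		1
 *	memory		4		107		1
 */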

static int cgroupstats_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_cgroupstats_show, NULL);
}

const struct file_operations proc_cgroupstats_operations = {
	.open = cgroupstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup *cgrp;
	struct css_task_iter it;
	struct task_struct *tsk;

	/* it should be kernfs_node belonging to cgroupfs and is a directory */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return -EINVAL;

	mutex_lock(&cgroup_mutex);

	/*
	 * We aren't being called from kernfs and there's no guarantee on
	 * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
	 * @kn->priv is RCU safe.  Let's do the RCU dancing.
	 */
	rcu_read_lock();
	cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
	if (!cgrp || cgroup_is_dead(cgrp)) {
		rcu_read_unlock();
		mutex_unlock(&cgroup_mutex);
		return -ENOENT;
	}
	rcu_read_unlock();

	css_task_iter_start(&cgrp->self, &it);
	while ((tsk = css_task_iter_next(&it))) {
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	css_task_iter_end(&it);

	mutex_unlock(&cgroup_mutex);
	return 0;
}

void cgroup1_check_for_release(struct cgroup *cgrp)
{
	if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
	    !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
		schedule_work(&cgrp->release_agent_work);
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
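
/*
 * Illustrative invocation (paths are examples only): with release_agent
 * set to "/sbin/cgroup-release-agent" and an empty notify_on_release
 * cgroup at "/foo/bar" in this hierarchy, the helper below runs roughly
 * the equivalent of
 *
 *	/sbin/cgroup-release-agent /foo/bar
 *
 * with HOME and PATH as the only environment.
 */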
void cgroup1_release_agent(struct work_struct *work)
{
	struct cgroup *cgrp =
		container_of(work, struct cgroup, release_agent_work);
	char *pathbuf = NULL, *agentbuf = NULL;
	char *argv[3], *envp[3];
	int ret;

	mutex_lock(&cgroup_mutex);

	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
	if (!pathbuf || !agentbuf)
		goto out;

	spin_lock_irq(&css_set_lock);
	ret = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
	spin_unlock_irq(&css_set_lock);
	if (ret < 0 || ret >= PATH_MAX)
		goto out;

	argv[0] = agentbuf;
	argv[1] = pathbuf;
	argv[2] = NULL;

	/* minimal command environment */
	envp[0] = "HOME=/";
	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[2] = NULL;

	mutex_unlock(&cgroup_mutex);
	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
	goto out_free;
out:
	mutex_unlock(&cgroup_mutex);
out_free:
	kfree(agentbuf);
	kfree(pathbuf);
}

/*
 * cgroup1_rename - Only allow simple rename of directories in place.
 */
static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
			  const char *new_name_str)
{
	struct cgroup *cgrp = kn->priv;
	int ret;

	if (kernfs_type(kn) != KERNFS_DIR)
		return -ENOTDIR;
	if (kn->parent != new_parent)
		return -EIO;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  kernfs_rename() doesn't require active_ref
	 * protection.  Break them before grabbing cgroup_mutex.
	 */
	kernfs_break_active_protection(new_parent);
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	ret = kernfs_rename(kn, new_parent, new_name_str);
	if (!ret)
		trace_cgroup_rename(cgrp);

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	kernfs_unbreak_active_protection(new_parent);
	return ret;
}

static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
{
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_subsys *ss;
	int ssid;

	for_each_subsys(ss, ssid)
		if (root->subsys_mask & (1 << ssid))
			seq_show_option(seq, ss->legacy_name, NULL);
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");

	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_show_option(seq, "release_agent",
				root->release_agent_path);
	spin_unlock(&release_agent_path_lock);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_show_option(seq, "name", root->name);
	return 0;
}
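
/*
 * Example of what the above contributes to a mount entry for a v1
 * hierarchy (illustrative): a /proc/mounts line such as
 *
 *	cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,...,cpu,cpuacct 0 0
 *
 * where the subsystem names, the noprefix/xattr/clone_children flags,
 * the release_agent path and any name= label come from the options
 * emitted here.
 */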

static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
{
	char *token, *o = data;
	bool all_ss = false, one_ss = false;
	u16 mask = U16_MAX;
	struct cgroup_subsys *ss;
	int i;

#ifdef CONFIG_CPUSETS
	mask = ~((u16)1 << cpuset_cgrp_id);
#endif

	memset(opts, 0, sizeof(*opts));

	while ((token = strsep(&o, ",")) != NULL) {
		if (!*token)
			return -EINVAL;
		if (!strcmp(token, "none")) {
			/* Explicitly have no subsystems */
			opts->none = true;
			continue;
		}
		if (!strcmp(token, "all")) {
			/* Mutually exclusive option 'all' + subsystem name */
			if (one_ss)
				return -EINVAL;
			all_ss = true;
			continue;
		}
		if (!strcmp(token, "noprefix")) {
			opts->flags |= CGRP_ROOT_NOPREFIX;
			continue;
		}
		if (!strcmp(token, "clone_children")) {
			opts->cpuset_clone_children = true;
			continue;
		}
		if (!strcmp(token, "xattr")) {
			opts->flags |= CGRP_ROOT_XATTR;
			continue;
		}
		if (!strncmp(token, "release_agent=", 14)) {
			/* Specifying two release agents is forbidden */
			if (opts->release_agent)
				return -EINVAL;
			opts->release_agent =
				kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
			if (!opts->release_agent)
				return -ENOMEM;
			continue;
		}
		if (!strncmp(token, "name=", 5)) {
			const char *name = token + 5;
			/* Can't specify an empty name */
			if (!strlen(name))
				return -EINVAL;
			/* Must match [\w.-]+ */
			for (i = 0; i < strlen(name); i++) {
				char c = name[i];
				if (isalnum(c))
					continue;
				if ((c == '.') || (c == '-') || (c == '_'))
					continue;
				return -EINVAL;
			}
			/* Specifying two names is forbidden */
			if (opts->name)
				return -EINVAL;
			opts->name = kstrndup(name,
					      MAX_CGROUP_ROOT_NAMELEN - 1,
					      GFP_KERNEL);
			if (!opts->name)
				return -ENOMEM;

			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->legacy_name))
				continue;
			if (!cgroup_ssid_enabled(i))
				continue;
			if (cgroup1_ssid_disabled(i))
				continue;

			/* Mutually exclusive option 'all' + subsystem name */
			if (all_ss)
				return -EINVAL;
			opts->subsys_mask |= (1 << i);
			one_ss = true;

			break;
		}
		if (i == CGROUP_SUBSYS_COUNT)
			return -ENOENT;
	}

	/*
	 * If the 'all' option was specified select all the subsystems,
	 * otherwise if 'none', 'name=' and a subsystem name options were
	 * not specified, let's default to 'all'
	 */
	if (all_ss || (!one_ss && !opts->none && !opts->name))
		for_each_subsys(ss, i)
			if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i))
				opts->subsys_mask |= (1 << i);

	/*
	 * We either have to specify by name or by subsystems.  (So all
	 * empty hierarchies must have a name).
	 */
	if (!opts->subsys_mask && !opts->name)
		return -EINVAL;

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
		return -EINVAL;

	/* Can't specify "none" and some subsystems */
	if (opts->subsys_mask && opts->none)
		return -EINVAL;

	return 0;
}
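
/*
 * Illustrative parse results: mounting with
 * "-o cpu,cpuacct,name=mygrp,release_agent=/sbin/agent" leaves
 * opts->subsys_mask with the cpu and cpuacct bits set, opts->name as
 * "mygrp" and opts->release_agent as "/sbin/agent", while
 * "-o none,name=systemd" yields an empty subsys_mask with opts->none
 * set, which is how named, controller-less hierarchies are mounted.
 */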

static int cgroup1_remount(struct kernfs_root *kf_root, int *flags, char *data)
{
	int ret = 0;
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_sb_opts opts;
	u16 added_mask, removed_mask;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	/* See what subsystems are wanted */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	if (opts.subsys_mask != root->subsys_mask || opts.release_agent)
		pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
			task_tgid_nr(current), current->comm);

	added_mask = opts.subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~opts.subsys_mask;

	/* Don't allow flags or name to change at remount */
	if ((opts.flags ^ root->flags) ||
	    (opts.name && strcmp(opts.name, root->name))) {
		pr_err("option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"\n",
		       opts.flags, opts.name ?: "", root->flags, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (!list_empty(&root->cgrp.self.children)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask);
	if (ret)
		goto out_unlock;

	WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));

	if (opts.release_agent) {
		spin_lock(&release_agent_path_lock);
		strcpy(root->release_agent_path, opts.release_agent);
		spin_unlock(&release_agent_path_lock);
	}

	trace_cgroup_remount(root);

 out_unlock:
	kfree(opts.release_agent);
	kfree(opts.name);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

struct kernfs_syscall_ops cgroup1_kf_syscall_ops = {
	.rename			= cgroup1_rename,
	.show_options		= cgroup1_show_options,
	.remount_fs		= cgroup1_remount,
	.mkdir			= cgroup_mkdir,
	.rmdir			= cgroup_rmdir,
	.show_path		= cgroup_show_path,
};

struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
			     void *data, unsigned long magic,
			     struct cgroup_namespace *ns)
{
	struct super_block *pinned_sb = NULL;
	struct cgroup_sb_opts opts;
	struct cgroup_root *root;
	struct cgroup_subsys *ss;
	struct dentry *dentry;
	int i, ret;
	bool new_root = false;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	/* First find the desired set of subsystems */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	/*
	 * Destruction of cgroup root is asynchronous, so subsystems may
	 * still be dying after the previous unmount.  Let's drain the
	 * dying subsystems.  We just need to ensure that the ones
	 * unmounted previously finish dying and don't care about new ones
	 * starting.  Testing ref liveliness is good enough.
	 */
	for_each_subsys(ss, i) {
		if (!(opts.subsys_mask & (1 << i)) ||
		    ss->root == &cgrp_dfl_root)
			continue;

		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
			mutex_unlock(&cgroup_mutex);
			msleep(10);
			ret = restart_syscall();
			goto out_free;
		}
		cgroup_put(&ss->root->cgrp);
	}

	for_each_root(root) {
		bool name_match = false;

		if (root == &cgrp_dfl_root)
			continue;

		/*
		 * If we asked for a name then it must match.  Also, if
		 * name matches but subsys_mask doesn't, we should fail.
		 * Remember whether name matched.
		 */
		if (opts.name) {
			if (strcmp(opts.name, root->name))
				continue;
			name_match = true;
		}

		/*
		 * If we asked for subsystems (or explicitly for no
		 * subsystems) then they must match.
		 */
		if ((opts.subsys_mask || opts.none) &&
		    (opts.subsys_mask != root->subsys_mask)) {
			if (!name_match)
				continue;
			ret = -EBUSY;
			goto out_unlock;
		}

		if (root->flags ^ opts.flags)
			pr_warn("new mount options do not match the existing superblock, will be ignored\n");

		/*
		 * We want to reuse @root whose lifetime is governed by its
		 * ->cgrp.  Let's check whether @root is alive and keep it
		 * that way.  As cgroup_kill_sb() can happen anytime, we
		 * want to block it by pinning the sb so that @root doesn't
		 * get killed before mount is complete.
		 *
		 * With the sb pinned, tryget_live can reliably indicate
		 * whether @root can be reused.  If it's being killed,
		 * drain it.  We can use wait_queue for the wait but this
		 * path is super cold.  Let's just sleep a bit and retry.
		 */
		pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
		if (IS_ERR(pinned_sb) ||
		    !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
			mutex_unlock(&cgroup_mutex);
			if (!IS_ERR_OR_NULL(pinned_sb))
				deactivate_super(pinned_sb);
			msleep(10);
			ret = restart_syscall();
			goto out_free;
		}

		ret = 0;
		goto out_unlock;
	}

	/*
	 * No such thing, create a new one.  name= matching without subsys
	 * specification is allowed for already existing hierarchies but we
	 * can't create a new one without subsys specification.
	 */
	if (!opts.subsys_mask && !opts.none) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Hierarchies may only be created in the initial cgroup namespace. */
	if (ns != &init_cgroup_ns) {
		ret = -EPERM;
		goto out_unlock;
	}

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root) {
		ret = -ENOMEM;
		goto out_unlock;
	}
	new_root = true;

	init_cgroup_root(root, &opts);

	ret = cgroup_setup_root(root, opts.subsys_mask, PERCPU_REF_INIT_DEAD);
	if (ret)
		cgroup_free_root(root);

out_unlock:
	mutex_unlock(&cgroup_mutex);
out_free:
	kfree(opts.release_agent);
	kfree(opts.name);

	if (ret)
		return ERR_PTR(ret);

	dentry = cgroup_do_mount(&cgroup_fs_type, flags, root,
				 CGROUP_SUPER_MAGIC, ns);

	/*
	 * There's a race window after we release cgroup_mutex and before
	 * allocating a superblock.  Make sure a concurrent process won't
	 * be able to re-use the root during this window by delaying the
	 * initialization of root refcnt.
	 */
	if (new_root) {
		mutex_lock(&cgroup_mutex);
		percpu_ref_reinit(&root->cgrp.self.refcnt);
		mutex_unlock(&cgroup_mutex);
	}

	/*
	 * If @pinned_sb, we're reusing an existing root and holding an
	 * extra ref on its sb.  Mount is complete.  Put the extra ref.
	 */
	if (pinned_sb)
		deactivate_super(pinned_sb);

	return dentry;
}

static int __init cgroup1_wq_init(void)
{
	/*
	 * Used to destroy pidlists and separate to serve as flush domain.
	 * Cap @max_active to 1 too.
	 */
	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
						    0, 1);
	BUG_ON(!cgroup_pidlist_destroy_wq);
	return 0;
}
core_initcall(cgroup1_wq_init);
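
/*
 * "cgroup_no_v1=" boot parameter: a comma-separated list of controller
 * names (or "all") whose v1 interface should be disabled.  For example,
 * booting with "cgroup_no_v1=memory,blkio" sets the corresponding bits
 * in cgroup_no_v1_mask, so those controllers can no longer be mounted
 * on a v1 hierarchy and remain available only through v2.
 */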

static int __init cgroup_no_v1(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		if (!strcmp(token, "all")) {
			cgroup_no_v1_mask = U16_MAX;
			break;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name) &&
			    strcmp(token, ss->legacy_name))
				continue;

			cgroup_no_v1_mask |= 1 << i;
		}
	}
	return 1;
}
__setup("cgroup_no_v1=", cgroup_no_v1);