// SPDX-License-Identifier: GPL-2.0
/*
 * WARNING: This controller is for cgroup core debugging only.
 * Its interfaces are unstable and subject to changes at any time.
 */
#include <linux/ctype.h>
#include <linux/slab.h>

#include "cgroup-internal.h"
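
/*
 * The debug controller keeps no per-cgroup state of its own: css_alloc only
 * hands back a bare, zeroed css for the cgroup core to track.
 */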
static struct cgroup_subsys_state *
debug_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);

	if (!css)
		return ERR_PTR(-ENOMEM);

	return css;
}
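
/* Release the bare css allocated by debug_css_alloc(). */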
static void debug_css_free(struct cgroup_subsys_state *css)
{
	kfree(css);
}
/*
 * debug_taskcount_read - return the number of tasks in a cgroup.
 * @cgrp: the cgroup in question
 */
static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return cgroup_task_count(css->cgroup);
}
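
/*
 * Show the css_set of the task reading this file: the css_set pointer, its
 * refcount (plus the amount by which it exceeds nr_tasks) and the css it
 * holds for each subsystem.
 */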
static int current_css_set_read(struct seq_file *seq, void *v)
{
	struct kernfs_open_file *of = seq->private;
	struct css_set *cset;
	struct cgroup_subsys *ss;
	struct cgroup_subsys_state *css;
	int i, refcnt;

	if (!cgroup_kn_lock_live(of->kn, false))
		return -ENODEV;

	spin_lock_irq(&css_set_lock);
	cset = task_css_set(current);
	refcnt = refcount_read(&cset->refcount);
	seq_printf(seq, "css_set %pK %d", cset, refcnt);
	if (refcnt > cset->nr_tasks)
		seq_printf(seq, " +%d", refcnt - cset->nr_tasks);
	seq_puts(seq, "\n");

	/*
	 * Print the css'es stored in the current css_set.
	 */
	for_each_subsys(ss, i) {
		css = cset->subsys[ss->id];
		if (!css)
			continue;
		seq_printf(seq, "%2d: %-4s\t- %lx[%d]\n", ss->id, ss->name,
			   (unsigned long)css, css->id);
	}
	spin_unlock_irq(&css_set_lock);
	cgroup_kn_unlock(of->kn);
	return 0;
}
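
/* Return the refcount of the css_set of the task reading this file. */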
static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	u64 count;

	rcu_read_lock();
	count = refcount_read(&task_css_set(current)->refcount);
	rcu_read_unlock();
	return count;
}
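
/*
 * Walk the cgrp_links of the reading task's css_set and print the hierarchy
 * id and cgroup name behind each link.
 */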
static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
{
	struct cgrp_cset_link *link;
	struct css_set *cset;
	char *name_buf;

	name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!name_buf)
		return -ENOMEM;

	spin_lock_irq(&css_set_lock);
	cset = task_css_set(current);
	list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		cgroup_name(c, name_buf, NAME_MAX + 1);
		seq_printf(seq, "Root %d group %s\n",
			   c->root->hierarchy_id, name_buf);
	}
	spin_unlock_irq(&css_set_lock);
	kfree(name_buf);
	return 0;
}
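
/*
 * Dump every css_set linked to this cgroup: its domain (proc) and threaded
 * css_set relationships, refcount vs. task count, and up to
 * MAX_TASKS_SHOWN_PER_CSS member tasks per css_set.
 */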
#define MAX_TASKS_SHOWN_PER_CSS 25
static int cgroup_css_links_read(struct seq_file *seq, void *v)
{
	struct cgroup_subsys_state *css = seq_css(seq);
	struct cgrp_cset_link *link;
	int dead_cnt = 0, extra_refs = 0, threaded_csets = 0;

	spin_lock_irq(&css_set_lock);

	list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
		struct css_set *cset = link->cset;
		struct task_struct *task;
		int count = 0;
		int refcnt = refcount_read(&cset->refcount);

		/*
		 * Print out the proc_cset and threaded_cset relationship
		 * and highlight difference between refcount and task_count.
		 */
		seq_printf(seq, "css_set %pK", cset);
		if (rcu_dereference_protected(cset->dom_cset, 1) != cset) {
			threaded_csets++;
			seq_printf(seq, "=>%pK", cset->dom_cset);
		}
		if (!list_empty(&cset->threaded_csets)) {
			struct css_set *tcset;
			int idx = 0;

			list_for_each_entry(tcset, &cset->threaded_csets,
					    threaded_csets_node) {
				seq_puts(seq, idx ? "," : "<=");
				seq_printf(seq, "%pK", tcset);
				idx++;
			}
		} else {
			seq_printf(seq, " %d", refcnt);
			if (refcnt - cset->nr_tasks > 0) {
				int extra = refcnt - cset->nr_tasks;

				seq_printf(seq, " +%d", extra);
				/*
				 * Take out the one additional reference in
				 * init_css_set.
				 */
				if (cset == &init_css_set)
					extra--;
				extra_refs += extra;
			}
		}
		seq_puts(seq, "\n");

		list_for_each_entry(task, &cset->tasks, cg_list) {
			if (count++ <= MAX_TASKS_SHOWN_PER_CSS)
				seq_printf(seq, " task %d\n",
					   task_pid_vnr(task));
		}

		list_for_each_entry(task, &cset->mg_tasks, cg_list) {
			if (count++ <= MAX_TASKS_SHOWN_PER_CSS)
				seq_printf(seq, " task %d\n",
					   task_pid_vnr(task));
		}
		/* show # of overflowed tasks */
		if (count > MAX_TASKS_SHOWN_PER_CSS)
			seq_printf(seq, " ... (%d)\n",
				   count - MAX_TASKS_SHOWN_PER_CSS);

		if (cset->dead) {
			seq_puts(seq, " [dead]\n");
			dead_cnt++;
		}

		WARN_ON(count != cset->nr_tasks);
	}
	spin_unlock_irq(&css_set_lock);

	if (!dead_cnt && !extra_refs && !threaded_csets)
		return 0;

	seq_puts(seq, "\n");
	if (threaded_csets)
		seq_printf(seq, "threaded css_sets = %d\n", threaded_csets);
	if (extra_refs)
		seq_printf(seq, "extra references = %d\n", extra_refs);
	if (dead_cnt)
		seq_printf(seq, "dead css_sets = %d\n", dead_cnt);

	return 0;
}
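
/*
 * For each subsystem, print the css attached to this cgroup: its address,
 * id, online count and, when present, the id of its parent css.
 */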
static int cgroup_subsys_states_read(struct seq_file *seq, void *v)
{
	struct kernfs_open_file *of = seq->private;
	struct cgroup *cgrp;
	struct cgroup_subsys *ss;
	struct cgroup_subsys_state *css;
	char pbuf[16];
	int i;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	for_each_subsys(ss, i) {
		css = rcu_dereference_check(cgrp->subsys[ss->id], true);
		if (!css)
			continue;

		pbuf[0] = '\0';

		/* Show the parent CSS if applicable*/
		if (css->parent)
			snprintf(pbuf, sizeof(pbuf) - 1, " P=%d",
				 css->parent->id);
		seq_printf(seq, "%2d: %-4s\t- %lx[%d] %d%s\n", ss->id, ss->name,
			   (unsigned long)css, css->id,
			   atomic_read(&css->online_cnt), pbuf);
	}

	cgroup_kn_unlock(of->kn);
	return 0;
}
static void cgroup_masks_read_one(struct seq_file *seq, const char *name,
				  u16 mask)
{
	struct cgroup_subsys *ss;
	int ssid;
	bool first = true;

	seq_printf(seq, "%-17s: ", name);
	for_each_subsys(ss, ssid) {
		if (!(mask & (1 << ssid)))
			continue;

		if (!first)
			seq_puts(seq, ", ");
		seq_puts(seq, ss->name);
		first = false;
	}
	seq_putc(seq, '\n');
}
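
/* Show the cgroup's subtree_control and subtree_ss_mask controller masks. */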
static int cgroup_masks_read(struct seq_file *seq, void *v)
{
	struct kernfs_open_file *of = seq->private;
	struct cgroup *cgrp;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	cgroup_masks_read_one(seq, "subtree_control", cgrp->subtree_control);
	cgroup_masks_read_one(seq, "subtree_ss_mask", cgrp->subtree_ss_mask);

	cgroup_kn_unlock(of->kn);
	return 0;
}
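
/*
 * Report whether the cgroup is "releasable": no live tasks and no online
 * children. Only exposed on the v1 hierarchy below.
 */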
static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	return (!cgroup_is_populated(css->cgroup) &&
		!css_has_online_children(&css->cgroup->self));
}
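
/* Files exposed by the debug controller on the v1 (legacy) hierarchies. */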
static struct cftype debug_legacy_files[] = {
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},
	{
		.name = "current_css_set",
		.seq_show = current_css_set_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},

	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},

	{
		.name = "current_css_set_cg_links",
		.seq_show = current_css_set_cg_links_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},

	{
		.name = "cgroup_css_links",
		.seq_show = cgroup_css_links_read,
	},

	{
		.name = "cgroup_subsys_states",
		.seq_show = cgroup_subsys_states_read,
	},

	{
		.name = "cgroup_masks",
		.seq_show = cgroup_masks_read,
	},

	{
		.name = "releasable",
		.read_u64 = releasable_read,
	},

	{ }	/* terminate */
};
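
/*
 * Files exposed on the v2 (default) hierarchy; attached only when the
 * "cgroup_debug" boot parameter is used, see enable_cgroup_debug() below.
 */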
static struct cftype debug_files[] = {
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},
	{
		.name = "current_css_set",
		.seq_show = current_css_set_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},

	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},

	{
		.name = "current_css_set_cg_links",
		.seq_show = current_css_set_cg_links_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},
	{
		.name = "cgroup_css_links",
		.seq_show = cgroup_css_links_read,
	},

	{
		.name = "cgroup_subsys_states",
		.seq_show = cgroup_subsys_states_read,
	},

	{
		.name = "cgroup_masks",
		.seq_show = cgroup_masks_read,
	},

	{ }	/* terminate */
};
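
/*
 * On v1 the debug controller is an ordinary, explicitly mountable controller
 * using debug_legacy_files; the v2 cftypes and flags are filled in by
 * enable_cgroup_debug() below.
 */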
struct cgroup_subsys debug_cgrp_subsys = {
	.css_alloc = debug_css_alloc,
	.css_free = debug_css_free,
	.legacy_cftypes = debug_legacy_files,
};
/*
 * On v2, debug is an implicit controller enabled by "cgroup_debug" boot
 * parameter.
 */
static int __init enable_cgroup_debug(char *str)
{
	debug_cgrp_subsys.dfl_cftypes = debug_files;
	debug_cgrp_subsys.implicit_on_dfl = true;
	debug_cgrp_subsys.threaded = true;
	return 1;
}
__setup("cgroup_debug", enable_cgroup_debug);