/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 */

#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
#include <linux/kernel_stat.h>

#include <linux/cgroup-defs.h>

struct kernel_clone_args;

/*
 * All weight knobs on the default hierarchy should use the following min,
 * default and max values.  The default value is the logarithmic center of
 * MIN and MAX and allows 100x to be expressed in both directions.
 */
#define CGROUP_WEIGHT_MIN		1
#define CGROUP_WEIGHT_DFL		100
#define CGROUP_WEIGHT_MAX		10000
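
/*
 * For instance, relative to CGROUP_WEIGHT_DFL (100), CGROUP_WEIGHT_MIN (1)
 * expresses a 100x reduction and CGROUP_WEIGHT_MAX (10000) a 100x increase.
 */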

#ifdef CONFIG_CGROUPS

enum {
	CSS_TASK_ITER_PROCS    = (1U << 0),  /* walk only threadgroup leaders */
	CSS_TASK_ITER_THREADED = (1U << 1),  /* walk all threaded css_sets in the domain */
	CSS_TASK_ITER_SKIPPED  = (1U << 16), /* internal flags */
};

/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
	struct cgroup_subsys		*ss;

	struct list_head		*cset_pos;
	struct list_head		*cset_head;

	struct list_head		*tcset_pos;
	struct list_head		*tcset_head;

	struct list_head		*task_pos;

	struct list_head		*cur_tasks_head;
	struct css_set			*cur_cset;
	struct css_set			*cur_dcset;
	struct task_struct		*cur_task;
	struct list_head		iters_node;	/* css_set->task_iters */
};

extern struct file_system_type cgroup_fs_type;
extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;
extern spinlock_t css_set_lock;

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x)								\
	extern struct static_key_true _x ## _cgrp_subsys_enabled_key;		\
	extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

/**
 * cgroup_subsys_enabled - fast test on whether a subsys is enabled
 * @ss: subsystem in question
 */
#define cgroup_subsys_enabled(ss)					\
	static_branch_likely(&ss ## _enabled_key)

/**
 * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
 * @ss: subsystem in question
 */
#define cgroup_subsys_on_dfl(ss)					\
	static_branch_likely(&ss ## _on_dfl_key)
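
/*
 * For example, cgroup_subsys_enabled(memory_cgrp_subsys) expands to a
 * static-branch test of memory_cgrp_subsys_enabled_key, i.e. whether the
 * memory controller is enabled on this boot (illustrative usage only).
 */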

bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
					 struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
					     struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
							struct cgroup_subsys *ss);

struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);
struct cgroup *cgroup_v1v2_get_from_fd(int fd);

int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);
void cgroup_file_show(struct cgroup_file *cfile, bool show);

int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk);

void cgroup_fork(struct task_struct *p);
extern int cgroup_can_fork(struct task_struct *p,
			   struct kernel_clone_args *kargs);
extern void cgroup_cancel_fork(struct task_struct *p,
			       struct kernel_clone_args *kargs);
extern void cgroup_post_fork(struct task_struct *p,
			     struct kernel_clone_args *kargs);
void cgroup_exit(struct task_struct *p);
void cgroup_release(struct task_struct *p);
void cgroup_free(struct task_struct *p);

int cgroup_init_early(void);
int cgroup_init(void);

int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v);

/*
 * Iteration helpers and macros.
 */

struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
					   struct cgroup_subsys_state *parent);
struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
						    struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
						     struct cgroup_subsys_state *css);

struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
					 struct cgroup_subsys_state **dst_cssp);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
					struct cgroup_subsys_state **dst_cssp);

void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
			 struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);
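
/*
 * Illustrative sketch (not part of the API itself): walking every task
 * attached to a pinned @css, where handle_task() stands in for whatever
 * per-task work the caller needs:
 *
 *	struct css_task_iter it;
 *	struct task_struct *task;
 *
 *	css_task_iter_start(css, 0, &it);
 *	while ((task = css_task_iter_next(&it)))
 *		handle_task(task);
 *	css_task_iter_end(&it);
 */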

/**
 * css_for_each_child - iterate through children of a css
 * @pos: the css * to use as the loop cursor
 * @parent: css whose children to walk
 *
 * Walk @parent's children.  Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * It is allowed to temporarily drop RCU read lock during iteration.  The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_child(pos, parent)					\
	for ((pos) = css_next_child(NULL, (parent)); (pos);		\
	     (pos) = css_next_child((pos), (parent)))
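
/*
 * A minimal usage sketch, assuming @parent_css is already pinned by the
 * caller and visit() is a caller-supplied helper:
 *
 *	struct cgroup_subsys_state *pos;
 *
 *	rcu_read_lock();
 *	css_for_each_child(pos, parent_css)
 *		visit(pos);
 *	rcu_read_unlock();
 */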

/**
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @root: css whose descendants to walk
 *
 * Walk @root's descendants.  @root is included in the iteration and the
 * first node to be visited.  Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * For example, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@css)
 * {
 *	Lock @css's parent and @css;
 *	Inherit state from the parent;
 *	Unlock both.
 * }
 *
 * my_update_state(@css)
 * {
 *	css_for_each_descendant_pre(@pos, @css) {
 *		Lock @pos;
 *		if (@pos == @css)
 *			Update @css's state;
 *		else
 *			Verify @pos is alive and inherit state from its parent;
 *		Unlock @pos;
 *	}
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting.  The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state.  It's guaranteed that at least one
 * inheritance happens for any css after the latest update to its parent.
 *
 * If checking parent's state requires locking the parent, each inheriting
 * iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration.  The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_descendant_pre(pos, css)				\
	for ((pos) = css_next_descendant_pre(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_pre((pos), (css)))

/**
 * css_for_each_descendant_post - post-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Similar to css_for_each_descendant_pre() but performs post-order
 * traversal instead.  @root is included in the iteration and the last
 * node to be visited.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * Note that the walk visibility guarantee example described in pre-order
 * walk doesn't apply the same to post-order walks.
 */
#define css_for_each_descendant_post(pos, css)				\
	for ((pos) = css_next_descendant_post(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_post((pos), (css)))
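
/*
 * Sketch of a bottom-up pass, assuming @root_css is pinned and drain_one()
 * is a caller-supplied helper; every child is visited before its parent and
 * @root_css comes last:
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_post(pos, root_css)
 *		drain_one(pos);
 *	rcu_read_unlock();
 */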

/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * @tset may contain multiple tasks and they may belong to multiple
 * processes.
 *
 * On the v2 hierarchy, there may be tasks from multiple processes and they
 * may not share the source or destination csses.
 *
 * On traditional hierarchies, when there are multiple tasks in @tset, if a
 * task of a process is in @tset, all tasks of the process are in @tset.
 * Also, all are guaranteed to share the same source and destination csses.
 *
 * Iteration is not in any specific order.
 */
#define cgroup_taskset_for_each(task, dst_css, tset)			\
	for ((task) = cgroup_taskset_first((tset), &(dst_css));	\
	     (task);							\
	     (task) = cgroup_taskset_next((tset), &(dst_css)))
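
/*
 * For illustration, a subsystem's ->attach() callback typically walks the
 * taskset like this; my_attach() and attach_one() are hypothetical names
 * used only for the sketch:
 *
 *	static void my_attach(struct cgroup_taskset *tset)
 *	{
 *		struct task_struct *task;
 *		struct cgroup_subsys_state *css;
 *
 *		cgroup_taskset_for_each(task, css, tset)
 *			attach_one(css, task);
 *	}
 */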

/**
 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
 * @leader: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * Iterate threadgroup leaders of @tset.  For single-task migrations, @tset
 * may not contain any.
 */
#define cgroup_taskset_for_each_leader(leader, dst_css, tset)		\
	for ((leader) = cgroup_taskset_first((tset), &(dst_css));	\
	     (leader);							\
	     (leader) = cgroup_taskset_next((tset), &(dst_css)))	\
		if ((leader) != (leader)->group_leader)			\
			;						\
		else

#ifdef CONFIG_DEBUG_CGROUP_REF
void css_get(struct cgroup_subsys_state *css);
void css_get_many(struct cgroup_subsys_state *css, unsigned int n);
bool css_tryget(struct cgroup_subsys_state *css);
bool css_tryget_online(struct cgroup_subsys_state *css);
void css_put(struct cgroup_subsys_state *css);
void css_put_many(struct cgroup_subsys_state *css, unsigned int n);
#else
#define CGROUP_REF_FN_ATTRS	static inline
#define CGROUP_REF_EXPORT(fn)
#include <linux/cgroup_refcnt.h>
#endif

static inline u64 cgroup_id(const struct cgroup *cgrp)
{
	return cgrp->kn->id;
}

/**
 * css_is_dying - test whether the specified css is dying
 * @css: target css
 *
 * Test whether @css is in the process of offlining or already offline.  In
 * most cases, ->css_online() and ->css_offline() callbacks should be
 * enough; however, the actual offline operations are RCU delayed and this
 * test returns %true also when @css is scheduled to be offlined.
 *
 * This is useful, for example, when the use case requires synchronous
 * behavior with respect to cgroup removal.  cgroup removal schedules css
 * offlining but the css can seem alive while the operation is being
 * delayed.  If the delay affects user visible semantics, this test can be
 * used to resolve the situation.
 */
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
	return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
}
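
/*
 * Sketch: a controller that must synchronously refuse new work against a
 * cgroup being removed could bail out early (the error code is illustrative):
 *
 *	if (css_is_dying(css))
 *		return -ENODEV;
 */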

static inline void cgroup_get(struct cgroup *cgrp)
{
	css_get(&cgrp->self);
}

static inline bool cgroup_tryget(struct cgroup *cgrp)
{
	return css_tryget(&cgrp->self);
}

static inline void cgroup_put(struct cgroup *cgrp)
{
	css_put(&cgrp->self);
}

extern struct mutex cgroup_mutex;

static inline void cgroup_lock(void)
{
	mutex_lock(&cgroup_mutex);
}

static inline void cgroup_unlock(void)
{
	mutex_unlock(&cgroup_mutex);
}

/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected, initialized and exited while holding
 * task_lock(), and can only be modified while holding both cgroup_mutex
 * and task_lock() while the task is alive.  This macro verifies that the
 * caller is inside proper critical section and returns @task's css_set.
 *
 * The caller can also specify additional allowed conditions via @__c, such
 * as locks used during the cgroup_subsys::attach() methods.
 */
#ifdef CONFIG_PROVE_RCU
#define task_css_set_check(task, __c)					\
	rcu_dereference_check((task)->cgroups,				\
			      rcu_read_lock_sched_held() ||		\
			      lockdep_is_held(&cgroup_mutex) ||		\
			      lockdep_is_held(&css_set_lock) ||		\
			      ((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c)					\
	rcu_dereference((task)->cgroups)
#endif

/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair.  The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c)				\
	task_css_set_check((task), (__c))->subsys[(subsys_id)]
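
/*
 * Illustrative sketch only: a controller holding its own my_lock (a
 * hypothetical lock) across migrations could dereference a task's css as
 *
 *	css = task_css_check(task, my_cgrp_id, lockdep_is_held(&my_lock));
 *
 * where my_cgrp_id stands in for the controller's subsystem ID.
 */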

/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check().
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
	return task_css_set_check(task, false);
}

/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check().
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
						   int subsys_id)
{
	return task_css_check(task, subsys_id, false);
}

/**
 * task_get_css - find and get the css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Find the css for the (@task, @subsys_id) combination, increment a
 * reference on and return it.  This function is guaranteed to return a
 * valid css.  The returned css may already have been offlined.
 */
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	while (true) {
		css = task_css(task, subsys_id);
		/*
		 * Can't use css_tryget_online() here.  A task which has
		 * PF_EXITING set may stay associated with an offline css.
		 * If such task calls this function, css_tryget_online()
		 * will keep failing.
		 */
		if (likely(css_tryget(css)))
			break;
		cpu_relax();
	}
	rcu_read_unlock();
	return css;
}

/**
 * task_css_is_root - test whether a task belongs to the root css
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Test whether @task belongs to the root css on the specified subsystem.
 * May be invoked in any context.
 */
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
{
	return task_css_check(task, subsys_id, true) ==
		init_css_set.subsys[subsys_id];
}

static inline struct cgroup *task_cgroup(struct task_struct *task,
					 int subsys_id)
{
	return task_css(task, subsys_id)->cgroup;
}

static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
{
	return task_css_set(task)->dfl_cgrp;
}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	struct cgroup_subsys_state *parent_css = cgrp->self.parent;

	if (parent_css)
		return container_of(parent_css, struct cgroup, self);
	return NULL;
}

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor.  It also returns %true
 * if @cgrp == @ancestor.  This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
static inline bool cgroup_is_descendant(struct cgroup *cgrp,
					struct cgroup *ancestor)
{
	if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
		return false;
	return cgrp->ancestors[ancestor->level] == ancestor;
}

/**
 * cgroup_ancestor - find ancestor of cgroup
 * @cgrp: cgroup to find ancestor of
 * @ancestor_level: level of ancestor to find starting from root
 *
 * Find ancestor of cgroup at specified level starting from root if it exists
 * and return pointer to it.  Return NULL if @cgrp doesn't have ancestor at
 * @ancestor_level.
 *
 * This function is safe to call as long as @cgrp is accessible.
 */
static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
					     int ancestor_level)
{
	if (ancestor_level < 0 || ancestor_level > cgrp->level)
		return NULL;
	return cgrp->ancestors[ancestor_level];
}

/**
 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
 * It follows all the same rules as cgroup_is_descendant, and only applies
 * to the default hierarchy.
 */
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	struct css_set *cset = task_css_set(task);

	return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
}

/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
	return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
		cgrp->nr_populated_threaded_children;
}

/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
	return kernfs_ino(cgrp->kn);
}

/* cft/css accessors for cftype->write() operation */
static inline struct cftype *of_cft(struct kernfs_open_file *of)
{
	return of->kn->priv;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);

/* cft/css accessors for cftype->seq_*() operations */
static inline struct cftype *seq_cft(struct seq_file *seq)
{
	return of_cft(seq->private);
}

static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
	return of_css(seq->private);
}

/*
 * Name / path handling functions.  All are thin wrappers around the kernfs
 * counterparts and can be called under any context.
 */

static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_name(cgrp->kn, buf, buflen);
}

static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_path(cgrp->kn, buf, buflen);
}

static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
{
	pr_cont_kernfs_name(cgrp->kn);
}

static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
{
	pr_cont_kernfs_path(cgrp->kn);
}

bool cgroup_psi_enabled(void);

static inline void cgroup_init_kthreadd(void)
{
	/*
	 * kthreadd is inherited by all kthreads, keep it in the root so
	 * that the new kthreads are guaranteed to stay in the root until
	 * initialization is finished.
	 */
	current->no_cgroup_migration = 1;
}

static inline void cgroup_kthread_ready(void)
{
	/*
	 * This kthread finished initialization.  The creator should have
	 * set PF_NO_SETAFFINITY if this kthread should stay in the root.
	 */
	current->no_cgroup_migration = 0;
}

void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen);
struct cgroup *cgroup_get_from_id(u64 id);

#else /* !CONFIG_CGROUPS */

struct cgroup_subsys_state;
struct cgroup;

static inline u64 cgroup_id(const struct cgroup *cgrp) { return 1; }
static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
static inline void cgroup_lock(void) {}
static inline void cgroup_unlock(void) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
					 struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
				    struct dentry *dentry) { return -EINVAL; }

static inline void cgroup_fork(struct task_struct *p) {}
static inline int cgroup_can_fork(struct task_struct *p,
				  struct kernel_clone_args *kargs) { return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p,
				      struct kernel_clone_args *kargs) {}
static inline void cgroup_post_fork(struct task_struct *p,
				    struct kernel_clone_args *kargs) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline void cgroup_release(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	return NULL;
}

static inline bool cgroup_psi_enabled(void)
{
	return false;
}

static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	return true;
}

static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
{}

#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUPS
/*
 * cgroup scalable recursive statistics.
 */
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
void cgroup_rstat_flush(struct cgroup *cgrp);
void cgroup_rstat_flush_hold(struct cgroup *cgrp);
void cgroup_rstat_flush_release(struct cgroup *cgrp);

/*
 * Basic resource stats.
 */
#ifdef CONFIG_CGROUP_CPUACCT
void cpuacct_charge(struct task_struct *tsk, u64 cputime);
void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
static inline void cpuacct_account_field(struct task_struct *tsk, int index,
					 u64 val) {}
#endif

void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
void __cgroup_account_cputime_field(struct cgroup *cgrp,
				    enum cpu_usage_stat index, u64 delta_exec);

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_charge(task, delta_exec);

	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime(cgrp, delta_exec);
}

static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_account_field(task, index, delta_exec);

	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime_field(cgrp, index, delta_exec);
}

#else	/* CONFIG_CGROUPS */

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec) {}
static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec) {}

#endif	/* CONFIG_CGROUPS */

/*
 * sock->sk_cgrp_data handling.  For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_clone(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);

static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
	return skcd->cgroup;
}

#else	/* CONFIG_SOCK_CGROUP_DATA */

static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}

#endif	/* CONFIG_SOCK_CGROUP_DATA */

struct cgroup_namespace {
	struct ns_common	ns;
	struct user_namespace	*user_ns;
	struct ucounts		*ucounts;
	struct css_set		*root_cset;
};

extern struct cgroup_namespace init_cgroup_ns;

#ifdef CONFIG_CGROUPS

void free_cgroup_ns(struct cgroup_namespace *ns);

struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
					struct user_namespace *user_ns,
					struct cgroup_namespace *old_ns);

int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
		   struct cgroup_namespace *ns);

#else /* !CONFIG_CGROUPS */

static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
static inline struct cgroup_namespace *
copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
	       struct cgroup_namespace *old_ns)
{
	return old_ns;
}

#endif /* !CONFIG_CGROUPS */

static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns)
		refcount_inc(&ns->ns.count);
}

static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns && refcount_dec_and_test(&ns->ns.count))
		free_cgroup_ns(ns);
}

#ifdef CONFIG_CGROUPS

void cgroup_enter_frozen(void);
void cgroup_leave_frozen(bool always_leave);
void cgroup_update_frozen(struct cgroup *cgrp);
void cgroup_freeze(struct cgroup *cgrp, bool freeze);
void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src,
				 struct cgroup *dst);

static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return task->frozen;
}

#else /* !CONFIG_CGROUPS */

static inline void cgroup_enter_frozen(void) { }
static inline void cgroup_leave_frozen(bool always_leave) { }
static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return false;
}

#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUP_BPF
static inline void cgroup_bpf_get(struct cgroup *cgrp)
{
	percpu_ref_get(&cgrp->bpf.refcnt);
}

static inline void cgroup_bpf_put(struct cgroup *cgrp)
{
	percpu_ref_put(&cgrp->bpf.refcnt);
}

#else /* CONFIG_CGROUP_BPF */

static inline void cgroup_bpf_get(struct cgroup *cgrp) {}
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}

#endif /* CONFIG_CGROUP_BPF */

struct cgroup *task_get_cgroup1(struct task_struct *tsk, int hierarchy_id);

struct cgroup_of_peak *of_peak(struct kernfs_open_file *of);

#endif /* _LINUX_CGROUP_H */