/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#define MAX_KEY_LEN 100

static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT,
                            .cfq_leaf_weight = 2 * CFQ_WEIGHT_DEFAULT, };
EXPORT_SYMBOL_GPL(blkcg_root);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
static bool blkcg_policy_enabled(struct request_queue *q,
                                 const struct blkcg_policy *pol)
{
        return pol && test_bit(pol->plid, q->blkcg_pols);
}
/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
        int i;

        if (!blkg)
                return;

        for (i = 0; i < BLKCG_MAX_POLS; i++)
                kfree(blkg->pd[i]);

        blk_exit_rl(&blkg->rl);
        kfree(blkg);
}
/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
                                   gfp_t gfp_mask)
{
        struct blkcg_gq *blkg;
        int i;

        /* alloc and init base part */
        blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
        if (!blkg)
                return NULL;

        blkg->q = q;
        INIT_LIST_HEAD(&blkg->q_node);
        blkg->blkcg = blkcg;
        atomic_set(&blkg->refcnt, 1);

        /* root blkg uses @q->root_rl, init rl only for !root blkgs */
        if (blkcg != &blkcg_root) {
                if (blk_init_rl(&blkg->rl, q, gfp_mask))
                        goto err_free;
                blkg->rl.blkg = blkg;
        }

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkg_policy_data *pd;

                if (!blkcg_policy_enabled(q, pol))
                        continue;

                /* alloc per-policy data and attach it to blkg */
                pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
                if (!pd)
                        goto err_free;

                blkg->pd[i] = pd;
                pd->blkg = blkg;
                pd->plid = i;
        }

        return blkg;

err_free:
        blkg_free(blkg);
        return NULL;
}
/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and lookup hint is updated on success.
 */
struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
                               bool update_hint)
{
        struct blkcg_gq *blkg;

        blkg = rcu_dereference(blkcg->blkg_hint);
        if (blkg && blkg->q == q)
                return blkg;

        /*
         * Hint didn't match.  Look up from the radix tree.  Note that the
         * hint can only be updated under queue_lock as otherwise @blkg
         * could have already been removed from blkg_tree.  The caller is
         * responsible for grabbing queue_lock if @update_hint.
         */
        blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
        if (blkg && blkg->q == q) {
                if (update_hint) {
                        lockdep_assert_held(q->queue_lock);
                        rcu_assign_pointer(blkcg->blkg_hint, blkg);
                }
                return blkg;
        }

        return NULL;
}
/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
        WARN_ON_ONCE(!rcu_read_lock_held());

        if (unlikely(blk_queue_bypass(q)))
                return NULL;
        return __blkg_lookup(blkcg, q, false);
}
EXPORT_SYMBOL_GPL(blkg_lookup);
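/*
 * Illustrative sketch (not part of the original file): a policy's hot path
 * would typically resolve the blkg for the current bio roughly as below.
 * bio_blkcg() and blkg_to_pd() are helpers from blk-cgroup.h; the policy
 * name my_policy is hypothetical.
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(bio_blkcg(bio), q);
 *	if (blkg) {
 *		struct blkg_policy_data *pd = blkg_to_pd(blkg, &my_policy);
 *
 *		... use pd while still under the RCU read lock ...
 *	}
 *	rcu_read_unlock();
 */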
/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_ATOMIC.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
                                    struct request_queue *q,
                                    struct blkcg_gq *new_blkg)
{
        struct blkcg_gq *blkg;
        int i, ret;

        WARN_ON_ONCE(!rcu_read_lock_held());
        lockdep_assert_held(q->queue_lock);

        /* blkg holds a reference to blkcg */
        if (!css_tryget_online(&blkcg->css)) {
                ret = -EINVAL;
                goto err_free_blkg;
        }

        /* allocate */
        if (!new_blkg) {
                new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
                if (unlikely(!new_blkg)) {
                        ret = -ENOMEM;
                        goto err_put_css;
                }
        }
        blkg = new_blkg;

        /* link parent */
        if (blkcg_parent(blkcg)) {
                blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
                if (WARN_ON_ONCE(!blkg->parent)) {
                        ret = -EINVAL;
                        goto err_put_css;
                }
                blkg_get(blkg->parent);
        }

        /* invoke per-policy init */
        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];

                if (blkg->pd[i] && pol->pd_init_fn)
                        pol->pd_init_fn(blkg);
        }

        /* insert */
        spin_lock(&blkcg->lock);
        ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
        if (likely(!ret)) {
                hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
                list_add(&blkg->q_node, &q->blkg_list);

                for (i = 0; i < BLKCG_MAX_POLS; i++) {
                        struct blkcg_policy *pol = blkcg_policy[i];

                        if (blkg->pd[i] && pol->pd_online_fn)
                                pol->pd_online_fn(blkg);
                }
        }
        blkg->online = true;
        spin_unlock(&blkcg->lock);

        if (!ret) {
                if (blkcg == &blkcg_root) {
                        q->root_blkg = blkg;
                        q->root_rl.blkg = blkg;
                }
                return blkg;
        }

        /* @blkg failed to be fully initialized, use the usual release path */
        blkg_put(blkg);
        return ERR_PTR(ret);

err_put_css:
        css_put(&blkcg->css);
err_free_blkg:
        blkg_free(new_blkg);
        return ERR_PTR(ret);
}
/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg.  This function
 * should be called under RCU read lock and @q->queue_lock.
 *
 * Returns pointer to the looked up or created blkg on success, ERR_PTR()
 * value on error.  If @q is dead, returns ERR_PTR(-EINVAL).  If @q is not
 * dead and bypassing, returns ERR_PTR(-EBUSY).
 */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                                    struct request_queue *q)
{
        struct blkcg_gq *blkg;

        WARN_ON_ONCE(!rcu_read_lock_held());
        lockdep_assert_held(q->queue_lock);

        /*
         * This could be the first entry point of blkcg implementation and
         * we shouldn't allow anything to go through for a bypassing queue.
         */
        if (unlikely(blk_queue_bypass(q)))
                return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);

        blkg = __blkg_lookup(blkcg, q, true);
        if (blkg)
                return blkg;

        /*
         * Create blkgs walking down from blkcg_root to @blkcg, so that all
         * non-root blkgs have access to their parents.
         */
        while (true) {
                struct blkcg *pos = blkcg;
                struct blkcg *parent = blkcg_parent(blkcg);

                while (parent && !__blkg_lookup(parent, q, false)) {
                        pos = parent;
                        parent = blkcg_parent(parent);
                }

                blkg = blkg_create(pos, q, NULL);
                if (pos == blkcg || IS_ERR(blkg))
                        return blkg;
        }
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
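/*
 * Illustrative sketch (not part of the original file): callers that may
 * need to instantiate the blkg, e.g. when first charging IO to a group,
 * do so with both the RCU read lock and the queue lock held.  my_policy
 * is a hypothetical policy name.
 *
 *	rcu_read_lock();
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup_create(bio_blkcg(bio), q);
 *	if (!IS_ERR(blkg))
 *		... charge the IO against blkg_to_pd(blkg, &my_policy) ...
 *	spin_unlock_irq(q->queue_lock);
 *	rcu_read_unlock();
 */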
static void blkg_destroy(struct blkcg_gq *blkg)
{
        struct blkcg *blkcg = blkg->blkcg;
        int i;

        lockdep_assert_held(blkg->q->queue_lock);
        lockdep_assert_held(&blkcg->lock);

        /* Something wrong if we are trying to remove same group twice */
        WARN_ON_ONCE(list_empty(&blkg->q_node));
        WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];

                if (blkg->pd[i] && pol->pd_offline_fn)
                        pol->pd_offline_fn(blkg);
        }
        blkg->online = false;

        radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
        list_del_init(&blkg->q_node);
        hlist_del_init_rcu(&blkg->blkcg_node);

        /*
         * Both setting lookup hint to and clearing it from @blkg are done
         * under queue_lock.  If it's not pointing to @blkg now, it never
         * will.  Hint assignment itself can race safely.
         */
        if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
                rcu_assign_pointer(blkcg->blkg_hint, NULL);

        /*
         * If the root blkg is being destroyed, just clear the pointers
         * since root_rl does not take a reference on the root blkg.
         */
        if (blkcg == &blkcg_root) {
                blkg->q->root_blkg = NULL;
                blkg->q->root_rl.blkg = NULL;
        }

        /*
         * Put the reference taken at the time of creation so that when all
         * queues are gone, group can be destroyed.
         */
        blkg_put(blkg);
}
/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
        struct blkcg_gq *blkg, *n;

        lockdep_assert_held(q->queue_lock);

        list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
                struct blkcg *blkcg = blkg->blkcg;

                spin_lock(&blkcg->lock);
                blkg_destroy(blkg);
                spin_unlock(&blkcg->lock);
        }
}
/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid.  For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
void __blkg_release_rcu(struct rcu_head *rcu_head)
{
        struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);
        int i;

        /* tell policies that this one is being freed */
        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];

                if (blkg->pd[i] && pol->pd_exit_fn)
                        pol->pd_exit_fn(blkg);
        }

        /* release the blkcg and parent blkg refs this blkg has been holding */
        css_put(&blkg->blkcg->css);
        if (blkg->parent)
                blkg_put(blkg->parent);

        blkg_free(blkg);
}
EXPORT_SYMBOL_GPL(__blkg_release_rcu);
/*
 * The next function used by blk_queue_for_each_rl().  It's a bit tricky
 * because the root blkg uses @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
                                         struct request_queue *q)
{
        struct list_head *ent;
        struct blkcg_gq *blkg;

        /*
         * Determine the current blkg list_head.  The first entry is
         * root_rl which is off @q->blkg_list and mapped to the head.
         */
        if (rl == &q->root_rl) {
                ent = &q->blkg_list;
                /* There are no more block groups, hence no request lists */
                if (list_empty(ent))
                        return NULL;
        } else {
                blkg = container_of(rl, struct blkcg_gq, rl);
                ent = &blkg->q_node;
        }

        /* walk to the next list_head, skip root blkcg */
        ent = ent->next;
        if (ent == &q->root_blkg->q_node)
                ent = ent->next;
        if (ent == &q->blkg_list)
                return NULL;

        blkg = container_of(ent, struct blkcg_gq, q_node);
        return &blkg->rl;
}
static int blkcg_reset_stats(struct cgroup_subsys_state *css,
                             struct cftype *cftype, u64 val)
{
        struct blkcg *blkcg = css_to_blkcg(css);
        struct blkcg_gq *blkg;
        int i;

        /*
         * XXX: We invoke cgroup_add/rm_cftypes() under blkcg_pol_mutex
         * which ends up putting cgroup's internal cgroup_tree_mutex under
         * it; however, cgroup_tree_mutex is nested above cgroup file
         * active protection and grabbing blkcg_pol_mutex from a cgroup
         * file operation creates a possible circular dependency.  cgroup
         * internal locking is planned to go through further simplification
         * and this issue should go away soon.  For now, let's trylock
         * blkcg_pol_mutex and restart the write on failure.
         *
         * http://lkml.kernel.org/g/5363C04B.4010400@oracle.com
         */
        if (!mutex_trylock(&blkcg_pol_mutex))
                return restart_syscall();

        spin_lock_irq(&blkcg->lock);

        /*
         * Note that stat reset is racy - it doesn't synchronize against
         * stat updates.  This is a debug feature which shouldn't exist
         * anyway.  If you get hit by a race, retry.
         */
        hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
                for (i = 0; i < BLKCG_MAX_POLS; i++) {
                        struct blkcg_policy *pol = blkcg_policy[i];

                        if (blkcg_policy_enabled(blkg->q, pol) &&
                            pol->pd_reset_stats_fn)
                                pol->pd_reset_stats_fn(blkg);
                }
        }

        spin_unlock_irq(&blkcg->lock);
        mutex_unlock(&blkcg_pol_mutex);
        return 0;
}
static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
        /* some drivers (floppy) instantiate a queue w/o disk registered */
        if (blkg->q->backing_dev_info.dev)
                return dev_name(blkg->q->backing_dev_info.dev);
        return NULL;
}
/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: whether to print out the sum of the @prfill return values
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held.  If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
                       u64 (*prfill)(struct seq_file *,
                                     struct blkg_policy_data *, int),
                       const struct blkcg_policy *pol, int data,
                       bool show_total)
{
        struct blkcg_gq *blkg;
        u64 total = 0;

        rcu_read_lock();
        hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
                spin_lock_irq(blkg->q->queue_lock);
                if (blkcg_policy_enabled(blkg->q, pol))
                        total += prfill(sf, blkg->pd[pol->plid], data);
                spin_unlock_irq(blkg->q->queue_lock);
        }
        rcu_read_unlock();

        if (show_total)
                seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
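/*
 * Illustrative sketch (not part of the original file): a policy usually
 * wires this up from a cftype show callback, passing its own prfill
 * function.  my_policy and my_prfill are hypothetical names; the
 * seq_css()/css_to_blkcg() pattern mirrors existing users such as
 * cfq-iosched.
 *
 *	static int my_print_stat(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), my_prfill,
 *				  &my_policy, 0, false);
 *		return 0;
 *	}
 */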
/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
        const char *dname = blkg_dev_name(pd->blkg);

        if (!dname)
                return 0;

        seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
        return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                         const struct blkg_rwstat *rwstat)
{
        static const char *rwstr[] = {
                [BLKG_RWSTAT_READ]      = "Read",
                [BLKG_RWSTAT_WRITE]     = "Write",
                [BLKG_RWSTAT_SYNC]      = "Sync",
                [BLKG_RWSTAT_ASYNC]     = "Async",
        };
        const char *dname = blkg_dev_name(pd->blkg);
        u64 v;
        int i;

        if (!dname)
                return 0;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
                           (unsigned long long)rwstat->cnt[i]);

        v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
        seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
        return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);
/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
        return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);
/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                       int off)
{
        struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

        return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
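/*
 * Illustrative sketch (not part of the original file): combining
 * blkcg_print_blkgs() with blkg_prfill_rwstat() and an offset into the
 * per-policy data prints one rwstat line per device.  my_policy,
 * struct my_pd and its serviced_bytes field are hypothetical.
 *
 *	static int my_print_serviced_bytes(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  blkg_prfill_rwstat, &my_policy,
 *				  offsetof(struct my_pd, serviced_bytes), true);
 *		return 0;
 *	}
 */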
/**
 * blkg_stat_recursive_sum - collect hierarchical blkg_stat
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * Collect the blkg_stat specified by @off from @pd and all its online
 * descendants and return the sum.  The caller must be holding the queue
 * lock for online tests.
 */
u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
{
        struct blkcg_policy *pol = blkcg_policy[pd->plid];
        struct blkcg_gq *pos_blkg;
        struct cgroup_subsys_state *pos_css;
        u64 sum = 0;

        lockdep_assert_held(pd->blkg->q->queue_lock);

        rcu_read_lock();
        blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
                struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
                struct blkg_stat *stat = (void *)pos_pd + off;

                if (pos_blkg->online)
                        sum += blkg_stat_read(stat);
        }
        rcu_read_unlock();

        return sum;
}
EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);
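/*
 * Illustrative sketch (not part of the original file): recursive stats are
 * normally exposed through a thin prfill wrapper, so the sum is taken per
 * blkg while blkcg_print_blkgs() already holds the matching queue lock.
 *
 *	static u64 my_prfill_stat_recursive(struct seq_file *sf,
 *					    struct blkg_policy_data *pd, int off)
 *	{
 *		return __blkg_prfill_u64(sf, pd,
 *					 blkg_stat_recursive_sum(pd, off));
 *	}
 */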
/**
 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * Collect the blkg_rwstat specified by @off from @pd and all its online
 * descendants and return the sum.  The caller must be holding the queue
 * lock for online tests.
 */
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
                                             int off)
{
        struct blkcg_policy *pol = blkcg_policy[pd->plid];
        struct blkcg_gq *pos_blkg;
        struct cgroup_subsys_state *pos_css;
        struct blkg_rwstat sum = { };
        int i;

        lockdep_assert_held(pd->blkg->q->queue_lock);

        rcu_read_lock();
        blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
                struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
                struct blkg_rwstat *rwstat = (void *)pos_pd + off;
                struct blkg_rwstat tmp;

                if (!pos_blkg->online)
                        continue;

                tmp = blkg_rwstat_read(rwstat);

                for (i = 0; i < BLKG_RWSTAT_NR; i++)
                        sum.cnt[i] += tmp.cnt[i];
        }
        rcu_read_unlock();

        return sum;
}
EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read lock and queue lock held and
 * must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                   const char *input, struct blkg_conf_ctx *ctx)
        __acquires(rcu) __acquires(disk->queue->queue_lock)
{
        struct gendisk *disk;
        struct blkcg_gq *blkg;
        unsigned int major, minor;
        unsigned long long v;
        int part, ret;

        if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
                return -EINVAL;

        disk = get_gendisk(MKDEV(major, minor), &part);
        if (!disk)
                return -EINVAL;

        rcu_read_lock();
        spin_lock_irq(disk->queue->queue_lock);

        if (blkcg_policy_enabled(disk->queue, pol))
                blkg = blkg_lookup_create(blkcg, disk->queue);
        else
                blkg = ERR_PTR(-EINVAL);

        if (IS_ERR(blkg)) {
                ret = PTR_ERR(blkg);
                rcu_read_unlock();
                spin_unlock_irq(disk->queue->queue_lock);
                put_disk(disk);
                /*
                 * If queue was bypassing, we should retry.  Do so after a
                 * short msleep().  It isn't strictly necessary but queue
                 * can be bypassing for some time and it's always nice to
                 * avoid busy looping.
                 */
                if (ret == -EBUSY) {
                        msleep(10);
                        ret = restart_syscall();
                }
                return ret;
        }

        ctx->disk = disk;
        ctx->blkg = blkg;
        ctx->v = v;
        return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);
/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
        __releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
        spin_unlock_irq(ctx->disk->queue->queue_lock);
        rcu_read_unlock();
        put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
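/*
 * Illustrative sketch (not part of the original file): a per-device
 * configuration write is normally structured around the prep/finish pair.
 * my_policy and the limit field of the hypothetical policy data are
 * placeholders for whatever the policy actually configures.
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &my_policy, buf, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	(RCU read lock and the device's queue lock are held at this point)
 *	blkg_to_pd(ctx.blkg, &my_policy)->limit = ctx.v;
 *
 *	blkg_conf_finish(&ctx);
 *	return 0;
 */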
static struct cftype blkcg_files[] = {
        {
                .name = "reset_stats",
                .write_u64 = blkcg_reset_stats,
        },
        { }     /* terminate */
};
/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away and is responsible
 * for shooting down all blkgs associated with @css.  blkgs should be
 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
        struct blkcg *blkcg = css_to_blkcg(css);

        spin_lock_irq(&blkcg->lock);

        while (!hlist_empty(&blkcg->blkg_list)) {
                struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
                                                    struct blkcg_gq, blkcg_node);
                struct request_queue *q = blkg->q;

                if (spin_trylock(q->queue_lock)) {
                        blkg_destroy(blkg);
                        spin_unlock(q->queue_lock);
                } else {
                        spin_unlock_irq(&blkcg->lock);
                        cpu_relax();
                        spin_lock_irq(&blkcg->lock);
                }
        }

        spin_unlock_irq(&blkcg->lock);
}
static void blkcg_css_free(struct cgroup_subsys_state *css)
{
        struct blkcg *blkcg = css_to_blkcg(css);

        if (blkcg != &blkcg_root)
                kfree(blkcg);
}
static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
        struct blkcg *blkcg;

        if (!parent_css) {
                blkcg = &blkcg_root;
                goto done;
        }

        blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
        if (!blkcg)
                return ERR_PTR(-ENOMEM);

        blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
        blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
done:
        spin_lock_init(&blkcg->lock);
        INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
        INIT_HLIST_HEAD(&blkcg->blkg_list);

        return &blkcg->css;
}
/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node().  Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
        return blk_throtl_init(q);
}
/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
        lockdep_assert_held(q->queue_lock);

        /*
         * @q could be exiting and already have destroyed all blkgs as
         * indicated by NULL root_blkg.  If so, don't confuse policies.
         */
        if (!q->root_blkg)
                return;

        blk_throtl_drain(q);
}
/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
        spin_lock_irq(q->queue_lock);
        blkg_destroy_all(q);
        spin_unlock_irq(q->queue_lock);

        blk_throtl_exit(q);
}
/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup_subsys_state *css,
                            struct cgroup_taskset *tset)
{
        struct task_struct *task;
        struct io_context *ioc;
        int ret = 0;

        /* task_lock() is needed to avoid races with exit_io_context() */
        cgroup_taskset_for_each(task, tset) {
                task_lock(task);
                ioc = task->io_context;
                if (ioc && atomic_read(&ioc->nr_tasks) > 1)
                        ret = -EINVAL;
                task_unlock(task);
                if (ret)
                        break;
        }
        return ret;
}
struct cgroup_subsys blkio_cgrp_subsys = {
        .css_alloc = blkcg_css_alloc,
        .css_offline = blkcg_css_offline,
        .css_free = blkcg_css_free,
        .can_attach = blkcg_can_attach,
        .legacy_cftypes = blkcg_files,
#ifdef CONFIG_MEMCG
        /*
         * This ensures that, if available, memcg is automatically enabled
         * together on the default hierarchy so that the owner cgroup can
         * be retrieved from writeback pages.
         */
        .depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(blkio_cgrp_subsys);
/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
                          const struct blkcg_policy *pol)
{
        LIST_HEAD(pds);
        struct blkcg_gq *blkg, *new_blkg;
        struct blkg_policy_data *pd, *n;
        int cnt = 0, ret;
        bool preloaded;

        if (blkcg_policy_enabled(q, pol))
                return 0;

        /* preallocations for root blkg */
        new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
        if (!new_blkg)
                return -ENOMEM;

        blk_queue_bypass_start(q);

        preloaded = !radix_tree_preload(GFP_KERNEL);

        /*
         * Make sure the root blkg exists and count the existing blkgs.  As
         * @q is bypassing at this point, blkg_lookup_create() can't be
         * used.  Open code it.
         */
        spin_lock_irq(q->queue_lock);

        rcu_read_lock();
        blkg = __blkg_lookup(&blkcg_root, q, false);
        if (blkg)
                blkg_free(new_blkg);
        else
                blkg = blkg_create(&blkcg_root, q, new_blkg);
        rcu_read_unlock();

        if (preloaded)
                radix_tree_preload_end();

        if (IS_ERR(blkg)) {
                ret = PTR_ERR(blkg);
                goto out_unlock;
        }

        list_for_each_entry(blkg, &q->blkg_list, q_node)
                cnt++;

        spin_unlock_irq(q->queue_lock);

        /* allocate policy_data for all existing blkgs */
        while (cnt--) {
                pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
                if (!pd) {
                        ret = -ENOMEM;
                        goto out_free;
                }
                list_add_tail(&pd->alloc_node, &pds);
        }

        /*
         * Install the allocated pds.  With @q bypassing, no new blkg
         * should have been created while the queue lock was dropped.
         */
        spin_lock_irq(q->queue_lock);

        list_for_each_entry(blkg, &q->blkg_list, q_node) {
                if (WARN_ON(list_empty(&pds))) {
                        /* umm... this shouldn't happen, just abort */
                        ret = -ENOMEM;
                        goto out_unlock;
                }
                pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
                list_del_init(&pd->alloc_node);

                /* grab blkcg lock too while installing @pd on @blkg */
                spin_lock(&blkg->blkcg->lock);

                blkg->pd[pol->plid] = pd;
                pd->blkg = blkg;
                pd->plid = pol->plid;
                pol->pd_init_fn(blkg);

                spin_unlock(&blkg->blkcg->lock);
        }

        __set_bit(pol->plid, q->blkcg_pols);
        ret = 0;

out_unlock:
        spin_unlock_irq(q->queue_lock);
out_free:
        blk_queue_bypass_end(q);
        list_for_each_entry_safe(pd, n, &pds, alloc_node)
                kfree(pd);
        return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
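/*
 * Illustrative sketch (not part of the original file): a policy activates
 * itself on a queue from process context, typically while setting the
 * queue up, and deactivates on teardown.  my_policy is hypothetical.
 *
 *	ret = blkcg_activate_policy(q, &my_policy);
 *	if (ret)
 *		return ret;
 *	...
 *	blkcg_deactivate_policy(q, &my_policy);
 */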
/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
                             const struct blkcg_policy *pol)
{
        struct blkcg_gq *blkg;

        if (!blkcg_policy_enabled(q, pol))
                return;

        blk_queue_bypass_start(q);
        spin_lock_irq(q->queue_lock);

        __clear_bit(pol->plid, q->blkcg_pols);

        /* if no policy is left, no need for blkgs - shoot them down */
        if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
                blkg_destroy_all(q);

        list_for_each_entry(blkg, &q->blkg_list, q_node) {
                /* grab blkcg lock too while removing @pd from @blkg */
                spin_lock(&blkg->blkcg->lock);

                if (pol->pd_offline_fn)
                        pol->pd_offline_fn(blkg);
                if (pol->pd_exit_fn)
                        pol->pd_exit_fn(blkg);

                kfree(blkg->pd[pol->plid]);
                blkg->pd[pol->plid] = NULL;

                spin_unlock(&blkg->blkcg->lock);
        }

        spin_unlock_irq(q->queue_lock);
        blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
        int i, ret;

        if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
                return -EINVAL;

        mutex_lock(&blkcg_pol_mutex);

        /* find an empty slot */
        ret = -ENOSPC;
        for (i = 0; i < BLKCG_MAX_POLS; i++)
                if (!blkcg_policy[i])
                        break;
        if (i >= BLKCG_MAX_POLS)
                goto out_unlock;

        /* register and update blkgs */
        pol->plid = i;
        blkcg_policy[i] = pol;

        /* everything is in place, add intf files for the new policy */
        if (pol->cftypes)
                WARN_ON(cgroup_add_legacy_cftypes(&blkio_cgrp_subsys,
                                                  pol->cftypes));
        ret = 0;
out_unlock:
        mutex_unlock(&blkcg_pol_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);
/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
        mutex_lock(&blkcg_pol_mutex);

        if (WARN_ON(blkcg_policy[pol->plid] != pol))
                goto out_unlock;

        /* kill the intf files first */
        if (pol->cftypes)
                cgroup_rm_cftypes(pol->cftypes);

        /* unregister and update blkgs */
        blkcg_policy[pol->plid] = NULL;
out_unlock:
        mutex_unlock(&blkcg_pol_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
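/*
 * Illustrative sketch (not part of the original file): a policy describes
 * itself with a struct blkcg_policy and registers it at module init time,
 * mirroring in-tree users such as cfq-iosched and blk-throttle.  All my_*
 * names are hypothetical.
 *
 *	static struct blkcg_policy my_policy = {
 *		.pd_size	= sizeof(struct my_pd),
 *		.cftypes	= my_blkcg_files,
 *		.pd_init_fn	= my_pd_init,
 *		.pd_exit_fn	= my_pd_exit,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return blkcg_policy_register(&my_policy);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		blkcg_policy_unregister(&my_policy);
 *	}
 */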