#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

/* CFQ specific, out here for blkcg->cfq_weight */
#define CFQ_WEIGHT_MIN		10
#define CFQ_WEIGHT_MAX		1000
#define CFQ_WEIGHT_DEFAULT	500
#ifdef CONFIG_BLK_CGROUP
enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};
struct blkcg {
	struct cgroup_subsys_state	css;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	/* TODO: per-policy storage in blkcg */
	unsigned int			cfq_weight;	/* belongs to cfq */
	unsigned int			cfq_leaf_weight;
};
struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};
/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
 * together with blkg and invokes pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning and pd_size can't be smaller than pd.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;

	/* used during policy activation */
	struct list_head		alloc_node;
};
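/*
 * Example (illustration only, not part of this interface): a policy's
 * per-blkg data embeds struct blkg_policy_data as its first member and
 * advertises its full size via blkcg_policy->pd_size; a container_of()
 * helper then recovers the private type.  "example_grp" and "pd_to_eg"
 * are hypothetical names.
 *
 *	struct example_grp {
 *		struct blkg_policy_data	pd;
 *		unsigned int		weight;
 *	};
 *
 *	static struct example_grp *pd_to_eg(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct example_grp, pd) : NULL;
 *	}
 */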
/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};
typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);
struct blkcg_policy {
	int				plid;
	/* policy specific private data size */
	size_t				pd_size;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_exit_pd_fn		*pd_exit_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};
extern struct blkcg blkcg_root;
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);
/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off);
struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}
static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
}
static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return task_blkcg(current);
}
/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}
/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}
/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}
/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	char *p;

	p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	if (!p) {
		strncpy(buf, "<unavailable>", buflen);
		return -ENAMETOOLONG;
	}

	memmove(buf, p, buf + buflen - p);
	return 0;
}
/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}
void __blkg_release_rcu(struct rcu_head *rcu);
/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}
struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
			       bool update_hint);
/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
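/*
 * Example (sketch): pushing a configuration change down the hierarchy,
 * visiting parents before children.  "update_one" is a made-up helper.
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *d_blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)
 *		update_one(d_blkg);
 *	rcu_read_unlock();
 */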
/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup_create(blkcg, q);
	if (unlikely(IS_ERR(blkg)))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;

root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}
/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	/* root_rl may not have blkg set */
	if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}
/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}
/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);

/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
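/*
 * Example (sketch): walking every request_list of @q, e.g. to drain or
 * reinitialize them during queue teardown.  Must be called under
 * queue_lock; "process_one_rl" is a made-up helper.
 *
 *	struct request_list *rl;
 *
 *	blk_queue_for_each_rl(rl, q)
 *		process_one_rl(rl);
 */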
static inline void blkg_stat_init(struct blkg_stat *stat)
{
	u64_stats_init(&stat->syncp);
}
/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}
/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin_irq(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry_irq(&stat->syncp, start));

	return v;
}
/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}
/**
 * blkg_stat_merge - merge a blkg_stat into another
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count to @to.
 */
static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
{
	blkg_stat_add(to, blkg_stat_read(from));
}
static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
{
	u64_stats_init(&rwstat->syncp);
}
/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}
/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin_irq(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));

	return tmp;
}
/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}
/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}
/**
 * blkg_rwstat_merge - merge a blkg_rwstat into another
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's counts to @to.
 */
static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
				     struct blkg_rwstat *from)
{
	struct blkg_rwstat v = blkg_rwstat_read(from);
	int i;

	u64_stats_update_begin(&to->syncp);
	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		to->cnt[i] += v.cnt[i];
	u64_stats_update_end(&to->syncp);
}
#else	/* CONFIG_BLK_CGROUP */

struct cgroup;
struct blkcg;

struct blkg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */