#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/percpu_counter.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

/*
 * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
 * recursive.  Used to carry stats of dead children, and, for blkg_rwstat,
 * to carry result values from read and sum operations.
 */
struct blkg_stat {
	struct percpu_counter		cpu_cnt;
	atomic64_t			aux_cnt;
};

struct blkg_rwstat {
	struct percpu_counter		cpu_cnt[BLKG_RWSTAT_NR];
	atomic64_t			aux_cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private
 * data area by allocating a larger data structure which embeds
 * blkg_policy_data at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};

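/*
 * Illustrative sketch (not part of this header): per the comment above, a
 * policy's pd_alloc_fn() would typically return a larger structure that
 * embeds blkg_policy_data as its first member and recover it later with
 * container_of().  All "my_*" names below are hypothetical.
 *
 *	struct my_policy_pd {
 *		struct blkg_policy_data	pd;	<- must stay first
 *		u64			nr_dispatched;
 *	};
 *
 *	static inline struct my_policy_pd *pd_to_mypd(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct my_policy_pd, pd) : NULL;
 *	}
 */
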
/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated to it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_rwstat		stat_bytes;
	struct blkg_rwstat		stat_ios;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;
	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};

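/*
 * Illustrative sketch (not part of this header): a minimal policy fills in
 * only the callbacks it needs and registers itself once at init time;
 * plid is assigned by blkcg_policy_register().  "my_*" names are
 * hypothetical.
 *
 *	static struct blkcg_policy my_blkcg_policy = {
 *		.pd_alloc_fn	= my_pd_alloc,
 *		.pd_init_fn	= my_pd_init,
 *		.pd_free_fn	= my_pd_free,
 *	};
 *
 *	static int __init my_policy_init(void)
 *	{
 *		return blkcg_policy_register(&my_blkcg_policy);
 *	}
 */
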
extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);
int blkg_print_stat_bytes(struct seq_file *sf, void *v);
int blkg_print_stat_ios(struct seq_file *sf, void *v);
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);

u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
			    struct blkcg_policy *pol, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
					     struct blkcg_policy *pol, int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	char				*body;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

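/*
 * Illustrative sketch (not part of this header): blkg_conf_prep() and
 * blkg_conf_finish() bracket the body of a cgroup file write handler; on
 * success, prep returns with the RCU read lock and queue lock held, and
 * finish drops them.  "my_blkcg_policy" is a hypothetical policy.
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &my_blkcg_policy, buf, &ctx);
 *	if (ret)
 *		return ret;
 *	(parse ctx.body and apply the new settings to ctx.blkg)
 *	blkg_conf_finish(&ctx);
 */
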
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return css_to_blkcg(task_css(tsk, io_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return task_blkcg(current);
}

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return task_get_css(task, io_cgrp_id);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q, false);
}

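/*
 * Illustrative sketch (not part of this header): a typical RCU-protected
 * lookup.  The returned blkg is only guaranteed to stay valid inside the
 * RCU read-side critical section; take a reference with blkg_get() to use
 * it beyond that (see below).
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		(inspect blkg while still under rcu_read_lock())
 *	rcu_read_unlock();
 */
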
/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	char *p;

	p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	if (!p) {
		strncpy(buf, "<unavailable>", buflen);
		return -ENAMETOOLONG;
	}

	memmove(buf, p, buf + buflen - p);
	return 0;
}

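/*
 * Illustrative sketch (not part of this header): formatting the cgroup
 * path of a blkg for a debug message.
 *
 *	char path[128];
 *
 *	if (!blkg_path(blkg, path, sizeof(path)))
 *		pr_debug("operating on blkg %s\n", path);
 */
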
/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

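/*
 * Illustrative sketch (not part of this header): pairing blkg_get() and
 * blkg_put() so a blkg found under RCU can be used after the read-side
 * critical section ends.
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		blkg_get(blkg);
 *	rcu_read_unlock();
 *
 *	if (blkg) {
 *		(use blkg)
 *		blkg_put(blkg);
 *	}
 */
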
/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

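/*
 * Illustrative sketch (not part of this header): walking the online
 * descendants of a blkg with either iterator above.
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *d_blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(d_blkg, pos_css, blkg) {
 *		(inspect or update d_blkg)
 *	}
 *	rcu_read_unlock();
 */
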
/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	if (rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}

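/*
 * Illustrative sketch (not part of this header): how a request allocation
 * path would pair these helpers.  Under queue_lock, the allocator picks a
 * request_list, tags the request with it, and the free path later drops
 * the reference.
 *
 *	rl = blk_get_rl(q, bio);	(queue_lock held)
 *	blk_rq_set_rl(rq, rl);
 *
 *	(when the request is freed:)
 *	blk_put_rl(blk_rq_rl(rq));
 */
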
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);

/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

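/*
 * Illustrative sketch (not part of this header): visiting every
 * request_list of a queue, starting with @q->root_rl.
 *
 *	struct request_list *rl;
 *
 *	blk_queue_for_each_rl(rl, q)
 *		(operate on rl, queue_lock held)
 */
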
static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
{
	int ret;

	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
	if (ret)
		return ret;

	atomic64_set(&stat->aux_cnt, 0);
	return 0;
}

static inline void blkg_stat_exit(struct blkg_stat *stat)
{
	percpu_counter_destroy(&stat->cpu_cnt);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller must ensure that IRQs on the same CPU
 * don't re-enter this function for the same counter.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	__percpu_counter_add(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	return percpu_counter_sum_positive(&stat->cpu_cnt);
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	percpu_counter_set(&stat->cpu_cnt, 0);
	atomic64_set(&stat->aux_cnt, 0);
}

/**
 * blkg_stat_add_aux - add a blkg_stat into another's aux count
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_stat_add_aux(struct blkg_stat *to,
				     struct blkg_stat *from)
{
	atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
		     &to->aux_cnt);
}

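/*
 * Illustrative sketch (not part of this header): the typical blkg_stat
 * lifecycle inside a policy's pd_alloc/pd_free callbacks.  "mypd" and
 * "my_stat" are hypothetical names.
 *
 *	if (blkg_stat_init(&mypd->my_stat, gfp))
 *		(fail the allocation)
 *	blkg_stat_add(&mypd->my_stat, nbytes);
 *	total = blkg_stat_read(&mypd->my_stat);
 *	blkg_stat_exit(&mypd->my_stat);	(on pd_free)
 */
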
static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
{
	int i, ret;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
		if (ret) {
			while (--i >= 0)
				percpu_counter_destroy(&rwstat->cpu_cnt[i]);
			return ret;
		}
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
	return 0;
}

static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		percpu_counter_destroy(&rwstat->cpu_cnt[i]);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	struct percpu_counter *cnt;

	if (rw & REQ_WRITE)
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];

	__percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);

	if (rw & REQ_SYNC)
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];

	__percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it in the aux counts.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat result;
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_set(&result.aux_cnt[i],
			     percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
	return result;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
}

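/*
 * Illustrative sketch (not part of this header): accounting a bio into a
 * blkg_rwstat and reading the combined read+write byte count back.
 *
 *	blkg_rwstat_add(&rwstat, bio->bi_rw, bio->bi_iter.bi_size);
 *	nbytes = blkg_rwstat_total(&rwstat);
 */
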
/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		percpu_counter_set(&rwstat->cpu_cnt[i], 0);
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
}

/**
 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
				       struct blkg_rwstat *from)
{
	struct blkg_rwstat v = blkg_rwstat_read(from);
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_add(atomic64_read(&v.aux_cnt[i]) +
			     atomic64_read(&from->aux_cnt[i]),
			     &to->aux_cnt[i]);
}

#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
			   struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
				  struct bio *bio) { return false; }
#endif

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool throtl = false;

	rcu_read_lock();
	blkcg = bio_blkcg(bio);

	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg)) {
		spin_lock_irq(q->queue_lock);
		blkg = blkg_lookup_create(blkcg, q);
		if (IS_ERR(blkg))
			blkg = NULL;
		spin_unlock_irq(q->queue_lock);
	}

	throtl = blk_throtl_bio(q, blkg, bio);

	if (!throtl) {
		blkg = blkg ?: q->root_blkg;
		blkg_rwstat_add(&blkg->stat_bytes, bio->bi_rw,
				bio->bi_iter.bi_size);
		blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1);
	}

	rcu_read_unlock();
	return !throtl;
}

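/*
 * Illustrative sketch (not part of this header): the bio submission path
 * (see generic_make_request_checks() in block/blk-core.c) calls this check
 * and drops the bio when it returns %false, i.e. when the bio was consumed
 * by throttling, roughly:
 *
 *	if (!blkcg_bio_issue_check(q, bio))
 *		return;
 *	(otherwise proceed to hand the bio to the queue)
 */
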
#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return NULL;
}

#ifdef CONFIG_BLOCK

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */