/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/cgroup.h>
#include <linux/percpu_counter.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)
/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX
#ifdef CONFIG_BLK_CGROUP
enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};
struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};
/*
 * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
 * recursive.  Used to carry stats of dead children, and, for blkg_rwstat,
 * to carry result values from read and sum operations.
 */
struct blkg_stat {
	struct percpu_counter		cpu_cnt;
	atomic64_t			aux_cnt;
};
struct blkg_rwstat {
	struct percpu_counter		cpu_cnt[BLKG_RWSTAT_NR];
	atomic64_t			aux_cnt[BLKG_RWSTAT_NR];
};
/*
 * A blkcg_gq (blkg) is the association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate private data
 * area by allocating a larger data structure which embeds blkg_policy_data
 * at the end.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};
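/*
 * Example (illustrative sketch, not part of this interface): a hypothetical
 * policy can wrap blkg_policy_data in its own per-blkg structure and recover
 * the wrapper with container_of().  All example_* names below are made up.
 *
 *	struct example_blkg_data {
 *		u64			example_counter;
 *		struct blkg_policy_data	pd;
 *	};
 *
 *	static struct example_blkg_data *pd_to_example(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct example_blkg_data, pd) : NULL;
 *	}
 */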
/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated to it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate private data area by allocating a larger
 * data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};
/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_rwstat		stat_bytes;
	struct blkg_rwstat		stat_ios;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};
typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;
	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};
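/*
 * Example (illustrative sketch): minimal registration of a hypothetical
 * policy.  example_pd_alloc/example_pd_free are assumed helpers; ->plid is
 * assigned by blkcg_policy_register(), so it is not set here.
 *
 *	static struct blkcg_policy blkcg_policy_example = {
 *		.pd_alloc_fn	= example_pd_alloc,
 *		.pd_free_fn	= example_pd_free,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_example);
 *	}
 */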
extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;
struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);
/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);
const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);
int blkg_print_stat_bytes(struct seq_file *sf, void *v);
int blkg_print_stat_ios(struct seq_file *sf, void *v);
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);
u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
			    struct blkcg_policy *pol, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
					     struct blkcg_policy *pol, int off);
struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	char				*body;
};
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
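/*
 * Example (illustrative sketch): the usual shape of a cftype write handler
 * built on blkg_conf_prep()/blkg_conf_finish().  The handler name, the
 * parsing step and blkcg_policy_example are assumptions, not part of this
 * interface.
 *
 *	static ssize_t example_set_limit(struct kernfs_open_file *of,
 *					 char *buf, size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &blkcg_policy_example, buf, &ctx);
 *		if (ret)
 *			return ret;
 *		... parse ctx.body, then update blkg_to_pd(ctx.blkg, ...) ...
 *		blkg_conf_finish(&ctx);
 *		return nbytes;
 *	}
 */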
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}
static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	struct cgroup_subsys_state *css;

	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	css = kthread_blkcg();
	if (css)
		return css_to_blkcg(css);
	return css_to_blkcg(task_css(current, io_cgrp_id));
}
/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}
/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}
/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q, false);
}
/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}
static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}
/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}
static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}
/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}
/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}
void __blkg_release_rcu(struct rcu_head *rcu);
/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}
/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
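/*
 * Example (illustrative sketch): pre-order walk over @blkg and its online
 * descendants under the RCU read lock.
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *pos;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(pos, pos_css, blkg) {
 *		... accumulate per-blkg state from pos ...
 *	}
 *	rcu_read_unlock();
 */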
/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}
/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	if (rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}
/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}
/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
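/*
 * Example (illustrative sketch): walking every request_list of @q while
 * holding the queue_lock, as required above.
 *
 *	struct request_list *rl;
 *
 *	spin_lock_irq(q->queue_lock);
 *	blk_queue_for_each_rl(rl, q) {
 *		... inspect or drain each request_list ...
 *	}
 *	spin_unlock_irq(q->queue_lock);
 */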
static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
{
	int ret;

	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
	if (ret)
		return ret;

	atomic64_set(&stat->aux_cnt, 0);
	return 0;
}
static inline void blkg_stat_exit(struct blkg_stat *stat)
{
	percpu_counter_destroy(&stat->cpu_cnt);
}
/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller must ensure that IRQ on the same CPU
 * don't re-enter this function for the same counter.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}
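/*
 * Example (illustrative sketch): full lifecycle of a blkg_stat using the
 * helpers defined here.
 *
 *	struct blkg_stat st;
 *	uint64_t count;
 *
 *	if (blkg_stat_init(&st, GFP_KERNEL))
 *		return -ENOMEM;
 *	blkg_stat_add(&st, 1);
 *	count = blkg_stat_read(&st);
 *	blkg_stat_exit(&st);
 */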
/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	return percpu_counter_sum_positive(&stat->cpu_cnt);
}
/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	percpu_counter_set(&stat->cpu_cnt, 0);
	atomic64_set(&stat->aux_cnt, 0);
}
/**
 * blkg_stat_add_aux - add a blkg_stat into another's aux count
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_stat_add_aux(struct blkg_stat *to,
				     struct blkg_stat *from)
{
	atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
		     &to->aux_cnt);
}
static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
{
	int i, ret;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
		if (ret) {
			while (--i >= 0)
				percpu_counter_destroy(&rwstat->cpu_cnt[i]);
			return ret;
		}
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
	return 0;
}
static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		percpu_counter_destroy(&rwstat->cpu_cnt[i]);
}
/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @op: REQ_OP and flags
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @op.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   unsigned int op, uint64_t val)
{
	struct percpu_counter *cnt;

	if (op_is_write(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);

	if (op_is_sync(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
}
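/*
 * Example (illustrative sketch): charging a bio to a blkg's rwstats, the
 * same calls blkcg_bio_issue_check() makes below.
 *
 *	blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf, bio->bi_iter.bi_size);
 *	blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
 */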
/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it in the aux counts.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat result;
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_set(&result.aux_cnt[i],
			     percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
	return result;
}
/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
}
/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		percpu_counter_set(&rwstat->cpu_cnt[i], 0);
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
}
/**
 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
				       struct blkg_rwstat *from)
{
	u64 sum[BLKG_RWSTAT_NR];
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
			     &to->aux_cnt[i]);
}
#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
			   struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
				  struct bio *bio) { return false; }
#endif
static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool throtl = false;

	rcu_read_lock();
	blkcg = bio_blkcg(bio);

	/* associate blkcg if bio hasn't attached one */
	bio_associate_blkcg(bio, &blkcg->css);

	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg)) {
		spin_lock_irq(q->queue_lock);
		blkg = blkg_lookup_create(blkcg, q);
		if (IS_ERR(blkg))
			blkg = NULL;
		spin_unlock_irq(q->queue_lock);
	}

	throtl = blk_throtl_bio(q, blkg, bio);

	if (!throtl) {
		blkg = blkg ?: q->root_blkg;
		blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
				bio->bi_iter.bi_size);
		blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
	}

	rcu_read_unlock();
	return !throtl;
}
#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

#ifdef CONFIG_BLOCK
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */