/*
 * cgroups support for the BFQ I/O scheduler.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "bfq-iosched.h"

#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
/* bfqg stats flags */
enum bfqg_stats_flags {
	BFQG_stats_waiting = 0,
	BFQG_stats_idling,
	BFQG_stats_empty,
};

#define BFQG_FLAG_FNS(name)						\
static void bfqg_stats_mark_##name(struct bfqg_stats *stats)		\
{									\
	stats->flags |= (1 << BFQG_stats_##name);			\
}									\
static void bfqg_stats_clear_##name(struct bfqg_stats *stats)		\
{									\
	stats->flags &= ~(1 << BFQG_stats_##name);			\
}									\
static int bfqg_stats_##name(struct bfqg_stats *stats)			\
{									\
	return (stats->flags & (1 << BFQG_stats_##name)) != 0;		\
}									\

BFQG_FLAG_FNS(waiting)
BFQG_FLAG_FNS(idling)
BFQG_FLAG_FNS(empty)
#undef BFQG_FLAG_FNS
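/*
 * For instance, BFQG_FLAG_FNS(waiting) above expands to the helpers
 * bfqg_stats_mark_waiting(), bfqg_stats_clear_waiting() and
 * bfqg_stats_waiting(), which respectively set, clear and test the
 * BFQG_stats_waiting bit in stats->flags.
 */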
/* This should be called with the scheduler lock held. */
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{
	u64 now;

	if (!bfqg_stats_waiting(stats))
		return;

	now = ktime_get_ns();
	if (now > stats->start_group_wait_time)
		blkg_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
	bfqg_stats_clear_waiting(stats);
}
/* This should be called with the scheduler lock held. */
static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
						 struct bfq_group *curr_bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_waiting(stats))
		return;
	if (bfqg == curr_bfqg)
		return;
	stats->start_group_wait_time = ktime_get_ns();
	bfqg_stats_mark_waiting(stats);
}
/* This should be called with the scheduler lock held. */
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
{
	u64 now;

	if (!bfqg_stats_empty(stats))
		return;

	now = ktime_get_ns();
	if (now > stats->start_empty_time)
		blkg_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
	bfqg_stats_clear_empty(stats);
}
void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
{
	blkg_stat_add(&bfqg->stats.dequeue, 1);
}
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (blkg_rwstat_total(&stats->queued))
		return;

	/*
	 * group is already marked empty. This can happen if bfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (bfqg_stats_empty(stats))
		return;

	stats->start_empty_time = ktime_get_ns();
	bfqg_stats_mark_empty(stats);
}
void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_idling(stats)) {
		u64 now = ktime_get_ns();

		if (now > stats->start_idle_time)
			blkg_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
		bfqg_stats_clear_idling(stats);
	}
}
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	stats->start_idle_time = ktime_get_ns();
	bfqg_stats_mark_idling(stats);
}
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	blkg_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_total(&stats->queued));
	blkg_stat_add(&stats->avg_queue_size_samples, 1);
	bfqg_stats_update_group_wait_time(stats);
}
void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.queued, op, 1);
	bfqg_stats_end_empty_time(&bfqg->stats);
	if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
		bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
}
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.queued, op, -1);
}
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.merged, op, 1);
}
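/*
 * Note on the completion hook below: start_time_ns is when the request
 * entered the scheduler and io_start_time_ns is when it was dispatched
 * to the device, so the two deltas computed there feed the service_time
 * and wait_time statistics, respectively.
 */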
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, unsigned int op)
{
	struct bfqg_stats *stats = &bfqg->stats;
	u64 now = ktime_get_ns();

	if (now > io_start_time_ns)
		blkg_rwstat_add(&stats->service_time, op,
				now - io_start_time_ns);
	if (io_start_time_ns > start_time_ns)
		blkg_rwstat_add(&stats->wait_time, op,
				io_start_time_ns - start_time_ns);
}
#else /* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      unsigned int op) { }
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, unsigned int op) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }

#endif /* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
#ifdef CONFIG_BFQ_GROUP_IOSCHED

/*
 * blk-cgroup policy-related handlers
 * The following functions help in converting between blk-cgroup
 * internal structures and BFQ-specific structures.
 */
static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct bfq_group, pd) : NULL;
}

struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
{
	return pd_to_blkg(&bfqg->pd);
}

static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
{
	return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
}
/*
 * The following functions help in navigating the bfq_group hierarchy
 * by allowing to find the parent of a bfq_group or the bfq_group
 * associated to a bfq_queue.
 */
static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
{
	struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;

	return pblkg ? blkg_to_bfqg(pblkg) : NULL;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	struct bfq_entity *group_entity = bfqq->entity.parent;

	return group_entity ? container_of(group_entity, struct bfq_group,
					   entity) :
			      bfqq->bfqd->root_group;
}
/*
 * The following two functions handle get and put of a bfq_group by
 * wrapping the related blk-cgroup hooks.
 */

static void bfqg_get(struct bfq_group *bfqg)
{
	bfqg->ref++;
}

static void bfqg_put(struct bfq_group *bfqg)
{
	bfqg->ref--;

	if (bfqg->ref == 0)
		kfree(bfqg);
}

static void bfqg_and_blkg_get(struct bfq_group *bfqg)
{
	/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
	bfqg_get(bfqg);

	blkg_get(bfqg_to_blkg(bfqg));
}

void bfqg_and_blkg_put(struct bfq_group *bfqg)
{
	bfqg_put(bfqg);

	blkg_put(bfqg_to_blkg(bfqg));
}
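/*
 * The pair above thus takes/releases one reference on the bfq_group
 * itself and one on its blkg: the private bfqg refcount is what keeps
 * the bfq_group alive until no bfq_queue points to it any longer (see
 * the comments in bfq_bic_update_cgroup).
 */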
static void bfqg_stats_reset(struct bfqg_stats *stats)
{
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	blkg_stat_reset(&stats->time);
	blkg_stat_reset(&stats->avg_queue_size_sum);
	blkg_stat_reset(&stats->avg_queue_size_samples);
	blkg_stat_reset(&stats->dequeue);
	blkg_stat_reset(&stats->group_wait_time);
	blkg_stat_reset(&stats->idle_time);
	blkg_stat_reset(&stats->empty_time);
#endif
}
/* @to += @from */
static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
{
	if (!to || !from)
		return;

#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* queued stats shouldn't be cleared */
	blkg_rwstat_add_aux(&to->merged, &from->merged);
	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
	blkg_stat_add_aux(&to->time, &from->time);
	blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
	blkg_stat_add_aux(&to->avg_queue_size_samples,
			  &from->avg_queue_size_samples);
	blkg_stat_add_aux(&to->dequeue, &from->dequeue);
	blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
	blkg_stat_add_aux(&to->idle_time, &from->idle_time);
	blkg_stat_add_aux(&to->empty_time, &from->empty_time);
#endif
}
/*
 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this bfqg after
 * it's gone.
 */
static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
{
	struct bfq_group *parent;

	if (!bfqg) /* root_group */
		return;

	parent = bfqg_parent(bfqg);

	lockdep_assert_held(bfqg_to_blkg(bfqg)->q->queue_lock);

	if (unlikely(!parent))
		return;

	bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
	bfqg_stats_reset(&bfqg->stats);
}
void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
		/*
		 * Make sure that bfqg and its associated blkg do not
		 * disappear before entity.
		 */
		bfqg_and_blkg_get(bfqg);
	}
	entity->parent = bfqg->my_entity; /* NULL for root group */
	entity->sched_data = &bfqg->sched_data;
}
static void bfqg_stats_exit(struct bfqg_stats *stats)
{
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_rwstat_exit(&stats->merged);
	blkg_rwstat_exit(&stats->service_time);
	blkg_rwstat_exit(&stats->wait_time);
	blkg_rwstat_exit(&stats->queued);
	blkg_stat_exit(&stats->time);
	blkg_stat_exit(&stats->avg_queue_size_sum);
	blkg_stat_exit(&stats->avg_queue_size_samples);
	blkg_stat_exit(&stats->dequeue);
	blkg_stat_exit(&stats->group_wait_time);
	blkg_stat_exit(&stats->idle_time);
	blkg_stat_exit(&stats->empty_time);
#endif
}
static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (blkg_rwstat_init(&stats->merged, gfp) ||
	    blkg_rwstat_init(&stats->service_time, gfp) ||
	    blkg_rwstat_init(&stats->wait_time, gfp) ||
	    blkg_rwstat_init(&stats->queued, gfp) ||
	    blkg_stat_init(&stats->time, gfp) ||
	    blkg_stat_init(&stats->avg_queue_size_sum, gfp) ||
	    blkg_stat_init(&stats->avg_queue_size_samples, gfp) ||
	    blkg_stat_init(&stats->dequeue, gfp) ||
	    blkg_stat_init(&stats->group_wait_time, gfp) ||
	    blkg_stat_init(&stats->idle_time, gfp) ||
	    blkg_stat_init(&stats->empty_time, gfp)) {
		bfqg_stats_exit(stats);
		return -ENOMEM;
	}
#endif

	return 0;
}
static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
{
	return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
}

static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
{
	return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
}
static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
{
	struct bfq_group_data *bgd;

	bgd = kzalloc(sizeof(*bgd), gfp);
	if (!bgd)
		return NULL;
	return &bgd->pd;
}
static void bfq_cpd_init(struct blkcg_policy_data *cpd)
{
	struct bfq_group_data *d = cpd_to_bfqgd(cpd);

	d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
		CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
}
static void bfq_cpd_free(struct blkcg_policy_data *cpd)
{
	kfree(cpd_to_bfqgd(cpd));
}
static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
{
	struct bfq_group *bfqg;

	bfqg = kzalloc_node(sizeof(*bfqg), gfp, node);
	if (!bfqg)
		return NULL;

	if (bfqg_stats_init(&bfqg->stats, gfp)) {
		kfree(bfqg);
		return NULL;
	}

	/* see comments in bfq_bic_update_cgroup for why refcounting */
	bfqg_get(bfqg);
	return &bfqg->pd;
}
static void bfq_pd_init(struct blkg_policy_data *pd)
{
	struct blkcg_gq *blkg = pd_to_blkg(pd);
	struct bfq_group *bfqg = blkg_to_bfqg(blkg);
	struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
	struct bfq_entity *entity = &bfqg->entity;
	struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);

	entity->orig_weight = entity->weight = entity->new_weight = d->weight;
	entity->my_sched_data = &bfqg->sched_data;
	bfqg->my_entity = entity; /*
				   * the root_group's will be set to NULL
				   * in bfq_init_queue()
				   */
	bfqg->bfqd = bfqd;
	bfqg->active_entities = 0;
	bfqg->rq_pos_tree = RB_ROOT;
}
static void bfq_pd_free(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_exit(&bfqg->stats);
	bfqg_put(bfqg);
}
static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_reset(&bfqg->stats);
}
static void bfq_group_set_parent(struct bfq_group *bfqg,
				 struct bfq_group *parent)
{
	struct bfq_entity *entity;

	entity = &bfqg->entity;
	entity->parent = parent->my_entity;
	entity->sched_data = &parent->sched_data;
}
static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd,
					 struct blkcg *blkcg)
{
	struct blkcg_gq *blkg;

	blkg = blkg_lookup(blkcg, bfqd->queue);
	if (likely(blkg))
		return blkg_to_bfqg(blkg);
	return NULL;
}
struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
				     struct blkcg *blkcg)
{
	struct bfq_group *bfqg, *parent;
	struct bfq_entity *entity;

	bfqg = bfq_lookup_bfqg(bfqd, blkcg);
	if (unlikely(!bfqg))
		return NULL;

	/*
	 * Update chain of bfq_groups as we might be handling a leaf group
	 * which, along with some of its relatives, has not been hooked yet
	 * to the private hierarchy of BFQ.
	 */
	entity = &bfqg->entity;
	for_each_entity(entity) {
		bfqg = container_of(entity, struct bfq_group, entity);
		if (bfqg != bfqd->root_group) {
			parent = bfqg_parent(bfqg);
			if (!parent)
				parent = bfqd->root_group;
			bfq_group_set_parent(bfqg, parent);
		}
	}

	return bfqg;
}
/**
 * bfq_bfqq_move - migrate @bfqq to @bfqg.
 * @bfqd: queue descriptor.
 * @bfqq: the queue to move.
 * @bfqg: the group to move to.
 *
 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
 * it on the new one. Avoid putting the entity on the old group idle tree.
 *
 * Must be called under the scheduler lock, to make sure that the blkg
 * owning @bfqg does not disappear (see comments in
 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
 * pointers).
 */
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg)
{
	struct bfq_entity *entity = &bfqq->entity;

	/* If bfqq is empty, then bfq_bfqq_expire also invokes
	 * bfq_del_bfqq_busy, thereby removing bfqq and its entity
	 * from data structures related to current group. Otherwise we
	 * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
	 * we do below.
	 */
	if (bfqq == bfqd->in_service_queue)
		bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
				false, BFQQE_PREEMPTED);

	if (bfq_bfqq_busy(bfqq))
		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
	else if (entity->on_st)
		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
	bfqg_and_blkg_put(bfqq_group(bfqq));

	entity->parent = bfqg->my_entity;
	entity->sched_data = &bfqg->sched_data;
	/* pin down bfqg and its associated blkg */
	bfqg_and_blkg_get(bfqg);

	if (bfq_bfqq_busy(bfqq)) {
		bfq_pos_tree_add_move(bfqd, bfqq);
		bfq_activate_bfqq(bfqd, bfqq);
	}

	if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
		bfq_schedule_dispatch(bfqd);
}
/**
 * __bfq_bic_change_cgroup - move @bic to @cgroup.
 * @bfqd: the queue descriptor.
 * @bic: the bic to move.
 * @blkcg: the blk-cgroup to move to.
 *
 * Move bic to blkcg, assuming that bfqd->lock is held; which makes
 * sure that the reference to cgroup is valid across the call (see
 * comments in bfq_bic_update_cgroup on this issue)
 *
 * NOTE: an alternative approach might have been to store the current
 * cgroup in bfqq and getting a reference to it, reducing the lookup
 * time here, at the price of slightly more complex code.
 */
static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
						 struct bfq_io_cq *bic,
						 struct blkcg *blkcg)
{
	struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
	struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
	struct bfq_group *bfqg;
	struct bfq_entity *entity;

	bfqg = bfq_find_set_group(bfqd, blkcg);

	if (unlikely(!bfqg))
		bfqg = bfqd->root_group;

	if (async_bfqq) {
		entity = &async_bfqq->entity;

		if (entity->sched_data != &bfqg->sched_data) {
			bic_set_bfqq(bic, NULL, 0);
			bfq_log_bfqq(bfqd, async_bfqq,
				     "bic_change_group: %p %d",
				     async_bfqq, async_bfqq->ref);
			bfq_put_queue(async_bfqq);
		}
	}

	if (sync_bfqq) {
		entity = &sync_bfqq->entity;
		if (entity->sched_data != &bfqg->sched_data)
			bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
	}

	return bfqg;
}
void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{
	struct bfq_data *bfqd = bic_to_bfqd(bic);
	struct bfq_group *bfqg = NULL;
	uint64_t serial_nr;

	rcu_read_lock();
	serial_nr = bio_blkcg(bio)->css.serial_nr;

	/*
	 * Check whether blkcg has changed. The condition may trigger
	 * spuriously on a newly created cic but there's no harm.
	 */
	if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
		goto out;

	bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
	/*
	 * Update blkg_path for bfq_log_* functions. We cache this
	 * path, and update it here, for the following
	 * reasons. Operations on blkg objects in blk-cgroup are
	 * protected with the request_queue lock, and not with the
	 * lock that protects the instances of this scheduler
	 * (bfqd->lock). This exposes BFQ to the following sort of
	 * race.
	 *
	 * The blkg_lookup performed in bfq_get_queue, protected
	 * through rcu, may happen to return the address of a copy of
	 * the original blkg. If this is the case, then the
	 * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
	 * the blkg, is useless: it does not prevent blk-cgroup code
	 * from destroying both the original blkg and all objects
	 * directly or indirectly referred by the copy of the
	 * blkg.
	 *
	 * On the bright side, destroy operations on a blkg invoke, as
	 * a first step, hooks of the scheduler associated with the
	 * blkg. And these hooks are executed with bfqd->lock held for
	 * BFQ. As a consequence, for any blkg associated with the
	 * request queue this instance of the scheduler is attached
	 * to, we are guaranteed that such a blkg is not destroyed, and
	 * that all the pointers it contains are consistent, while we
	 * are holding bfqd->lock. A blkg_lookup performed with
	 * bfqd->lock held then returns a fully consistent blkg, which
	 * remains consistent until this lock is held.
	 *
	 * Thanks to the last fact, and to the fact that: (1) bfqg has
	 * been obtained through a blkg_lookup in the above
	 * assignment, and (2) bfqd->lock is being held, here we can
	 * safely use the policy data for the involved blkg (i.e., the
	 * field bfqg->pd) to get to the blkg associated with bfqg,
	 * and then we can safely use any field of blkg. After we
	 * release bfqd->lock, even just getting blkg through this
	 * bfqg may cause dangling references to be traversed, as
	 * bfqg->pd may not exist any more.
	 *
	 * In view of the above facts, here we cache, in the bfqg, any
	 * blkg data we may need for this bic, and for its associated
	 * bfq_queue. As of now, we need to cache only the path of the
	 * blkg, which is used in the bfq_log_* functions.
	 *
	 * Finally, note that bfqg itself needs to be protected from
	 * destruction on the blkg_free of the original blkg (which
	 * invokes bfq_pd_free). We use an additional private
	 * refcounter for bfqg, to let it disappear only after no
	 * bfq_queue refers to it any longer.
	 */
	blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
	bic->blkcg_serial_nr = serial_nr;
out:
	rcu_read_unlock();
}
/**
 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
 * @st: the service tree being flushed.
 */
static void bfq_flush_idle_tree(struct bfq_service_tree *st)
{
	struct bfq_entity *entity = st->first_idle;

	for (; entity ; entity = st->first_idle)
		__bfq_deactivate_entity(entity, false);
}
/**
 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
 * @bfqd: the device data structure with the root group.
 * @entity: the entity to move.
 */
static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
				     struct bfq_entity *entity)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
}
/**
 * bfq_reparent_active_entities - move to the root group all active
 *                                entities.
 * @bfqd: the device data structure with the root group.
 * @bfqg: the group to move from.
 * @st: the service tree with the entities.
 */
static void bfq_reparent_active_entities(struct bfq_data *bfqd,
					 struct bfq_group *bfqg,
					 struct bfq_service_tree *st)
{
	struct rb_root *active = &st->active;
	struct bfq_entity *entity = NULL;

	if (!RB_EMPTY_ROOT(&st->active))
		entity = bfq_entity_of(rb_first(active));

	for (; entity ; entity = bfq_entity_of(rb_first(active)))
		bfq_reparent_leaf_entity(bfqd, entity);

	if (bfqg->sched_data.in_service_entity)
		bfq_reparent_leaf_entity(bfqd,
					 bfqg->sched_data.in_service_entity);
}
/**
 * bfq_pd_offline - deactivate the entity associated with @pd,
 *		    and reparent its children entities.
 * @pd: descriptor of the policy going offline.
 *
 * blkio already grabs the queue_lock for us, so no need to use
 * RCU-based magic
 */
static void bfq_pd_offline(struct blkg_policy_data *pd)
{
	struct bfq_service_tree *st;
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	struct bfq_data *bfqd = bfqg->bfqd;
	struct bfq_entity *entity = bfqg->my_entity;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&bfqd->lock, flags);

	if (!entity) /* root group */
		goto put_async_queues;

	/*
	 * Empty all service_trees belonging to this group before
	 * deactivating the group itself.
	 */
	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
		st = bfqg->sched_data.service_tree + i;

		/*
		 * The idle tree may still contain bfq_queues belonging
		 * to exited task because they never migrated to a different
		 * cgroup from the one being destroyed now.
		 */
		bfq_flush_idle_tree(st);

		/*
		 * It may happen that some queues are still active
		 * (busy) upon group destruction (if the corresponding
		 * processes have been forced to terminate). We move
		 * all the leaf entities corresponding to these queues
		 * to the root_group.
		 * Also, it may happen that the group has an entity
		 * in service, which is disconnected from the active
		 * tree: it must be moved, too.
		 * There is no need to put the sync queues, as the
		 * scheduler has taken no reference.
		 */
		bfq_reparent_active_entities(bfqd, bfqg, st);
	}

	__bfq_deactivate_entity(entity, false);

put_async_queues:
	bfq_put_async_queues(bfqd, bfqg);

	spin_unlock_irqrestore(&bfqd->lock, flags);
	/*
	 * @blkg is going offline and will be ignored by
	 * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
	 * that they don't get lost. If IOs complete after this point, the
	 * stats for them will be lost. Oh well...
	 */
	bfqg_stats_xfer_dead(bfqg);
}
void bfq_end_wr_async(struct bfq_data *bfqd)
{
	struct blkcg_gq *blkg;

	list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		bfq_end_wr_async_queues(bfqd, bfqg);
	}
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}
static int bfq_io_show_weight(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	unsigned int val = 0;

	if (bfqgd)
		val = bfqgd->weight;

	seq_printf(sf, "%u\n", val);

	return 0;
}
static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
				    struct cftype *cftype,
				    u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	struct blkcg_gq *blkg;
	int ret = -ERANGE;

	if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
		return ret;

	ret = 0;
	spin_lock_irq(&blkcg->lock);
	bfqgd->weight = (unsigned short)val;
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		if (!bfqg)
			continue;
		/*
		 * Setting the prio_changed flag of the entity
		 * to 1 with new_weight == weight would re-set
		 * the value of the weight to its ioprio mapping.
		 * Set the flag only if necessary.
		 */
		if ((unsigned short)val != bfqg->entity.new_weight) {
			bfqg->entity.new_weight = (unsigned short)val;
			/*
			 * Make sure that the above new value has been
			 * stored in bfqg->entity.new_weight before
			 * setting the prio_changed flag. In fact,
			 * this flag may be read asynchronously (in
			 * critical sections protected by a different
			 * lock than that held here), and finding this
			 * flag set may cause the execution of the code
			 * for updating parameters whose value may
			 * depend also on bfqg->entity.new_weight (in
			 * __bfq_entity_update_weight_prio).
			 * This barrier makes sure that the new value
			 * of bfqg->entity.new_weight is correctly
			 * seen in that code.
			 */
			smp_wmb();
			bfqg->entity.prio_changed = 1;
		}
	}
	spin_unlock_irq(&blkcg->lock);

	return ret;
}
static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
				 char *buf, size_t nbytes,
				 loff_t off)
{
	u64 weight;
	/* First unsigned long found in the file is used */
	int ret = kstrtoull(strim(buf), 0, &weight);

	if (ret)
		return ret;

	ret = bfq_io_set_weight_legacy(of_css(of), NULL, weight);
	return ret ?: nbytes;
}
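/*
 * The two handlers above back the "bfq.weight" cgroup files; e.g.,
 * assuming the default mount points, a group's weight can be read and
 * set from user space with something like
 *
 *	cat /sys/fs/cgroup/blkio/<group>/blkio.bfq.weight   (cgroup v1)
 *	echo 300 > /sys/fs/cgroup/<group>/io.bfq.weight     (cgroup v2)
 *
 * with values accepted in the [BFQ_MIN_WEIGHT, BFQ_MAX_WEIGHT] range.
 */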
#ifdef CONFIG_DEBUG_BLK_CGROUP
static int bfqg_print_stat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, false);
	return 0;
}

static int bfqg_print_rwstat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, true);
	return 0;
}
static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
					  &blkcg_policy_bfq, off);
	return __blkg_prfill_u64(sf, pd, sum);
}
static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
					struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd),
							   &blkcg_policy_bfq,
							   off);
	return __blkg_prfill_rwstat(sf, pd, &sum);
}
static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, false);
	return 0;
}

static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, true);
	return 0;
}
static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);

	return __blkg_prfill_u64(sf, pd, sum >> 9);
}

static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
	return 0;
}
static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
					 struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL,
					offsetof(struct blkcg_gq, stat_bytes));
	u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);

	return __blkg_prfill_u64(sf, pd, sum >> 9);
}

static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
			  false);
	return 0;
}
bfqg_prfill_avg_queue_size(struct seq_file
*sf
,
1003 struct blkg_policy_data
*pd
, int off
)
1005 struct bfq_group
*bfqg
= pd_to_bfqg(pd
);
1006 u64 samples
= blkg_stat_read(&bfqg
->stats
.avg_queue_size_samples
);
1010 v
= blkg_stat_read(&bfqg
->stats
.avg_queue_size_sum
);
1011 v
= div64_u64(v
, samples
);
1013 __blkg_prfill_u64(sf
, pd
, v
);
1017 /* print avg_queue_size */
1018 static int bfqg_print_avg_queue_size(struct seq_file
*sf
, void *v
)
1020 blkcg_print_blkgs(sf
, css_to_blkcg(seq_css(sf
)),
1021 bfqg_prfill_avg_queue_size
, &blkcg_policy_bfq
,
1025 #endif /* CONFIG_DEBUG_BLK_CGROUP */
struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	int ret;

	ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
	if (ret)
		return NULL;

	return blkg_to_bfqg(bfqd->queue->root_blkg);
}
struct blkcg_policy blkcg_policy_bfq = {
	.dfl_cftypes		= bfq_blkg_files,
	.legacy_cftypes		= bfq_blkcg_legacy_files,

	.cpd_alloc_fn		= bfq_cpd_alloc,
	.cpd_init_fn		= bfq_cpd_init,
	.cpd_bind_fn		= bfq_cpd_init,
	.cpd_free_fn		= bfq_cpd_free,

	.pd_alloc_fn		= bfq_pd_alloc,
	.pd_init_fn		= bfq_pd_init,
	.pd_offline_fn		= bfq_pd_offline,
	.pd_free_fn		= bfq_pd_free,
	.pd_reset_stats_fn	= bfq_pd_reset_stats,
};
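/*
 * This policy descriptor is registered with blk-cgroup from the main
 * scheduler code (see the blkcg_policy_register(&blkcg_policy_bfq) call
 * in bfq-iosched.c), which is what makes the per-cgroup files declared
 * below appear.
 */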
struct cftype bfq_blkcg_legacy_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write_u64 = bfq_io_set_weight_legacy,
	},

	/* statistics, covers only the tasks in the bfqg */
	{
		.name = "bfq.io_service_bytes",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_bytes,
	},
	{
		.name = "bfq.io_serviced",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_ios,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "bfq.time",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.sectors",
		.seq_show = bfqg_print_stat_sectors,
	},
	{
		.name = "bfq.io_service_time",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_wait_time",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_merged",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_queued",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat,
	},
#endif /* CONFIG_DEBUG_BLK_CGROUP */
	/* the same statistics which cover the bfqg and its descendants */
	{
		.name = "bfq.io_service_bytes_recursive",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_bytes_recursive,
	},
	{
		.name = "bfq.io_serviced_recursive",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_ios_recursive,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "bfq.time_recursive",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat_recursive,
	},
	{
		.name = "bfq.sectors_recursive",
		.seq_show = bfqg_print_stat_sectors_recursive,
	},
	{
		.name = "bfq.io_service_time_recursive",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_wait_time_recursive",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_merged_recursive",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_queued_recursive",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.avg_queue_size",
		.seq_show = bfqg_print_avg_queue_size,
	},
	{
		.name = "bfq.group_wait_time",
		.private = offsetof(struct bfq_group, stats.group_wait_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.idle_time",
		.private = offsetof(struct bfq_group, stats.idle_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.empty_time",
		.private = offsetof(struct bfq_group, stats.empty_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.dequeue",
		.private = offsetof(struct bfq_group, stats.dequeue),
		.seq_show = bfqg_print_stat,
	},
#endif /* CONFIG_DEBUG_BLK_CGROUP */
	{ } /* terminate */
};
struct cftype bfq_blkg_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write = bfq_io_set_weight,
	},
	{ } /* terminate */
};

#else	/* CONFIG_BFQ_GROUP_IOSCHED */
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg) {}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
	}
	entity->sched_data = &bfqg->sched_data;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}
void bfq_end_wr_async(struct bfq_data *bfqd)
{
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg *blkcg)
{
	return bfqd->root_group;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	return bfqq->bfqd->root_group;
}

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	struct bfq_group *bfqg;
	int i;

	bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
	if (!bfqg)
		return NULL;

	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
		bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;

	return bfqg;
}
#endif	/* CONFIG_BFQ_GROUP_IOSCHED */