/*
 * cgroups support for the BFQ I/O scheduler.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "bfq-iosched.h"

#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)

/* bfqg stats flags */
enum bfqg_stats_flags {
        BFQG_stats_waiting = 0,
        BFQG_stats_idling,
        BFQG_stats_empty,
};
#define BFQG_FLAG_FNS(name)                                             \
static void bfqg_stats_mark_##name(struct bfqg_stats *stats)           \
{                                                                       \
        stats->flags |= (1 << BFQG_stats_##name);                       \
}                                                                       \
static void bfqg_stats_clear_##name(struct bfqg_stats *stats)          \
{                                                                       \
        stats->flags &= ~(1 << BFQG_stats_##name);                      \
}                                                                       \
static int bfqg_stats_##name(struct bfqg_stats *stats)                 \
{                                                                       \
        return (stats->flags & (1 << BFQG_stats_##name)) != 0;          \
}
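
/*
 * Instantiate the helpers for each flag: e.g. BFQG_FLAG_FNS(waiting)
 * defines bfqg_stats_mark_waiting(), bfqg_stats_clear_waiting() and
 * bfqg_stats_waiting().
 */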
BFQG_FLAG_FNS(waiting)
BFQG_FLAG_FNS(idling)
BFQG_FLAG_FNS(empty)
#undef BFQG_FLAG_FNS

/* This should be called with the scheduler lock held. */
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{
        u64 now;

        if (!bfqg_stats_waiting(stats))
                return;

        now = ktime_get_ns();
        if (now > stats->start_group_wait_time)
                blkg_stat_add(&stats->group_wait_time,
                              now - stats->start_group_wait_time);
        bfqg_stats_clear_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
                                                 struct bfq_group *curr_bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        if (bfqg_stats_waiting(stats))
                return;
        if (bfqg == curr_bfqg)
                return;
        stats->start_group_wait_time = ktime_get_ns();
        bfqg_stats_mark_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
{
        u64 now;

        if (!bfqg_stats_empty(stats))
                return;

        now = ktime_get_ns();
        if (now > stats->start_empty_time)
                blkg_stat_add(&stats->empty_time,
                              now - stats->start_empty_time);
        bfqg_stats_clear_empty(stats);
}

void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
{
        blkg_stat_add(&bfqg->stats.dequeue, 1);
}

void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        if (blkg_rwstat_total(&stats->queued))
                return;

        /*
         * group is already marked empty. This can happen if bfqq got new
         * request in parent group and moved to this group while being added
         * to service tree. Just ignore the event and move on.
         */
        if (bfqg_stats_empty(stats))
                return;

        stats->start_empty_time = ktime_get_ns();
        bfqg_stats_mark_empty(stats);
}
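
/*
 * If an idling period is in progress, add the time elapsed since
 * bfqg_stats_set_start_idle_time() to the group's idle_time stat and
 * clear the idling flag.
 */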
void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        if (bfqg_stats_idling(stats)) {
                u64 now = ktime_get_ns();

                if (now > stats->start_idle_time)
                        blkg_stat_add(&stats->idle_time,
                                      now - stats->start_idle_time);
                bfqg_stats_clear_idling(stats);
        }
}

void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        stats->start_idle_time = ktime_get_ns();
        bfqg_stats_mark_idling(stats);
}

void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        blkg_stat_add(&stats->avg_queue_size_sum,
                      blkg_rwstat_total(&stats->queued));
        blkg_stat_add(&stats->avg_queue_size_samples, 1);
        bfqg_stats_update_group_wait_time(stats);
}
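
/*
 * Account the insertion of a request with operation flags @op in the
 * stats of @bfqg, and close any empty-state period the group was in.
 */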
void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
                              unsigned int op)
{
        blkg_rwstat_add(&bfqg->stats.queued, op, 1);
        bfqg_stats_end_empty_time(&bfqg->stats);
        if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
                bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
}

void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
{
        blkg_rwstat_add(&bfqg->stats.queued, op, -1);
}

void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
{
        blkg_rwstat_add(&bfqg->stats.merged, op, 1);
}

void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
                                  u64 io_start_time_ns, unsigned int op)
{
        struct bfqg_stats *stats = &bfqg->stats;
        u64 now = ktime_get_ns();

        if (now > io_start_time_ns)
                blkg_rwstat_add(&stats->service_time, op,
                                now - io_start_time_ns);
        if (io_start_time_ns > start_time_ns)
                blkg_rwstat_add(&stats->wait_time, op,
                                io_start_time_ns - start_time_ns);
}

#else /* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
                              unsigned int op) { }
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
                                  u64 io_start_time_ns, unsigned int op) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }

#endif /* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

#ifdef CONFIG_BFQ_GROUP_IOSCHED

/*
 * blk-cgroup policy-related handlers
 * The following functions help in converting between blk-cgroup
 * internal structures and BFQ-specific structures.
 */

static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
{
        return pd ? container_of(pd, struct bfq_group, pd) : NULL;
}

struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
{
        return pd_to_blkg(&bfqg->pd);
}

static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
{
        return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
}

/*
 * bfq_group handlers
 * The following functions help in navigating the bfq_group hierarchy
 * by allowing one to find the parent of a bfq_group or the bfq_group
 * associated with a bfq_queue.
 */

static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
{
        struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;

        return pblkg ? blkg_to_bfqg(pblkg) : NULL;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
        struct bfq_entity *group_entity = bfqq->entity.parent;

        return group_entity ? container_of(group_entity, struct bfq_group,
                                           entity) :
                              bfqq->bfqd->root_group;
}

/*
 * The following two functions handle get and put of a bfq_group by
 * wrapping the related blk-cgroup hooks.
 */

static void bfqg_get(struct bfq_group *bfqg)
{
        bfqg->ref++;
}

static void bfqg_put(struct bfq_group *bfqg)
{
        bfqg->ref--;

        if (bfqg->ref == 0)
                kfree(bfqg);
}

static void bfqg_and_blkg_get(struct bfq_group *bfqg)
{
        /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
        bfqg_get(bfqg);

        blkg_get(bfqg_to_blkg(bfqg));
}

void bfqg_and_blkg_put(struct bfq_group *bfqg)
{
        blkg_put(bfqg_to_blkg(bfqg));

        bfqg_put(bfqg);
}

/* @stats = 0 */
static void bfqg_stats_reset(struct bfqg_stats *stats)
{
#ifdef CONFIG_DEBUG_BLK_CGROUP
        /* queued stats shouldn't be cleared */
        blkg_rwstat_reset(&stats->merged);
        blkg_rwstat_reset(&stats->service_time);
        blkg_rwstat_reset(&stats->wait_time);
        blkg_stat_reset(&stats->time);
        blkg_stat_reset(&stats->avg_queue_size_sum);
        blkg_stat_reset(&stats->avg_queue_size_samples);
        blkg_stat_reset(&stats->dequeue);
        blkg_stat_reset(&stats->group_wait_time);
        blkg_stat_reset(&stats->idle_time);
        blkg_stat_reset(&stats->empty_time);
#endif
}
/* @to += @from */
static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
{
        if (!to || !from)
                return;

#ifdef CONFIG_DEBUG_BLK_CGROUP
        /* queued stats shouldn't be cleared */
        blkg_rwstat_add_aux(&to->merged, &from->merged);
        blkg_rwstat_add_aux(&to->service_time, &from->service_time);
        blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
        blkg_stat_add_aux(&to->time, &from->time);
        blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
        blkg_stat_add_aux(&to->avg_queue_size_samples,
                          &from->avg_queue_size_samples);
        blkg_stat_add_aux(&to->dequeue, &from->dequeue);
        blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
        blkg_stat_add_aux(&to->idle_time, &from->idle_time);
        blkg_stat_add_aux(&to->empty_time, &from->empty_time);
#endif
}
/*
 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this bfqg after
 * it's gone.
 */
static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
{
        struct bfq_group *parent;

        if (!bfqg) /* root_group */
                return;

        parent = bfqg_parent(bfqg);

        lockdep_assert_held(bfqg_to_blkg(bfqg)->q->queue_lock);

        if (unlikely(!parent))
                return;

        bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
        bfqg_stats_reset(&bfqg->stats);
}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
        struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

        entity->weight = entity->new_weight;
        entity->orig_weight = entity->new_weight;
        if (bfqq) {
                bfqq->ioprio = bfqq->new_ioprio;
                bfqq->ioprio_class = bfqq->new_ioprio_class;
                /*
                 * Make sure that bfqg and its associated blkg do not
                 * disappear before entity.
                 */
                bfqg_and_blkg_get(bfqg);
        }
        entity->parent = bfqg->my_entity; /* NULL for root group */
        entity->sched_data = &bfqg->sched_data;
}

static void bfqg_stats_exit(struct bfqg_stats *stats)
{
#ifdef CONFIG_DEBUG_BLK_CGROUP
        blkg_rwstat_exit(&stats->merged);
        blkg_rwstat_exit(&stats->service_time);
        blkg_rwstat_exit(&stats->wait_time);
        blkg_rwstat_exit(&stats->queued);
        blkg_stat_exit(&stats->time);
        blkg_stat_exit(&stats->avg_queue_size_sum);
        blkg_stat_exit(&stats->avg_queue_size_samples);
        blkg_stat_exit(&stats->dequeue);
        blkg_stat_exit(&stats->group_wait_time);
        blkg_stat_exit(&stats->idle_time);
        blkg_stat_exit(&stats->empty_time);
#endif
}

static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{
#ifdef CONFIG_DEBUG_BLK_CGROUP
        if (blkg_rwstat_init(&stats->merged, gfp) ||
            blkg_rwstat_init(&stats->service_time, gfp) ||
            blkg_rwstat_init(&stats->wait_time, gfp) ||
            blkg_rwstat_init(&stats->queued, gfp) ||
            blkg_stat_init(&stats->time, gfp) ||
            blkg_stat_init(&stats->avg_queue_size_sum, gfp) ||
            blkg_stat_init(&stats->avg_queue_size_samples, gfp) ||
            blkg_stat_init(&stats->dequeue, gfp) ||
            blkg_stat_init(&stats->group_wait_time, gfp) ||
            blkg_stat_init(&stats->idle_time, gfp) ||
            blkg_stat_init(&stats->empty_time, gfp)) {
                bfqg_stats_exit(stats);
                return -ENOMEM;
        }
#endif

        return 0;
}

static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
{
        return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
}

static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
{
        return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
}

static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
{
        struct bfq_group_data *bgd;

        bgd = kzalloc(sizeof(*bgd), gfp);
        if (!bgd)
                return NULL;
        return &bgd->pd;
}

static void bfq_cpd_init(struct blkcg_policy_data *cpd)
{
        struct bfq_group_data *d = cpd_to_bfqgd(cpd);

        d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
                CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
}

static void bfq_cpd_free(struct blkcg_policy_data *cpd)
{
        kfree(cpd_to_bfqgd(cpd));
}

static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
{
        struct bfq_group *bfqg;

        bfqg = kzalloc_node(sizeof(*bfqg), gfp, node);
        if (!bfqg)
                return NULL;

        if (bfqg_stats_init(&bfqg->stats, gfp)) {
                kfree(bfqg);
                return NULL;
        }

        /* see comments in bfq_bic_update_cgroup for why refcounting */
        bfqg_get(bfqg);
        return &bfqg->pd;
}

static void bfq_pd_init(struct blkg_policy_data *pd)
{
        struct blkcg_gq *blkg = pd_to_blkg(pd);
        struct bfq_group *bfqg = blkg_to_bfqg(blkg);
        struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
        struct bfq_entity *entity = &bfqg->entity;
        struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);

        entity->orig_weight = entity->weight = entity->new_weight = d->weight;
        entity->my_sched_data = &bfqg->sched_data;
        bfqg->my_entity = entity; /*
                                   * the root_group's will be set to NULL
                                   * in bfq_init_queue()
                                   */
        bfqg->bfqd = bfqd;
        bfqg->active_entities = 0;
        bfqg->rq_pos_tree = RB_ROOT;
}

static void bfq_pd_free(struct blkg_policy_data *pd)
{
        struct bfq_group *bfqg = pd_to_bfqg(pd);

        bfqg_stats_exit(&bfqg->stats);
        bfqg_put(bfqg);
}

static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
{
        struct bfq_group *bfqg = pd_to_bfqg(pd);

        bfqg_stats_reset(&bfqg->stats);
}

static void bfq_group_set_parent(struct bfq_group *bfqg,
                                 struct bfq_group *parent)
{
        struct bfq_entity *entity;

        entity = &bfqg->entity;
        entity->parent = parent->my_entity;
        entity->sched_data = &parent->sched_data;
}
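
/*
 * Look up, without creating it, the bfq_group associated with @blkcg on
 * the request queue of @bfqd.
 */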
static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd,
                                         struct blkcg *blkcg)
{
        struct blkcg_gq *blkg;

        blkg = blkg_lookup(blkcg, bfqd->queue);
        if (likely(blkg))
                return blkg_to_bfqg(blkg);
        return NULL;
}

struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
                                     struct blkcg *blkcg)
{
        struct bfq_group *bfqg, *parent;
        struct bfq_entity *entity;

        bfqg = bfq_lookup_bfqg(bfqd, blkcg);

        if (unlikely(!bfqg))
                return NULL;

        /*
         * Update chain of bfq_groups as we might be handling a leaf group
         * which, along with some of its relatives, has not been hooked yet
         * to the private hierarchy of BFQ.
         */
        entity = &bfqg->entity;
        for_each_entity(entity) {
                struct bfq_group *curr_bfqg = container_of(entity,
                                                struct bfq_group, entity);
                if (curr_bfqg != bfqd->root_group) {
                        parent = bfqg_parent(curr_bfqg);
                        if (!parent)
                                parent = bfqd->root_group;
                        bfq_group_set_parent(curr_bfqg, parent);
                }
        }

        return bfqg;
}

/**
 * bfq_bfqq_move - migrate @bfqq to @bfqg.
 * @bfqd: queue descriptor.
 * @bfqq: the queue to move.
 * @bfqg: the group to move to.
 *
 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
 * it on the new one. Avoid putting the entity on the old group idle tree.
 *
 * Must be called under the scheduler lock, to make sure that the blkg
 * owning @bfqg does not disappear (see comments in
 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
 * objects).
 */
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                   struct bfq_group *bfqg)
{
        struct bfq_entity *entity = &bfqq->entity;

        /* If bfqq is empty, then bfq_bfqq_expire also invokes
         * bfq_del_bfqq_busy, thereby removing bfqq and its entity
         * from data structures related to current group. Otherwise we
         * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
         * we do below.
         */
        if (bfqq == bfqd->in_service_queue)
                bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
                                false, BFQQE_PREEMPTED);

        if (bfq_bfqq_busy(bfqq))
                bfq_deactivate_bfqq(bfqd, bfqq, false, false);
        else if (entity->on_st)
                bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
        bfqg_and_blkg_put(bfqq_group(bfqq));

        entity->parent = bfqg->my_entity;
        entity->sched_data = &bfqg->sched_data;
        /* pin down bfqg and its associated blkg */
        bfqg_and_blkg_get(bfqg);

        if (bfq_bfqq_busy(bfqq)) {
                bfq_pos_tree_add_move(bfqd, bfqq);
                bfq_activate_bfqq(bfqd, bfqq);
        }

        if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
                bfq_schedule_dispatch(bfqd);
}

/**
 * __bfq_bic_change_cgroup - move @bic to @cgroup.
 * @bfqd: the queue descriptor.
 * @bic: the bic to move.
 * @blkcg: the blk-cgroup to move to.
 *
 * Move bic to blkcg, assuming that bfqd->lock is held; which makes
 * sure that the reference to cgroup is valid across the call (see
 * comments in bfq_bic_update_cgroup on this issue)
 *
 * NOTE: an alternative approach might have been to store the current
 * cgroup in bfqq and getting a reference to it, reducing the lookup
 * time here, at the price of slightly more complex code.
 */
static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
                                                 struct bfq_io_cq *bic,
                                                 struct blkcg *blkcg)
{
        struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
        struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
        struct bfq_group *bfqg;
        struct bfq_entity *entity;

        bfqg = bfq_find_set_group(bfqd, blkcg);

        if (unlikely(!bfqg))
                bfqg = bfqd->root_group;

        if (async_bfqq) {
                entity = &async_bfqq->entity;

                if (entity->sched_data != &bfqg->sched_data) {
                        bic_set_bfqq(bic, NULL, 0);
                        bfq_log_bfqq(bfqd, async_bfqq,
                                     "bic_change_group: %p %d",
                                     async_bfqq, async_bfqq->ref);
                        bfq_put_queue(async_bfqq);
                }
        }

        if (sync_bfqq) {
                entity = &sync_bfqq->entity;
                if (entity->sched_data != &bfqg->sched_data)
                        bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
        }

        return bfqg;
}
void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{
        struct bfq_data *bfqd = bic_to_bfqd(bic);
        struct bfq_group *bfqg = NULL;
        uint64_t serial_nr;

        rcu_read_lock();
        serial_nr = bio_blkcg(bio)->css.serial_nr;

        /*
         * Check whether blkcg has changed. The condition may trigger
         * spuriously on a newly created cic but there's no harm.
         */
        if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
                goto out;

        bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
        /*
         * Update blkg_path for bfq_log_* functions. We cache this
         * path, and update it here, for the following
         * reasons. Operations on blkg objects in blk-cgroup are
         * protected with the request_queue lock, and not with the
         * lock that protects the instances of this scheduler
         * (bfqd->lock). This exposes BFQ to the following sort of
         * race.
         *
         * The blkg_lookup performed in bfq_get_queue, protected
         * through rcu, may happen to return the address of a copy of
         * the original blkg. If this is the case, then the
         * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
         * the blkg, is useless: it does not prevent blk-cgroup code
         * from destroying both the original blkg and all objects
         * directly or indirectly referred by the copy of the
         * blkg.
         *
         * On the bright side, destroy operations on a blkg invoke, as
         * a first step, hooks of the scheduler associated with the
         * blkg. And these hooks are executed with bfqd->lock held for
         * BFQ. As a consequence, for any blkg associated with the
         * request queue this instance of the scheduler is attached
         * to, we are guaranteed that such a blkg is not destroyed, and
         * that all the pointers it contains are consistent, while we
         * are holding bfqd->lock. A blkg_lookup performed with
         * bfqd->lock held then returns a fully consistent blkg, which
         * remains consistent as long as this lock is held.
         *
         * Thanks to the last fact, and to the fact that: (1) bfqg has
         * been obtained through a blkg_lookup in the above
         * assignment, and (2) bfqd->lock is being held, here we can
         * safely use the policy data for the involved blkg (i.e., the
         * field bfqg->pd) to get to the blkg associated with bfqg,
         * and then we can safely use any field of blkg. After we
         * release bfqd->lock, even just getting blkg through this
         * bfqg may cause dangling references to be traversed, as
         * bfqg->pd may not exist any more.
         *
         * In view of the above facts, here we cache, in the bfqg, any
         * blkg data we may need for this bic, and for its associated
         * bfq_queue. As of now, we need to cache only the path of the
         * blkg, which is used in the bfq_log_* functions.
         *
         * Finally, note that bfqg itself needs to be protected from
         * destruction on the blkg_free of the original blkg (which
         * invokes bfq_pd_free). We use an additional private
         * refcounter for bfqg, to let it disappear only after no
         * bfq_queue refers to it any longer.
         */
        blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
        bic->blkcg_serial_nr = serial_nr;
out:
        rcu_read_unlock();
}
/**
 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
 * @st: the service tree being flushed.
 */
static void bfq_flush_idle_tree(struct bfq_service_tree *st)
{
        struct bfq_entity *entity = st->first_idle;

        for (; entity ; entity = st->first_idle)
                __bfq_deactivate_entity(entity, false);
}

/**
 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
 * @bfqd: the device data structure with the root group.
 * @entity: the entity to move.
 */
static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
                                     struct bfq_entity *entity)
{
        struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

        bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
}

/**
 * bfq_reparent_active_entities - move to the root group all active
 *                                entities.
 * @bfqd: the device data structure with the root group.
 * @bfqg: the group to move from.
 * @st: the service tree with the entities.
 */
static void bfq_reparent_active_entities(struct bfq_data *bfqd,
                                         struct bfq_group *bfqg,
                                         struct bfq_service_tree *st)
{
        struct rb_root *active = &st->active;
        struct bfq_entity *entity = NULL;

        if (!RB_EMPTY_ROOT(&st->active))
                entity = bfq_entity_of(rb_first(active));

        for (; entity ; entity = bfq_entity_of(rb_first(active)))
                bfq_reparent_leaf_entity(bfqd, entity);

        if (bfqg->sched_data.in_service_entity)
                bfq_reparent_leaf_entity(bfqd,
                                         bfqg->sched_data.in_service_entity);
}
/**
 * bfq_pd_offline - deactivate the entity associated with @pd,
 *                  and reparent its children entities.
 * @pd: descriptor of the policy going offline.
 *
 * blkio already grabs the queue_lock for us, so no need to use
 * RCU-based magic
 */
static void bfq_pd_offline(struct blkg_policy_data *pd)
{
        struct bfq_service_tree *st;
        struct bfq_group *bfqg = pd_to_bfqg(pd);
        struct bfq_data *bfqd = bfqg->bfqd;
        struct bfq_entity *entity = bfqg->my_entity;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&bfqd->lock, flags);

        if (!entity) /* root group */
                goto put_async_queues;

        /*
         * Empty all service_trees belonging to this group before
         * deactivating the group itself.
         */
        for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
                st = bfqg->sched_data.service_tree + i;

                /*
                 * The idle tree may still contain bfq_queues belonging
                 * to exited tasks because they never migrated to a different
                 * cgroup from the one being destroyed now.
                 */
                bfq_flush_idle_tree(st);

                /*
                 * It may happen that some queues are still active
                 * (busy) upon group destruction (if the corresponding
                 * processes have been forced to terminate). We move
                 * all the leaf entities corresponding to these queues
                 * to the root_group.
                 * Also, it may happen that the group has an entity
                 * in service, which is disconnected from the active
                 * tree: it must be moved, too.
                 * There is no need to put the sync queues, as the
                 * scheduler has taken no reference.
                 */
                bfq_reparent_active_entities(bfqd, bfqg, st);
        }

        __bfq_deactivate_entity(entity, false);

put_async_queues:
        bfq_put_async_queues(bfqd, bfqg);

        spin_unlock_irqrestore(&bfqd->lock, flags);
        /*
         * @blkg is going offline and will be ignored by
         * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
         * that they don't get lost. If IOs complete after this point, the
         * stats for them will be lost. Oh well...
         */
        bfqg_stats_xfer_dead(bfqg);
}
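
/*
 * End weight-raising for the async queues of all the groups attached to
 * the request queue of @bfqd, including the root group.
 */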
void bfq_end_wr_async(struct bfq_data *bfqd)
{
        struct blkcg_gq *blkg;

        list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
                struct bfq_group *bfqg = blkg_to_bfqg(blkg);

                bfq_end_wr_async_queues(bfqd, bfqg);
        }
        bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}
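
/* Report the default BFQ weight of the blkcg, as set through bfq.weight. */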
static int bfq_io_show_weight(struct seq_file *sf, void *v)
{
        struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
        struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
        unsigned int val = 0;

        if (bfqgd)
                val = bfqgd->weight;

        seq_printf(sf, "%u\n", val);

        return 0;
}

static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
                                    struct cftype *cftype,
                                    u64 val)
{
        struct blkcg *blkcg = css_to_blkcg(css);
        struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
        struct blkcg_gq *blkg;
        int ret = -ERANGE;

        if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
                return ret;

        ret = 0;
        spin_lock_irq(&blkcg->lock);
        bfqgd->weight = (unsigned short)val;
        hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
                struct bfq_group *bfqg = blkg_to_bfqg(blkg);

                if (!bfqg)
                        continue;
                /*
                 * Setting the prio_changed flag of the entity
                 * to 1 with new_weight == weight would re-set
                 * the value of the weight to its ioprio mapping.
                 * Set the flag only if necessary.
                 */
                if ((unsigned short)val != bfqg->entity.new_weight) {
                        bfqg->entity.new_weight = (unsigned short)val;
                        /*
                         * Make sure that the above new value has been
                         * stored in bfqg->entity.new_weight before
                         * setting the prio_changed flag. In fact,
                         * this flag may be read asynchronously (in
                         * critical sections protected by a different
                         * lock than that held here), and finding this
                         * flag set may cause the execution of the code
                         * for updating parameters whose value may
                         * depend also on bfqg->entity.new_weight (in
                         * __bfq_entity_update_weight_prio).
                         * This barrier makes sure that the new value
                         * of bfqg->entity.new_weight is correctly
                         * seen in that code.
                         */
                        smp_wmb();
                        bfqg->entity.prio_changed = 1;
                }
        }
        spin_unlock_irq(&blkcg->lock);

        return ret;
}

static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
                                 char *buf, size_t nbytes,
                                 loff_t off)
{
        u64 weight;
        /* First unsigned long found in the file is used */
        int ret = kstrtoull(strim(buf), 0, &weight);

        if (ret)
                return ret;

        ret = bfq_io_set_weight_legacy(of_css(of), NULL, weight);
        return ret ?: nbytes;
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
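/*
 * Helpers for the debug statistics files: each prints the blkg_stat or
 * blkg_rwstat stored at the offset passed in the cftype's private field,
 * either for the group alone or, in the _recursive variants, summed over
 * the group's subtree.
 */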
static int bfqg_print_stat(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
                          &blkcg_policy_bfq, seq_cft(sf)->private, false);
        return 0;
}

static int bfqg_print_rwstat(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
                          &blkcg_policy_bfq, seq_cft(sf)->private, true);
        return 0;
}

static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
                                      struct blkg_policy_data *pd, int off)
{
        u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
                                          &blkcg_policy_bfq, off);
        return __blkg_prfill_u64(sf, pd, sum);
}

static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
                                        struct blkg_policy_data *pd, int off)
{
        struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd),
                                                           &blkcg_policy_bfq,
                                                           off);
        return __blkg_prfill_rwstat(sf, pd, &sum);
}

static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
                          seq_cft(sf)->private, false);
        return 0;
}

static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
                          seq_cft(sf)->private, true);
        return 0;
}

static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
                               int off)
{
        u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);

        return __blkg_prfill_u64(sf, pd, sum >> 9);
}

static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
        return 0;
}

static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
                                         struct blkg_policy_data *pd, int off)
{
        struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL,
                                        offsetof(struct blkcg_gq, stat_bytes));
        u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
                atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);

        return __blkg_prfill_u64(sf, pd, sum >> 9);
}

static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
                          false);
        return 0;
}

static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
                                      struct blkg_policy_data *pd, int off)
{
        struct bfq_group *bfqg = pd_to_bfqg(pd);
        u64 samples = blkg_stat_read(&bfqg->stats.avg_queue_size_samples);
        u64 v = 0;

        if (samples) {
                v = blkg_stat_read(&bfqg->stats.avg_queue_size_sum);
                v = div64_u64(v, samples);
        }
        __blkg_prfill_u64(sf, pd, v);
        return 0;
}

/* print avg_queue_size */
static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
                          0, false);
        return 0;
}
#endif /* CONFIG_DEBUG_BLK_CGROUP */

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
        int ret;

        ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
        if (ret)
                return NULL;

        return blkg_to_bfqg(bfqd->queue->root_blkg);
}

struct blkcg_policy blkcg_policy_bfq = {
        .dfl_cftypes            = bfq_blkg_files,
        .legacy_cftypes         = bfq_blkcg_legacy_files,

        .cpd_alloc_fn           = bfq_cpd_alloc,
        .cpd_init_fn            = bfq_cpd_init,
        .cpd_bind_fn            = bfq_cpd_init,
        .cpd_free_fn            = bfq_cpd_free,

        .pd_alloc_fn            = bfq_pd_alloc,
        .pd_init_fn             = bfq_pd_init,
        .pd_offline_fn          = bfq_pd_offline,
        .pd_free_fn             = bfq_pd_free,
        .pd_reset_stats_fn      = bfq_pd_reset_stats,
};
struct cftype bfq_blkcg_legacy_files[] = {
        {
                .name = "bfq.weight",
                .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = bfq_io_show_weight,
                .write_u64 = bfq_io_set_weight_legacy,
        },

        /* statistics, covers only the tasks in the bfqg */
        {
                .name = "bfq.io_service_bytes",
                .private = (unsigned long)&blkcg_policy_bfq,
                .seq_show = blkg_print_stat_bytes,
        },
        {
                .name = "bfq.io_serviced",
                .private = (unsigned long)&blkcg_policy_bfq,
                .seq_show = blkg_print_stat_ios,
        },
#ifdef CONFIG_DEBUG_BLK_CGROUP
        {
                .name = "bfq.time",
                .private = offsetof(struct bfq_group, stats.time),
                .seq_show = bfqg_print_stat,
        },
        {
                .name = "bfq.sectors",
                .seq_show = bfqg_print_stat_sectors,
        },
        {
                .name = "bfq.io_service_time",
                .private = offsetof(struct bfq_group, stats.service_time),
                .seq_show = bfqg_print_rwstat,
        },
        {
                .name = "bfq.io_wait_time",
                .private = offsetof(struct bfq_group, stats.wait_time),
                .seq_show = bfqg_print_rwstat,
        },
        {
                .name = "bfq.io_merged",
                .private = offsetof(struct bfq_group, stats.merged),
                .seq_show = bfqg_print_rwstat,
        },
        {
                .name = "bfq.io_queued",
                .private = offsetof(struct bfq_group, stats.queued),
                .seq_show = bfqg_print_rwstat,
        },
#endif /* CONFIG_DEBUG_BLK_CGROUP */

        /* the same statistics which cover the bfqg and its descendants */
        {
                .name = "bfq.io_service_bytes_recursive",
                .private = (unsigned long)&blkcg_policy_bfq,
                .seq_show = blkg_print_stat_bytes_recursive,
        },
        {
                .name = "bfq.io_serviced_recursive",
                .private = (unsigned long)&blkcg_policy_bfq,
                .seq_show = blkg_print_stat_ios_recursive,
        },
#ifdef CONFIG_DEBUG_BLK_CGROUP
        {
                .name = "bfq.time_recursive",
                .private = offsetof(struct bfq_group, stats.time),
                .seq_show = bfqg_print_stat_recursive,
        },
        {
                .name = "bfq.sectors_recursive",
                .seq_show = bfqg_print_stat_sectors_recursive,
        },
        {
                .name = "bfq.io_service_time_recursive",
                .private = offsetof(struct bfq_group, stats.service_time),
                .seq_show = bfqg_print_rwstat_recursive,
        },
        {
                .name = "bfq.io_wait_time_recursive",
                .private = offsetof(struct bfq_group, stats.wait_time),
                .seq_show = bfqg_print_rwstat_recursive,
        },
        {
                .name = "bfq.io_merged_recursive",
                .private = offsetof(struct bfq_group, stats.merged),
                .seq_show = bfqg_print_rwstat_recursive,
        },
        {
                .name = "bfq.io_queued_recursive",
                .private = offsetof(struct bfq_group, stats.queued),
                .seq_show = bfqg_print_rwstat_recursive,
        },
        {
                .name = "bfq.avg_queue_size",
                .seq_show = bfqg_print_avg_queue_size,
        },
        {
                .name = "bfq.group_wait_time",
                .private = offsetof(struct bfq_group, stats.group_wait_time),
                .seq_show = bfqg_print_stat,
        },
        {
                .name = "bfq.idle_time",
                .private = offsetof(struct bfq_group, stats.idle_time),
                .seq_show = bfqg_print_stat,
        },
        {
                .name = "bfq.empty_time",
                .private = offsetof(struct bfq_group, stats.empty_time),
                .seq_show = bfqg_print_stat,
        },
        {
                .name = "bfq.dequeue",
                .private = offsetof(struct bfq_group, stats.dequeue),
                .seq_show = bfqg_print_stat,
        },
#endif /* CONFIG_DEBUG_BLK_CGROUP */
        { } /* terminate */
};
struct cftype bfq_blkg_files[] = {
        {
                .name = "bfq.weight",
                .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = bfq_io_show_weight,
                .write = bfq_io_set_weight,
        },
        {} /* terminate */
};

#else /* CONFIG_BFQ_GROUP_IOSCHED */

void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                   struct bfq_group *bfqg) {}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
        struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

        entity->weight = entity->new_weight;
        entity->orig_weight = entity->new_weight;
        if (bfqq) {
                bfqq->ioprio = bfqq->new_ioprio;
                bfqq->ioprio_class = bfqq->new_ioprio_class;
        }
        entity->sched_data = &bfqg->sched_data;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
        bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg *blkcg)
{
        return bfqd->root_group;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
        return bfqq->bfqd->root_group;
}

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
        struct bfq_group *bfqg;
        int i;

        bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
        if (!bfqg)
                return NULL;

        for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
                bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;

        return bfqg;
}
#endif /* CONFIG_BFQ_GROUP_IOSCHED */