#include "blk-cgroup-rwstat.h"

/*
 * To implement hierarchical throttling, throtl_grps form a tree and bios
 * are dispatched upwards level by level until they reach the top and get
 * issued.  When dispatching bios from the children and local group at each
 * level, if the bios are dispatched into a single bio_list, there's a risk
 * of a local or child group which can queue many bios at once filling up
 * the list starving others.
 *
 * To avoid such starvation, dispatched bios are queued separately
 * according to where they came from.  When they are again dispatched to
 * the parent, they're popped in round-robin order so that no single source
 * hogs the dispatch window.
 *
 * throtl_qnode is used to keep the queued bios separated by their sources.
 * Bios are queued to throtl_qnode which in turn is queued to
 * throtl_service_queue and then dispatched in round-robin order.
 *
 * It's also used to track the reference counts on blkg's.  A qnode always
 * belongs to a throtl_grp and gets queued on itself or the parent, so
 * incrementing the reference of the associated throtl_grp when a qnode is
 * queued and decrementing when dequeued is enough to keep the whole blkg
 * tree pinned while bios are in flight.
 */
struct throtl_qnode {
	struct list_head	node;		/* service_queue->queued[] */
	struct bio_list		bios;		/* queued bios */
	struct throtl_grp	*tg;		/* tg this qnode belongs to */
};
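
/*
 * A sketch of the round-robin pop described above; the real
 * implementation is throtl_pop_queued() in blk-throttle.c, which also
 * drops the throtl_grp reference once a qnode runs empty:
 *
 *	qn = list_first_entry(queued, struct throtl_qnode, node);
 *	bio = bio_list_pop(&qn->bios);
 *	if (bio_list_empty(&qn->bios))
 *		list_del_init(&qn->node);
 *	else
 *		list_move_tail(&qn->node, queued);
 */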

struct throtl_service_queue {
	struct throtl_service_queue *parent_sq;	/* the parent service_queue */

	/*
	 * Bios queued directly to this service_queue or dispatched from
	 * children throtl_grp's.
	 */
	struct list_head	queued[2];	/* throtl_qnode [READ/WRITE] */
	unsigned int		nr_queued[2];	/* number of queued bios */

	/*
	 * RB tree of active children throtl_grp's, which are sorted by
	 * their ->disptime.
	 */
	struct rb_root_cached	pending_tree;	/* RB tree of active tgs */
	unsigned int		nr_pending;	/* # queued in the tree */
	unsigned long		first_pending_disptime;	/* disptime of the first tg */
	struct timer_list	pending_timer;	/* fires on first_pending_disptime */
};
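
/*
 * How the fields above fit together, as a sketch (the real update lives
 * in blk-throttle.c): the service_queue tracks the leftmost pending
 * child and arms pending_timer for its disptime:
 *
 *	struct rb_node *n = rb_first_cached(&sq->pending_tree);
 *
 *	if (n) {
 *		struct throtl_grp *tg = rb_entry(n, struct throtl_grp, rb_node);
 *
 *		sq->first_pending_disptime = tg->disptime;
 *		mod_timer(&sq->pending_timer, tg->disptime);
 *	}
 */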

enum tg_state_flags {
	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
	THROTL_TG_CANCELING	= 1 << 2,	/* starts to cancel bio */
};
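
/*
 * These bits live in throtl_grp->flags.  For example, enqueueing a group
 * on its parent's pending_tree is guarded roughly like this (a sketch,
 * not the exact code):
 *
 *	if (!(tg->flags & THROTL_TG_PENDING)) {
 *		tg->flags |= THROTL_TG_PENDING;
 *		parent_sq->nr_pending++;
 *	}
 */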

struct throtl_grp {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* active throtl group service_queue member */
	struct rb_node rb_node;

	/* throtl_data this group belongs to */
	struct throtl_data *td;

	/* this group's service queue */
	struct throtl_service_queue service_queue;

	/*
	 * qnode_on_self is used when bios are directly queued to this
	 * throtl_grp so that local bios compete fairly with bios
	 * dispatched from children.  qnode_on_parent is used when bios are
	 * dispatched from this throtl_grp into its parent and will compete
	 * with the sibling qnode_on_parents and the parent's
	 * qnode_on_self.
	 */
	struct throtl_qnode qnode_on_self[2];
	struct throtl_qnode qnode_on_parent[2];

	/*
	 * Dispatch time in jiffies.  This is the estimated time when the
	 * group will unthrottle and is ready to dispatch more bios.  It is
	 * used as the key to sort active groups in the service tree.
	 */
	unsigned long disptime;

	unsigned int flags;

	/* are there any throtl rules between this group and td? */
	bool has_rules_bps[2];
	bool has_rules_iops[2];

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bios dispatched in current slice */
	unsigned int io_disp[2];

	uint64_t last_bytes_disp[2];
	unsigned int last_io_disp[2];

	/*
	 * The following two fields are updated when a new configuration is
	 * submitted while some bios are still throttled; they record how many
	 * bytes/ios were already waited for under the previous configuration
	 * and are used to calculate the wait time under the new configuration
	 * (a worked example follows this struct).
	 */
	long long carryover_bytes[2];
	int carryover_ios[2];

	unsigned long last_check_time;

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	struct blkg_rwstat stat_bytes;
	struct blkg_rwstat stat_ios;
};
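
/*
 * Worked example for carryover_bytes (a sketch of the idea rather than
 * the exact arithmetic in blk-throttle.c): suppose a group is limited to
 * 10 MB/s and a 2 MB bio has already waited 100 ms, i.e. 1 MB of budget
 * has been earned.  If the limit is then lowered to 5 MB/s, the earned
 * 1 MB is recorded in carryover_bytes so the remaining wait is computed
 * for 1 MB at 5 MB/s (200 ms) instead of restarting the full 2 MB wait
 * under the new limit.
 */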

extern struct blkcg_policy blkcg_policy_throtl;

static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}
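
/*
 * Typical lookup chain, as used by blk_should_throtl() below: a bio's
 * blkg maps back to its throttle group with
 *
 *	struct throtl_grp *tg = blkg_to_tg(bio->bi_blkg);
 */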

/*
 * Internal throttling interface
 */
#ifndef CONFIG_BLK_DEV_THROTTLING
static inline void blk_throtl_exit(struct gendisk *disk) { }
static inline bool blk_throtl_bio(struct bio *bio) { return false; }
static inline void blk_throtl_cancel_bios(struct gendisk *disk) { }
#else /* CONFIG_BLK_DEV_THROTTLING */
void blk_throtl_exit(struct gendisk *disk);
bool __blk_throtl_bio(struct bio *bio);
void blk_throtl_cancel_bios(struct gendisk *disk);

static inline bool blk_throtl_activated(struct request_queue *q)
{
	return q->td != NULL;
}

static inline bool blk_should_throtl(struct bio *bio)
{
	struct throtl_grp *tg;
	int rw = bio_data_dir(bio);

	/*
	 * This is called under bio_queue_enter(), and it's synchronized with
	 * the activation of blk-throtl, which is protected by
	 * blk_mq_freeze_queue().
	 */
	if (!blk_throtl_activated(bio->bi_bdev->bd_queue))
		return false;

	tg = blkg_to_tg(bio->bi_blkg);
	if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
		if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
			bio_set_flag(bio, BIO_CGROUP_ACCT);
			blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
					bio->bi_iter.bi_size);
		}
		blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
	}

	/* iops limit is always counted */
	if (tg->has_rules_iops[rw])
		return true;
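
	/*
	 * A bio that was already charged for bps (e.g. after a bio split
	 * and resubmission) carries BIO_BPS_THROTTLED and is not charged
	 * against the bps limit again; only the iops check above applies
	 * to it.
	 */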
	if (tg->has_rules_bps[rw] && !bio_flagged(bio, BIO_BPS_THROTTLED))
		return true;

	return false;
}

static inline bool blk_throtl_bio(struct bio *bio)
{
	if (!blk_should_throtl(bio))
		return false;

	return __blk_throtl_bio(bio);
}
#endif /* CONFIG_BLK_DEV_THROTTLING */