block/blk-rq-qos.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef RQ_QOS_H
#define RQ_QOS_H

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/atomic.h>
#include <linux/wait.h>

#include "blk-mq-debugfs.h"

struct blk_mq_debugfs_attr;
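/*
 * Common infrastructure for request-queue QoS policies.  Writeback
 * throttling (RQ_QOS_WBT), the blk-iolatency controller (RQ_QOS_LATENCY)
 * and the blk-iocost controller (RQ_QOS_COST) each attach a struct rq_qos
 * to the queue and are called back through the hooks declared below.
 */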
enum rq_qos_id {
	RQ_QOS_WBT,
	RQ_QOS_LATENCY,
	RQ_QOS_COST,
};
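/*
 * Pairs an in-flight counter with a waitqueue so that throttled submitters
 * can sleep until the in-flight count drops below the policy's limit.
 */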
struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;
};
struct rq_qos {
	struct rq_qos_ops *ops;
	struct request_queue *q;
	enum rq_qos_id id;
	struct rq_qos *next;
#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry *debugfs_dir;
#endif
};
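/*
 * Per-policy callbacks.  The request/bio hooks are optional; the
 * __rq_qos_*() wrappers in blk-rq-qos.c only call into a policy that has
 * filled in the corresponding callback.
 */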
struct rq_qos_ops {
	void (*throttle)(struct rq_qos *, struct bio *);
	void (*track)(struct rq_qos *, struct request *, struct bio *);
	void (*merge)(struct rq_qos *, struct request *, struct bio *);
	void (*issue)(struct rq_qos *, struct request *);
	void (*requeue)(struct rq_qos *, struct request *);
	void (*done)(struct rq_qos *, struct request *);
	void (*done_bio)(struct rq_qos *, struct bio *);
	void (*cleanup)(struct rq_qos *, struct bio *);
	void (*queue_depth_changed)(struct rq_qos *);
	void (*exit)(struct rq_qos *);
	const struct blk_mq_debugfs_attr *debugfs_attrs;
};
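/*
 * A minimal sketch of how a policy typically hooks in: embed struct rq_qos
 * in the policy's own state, fill in an rq_qos_ops, and attach it to the
 * queue with rq_qos_add().  This is only an illustration; the names
 * example_qos, example_throttle, example_attach, example_acquire and
 * example_cleanup are hypothetical (the last two are sketched after the
 * rq_qos_wait() declaration further down).
 *
 *	struct example_qos {
 *		struct rq_qos rqos;
 *		struct rq_wait rqw;
 *	};
 *
 *	static void example_throttle(struct rq_qos *rqos, struct bio *bio)
 *	{
 *		struct example_qos *eq = container_of(rqos, struct example_qos,
 *						      rqos);
 *
 *		rq_qos_wait(&eq->rqw, eq, example_acquire, example_cleanup);
 *	}
 *
 *	static struct rq_qos_ops example_ops = {
 *		.throttle	= example_throttle,
 *	};
 *
 *	static int example_attach(struct request_queue *q)
 *	{
 *		struct example_qos *eq = kzalloc(sizeof(*eq), GFP_KERNEL);
 *
 *		if (!eq)
 *			return -ENOMEM;
 *		rq_wait_init(&eq->rqw);
 *		eq->rqos.id = RQ_QOS_WBT;
 *		eq->rqos.ops = &example_ops;
 *		eq->rqos.q = q;
 *		rq_qos_add(q, &eq->rqos);
 *		return 0;
 *	}
 */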
struct rq_depth {
	unsigned int max_depth;

	int scale_step;
	bool scaled_max;

	unsigned int queue_depth;
	unsigned int default_depth;
};
static inline struct rq_qos *rq_qos_id(struct request_queue *q,
				       enum rq_qos_id id)
{
	struct rq_qos *rqos;
	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->id == id)
			break;
	}
	return rqos;
}
static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_WBT);
}
static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_LATENCY);
}
static inline const char *rq_qos_id_to_name(enum rq_qos_id id)
{
	switch (id) {
	case RQ_QOS_WBT:
		return "wbt";
	case RQ_QOS_LATENCY:
		return "latency";
	case RQ_QOS_COST:
		return "cost";
	}
	return "unknown";
}
static inline void rq_wait_init(struct rq_wait *rq_wait)
{
	atomic_set(&rq_wait->inflight, 0);
	init_waitqueue_head(&rq_wait->wait);
}
static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
	rqos->next = q->rq_qos;
	q->rq_qos = rqos;

	if (rqos->ops->debugfs_attrs)
		blk_mq_debugfs_register_rqos(rqos);
}
static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
{
	struct rq_qos **cur;

	for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
		if (*cur == rqos) {
			*cur = rqos->next;
			break;
		}
	}

	blk_mq_debugfs_unregister_rqos(rqos);
}
typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);

void rq_qos_wait(struct rq_wait *rqw, void *private_data,
		 acquire_inflight_cb_t *acquire_inflight_cb,
		 cleanup_cb_t *cleanup_cb);
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
bool rq_depth_scale_up(struct rq_depth *rqd);
bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);
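/*
 * A minimal sketch of the callbacks rq_qos_wait() expects, in the spirit of
 * what blk-iolatency does: acquire_inflight_cb tries to take an in-flight
 * slot and returns true on success, while cleanup_cb gives back a slot the
 * wakeup path handed out when the sleeper raced and acquired its own.  The
 * names example_acquire and example_cleanup and the depth limit of 16 are
 * hypothetical.
 *
 *	static bool example_acquire(struct rq_wait *rqw, void *private_data)
 *	{
 *		return rq_wait_inc_below(rqw, 16);
 *	}
 *
 *	static void example_cleanup(struct rq_wait *rqw, void *private_data)
 *	{
 *		atomic_dec(&rqw->inflight);
 *		wake_up(&rqw->wait);
 *	}
 */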
void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_queue_depth_changed(struct rq_qos *rqos);
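/*
 * The inline wrappers below test q->rq_qos before calling out, so queues
 * with no QoS policy attached pay only a pointer check per hook.
 */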
static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_cleanup(q->rq_qos, bio);
}
static inline void rq_qos_done(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_done(q->rq_qos, rq);
}
static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_issue(q->rq_qos, rq);
}
static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_requeue(q->rq_qos, rq);
}
static inline void rq_qos_done_bio(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_done_bio(q->rq_qos, bio);
}
static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
	/*
	 * BIO_TRACKED lets controllers know that a bio went through the
	 * normal rq_qos path.
	 */
	bio_set_flag(bio, BIO_TRACKED);
	if (q->rq_qos)
		__rq_qos_throttle(q->rq_qos, bio);
}
static inline void rq_qos_track(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_track(q->rq_qos, rq, bio);
}
static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_merge(q->rq_qos, rq, bio);
}
static inline void rq_qos_queue_depth_changed(struct request_queue *q)
{
	if (q->rq_qos)
		__rq_qos_queue_depth_changed(q->rq_qos);
}
void rq_qos_exit(struct request_queue *);

#endif