/* SPDX-License-Identifier: GPL-2.0 */
#include "blk-mq-tag.h"

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;
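
/*
 * Illustrative sketch (not part of the kernel source): requests queued on a
 * software context are drained under ctx->lock, in the style of
 * blk_mq_flush_busy_ctxs(). The function name example_drain_ctx() is
 * hypothetical.
 */
static inline void example_drain_ctx(struct blk_mq_ctx *ctx,
				     struct list_head *list)
{
	spin_lock(&ctx->lock);
	list_splice_tail_init(&ctx->rq_list, list);
	spin_unlock(&ctx->lock);
}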

void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
			   bool wait);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			     bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			    struct list_head *list);

/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}
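
/*
 * Illustrative usage (sketch, not part of the kernel source): resolving the
 * hardware queue that serves a given software context. The function name
 * example_ctx_to_hctx() is hypothetical.
 */
static inline struct blk_mq_hw_ctx *
example_ctx_to_hctx(struct request_queue *q, struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(q, ctx->cpu);
}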

extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
						  unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
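
/*
 * Illustrative usage (sketch, not part of the kernel source): since
 * blk_mq_get_ctx() calls get_cpu(), every caller must pair it with
 * blk_mq_put_ctx() to re-enable preemption. The function name
 * example_current_index_hw() is hypothetical.
 */
static inline unsigned int example_current_index_hw(struct request_queue *q)
{
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
	unsigned int index_hw = ctx->index_hw;

	blk_mq_put_ctx(ctx);
	return index_hw;
}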

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
		      unsigned int inflight[2]);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(hctx);
}

static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(hctx);

	return true;
}

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = -1;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}

static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
					      struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(hctx, rq);
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx;

	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
	__blk_mq_put_driver_tag(hctx, rq);
}