#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

/* Nothing to free: the mq kobjects are embedded in longer-lived structures. */
static void blk_mq_sysfs_release(struct kobject *kobj)
{
}

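/*
 * Each sysfs attribute is described by one of these entries: the embedded
 * struct attribute carries the file name and mode, and the show/store
 * callbacks are recovered via container_of() in the sysfs_ops handlers
 * further down.
 */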
struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
				 char *page)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(ctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *page, size_t length)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(ctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

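/*
 * The hardware-context handlers below mirror the per-CPU context handlers
 * above: the same container_of() lookup and the same sysfs_lock plus
 * blk_queue_dying() guard, but the kobject resolves to a blk_mq_hw_ctx.
 */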
static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
				     struct attribute *attr, const char *page,
				     size_t length)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(hctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1],
		       ctx->rq_dispatched[0]);
}

static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu\n", ctx->rq_merged);
}

static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu %lu\n", ctx->rq_completed[1],
		       ctx->rq_completed[0]);
}

static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
{
	struct request *rq;
	int len = snprintf(page, PAGE_SIZE - 1, "%s:\n", msg);

	list_for_each_entry(rq, list, queuelist) {
		/* worst case for "\t%p\n": two hex digits per pointer byte
		 * plus the tab and newline */
		const int rq_len = 2 * sizeof(rq) + 2;

		/* if the output will be truncated */
		if (PAGE_SIZE - 1 < len + rq_len) {
			/* backspacing if it can't hold '\t...\n' */
			if (PAGE_SIZE - 1 < len + 5)
				len -= rq_len;
			len += snprintf(page + len, PAGE_SIZE - 1 - len,
					"\t...\n");
			break;
		}
		len += snprintf(page + len, PAGE_SIZE - 1 - len,
				"\t%p\n", rq);
	}

	return len;
}

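/*
 * Both rq_list attributes funnel through sysfs_list_show(); each caller is
 * responsible for taking the lock that protects its list (ctx->lock for the
 * per-CPU software queue, hctx->lock for the hardware dispatch list).
 */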
static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
{
	ssize_t ret;

	spin_lock(&ctx->lock);
	ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending");
	spin_unlock(&ctx->lock);

	return ret;
}

static ssize_t blk_mq_hw_sysfs_poll_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return sprintf(page, "considered=%lu, invoked=%lu, success=%lu\n",
		       hctx->poll_considered, hctx->poll_invoked,
		       hctx->poll_success);
}

static ssize_t blk_mq_hw_sysfs_poll_store(struct blk_mq_hw_ctx *hctx,
					  const char *page, size_t size)
{
	/* any write resets the polling statistics */
	hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;

	return size;
}

static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
					   char *page)
{
	return sprintf(page, "%lu\n", hctx->queued);
}

static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return sprintf(page, "%lu\n", hctx->run);
}

static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
					       char *page)
{
	char *start_page = page;
	int i;

	page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
		unsigned int d = 1U << (i - 1);

		page += sprintf(page, "%8u\t%lu\n", d, hctx->dispatched[i]);
	}

	/* last bucket is open-ended, hence the '+' suffix */
	page += sprintf(page, "%8u+\t%lu\n", 1U << (i - 1),
			hctx->dispatched[i]);
	return page - start_page;
}

static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	ssize_t ret;

	spin_lock(&hctx->lock);
	ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending");
	spin_unlock(&hctx->lock);

	return ret;
}

static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return blk_mq_tag_sysfs_show(hctx->tags, page);
}

static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return sprintf(page, "%u\n", atomic_read(&hctx->nr_active));
}

static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	const size_t size = PAGE_SIZE - 1;
	unsigned int i, first = 1;
	int ret = 0, pos = 0;

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret = snprintf(pos + page, size - pos, "%u", i);
		else
			ret = snprintf(pos + page, size - pos, ", %u", i);

		if (ret >= size - pos)
			break;

		first = 0;
		pos += ret;
	}

	ret = snprintf(pos + page, size + 1 - pos, "\n");
	return pos + ret;
}

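/*
 * Attribute tables. With the "mq" directory registered under the disk's
 * kobject (see blk_mq_register_dev() below), the per-CPU context attributes
 * show up as e.g. /sys/block/<disk>/mq/<hctx>/cpu<n>/dispatched and the
 * hardware context attributes as e.g. /sys/block/<disk>/mq/<hctx>/tags.
 */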
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
	.attr = {.name = "dispatched", .mode = S_IRUGO },
	.show = blk_mq_sysfs_dispatched_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
	.attr = {.name = "merged", .mode = S_IRUGO },
	.show = blk_mq_sysfs_merged_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
	.attr = {.name = "completed", .mode = S_IRUGO },
	.show = blk_mq_sysfs_completed_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = {
	.attr = {.name = "rq_list", .mode = S_IRUGO },
	.show = blk_mq_sysfs_rq_list_show,
};

static struct attribute *default_ctx_attrs[] = {
	&blk_mq_sysfs_dispatched.attr,
	&blk_mq_sysfs_merged.attr,
	&blk_mq_sysfs_completed.attr,
	&blk_mq_sysfs_rq_list.attr,
	NULL,	/* the kobject core requires a NULL-terminated array */
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = {
	.attr = {.name = "queued", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_queued_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = {
	.attr = {.name = "run", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_run_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
	.attr = {.name = "dispatched", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_dispatched_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = {
	.attr = {.name = "active", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_active_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
	.attr = {.name = "pending", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_rq_list_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
	.attr = {.name = "tags", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_cpus_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_poll = {
	.attr = {.name = "io_poll", .mode = S_IWUSR | S_IRUGO },
	.show = blk_mq_hw_sysfs_poll_show,
	.store = blk_mq_hw_sysfs_poll_store,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_queued.attr,
	&blk_mq_hw_sysfs_run.attr,
	&blk_mq_hw_sysfs_dispatched.attr,
	&blk_mq_hw_sysfs_pending.attr,
	&blk_mq_hw_sysfs_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	&blk_mq_hw_sysfs_active.attr,
	&blk_mq_hw_sysfs_poll.attr,
	NULL,
};

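/*
 * Glue for the kobject core: the sysfs_ops route every read/write through
 * the type-checked handlers above, and the ktypes attach the default
 * attribute tables to each kind of kobject.
 */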
static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show	= blk_mq_sysfs_show,
	.store	= blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show	= blk_mq_hw_sysfs_show,
	.store	= blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.default_attrs	= default_ctx_attrs,
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.default_attrs	= default_hw_ctx_attrs,
	.release	= blk_mq_sysfs_release,
};

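/*
 * Registration helpers: blk_mq_register_hctx() adds the hctx kobject first
 * and then its per-CPU contexts; blk_mq_unregister_hctx() removes them in
 * the reverse order.
 */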
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx)
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}

static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, ret;

	if (!hctx->nr_ctx)
		return 0;

	ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			break;
	}

	return ret;
}

static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	int i, j;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_unregister_hctx(hctx);

		hctx_for_each_ctx(hctx, ctx, j)
			kobject_put(&ctx->kobj);

		kobject_put(&hctx->kobj);
	}

	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
	kobject_del(&q->mq_kobj);
	kobject_put(&q->mq_kobj);

	/* drop the reference taken in blk_mq_register_dev() */
	kobject_put(&dev->kobj);

	q->mq_sysfs_init_done = false;
}

void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
	blk_mq_disable_hotplug();
	__blk_mq_unregister_dev(dev, q);
	blk_mq_enable_hotplug();
}

void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(&q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}

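/*
 * Register the whole hierarchy under the disk device. CPU hotplug is
 * disabled across the walk so the ctx<->hctx mapping cannot change
 * underneath us. A sketch of the resulting layout, assuming a hypothetical
 * blk-mq disk named vda with one hardware queue:
 *
 *   /sys/block/vda/mq/         <- q->mq_kobj
 *   /sys/block/vda/mq/0/       <- hctx 0: queued, run, tags, cpu_list, ...
 *   /sys/block/vda/mq/0/cpu0/  <- ctx for CPU 0: dispatched, merged, ...
 */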
int blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int ret, i;

	blk_mq_disable_hotplug();

	ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
	if (ret < 0)
		goto out;

	kobject_uevent(&q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

	if (ret)
		__blk_mq_unregister_dev(dev, q);
	else
		q->mq_sysfs_init_done = true;
out:
	blk_mq_enable_hotplug();

	return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_register_dev);

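/*
 * blk_mq_sysfs_unregister() and blk_mq_sysfs_register() tear down and
 * rebuild only the per-hctx directories; they are intended for callers that
 * remap contexts on an already-registered queue, hence the
 * mq_sysfs_init_done check.
 */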
void blk_mq_sysfs_unregister(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	if (!q->mq_sysfs_init_done)
		return;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);
}

int blk_mq_sysfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i, ret = 0;

	if (!q->mq_sysfs_init_done)
		return ret;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

	return ret;
}