#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"
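/*
 * The ctx and hctx kobjects are embedded in structures whose lifetime is
 * managed by the request queue, so the release callback has nothing to
 * free; an empty callback also keeps the kobject core from warning.
 */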
static void blk_mq_sysfs_release(struct kobject *kobj)
{
}
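/*
 * Attribute descriptors: each sysfs file pairs a struct attribute with
 * typed show/store handlers, so the generic sysfs_ops below can recover
 * the right object with container_of() and dispatch to them.
 */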
struct blk_mq_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_ctx *, char *);
        ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};
struct blk_mq_hw_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
        ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};
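/*
 * show/store wrappers shared by all per-ctx and per-hctx attributes: look
 * up the entry and object via container_of(), then call the handler under
 * q->sysfs_lock, refusing access once the queue is marked dying.
 */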
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
                                 char *page)
{
        struct blk_mq_ctx_sysfs_entry *entry;
        struct blk_mq_ctx *ctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
        ctx = container_of(kobj, struct blk_mq_ctx, kobj);
        q = ctx->queue;

        if (!entry->show)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->show(ctx, page);
        mutex_unlock(&q->sysfs_lock);

        return res;
}
static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
                                  const char *page, size_t length)
{
        struct blk_mq_ctx_sysfs_entry *entry;
        struct blk_mq_ctx *ctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
        ctx = container_of(kobj, struct blk_mq_ctx, kobj);
        q = ctx->queue;

        if (!entry->store)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->store(ctx, page, length);
        mutex_unlock(&q->sysfs_lock);

        return res;
}
static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
                                    struct attribute *attr, char *page)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->show)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->show(hctx, page);
        mutex_unlock(&q->sysfs_lock);

        return res;
}
static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
                                     struct attribute *attr, const char *page,
                                     size_t length)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->store)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->store(hctx, page, length);
        mutex_unlock(&q->sysfs_lock);

        return res;
}
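/*
 * Per-ctx (software queue) statistics. The two-column counters print the
 * sync ([1]) and async ([0]) counts, in that order.
 */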
static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page)
{
        return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1],
                       ctx->rq_dispatched[0]);
}
static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
{
        return sprintf(page, "%lu\n", ctx->rq_merged);
}
static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
{
        return sprintf(page, "%lu %lu\n", ctx->rq_completed[1],
                       ctx->rq_completed[0]);
}
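/*
 * Dump the addresses of the requests on @list into @page, one per line,
 * ending with a "\t...\n" marker if the full list would overflow the page.
 */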
static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
{
        struct request *rq;
        int len = snprintf(page, PAGE_SIZE - 1, "%s:\n", msg);

        list_for_each_entry(rq, list, queuelist) {
                const int rq_len = 2 * sizeof(rq) + 2;

                /* if the output will be truncated */
                if (PAGE_SIZE - 1 < len + rq_len) {
                        /* backspacing if it can't hold '\t...\n' */
                        if (PAGE_SIZE - 1 < len + 5)
                                len -= rq_len;
                        len += snprintf(page + len, PAGE_SIZE - 1 - len,
                                        "\t...\n");
                        break;
                }
                len += snprintf(page + len, PAGE_SIZE - 1 - len,
                                "\t%p\n", rq);
        }

        return len;
}
static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
{
        ssize_t ret;

        spin_lock(&ctx->lock);
        ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending");
        spin_unlock(&ctx->lock);

        return ret;
}
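/*
 * Per-hctx (hardware queue) attributes. "io_poll" is the only writable
 * file: any write resets the three polling counters to zero.
 */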
static ssize_t blk_mq_hw_sysfs_poll_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return sprintf(page, "considered=%lu, invoked=%lu, success=%lu\n",
                       hctx->poll_considered, hctx->poll_invoked,
                       hctx->poll_success);
}
static ssize_t blk_mq_hw_sysfs_poll_store(struct blk_mq_hw_ctx *hctx,
                                          const char *page, size_t size)
{
        hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;

        return size;
}
static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
                                           char *page)
{
        return sprintf(page, "%lu\n", hctx->queued);
}
static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return sprintf(page, "%lu\n", hctx->run);
}
static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
                                               char *page)
{
        char *start_page = page;
        int i;

        page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

        for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
                unsigned int d = 1U << (i - 1);

                page += sprintf(page, "%8u\t%lu\n", d, hctx->dispatched[i]);
        }

        page += sprintf(page, "%8u+\t%lu\n", 1U << (i - 1),
                        hctx->dispatched[i]);
        return page - start_page;
}
static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
                                            char *page)
{
        ssize_t ret;

        spin_lock(&hctx->lock);
        ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending");
        spin_unlock(&hctx->lock);

        return ret;
}
static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return blk_mq_tag_sysfs_show(hctx->tags, page);
}
static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return sprintf(page, "%u\n", atomic_read(&hctx->nr_active));
}
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        unsigned int i, first = 1;
        ssize_t ret = 0;

        for_each_cpu(i, hctx->cpumask) {
                if (first)
                        ret += sprintf(ret + page, "%u", i);
                else
                        ret += sprintf(ret + page, ", %u", i);

                first = 0;
        }

        ret += sprintf(ret + page, "\n");
        return ret;
}
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
        .attr = {.name = "dispatched", .mode = S_IRUGO },
        .show = blk_mq_sysfs_dispatched_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
        .attr = {.name = "merged", .mode = S_IRUGO },
        .show = blk_mq_sysfs_merged_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
        .attr = {.name = "completed", .mode = S_IRUGO },
        .show = blk_mq_sysfs_completed_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = {
        .attr = {.name = "rq_list", .mode = S_IRUGO },
        .show = blk_mq_sysfs_rq_list_show,
};
static struct attribute *default_ctx_attrs[] = {
        &blk_mq_sysfs_dispatched.attr,
        &blk_mq_sysfs_merged.attr,
        &blk_mq_sysfs_completed.attr,
        &blk_mq_sysfs_rq_list.attr,
        NULL,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = {
        .attr = {.name = "queued", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_queued_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = {
        .attr = {.name = "run", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_run_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
        .attr = {.name = "dispatched", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_dispatched_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = {
        .attr = {.name = "active", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_active_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
        .attr = {.name = "pending", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_rq_list_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
        .attr = {.name = "tags", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
        .attr = {.name = "cpu_list", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_cpus_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_poll = {
        .attr = {.name = "io_poll", .mode = S_IWUSR | S_IRUGO },
        .show = blk_mq_hw_sysfs_poll_show,
        .store = blk_mq_hw_sysfs_poll_store,
};
static struct attribute *default_hw_ctx_attrs[] = {
        &blk_mq_hw_sysfs_queued.attr,
        &blk_mq_hw_sysfs_run.attr,
        &blk_mq_hw_sysfs_dispatched.attr,
        &blk_mq_hw_sysfs_pending.attr,
        &blk_mq_hw_sysfs_tags.attr,
        &blk_mq_hw_sysfs_cpus.attr,
        &blk_mq_hw_sysfs_active.attr,
        &blk_mq_hw_sysfs_poll.attr,
        NULL,
};
static const struct sysfs_ops blk_mq_sysfs_ops = {
        .show   = blk_mq_sysfs_show,
        .store  = blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
        .show   = blk_mq_hw_sysfs_show,
        .store  = blk_mq_hw_sysfs_store,
};
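/*
 * kobject types for the "mq" directory itself and for the per-ctx and
 * per-hctx directories; default_attrs populates each directory's files.
 */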
static struct kobj_type blk_mq_ktype = {
        .sysfs_ops      = &blk_mq_sysfs_ops,
        .release        = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
        .sysfs_ops      = &blk_mq_sysfs_ops,
        .default_attrs  = default_ctx_attrs,
        .release        = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
        .sysfs_ops      = &blk_mq_hw_sysfs_ops,
        .default_attrs  = default_hw_ctx_attrs,
        .release        = blk_mq_sysfs_release,
};
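/*
 * Registration builds a three-level kobject hierarchy below the disk's
 * device kobject: "mq" -> one directory per hardware queue (named by
 * queue number) -> one "cpu<n>" directory per mapped software queue.
 */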
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_ctx *ctx;
        int i;

        if (!hctx->nr_ctx)
                return;

        hctx_for_each_ctx(hctx, ctx, i)
                kobject_del(&ctx->kobj);

        kobject_del(&hctx->kobj);
}
static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct blk_mq_ctx *ctx;
        int i, ret;

        if (!hctx->nr_ctx)
                return 0;

        ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
        if (ret)
                return ret;

        hctx_for_each_ctx(hctx, ctx, i) {
                ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
                if (ret)
                        break;
        }

        return ret;
}
static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
        int i, j;

        queue_for_each_hw_ctx(q, hctx, i) {
                blk_mq_unregister_hctx(hctx);

                hctx_for_each_ctx(hctx, ctx, j)
                        kobject_put(&ctx->kobj);

                kobject_put(&hctx->kobj);
        }

        kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
        kobject_del(&q->mq_kobj);
        kobject_put(&q->mq_kobj);

        kobject_put(&dev->kobj);

        q->mq_sysfs_init_done = false;
}
void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
        blk_mq_disable_hotplug();
        __blk_mq_unregister_dev(dev, q);
        blk_mq_enable_hotplug();
}
void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
        kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}
static void blk_mq_sysfs_init(struct request_queue *q)
{
        struct blk_mq_ctx *ctx;
        int cpu;

        kobject_init(&q->mq_kobj, &blk_mq_ktype);

        for_each_possible_cpu(cpu) {
                ctx = per_cpu_ptr(q->queue_ctx, cpu);
                kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
        }
}
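/*
 * Registration runs with CPU hotplug disabled so the ctx/hctx mapping
 * cannot change underneath us while the directories are being created.
 */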
int blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int ret, i;

        blk_mq_disable_hotplug();

        blk_mq_sysfs_init(q);

        ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
        if (ret < 0)
                goto out;

        kobject_uevent(&q->mq_kobj, KOBJ_ADD);

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        break;
        }

        if (ret)
                __blk_mq_unregister_dev(dev, q);
        else
                q->mq_sysfs_init_done = true;
out:
        blk_mq_enable_hotplug();

        return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_register_dev);
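/*
 * blk_mq_sysfs_unregister()/blk_mq_sysfs_register() tear down and rebuild
 * only the per-hctx directories; they are used when the queue's hardware
 * contexts are remapped (e.g. across a CPU hotplug event), and are no-ops
 * until blk_mq_register_dev() has completed once.
 */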
void blk_mq_sysfs_unregister(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        if (!q->mq_sysfs_init_done)
                return;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_unregister_hctx(hctx);
}
int blk_mq_sysfs_register(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i, ret = 0;

        if (!q->mq_sysfs_init_done)
                return ret;

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        break;
        }

        return ret;
}