#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

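/*
 * Nothing to free here: the ctx and hctx kobjects are embedded in
 * structures whose lifetime blk-mq manages elsewhere, so this release
 * hook exists only to satisfy the kobject core.
 */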
static void blk_mq_sysfs_release(struct kobject *kobj)
{
}

struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};
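
/*
 * The four wrappers below share one pattern: recover the typed entry
 * and context from the raw kobject/attribute via container_of(), then
 * invoke the entry's handler under q->sysfs_lock, bailing out if the
 * queue is dying.
 */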
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
				 char *page)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(ctx, page);
	mutex_unlock(&q->sysfs_lock);

	return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *page, size_t length)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(ctx, page, length);
	mutex_unlock(&q->sysfs_lock);

	return res;
}

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);

	return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
				     struct attribute *attr, const char *page,
				     size_t length)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(hctx, page, length);
	mutex_unlock(&q->sysfs_lock);

	return res;
}
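
/*
 * Per software-queue (blk_mq_ctx) statistics.  The [1]/[0] pairs print
 * the sync counter followed by the async counter.
 */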
static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1],
		       ctx->rq_dispatched[0]);
}

static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu\n", ctx->rq_merged);
}

static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu %lu\n", ctx->rq_completed[1],
		       ctx->rq_completed[0]);
}

/* Dump the requests on @list, one pointer per line, prefixed by @msg. */
static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
{
	char *start_page = page;
	struct request *rq;

	page += sprintf(page, "%s:\n", msg);

	list_for_each_entry(rq, list, queuelist)
		page += sprintf(page, "\t%p\n", rq);

	return page - start_page;
}

static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
{
	ssize_t ret;

	spin_lock(&ctx->lock);
	ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending");
	spin_unlock(&ctx->lock);

	return ret;
}

static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
					   char *page)
{
	return sprintf(page, "%lu\n", hctx->queued);
}

static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return sprintf(page, "%lu\n", hctx->run);
}
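
/*
 * Print the dispatch batch-size histogram: one line per slot, labeled
 * 0 for the first slot and 2^(i-1) for slot i.
 */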
static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
					       char *page)
{
	char *start_page = page;
	int i;

	page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) {
		unsigned long d = 1U << (i - 1);

		page += sprintf(page, "%8lu\t%lu\n", d, hctx->dispatched[i]);
	}

	return page - start_page;
}

static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	ssize_t ret;

	spin_lock(&hctx->lock);
	ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending");
	spin_unlock(&hctx->lock);

	return ret;
}

static ssize_t blk_mq_hw_sysfs_ipi_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	ssize_t ret;

	spin_lock(&hctx->lock);
	ret = sprintf(page, "%u\n", !!(hctx->flags & BLK_MQ_F_SHOULD_IPI));
	spin_unlock(&hctx->lock);

	return ret;
}
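
/*
 * Writing a non-zero value sets BLK_MQ_F_SHOULD_IPI on the hardware
 * queue and turns on ipi_redirect in every software queue mapped to
 * it, so completions are steered back to the submitting CPU; writing
 * zero clears both again.
 */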
static ssize_t blk_mq_hw_sysfs_ipi_store(struct blk_mq_hw_ctx *hctx,
					 const char *page, size_t len)
{
	struct blk_mq_ctx *ctx;
	unsigned long ret;
	unsigned int i;

	if (kstrtoul(page, 10, &ret)) {
		pr_err("blk-mq-sysfs: invalid input '%s'\n", page);
		return -EINVAL;
	}

	spin_lock(&hctx->lock);
	if (ret)
		hctx->flags |= BLK_MQ_F_SHOULD_IPI;
	else
		hctx->flags &= ~BLK_MQ_F_SHOULD_IPI;
	spin_unlock(&hctx->lock);

	hctx_for_each_ctx(hctx, ctx, i)
		ctx->ipi_redirect = !!ret;

	return len;
}

static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return blk_mq_tag_sysfs_show(hctx->tags, page);
}

static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	unsigned int i, queue_num, first = 1;
	ssize_t ret = 0;

	blk_mq_disable_hotplug();

	/* Emit a comma-separated list of the CPUs mapped to this hctx. */
	for_each_online_cpu(i) {
		queue_num = hctx->queue->mq_map[i];
		if (queue_num != hctx->queue_num)
			continue;

		if (first)
			ret += sprintf(ret + page, "%u", i);
		else
			ret += sprintf(ret + page, ", %u", i);

		first = 0;
	}

	blk_mq_enable_hotplug();

	ret += sprintf(ret + page, "\n");
	return ret;
}
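
/*
 * Attribute definitions tying each sysfs file name and mode to the
 * show/store handlers above.
 */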
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
	.attr = {.name = "dispatched", .mode = S_IRUGO },
	.show = blk_mq_sysfs_dispatched_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
	.attr = {.name = "merged", .mode = S_IRUGO },
	.show = blk_mq_sysfs_merged_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
	.attr = {.name = "completed", .mode = S_IRUGO },
	.show = blk_mq_sysfs_completed_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = {
	.attr = {.name = "rq_list", .mode = S_IRUGO },
	.show = blk_mq_sysfs_rq_list_show,
};

static struct attribute *default_ctx_attrs[] = {
	&blk_mq_sysfs_dispatched.attr,
	&blk_mq_sysfs_merged.attr,
	&blk_mq_sysfs_completed.attr,
	&blk_mq_sysfs_rq_list.attr,
	NULL,		/* sysfs requires a NULL-terminated array */
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = {
	.attr = {.name = "queued", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_queued_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = {
	.attr = {.name = "run", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_run_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
	.attr = {.name = "dispatched", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_dispatched_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
	.attr = {.name = "pending", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_rq_list_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_ipi = {
	.attr = {.name = "ipi_redirect", .mode = S_IRUGO | S_IWUSR},
	.show = blk_mq_hw_sysfs_ipi_show,
	.store = blk_mq_hw_sysfs_ipi_store,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
	.attr = {.name = "tags", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_queued.attr,
	&blk_mq_hw_sysfs_run.attr,
	&blk_mq_hw_sysfs_dispatched.attr,
	&blk_mq_hw_sysfs_pending.attr,
	&blk_mq_hw_sysfs_ipi.attr,
	&blk_mq_hw_sysfs_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	NULL,		/* sysfs requires a NULL-terminated array */
};

static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show	= blk_mq_sysfs_show,
	.store	= blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show	= blk_mq_hw_sysfs_show,
	.store	= blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.default_attrs	= default_ctx_attrs,
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.default_attrs	= default_hw_ctx_attrs,
	.release	= blk_mq_sysfs_release,
};
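
/*
 * Teardown mirrors blk_mq_register_disk(): drop the per-ctx kobjects,
 * then each hctx kobject, then the queue's "mq" kobject, and finally
 * the device reference taken at registration time.
 */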
void blk_mq_unregister_disk(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	int i, j;

	queue_for_each_hw_ctx(q, hctx, i) {
		hctx_for_each_ctx(hctx, ctx, j) {
			kobject_del(&ctx->kobj);
			kobject_put(&ctx->kobj);
		}
		kobject_del(&hctx->kobj);
		kobject_put(&hctx->kobj);
	}

	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
	kobject_del(&q->mq_kobj);
	kobject_put(&q->mq_kobj);

	kobject_put(&disk_to_dev(disk)->kobj);
}
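
/*
 * Build the mq/<hctx-nr>/cpu<n> kobject hierarchy under the disk's
 * device directory: one directory per hardware queue, containing one
 * directory per software queue mapped to it.  Any failure unwinds via
 * blk_mq_unregister_disk().
 */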
int blk_mq_register_disk(struct gendisk *disk)
{
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	int ret, i, j;

	kobject_init(&q->mq_kobj, &blk_mq_ktype);

	ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
	if (ret < 0)
		return ret;

	kobject_uevent(&q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
		ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", i);
		if (ret)
			break;

		if (!hctx->nr_ctx)
			continue;

		hctx_for_each_ctx(hctx, ctx, j) {
			kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
			ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u",
					  ctx->cpu);
			if (ret)
				break;
		}
	}

	if (ret) {
		blk_mq_unregister_disk(disk);
		return ret;
	}

	return 0;
}