/*
 * Copyright (C) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	int i;
	bool sep = false;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}
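/*
 * The name tables below use designated initializers so that each array is
 * indexed by a flag's bit position. blk_flags_show() falls back to printing
 * the raw bit number for any bit that has no entry in the table.
 */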
#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(QUEUED),
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(SYNCFULL),
	QUEUE_FLAG_NAME(ASYNCFULL),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(BYPASS),
	QUEUE_FLAG_NAME(BIDI),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(STACKABLE),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(DISCARD),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SECERASE),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(DEAD),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(NO_SG_MERGE),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(FLUSH_NQ),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(POLL_STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
	QUEUE_FLAG_NAME(QUIESCED),
};
#undef QUEUE_FLAG_NAME
static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}
static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed after blk_cleanup_queue() has called
	 * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
	 * triggering a use-after-free.
	 */
	if (blk_queue_dead(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}
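/*
 * Example use (assuming debugfs is mounted at the usual /sys/kernel/debug
 * and the queue belongs to a registered disk, e.g. sda):
 *
 *	echo kick > /sys/kernel/debug/block/sda/state
 *
 * "run", "start" and "kick" are the operations handled above.
 */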
static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
	if (stat->nr_samples) {
		seq_printf(m, "samples=%d, mean=%lld, min=%llu, max=%llu",
			   stat->nr_samples, stat->mean, stat->min, stat->max);
	} else {
		seq_puts(m, "samples=0");
	}
}
static int queue_write_hint_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);

	return 0;
}
static ssize_t queue_write_hint_store(void *data, const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		q->write_hints[i] = 0;

	return count;
}
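/*
 * poll_stat[] holds per-size-bucket latency statistics: even slots are reads,
 * odd slots are writes, and bucket i covers requests of roughly 2^(9+i)
 * bytes, which is what the loop below prints.
 */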
static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int bucket;

	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS/2; bucket++) {
		seq_printf(m, "read (%d Bytes): ", 1 << (9+bucket));
		print_stat(m, &q->poll_stat[2*bucket]);
		seq_puts(m, "\n");

		seq_printf(m, "write (%d Bytes): ", 1 << (9+bucket));
		print_stat(m, &q->poll_stat[2*bucket+1]);
		seq_puts(m, "\n");
	}
	return 0;
}
#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(TAG_WAITING),
	HCTX_STATE_NAME(START_ON_RUN),
};
#undef HCTX_STATE_NAME
static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}
#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME
#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_SHARED),
	HCTX_FLAG_NAME(SG_MERGE),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
};
#undef HCTX_FLAG_NAME
static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}
#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const op_name[] = {
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(ZONE_REPORT),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(SCSI_IN),
	REQ_OP_NAME(SCSI_OUT),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME
#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(NOWAIT),
};
#undef CMD_FLAG_NAME
#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(SOFTBARRIER),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(MQ_INFLIGHT),
	RQF_NAME(SPECIAL_PAYLOAD),
};
#undef RQF_NAME
#define RQAF_NAME(name) [REQ_ATOM_##name] = #name
static const char *const rqaf_name[] = {
	RQAF_NAME(POLL_SLEPT),
};
#undef RQAF_NAME
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const unsigned int op = rq->cmd_flags & REQ_OP_MASK;

	seq_printf(m, "%p {.op=", rq);
	if (op < ARRAY_SIZE(op_name) && op_name[op])
		seq_printf(m, "%s", op_name[op]);
	else
		seq_printf(m, "%d", op);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
		       ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_puts(m, ", .atomic_flags=");
	blk_flags_show(m, rq->atomic_flags, rqaf_name, ARRAY_SIZE(rqaf_name));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);
int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);
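/*
 * The seq_operations below walk a request list under the lock that protects
 * it: start() takes the lock and positions the iterator, next() advances it,
 * stop() drops the lock, and show() reuses blk_mq_debugfs_rq_show() to
 * format each request.
 */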
static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start	= queue_requeue_list_start,
	.next	= queue_requeue_list_next,
	.stop	= queue_requeue_list_stop,
	.show	= blk_mq_debugfs_rq_show,
};
static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start	= hctx_dispatch_start,
	.next	= hctx_dispatch_next,
	.stop	= hctx_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};
struct show_busy_params {
	struct seq_file		*m;
	struct blk_mq_hw_ctx	*hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call.
 */
static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
	const struct show_busy_params *params = data;

	if (blk_mq_map_queue(rq->q, rq->mq_ctx->cpu) == params->hctx &&
	    test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
		__blk_mq_debugfs_rq_show(params->m,
					 list_entry_rq(&rq->queuelist));
}
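/*
 * blk_mq_tagset_busy_iter() visits the allocated tags of every hardware queue
 * in the tag set, so the callback above filters out requests that map to a
 * different hardware queue or that have not been marked started yet.
 */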
static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}
static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}
static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   atomic_read(&tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}
static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}
static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}
static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}
static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}
static int hctx_io_poll_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "considered=%lu\n", hctx->poll_considered);
	seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
	seq_printf(m, "success=%lu\n", hctx->poll_success);
	return 0;
}
static ssize_t hctx_io_poll_write(void *data, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
	return count;
}
static int hctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
		unsigned int d = 1U << (i - 1);

		seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]);
	}

	seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]);
	return 0;
}
static ssize_t hctx_dispatched_write(void *data, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
		hctx->dispatched[i] = 0;
	return count;
}
static int hctx_queued_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->queued);
	return 0;
}
static ssize_t hctx_queued_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->queued = 0;
	return count;
}
static int hctx_run_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->run);
	return 0;
}
static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
			      loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->run = 0;
	return count;
}
static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
	return 0;
}
static void *ctx_rq_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&ctx->lock)
{
	struct blk_mq_ctx *ctx = m->private;

	spin_lock(&ctx->lock);
	return seq_list_start(&ctx->rq_list, *pos);
}

static void *ctx_rq_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_ctx *ctx = m->private;

	return seq_list_next(v, &ctx->rq_list, pos);
}

static void ctx_rq_list_stop(struct seq_file *m, void *v)
	__releases(&ctx->lock)
{
	struct blk_mq_ctx *ctx = m->private;

	spin_unlock(&ctx->lock);
}

static const struct seq_operations ctx_rq_list_seq_ops = {
	.start	= ctx_rq_list_start,
	.next	= ctx_rq_list_next,
	.stop	= ctx_rq_list_stop,
	.show	= blk_mq_debugfs_rq_show,
};
static int ctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]);
	return 0;
}

static ssize_t ctx_dispatched_write(void *data, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
	return count;
}
static int ctx_merged_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu\n", ctx->rq_merged);
	return 0;
}

static ssize_t ctx_merged_write(void *data, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_merged = 0;
	return count;
}
static int ctx_completed_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]);
	return 0;
}

static ssize_t ctx_completed_write(void *data, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
	return count;
}
static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}
static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	if (!attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}
static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	if (attr->seq_ops) {
		struct seq_file *m;
		int ret;

		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}
static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);
	else
		return seq_release(inode, file);
}
const struct file_operations blk_mq_debugfs_fops = {
	.open		= blk_mq_debugfs_open,
	.read		= seq_read,
	.write		= blk_mq_debugfs_write,
	.llseek		= seq_lseek,
	.release	= blk_mq_debugfs_release,
};
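/*
 * Each entry below is {name, mode, show, write} (or a .seq_ops pointer for
 * attributes that iterate over a list); debugfs_create_files() walks these
 * arrays until it hits the empty terminating entry.
 */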
static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{"poll_stat", 0400, queue_poll_stat_show},
	{"requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops},
	{"state", 0600, queue_state_show, queue_state_write},
	{"write_hints", 0600, queue_write_hint_show, queue_write_hint_store},
	{},
};
static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write},
	{"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write},
	{"queued", 0600, hctx_queued_show, hctx_queued_write},
	{"run", 0600, hctx_run_show, hctx_run_write},
	{"active", 0400, hctx_active_show},
	{},
};
static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"rq_list", 0400, .seq_ops = &ctx_rq_list_seq_ops},
	{"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
	{"merged", 0600, ctx_merged_show, ctx_merged_write},
	{"completed", 0600, ctx_completed_show, ctx_completed_write},
	{},
};
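/*
 * debugfs_create_files() stashes the object being described (queue, hctx or
 * ctx) in the parent directory's inode i_private, while each file's own
 * i_private points at its blk_mq_debugfs_attr; the open/show/write handlers
 * above recover both via d_parent.
 */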
static bool debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	d_inode(parent)->i_private = data;

	for (; attr->name; attr++) {
		if (!debugfs_create_file(attr->name, attr->mode, parent,
					 (void *)attr, &blk_mq_debugfs_fops))
			return false;
	}
	return true;
}
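/*
 * Directory layout created by the registration helpers below, under the
 * debugfs "block" root: one directory per disk holding the queue attributes,
 * an hctx<N> directory per hardware queue with a cpu<N> directory per
 * software context, plus optional "sched" directories when the active
 * elevator provides debugfs attributes.
 */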
int blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	if (!blk_debugfs_root)
		return -ENOENT;

	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
					    blk_debugfs_root);
	if (!q->debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(q->debugfs_dir, q,
				  blk_mq_debugfs_queue_attrs))
		goto err;

	/*
	 * blk_mq_init_hctx() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx))
			goto err;
		if (q->elevator && !hctx->sched_debugfs_dir &&
		    blk_mq_debugfs_register_sched_hctx(q, hctx))
			goto err;
	}

	return 0;

err:
	blk_mq_debugfs_unregister(q);
	return -ENOMEM;
}
void blk_mq_debugfs_unregister(struct request_queue *q)
{
	debugfs_remove_recursive(q->debugfs_dir);
	q->sched_debugfs_dir = NULL;
	q->debugfs_dir = NULL;
}
static int blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
				       struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);
	if (!ctx_dir)
		return -ENOMEM;

	if (!debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs))
		return -ENOMEM;

	return 0;
}
int blk_mq_debugfs_register_hctx(struct request_queue *q,
				 struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	if (!q->debugfs_dir)
		return -ENOENT;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
	if (!hctx->debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(hctx->debugfs_dir, hctx,
				  blk_mq_debugfs_hctx_attrs))
		goto err;

	hctx_for_each_ctx(hctx, ctx, i) {
		if (blk_mq_debugfs_register_ctx(hctx, ctx))
			goto err;
	}

	return 0;

err:
	blk_mq_debugfs_unregister_hctx(hctx);
	return -ENOMEM;
}
void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}
int blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_debugfs_register_hctx(q, hctx))
			return -ENOMEM;
	}

	return 0;
}
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}
int blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	if (!q->debugfs_dir)
		return -ENOENT;

	if (!e->queue_debugfs_attrs)
		return 0;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);
	if (!q->sched_debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(q->sched_debugfs_dir, q,
				  e->queue_debugfs_attrs))
		goto err;

	return 0;

err:
	blk_mq_debugfs_unregister_sched(q);
	return -ENOMEM;
}
void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}
int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
				       struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	if (!hctx->debugfs_dir)
		return -ENOENT;

	if (!e->hctx_debugfs_attrs)
		return 0;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	if (!hctx->sched_debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(hctx->sched_debugfs_dir, hctx,
				  e->hctx_debugfs_attrs))
		return -ENOMEM;

	return 0;
}
void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}
;