// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-rq-qos.h"
static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
	if (stat->nr_samples) {
		seq_printf(m, "samples=%d, mean=%llu, min=%llu, max=%llu",
			   stat->nr_samples, stat->mean, stat->min, stat->max);
	} else {
		seq_puts(m, "samples=0");
	}
}
static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int bucket;

	for (bucket = 0; bucket < (BLK_MQ_POLL_STATS_BKTS / 2); bucket++) {
		seq_printf(m, "read  (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket]);
		seq_puts(m, "\n");

		seq_printf(m, "write (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket + 1]);
		seq_puts(m, "\n");
	}
	return 0;
}
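/*
 * Illustrative "poll_stat" output, one read and one write line per
 * latency bucket (the sample values below are made up):
 *
 *   read  (512 Bytes): samples=12, mean=6543, min=210, max=12001
 *   write (512 Bytes): samples=0
 */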
static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start	= queue_requeue_list_start,
	.next	= queue_requeue_list_next,
	.stop	= queue_requeue_list_stop,
	.show	= blk_mq_debugfs_rq_show,
};
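/*
 * The start/next/stop callbacks above walk q->requeue_list under
 * q->requeue_lock following the usual seq_file iterator protocol;
 * blk_mq_debugfs_rq_show() then prints one request per line, so reading
 * "requeue_list" yields a plain list of requests waiting to be requeued.
 */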
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	int i;
	bool sep = false;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}
static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}
#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(DISCARD),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SECERASE),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(DEAD),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(STABLE_WRITES),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(WC),
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(POLL_STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
	QUEUE_FLAG_NAME(QUIESCED),
	QUEUE_FLAG_NAME(PCI_P2PDMA),
	QUEUE_FLAG_NAME(ZONE_RESETALL),
	QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
	QUEUE_FLAG_NAME(NOWAIT),
};
#undef QUEUE_FLAG_NAME
static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}
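/*
 * Illustrative only: with debugfs mounted, reading the per-queue "state"
 * file decodes queue_flags, e.g. (example device and output):
 *
 *   # cat /sys/kernel/debug/block/nvme0n1/state
 *   SAME_COMP|NONROT|IO_STAT|REGISTERED|NOWAIT
 *
 * Bits without an entry in blk_queue_flag_name[] print as raw bit numbers.
 */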
static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed after blk_cleanup_queue() has
	 * called blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set
	 * to avoid triggering a use-after-free.
	 */
	if (blk_queue_dead(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}
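/*
 * Illustrative usage (example device): writing one of the accepted tokens
 * triggers the corresponding action, e.g.
 *
 *   # echo kick > /sys/kernel/debug/block/nvme0n1/state
 */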
static int queue_write_hint_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);

	return 0;
}
static ssize_t queue_write_hint_store(void *data, const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		q->write_hints[i] = 0;

	return count;
}
static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
	{},
};
#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(INACTIVE),
};
#undef HCTX_STATE_NAME
static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}
#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME
#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
	HCTX_FLAG_NAME(STACKING),
	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
};
#undef HCTX_FLAG_NAME
static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}
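/*
 * Illustrative "flags" output (made-up hctx):
 *
 *   alloc_policy=FIFO SHOULD_MERGE|TAG_QUEUE_SHARED
 *
 * The allocation-policy bits are XORed out first so that blk_flags_show()
 * only decodes the remaining feature flags.
 */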
#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOWAIT),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(HIPRI),
};
#undef CMD_FLAG_NAME
#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(SORTED),
	RQF_NAME(STARTED),
	RQF_NAME(SOFTBARRIER),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(MQ_INFLIGHT),
	RQF_NAME(DONTPREP),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(ELVPRIV),
	RQF_NAME(IO_STAT),
	RQF_NAME(ALLOCED),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_LOCKED),
	RQF_NAME(MQ_POLL_SLEPT),
};
#undef RQF_NAME
static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE]		= "idle",
	[MQ_RQ_IN_FLIGHT]	= "in_flight",
	[MQ_RQ_COMPLETE]	= "complete",
};
static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";

	return blk_mq_rq_state_name_array[rq_state];
}
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const unsigned int op = req_op(rq);
	const char *op_str = blk_op_str(op);

	seq_printf(m, "%p {.op=", rq);
	if (strcmp(op_str, "UNKNOWN") == 0)
		seq_printf(m, "%u", op);
	else
		seq_printf(m, "%s", op_str);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
		       ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);
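/*
 * Illustrative dump of a single request (pointer and values made up):
 *
 *   00000000abcd1234 {.op=WRITE, .cmd_flags=SYNC,
 *   .rq_flags=STARTED|IO_STAT|STATS, .state=in_flight, .tag=17,
 *   .internal_tag=-1}
 */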
int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);
static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start	= hctx_dispatch_start,
	.next	= hctx_dispatch_next,
	.stop	= hctx_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};
struct show_busy_params {
	struct seq_file		*m;
	struct blk_mq_hw_ctx	*hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
	const struct show_busy_params *params = data;

	if (rq->mq_hctx == params->hctx)
		__blk_mq_debugfs_rq_show(params->m, rq);

	return true;
}
static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}
static const char *const hctx_types[] = {
	[HCTX_TYPE_DEFAULT]	= "default",
	[HCTX_TYPE_READ]	= "read",
	[HCTX_TYPE_POLL]	= "poll",
};
static int hctx_type_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
	seq_printf(m, "%s\n", hctx_types[hctx->type]);
	return 0;
}
static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}
static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   atomic_read(&tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(tags->breserved_tags, m);
	}
}
static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}
static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags->sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}
static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}
static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags->sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}
static int hctx_io_poll_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "considered=%lu\n", hctx->poll_considered);
	seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
	seq_printf(m, "success=%lu\n", hctx->poll_success);
	return 0;
}
static ssize_t hctx_io_poll_write(void *data, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
	return count;
}
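/*
 * As with the other *_write handlers below, the written bytes themselves
 * are ignored; any write simply resets the counters to zero.
 */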
static int hctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
		unsigned int d = 1U << (i - 1);

		seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]);
	}

	seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]);
	return 0;
}
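/*
 * "dispatched" reads as a histogram of dispatch batch sizes: bucket 0
 * counts runs that dispatched nothing, bucket i counts runs that
 * dispatched about 2^(i-1) requests, and the final "+" bucket is
 * open-ended.
 */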
static ssize_t hctx_dispatched_write(void *data, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
		hctx->dispatched[i] = 0;

	return count;
}
static int hctx_queued_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->queued);
	return 0;
}
static ssize_t hctx_queued_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->queued = 0;
	return count;
}
static int hctx_run_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->run);
	return 0;
}
static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
			      loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->run = 0;
	return count;
}
static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
	return 0;
}
static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}
#define CTX_RQ_SEQ_OPS(name, type)					\
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
	__acquires(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_lock(&ctx->lock);						\
	return seq_list_start(&ctx->rq_lists[type], *pos);		\
}									\
									\
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v,	\
				       loff_t *pos)			\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	return seq_list_next(v, &ctx->rq_lists[type], pos);		\
}									\
									\
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v)	\
	__releases(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_unlock(&ctx->lock);					\
}									\
									\
static const struct seq_operations ctx_##name##_rq_list_seq_ops = {	\
	.start	= ctx_##name##_rq_list_start,				\
	.next	= ctx_##name##_rq_list_next,				\
	.stop	= ctx_##name##_rq_list_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
}
CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);
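/*
 * Each invocation above stamps out a complete seq_operations instance:
 * ctx_default_rq_list_seq_ops, ctx_read_rq_list_seq_ops and
 * ctx_poll_rq_list_seq_ops, referenced by blk_mq_debugfs_ctx_attrs below.
 */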
static int ctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]);
	return 0;
}
static ssize_t ctx_dispatched_write(void *data, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
	return count;
}
static int ctx_merged_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu\n", ctx->rq_merged);
	return 0;
}
static ssize_t ctx_merged_write(void *data, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_merged = 0;
	return count;
}
static int ctx_completed_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]);
	return 0;
}
static ssize_t ctx_completed_write(void *data, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
	return count;
}
static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}
static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same as 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}
static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}
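/*
 * Two open paths: attributes with .seq_ops become full seq_files whose
 * m->private is the object being shown, while everything else goes through
 * single_open() with the attribute itself carried in inode->i_private.
 */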
static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);

	return seq_release(inode, file);
}
static const struct file_operations blk_mq_debugfs_fops = {
	.open		= blk_mq_debugfs_open,
	.read		= seq_read,
	.write		= blk_mq_debugfs_write,
	.llseek		= seq_lseek,
	.release	= blk_mq_debugfs_release,
};
static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write},
	{"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write},
	{"queued", 0600, hctx_queued_show, hctx_queued_write},
	{"run", 0600, hctx_run_show, hctx_run_write},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{"type", 0400, hctx_type_show},
	{},
};
static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
	{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
	{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
	{"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
	{"merged", 0600, ctx_merged_show, ctx_merged_write},
	{"completed", 0600, ctx_completed_show, ctx_completed_write},
	{},
};
static void debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	if (IS_ERR_OR_NULL(parent))
		return;

	d_inode(parent)->i_private = data;

	for (; attr->name; attr++)
		debugfs_create_file(attr->name, attr->mode, parent,
				    (void *)attr, &blk_mq_debugfs_fops);
}
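/*
 * The attribute lives in each file's inode->i_private while the object
 * being shown (queue, hctx or ctx) lives in the parent directory's
 * inode->i_private; blk_mq_debugfs_open() recovers both. The loop stops
 * at the zero-filled sentinel entry terminating each attribute table.
 */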
void blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);

	/*
	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	if (q->elevator && !q->sched_debugfs_dir)
		blk_mq_debugfs_register_sched(q);

	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir)
			blk_mq_debugfs_register_hctx(q, hctx);
		if (q->elevator && !hctx->sched_debugfs_dir)
			blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	if (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;

		while (rqos) {
			blk_mq_debugfs_register_rqos(rqos);
			rqos = rqos->next;
		}
	}
}
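/*
 * Illustrative resulting layout (example disk, abbreviated):
 *
 *   /sys/kernel/debug/block/nvme0n1/
 *     state, pm_only, poll_stat, requeue_list, ...
 *     sched/           (if an I/O scheduler is set)
 *     rqos/<policy>/   (one directory per registered rq_qos policy)
 *     hctx0/           (one directory per hardware queue)
 *       cpu0/          (one directory per mapped software queue)
 */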
void blk_mq_debugfs_unregister(struct request_queue *q)
{
	q->sched_debugfs_dir = NULL;
}
static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);

	debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}
void blk_mq_debugfs_register_hctx(struct request_queue *q,
				  struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);

	debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);

	hctx_for_each_ctx(hctx, ctx, i)
		blk_mq_debugfs_register_ctx(hctx, ctx);
}
void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}
void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_register_hctx(q, hctx);
}
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}
void blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	/*
	 * If the parent directory has not been created yet, return; we will be
	 * called again later on and the directory/files will be created then.
	 */
	if (!q->debugfs_dir)
		return;

	if (!e->queue_debugfs_attrs)
		return;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);

	debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}
void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}
void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
	debugfs_remove_recursive(rqos->debugfs_dir);
	rqos->debugfs_dir = NULL;
}
void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
	struct request_queue *q = rqos->q;
	const char *dir_name = rq_qos_id_to_name(rqos->id);

	if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
		return;

	if (!q->rqos_debugfs_dir)
		q->rqos_debugfs_dir = debugfs_create_dir("rqos",
							 q->debugfs_dir);

	rqos->debugfs_dir = debugfs_create_dir(dir_name,
					       rqos->q->rqos_debugfs_dir);

	debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}
void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
{
	debugfs_remove_recursive(q->rqos_debugfs_dir);
	q->rqos_debugfs_dir = NULL;
}
void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
					struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	if (!e->hctx_debugfs_attrs)
		return;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	debugfs_create_files(hctx->sched_debugfs_dir, hctx,
			     e->hctx_debugfs_attrs);
}
void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}