/*
 * Copyright (C) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-rq-qos.h"
static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
	if (stat->nr_samples) {
		seq_printf(m, "samples=%d, mean=%lld, min=%llu, max=%llu",
			   stat->nr_samples, stat->mean, stat->min, stat->max);
	} else {
		seq_puts(m, "samples=0");
	}
}
static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int bucket;

	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS/2; bucket++) {
		seq_printf(m, "read  (%d Bytes): ", 1 << (9+bucket));
		print_stat(m, &q->poll_stat[2*bucket]);
		seq_puts(m, "\n");

		seq_printf(m, "write (%d Bytes): ", 1 << (9+bucket));
		print_stat(m, &q->poll_stat[2*bucket+1]);
		seq_puts(m, "\n");
	}
	return 0;
}
static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}
static const struct seq_operations queue_requeue_list_seq_ops = {
	.start	= queue_requeue_list_start,
	.next	= queue_requeue_list_next,
	.stop	= queue_requeue_list_stop,
	.show	= blk_mq_debugfs_rq_show,
};
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	int i;
	bool sep = false;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}
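/*
 * Illustrative output: for a flags word with NOMERGES and IO_STAT set,
 * blk_flags_show() emits "NOMERGES|IO_STAT"; any set bit without an entry
 * in flag_name[] is printed as its raw bit number instead.
 */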
static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}
#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(DISCARD),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SECERASE),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(DEAD),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(WC),
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(POLL_STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
	QUEUE_FLAG_NAME(QUIESCED),
};
#undef QUEUE_FLAG_NAME
static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}
static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed after blk_cleanup_queue() has called
	 * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
	 * triggering a use-after-free.
	 */
	if (blk_queue_dead(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}
static int queue_write_hint_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);
	return 0;
}
static ssize_t queue_write_hint_store(void *data, const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		q->write_hints[i] = 0;

	return count;
}
static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
	{ },
};
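/*
 * Each entry above becomes a file directly under the per-queue debugfs
 * directory, e.g. /sys/kernel/debug/block/<disk>/poll_stat (path shown
 * for illustration). 0400 entries are read-only; 0600 entries also accept
 * writes via their write callback.
 */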
#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
};
#undef HCTX_STATE_NAME
static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}
#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME
#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_SHARED),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
};
#undef HCTX_FLAG_NAME
static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}
#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(SCSI_IN),
	REQ_OP_NAME(SCSI_OUT),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME
#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOWAIT),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(HIPRI),
};
#undef CMD_FLAG_NAME
#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(SORTED),
	RQF_NAME(STARTED),
	RQF_NAME(SOFTBARRIER),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(MQ_INFLIGHT),
	RQF_NAME(DONTPREP),
	RQF_NAME(PREEMPT),
	RQF_NAME(COPY_USER),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(ELVPRIV),
	RQF_NAME(IO_STAT),
	RQF_NAME(ALLOCED),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_LOCKED),
	RQF_NAME(MQ_POLL_SLEPT),
};
#undef RQF_NAME
static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE]		= "idle",
	[MQ_RQ_IN_FLIGHT]	= "in_flight",
	[MQ_RQ_COMPLETE]	= "complete",
};
static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";
	return blk_mq_rq_state_name_array[rq_state];
}
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const unsigned int op = rq->cmd_flags & REQ_OP_MASK;

	seq_printf(m, "%p {.op=", rq);
	if (op < ARRAY_SIZE(op_name) && op_name[op])
		seq_printf(m, "%s", op_name[op]);
	else
		seq_printf(m, "%d", op);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
		       ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);
int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);
static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}
static const struct seq_operations hctx_dispatch_seq_ops = {
	.start	= hctx_dispatch_start,
	.next	= hctx_dispatch_next,
	.stop	= hctx_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};
struct show_busy_params {
	struct seq_file		*m;
	struct blk_mq_hw_ctx	*hctx;
};
/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
	const struct show_busy_params *params = data;

	if (rq->mq_hctx == params->hctx)
		__blk_mq_debugfs_rq_show(params->m,
					 list_entry_rq(&rq->queuelist));

	return true;
}
static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}
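/*
 * Reading "busy" walks every tag in the tag set and prints the requests
 * currently owned by this hctx. Requests may complete concurrently, so the
 * snapshot is advisory (see the note above hctx_show_busy_rq()).
 */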
static const char *const hctx_types[] = {
	[HCTX_TYPE_DEFAULT]	= "default",
	[HCTX_TYPE_READ]	= "read",
	[HCTX_TYPE_POLL]	= "poll",
};
static int hctx_type_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
	seq_printf(m, "%s\n", hctx_types[hctx->type]);
	return 0;
}
static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}
static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   atomic_read(&tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}
static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}
static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}
static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}
static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}
static int hctx_io_poll_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "considered=%lu\n", hctx->poll_considered);
	seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
	seq_printf(m, "success=%lu\n", hctx->poll_success);
	return 0;
}
static ssize_t hctx_io_poll_write(void *data, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
	return count;
}
static int hctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
		unsigned int d = 1U << (i - 1);

		seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]);
	}

	seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]);
	return 0;
}
static ssize_t hctx_dispatched_write(void *data, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
		hctx->dispatched[i] = 0;
	return count;
}
static int hctx_queued_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->queued);
	return 0;
}
static ssize_t hctx_queued_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->queued = 0;
	return count;
}
static int hctx_run_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->run);
	return 0;
}
static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
			      loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->run = 0;
	return count;
}
static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
	return 0;
}
static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}
#define CTX_RQ_SEQ_OPS(name, type)					\
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
	__acquires(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_lock(&ctx->lock);						\
	return seq_list_start(&ctx->rq_lists[type], *pos);		\
}									\
									\
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v,	\
				       loff_t *pos)			\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	return seq_list_next(v, &ctx->rq_lists[type], pos);		\
}									\
									\
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v)	\
	__releases(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_unlock(&ctx->lock);					\
}									\
									\
static const struct seq_operations ctx_##name##_rq_list_seq_ops = {	\
	.start	= ctx_##name##_rq_list_start,				\
	.next	= ctx_##name##_rq_list_next,				\
	.stop	= ctx_##name##_rq_list_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
}
CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);
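/*
 * The three invocations above expand to ctx_default_rq_list_seq_ops,
 * ctx_read_rq_list_seq_ops and ctx_poll_rq_list_seq_ops: one seq_file
 * iterator per software-queue request list type, referenced by
 * blk_mq_debugfs_ctx_attrs[] below.
 */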
static int ctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]);
	return 0;
}

static ssize_t ctx_dispatched_write(void *data, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
	return count;
}
static int ctx_merged_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu\n", ctx->rq_merged);
	return 0;
}

static ssize_t ctx_merged_write(void *data, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_merged = 0;
	return count;
}
static int ctx_completed_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]);
	return 0;
}

static ssize_t ctx_completed_write(void *data, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
	return count;
}
static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}
static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same with 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}
static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}
static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);

	return seq_release(inode, file);
}
static const struct file_operations blk_mq_debugfs_fops = {
	.open		= blk_mq_debugfs_open,
	.read		= seq_read,
	.write		= blk_mq_debugfs_write,
	.llseek		= seq_lseek,
	.release	= blk_mq_debugfs_release,
};
static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write},
	{"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write},
	{"queued", 0600, hctx_queued_show, hctx_queued_write},
	{"run", 0600, hctx_run_show, hctx_run_write},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{"type", 0400, hctx_type_show},
	{},
};
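/*
 * These files are created under a per-hardware-queue directory, e.g.
 * /sys/kernel/debug/block/<disk>/hctx0/busy (path shown for illustration);
 * see blk_mq_debugfs_register_hctx() below.
 */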
static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
	{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
	{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
	{"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
	{"merged", 0600, ctx_merged_show, ctx_merged_write},
	{"completed", 0600, ctx_completed_show, ctx_completed_write},
	{},
};
static bool debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	if (IS_ERR_OR_NULL(parent))
		return false;

	d_inode(parent)->i_private = data;

	for (; attr->name; attr++) {
		if (!debugfs_create_file(attr->name, attr->mode, parent,
					 (void *)attr, &blk_mq_debugfs_fops))
			return false;
	}
	return true;
}
int blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	if (!blk_debugfs_root)
		return -ENOENT;

	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
					    blk_debugfs_root);
	if (!q->debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(q->debugfs_dir, q,
				  blk_mq_debugfs_queue_attrs))
		goto err;

	/*
	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	if (q->elevator && !q->sched_debugfs_dir)
		blk_mq_debugfs_register_sched(q);

	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx))
			goto err;
		if (q->elevator && !hctx->sched_debugfs_dir &&
		    blk_mq_debugfs_register_sched_hctx(q, hctx))
			goto err;
	}

	if (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;

		while (rqos) {
			blk_mq_debugfs_register_rqos(rqos);
			rqos = rqos->next;
		}
	}

	return 0;

err:
	blk_mq_debugfs_unregister(q);
	return -ENOMEM;
}
void blk_mq_debugfs_unregister(struct request_queue *q)
{
	debugfs_remove_recursive(q->debugfs_dir);
	q->sched_debugfs_dir = NULL;
	q->debugfs_dir = NULL;
}
static int blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
				       struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);
	if (!ctx_dir)
		return -ENOMEM;

	if (!debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs))
		return -ENOMEM;

	return 0;
}
int blk_mq_debugfs_register_hctx(struct request_queue *q,
				 struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
	if (!hctx->debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(hctx->debugfs_dir, hctx,
				  blk_mq_debugfs_hctx_attrs))
		goto err;

	hctx_for_each_ctx(hctx, ctx, i) {
		if (blk_mq_debugfs_register_ctx(hctx, ctx))
			goto err;
	}

	return 0;

err:
	blk_mq_debugfs_unregister_hctx(hctx);
	return -ENOMEM;
}
void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}
int blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_debugfs_register_hctx(q, hctx))
			return -ENOMEM;
	}

	return 0;
}
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}
int blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	if (!q->debugfs_dir)
		return -ENOENT;

	if (!e->queue_debugfs_attrs)
		return 0;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);
	if (!q->sched_debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(q->sched_debugfs_dir, q,
				  e->queue_debugfs_attrs))
		goto err;

	return 0;

err:
	blk_mq_debugfs_unregister_sched(q);
	return -ENOMEM;
}
void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}
void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
	debugfs_remove_recursive(rqos->debugfs_dir);
	rqos->debugfs_dir = NULL;
}
int blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
	struct request_queue *q = rqos->q;
	const char *dir_name = rq_qos_id_to_name(rqos->id);

	if (!q->debugfs_dir)
		return -ENOENT;

	if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
		return 0;

	if (!q->rqos_debugfs_dir) {
		q->rqos_debugfs_dir = debugfs_create_dir("rqos",
							 q->debugfs_dir);
		if (!q->rqos_debugfs_dir)
			return -ENOMEM;
	}

	rqos->debugfs_dir = debugfs_create_dir(dir_name,
					       rqos->q->rqos_debugfs_dir);
	if (!rqos->debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(rqos->debugfs_dir, rqos,
				  rqos->ops->debugfs_attrs))
		goto err;
	return 0;

err:
	blk_mq_debugfs_unregister_rqos(rqos);
	return -ENOMEM;
}
void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
{
	debugfs_remove_recursive(q->rqos_debugfs_dir);
	q->rqos_debugfs_dir = NULL;
}
int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
				       struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	if (!hctx->debugfs_dir)
		return -ENOENT;

	if (!e->hctx_debugfs_attrs)
		return 0;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	if (!hctx->sched_debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(hctx->sched_debugfs_dir, hctx,
				  e->hctx_debugfs_attrs))
		return -ENOMEM;

	return 0;
}
void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}