// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/build_bug.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	return 0;
}

static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start	= queue_requeue_list_start,
	.next	= queue_requeue_list_next,
	.stop	= queue_requeue_list_stop,
	.show	= blk_mq_debugfs_rq_show,
};

static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	int i;
	bool sep = false;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}

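/*
 * Illustrative example (not verified output): for a flags word with
 * QUEUE_FLAG_SAME_COMP and QUEUE_FLAG_INIT_DONE set, the helper above
 * emits "SAME_COMP|INIT_DONE"; a set bit without an entry in flag_name[]
 * is printed as its bit number instead.
 */
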
static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(QUIESCED),
	QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
	QUEUE_FLAG_NAME(HCTX_ACTIVE),
	QUEUE_FLAG_NAME(SQ_SCHED),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	BUILD_BUG_ON(ARRAY_SIZE(blk_queue_flag_name) != QUEUE_FLAG_MAX);
	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}

static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed when the queue is removed. Don't
	 * allow setting the state on a dying queue to avoid a use-after-free.
	 */
	if (blk_queue_dying(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}

static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "zone_wplugs", 0400, queue_zone_wplugs_show, NULL },
	{ },
};

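/*
 * These attributes are created directly in the per-queue debugfs
 * directory, e.g. /sys/kernel/debug/block/<disk>/state (path assumes
 * the default debugfs mount point).
 */
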
#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(INACTIVE),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_state_name) != BLK_MQ_S_MAX);
	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
	HCTX_FLAG_NAME(STACKING),
	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
	HCTX_FLAG_NAME(NO_SCHED_BY_DEFAULT),
};
#undef HCTX_FLAG_NAME

static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	BUILD_BUG_ON(ARRAY_SIZE(hctx_flag_name) !=
			BLK_MQ_F_ALLOC_POLICY_START_BIT);
	BUILD_BUG_ON(ARRAY_SIZE(alloc_policy_name) != BLK_TAG_ALLOC_MAX);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

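/*
 * Illustrative example (not verified output): reading "flags" for an hctx
 * with BLK_MQ_F_SHOULD_MERGE set and the default FIFO tag allocation
 * policy would print "alloc_policy=FIFO SHOULD_MERGE"; the policy bits
 * are masked out of the flags word before the flag names are decoded.
 */
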
#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOWAIT),
	CMD_FLAG_NAME(POLLED),
	CMD_FLAG_NAME(ALLOC_CACHE),
	CMD_FLAG_NAME(SWAP),
	CMD_FLAG_NAME(DRV),
	CMD_FLAG_NAME(FS_PRIVATE),
	CMD_FLAG_NAME(ATOMIC),
	CMD_FLAG_NAME(NOUNMAP),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [__RQF_##name] = #name
static const char *const rqf_name[] = {
	RQF_NAME(STARTED),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(DONTPREP),
	RQF_NAME(SCHED_TAGS),
	RQF_NAME(USE_SCHED),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(IO_STAT),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_PLUGGING),
	RQF_NAME(TIMED_OUT),
	RQF_NAME(RESV),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE]		= "idle",
	[MQ_RQ_IN_FLIGHT]	= "in_flight",
	[MQ_RQ_COMPLETE]	= "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";
	return blk_mq_rq_state_name_array[rq_state];
}

int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const enum req_op op = req_op(rq);
	const char *op_str = blk_op_str(op);

	BUILD_BUG_ON(ARRAY_SIZE(cmd_flag_name) != __REQ_NR_BITS);
	BUILD_BUG_ON(ARRAY_SIZE(rqf_name) != __RQF_BITS);

	seq_printf(m, "%p {.op=", rq);
	if (strcmp(op_str, "UNKNOWN") == 0)
		seq_printf(m, "%u", op);
	else
		seq_printf(m, "%s", op_str);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, (__force unsigned int)(rq->cmd_flags & ~REQ_OP_MASK),
		       cmd_flag_name, ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);

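/*
 * Drivers can append driver-private details to each request line by
 * implementing the optional show_rq() hook in their blk_mq_ops. A minimal
 * sketch (hypothetical "foo" driver, for illustration only):
 *
 *	static void foo_show_rq(struct seq_file *m, struct request *rq)
 *	{
 *		struct foo_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *
 *		seq_printf(m, ", .foo_state=%d", cmd->state);
 *	}
 *
 *	static const struct blk_mq_ops foo_mq_ops = {
 *		...
 *		.show_rq	= foo_show_rq,
 *	};
 */
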
static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start	= hctx_dispatch_start,
	.next	= hctx_dispatch_next,
	.stop	= hctx_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};

struct show_busy_params {
	struct seq_file		*m;
	struct blk_mq_hw_ctx	*hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data)
{
	const struct show_busy_params *params = data;

	if (rq->mq_hctx == params->hctx)
		__blk_mq_debugfs_rq_show(params->m, rq);

	return true;
}

static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}

static const char *const hctx_types[] = {
	[HCTX_TYPE_DEFAULT]	= "default",
	[HCTX_TYPE_READ]	= "read",
	[HCTX_TYPE_POLL]	= "poll",
};

static int hctx_type_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
	seq_printf(m, "%s\n", hctx_types[hctx->type]);
	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}

static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   READ_ONCE(tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}

static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", __blk_mq_active_requests(hctx));
	return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}

#define CTX_RQ_SEQ_OPS(name, type)					\
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
	__acquires(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_lock(&ctx->lock);						\
	return seq_list_start(&ctx->rq_lists[type], *pos);		\
}									\
									\
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v,	\
				     loff_t *pos)			\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	return seq_list_next(v, &ctx->rq_lists[type], pos);		\
}									\
									\
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v)	\
	__releases(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_unlock(&ctx->lock);					\
}									\
									\
static const struct seq_operations ctx_##name##_rq_list_seq_ops = {	\
	.start	= ctx_##name##_rq_list_start,				\
	.next	= ctx_##name##_rq_list_next,				\
	.stop	= ctx_##name##_rq_list_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
}

CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);

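/*
 * Each CTX_RQ_SEQ_OPS() expansion above defines a complete set of seq_file
 * iterators plus a seq_operations instance; e.g. CTX_RQ_SEQ_OPS(read,
 * HCTX_TYPE_READ) produces ctx_read_rq_list_seq_ops, which walks
 * ctx->rq_lists[HCTX_TYPE_READ] under ctx->lock.
 */
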
static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same as 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);

	return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
	.open		= blk_mq_debugfs_open,
	.read		= seq_read,
	.write		= blk_mq_debugfs_write,
	.llseek		= seq_lseek,
	.release	= blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{"type", 0400, hctx_type_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
	{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
	{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
	{},
};

static void debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	if (IS_ERR_OR_NULL(parent))
		return;

	d_inode(parent)->i_private = data;

	for (; attr->name; attr++)
		debugfs_create_file(attr->name, attr->mode, parent,
				    (void *)attr, &blk_mq_debugfs_fops);
}

void blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);

	/*
	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	if (q->elevator && !q->sched_debugfs_dir)
		blk_mq_debugfs_register_sched(q);

	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir)
			blk_mq_debugfs_register_hctx(q, hctx);
		if (q->elevator && !hctx->sched_debugfs_dir)
			blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	if (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;

		while (rqos) {
			blk_mq_debugfs_register_rqos(rqos);
			rqos = rqos->next;
		}
	}
}

static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);

	debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}

void blk_mq_debugfs_register_hctx(struct request_queue *q,
				  struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	if (!q->debugfs_dir)
		return;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);

	debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);

	hctx_for_each_ctx(hctx, ctx, i)
		blk_mq_debugfs_register_ctx(hctx, ctx);
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (!hctx->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_register_hctx(q, hctx);
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}

void blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	lockdep_assert_held(&q->debugfs_mutex);

	/*
	 * If the parent directory has not been created yet, return; we will be
	 * called again later on and the directory/files will be created then.
	 */
	if (!q->debugfs_dir)
		return;

	if (!e->queue_debugfs_attrs)
		return;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);

	debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	lockdep_assert_held(&q->debugfs_mutex);

	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}

static const char *rq_qos_id_to_name(enum rq_qos_id id)
{
	switch (id) {
	case RQ_QOS_WBT:
		return "wbt";
	case RQ_QOS_LATENCY:
		return "latency";
	case RQ_QOS_COST:
		return "cost";
	}
	return "unknown";
}

void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
	lockdep_assert_held(&rqos->disk->queue->debugfs_mutex);

	if (!rqos->disk->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(rqos->debugfs_dir);
	rqos->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
	struct request_queue *q = rqos->disk->queue;
	const char *dir_name = rq_qos_id_to_name(rqos->id);

	lockdep_assert_held(&q->debugfs_mutex);

	if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
		return;

	if (!q->rqos_debugfs_dir)
		q->rqos_debugfs_dir = debugfs_create_dir("rqos",
							 q->debugfs_dir);

	rqos->debugfs_dir = debugfs_create_dir(dir_name, q->rqos_debugfs_dir);
	debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}

void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
					struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	lockdep_assert_held(&q->debugfs_mutex);

	/*
	 * If the parent debugfs directory has not been created yet, return;
	 * we will be called again later on with the appropriate parent debugfs
	 * directory from blk_register_queue().
	 */
	if (!hctx->debugfs_dir)
		return;

	if (!e->hctx_debugfs_attrs)
		return;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	debugfs_create_files(hctx->sched_debugfs_dir, hctx,
			     e->hctx_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	lockdep_assert_held(&hctx->queue->debugfs_mutex);

	if (!hctx->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}