// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"

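/*
 * kobject release handlers. Resources tied to a software or hardware
 * context must only be freed once the last sysfs reference to the
 * corresponding kobject has been dropped.
 */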
static void blk_mq_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj);

	free_percpu(ctxs->queue_ctx);
	kfree(ctxs);
}

static void blk_mq_ctx_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);

	/* ctx->ctxs won't be released until all ctx are freed */
	kobject_put(&ctx->ctxs->kobj);
}

static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
						  kobj);

	blk_free_flush_queue(hctx->fq);
	sbitmap_free(&hctx->ctx_map);
	free_cpumask_var(hctx->cpumask);
	kfree(hctx->ctxs);
	kfree(hctx);
}

struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
};

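/*
 * Common show() dispatcher for all hctx attributes: recover the entry and
 * hctx from the embedded kobject and call the per-attribute handler under
 * q->sysfs_lock.
 */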
static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_tags);
}

static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
						     char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
}

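/*
 * Print the CPUs mapped to this hardware context as a comma-separated
 * list, truncating if the output would overflow the PAGE_SIZE buffer.
 */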
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	const size_t size = PAGE_SIZE - 1;
	unsigned int i, first = 1;
	int ret = 0, pos = 0;

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret = snprintf(pos + page, size - pos, "%u", i);
		else
			ret = snprintf(pos + page, size - pos, ", %u", i);

		if (ret >= size - pos)
			break;

		first = 0;
		pos += ret;
	}

	ret = snprintf(pos + page, size + 1 - pos, "\n");
	return pos + ret;
}

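/* Read-only attributes exposed for each hardware context. */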
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
	.attr = {.name = "nr_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
	.attr = {.name = "nr_reserved_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = 0444 },
	.show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_nr_tags.attr,
	&blk_mq_hw_sysfs_nr_reserved_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	NULL,
};
ATTRIBUTE_GROUPS(default_hw_ctx);

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show	= blk_mq_hw_sysfs_show,
};

static const struct kobj_type blk_mq_ktype = {
	.release	= blk_mq_sysfs_release,
};

static const struct kobj_type blk_mq_ctx_ktype = {
	.release	= blk_mq_ctx_sysfs_release,
};

static const struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.default_groups = default_hw_ctx_groups,
	.release	= blk_mq_hw_sysfs_release,
};

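/* Delete the per-ctx kobjects, then the hctx kobject itself. */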
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx)
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}

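/*
 * Add the hctx kobject under mq/ plus one "cpu<N>" kobject per software
 * context mapped to this hctx; on failure, unwind whatever was added.
 */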
static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, j, ret;

	if (!hctx->nr_ctx)
		return 0;

	ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			goto out;
	}

	return 0;
out:
	hctx_for_each_ctx(hctx, ctx, j) {
		if (j < i)
			kobject_del(&ctx->kobj);
	}
	kobject_del(&hctx->kobj);
	return ret;
}

void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

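/* Drop the references taken by blk_mq_sysfs_init(). */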
void blk_mq_sysfs_deinit(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_put(&ctx->kobj);
	}
	kobject_put(q->mq_kobj);
}

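/*
 * Initialize the mq kobject and one kobject per software context. Each
 * ctx takes a reference on the mq kobject, dropped again in the ctx
 * release handler, so the mq kobject outlives every ctx.
 */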
void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);

		kobject_get(q->mq_kobj);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}

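/*
 * Create the "mq" directory under the disk device and populate it with
 * one directory per hardware context; on failure everything added so far
 * is torn down again.
 */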
int blk_mq_sysfs_register(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	unsigned long i, j;
	int ret;

	lockdep_assert_held(&q->sysfs_dir_lock);

	ret = kobject_add(q->mq_kobj, &disk_to_dev(disk)->kobj, "mq");
	if (ret < 0)
		goto out;

	kobject_uevent(q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			goto unreg;
	}

	q->mq_sysfs_init_done = true;

out:
	return ret;

unreg:
	queue_for_each_hw_ctx(q, hctx, j) {
		if (j < i)
			blk_mq_unregister_hctx(hctx);
	}

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	return ret;
}

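/* Tear down everything created by blk_mq_sysfs_register(). */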
void blk_mq_sysfs_unregister(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	lockdep_assert_held(&q->sysfs_dir_lock);

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);

	q->mq_sysfs_init_done = false;
}

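/*
 * Remove only the per-hctx directories, leaving the mq kobject itself
 * registered; blk_mq_sysfs_register_hctxs() below re-adds them.
 */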
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	mutex_lock(&q->sysfs_dir_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

unlock:
	mutex_unlock(&q->sysfs_dir_lock);
}

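/* Re-add the per-hctx directories removed by blk_mq_sysfs_unregister_hctxs(). */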
int blk_mq_sysfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;
	int ret = 0;

	mutex_lock(&q->sysfs_dir_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

unlock:
	mutex_unlock(&q->sysfs_dir_lock);

	return ret;
}