block/blk-mq-sysfs.c
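/*
 * sysfs support for blk-mq: exposes per-software-context (ctx) and
 * per-hardware-context (hctx) statistics under each queue's "mq"
 * directory in sysfs.
 */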
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"
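/*
 * The ctx and hctx kobjects are embedded in structures that are freed
 * when the queue itself is torn down, so the release callback has
 * nothing to do.
 */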
static void blk_mq_sysfs_release(struct kobject *kobj)
{
}

struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};
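/*
 * The four wrappers below resolve the generic attribute back to its
 * blk_mq_(hw_)ctx_sysfs_entry, take q->sysfs_lock, and only invoke the
 * handler if the queue is not dying.
 */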
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
				 char *page)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(ctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *page, size_t length)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(ctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}
static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
				     struct attribute *attr, const char *page,
				     size_t length)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(hctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}
static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1],
				ctx->rq_dispatched[0]);
}

static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu\n", ctx->rq_merged);
}

static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu %lu\n", ctx->rq_completed[1],
				ctx->rq_completed[0]);
}
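/*
 * Format one request pointer per line.  Each "\t%p\n" line needs at most
 * 2 * sizeof(rq) + 2 bytes (two hex digits per pointer byte, plus the
 * tab and newline), which is what the truncation check below budgets for.
 */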
static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
{
	struct request *rq;
	int len = snprintf(page, PAGE_SIZE - 1, "%s:\n", msg);

	list_for_each_entry(rq, list, queuelist) {
		const int rq_len = 2 * sizeof(rq) + 2;

		/* if the output will be truncated */
		if (PAGE_SIZE - 1 < len + rq_len) {
			/* backspacing if it can't hold '\t...\n' */
			if (PAGE_SIZE - 1 < len + 5)
				len -= rq_len;
			len += snprintf(page + len, PAGE_SIZE - 1 - len,
					"\t...\n");
			break;
		}
		len += snprintf(page + len, PAGE_SIZE - 1 - len,
				"\t%p\n", rq);
	}

	return len;
}
static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
{
	ssize_t ret;

	spin_lock(&ctx->lock);
	ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending");
	spin_unlock(&ctx->lock);

	return ret;
}

static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
					   char *page)
{
	return sprintf(page, "%lu\n", hctx->queued);
}

static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return sprintf(page, "%lu\n", hctx->run);
}

static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
					       char *page)
{
	char *start_page = page;
	int i;

	page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) {
		unsigned long d = 1U << (i - 1);

		page += sprintf(page, "%8lu\t%lu\n", d, hctx->dispatched[i]);
	}

	return page - start_page;
}

static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	ssize_t ret;

	spin_lock(&hctx->lock);
	ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending");
	spin_unlock(&hctx->lock);

	return ret;
}

static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return blk_mq_tag_sysfs_show(hctx->tags, page);
}

static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return sprintf(page, "%u\n", atomic_read(&hctx->nr_active));
}
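/*
 * CPU hotplug is disabled while walking hctx->cpumask so the CPU
 * mapping cannot change underneath the formatting loop.
 */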
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	unsigned int i, first = 1;
	ssize_t ret = 0;

	blk_mq_disable_hotplug();

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret += sprintf(ret + page, "%u", i);
		else
			ret += sprintf(ret + page, ", %u", i);

		first = 0;
	}

	blk_mq_enable_hotplug();

	ret += sprintf(ret + page, "\n");
	return ret;
}
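/*
 * All attributes below are read-only (S_IRUGO); no entry defines a
 * store handler, so writes fail with -EIO in the wrappers above.
 */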
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
	.attr = {.name = "dispatched", .mode = S_IRUGO },
	.show = blk_mq_sysfs_dispatched_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
	.attr = {.name = "merged", .mode = S_IRUGO },
	.show = blk_mq_sysfs_merged_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
	.attr = {.name = "completed", .mode = S_IRUGO },
	.show = blk_mq_sysfs_completed_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = {
	.attr = {.name = "rq_list", .mode = S_IRUGO },
	.show = blk_mq_sysfs_rq_list_show,
};

static struct attribute *default_ctx_attrs[] = {
	&blk_mq_sysfs_dispatched.attr,
	&blk_mq_sysfs_merged.attr,
	&blk_mq_sysfs_completed.attr,
	&blk_mq_sysfs_rq_list.attr,
	NULL,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = {
	.attr = {.name = "queued", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_queued_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = {
	.attr = {.name = "run", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_run_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
	.attr = {.name = "dispatched", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_dispatched_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = {
	.attr = {.name = "active", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_active_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
	.attr = {.name = "pending", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_rq_list_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
	.attr = {.name = "tags", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_queued.attr,
	&blk_mq_hw_sysfs_run.attr,
	&blk_mq_hw_sysfs_dispatched.attr,
	&blk_mq_hw_sysfs_pending.attr,
	&blk_mq_hw_sysfs_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	&blk_mq_hw_sysfs_active.attr,
	NULL,
};
static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show = blk_mq_sysfs_show,
	.store = blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show = blk_mq_hw_sysfs_show,
	.store = blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
	.sysfs_ops = &blk_mq_sysfs_ops,
	.release = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops = &blk_mq_sysfs_ops,
	.default_attrs = default_ctx_attrs,
	.release = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops = &blk_mq_hw_sysfs_ops,
	.default_attrs = default_hw_ctx_attrs,
	.release = blk_mq_sysfs_release,
};
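/*
 * BLK_MQ_F_SYSFS_UP (set in blk_mq_register_disk() below) tracks whether
 * the sysfs entries for a hardware queue were created, so re-registering
 * or unregistering only touches hctxs that are actually up.
 */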
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}

static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, ret;

	if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
		return 0;

	ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			break;
	}

	return ret;
}
void blk_mq_unregister_disk(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	int i, j;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_unregister_hctx(hctx);

		hctx_for_each_ctx(hctx, ctx, j)
			kobject_put(&ctx->kobj);

		kobject_put(&hctx->kobj);
	}

	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
	kobject_del(&q->mq_kobj);
	kobject_put(&q->mq_kobj);

	kobject_put(&disk_to_dev(disk)->kobj);
}
static void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	int i;

	kobject_init(&q->mq_kobj, &blk_mq_ktype);

	queue_for_each_hw_ctx(q, hctx, i)
		kobject_init(&hctx->kobj, &blk_mq_hw_ktype);

	queue_for_each_ctx(q, ctx, i)
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
}

/* see blk_register_queue() */
void blk_mq_finish_init(struct request_queue *q)
{
	percpu_ref_switch_to_percpu(&q->mq_usage_counter);
}
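/*
 * Creates the /sys/block/<disk>/mq hierarchy: one numbered directory per
 * hardware queue, each containing a cpuN directory per mapped software
 * context.
 */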
int blk_mq_register_disk(struct gendisk *disk)
{
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	int ret, i;

	blk_mq_sysfs_init(q);

	ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
	if (ret < 0)
		return ret;

	kobject_uevent(&q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		hctx->flags |= BLK_MQ_F_SYSFS_UP;
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

	if (ret) {
		blk_mq_unregister_disk(disk);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(blk_mq_register_disk);
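/*
 * Used when the hardware queue layout changes (e.g. across a CPU hotplug
 * induced queue remap): tear down and re-create the per-hctx entries
 * without touching the parent mq directory.
 */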
void blk_mq_sysfs_unregister(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);
}

int blk_mq_sysfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i, ret = 0;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

	return ret;
}