#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu_ida.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

/*
 * Per tagged queue (tag address space) map
 */
struct blk_mq_tags {
        unsigned int nr_tags;
        unsigned int nr_reserved_tags;
        unsigned int nr_batch_move;
        unsigned int nr_max_cache;

        struct percpu_ida free_tags;
        struct percpu_ida reserved_tags;
};
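
/*
 * Wait until a regular tag can be allocated: do a blocking allocation and
 * immediately release the tag again.
 */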
void blk_mq_wait_for_tags(struct blk_mq_tags *tags)
{
        int tag = blk_mq_get_tag(tags, __GFP_WAIT, false);
        blk_mq_put_tag(tags, tag);
}
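
/*
 * Return true if the regular tag pool currently has free tags (or if no
 * tag map is attached at all).
 */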
bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
        return !tags ||
                percpu_ida_free_tags(&tags->free_tags, nr_cpu_ids) != 0;
}
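
/*
 * Allocate a tag from the regular pool. The percpu_ida space starts at
 * zero, so the reserved tag count is added to map the result into the
 * upper part of the tag space.
 */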
static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp)
{
        int tag;

        tag = percpu_ida_alloc(&tags->free_tags, (gfp & __GFP_WAIT) ?
                               TASK_UNINTERRUPTIBLE : TASK_RUNNING);
        if (tag < 0)
                return BLK_MQ_TAG_FAIL;
        return tag + tags->nr_reserved_tags;
}
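
/*
 * Allocate a tag from the reserved pool. Reserved tags occupy the range
 * [0, nr_reserved_tags).
 */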
static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags,
                                              gfp_t gfp)
{
        int tag;

        if (unlikely(!tags->nr_reserved_tags)) {
                WARN_ON_ONCE(1);
                return BLK_MQ_TAG_FAIL;
        }

        tag = percpu_ida_alloc(&tags->reserved_tags, (gfp & __GFP_WAIT) ?
                               TASK_UNINTERRUPTIBLE : TASK_RUNNING);
        if (tag < 0)
                return BLK_MQ_TAG_FAIL;
        return tag;
}
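
/*
 * Allocate a tag from the reserved or the regular pool, depending on
 * @reserved. Returns BLK_MQ_TAG_FAIL on failure.
 */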
unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp, bool reserved)
{
        if (!reserved)
                return __blk_mq_get_tag(tags, gfp);

        return __blk_mq_get_reserved_tag(tags, gfp);
}
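
/*
 * Return a regular tag to its pool, undoing the reserved-tag offset that
 * was applied at allocation time.
 */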
static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
{
        BUG_ON(tag >= tags->nr_tags);

        percpu_ida_free(&tags->free_tags, tag - tags->nr_reserved_tags);
}
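
/*
 * Return a reserved tag to its pool.
 */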
static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
                                      unsigned int tag)
{
        BUG_ON(tag >= tags->nr_reserved_tags);

        percpu_ida_free(&tags->reserved_tags, tag);
}
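
/*
 * Release a tag into whichever pool it was allocated from, based on its
 * position relative to the reserved range.
 */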
void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
{
        if (tag >= tags->nr_reserved_tags)
                __blk_mq_put_tag(tags, tag);
        else
                __blk_mq_put_reserved_tag(tags, tag);
}
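
/*
 * percpu_ida_for_each_free() callback: mark a free tag in the caller's
 * bitmap.
 */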
static int __blk_mq_tag_iter(unsigned id, void *data)
{
        unsigned long *tag_map = data;

        __set_bit(id, tag_map);
        return 0;
}
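
/*
 * Build a bitmap with one bit set per free tag (both pools) and pass it
 * to @fn together with @data; tags whose bits remain clear are in flight.
 */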
void blk_mq_tag_busy_iter(struct blk_mq_tags *tags,
                          void (*fn)(void *, unsigned long *), void *data)
{
        unsigned long *tag_map;
        size_t map_size;

        map_size = ALIGN(tags->nr_tags, BITS_PER_LONG) / BITS_PER_LONG;
        tag_map = kzalloc(map_size * sizeof(unsigned long), GFP_ATOMIC);
        if (!tag_map)
                return;

        percpu_ida_for_each_free(&tags->free_tags, __blk_mq_tag_iter, tag_map);
        if (tags->nr_reserved_tags)
                percpu_ida_for_each_free(&tags->reserved_tags, __blk_mq_tag_iter,
                                         tag_map);

        fn(data, tag_map);
        kfree(tag_map);
}
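
/*
 * Allocate and initialize a tag map. The regular pool gets a per-cpu
 * cache clamped between BLK_MQ_TAG_CACHE_MIN and BLK_MQ_TAG_CACHE_MAX;
 * the optional reserved pool is set up without caching.
 */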
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
                                     unsigned int reserved_tags, int node)
{
        unsigned int nr_tags, nr_cache;
        struct blk_mq_tags *tags;
        int ret;

        if (total_tags > BLK_MQ_TAG_MAX) {
                pr_err("blk-mq: tag depth too large\n");
                return NULL;
        }

        tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
        if (!tags)
                return NULL;

        nr_tags = total_tags - reserved_tags;
        nr_cache = nr_tags / num_possible_cpus();

        if (nr_cache < BLK_MQ_TAG_CACHE_MIN)
                nr_cache = BLK_MQ_TAG_CACHE_MIN;
        else if (nr_cache > BLK_MQ_TAG_CACHE_MAX)
                nr_cache = BLK_MQ_TAG_CACHE_MAX;

        tags->nr_tags = total_tags;
        tags->nr_reserved_tags = reserved_tags;
        tags->nr_max_cache = nr_cache;
        tags->nr_batch_move = max(1u, nr_cache / 2);

        ret = __percpu_ida_init(&tags->free_tags, tags->nr_tags -
                                tags->nr_reserved_tags,
                                tags->nr_max_cache,
                                tags->nr_batch_move);
        if (ret)
                goto err_free_tags;

        if (reserved_tags) {
                /*
                 * With max_cache and batch set to 1, the allocator falls
                 * back to no per-cpu caching. It's fine if reserved tag
                 * allocation is slow.
                 */
                ret = __percpu_ida_init(&tags->reserved_tags, reserved_tags,
                                        1, 1);
                if (ret)
                        goto err_reserved_tags;
        }

        return tags;

err_reserved_tags:
        percpu_ida_destroy(&tags->free_tags);
err_free_tags:
        kfree(tags);
        return NULL;
}
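
/*
 * Tear down both percpu_ida pools and free the tag map itself.
 */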
void blk_mq_free_tags(struct blk_mq_tags *tags)
{
        percpu_ida_destroy(&tags->free_tags);
        percpu_ida_destroy(&tags->reserved_tags);
        kfree(tags);
}
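
/*
 * Format the tag map configuration and the current free-tag counts
 * (overall and per cpu) into @page for sysfs.
 */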
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
        char *orig_page = page;
        unsigned int cpu;

        if (!tags)
                return 0;

        page += sprintf(page, "nr_tags=%u, reserved_tags=%u, batch_move=%u,"
                        " max_cache=%u\n", tags->nr_tags, tags->nr_reserved_tags,
                        tags->nr_batch_move, tags->nr_max_cache);

        page += sprintf(page, "nr_free=%u, nr_reserved=%u\n",
                        percpu_ida_free_tags(&tags->free_tags, nr_cpu_ids),
                        percpu_ida_free_tags(&tags->reserved_tags, nr_cpu_ids));

        for_each_possible_cpu(cpu) {
                page += sprintf(page, "  cpu%02u: nr_free=%u\n", cpu,
                                percpu_ida_free_tags(&tags->free_tags, cpu));
        }

        return page - orig_page;
}