block/blk-mq-tag.c
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu_ida.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
/*
 * Per tagged queue (tag address space) map
 */
struct blk_mq_tags {
	unsigned int nr_tags;
	unsigned int nr_reserved_tags;
	unsigned int nr_batch_move;
	unsigned int nr_max_cache;

	struct percpu_ida free_tags;
	struct percpu_ida reserved_tags;
};
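/*
 * Block until at least one tag is available: allocate a tag, sleeping if
 * need be, then return it straight back to the pool.
 */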
void blk_mq_wait_for_tags(struct blk_mq_tags *tags)
{
	int tag = blk_mq_get_tag(tags, __GFP_WAIT, false);
	blk_mq_put_tag(tags, tag);
}
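/* Returns true if any non-reserved tags are free, or if @tags is NULL. */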
bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	return !tags ||
		percpu_ida_free_tags(&tags->free_tags, nr_cpu_ids) != 0;
}
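/*
 * Allocate a normal tag. percpu_ida_alloc() sleeps when __GFP_WAIT is set
 * and fails fast otherwise. The returned value is offset by
 * nr_reserved_tags, so reserved tags occupy the low end of the tag space
 * as seen by callers.
 */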
static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp)
{
	int tag;

	tag = percpu_ida_alloc(&tags->free_tags, (gfp & __GFP_WAIT) ?
			       TASK_UNINTERRUPTIBLE : TASK_RUNNING);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;
	return tag + tags->nr_reserved_tags;
}
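/*
 * Allocate from the reserved pool. Warns (once) and fails if the caller
 * asks for a reserved tag on a queue that was set up without any.
 */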
static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags,
					      gfp_t gfp)
{
	int tag;

	if (unlikely(!tags->nr_reserved_tags)) {
		WARN_ON_ONCE(1);
		return BLK_MQ_TAG_FAIL;
	}

	tag = percpu_ida_alloc(&tags->reserved_tags, (gfp & __GFP_WAIT) ?
			       TASK_UNINTERRUPTIBLE : TASK_RUNNING);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;
	return tag;
}
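/* Dispatch to the normal or reserved allocator based on @reserved. */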
unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp, bool reserved)
{
	if (!reserved)
		return __blk_mq_get_tag(tags, gfp);

	return __blk_mq_get_reserved_tag(tags, gfp);
}
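/*
 * Free a normal tag back to the percpu_ida pool, undoing the
 * nr_reserved_tags offset applied at allocation time.
 */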
static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
{
	BUG_ON(tag >= tags->nr_tags);

	percpu_ida_free(&tags->free_tags, tag - tags->nr_reserved_tags);
}
static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
				      unsigned int tag)
{
	BUG_ON(tag >= tags->nr_reserved_tags);

	percpu_ida_free(&tags->reserved_tags, tag);
}
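/*
 * Release a tag: values below nr_reserved_tags came from the reserved
 * pool, everything else from the normal pool.
 */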
void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
{
	if (tag >= tags->nr_reserved_tags)
		__blk_mq_put_tag(tags, tag);
	else
		__blk_mq_put_reserved_tag(tags, tag);
}
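/* percpu_ida callback: mark each free tag id in the caller's bitmap. */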
static int __blk_mq_tag_iter(unsigned id, void *data)
{
	unsigned long *tag_map = data;
	__set_bit(id, tag_map);
	return 0;
}
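/*
 * Build a bitmap of all free tags (normal and reserved) and hand it to
 * @fn; callers typically treat the clear bits as tags that are in flight.
 */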
void blk_mq_tag_busy_iter(struct blk_mq_tags *tags,
			  void (*fn)(void *, unsigned long *), void *data)
{
	unsigned long *tag_map;
	size_t map_size;

	map_size = ALIGN(tags->nr_tags, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(map_size * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		return;

	percpu_ida_for_each_free(&tags->free_tags, __blk_mq_tag_iter, tag_map);
	if (tags->nr_reserved_tags)
		percpu_ida_for_each_free(&tags->reserved_tags, __blk_mq_tag_iter,
					 tag_map);

	fn(data, tag_map);
	kfree(tag_map);
}
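/*
 * Allocate and initialize a tag map on @node. The per-cpu cache size is
 * scaled to the non-reserved tag depth divided by the number of possible
 * CPUs, clamped to [BLK_MQ_TAG_CACHE_MIN, BLK_MQ_TAG_CACHE_MAX], with half
 * the cache moved between CPUs per batch.
 */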
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags, int node)
{
	unsigned int nr_tags, nr_cache;
	struct blk_mq_tags *tags;
	int ret;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	nr_tags = total_tags - reserved_tags;
	nr_cache = nr_tags / num_possible_cpus();

	if (nr_cache < BLK_MQ_TAG_CACHE_MIN)
		nr_cache = BLK_MQ_TAG_CACHE_MIN;
	else if (nr_cache > BLK_MQ_TAG_CACHE_MAX)
		nr_cache = BLK_MQ_TAG_CACHE_MAX;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;
	tags->nr_max_cache = nr_cache;
	tags->nr_batch_move = max(1u, nr_cache / 2);

	ret = __percpu_ida_init(&tags->free_tags, tags->nr_tags -
				tags->nr_reserved_tags,
				tags->nr_max_cache,
				tags->nr_batch_move);
	if (ret)
		goto err_free_tags;
	if (reserved_tags) {
		/*
		 * With max_cache and batch set to 1, the allocator falls back
		 * to no per-cpu caching. That's fine; reserved tag allocation
		 * is allowed to be slow.
		 */
		ret = __percpu_ida_init(&tags->reserved_tags, reserved_tags,
					1, 1);
		if (ret)
			goto err_reserved_tags;
	}

	return tags;
err_reserved_tags:
	percpu_ida_destroy(&tags->free_tags);
err_free_tags:
	kfree(tags);
	return NULL;
}
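/* Tear down both percpu_ida pools and free the tag map itself. */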
void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	percpu_ida_destroy(&tags->free_tags);
	percpu_ida_destroy(&tags->reserved_tags);
	kfree(tags);
}
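/*
 * Format tag map state (depths, cache tuning, free counts per CPU) into
 * @page for sysfs. Returns the number of bytes written.
 */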
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
	char *orig_page = page;
	unsigned int cpu;

	if (!tags)
		return 0;

	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, batch_move=%u,"
			" max_cache=%u\n", tags->nr_tags, tags->nr_reserved_tags,
			tags->nr_batch_move, tags->nr_max_cache);

	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n",
			percpu_ida_free_tags(&tags->free_tags, nr_cpu_ids),
			percpu_ida_free_tags(&tags->reserved_tags, nr_cpu_ids));

	for_each_possible_cpu(cpu) {
		page += sprintf(page, "  cpu%02u: nr_free=%u\n", cpu,
				percpu_ida_free_tags(&tags->free_tags, cpu));
	}

	return page - orig_page;
}