/*
 * Percpu IDA library
 *
 * Copyright (C) 2013 Datera, Inc. Kent Overstreet
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/percpu_ida.h>

struct percpu_ida_cpu {
        /*
         * Even though this is percpu, we need a lock for tag stealing by remote
         * CPUs:
         */
        spinlock_t lock;

        /* nr_free/freelist form a stack of free IDs */
        unsigned nr_free;
        unsigned freelist[];
};

static inline void move_tags(unsigned *dst, unsigned *dst_nr,
                             unsigned *src, unsigned *src_nr,
                             unsigned nr)
{
        *src_nr -= nr;
        memcpy(dst + *dst_nr, src + *src_nr, sizeof(unsigned) * nr);
        *dst_nr += nr;
}

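/*
 * Worked example (illustrative, not from the original source): with
 * *dst_nr == 2, *src_nr == 8 and nr == 3, the three topmost entries
 * src[5..7] are copied to dst[2..4], leaving *src_nr == 5 and
 * *dst_nr == 5 - a batched pop/push between two ID stacks.
 */
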
/*
 * Try to steal tags from a remote cpu's percpu freelist.
 *
 * We first check how many percpu freelists have tags - we don't steal tags
 * unless enough percpu freelists have tags on them that it's possible more than
 * half the total tags could be stuck on remote percpu freelists.
 *
 * Then we iterate through the cpus until we find some tags - we don't attempt
 * to find the "best" cpu to steal from, to keep cacheline bouncing to a
 * minimum.
 */
static inline void steal_tags(struct percpu_ida *pool,
                              struct percpu_ida_cpu *tags)
{
        unsigned cpus_have_tags, cpu = pool->cpu_last_stolen;
        struct percpu_ida_cpu *remote;

        for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
             cpus_have_tags * pool->percpu_max_size > pool->nr_tags / 2;
             cpus_have_tags--) {
                cpu = cpumask_next(cpu, &pool->cpus_have_tags);

                if (cpu >= nr_cpu_ids) {
                        cpu = cpumask_first(&pool->cpus_have_tags);
                        if (cpu >= nr_cpu_ids)
                                BUG();
                }

                pool->cpu_last_stolen = cpu;
                remote = per_cpu_ptr(pool->tag_cpu, cpu);

                cpumask_clear_cpu(cpu, &pool->cpus_have_tags);

                if (remote == tags)
                        continue;

                spin_lock(&remote->lock);

                if (remote->nr_free) {
                        memcpy(tags->freelist,
                               remote->freelist,
                               sizeof(unsigned) * remote->nr_free);

                        tags->nr_free = remote->nr_free;
                        remote->nr_free = 0;
                }

                spin_unlock(&remote->lock);

                if (tags->nr_free)
                        break;
        }
}

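/*
 * Worked example of the stealing threshold above (illustrative numbers, not
 * from the original source): with nr_tags == 1024 and percpu_max_size == 128,
 * the loop only runs while cpus_have_tags * 128 > 512, i.e. while at least
 * five cpus might together be holding more than half of all the tags.
 */
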
/*
 * Pop up to percpu_batch_size IDs off the global freelist, and push them onto
 * our percpu freelist:
 */
static inline void alloc_global_tags(struct percpu_ida *pool,
                                     struct percpu_ida_cpu *tags)
{
        move_tags(tags->freelist, &tags->nr_free,
                  pool->freelist, &pool->nr_free,
                  min(pool->nr_free, pool->percpu_batch_size));
}

static inline int alloc_local_tag(struct percpu_ida_cpu *tags)
{
        int tag = -ENOSPC;

        spin_lock(&tags->lock);
        if (tags->nr_free)
                tag = tags->freelist[--tags->nr_free];
        spin_unlock(&tags->lock);

        return tag;
}

/**
 * percpu_ida_alloc - allocate a tag
 * @pool: pool to allocate from
 * @gfp: gfp flags
 *
 * Returns a tag - an integer in the range [0..nr_tags) (passed to
 * percpu_ida_init()), or otherwise -ENOSPC on allocation failure.
 *
 * Safe to be called from interrupt context (assuming it isn't passed
 * __GFP_WAIT, of course).
 *
 * @gfp indicates whether or not to wait until a free id is available (it's not
 * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep
 * however long it takes until another thread frees an id (same semantics as a
 * mempool).
 *
 * Will not fail if passed __GFP_WAIT.
 */
int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
{
        DEFINE_WAIT(wait);
        struct percpu_ida_cpu *tags;
        unsigned long flags;
        int tag;

        local_irq_save(flags);
        tags = this_cpu_ptr(pool->tag_cpu);

        /* Fastpath */
        tag = alloc_local_tag(tags);
        if (likely(tag >= 0)) {
                local_irq_restore(flags);
                return tag;
        }

        while (1) {
                spin_lock(&pool->lock);

                /*
                 * prepare_to_wait() must come before steal_tags(), in case
                 * percpu_ida_free() on another cpu flips a bit in
                 * cpus_have_tags
                 *
                 * global lock held and irqs disabled, don't need percpu lock
                 */
                prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

                if (!tags->nr_free)
                        alloc_global_tags(pool, tags);
                if (!tags->nr_free)
                        steal_tags(pool, tags);

                if (tags->nr_free) {
                        tag = tags->freelist[--tags->nr_free];
                        if (tags->nr_free)
                                cpumask_set_cpu(smp_processor_id(),
                                                &pool->cpus_have_tags);
                }

                spin_unlock(&pool->lock);
                local_irq_restore(flags);

                if (tag >= 0 || !(gfp & __GFP_WAIT))
                        break;

                schedule();

                local_irq_save(flags);
                tags = this_cpu_ptr(pool->tag_cpu);
        }

        finish_wait(&pool->wait, &wait);
        return tag;
}
EXPORT_SYMBOL_GPL(percpu_ida_alloc);

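/*
 * Example usage (illustrative sketch, not part of this file): drivers
 * typically use the returned tag to index a preallocated array of objects,
 * where cmds points to nr_tags preallocated entries. "struct my_dev",
 * "struct my_cmd" and "my_cmd_get" are hypothetical names. GFP_NOWAIT is
 * used here so the caller sees -ENOSPC instead of sleeping; passing
 * __GFP_WAIT (e.g. via GFP_KERNEL) would instead block until a tag frees.
 *
 *	struct my_dev {
 *		struct percpu_ida tag_pool;
 *		struct my_cmd *cmds;
 *	};
 *
 *	static struct my_cmd *my_cmd_get(struct my_dev *dev)
 *	{
 *		int tag = percpu_ida_alloc(&dev->tag_pool, GFP_NOWAIT);
 *
 *		return tag < 0 ? NULL : &dev->cmds[tag];
 *	}
 */
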
/**
 * percpu_ida_free - free a tag
 * @pool: pool @tag was allocated from
 * @tag: a tag previously allocated with percpu_ida_alloc()
 *
 * Safe to be called from interrupt context.
 */
void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
{
        struct percpu_ida_cpu *tags;
        unsigned long flags;
        unsigned nr_free;

        BUG_ON(tag >= pool->nr_tags);

        local_irq_save(flags);
        tags = this_cpu_ptr(pool->tag_cpu);

        spin_lock(&tags->lock);
        tags->freelist[tags->nr_free++] = tag;

        nr_free = tags->nr_free;
        spin_unlock(&tags->lock);

        if (nr_free == 1) {
                cpumask_set_cpu(smp_processor_id(),
                                &pool->cpus_have_tags);
                wake_up(&pool->wait);
        }

        if (nr_free == pool->percpu_max_size) {
                spin_lock(&pool->lock);

                /*
                 * Global lock held and irqs disabled, don't need percpu
                 * lock
                 */
                if (tags->nr_free == pool->percpu_max_size) {
                        move_tags(pool->freelist, &pool->nr_free,
                                  tags->freelist, &tags->nr_free,
                                  pool->percpu_batch_size);

                        wake_up(&pool->wait);
                }
                spin_unlock(&pool->lock);
        }

        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(percpu_ida_free);

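/*
 * Continuing the sketch above (hypothetical names): the tag is recovered
 * from the object's position in the preallocated array, so no separate
 * bookkeeping is needed.
 *
 *	static void my_cmd_put(struct my_dev *dev, struct my_cmd *cmd)
 *	{
 *		percpu_ida_free(&dev->tag_pool, cmd - dev->cmds);
 *	}
 */
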
/**
 * percpu_ida_destroy - release a tag pool's resources
 * @pool: pool to free
 *
 * Frees the resources allocated by percpu_ida_init().
 */
void percpu_ida_destroy(struct percpu_ida *pool)
{
        free_percpu(pool->tag_cpu);
        free_pages((unsigned long) pool->freelist,
                   get_order(pool->nr_tags * sizeof(unsigned)));
}
EXPORT_SYMBOL_GPL(percpu_ida_destroy);

/**
 * percpu_ida_init - initialize a percpu tag pool
 * @pool: pool to initialize
 * @nr_tags: number of tags that will be available for allocation
 *
 * Initializes @pool so that it can be used to allocate tags - integers in the
 * range [0, nr_tags). Typically, they'll be used by driver code to refer to a
 * preallocated array of tag structures.
 *
 * Allocation is percpu, but sharding is limited by nr_tags - for best
 * performance, the workload should not span more cpus than nr_tags / 128.
 */
int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
                      unsigned long max_size, unsigned long batch_size)
{
        unsigned i, cpu, order;

        memset(pool, 0, sizeof(*pool));

        init_waitqueue_head(&pool->wait);
        spin_lock_init(&pool->lock);
        pool->nr_tags = nr_tags;
        pool->percpu_max_size = max_size;
        pool->percpu_batch_size = batch_size;

        /* Guard against overflow */
        if (nr_tags > (unsigned) INT_MAX + 1) {
                pr_err("percpu_ida_init(): nr_tags too large\n");
                return -EINVAL;
        }

        order = get_order(nr_tags * sizeof(unsigned));
        pool->freelist = (void *) __get_free_pages(GFP_KERNEL, order);
        if (!pool->freelist)
                return -ENOMEM;

        for (i = 0; i < nr_tags; i++)
                pool->freelist[i] = i;

        pool->nr_free = nr_tags;

        pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) +
                                       pool->percpu_max_size * sizeof(unsigned),
                                       sizeof(unsigned));
        if (!pool->tag_cpu)
                goto err;

        for_each_possible_cpu(cpu)
                spin_lock_init(&per_cpu_ptr(pool->tag_cpu, cpu)->lock);

        return 0;
err:
        percpu_ida_destroy(pool);
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(__percpu_ida_init);

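/*
 * Example setup/teardown (illustrative sketch): callers normally use the
 * percpu_ida_init() wrapper from <linux/percpu_ida.h>, which supplies the
 * default per-cpu size and batch values. "my_dev_setup" and
 * "my_dev_teardown" are hypothetical names.
 *
 *	static int my_dev_setup(struct my_dev *dev, unsigned long nr_tags)
 *	{
 *		return percpu_ida_init(&dev->tag_pool, nr_tags);
 *	}
 *
 *	static void my_dev_teardown(struct my_dev *dev)
 *	{
 *		percpu_ida_destroy(&dev->tag_pool);
 *	}
 */
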
/**
 * percpu_ida_for_each_free - iterate free ids of a pool
 * @pool: pool to iterate
 * @fn: iterate callback function
 * @data: parameter for @fn
 *
 * Note, this doesn't guarantee iterating all free ids exactly once. Some free
 * ids might be missed, some might be iterated more than once, and some might
 * be iterated but no longer free by the time @fn sees them.
 */
int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
                             void *data)
{
        unsigned long flags;
        struct percpu_ida_cpu *remote;
        unsigned cpu, i;
        int err = 0;

        local_irq_save(flags);
        for_each_possible_cpu(cpu) {
                remote = per_cpu_ptr(pool->tag_cpu, cpu);
                spin_lock(&remote->lock);
                for (i = 0; i < remote->nr_free; i++) {
                        err = fn(remote->freelist[i], data);
                        if (err)
                                break;
                }
                spin_unlock(&remote->lock);
                if (err)
                        goto out;
        }

        spin_lock(&pool->lock);
        for (i = 0; i < pool->nr_free; i++) {
                err = fn(pool->freelist[i], data);
                if (err)
                        break;
        }
        spin_unlock(&pool->lock);
out:
        local_irq_restore(flags);
        return err;
}
EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);

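/*
 * Example callback (illustrative sketch; "count_cb" is a hypothetical name):
 * counting free ids across all freelists. A nonzero return from the callback
 * stops the walk early and is propagated back to the caller.
 *
 *	static int count_cb(unsigned id, void *data)
 *	{
 *		(*(unsigned *)data)++;
 *		return 0;
 *	}
 *
 *	unsigned nr = 0;
 *	percpu_ida_for_each_free(pool, count_cb, &nr);
 */
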
/**
 * percpu_ida_free_tags - return the number of free tags on a specific cpu or in the global pool
 * @pool: pool to query
 * @cpu: specific cpu, or the global pool if @cpu == nr_cpu_ids
 *
 * Note: this just returns a snapshot of the number of free tags.
 */
unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu)
{
        struct percpu_ida_cpu *remote;

        if (cpu == nr_cpu_ids)
                return pool->nr_free;

        remote = per_cpu_ptr(pool->tag_cpu, cpu);
        return remote->nr_free;
}
EXPORT_SYMBOL_GPL(percpu_ida_free_tags);
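
/*
 * Example (illustrative sketch): a rough total of free tags can be computed
 * by summing the global snapshot and every per-cpu snapshot. The result is
 * only approximate, since concurrent alloc/free can change it immediately.
 *
 *	unsigned cpu, total = percpu_ida_free_tags(pool, nr_cpu_ids);
 *
 *	for_each_possible_cpu(cpu)
 *		total += percpu_ida_free_tags(pool, cpu);
 */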