lib/percpu_ida.c
/*
 * Percpu IDA library
 *
 * Copyright (C) 2013 Datera, Inc. Kent Overstreet
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/mm.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/percpu_ida.h>
struct percpu_ida_cpu {
	/*
	 * Even though this is percpu, we need a lock for tag stealing by remote
	 * CPUs:
	 */
	spinlock_t		lock;

	/* nr_free/freelist form a stack of free IDs */
	unsigned		nr_free;
	unsigned		freelist[];
};
static inline void move_tags(unsigned *dst, unsigned *dst_nr,
			     unsigned *src, unsigned *src_nr,
			     unsigned nr)
{
	*src_nr -= nr;
	memcpy(dst + *dst_nr, src + *src_nr, sizeof(unsigned) * nr);
	*dst_nr += nr;
}
/*
 * Try to steal tags from a remote cpu's percpu freelist.
 *
 * We first check how many percpu freelists have tags.
 *
 * Then we iterate through the cpus until we find some tags - we don't attempt
 * to find the "best" cpu to steal from, to keep cacheline bouncing to a
 * minimum.
 */
static inline void steal_tags(struct percpu_ida *pool,
			      struct percpu_ida_cpu *tags)
{
	unsigned cpus_have_tags, cpu = pool->cpu_last_stolen;
	struct percpu_ida_cpu *remote;

	for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
	     cpus_have_tags; cpus_have_tags--) {
		cpu = cpumask_next(cpu, &pool->cpus_have_tags);

		if (cpu >= nr_cpu_ids) {
			cpu = cpumask_first(&pool->cpus_have_tags);
			if (cpu >= nr_cpu_ids)
				BUG();
		}

		pool->cpu_last_stolen = cpu;
		remote = per_cpu_ptr(pool->tag_cpu, cpu);

		cpumask_clear_cpu(cpu, &pool->cpus_have_tags);

		if (remote == tags)
			continue;

		spin_lock(&remote->lock);

		if (remote->nr_free) {
			memcpy(tags->freelist,
			       remote->freelist,
			       sizeof(unsigned) * remote->nr_free);

			tags->nr_free = remote->nr_free;
			remote->nr_free = 0;
		}

		spin_unlock(&remote->lock);

		if (tags->nr_free)
			break;
	}
}
/*
 * Pop up to pool->percpu_batch_size IDs off the global freelist, and push
 * them onto our percpu freelist:
 */
static inline void alloc_global_tags(struct percpu_ida *pool,
				     struct percpu_ida_cpu *tags)
{
	move_tags(tags->freelist, &tags->nr_free,
		  pool->freelist, &pool->nr_free,
		  min(pool->nr_free, pool->percpu_batch_size));
}
static inline int alloc_local_tag(struct percpu_ida_cpu *tags)
{
	int tag = -ENOSPC;

	spin_lock(&tags->lock);
	if (tags->nr_free)
		tag = tags->freelist[--tags->nr_free];
	spin_unlock(&tags->lock);

	return tag;
}
/**
 * percpu_ida_alloc - allocate a tag
 * @pool: pool to allocate from
 * @state: task state for prepare_to_wait
 *
 * Returns a tag - an integer in the range [0..nr_tags) (as passed to
 * percpu_ida_init()), -ENOSPC on allocation failure, or -ERESTARTSYS if an
 * interruptible wait is cut short by a signal.
 *
 * Safe to be called from interrupt context (assuming it isn't passed
 * TASK_UNINTERRUPTIBLE or TASK_INTERRUPTIBLE, of course).
 *
 * @state indicates whether or not to wait until a free id is available; if
 * passed TASK_UNINTERRUPTIBLE we may sleep however long it takes until
 * another thread frees an id (same semantics as a mempool).
 *
 * Will not return -ENOSPC if passed TASK_UNINTERRUPTIBLE or
 * TASK_INTERRUPTIBLE.
 */
int percpu_ida_alloc(struct percpu_ida *pool, int state)
{
	DEFINE_WAIT(wait);
	struct percpu_ida_cpu *tags;
	unsigned long flags;
	int tag;

	local_irq_save(flags);
	tags = this_cpu_ptr(pool->tag_cpu);

	/* Fastpath */
	tag = alloc_local_tag(tags);
	if (likely(tag >= 0)) {
		local_irq_restore(flags);
		return tag;
	}

	while (1) {
		spin_lock(&pool->lock);

		/*
		 * prepare_to_wait() must come before steal_tags(), in case
		 * percpu_ida_free() on another cpu flips a bit in
		 * cpus_have_tags
		 *
		 * global lock held and irqs disabled, don't need percpu lock
		 */
		if (state != TASK_RUNNING)
			prepare_to_wait(&pool->wait, &wait, state);

		if (!tags->nr_free)
			alloc_global_tags(pool, tags);
		if (!tags->nr_free)
			steal_tags(pool, tags);

		if (tags->nr_free) {
			tag = tags->freelist[--tags->nr_free];
			if (tags->nr_free)
				cpumask_set_cpu(smp_processor_id(),
						&pool->cpus_have_tags);
		}

		spin_unlock(&pool->lock);
		local_irq_restore(flags);

		if (tag >= 0 || state == TASK_RUNNING)
			break;

		if (signal_pending_state(state, current)) {
			tag = -ERESTARTSYS;
			break;
		}

		schedule();

		local_irq_save(flags);
		tags = this_cpu_ptr(pool->tag_cpu);
	}
	if (state != TASK_RUNNING)
		finish_wait(&pool->wait, &wait);

	return tag;
}
EXPORT_SYMBOL_GPL(percpu_ida_alloc);
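
/*
 * Illustrative sketch (not part of the original file, hence #if 0): the
 * three @state modes a caller can pass to percpu_ida_alloc(). The
 * foo_alloc_examples() helper is hypothetical.
 */
#if 0
static void foo_alloc_examples(struct percpu_ida *pool)
{
	int tag;

	/* Non-blocking attempt: -ENOSPC immediately if no tag is free */
	tag = percpu_ida_alloc(pool, TASK_RUNNING);

	/* Interruptible wait: may instead return -ERESTARTSYS on a signal */
	if (tag == -ENOSPC)
		tag = percpu_ida_alloc(pool, TASK_INTERRUPTIBLE);

	/* Uninterruptible wait: sleeps until a tag is freed, cannot fail */
	if (tag == -ERESTARTSYS)
		tag = percpu_ida_alloc(pool, TASK_UNINTERRUPTIBLE);

	if (tag >= 0)
		percpu_ida_free(pool, tag);
}
#endif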
/**
 * percpu_ida_free - free a tag
 * @pool: pool @tag was allocated from
 * @tag: a tag previously allocated with percpu_ida_alloc()
 *
 * Safe to be called from interrupt context.
 */
void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
{
	struct percpu_ida_cpu *tags;
	unsigned long flags;
	unsigned nr_free;

	BUG_ON(tag >= pool->nr_tags);

	local_irq_save(flags);
	tags = this_cpu_ptr(pool->tag_cpu);

	spin_lock(&tags->lock);
	tags->freelist[tags->nr_free++] = tag;

	nr_free = tags->nr_free;
	spin_unlock(&tags->lock);

	if (nr_free == 1) {
		cpumask_set_cpu(smp_processor_id(),
				&pool->cpus_have_tags);
		wake_up(&pool->wait);
	}

	if (nr_free == pool->percpu_max_size) {
		spin_lock(&pool->lock);

		/*
		 * Global lock held and irqs disabled, don't need percpu
		 * lock
		 */
		if (tags->nr_free == pool->percpu_max_size) {
			move_tags(pool->freelist, &pool->nr_free,
				  tags->freelist, &tags->nr_free,
				  pool->percpu_batch_size);

			wake_up(&pool->wait);
		}
		spin_unlock(&pool->lock);
	}

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(percpu_ida_free);
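
/*
 * Illustrative sketch (not part of the original file, hence #if 0): a
 * driver pairing percpu_ida_alloc()/percpu_ida_free() to index a
 * preallocated request array; the foo_* types and names are hypothetical.
 */
#if 0
struct foo_request {
	int busy;			/* per-request driver state */
};

struct foo_dev {
	struct percpu_ida tag_pool;	/* initialized with nr_tags entries */
	struct foo_request *reqs;	/* array of nr_tags requests */
};

static struct foo_request *foo_get_request(struct foo_dev *dev)
{
	/* TASK_UNINTERRUPTIBLE: sleeps until a tag frees, never fails */
	int tag = percpu_ida_alloc(&dev->tag_pool, TASK_UNINTERRUPTIBLE);

	return &dev->reqs[tag];
}

static void foo_put_request(struct foo_dev *dev, struct foo_request *req)
{
	/* The array index is the tag; hand it back to the pool */
	percpu_ida_free(&dev->tag_pool, req - dev->reqs);
}
#endif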
/**
 * percpu_ida_destroy - release a tag pool's resources
 * @pool: pool to free
 *
 * Frees the resources allocated by percpu_ida_init().
 */
void percpu_ida_destroy(struct percpu_ida *pool)
{
	free_percpu(pool->tag_cpu);
	free_pages((unsigned long) pool->freelist,
		   get_order(pool->nr_tags * sizeof(unsigned)));
}
EXPORT_SYMBOL_GPL(percpu_ida_destroy);
/**
 * percpu_ida_init - initialize a percpu tag pool
 * @pool: pool to initialize
 * @nr_tags: number of tags that will be available for allocation
 *
 * Initializes @pool so that it can be used to allocate tags - integers in the
 * range [0, nr_tags). Typically, they'll be used by driver code to refer to a
 * preallocated array of tag structures.
 *
 * Allocation is percpu, but sharding is limited by nr_tags - for best
 * performance, the workload should not span more cpus than nr_tags / 128.
 */
int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
	unsigned long max_size, unsigned long batch_size)
{
	unsigned i, cpu, order;

	memset(pool, 0, sizeof(*pool));

	init_waitqueue_head(&pool->wait);
	spin_lock_init(&pool->lock);
	pool->nr_tags = nr_tags;
	pool->percpu_max_size = max_size;
	pool->percpu_batch_size = batch_size;

	/* Guard against overflow */
	if (nr_tags > (unsigned) INT_MAX + 1) {
		pr_err("percpu_ida_init(): nr_tags too large\n");
		return -EINVAL;
	}

	order = get_order(nr_tags * sizeof(unsigned));
	pool->freelist = (void *) __get_free_pages(GFP_KERNEL, order);
	if (!pool->freelist)
		return -ENOMEM;

	for (i = 0; i < nr_tags; i++)
		pool->freelist[i] = i;

	pool->nr_free = nr_tags;

	pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) +
				       pool->percpu_max_size * sizeof(unsigned),
				       sizeof(unsigned));
	if (!pool->tag_cpu)
		goto err;

	for_each_possible_cpu(cpu)
		spin_lock_init(&per_cpu_ptr(pool->tag_cpu, cpu)->lock);

	return 0;
err:
	percpu_ida_destroy(pool);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(__percpu_ida_init);
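
/*
 * Illustrative sketch (not part of the original file, hence #if 0):
 * typical pool lifecycle. percpu_ida_init() is the <linux/percpu_ida.h>
 * wrapper that calls __percpu_ida_init() with the default max/batch
 * sizes; the foo_* names are hypothetical.
 */
#if 0
static int foo_setup(struct percpu_ida *pool, unsigned long nr_tags)
{
	int err = percpu_ida_init(pool, nr_tags);

	if (err)	/* -EINVAL (nr_tags too large) or -ENOMEM */
		return err;
	return 0;
}

static void foo_teardown(struct percpu_ida *pool)
{
	percpu_ida_destroy(pool);	/* frees freelist pages and percpu data */
}
#endif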
/**
 * percpu_ida_for_each_free - iterate free ids of a pool
 * @pool: pool to iterate
 * @fn: iterate callback function
 * @data: parameter for @fn
 *
 * Note, this doesn't guarantee that each free id is iterated exactly once:
 * some free ids might be missed, some might be iterated twice, and some
 * might have been reallocated by the time @fn sees them.
 */
int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
	void *data)
{
	unsigned long flags;
	struct percpu_ida_cpu *remote;
	unsigned cpu, i;
	int err = 0;

	local_irq_save(flags);
	for_each_possible_cpu(cpu) {
		remote = per_cpu_ptr(pool->tag_cpu, cpu);
		spin_lock(&remote->lock);
		for (i = 0; i < remote->nr_free; i++) {
			err = fn(remote->freelist[i], data);
			if (err)
				break;
		}
		spin_unlock(&remote->lock);
		if (err)
			goto out;
	}

	spin_lock(&pool->lock);
	for (i = 0; i < pool->nr_free; i++) {
		err = fn(pool->freelist[i], data);
		if (err)
			break;
	}
	spin_unlock(&pool->lock);
out:
	local_irq_restore(flags);
	return err;
}
EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
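
/*
 * Illustrative sketch (not part of the original file, hence #if 0): a
 * percpu_ida_cb callback that counts the free ids it is shown. count_cb()
 * and foo_count_free() are hypothetical.
 */
#if 0
static int count_cb(unsigned id, void *data)
{
	unsigned *count = data;

	(*count)++;
	return 0;	/* returning nonzero would stop the iteration */
}

static unsigned foo_count_free(struct percpu_ida *pool)
{
	unsigned count = 0;

	percpu_ida_for_each_free(pool, count_cb, &count);
	return count;	/* approximate, per the note above */
}
#endif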
/**
 * percpu_ida_free_tags - return the number of free tags on a cpu, or in the global pool
 * @pool: pool to query
 * @cpu: a specific cpu, or nr_cpu_ids for the global freelist
 *
 * Note: this just returns a snapshot of the number of free tags.
 */
unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu)
{
	struct percpu_ida_cpu *remote;

	if (cpu == nr_cpu_ids)
		return pool->nr_free;
	remote = per_cpu_ptr(pool->tag_cpu, cpu);
	return remote->nr_free;
}
EXPORT_SYMBOL_GPL(percpu_ida_free_tags);
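
/*
 * Illustrative sketch (not part of the original file, hence #if 0):
 * summing the global freelist and every percpu freelist gives a racy
 * estimate of the total free tags; foo_total_free() is hypothetical.
 */
#if 0
static unsigned foo_total_free(struct percpu_ida *pool)
{
	unsigned cpu, total = percpu_ida_free_tags(pool, nr_cpu_ids);

	for_each_possible_cpu(cpu)
		total += percpu_ida_free_tags(pool, cpu);
	return total;	/* tags may migrate while we sum */
}
#endif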