// SPDX-License-Identifier: GPL-2.0

#include <linux/objpool.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/atomic.h>
#include <linux/irqflags.h>
#include <linux/cpumask.h>
#include <linux/log2.h>

/*
 * objpool: ring-array based lockless MPMC/FIFO queues
 *
 * Copyright: wuqiang.matt@bytedance.com,mhiramat@kernel.org
 */
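/*
 * Minimal usage sketch (an illustration added for clarity, not part of
 * the upstream file): struct my_obj, my_objinit() and my_release() are
 * hypothetical names showing how objpool_init()/objpool_fini() below and
 * the objpool_pop()/objpool_push() helpers from <linux/objpool.h> fit
 * together.
 *
 *	struct my_obj { int id; };
 *
 *	static int my_objinit(void *obj, void *context)
 *	{
 *		((struct my_obj *)obj)->id = 0;
 *		return 0;
 *	}
 *
 *	static int my_release(struct objpool_head *pool, void *context)
 *	{
 *		kfree(pool);
 *		return 0;
 *	}
 *
 *	struct objpool_head *pool = kzalloc(sizeof(*pool), GFP_KERNEL);
 *	int rc = objpool_init(pool, 16, sizeof(struct my_obj), GFP_KERNEL,
 *			      NULL, my_objinit, my_release);
 *	if (!rc) {
 *		struct my_obj *obj = objpool_pop(pool);
 *
 *		if (obj)
 *			objpool_push(obj, pool); /- hand it back for reuse -/
 *		objpool_fini(pool); /- drop objects and the pool's own ref -/
 *	}
 */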
/* initialize percpu objpool_slot */
static int
objpool_init_percpu_slot(struct objpool_head *pool,
			 struct objpool_slot *slot,
			 int nodes, void *context,
			 objpool_init_obj_cb objinit)
{
	void *obj = (void *)&slot->entries[pool->capacity];
	int i;

	/* initialize elements of percpu objpool_slot */
	slot->mask = pool->capacity - 1;

	for (i = 0; i < nodes; i++) {
		if (objinit) {
			int rc = objinit(obj, context);

			if (rc)
				return rc;
		}
		slot->entries[slot->tail & slot->mask] = obj;
		obj = obj + pool->obj_size;
		slot->tail++;
		slot->last = slot->tail;
		pool->nr_objs++;
	}

	return 0;
}
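/*
 * Layout of a single per-cpu allocation as implied by the code above and
 * by objpool_init_percpu_slots() below (diagram added for illustration):
 *
 *	+---------------------+-------------------+------------------+
 *	| struct objpool_slot | entries[capacity] | nodes * obj_size |
 *	+---------------------+-------------------+------------------+
 *	  ring-buffer head      array of void *     the pre-allocated
 *	                        (ring slots)        objects themselves
 *
 * This is why the first object starts at &slot->entries[pool->capacity]:
 * the objects are carved out of the same allocation, right behind the
 * ring array.
 */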
/* allocate and initialize percpu slots */
static int
objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs,
			  void *context, objpool_init_obj_cb objinit)
{
	int i, cpu_count = 0;

	for (i = 0; i < nr_cpu_ids; i++) {

		struct objpool_slot *slot;
		int nodes, size, rc;

		/* skip the cpu node which could never be present */
		if (!cpu_possible(i))
			continue;

		/* compute how many objects to be allocated with this slot */
		nodes = nr_objs / pool->nr_possible_cpus;
		if (cpu_count < (nr_objs % pool->nr_possible_cpus))
			nodes++;
		cpu_count++;

		size = struct_size(slot, entries, pool->capacity) +
			pool->obj_size * nodes;

		/*
		 * Here we allocate the percpu slot & objs together in a
		 * single allocation to make it more compact, taking
		 * advantage of warm caches and TLB hits. By default vmalloc
		 * is used to reduce the pressure on the kernel slab
		 * allocator; note that the minimal size of a vmalloc
		 * allocation is one page, since vmalloc always aligns the
		 * requested size to page size. But if vmalloc fails or is
		 * not available (e.g. with GFP_ATOMIC), fall back to
		 * allocating the percpu slot with kmalloc.
		 */
		slot = NULL;

		/* vmalloc can sleep, so only try it for sleepable gfp flags */
		if ((pool->gfp & (GFP_ATOMIC | GFP_KERNEL)) != GFP_ATOMIC)
			slot = __vmalloc_node(size, sizeof(void *), pool->gfp,
				cpu_to_node(i), __builtin_return_address(0));

		if (!slot) {
			slot = kmalloc_node(size, pool->gfp, cpu_to_node(i));
			if (!slot)
				return -ENOMEM;
		}
		memset(slot, 0, size);
		pool->cpu_slots[i] = slot;

		/* initialize the objpool_slot of cpu node i */
		rc = objpool_init_percpu_slot(pool, slot, nodes, context, objinit);
		if (rc)
			return rc;
	}

	return 0;
}
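/*
 * Worked example of the distribution logic above (illustrative numbers):
 * with nr_objs = 10 and nr_possible_cpus = 4, each slot starts with
 * 10 / 4 = 2 objects and the first 10 % 4 = 2 possible CPUs get one
 * extra, so the four slots receive 3, 3, 2 and 2 objects respectively.
 */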
/* cleanup all percpu slots of the object pool */
static void objpool_fini_percpu_slots(struct objpool_head *pool)
{
	int i;

	if (!pool->cpu_slots)
		return;

	for (i = 0; i < nr_cpu_ids; i++)
		kvfree(pool->cpu_slots[i]);
	kfree(pool->cpu_slots);
}
/* initialize object pool and pre-allocate objects */
int objpool_init(struct objpool_head *pool, int nr_objs, int object_size,
		 gfp_t gfp, void *context, objpool_init_obj_cb objinit,
		 objpool_fini_cb release)
{
	int rc, capacity, slot_size;

	/* check input parameters */
	if (nr_objs <= 0 || nr_objs > OBJPOOL_NR_OBJECT_MAX ||
	    object_size <= 0 || object_size > OBJPOOL_OBJECT_SIZE_MAX)
		return -EINVAL;

	/* align up to unsigned long size */
	object_size = ALIGN(object_size, sizeof(long));

	/* calculate capacity of percpu objpool_slot */
	capacity = roundup_pow_of_two(nr_objs);
	if (!capacity)
		return -EINVAL;

	/* initialize objpool pool */
	memset(pool, 0, sizeof(struct objpool_head));
	pool->nr_possible_cpus = num_possible_cpus();
	pool->obj_size = object_size;
	pool->capacity = capacity;
	pool->gfp = gfp & ~__GFP_ZERO;
	pool->context = context;
	pool->release = release;
	/* cpu_slots is an array of pointers, one per possible cpu */
	slot_size = nr_cpu_ids * sizeof(struct objpool_slot *);
	pool->cpu_slots = kzalloc(slot_size, pool->gfp);
	if (!pool->cpu_slots)
		return -ENOMEM;

	/* initialize per-cpu slots */
	rc = objpool_init_percpu_slots(pool, nr_objs, context, objinit);
	if (rc)
		objpool_fini_percpu_slots(pool);
	else
		refcount_set(&pool->ref, pool->nr_objs + 1);

	return rc;
}
EXPORT_SYMBOL_GPL(objpool_init);
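/*
 * Sizing note (illustrative numbers, rationale inferred from the code
 * above): nr_objs = 100 rounds up to capacity = 128, and every possible
 * CPU reserves that full ring capacity even though it initially holds
 * only its share of the objects. Each per-cpu ring can therefore hold
 * all objects of the pool at once, so pushing an object back on whatever
 * CPU the caller happens to run on cannot overflow the ring.
 */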
/* release the whole objpool forcibly */
void objpool_free(struct objpool_head *pool)
{
	if (!pool->cpu_slots)
		return;

	/* release percpu slots */
	objpool_fini_percpu_slots(pool);

	/* call user's cleanup callback if provided */
	if (pool->release)
		pool->release(pool, pool->context);
}
EXPORT_SYMBOL_GPL(objpool_free);
/* drop the allocated object rather than reclaiming it into the objpool */
int objpool_drop(void *obj, struct objpool_head *pool)
{
	if (!obj || !pool)
		return -EINVAL;

	if (refcount_dec_and_test(&pool->ref)) {
		objpool_free(pool);
		return 0;
	}

	return -EAGAIN;
}
EXPORT_SYMBOL_GPL(objpool_drop);
/* drop unused objects and deref the objpool for releasing */
void objpool_fini(struct objpool_head *pool)
{
	int count = 1; /* extra ref for objpool itself */

	/* drop all remaining objects from the objpool */
	while (objpool_pop(pool))
		count++;

	if (refcount_sub_and_test(count, &pool->ref))
		objpool_free(pool);
}
EXPORT_SYMBOL_GPL(objpool_fini);
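/*
 * Reference-counting sketch for the three routines above (illustrative
 * numbers): after a successful objpool_init() with nr_objs = 4,
 * pool->ref is 5 (one ref per object plus one for the pool itself).
 * Each objpool_drop() releases one object's ref; objpool_fini() pops
 * every object still in the pool and subtracts their refs plus the
 * pool's own. Whichever call drops the final ref ends up invoking
 * objpool_free().
 */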