// SPDX-License-Identifier: GPL-2.0

#include <linux/objpool.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/atomic.h>
#include <linux/irqflags.h>
#include <linux/cpumask.h>
#include <linux/log2.h>

/*
 * objpool: ring-array based lockless MPMC/FIFO queues
 *
 * Copyright: wuqiang.matt@bytedance.com,mhiramat@kernel.org
 */

/* initialize percpu objpool_slot */
static int
objpool_init_percpu_slot(struct objpool_head *pool,
                         struct objpool_slot *slot,
                         int nodes, void *context,
                         objpool_init_obj_cb objinit)
{
        void *obj = (void *)&slot->entries[pool->capacity];
        int i;

        /* initialize elements of percpu objpool_slot */
        slot->mask = pool->capacity - 1;

        for (i = 0; i < nodes; i++) {
                if (objinit) {
                        int rc = objinit(obj, context);

                        if (rc)
                                return rc;
                }
                slot->entries[slot->tail & slot->mask] = obj;
                obj = obj + pool->obj_size;
                slot->tail++;
                slot->last = slot->tail;
                pool->nr_objs++;
        }

        return 0;
}
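
/*
 * A minimal objinit callback sketch (illustrative; the type and
 * function names below are hypothetical, not part of this file).
 * objpool_init_percpu_slot() above invokes such a callback once per
 * pre-allocated object, aborting initialization on a non-zero return.
 */
struct example_node {
        unsigned long data;
};

static inline int example_objinit(void *obj, void *context)
{
        struct example_node *node = obj;

        node->data = 0;         /* objects arrive zeroed (see memset() below) */
        return 0;               /* non-zero would abort pool setup */
}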

/* allocate and initialize percpu slots */
static int
objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs,
                          void *context, objpool_init_obj_cb objinit)
{
        int i, cpu_count = 0;

        for (i = 0; i < nr_cpu_ids; i++) {

                struct objpool_slot *slot;
                int nodes, size, rc;

                /* skip the cpu node which could never be present */
                if (!cpu_possible(i))
                        continue;

                /* compute how many objects to be allocated with this slot */
                nodes = nr_objs / pool->nr_possible_cpus;
                if (cpu_count < (nr_objs % pool->nr_possible_cpus))
                        nodes++;
                cpu_count++;

                size = struct_size(slot, entries, pool->capacity) +
                        pool->obj_size * nodes;

                /*
                 * Here we allocate the percpu slot & objs together in a
                 * single allocation to make it more compact, taking
                 * advantage of warm caches and TLB hits. By default vmalloc
                 * is used to reduce the pressure on the kernel slab system.
                 * The minimal size of a vmalloc allocation is one page,
                 * since vmalloc always aligns the requested size to page
                 * size. But if vmalloc fails or is not available
                 * (e.g. GFP_ATOMIC), allocate the percpu slot with kmalloc.
                 */
                slot = NULL;

                if ((pool->gfp & (GFP_ATOMIC | GFP_KERNEL)) != GFP_ATOMIC)
                        slot = __vmalloc_node(size, sizeof(void *), pool->gfp,
                                cpu_to_node(i), __builtin_return_address(0));

                if (!slot) {
                        slot = kmalloc_node(size, pool->gfp, cpu_to_node(i));
                        if (!slot)
                                return -ENOMEM;
                }
                memset(slot, 0, size);
                pool->cpu_slots[i] = slot;

                /* initialize the objpool_slot of cpu node i */
                rc = objpool_init_percpu_slot(pool, slot, nodes, context, objinit);
                if (rc)
                        return rc;
        }

        return 0;
}
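
/*
 * Worked example of the per-cpu split above: with nr_objs = 10 and
 * 4 possible CPUs, nr_objs / nr_possible_cpus = 2 with remainder 2,
 * so the first two present CPUs receive 3 objects each and the
 * remaining two receive 2 each: 3 + 3 + 2 + 2 = 10.
 */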

/* cleanup all percpu slots of the object pool */
static void objpool_fini_percpu_slots(struct objpool_head *pool)
{
        int i;

        if (!pool->cpu_slots)
                return;

        /* kvfree() copes with both vmalloc'ed and kmalloc'ed slots */
        for (i = 0; i < nr_cpu_ids; i++)
                kvfree(pool->cpu_slots[i]);
        kfree(pool->cpu_slots);
}

/* initialize object pool and pre-allocate objects */
int objpool_init(struct objpool_head *pool, int nr_objs, int object_size,
                gfp_t gfp, void *context, objpool_init_obj_cb objinit,
                objpool_fini_cb release)
{
        int rc, capacity, slot_size;

        /* check input parameters */
        if (nr_objs <= 0 || nr_objs > OBJPOOL_NR_OBJECT_MAX ||
            object_size <= 0 || object_size > OBJPOOL_OBJECT_SIZE_MAX)
                return -EINVAL;

        /* align up to unsigned long size */
        object_size = ALIGN(object_size, sizeof(long));

        /* calculate capacity of percpu objpool_slot */
        capacity = roundup_pow_of_two(nr_objs);
        if (!capacity)
                return -EINVAL;

        /* initialize objpool pool */
        memset(pool, 0, sizeof(struct objpool_head));
        pool->nr_possible_cpus = num_possible_cpus();
        pool->obj_size = object_size;
        pool->capacity = capacity;
        pool->gfp = gfp & ~__GFP_ZERO;
        pool->context = context;
        pool->release = release;
        slot_size = nr_cpu_ids * sizeof(struct objpool_slot *);
        pool->cpu_slots = kzalloc(slot_size, pool->gfp);
        if (!pool->cpu_slots)
                return -ENOMEM;

        /* initialize per-cpu slots */
        rc = objpool_init_percpu_slots(pool, nr_objs, context, objinit);
        if (rc)
                objpool_fini_percpu_slots(pool);
        else
                refcount_set(&pool->ref, pool->nr_objs + 1);

        return rc;
}
EXPORT_SYMBOL_GPL(objpool_init);

/* release the whole objpool forcibly */
void objpool_free(struct objpool_head *pool)
{
        if (!pool->cpu_slots)
                return;

        /* release percpu slots */
        objpool_fini_percpu_slots(pool);

        /* call user's cleanup callback if provided */
        if (pool->release)
                pool->release(pool, pool->context);
}
EXPORT_SYMBOL_GPL(objpool_free);
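
/*
 * Sketch of a user-supplied release callback (illustrative; the name
 * is hypothetical). objpool_free() invokes it after the percpu slots
 * are gone, which is the natural place to free a dynamically
 * allocated objpool_head.
 */
static inline int example_release(struct objpool_head *pool, void *context)
{
        kfree(pool);    /* assumes the head itself came from kmalloc() */
        return 0;
}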

/* drop the allocated object, rather than reclaiming it to the objpool */
int objpool_drop(void *obj, struct objpool_head *pool)
{
        if (!obj || !pool)
                return -EINVAL;

        if (refcount_dec_and_test(&pool->ref)) {
                objpool_free(pool);
                return 0;
        }

        return -EAGAIN;
}
EXPORT_SYMBOL_GPL(objpool_drop);
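
/*
 * objpool_drop() serves objects that will never be pushed back via
 * objpool_push(): each drop releases one pool reference, and the
 * caller that drops the last reference (see also objpool_fini()
 * below) ends up freeing the whole pool via objpool_free().
 */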

/* drop unused objects and deref the objpool for releasing */
void objpool_fini(struct objpool_head *pool)
{
        int count = 1; /* extra ref for objpool itself */

        /* drop all remaining objects from the objpool */
        while (objpool_pop(pool))
                count++;

        if (refcount_sub_and_test(count, &pool->ref))
                objpool_free(pool);
}
EXPORT_SYMBOL_GPL(objpool_fini);
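
/*
 * End-to-end usage sketch (illustrative; the function name is
 * hypothetical): a pool of 16 objects of 64 bytes each, with no
 * objinit or release callback. objpool_pop() and objpool_push()
 * are declared in <linux/objpool.h>.
 */
static inline int example_objpool_usage(void)
{
        struct objpool_head pool;
        void *obj;
        int rc;

        rc = objpool_init(&pool, 16, 64, GFP_KERNEL, NULL, NULL, NULL);
        if (rc)
                return rc;

        obj = objpool_pop(&pool);       /* NULL once the pool is empty */
        if (obj)
                objpool_push(obj, &pool);       /* hand the object back */

        objpool_fini(&pool);    /* drop remaining objects, free the slots */
        return 0;
}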