// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <linux/mm.h>
#include <linux/llist.h>
#include <linux/bpf.h>
#include <linux/irq_work.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/memcontrol.h>
#include <asm/local.h>

/* Any context (including NMI) BPF specific memory allocator.
 *
 * Tracing BPF programs can attach to kprobe and fentry. Hence they
 * run in unknown context where calling plain kmalloc() might not be safe.
 *
 * Front-end kmalloc() with per-cpu per-bucket cache of free elements.
 * Refill this cache asynchronously from irq_work.
 *
 * CPU_0 buckets
 * 16 32 64 96 128 192 256 512 1024 2048 4096
 * ...
 * CPU_N buckets
 * 16 32 64 96 128 192 256 512 1024 2048 4096
 *
 * The buckets are prefilled at the start.
 * BPF programs always run with migration disabled.
 * It's safe to allocate from the cache of the current cpu with irqs disabled.
 * Freeing is always done into the bucket of the current cpu as well.
 * irq_work trims extra free elements from buckets with kfree
 * and refills them with kmalloc, so the global kmalloc logic takes care
 * of freeing objects allocated by one cpu and freed on another.
 *
 * Every allocated object is padded with extra 8 bytes that contain
 * a struct llist_node.
 */

#define LLIST_NODE_SZ sizeof(struct llist_node)

#define BPF_MEM_ALLOC_SIZE_MAX 4096

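/* A minimal usage sketch of the API implemented below (illustrative only;
 * 'elem_size' and 'req_size' are placeholder names, error handling elided):
 *
 *	struct bpf_mem_alloc ma;
 *
 *	// fixed-size mode: one bucket per cpu, e.g. hash map elements
 *	err  = bpf_mem_alloc_init(&ma, elem_size, false);
 *	elem = bpf_mem_cache_alloc(&ma);	// any context, including NMI
 *	bpf_mem_cache_free(&ma, elem);		// element may be reused immediately
 *	bpf_mem_alloc_destroy(&ma);
 *
 *	// variable-size mode (size == 0): 11 buckets per cpu, e.g. dynptr/kptr
 *	err = bpf_mem_alloc_init(&ma, 0, false);
 *	p   = bpf_mem_alloc(&ma, req_size);	// up to BPF_MEM_ALLOC_SIZE_MAX - LLIST_NODE_SZ
 *	bpf_mem_free_rcu(&ma, p);		// reuse deferred past RCU GP
 */
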
/* similar to kmalloc, but sizeof == 8 bucket is gone */
static u8 size_index[24] __ro_after_init = {
	3, 3,			/* 8, 16 */
	4, 4,			/* 24, 32 */
	5, 5, 5, 5,		/* 40 .. 64 */
	1, 1, 1, 1,		/* 72 .. 96 */
	6, 6, 6, 6,		/* 104 .. 128 */
	2, 2, 2, 2, 2, 2, 2, 2,	/* 136 .. 192 */
};

static int bpf_mem_cache_idx(size_t size)
{
	if (!size || size > BPF_MEM_ALLOC_SIZE_MAX)
		return -1;

	if (size <= 192)
		return size_index[(size - 1) / 8] - 1;

	return fls(size - 1) - 2;
}

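/* Examples of the mapping, given the sizes[] table further below:
 * size 1..16    -> idx 2  (16 byte bucket)
 * size 65..96   -> idx 0  (96 byte bucket)
 * size 193..256 -> idx 6  (256 byte bucket, via fls(255) - 2 = 6)
 * size 4096     -> idx 10 (4096 byte bucket, via fls(4095) - 2 = 10)
 */
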
#define NUM_CACHES 11

struct bpf_mem_cache {
	/* per-cpu list of free objects of size 'unit_size'.
	 * All accesses are done with interrupts disabled and 'active' counter
	 * protection with __llist_add() and __llist_del_first().
	 */
	struct llist_head free_llist;
	local_t active;

	/* Operations on the free_list from unit_alloc/unit_free/bpf_mem_refill
	 * are sequenced by per-cpu 'active' counter. But unit_free() cannot
	 * fail. When 'active' is busy the unit_free() will add an object to
	 * free_llist_extra.
	 */
	struct llist_head free_llist_extra;

	struct irq_work refill_work;
	struct obj_cgroup *objcg;
	int unit_size;
	/* count of objects in free_llist */
	int free_cnt;
	int low_watermark, high_watermark, batch;
	int percpu_size;
	bool draining;
	struct bpf_mem_cache *tgt;

	/* list of objects to be freed after RCU GP */
	struct llist_head free_by_rcu;
	struct llist_node *free_by_rcu_tail;
	struct llist_head waiting_for_gp;
	struct llist_node *waiting_for_gp_tail;
	struct rcu_head rcu;
	atomic_t call_rcu_in_progress;
	struct llist_head free_llist_extra_rcu;

	/* list of objects to be freed after RCU tasks trace GP */
	struct llist_head free_by_rcu_ttrace;
	struct llist_head waiting_for_gp_ttrace;
	struct rcu_head rcu_ttrace;
	atomic_t call_rcu_ttrace_in_progress;
};

struct bpf_mem_caches {
	struct bpf_mem_cache cache[NUM_CACHES];
};

static const u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};

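/* Note: 96 and 192 lead the array so that the bucket indices stay compatible
 * with the size_index[] table above, which mirrors the kmalloc size_index
 * convention (96/192 at the low indices) minus the dropped 8-byte bucket.
 */
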
static struct llist_node notrace *__llist_del_first(struct llist_head *head)
{
	struct llist_node *entry, *next;

	entry = head->first;
	if (!entry)
		return NULL;
	next = entry->next;
	head->first = next;
	return entry;
}

static void *__alloc(struct bpf_mem_cache *c, int node, gfp_t flags)
{
	if (c->percpu_size) {
		void __percpu **obj = kmalloc_node(c->percpu_size, flags, node);
		void __percpu *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);

		if (!obj || !pptr) {
			free_percpu(pptr);
			kfree(obj);
			return NULL;
		}
		obj[1] = pptr;
		return obj;
	}

	return kmalloc_node(c->unit_size, flags | __GFP_ZERO, node);
}

static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
{
#ifdef CONFIG_MEMCG
	if (c->objcg)
		return get_mem_cgroup_from_objcg(c->objcg);
	return root_mem_cgroup;
#else
	return NULL;
#endif
}

static void inc_active(struct bpf_mem_cache *c, unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		/* In RT irq_work runs in per-cpu kthread, so disable
		 * interrupts to avoid preemption and interrupts and
		 * reduce the chance of bpf prog executing on this cpu
		 * when active counter is busy.
		 */
		local_irq_save(*flags);
	/* alloc_bulk runs from irq_work which will not preempt a bpf
	 * program that does unit_alloc/unit_free since IRQs are
	 * disabled there. There is no race to increment 'active'
	 * counter. It protects free_llist from corruption in case NMI
	 * bpf prog preempted this loop.
	 */
	WARN_ON_ONCE(local_inc_return(&c->active) != 1);
}

static void dec_active(struct bpf_mem_cache *c, unsigned long *flags)
{
	local_dec(&c->active);
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_restore(*flags);
}

static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
{
	unsigned long flags;

	inc_active(c, &flags);
	__llist_add(obj, &c->free_llist);
	c->free_cnt++;
	dec_active(c, &flags);
}

/* Mostly runs from irq_work except __init phase. */
static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node, bool atomic)
{
	struct mem_cgroup *memcg = NULL, *old_memcg;
	gfp_t gfp;
	void *obj;
	int i;

	gfp = __GFP_NOWARN | __GFP_ACCOUNT;
	gfp |= atomic ? GFP_NOWAIT : GFP_KERNEL;

	for (i = 0; i < cnt; i++) {
		/*
		 * For every 'c' llist_del_first(&c->free_by_rcu_ttrace); is
		 * done only by one CPU == current CPU. Other CPUs might
		 * llist_add() and llist_del_all() in parallel.
		 */
		obj = llist_del_first(&c->free_by_rcu_ttrace);
		if (!obj)
			break;
		add_obj_to_free_list(c, obj);
	}
	if (i >= cnt)
		return;

	for (; i < cnt; i++) {
		obj = llist_del_first(&c->waiting_for_gp_ttrace);
		if (!obj)
			break;
		add_obj_to_free_list(c, obj);
	}
	if (i >= cnt)
		return;

	memcg = get_memcg(c);
	old_memcg = set_active_memcg(memcg);
	for (; i < cnt; i++) {
		/* Allocate, but don't deplete atomic reserves that typical
		 * GFP_ATOMIC would do. irq_work runs on this cpu and kmalloc
		 * will allocate from the current numa node which is what we
		 * want here.
		 */
		obj = __alloc(c, node, gfp);
		if (!obj)
			break;
		add_obj_to_free_list(c, obj);
	}
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);
}

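/* To summarize: alloc_bulk() refills from the cheapest source first: objects
 * parked on free_by_rcu_ttrace, then objects still queued on
 * waiting_for_gp_ttrace (reusing them before the tasks trace GP is fine here,
 * since this path only serves non-RCU frees), and only then fresh
 * allocations charged to the cache's memcg.
 */
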
static void free_one(void *obj, bool percpu)
{
	if (percpu)
		free_percpu(((void __percpu **)obj)[1]);

	kfree(obj);
}

static int free_all(struct llist_node *llnode, bool percpu)
{
	struct llist_node *pos, *t;
	int cnt = 0;

	llist_for_each_safe(pos, t, llnode) {
		free_one(pos, percpu);
		cnt++;
	}
	return cnt;
}

static void __free_rcu(struct rcu_head *head)
{
	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu_ttrace);

	free_all(llist_del_all(&c->waiting_for_gp_ttrace), !!c->percpu_size);
	atomic_set(&c->call_rcu_ttrace_in_progress, 0);
}

static void __free_rcu_tasks_trace(struct rcu_head *head)
{
	/* If RCU Tasks Trace grace period implies RCU grace period,
	 * there is no need to invoke call_rcu().
	 */
	if (rcu_trace_implies_rcu_gp())
		__free_rcu(head);
	else
		call_rcu(head, __free_rcu);
}

static void enque_to_free(struct bpf_mem_cache *c, void *obj)
{
	struct llist_node *llnode = obj;

	/* bpf_mem_cache is a per-cpu object. Freeing happens in irq_work.
	 * Nothing races to add to free_by_rcu_ttrace list.
	 */
	llist_add(llnode, &c->free_by_rcu_ttrace);
}

static void do_call_rcu_ttrace(struct bpf_mem_cache *c)
{
	struct llist_node *llnode, *t;

	if (atomic_xchg(&c->call_rcu_ttrace_in_progress, 1)) {
		if (unlikely(READ_ONCE(c->draining))) {
			llnode = llist_del_all(&c->free_by_rcu_ttrace);
			free_all(llnode, !!c->percpu_size);
		}
		return;
	}

	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
	llist_for_each_safe(llnode, t, llist_del_all(&c->free_by_rcu_ttrace))
		llist_add(llnode, &c->waiting_for_gp_ttrace);

	if (unlikely(READ_ONCE(c->draining))) {
		__free_rcu(&c->rcu_ttrace);
		return;
	}

	/* Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
	 * If RCU Tasks Trace grace period implies RCU grace period, free
	 * these elements directly, else use call_rcu() to wait for normal
	 * progs to finish and finally do free_one() on each element.
	 */
	call_rcu_tasks_trace(&c->rcu_ttrace, __free_rcu_tasks_trace);
}

static void free_bulk(struct bpf_mem_cache *c)
{
	struct bpf_mem_cache *tgt = c->tgt;
	struct llist_node *llnode, *t;
	unsigned long flags;
	int cnt;

	WARN_ON_ONCE(tgt->unit_size != c->unit_size);
	WARN_ON_ONCE(tgt->percpu_size != c->percpu_size);

	do {
		inc_active(c, &flags);
		llnode = __llist_del_first(&c->free_llist);
		if (llnode)
			cnt = --c->free_cnt;
		else
			cnt = 0;
		dec_active(c, &flags);
		if (llnode)
			enque_to_free(tgt, llnode);
	} while (cnt > (c->high_watermark + c->low_watermark) / 2);

	/* and drain free_llist_extra */
	llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra))
		enque_to_free(tgt, llnode);
	do_call_rcu_ttrace(tgt);
}

static void __free_by_rcu(struct rcu_head *head)
{
	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu);
	struct bpf_mem_cache *tgt = c->tgt;
	struct llist_node *llnode;

	WARN_ON_ONCE(tgt->unit_size != c->unit_size);
	WARN_ON_ONCE(tgt->percpu_size != c->percpu_size);

	llnode = llist_del_all(&c->waiting_for_gp);
	if (!llnode)
		goto out;

	llist_add_batch(llnode, c->waiting_for_gp_tail, &tgt->free_by_rcu_ttrace);

	/* Objects went through regular RCU GP. Send them to RCU tasks trace */
	do_call_rcu_ttrace(tgt);
out:
	atomic_set(&c->call_rcu_in_progress, 0);
}

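/* Life cycle of an object freed with bpf_mem_free_rcu():
 * unit_free_rcu() -> free_by_rcu -> waiting_for_gp -> regular RCU GP ->
 * __free_by_rcu() moves it to tgt->free_by_rcu_ttrace -> RCU tasks trace GP
 * via do_call_rcu_ttrace() -> __free_rcu() -> kfree()/free_percpu(),
 * unless alloc_bulk() recycles it back into free_llist first.
 */
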
static void check_free_by_rcu(struct bpf_mem_cache *c)
{
	struct llist_node *llnode, *t;
	unsigned long flags;

	/* drain free_llist_extra_rcu */
	if (unlikely(!llist_empty(&c->free_llist_extra_rcu))) {
		inc_active(c, &flags);
		llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra_rcu))
			if (__llist_add(llnode, &c->free_by_rcu))
				c->free_by_rcu_tail = llnode;
		dec_active(c, &flags);
	}

	if (llist_empty(&c->free_by_rcu))
		return;

	if (atomic_xchg(&c->call_rcu_in_progress, 1)) {
		/*
		 * Instead of kmalloc-ing new rcu_head and triggering 10k
		 * call_rcu() to hit rcutree.qhimark and force RCU to notice
		 * the overload, just ask RCU to hurry up. There could be many
		 * objects in free_by_rcu list.
		 * This hint reduces memory consumption for an artificial
		 * benchmark from 2 Gbyte to 150 Mbyte.
		 */
		rcu_request_urgent_qs_task(current);
		return;
	}

	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp));

	inc_active(c, &flags);
	WRITE_ONCE(c->waiting_for_gp.first, __llist_del_all(&c->free_by_rcu));
	c->waiting_for_gp_tail = c->free_by_rcu_tail;
	dec_active(c, &flags);

	if (unlikely(READ_ONCE(c->draining))) {
		free_all(llist_del_all(&c->waiting_for_gp), !!c->percpu_size);
		atomic_set(&c->call_rcu_in_progress, 0);
	} else {
		call_rcu_hurry(&c->rcu, __free_by_rcu);
	}
}

static void bpf_mem_refill(struct irq_work *work)
{
	struct bpf_mem_cache *c = container_of(work, struct bpf_mem_cache, refill_work);
	int cnt;

	/* Racy access to free_cnt. It doesn't need to be 100% accurate */
	cnt = c->free_cnt;
	if (cnt < c->low_watermark)
		/* irq_work runs on this cpu and kmalloc will allocate
		 * from the current numa node which is what we want here.
		 */
		alloc_bulk(c, c->batch, NUMA_NO_NODE, true);
	else if (cnt > c->high_watermark)
		free_bulk(c);

	check_free_by_rcu(c);
}

static void notrace irq_work_raise(struct bpf_mem_cache *c)
{
	irq_work_queue(&c->refill_work);
}

/* For the typical bpf map case that uses bpf_mem_cache_alloc and a single
 * bucket, the freelist cache will be elem_size * 64 (or less) on each cpu.
 *
 * For bpf programs that don't have statically known allocation sizes,
 * assuming (low_mark + high_mark) / 2 as an average number of elements per
 * bucket and all buckets used, the total amount of memory in freelists
 * on each cpu will be:
 * 64*16 + 64*32 + 64*64 + 64*96 + 64*128 + 64*192 + 64*256 + 32*512 + 16*1024 + 8*2048 + 4*4096
 * == ~ 116 Kbyte using the heuristic below.
 * An initialized, but unused bpf allocator (not a bpf map specific one) will
 * consume ~ 11 Kbyte per cpu.
 * The typical case will be between 11K and 116K, closer to 11K.
 * bpf progs can and should share bpf_mem_cache when possible.
 *
 * Percpu allocation is typically rare. To avoid potentially unnecessary large
 * memory consumption, set low_mark = 1 and high_mark = 3, resulting in c->batch = 1.
 */

static void init_refill_work(struct bpf_mem_cache *c)
{
	init_irq_work(&c->refill_work, bpf_mem_refill);
	if (c->percpu_size) {
		c->low_watermark = 1;
		c->high_watermark = 3;
	} else if (c->unit_size <= 256) {
		c->low_watermark = 32;
		c->high_watermark = 96;
	} else {
		/* When page_size == 4k, order-0 cache will have low_mark == 2
		 * and high_mark == 6 with batch alloc of 3 individual pages at
		 * a time.
		 * 8k allocs and above low == 1, high == 3, batch == 1.
		 */
		c->low_watermark = max(32 * 256 / c->unit_size, 1);
		c->high_watermark = max(96 * 256 / c->unit_size, 3);
	}
	c->batch = max((c->high_watermark - c->low_watermark) / 4 * 3, 1);
}

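/* Worked examples of the heuristic above:
 * unit_size <= 256:  low = 32, high = 96, batch = (96 - 32) / 4 * 3 = 48
 * unit_size == 1024: low = 8,  high = 24, batch = (24 - 8) / 4 * 3 = 12
 * unit_size == 4096: low = 2,  high = 6,  batch = (6 - 2) / 4 * 3 = 3
 * percpu:            low = 1,  high = 3,  batch = max((3 - 1) / 4 * 3, 1) = 1
 */
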
static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
{
	int cnt = 1;

	/* To avoid consuming memory, for non-percpu allocation, assume that
	 * 1st run of bpf prog won't be doing more than 4 map_update_elem from
	 * irq disabled region if unit size is less than or equal to 256.
	 * For all other cases, let us just do one allocation.
	 */
	if (!c->percpu_size && c->unit_size <= 256)
		cnt = 4;
	alloc_bulk(c, cnt, cpu_to_node(cpu), false);
}

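/* prefill_mem_cache() is only called from the *_init() paths below, i.e. from
 * process context, which is why alloc_bulk() runs with atomic == false and
 * may use GFP_KERNEL.
 */
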
/* When size != 0 create a bpf_mem_cache for each cpu.
 * This is the typical bpf hash map use case when all elements have equal size.
 *
 * When size == 0 allocate 11 bpf_mem_cache-s for each cpu, then rely on
 * kmalloc/kfree. Max allocation size is 4096 in this case.
 * This is the bpf_dynptr and bpf_kptr use case.
 */
int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
{
	struct bpf_mem_caches *cc; struct bpf_mem_caches __percpu *pcc;
	struct bpf_mem_cache *c; struct bpf_mem_cache __percpu *pc;
	struct obj_cgroup *objcg = NULL;
	int cpu, i, unit_size, percpu_size = 0;

	if (percpu && size == 0)
		return -EINVAL;

	/* room for llist_node and per-cpu pointer */
	if (percpu)
		percpu_size = LLIST_NODE_SZ + sizeof(void *);
	ma->percpu = percpu;

	if (size) {
		pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL);
		if (!pc)
			return -ENOMEM;

		if (!percpu)
			size += LLIST_NODE_SZ; /* room for llist_node */
		unit_size = size;

#ifdef CONFIG_MEMCG
		if (memcg_bpf_enabled())
			objcg = get_obj_cgroup_from_current();
#endif
		ma->objcg = objcg;

		for_each_possible_cpu(cpu) {
			c = per_cpu_ptr(pc, cpu);
			c->unit_size = unit_size;
			c->objcg = objcg;
			c->percpu_size = percpu_size;
			c->tgt = c;
			init_refill_work(c);
			prefill_mem_cache(c, cpu);
		}
		ma->cache = pc;
		return 0;
	}

	pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);
	if (!pcc)
		return -ENOMEM;
#ifdef CONFIG_MEMCG
	objcg = get_obj_cgroup_from_current();
#endif
	ma->objcg = objcg;
	for_each_possible_cpu(cpu) {
		cc = per_cpu_ptr(pcc, cpu);
		for (i = 0; i < NUM_CACHES; i++) {
			c = &cc->cache[i];
			c->unit_size = sizes[i];
			c->objcg = objcg;
			c->percpu_size = percpu_size;
			c->tgt = c;

			init_refill_work(c);
			prefill_mem_cache(c, cpu);
		}
	}

	ma->caches = pcc;
	return 0;
}

int bpf_mem_alloc_percpu_init(struct bpf_mem_alloc *ma, struct obj_cgroup *objcg)
{
	struct bpf_mem_caches __percpu *pcc;

	pcc = __alloc_percpu_gfp(sizeof(struct bpf_mem_caches), 8, GFP_KERNEL);
	if (!pcc)
		return -ENOMEM;

	ma->caches = pcc;
	ma->objcg = objcg;
	ma->percpu = true;
	return 0;
}

int bpf_mem_alloc_percpu_unit_init(struct bpf_mem_alloc *ma, int size)
{
	struct bpf_mem_caches *cc; struct bpf_mem_caches __percpu *pcc;
	int cpu, i, unit_size, percpu_size;
	struct obj_cgroup *objcg;
	struct bpf_mem_cache *c;

	i = bpf_mem_cache_idx(size);
	if (i < 0)
		return -EINVAL;

	/* room for llist_node and per-cpu pointer */
	percpu_size = LLIST_NODE_SZ + sizeof(void *);

	unit_size = sizes[i];
	objcg = ma->objcg;
	pcc = ma->caches;

	for_each_possible_cpu(cpu) {
		cc = per_cpu_ptr(pcc, cpu);
		c = &cc->cache[i];
		if (c->unit_size)
			continue;

		c->unit_size = unit_size;
		c->objcg = objcg;
		c->percpu_size = percpu_size;
		c->tgt = c;

		init_refill_work(c);
		prefill_mem_cache(c, cpu);
	}

	return 0;
}

static void drain_mem_cache(struct bpf_mem_cache *c)
{
	bool percpu = !!c->percpu_size;

	/* No progs are using this bpf_mem_cache, but htab_map_free() called
	 * bpf_mem_cache_free() for all remaining elements and they can be in
	 * free_by_rcu_ttrace or in waiting_for_gp_ttrace lists, so drain those lists now.
	 *
	 * Except for waiting_for_gp_ttrace list, there are no concurrent operations
	 * on these lists, so it is safe to use __llist_del_all().
	 */
	free_all(llist_del_all(&c->free_by_rcu_ttrace), percpu);
	free_all(llist_del_all(&c->waiting_for_gp_ttrace), percpu);
	free_all(__llist_del_all(&c->free_llist), percpu);
	free_all(__llist_del_all(&c->free_llist_extra), percpu);
	free_all(__llist_del_all(&c->free_by_rcu), percpu);
	free_all(__llist_del_all(&c->free_llist_extra_rcu), percpu);
	free_all(llist_del_all(&c->waiting_for_gp), percpu);
}

static void check_mem_cache(struct bpf_mem_cache *c)
{
	WARN_ON_ONCE(!llist_empty(&c->free_by_rcu_ttrace));
	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
	WARN_ON_ONCE(!llist_empty(&c->free_llist));
	WARN_ON_ONCE(!llist_empty(&c->free_llist_extra));
	WARN_ON_ONCE(!llist_empty(&c->free_by_rcu));
	WARN_ON_ONCE(!llist_empty(&c->free_llist_extra_rcu));
	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp));
}

static void check_leaked_objs(struct bpf_mem_alloc *ma)
{
	struct bpf_mem_caches *cc;
	struct bpf_mem_cache *c;
	int cpu, i;

	if (ma->cache) {
		for_each_possible_cpu(cpu) {
			c = per_cpu_ptr(ma->cache, cpu);
			check_mem_cache(c);
		}
	}
	if (ma->caches) {
		for_each_possible_cpu(cpu) {
			cc = per_cpu_ptr(ma->caches, cpu);
			for (i = 0; i < NUM_CACHES; i++) {
				c = &cc->cache[i];
				check_mem_cache(c);
			}
		}
	}
}

static void free_mem_alloc_no_barrier(struct bpf_mem_alloc *ma)
{
	check_leaked_objs(ma);
	free_percpu(ma->cache);
	free_percpu(ma->caches);
	ma->cache = NULL;
	ma->caches = NULL;
}

static void free_mem_alloc(struct bpf_mem_alloc *ma)
{
	/* waiting_for_gp[_ttrace] lists were drained, but RCU callbacks
	 * might still execute. Wait for them.
	 *
	 * rcu_barrier_tasks_trace() doesn't imply synchronize_rcu_tasks_trace(),
	 * but rcu_barrier_tasks_trace() and rcu_barrier() below are only used
	 * to wait for the pending __free_rcu_tasks_trace() and __free_rcu(),
	 * so if call_rcu(head, __free_rcu) is skipped due to
	 * rcu_trace_implies_rcu_gp(), it will be OK to skip rcu_barrier() by
	 * using rcu_trace_implies_rcu_gp() as well.
	 */
	rcu_barrier(); /* wait for __free_by_rcu */
	rcu_barrier_tasks_trace(); /* wait for __free_rcu */
	if (!rcu_trace_implies_rcu_gp())
		rcu_barrier();
	free_mem_alloc_no_barrier(ma);
}

static void free_mem_alloc_deferred(struct work_struct *work)
{
	struct bpf_mem_alloc *ma = container_of(work, struct bpf_mem_alloc, work);

	free_mem_alloc(ma);
	kfree(ma);
}

static void destroy_mem_alloc(struct bpf_mem_alloc *ma, int rcu_in_progress)
{
	struct bpf_mem_alloc *copy;

	if (!rcu_in_progress) {
		/* Fast path. No callbacks are pending, hence no need to do
		 * rcu_barrier-s.
		 */
		free_mem_alloc_no_barrier(ma);
		return;
	}

	copy = kmemdup(ma, sizeof(*ma), GFP_KERNEL);
	if (!copy) {
		/* Slow path with inline barrier-s */
		free_mem_alloc(ma);
		return;
	}

	/* Defer barriers into worker to let the rest of map memory to be freed */
	memset(ma, 0, sizeof(*ma));
	INIT_WORK(&copy->work, free_mem_alloc_deferred);
	queue_work(system_unbound_wq, &copy->work);
}

void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
{
	struct bpf_mem_caches *cc;
	struct bpf_mem_cache *c;
	int cpu, i, rcu_in_progress;

	if (ma->cache) {
		rcu_in_progress = 0;
		for_each_possible_cpu(cpu) {
			c = per_cpu_ptr(ma->cache, cpu);
			WRITE_ONCE(c->draining, true);
			irq_work_sync(&c->refill_work);
			drain_mem_cache(c);
			rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
			rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
		}
		obj_cgroup_put(ma->objcg);
		destroy_mem_alloc(ma, rcu_in_progress);
	}
	if (ma->caches) {
		rcu_in_progress = 0;
		for_each_possible_cpu(cpu) {
			cc = per_cpu_ptr(ma->caches, cpu);
			for (i = 0; i < NUM_CACHES; i++) {
				c = &cc->cache[i];
				WRITE_ONCE(c->draining, true);
				irq_work_sync(&c->refill_work);
				drain_mem_cache(c);
				rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
				rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
			}
		}
		obj_cgroup_put(ma->objcg);
		destroy_mem_alloc(ma, rcu_in_progress);
	}
}

/* notrace is necessary here and in other functions to make sure
 * bpf programs cannot attach to them and cause llist corruptions.
 */
static void notrace *unit_alloc(struct bpf_mem_cache *c)
{
	struct llist_node *llnode = NULL;
	unsigned long flags;
	int cnt = 0;

	/* Disable irqs to prevent the following race for majority of prog types:
	 * prog_A
	 *   bpf_mem_alloc
	 *      preemption or irq -> prog_B
	 *        bpf_mem_alloc
	 *
	 * but prog_B could be a perf_event NMI prog.
	 * Use per-cpu 'active' counter to order free_list access between
	 * unit_alloc/unit_free/bpf_mem_refill.
	 */
	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		llnode = __llist_del_first(&c->free_llist);
		if (llnode) {
			cnt = --c->free_cnt;
			*(struct bpf_mem_cache **)llnode = c;
		}
	}
	local_dec(&c->active);

	WARN_ON(cnt < 0);

	if (cnt < c->low_watermark)
		irq_work_raise(c);
	/* Enable IRQs after the enqueue of irq work completes, so the irq work
	 * will run after IRQs are enabled and free_llist may be refilled by
	 * the irq work before another task preempts the current task.
	 */
	local_irq_restore(flags);
	return llnode;
}

/* Though the 'ptr' object could have been allocated on a different cpu,
 * add it to the free_llist of the current cpu.
 * Let kfree() logic deal with it when it's later called from irq_work.
 */
static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
{
	struct llist_node *llnode = ptr - LLIST_NODE_SZ;
	unsigned long flags;
	int cnt = 0;

	BUILD_BUG_ON(LLIST_NODE_SZ > 8);

	/*
	 * Remember the bpf_mem_cache that allocated this object.
	 * The hint is not accurate.
	 */
	c->tgt = *(struct bpf_mem_cache **)llnode;

	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		__llist_add(llnode, &c->free_llist);
		cnt = ++c->free_cnt;
	} else {
		/* unit_free() cannot fail. Therefore add an object to the atomic
		 * llist. free_bulk() will drain it. Though free_llist_extra is
		 * a per-cpu list we have to use atomic llist_add here, since
		 * it also can be interrupted by a bpf nmi prog that does another
		 * unit_free() into the same free_llist_extra.
		 */
		llist_add(llnode, &c->free_llist_extra);
	}
	local_dec(&c->active);

	if (cnt > c->high_watermark)
		/* free few objects from current cpu into global kmalloc pool */
		irq_work_raise(c);
	/* Enable IRQs after irq_work_raise() completes, otherwise when the
	 * current task is preempted by a task which does unit_alloc(),
	 * unit_alloc() may return NULL unexpectedly because the irq work is
	 * already pending but cannot be triggered and free_llist cannot be
	 * refilled in time.
	 */
	local_irq_restore(flags);
}

static void notrace unit_free_rcu(struct bpf_mem_cache *c, void *ptr)
{
	struct llist_node *llnode = ptr - LLIST_NODE_SZ;
	unsigned long flags;

	c->tgt = *(struct bpf_mem_cache **)llnode;

	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		if (__llist_add(llnode, &c->free_by_rcu))
			c->free_by_rcu_tail = llnode;
	} else {
		llist_add(llnode, &c->free_llist_extra_rcu);
	}
	local_dec(&c->active);

	if (!atomic_read(&c->call_rcu_in_progress))
		irq_work_raise(c);
	local_irq_restore(flags);
}

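/* Unlike unit_free(), unit_free_rcu() never puts the object straight back on
 * free_llist; reuse waits until __free_by_rcu() has run after a regular RCU
 * grace period. The irq work is raised only when no call_rcu batch is already
 * in flight; any backlog is picked up by check_free_by_rcu() on a later
 * refill cycle.
 */
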
/* Called from BPF program or from sys_bpf syscall.
 * In both cases migration is disabled.
 */
void notrace *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size)
{
	void *ret;
	int idx;

	if (!size)
		return NULL;

	if (!ma->percpu)
		size += LLIST_NODE_SZ;
	idx = bpf_mem_cache_idx(size);
	if (idx < 0)
		return NULL;

	ret = unit_alloc(this_cpu_ptr(ma->caches)->cache + idx);
	return !ret ? NULL : ret + LLIST_NODE_SZ;
}

void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr)
{
	struct bpf_mem_cache *c;
	int idx;

	if (!ptr)
		return;

	c = *(void **)(ptr - LLIST_NODE_SZ);
	idx = bpf_mem_cache_idx(c->unit_size);
	if (WARN_ON_ONCE(idx < 0))
		return;

	unit_free(this_cpu_ptr(ma->caches)->cache + idx, ptr);
}

void notrace bpf_mem_free_rcu(struct bpf_mem_alloc *ma, void *ptr)
{
	struct bpf_mem_cache *c;
	int idx;

	if (!ptr)
		return;

	c = *(void **)(ptr - LLIST_NODE_SZ);
	idx = bpf_mem_cache_idx(c->unit_size);
	if (WARN_ON_ONCE(idx < 0))
		return;

	unit_free_rcu(this_cpu_ptr(ma->caches)->cache + idx, ptr);
}

void notrace *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma)
{
	void *ret;

	ret = unit_alloc(this_cpu_ptr(ma->cache));
	return !ret ? NULL : ret + LLIST_NODE_SZ;
}

void notrace bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr)
{
	if (!ptr)
		return;

	unit_free(this_cpu_ptr(ma->cache), ptr);
}

void notrace bpf_mem_cache_free_rcu(struct bpf_mem_alloc *ma, void *ptr)
{
	if (!ptr)
		return;

	unit_free_rcu(this_cpu_ptr(ma->cache), ptr);
}

/* Directly does a kfree() without putting 'ptr' back to the free_llist
 * for reuse and without waiting for a rcu_tasks_trace gp.
 * The caller must first go through the rcu_tasks_trace gp for 'ptr'
 * before calling bpf_mem_cache_raw_free().
 * It could be used when the rcu_tasks_trace callback does not have
 * a hold on the original bpf_mem_alloc object that allocated the
 * 'ptr'. This should only be used in the uncommon code path.
 * Otherwise, the bpf_mem_alloc's free_llist cannot be refilled
 * and may affect performance.
 */
void bpf_mem_cache_raw_free(void *ptr)
{
	if (!ptr)
		return;

	kfree(ptr - LLIST_NODE_SZ);
}

/* When flags == GFP_KERNEL, it signals that the caller will not cause
 * deadlock when using kmalloc. bpf_mem_cache_alloc_flags() will use
 * kmalloc if the free_llist is empty.
 */
void notrace *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags)
{
	struct bpf_mem_cache *c;
	void *ret;

	c = this_cpu_ptr(ma->cache);

	ret = unit_alloc(c);
	if (!ret && flags == GFP_KERNEL) {
		struct mem_cgroup *memcg, *old_memcg;

		memcg = get_memcg(c);
		old_memcg = set_active_memcg(memcg);
		ret = __alloc(c, NUMA_NO_NODE, GFP_KERNEL | __GFP_NOWARN | __GFP_ACCOUNT);
		if (ret)
			*(struct bpf_mem_cache **)ret = c;
		set_active_memcg(old_memcg);
		mem_cgroup_put(memcg);
	}

	return !ret ? NULL : ret + LLIST_NODE_SZ;
}

int bpf_mem_alloc_check_size(bool percpu, size_t size)
{
	/* The size of percpu allocation doesn't have LLIST_NODE_SZ overhead */
	if ((percpu && size > BPF_MEM_ALLOC_SIZE_MAX) ||
	    (!percpu && size > BPF_MEM_ALLOC_SIZE_MAX - LLIST_NODE_SZ))
		return -E2BIG;

	return 0;
}
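
/* Example: with an 8-byte struct llist_node, a non-percpu allocation may
 * request at most 4096 - 8 = 4088 bytes, while a percpu allocation may use
 * the full 4096 bytes.
 */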