// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/random.h>
#include <uapi/linux/btf.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
#include "map_in_map.h"

#define HTAB_CREATE_FLAG_MASK						\
	(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE |	\
	 BPF_F_ACCESS_MASK | BPF_F_ZERO_SEED)
#define BATCH_OPS(_name)			\
	.map_lookup_batch =			\
	_name##_map_lookup_batch,		\
	.map_lookup_and_delete_batch =		\
	_name##_map_lookup_and_delete_batch,	\
	.map_update_batch =			\
	generic_map_update_batch,		\
	.map_delete_batch =			\
	generic_map_delete_batch
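/*
 * For illustration (not part of the original macro definition): in an ops
 * table below, BATCH_OPS(htab) expands to
 *
 *	.map_lookup_batch = htab_map_lookup_batch,
 *	.map_lookup_and_delete_batch = htab_map_lookup_and_delete_batch,
 *	.map_update_batch = generic_map_update_batch,
 *	.map_delete_batch = generic_map_delete_batch
 */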
/*
 * The bucket lock has two protection scopes:
 *
 * 1) Serializing concurrent operations from BPF programs on different
 *    CPUs
 *
 * 2) Serializing concurrent operations from BPF programs and sys_bpf()
 *
 * BPF programs can execute in any context including perf, kprobes and
 * tracing. As there are almost no limits where perf, kprobes and tracing
 * can be invoked from, the lock operations need to be protected against
 * deadlocks. Deadlocks can be caused by recursion and by an invocation in
 * the lock held section when functions which acquire this lock are invoked
 * from sys_bpf(). BPF recursion is prevented by incrementing the per CPU
 * variable bpf_prog_active, which prevents BPF programs attached to perf
 * events, kprobes and tracing from being invoked before the prior invocation
 * from one of these contexts completed. sys_bpf() uses the same mechanism
 * by pinning the task to the current CPU and incrementing the recursion
 * protection across the map operation.
 *
 * This has subtle implications on PREEMPT_RT. PREEMPT_RT forbids certain
 * operations like memory allocations (even with GFP_ATOMIC) from atomic
 * contexts. This is required because even with GFP_ATOMIC the memory
 * allocator calls into code paths which acquire locks with long held lock
 * sections. To ensure deterministic behaviour these locks are regular
 * spinlocks, which are converted to 'sleepable' spinlocks on RT. The only
 * true atomic contexts on an RT kernel are the low level hardware
 * handling, scheduling, low level interrupt handling, NMIs etc. None of
 * these contexts should ever do memory allocations.
 *
 * As regular device interrupt handlers and soft interrupts are forced into
 * thread context, the existing code which does
 *   spin_lock*(); alloc(GFP_ATOMIC); spin_unlock*();
 * just works.
 *
 * In theory the BPF locks could be converted to regular spinlocks as well,
 * but the bucket locks and percpu_freelist locks can be taken from
 * arbitrary contexts (perf, kprobes, tracepoints) which are required to be
 * atomic contexts even on RT. These mechanisms require preallocated maps,
 * so there is no need to invoke memory allocations within the lock held
 * sections.
 *
 * BPF maps which need dynamic allocation are only used from (forced)
 * thread context on RT and can therefore use regular spinlocks which in
 * turn allows invoking memory allocations from the lock held section.
 *
 * On a non RT kernel this distinction is neither possible nor required.
 * spinlock maps to raw_spinlock and the extra code is optimized out by the
 * compiler.
 */
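/*
 * Illustrative sketch (not part of the original file) of the recursion
 * protection described above. sys_bpf() paths, including the batch ops
 * later in this file, bracket the map operation roughly like this; the
 * helpers live in include/linux/bpf.h:
 *
 *	bpf_disable_instrumentation();	// pin task + bump per-CPU bpf_prog_active
 *	rcu_read_lock();
 *	... map operation, e.g. a bucket lookup/update ...
 *	rcu_read_unlock();
 *	bpf_enable_instrumentation();
 */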
struct bucket {
	struct hlist_nulls_head head;
	union {
		raw_spinlock_t raw_lock;
		spinlock_t     lock;
	};
};

struct bpf_htab {
	struct bpf_map map;
	struct bucket *buckets;
	void *elems;
	union {
		struct pcpu_freelist freelist;
		struct bpf_lru lru;
	};
	struct htab_elem *__percpu *extra_elems;
	atomic_t count;	/* number of elements in this hashtable */
	u32 n_buckets;	/* number of hash buckets */
	u32 elem_size;	/* size of each element in bytes */
	u32 hashrnd;
};
/* each htab element is struct htab_elem + key + value */
struct htab_elem {
	union {
		struct hlist_nulls_node hash_node;
		struct {
			void *padding;
			union {
				struct bpf_htab *htab;
				struct pcpu_freelist_node fnode;
				struct htab_elem *batch_flink;
			};
		};
	};
	union {
		struct rcu_head rcu;
		struct bpf_lru_node lru_node;
	};
	u32 hash;
	char key[] __aligned(8);
};
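/*
 * Illustrative layout sketch (not part of the original source): an element
 * of a regular hash map with key_size = 6 and value_size = 4 occupies
 *
 *	struct htab_elem | key (6 bytes, padded to 8) | value (4 bytes, padded to 8)
 *
 * i.e. elem_size = sizeof(struct htab_elem) + round_up(key_size, 8) +
 * round_up(value_size, 8). For per-cpu maps the value area instead holds a
 * pointer to the per-cpu allocation (see htab_elem_set_ptr() below).
 */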
static inline bool htab_is_prealloc(const struct bpf_htab *htab)
{
	return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
}

static inline bool htab_use_raw_lock(const struct bpf_htab *htab)
{
	return (!IS_ENABLED(CONFIG_PREEMPT_RT) || htab_is_prealloc(htab));
}
static void htab_init_buckets(struct bpf_htab *htab)
{
	unsigned i;

	for (i = 0; i < htab->n_buckets; i++) {
		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
		if (htab_use_raw_lock(htab))
			raw_spin_lock_init(&htab->buckets[i].raw_lock);
		else
			spin_lock_init(&htab->buckets[i].lock);
	}
}
static inline unsigned long htab_lock_bucket(const struct bpf_htab *htab,
					     struct bucket *b)
{
	unsigned long flags;

	if (htab_use_raw_lock(htab))
		raw_spin_lock_irqsave(&b->raw_lock, flags);
	else
		spin_lock_irqsave(&b->lock, flags);
	return flags;
}

static inline void htab_unlock_bucket(const struct bpf_htab *htab,
				      struct bucket *b,
				      unsigned long flags)
{
	if (htab_use_raw_lock(htab))
		raw_spin_unlock_irqrestore(&b->raw_lock, flags);
	else
		spin_unlock_irqrestore(&b->lock, flags);
}
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);

static bool htab_is_lru(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
	       htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static bool htab_is_percpu(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	       htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}
static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
				     void __percpu *pptr)
{
	*(void __percpu **)(l->key + key_size) = pptr;
}

static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
{
	return *(void __percpu **)(l->key + key_size);
}

static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
{
	return *(void **)(l->key + roundup(map->key_size, 8));
}

static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
{
	return (struct htab_elem *) (htab->elems + i * htab->elem_size);
}
static void htab_free_elems(struct bpf_htab *htab)
{
	int i;

	if (!htab_is_percpu(htab))
		goto free_elems;

	for (i = 0; i < htab->map.max_entries; i++) {
		void __percpu *pptr;

		pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
					 htab->map.key_size);
		free_percpu(pptr);
		cond_resched();
	}
free_elems:
	bpf_map_area_free(htab->elems);
}
/* The LRU list has a lock (lru_lock). Each htab bucket has a lock
 * (bucket_lock). If both locks need to be acquired together, the lock
 * order is always lru_lock -> bucket_lock and this only happens in
 * bpf_lru_list.c logic. For example, a code path in bpf_lru_pop_free(),
 * which is called by prealloc_lru_pop(), will acquire lru_lock first,
 * followed by bucket_lock.
 *
 * In hashtab.c, to avoid deadlock, lock acquisition of bucket_lock
 * followed by lru_lock is not allowed. In such cases, bucket_lock needs
 * to be released first before acquiring lru_lock.
 */
static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
					  u32 hash)
{
	struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
	struct htab_elem *l;

	if (node) {
		l = container_of(node, struct htab_elem, lru_node);
		memcpy(l->key, key, htab->map.key_size);
		return l;
	}

	return NULL;
}
static int prealloc_init(struct bpf_htab *htab)
{
	u32 num_entries = htab->map.max_entries;
	int err = -ENOMEM, i;

	if (!htab_is_percpu(htab) && !htab_is_lru(htab))
		num_entries += num_possible_cpus();

	htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries,
					 htab->map.numa_node);
	if (!htab->elems)
		return -ENOMEM;

	if (!htab_is_percpu(htab))
		goto skip_percpu_elems;

	for (i = 0; i < num_entries; i++) {
		u32 size = round_up(htab->map.value_size, 8);
		void __percpu *pptr;

		pptr = __alloc_percpu_gfp(size, 8, GFP_USER | __GFP_NOWARN);
		if (!pptr)
			goto free_elems;
		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
				  pptr);
		cond_resched();
	}

skip_percpu_elems:
	if (htab_is_lru(htab))
		err = bpf_lru_init(&htab->lru,
				   htab->map.map_flags & BPF_F_NO_COMMON_LRU,
				   offsetof(struct htab_elem, hash) -
				   offsetof(struct htab_elem, lru_node),
				   htab_lru_map_delete_node,
				   htab);
	else
		err = pcpu_freelist_init(&htab->freelist);

	if (err)
		goto free_elems;

	if (htab_is_lru(htab))
		bpf_lru_populate(&htab->lru, htab->elems,
				 offsetof(struct htab_elem, lru_node),
				 htab->elem_size, num_entries);
	else
		pcpu_freelist_populate(&htab->freelist,
				       htab->elems + offsetof(struct htab_elem, fnode),
				       htab->elem_size, num_entries);

	return 0;

free_elems:
	htab_free_elems(htab);
	return err;
}
static void prealloc_destroy(struct bpf_htab *htab)
{
	htab_free_elems(htab);

	if (htab_is_lru(htab))
		bpf_lru_destroy(&htab->lru);
	else
		pcpu_freelist_destroy(&htab->freelist);
}
static int alloc_extra_elems(struct bpf_htab *htab)
{
	struct htab_elem *__percpu *pptr, *l_new;
	struct pcpu_freelist_node *l;
	int cpu;

	pptr = __alloc_percpu_gfp(sizeof(struct htab_elem *), 8,
				  GFP_USER | __GFP_NOWARN);
	if (!pptr)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		l = pcpu_freelist_pop(&htab->freelist);
		/* pop will succeed, since prealloc_init()
		 * preallocated extra num_possible_cpus elements
		 */
		l_new = container_of(l, struct htab_elem, fnode);
		*per_cpu_ptr(pptr, cpu) = l_new;
	}
	htab->extra_elems = pptr;
	return 0;
}
/* Called from syscall */
static int htab_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	/* percpu_lru means each cpu has its own LRU list.
	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
	 * the map's value itself is percpu.  percpu_lru has
	 * nothing to do with the map's value.
	 */
	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
	bool zero_seed = (attr->map_flags & BPF_F_ZERO_SEED);
	int numa_node = bpf_map_attr_numa_node(attr);

	BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
		     offsetof(struct htab_elem, hash_node.pprev));
	BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
		     offsetof(struct htab_elem, hash_node.pprev));

	if (lru && !bpf_capable())
		/* LRU implementation is much more complicated than other
		 * maps.  Hence, limit to CAP_BPF.
		 */
		return -EPERM;

	if (zero_seed && !capable(CAP_SYS_ADMIN))
		/* Guard against local DoS, and discourage production use. */
		return -EPERM;

	if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags))
		return -EINVAL;

	if (!lru && percpu_lru)
		return -EINVAL;

	if (lru && !prealloc)
		return -ENOTSUPP;

	if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
		return -EINVAL;

	/* check sanity of attributes.
	 * value_size == 0 may be allowed in the future to use map as a set
	 */
	if (attr->max_entries == 0 || attr->key_size == 0 ||
	    attr->value_size == 0)
		return -EINVAL;

	if (attr->key_size > MAX_BPF_STACK)
		/* eBPF programs initialize keys on stack, so they cannot be
		 * larger than max stack size
		 */
		return -E2BIG;

	if (attr->value_size >= KMALLOC_MAX_SIZE -
	    MAX_BPF_STACK - sizeof(struct htab_elem))
		/* if value_size is bigger, the user space won't be able to
		 * access the elements via bpf syscall. This check also makes
		 * sure that the elem_size doesn't overflow and it's
		 * kmalloc-able later in htab_map_update_elem()
		 */
		return -E2BIG;

	return 0;
}
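/*
 * Example (illustrative only, not part of this file): map-create attributes
 * that pass the checks above, roughly as user space would fill them in for
 * BPF_MAP_CREATE:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_HASH,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 1024,
 *		.map_flags   = 0,		// preallocated (default)
 *	};
 */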
static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	/* percpu_lru means each cpu has its own LRU list.
	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
	 * the map's value itself is percpu.  percpu_lru has
	 * nothing to do with the map's value.
	 */
	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
	struct bpf_htab *htab;
	u64 cost;
	int err;

	htab = kzalloc(sizeof(*htab), GFP_USER);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&htab->map, attr);

	if (percpu_lru) {
		/* ensure each CPU's lru list has >=1 elements.
		 * since we are at it, make each lru list have the same
		 * number of elements.
		 */
		htab->map.max_entries = roundup(attr->max_entries,
						num_possible_cpus());
		if (htab->map.max_entries < attr->max_entries)
			htab->map.max_entries = rounddown(attr->max_entries,
							  num_possible_cpus());
	}

	/* hash table size must be power of 2 */
	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);

	htab->elem_size = sizeof(struct htab_elem) +
			  round_up(htab->map.key_size, 8);
	if (percpu)
		htab->elem_size += sizeof(void *);
	else
		htab->elem_size += round_up(htab->map.value_size, 8);

	err = -E2BIG;
	/* prevent zero size kmalloc and check for u32 overflow */
	if (htab->n_buckets == 0 ||
	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
		goto free_htab;

	cost = (u64) htab->n_buckets * sizeof(struct bucket) +
	       (u64) htab->elem_size * htab->map.max_entries;

	if (percpu)
		cost += (u64) round_up(htab->map.value_size, 8) *
			num_possible_cpus() * htab->map.max_entries;
	else
		cost += (u64) htab->elem_size * num_possible_cpus();

	/* if map size is larger than memlock limit, reject it */
	err = bpf_map_charge_init(&htab->map.memory, cost);
	if (err)
		goto free_htab;

	err = -ENOMEM;
	htab->buckets = bpf_map_area_alloc(htab->n_buckets *
					   sizeof(struct bucket),
					   htab->map.numa_node);
	if (!htab->buckets)
		goto free_charge;

	if (htab->map.map_flags & BPF_F_ZERO_SEED)
		htab->hashrnd = 0;
	else
		htab->hashrnd = get_random_int();

	htab_init_buckets(htab);

	if (prealloc) {
		err = prealloc_init(htab);
		if (err)
			goto free_buckets;

		if (!percpu && !lru) {
			/* lru itself can remove the least used element, so
			 * there is no need for an extra elem during map_update.
			 */
			err = alloc_extra_elems(htab);
			if (err)
				goto free_prealloc;
		}
	}

	return &htab->map;

free_prealloc:
	prealloc_destroy(htab);
free_buckets:
	bpf_map_area_free(htab->buckets);
free_charge:
	bpf_map_charge_finish(&htab->map.memory);
free_htab:
	kfree(htab);
	return ERR_PTR(err);
}
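/*
 * Worked example (illustrative, not part of the original source): for a
 * non-percpu, preallocated map with max_entries = 1024, key_size = 8 and
 * value_size = 16 on a 4-CPU machine:
 *
 *	n_buckets = 1024 (already a power of two)
 *	elem_size = sizeof(struct htab_elem) + 8 + 16
 *	cost      = 1024 * sizeof(struct bucket) + 1024 * elem_size
 *		    + 4 * elem_size		// per-cpu extra elements
 *
 * which is the amount charged against the memlock limit above.
 */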
static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd)
{
	return jhash(key, key_len, hashrnd);
}

static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &htab->buckets[hash & (htab->n_buckets - 1)];
}

static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &__select_bucket(htab, hash)->head;
}
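/*
 * Illustrative note (not part of the original source): because n_buckets is
 * rounded up to a power of two in htab_map_alloc(), the modulo reduction is
 * a single mask. For example, with n_buckets = 1024 a hash of 0x12345678
 * selects bucket 0x12345678 & 0x3ff = 0x278.
 */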
/* this lookup function can only be called with bucket lock taken */
static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
					 void *key, u32 key_size)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	return NULL;
}
/* can be called without bucket lock. it will repeat the loop in
 * the unlikely event that elements moved from one bucket into another
 * while the linked list is being walked
 */
static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
					       u32 hash, void *key,
					       u32 key_size, u32 n_buckets)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

again:
	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
		goto again;

	return NULL;
}
/* Called from syscall or from eBPF program directly, so
 * arguments have to match bpf_map_lookup_elem() exactly.
 * The return value is adjusted by BPF instructions
 * in htab_map_gen_lookup().
 */
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	u32 hash, key_size;

	/* Must be called with rcu_read_lock. */
	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	head = select_bucket(htab, hash);

	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	return l;
}
static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return l->key + round_up(map->key_size, 8);

	return NULL;
}
/* inline bpf_map_lookup_elem() call.
 * Instead of:
 * bpf_prog
 *   bpf_map_lookup_elem
 *     map->ops->map_lookup_elem
 *       htab_map_lookup_elem
 *         __htab_map_lookup_elem
 * do:
 * bpf_prog
 *   __htab_map_lookup_elem
 */
static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
		     (void *(*)(struct bpf_map *map, void *key))NULL));
	*insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	return insn - insn_buf;
}
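/*
 * Illustrative sketch of what the three instructions emitted above do, in
 * pseudo BPF assembly (not part of the original source):
 *
 *	call __htab_map_lookup_elem	// r0 = elem or NULL
 *	if r0 == 0 goto +1		// skip the fixup on a miss
 *	r0 += offsetof(struct htab_elem, key) + round_up(key_size, 8)
 *
 * i.e. on a hit the program receives a pointer to the value, on a miss NULL.
 */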
static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
							void *key, const bool mark)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		if (mark)
			bpf_lru_node_set_ref(&l->lru_node);
		return l->key + round_up(map->key_size, 8);
	}

	return NULL;
}

static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
{
	return __htab_lru_map_lookup_elem(map, key, true);
}

static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
{
	return __htab_lru_map_lookup_elem(map, key, false);
}
static u32 htab_lru_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int ref_reg = BPF_REG_1;

	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
		     (void *(*)(struct bpf_map *map, void *key))NULL));
	*insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4);
	*insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret,
			      offsetof(struct htab_elem, lru_node) +
			      offsetof(struct bpf_lru_node, ref));
	*insn++ = BPF_JMP_IMM(BPF_JNE, ref_reg, 0, 1);
	*insn++ = BPF_ST_MEM(BPF_B, ret,
			     offsetof(struct htab_elem, lru_node) +
			     offsetof(struct bpf_lru_node, ref),
			     1);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	return insn - insn_buf;
}
/* It is called from the bpf_lru_list when the LRU needs to delete
 * older elements from the htab.
 */
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
{
	struct bpf_htab *htab = (struct bpf_htab *)arg;
	struct htab_elem *l = NULL, *tgt_l;
	struct hlist_nulls_head *head;
	struct hlist_nulls_node *n;
	unsigned long flags;
	struct bucket *b;

	tgt_l = container_of(node, struct htab_elem, lru_node);
	b = __select_bucket(htab, tgt_l->hash);
	head = &b->head;

	flags = htab_lock_bucket(htab, b);

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l == tgt_l) {
			hlist_nulls_del_rcu(&l->hash_node);
			break;
		}

	htab_unlock_bucket(htab, b, flags);

	return l == tgt_l;
}
/* Called from syscall */
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l, *next_l;
	u32 hash, key_size;
	int i = 0;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	if (!key)
		goto find_first_elem;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	head = select_bucket(htab, hash);

	/* lookup the key */
	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	if (!l)
		goto find_first_elem;

	/* key was found, get next key in the same bucket */
	next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
					struct htab_elem, hash_node);

	if (next_l) {
		/* if next elem in this hash list is non-zero, just return it */
		memcpy(next_key, next_l->key, key_size);
		return 0;
	}

	/* no more elements in this hash list, go to the next bucket */
	i = hash & (htab->n_buckets - 1);
	i++;

find_first_elem:
	/* iterate over buckets */
	for (; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		/* pick first element in the bucket */
		next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
						struct htab_elem, hash_node);
		if (next_l) {
			/* if it's not empty, just return it */
			memcpy(next_key, next_l->key, key_size);
			return 0;
		}
	}

	/* iterated over all buckets and all elements */
	return -ENOENT;
}
static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
{
	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
		free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
	kfree(l);
}

static void htab_elem_free_rcu(struct rcu_head *head)
{
	struct htab_elem *l = container_of(head, struct htab_elem, rcu);
	struct bpf_htab *htab = l->htab;

	htab_elem_free(htab, l);
}
static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
	struct bpf_map *map = &htab->map;

	if (map->ops->map_fd_put_ptr) {
		void *ptr = fd_htab_map_get_ptr(map, l);

		map->ops->map_fd_put_ptr(ptr);
	}

	if (htab_is_prealloc(htab)) {
		__pcpu_freelist_push(&htab->freelist, &l->fnode);
	} else {
		atomic_dec(&htab->count);
		l->htab = htab;
		call_rcu(&l->rcu, htab_elem_free_rcu);
	}
}
static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
			    void *value, bool onallcpus)
{
	if (!onallcpus) {
		/* copy true value_size bytes */
		memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
	} else {
		u32 size = round_up(htab->map.value_size, 8);
		int off = 0, cpu;

		for_each_possible_cpu(cpu) {
			bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
					value + off, size);
			off += size;
		}
	}
}
static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
	       BITS_PER_LONG == 64;
}
static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
					 void *value, u32 key_size, u32 hash,
					 bool percpu, bool onallcpus,
					 struct htab_elem *old_elem)
{
	u32 size = htab->map.value_size;
	bool prealloc = htab_is_prealloc(htab);
	struct htab_elem *l_new, **pl_new;
	void __percpu *pptr;

	if (prealloc) {
		if (old_elem) {
			/* if we're updating the existing element,
			 * use per-cpu extra elems to avoid freelist_pop/push
			 */
			pl_new = this_cpu_ptr(htab->extra_elems);
			l_new = *pl_new;
			*pl_new = old_elem;
		} else {
			struct pcpu_freelist_node *l;

			l = __pcpu_freelist_pop(&htab->freelist);
			if (!l)
				return ERR_PTR(-E2BIG);
			l_new = container_of(l, struct htab_elem, fnode);
		}
	} else {
		if (atomic_inc_return(&htab->count) > htab->map.max_entries)
			if (!old_elem) {
				/* when map is full and update() is replacing
				 * old element, it's ok to allocate, since
				 * old element will be freed immediately.
				 * Otherwise return an error
				 */
				l_new = ERR_PTR(-E2BIG);
				goto dec_count;
			}
		l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
				     htab->map.numa_node);
		if (!l_new) {
			l_new = ERR_PTR(-ENOMEM);
			goto dec_count;
		}
		check_and_init_map_lock(&htab->map,
					l_new->key + round_up(key_size, 8));
	}

	memcpy(l_new->key, key, key_size);
	if (percpu) {
		size = round_up(size, 8);
		if (prealloc) {
			pptr = htab_elem_get_ptr(l_new, key_size);
		} else {
			/* alloc_percpu zero-fills */
			pptr = __alloc_percpu_gfp(size, 8,
						  GFP_ATOMIC | __GFP_NOWARN);
			if (!pptr) {
				kfree(l_new);
				l_new = ERR_PTR(-ENOMEM);
				goto dec_count;
			}
		}

		pcpu_copy_value(htab, pptr, value, onallcpus);

		if (!prealloc)
			htab_elem_set_ptr(l_new, key_size, pptr);
	} else if (fd_htab_map_needs_adjust(htab)) {
		size = round_up(size, 8);
		memcpy(l_new->key + round_up(key_size, 8), value, size);
	} else {
		copy_map_value(&htab->map,
			       l_new->key + round_up(key_size, 8),
			       value);
	}

	l_new->hash = hash;
	return l_new;
dec_count:
	atomic_dec(&htab->count);
	return l_new;
}
static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
		       u64 map_flags)
{
	if (l_old && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!l_old && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}
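/*
 * Illustrative summary of the update flags handled above (matching the UAPI
 * semantics, not new behaviour):
 *
 *	BPF_ANY      - create a new element or update an existing one
 *	BPF_NOEXIST  - create only; -EEXIST if the key is already present
 *	BPF_EXIST    - update only; -ENOENT if the key is absent
 *	BPF_F_LOCK   - may be OR'ed in to update a value that embeds a
 *		       struct bpf_spin_lock under that lock
 */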
/* Called from syscall or from eBPF program */
static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	b = __select_bucket(htab, hash);
	head = &b->head;

	if (unlikely(map_flags & BPF_F_LOCK)) {
		if (unlikely(!map_value_has_spin_lock(map)))
			return -EINVAL;
		/* find an element without taking the bucket lock */
		l_old = lookup_nulls_elem_raw(head, hash, key, key_size,
					      htab->n_buckets);
		ret = check_flags(htab, l_old, map_flags);
		if (ret)
			return ret;
		if (l_old) {
			/* grab the element lock and update value in place */
			copy_map_value_locked(map,
					      l_old->key + round_up(key_size, 8),
					      value, false);
			return 0;
		}
		/* fall through, grab the bucket lock and lookup again.
		 * 99.9% chance that the element won't be found,
		 * but second lookup under lock has to be done.
		 */
	}

	flags = htab_lock_bucket(htab, b);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (unlikely(l_old && (map_flags & BPF_F_LOCK))) {
		/* first lookup without the bucket lock didn't find the element,
		 * but second lookup with the bucket lock found it.
		 * This case is highly unlikely, but has to be dealt with:
		 * grab the element lock in addition to the bucket lock
		 * and update element in place
		 */
		copy_map_value_locked(map,
				      l_old->key + round_up(key_size, 8),
				      value, false);
		ret = 0;
		goto err;
	}

	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
				l_old);
	if (IS_ERR(l_new)) {
		/* all pre-allocated elements are in use or memory exhausted */
		ret = PTR_ERR(l_new);
		goto err;
	}

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		hlist_nulls_del_rcu(&l_old->hash_node);
		if (!htab_is_prealloc(htab))
			free_htab_elem(htab, l_old);
	}
	ret = 0;
err:
	htab_unlock_bucket(htab, b, flags);
	return ret;
}
static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new, *l_old = NULL;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because getting free nodes from LRU may need
	 * to remove older elements from htab and this removal
	 * operation will need a bucket lock.
	 */
	l_new = prealloc_lru_pop(htab, key, hash);
	if (!l_new)
		return -ENOMEM;
	memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size);

	flags = htab_lock_bucket(htab, b);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		bpf_lru_node_set_ref(&l_new->lru_node);
		hlist_nulls_del_rcu(&l_old->hash_node);
	}
	ret = 0;

err:
	htab_unlock_bucket(htab, b, flags);

	if (ret)
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	else if (l_old)
		bpf_lru_push_free(&htab->lru, &l_old->lru_node);

	return ret;
}
static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
					 void *value, u64 map_flags,
					 bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	b = __select_bucket(htab, hash);
	head = &b->head;

	flags = htab_lock_bucket(htab, b);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		l_new = alloc_htab_elem(htab, key, value, key_size,
					hash, true, onallcpus, NULL);
		if (IS_ERR(l_new)) {
			ret = PTR_ERR(l_new);
			goto err;
		}
		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	}
	ret = 0;
err:
	htab_unlock_bucket(htab, b, flags);
	return ret;
}
static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					     void *value, u64 map_flags,
					     bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because LRU's elem alloc may need
	 * to remove older elem from htab and this removal
	 * operation will need a bucket lock.
	 */
	if (map_flags != BPF_EXIST) {
		l_new = prealloc_lru_pop(htab, key, hash);
		if (!l_new)
			return -ENOMEM;
	}

	flags = htab_lock_bucket(htab, b);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		bpf_lru_node_set_ref(&l_old->lru_node);

		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
				value, onallcpus);
		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
		l_new = NULL;
	}
	ret = 0;
err:
	htab_unlock_bucket(htab, b, flags);
	if (l_new)
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	return ret;
}
static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags)
{
	return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
}

static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					   void *value, u64 map_flags)
{
	return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
						 false);
}
/* Called from syscall or from eBPF program */
static int htab_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);
	b = __select_bucket(htab, hash);
	head = &b->head;

	flags = htab_lock_bucket(htab, b);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_nulls_del_rcu(&l->hash_node);
		free_htab_elem(htab, l);
		ret = 0;
	}

	htab_unlock_bucket(htab, b, flags);
	return ret;
}
static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size, htab->hashrnd);
	b = __select_bucket(htab, hash);
	head = &b->head;

	flags = htab_lock_bucket(htab, b);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_nulls_del_rcu(&l->hash_node);
		ret = 0;
	}

	htab_unlock_bucket(htab, b, flags);
	if (l)
		bpf_lru_push_free(&htab->lru, &l->lru_node);
	return ret;
}
static void delete_all_elements(struct bpf_htab *htab)
{
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		struct hlist_nulls_head *head = select_bucket(htab, i);
		struct hlist_nulls_node *n;
		struct htab_elem *l;

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			hlist_nulls_del_rcu(&l->hash_node);
			htab_elem_free(htab, l);
		}
	}
}
/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete
	 */
	synchronize_rcu();

	/* some of free_htab_elem() callbacks for elements of this map may
	 * not have executed. Wait for them.
	 */
	rcu_barrier();
	if (!htab_is_prealloc(htab))
		delete_all_elements(htab);
	else
		prealloc_destroy(htab);

	free_percpu(htab->extra_elems);
	bpf_map_area_free(htab->buckets);
	kfree(htab);
}
static void htab_map_seq_show_elem(struct bpf_map *map, void *key,
				   struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = htab_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
	seq_puts(m, ": ");
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}
static int
__htab_map_lookup_and_delete_batch(struct bpf_map *map,
				   const union bpf_attr *attr,
				   union bpf_attr __user *uattr,
				   bool do_delete, bool is_lru_map,
				   bool is_percpu)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	u32 bucket_cnt, total, key_size, value_size, roundup_key_size;
	void *keys = NULL, *values = NULL, *value, *dst_key, *dst_val;
	void __user *uvalues = u64_to_user_ptr(attr->batch.values);
	void __user *ukeys = u64_to_user_ptr(attr->batch.keys);
	void *ubatch = u64_to_user_ptr(attr->batch.in_batch);
	u32 batch, max_count, size, bucket_size;
	struct htab_elem *node_to_free = NULL;
	u64 elem_map_flags, map_flags;
	struct hlist_nulls_head *head;
	struct hlist_nulls_node *n;
	unsigned long flags = 0;
	bool locked = false;
	struct htab_elem *l;
	struct bucket *b;
	int ret = 0;

	elem_map_flags = attr->batch.elem_flags;
	if ((elem_map_flags & ~BPF_F_LOCK) ||
	    ((elem_map_flags & BPF_F_LOCK) && !map_value_has_spin_lock(map)))
		return -EINVAL;

	map_flags = attr->batch.flags;
	if (map_flags)
		return -EINVAL;

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	batch = 0;
	if (ubatch && copy_from_user(&batch, ubatch, sizeof(batch)))
		return -EFAULT;

	if (batch >= htab->n_buckets)
		return -ENOENT;

	key_size = htab->map.key_size;
	roundup_key_size = round_up(htab->map.key_size, 8);
	value_size = htab->map.value_size;
	size = round_up(value_size, 8);
	if (is_percpu)
		value_size = size * num_possible_cpus();
	total = 0;
	/* while experimenting with hash tables with sizes ranging from 10 to
	 * 1000, it was observed that a bucket can have up to 5 entries.
	 */
	bucket_size = 5;

alloc:
	/* We cannot do copy_from_user or copy_to_user inside
	 * the rcu_read_lock. Allocate enough space here.
	 */
	keys = kvmalloc(key_size * bucket_size, GFP_USER | __GFP_NOWARN);
	values = kvmalloc(value_size * bucket_size, GFP_USER | __GFP_NOWARN);
	if (!keys || !values) {
		ret = -ENOMEM;
		goto after_loop;
	}

again:
	bpf_disable_instrumentation();
	rcu_read_lock();
again_nocopy:
	dst_key = keys;
	dst_val = values;
	b = &htab->buckets[batch];
	head = &b->head;
	/* do not grab the lock unless we need it (bucket_cnt > 0). */
	if (locked)
		flags = htab_lock_bucket(htab, b);

	bucket_cnt = 0;
	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		bucket_cnt++;

	if (bucket_cnt && !locked) {
		locked = true;
		goto again_nocopy;
	}

	if (bucket_cnt > (max_count - total)) {
		if (total == 0)
			ret = -ENOSPC;
		/* Note that since bucket_cnt > 0 here, it is implicit
		 * that the lock was grabbed, so release it.
		 */
		htab_unlock_bucket(htab, b, flags);
		rcu_read_unlock();
		bpf_enable_instrumentation();
		goto after_loop;
	}

	if (bucket_cnt > bucket_size) {
		bucket_size = bucket_cnt;
		/* Note that since bucket_cnt > 0 here, it is implicit
		 * that the lock was grabbed, so release it.
		 */
		htab_unlock_bucket(htab, b, flags);
		rcu_read_unlock();
		bpf_enable_instrumentation();
		kvfree(keys);
		kvfree(values);
		goto alloc;
	}

	/* Next block is only safe to run if you have grabbed the lock */
	if (!locked)
		goto next_batch;

	hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
		memcpy(dst_key, l->key, key_size);

		if (is_percpu) {
			int off = 0, cpu;
			void __percpu *pptr;

			pptr = htab_elem_get_ptr(l, map->key_size);
			for_each_possible_cpu(cpu) {
				bpf_long_memcpy(dst_val + off,
						per_cpu_ptr(pptr, cpu), size);
				off += size;
			}
		} else {
			value = l->key + roundup_key_size;
			if (elem_map_flags & BPF_F_LOCK)
				copy_map_value_locked(map, dst_val, value,
						      true);
			else
				copy_map_value(map, dst_val, value);
			check_and_init_map_lock(map, dst_val);
		}
		if (do_delete) {
			hlist_nulls_del_rcu(&l->hash_node);

			/* bpf_lru_push_free() will acquire lru_lock, which
			 * may cause deadlock. See comments in function
			 * prealloc_lru_pop(). Let us do bpf_lru_push_free()
			 * after releasing the bucket lock.
			 */
			if (is_lru_map) {
				l->batch_flink = node_to_free;
				node_to_free = l;
			} else {
				free_htab_elem(htab, l);
			}
		}
		dst_key += key_size;
		dst_val += value_size;
	}

	htab_unlock_bucket(htab, b, flags);
	locked = false;

	while (node_to_free) {
		l = node_to_free;
		node_to_free = node_to_free->batch_flink;
		bpf_lru_push_free(&htab->lru, &l->lru_node);
	}

next_batch:
	/* If we are not copying data, we can go to the next bucket and avoid
	 * unlocking the rcu.
	 */
	if (!bucket_cnt && (batch + 1 < htab->n_buckets)) {
		batch++;
		goto again_nocopy;
	}

	rcu_read_unlock();
	bpf_enable_instrumentation();
	if (bucket_cnt && (copy_to_user(ukeys + total * key_size, keys,
	    key_size * bucket_cnt) ||
	    copy_to_user(uvalues + total * value_size, values,
	    value_size * bucket_cnt))) {
		ret = -EFAULT;
		goto after_loop;
	}

	total += bucket_cnt;
	batch++;
	if (batch >= htab->n_buckets) {
		ret = -ENOENT;
		goto after_loop;
	}
	goto again;

after_loop:
	if (ret == -EFAULT)
		goto out;

	/* copy # of entries and next batch */
	ubatch = u64_to_user_ptr(attr->batch.out_batch);
	if (copy_to_user(ubatch, &batch, sizeof(batch)) ||
	    put_user(total, &uattr->batch.count))
		ret = -EFAULT;

out:
	kvfree(keys);
	kvfree(values);
	return ret;
}
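/*
 * Illustrative user-space view of the batch interface implemented above
 * (a sketch under the assumption that the libbpf wrapper is used; not part
 * of this file): each call consumes batch.in_batch (the bucket to resume
 * from), fills up to batch.count keys/values, reports the resume point in
 * batch.out_batch, and -ENOENT signals that all buckets have been visited.
 *
 *	__u32 batch, count;
 *	bool first = true;
 *	int err;
 *
 *	do {
 *		count = 64;
 *		err = bpf_map_lookup_batch(fd, first ? NULL : &batch, &batch,
 *					   keys, values, &count, NULL);
 *		first = false;
 *		// consume 'count' entries from keys[]/values[]
 *	} while (!err);
 */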
static int
htab_percpu_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
						  false, true);
}

static int
htab_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
					const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
						  false, true);
}

static int
htab_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
		      union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
						  false, false);
}

static int
htab_map_lookup_and_delete_batch(struct bpf_map *map,
				 const union bpf_attr *attr,
				 union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
						  false, false);
}

static int
htab_lru_percpu_map_lookup_batch(struct bpf_map *map,
				 const union bpf_attr *attr,
				 union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
						  true, true);
}

static int
htab_lru_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
					    const union bpf_attr *attr,
					    union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
						  true, true);
}

static int
htab_lru_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
						  true, false);
}

static int
htab_lru_map_lookup_and_delete_batch(struct bpf_map *map,
				     const union bpf_attr *attr,
				     union bpf_attr __user *uattr)
{
	return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
						  true, false);
}
const struct bpf_map_ops htab_map_ops = {
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_map_lookup_elem,
	.map_update_elem = htab_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_gen_lookup = htab_map_gen_lookup,
	.map_seq_show_elem = htab_map_seq_show_elem,
	BATCH_OPS(htab),
};

const struct bpf_map_ops htab_lru_map_ops = {
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_map_lookup_elem,
	.map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
	.map_update_elem = htab_lru_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
	.map_gen_lookup = htab_lru_map_gen_lookup,
	.map_seq_show_elem = htab_map_seq_show_elem,
	BATCH_OPS(htab_lru),
};
/* Called from eBPF program */
static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));

	return NULL;
}

static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	}

	return NULL;
}
int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
{
	struct htab_elem *l;
	void __percpu *pptr;
	int ret = -ENOENT;
	int cpu, off = 0;
	u32 size;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	l = __htab_map_lookup_elem(map, key);
	if (!l)
		goto out;
	/* We do not mark LRU map element here in order to not mess up
	 * eviction heuristics when user space does a map walk.
	 */
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off,
				per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	int ret;

	rcu_read_lock();
	if (htab_is_lru(htab))
		ret = __htab_lru_percpu_map_update_elem(map, key, value,
							map_flags, true);
	else
		ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
						    true);
	rcu_read_unlock();

	return ret;
}
static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
					  struct seq_file *m)
{
	struct htab_elem *l;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	l = __htab_map_lookup_elem(map, key);
	if (!l) {
		rcu_read_unlock();
		return;
	}

	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
	seq_puts(m, ": {\n");
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}
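/*
 * Example of the output produced above when an element is dumped through
 * bpffs (illustrative, assuming a u32 key of 1 and u64 values on a 2-CPU
 * system):
 *
 *	1: {
 *		cpu0: 7
 *		cpu1: 9
 *	}
 */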
const struct bpf_map_ops htab_percpu_map_ops = {
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_percpu_map_lookup_elem,
	.map_update_elem = htab_percpu_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
	BATCH_OPS(htab_percpu),
};

const struct bpf_map_ops htab_lru_percpu_map_ops = {
	.map_alloc_check = htab_map_alloc_check,
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
	.map_update_elem = htab_lru_percpu_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
	BATCH_OPS(htab_lru_percpu),
};
static int fd_htab_map_alloc_check(union bpf_attr *attr)
{
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	return htab_map_alloc_check(attr);
}
static void fd_htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_node *n;
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			void *ptr = fd_htab_map_get_ptr(map, l);

			map->ops->map_fd_put_ptr(ptr);
		}
	}

	htab_map_free(map);
}
/* only called from syscall */
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	ptr = htab_map_lookup_elem(map, key);
	if (ptr)
		*value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr));
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}
/* only called from syscall */
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags)
{
	void *ptr;
	int ret;
	u32 ufd = *(u32 *)value;

	ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ret = htab_map_update_elem(map, key, &ptr, map_flags);
	if (ret)
		map->ops->map_fd_put_ptr(ptr);

	return ret;
}
static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = htab_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}
static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = htab_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}
static u32 htab_of_map_gen_lookup(struct bpf_map *map,
				  struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
		     (void *(*)(struct bpf_map *map, void *key))NULL));
	*insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);

	return insn - insn_buf;
}
static void htab_of_map_free(struct bpf_map *map)
{
	bpf_map_meta_free(map->inner_map_meta);
	fd_htab_map_free(map);
}
const struct bpf_map_ops htab_of_maps_map_ops = {
	.map_alloc_check = fd_htab_map_alloc_check,
	.map_alloc = htab_of_map_alloc,
	.map_free = htab_of_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_of_map_lookup_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = htab_of_map_gen_lookup,
	.map_check_btf = map_check_no_btf,
};