/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/vmalloc.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
struct bucket {
	struct hlist_head head;
	raw_spinlock_t lock;
};

struct bpf_htab {
	struct bpf_map map;
	struct bucket *buckets;
	void *elems;
	union {
		struct pcpu_freelist freelist;
		struct bpf_lru lru;
	};
	void __percpu *extra_elems;
	atomic_t count;	/* number of elements in this hashtable */
	u32 n_buckets;	/* number of hash buckets */
	u32 elem_size;	/* size of each element in bytes */
};
enum extra_elem_state {
	HTAB_NOT_AN_EXTRA_ELEM = 0,
	HTAB_EXTRA_ELEM_FREE,
	HTAB_EXTRA_ELEM_USED
};
/* each htab element is struct htab_elem + key + value */
struct htab_elem {
	union {
		struct hlist_node hash_node;
		struct bpf_htab *htab;
		struct pcpu_freelist_node fnode;
	};
	union {
		struct rcu_head rcu;
		enum extra_elem_state state;
		struct bpf_lru_node lru_node;
	};
	u32 hash;
	char key[0] __aligned(8);
};
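
/* Element layout note: the struct header is followed by the key (stored at
 * 'key', which is 8-byte aligned) and then, for regular hash maps, the value
 * rounded up to 8 bytes; per-cpu maps store a pointer to a per-cpu value
 * area in that slot instead (see htab_elem_set_ptr()/htab_elem_get_ptr()
 * below).  The two unions let one element reuse the same memory whether it
 * is hashed into a bucket, sitting on the per-cpu freelist, queued for RCU
 * free, or linked on an LRU list.
 */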
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);

static bool htab_is_lru(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static bool htab_is_percpu(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}
static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
				     void __percpu *pptr)
{
	*(void __percpu **)(l->key + key_size) = pptr;
}

static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
{
	return *(void __percpu **)(l->key + key_size);
}

static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
{
	return (struct htab_elem *) (htab->elems + i * htab->elem_size);
}
static void htab_free_elems(struct bpf_htab *htab)
{
	int i;

	if (!htab_is_percpu(htab))
		goto free_elems;

	for (i = 0; i < htab->map.max_entries; i++) {
		void __percpu *pptr;

		pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
					 htab->map.key_size);
		free_percpu(pptr);
	}
free_elems:
	vfree(htab->elems);
}
static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
					  u32 hash)
{
	struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
	struct htab_elem *l;

	if (node) {
		l = container_of(node, struct htab_elem, lru_node);
		memcpy(l->key, key, htab->map.key_size);
		return l;
	}

	return NULL;
}
static int prealloc_init(struct bpf_htab *htab)
{
	int err = -ENOMEM, i;

	htab->elems = vzalloc(htab->elem_size * htab->map.max_entries);
	if (!htab->elems)
		return -ENOMEM;

	if (!htab_is_percpu(htab))
		goto skip_percpu_elems;

	for (i = 0; i < htab->map.max_entries; i++) {
		u32 size = round_up(htab->map.value_size, 8);
		void __percpu *pptr;

		pptr = __alloc_percpu_gfp(size, 8, GFP_USER | __GFP_NOWARN);
		if (!pptr)
			goto free_elems;
		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
				  pptr);
	}

skip_percpu_elems:
	if (htab_is_lru(htab))
		err = bpf_lru_init(&htab->lru,
				   htab->map.map_flags & BPF_F_NO_COMMON_LRU,
				   offsetof(struct htab_elem, hash) -
				   offsetof(struct htab_elem, lru_node),
				   htab_lru_map_delete_node,
				   htab);
	else
		err = pcpu_freelist_init(&htab->freelist);

	if (err)
		goto free_elems;

	if (htab_is_lru(htab))
		bpf_lru_populate(&htab->lru, htab->elems,
				 offsetof(struct htab_elem, lru_node),
				 htab->elem_size, htab->map.max_entries);
	else
		pcpu_freelist_populate(&htab->freelist, htab->elems,
				       htab->elem_size, htab->map.max_entries);

	return 0;

free_elems:
	htab_free_elems(htab);
	return err;
}
static void prealloc_destroy(struct bpf_htab *htab)
{
	htab_free_elems(htab);

	if (htab_is_lru(htab))
		bpf_lru_destroy(&htab->lru);
	else
		pcpu_freelist_destroy(&htab->freelist);
}
static int alloc_extra_elems(struct bpf_htab *htab)
{
	void __percpu *pptr;
	int cpu;

	pptr = __alloc_percpu_gfp(htab->elem_size, 8, GFP_USER | __GFP_NOWARN);
	if (!pptr)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		((struct htab_elem *)per_cpu_ptr(pptr, cpu))->state =
			HTAB_EXTRA_ELEM_FREE;
	}
	htab->extra_elems = pptr;
	return 0;
}
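
/* Note: extra_elems provides one spare htab_elem per possible CPU.  When a
 * regular (non-LRU, non-percpu) map is completely full and an update targets
 * a key that already exists, alloc_htab_elem() falls back to this spare so
 * the in-place replacement still succeeds instead of failing with -E2BIG.
 */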
/* Called from syscall */
static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	/* percpu_lru means each cpu has its own LRU list.
	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
	 * the map's value itself is percpu.  percpu_lru has
	 * nothing to do with the map's value.
	 */
	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
	struct bpf_htab *htab;
	int err, i;
	u64 cost;

	if (lru && !capable(CAP_SYS_ADMIN))
		/* LRU implementation is much more complicated than other
		 * maps.  Hence, limit to CAP_SYS_ADMIN for now.
		 */
		return ERR_PTR(-EPERM);

	if (attr->map_flags & ~(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU))
		/* reserved bits should not be used */
		return ERR_PTR(-EINVAL);

	if (!lru && percpu_lru)
		return ERR_PTR(-EINVAL);

	if (lru && !prealloc)
		return ERR_PTR(-ENOTSUPP);
	htab = kzalloc(sizeof(*htab), GFP_USER);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	/* mandatory map attributes */
	htab->map.map_type = attr->map_type;
	htab->map.key_size = attr->key_size;
	htab->map.value_size = attr->value_size;
	htab->map.max_entries = attr->max_entries;
	htab->map.map_flags = attr->map_flags;

	/* check sanity of attributes.
	 * value_size == 0 may be allowed in the future to use map as a set
	 */
	err = -EINVAL;
	if (htab->map.max_entries == 0 || htab->map.key_size == 0 ||
	    htab->map.value_size == 0)
		goto free_htab;

	if (percpu_lru) {
		/* ensure each CPU's lru list has >=1 elements.
		 * since we are at it, make each lru list have the same
		 * number of elements.
		 */
		htab->map.max_entries = roundup(attr->max_entries,
						num_possible_cpus());
		if (htab->map.max_entries < attr->max_entries)
			htab->map.max_entries = rounddown(attr->max_entries,
							  num_possible_cpus());
	}

	/* hash table size must be power of 2 */
	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);

	err = -E2BIG;
	if (htab->map.key_size > MAX_BPF_STACK)
		/* eBPF programs initialize keys on stack, so they cannot be
		 * larger than max stack size
		 */
		goto free_htab;

	if (htab->map.value_size >= (1 << (KMALLOC_SHIFT_MAX - 1)) -
	    MAX_BPF_STACK - sizeof(struct htab_elem))
		/* if value_size is bigger, the user space won't be able to
		 * access the elements via bpf syscall. This check also makes
		 * sure that the elem_size doesn't overflow and it's
		 * kmalloc-able later in htab_map_update_elem()
		 */
		goto free_htab;

	if (percpu && round_up(htab->map.value_size, 8) > PCPU_MIN_UNIT_SIZE)
		/* make sure the size for pcpu_alloc() is reasonable */
		goto free_htab;

	htab->elem_size = sizeof(struct htab_elem) +
			  round_up(htab->map.key_size, 8);
	if (percpu)
		htab->elem_size += sizeof(void *);
	else
		htab->elem_size += round_up(htab->map.value_size, 8);

	/* prevent zero size kmalloc and check for u32 overflow */
	if (htab->n_buckets == 0 ||
	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
		goto free_htab;
	cost = (u64) htab->n_buckets * sizeof(struct bucket) +
	       (u64) htab->elem_size * htab->map.max_entries;

	if (percpu)
		cost += (u64) round_up(htab->map.value_size, 8) *
			num_possible_cpus() * htab->map.max_entries;
	else
		cost += (u64) htab->elem_size * num_possible_cpus();

	if (cost >= U32_MAX - PAGE_SIZE)
		/* make sure page count doesn't overflow */
		goto free_htab;

	htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(htab->map.pages);
	if (err)
		goto free_htab;

	err = -ENOMEM;
	htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct bucket),
				      GFP_USER | __GFP_NOWARN);
	if (!htab->buckets) {
		htab->buckets = vmalloc(htab->n_buckets * sizeof(struct bucket));
		if (!htab->buckets)
			goto free_htab;
	}

	for (i = 0; i < htab->n_buckets; i++) {
		INIT_HLIST_HEAD(&htab->buckets[i].head);
		raw_spin_lock_init(&htab->buckets[i].lock);
	}

	if (!percpu && !lru) {
		/* lru itself can remove the least used element, so
		 * there is no need for an extra elem during map_update.
		 */
		err = alloc_extra_elems(htab);
		if (err)
			goto free_buckets;
	}

	if (prealloc) {
		err = prealloc_init(htab);
		if (err)
			goto free_extra_elems;
	}

	return &htab->map;

free_extra_elems:
	free_percpu(htab->extra_elems);
free_buckets:
	kvfree(htab->buckets);
free_htab:
	kfree(htab);
	return ERR_PTR(err);
}
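
/* Sizing sketch (illustrative, not from the original source): with
 * key_size = 4, value_size = 8, max_entries = 1000 on a non-percpu map,
 * elem_size = sizeof(struct htab_elem) + round_up(4, 8) + round_up(8, 8)
 *           = sizeof(struct htab_elem) + 16 bytes,
 * n_buckets = roundup_pow_of_two(1000) = 1024, and the charged cost is
 * n_buckets * sizeof(struct bucket) + max_entries * elem_size plus one
 * extra elem_size per possible CPU for the extra_elems area.
 */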
static inline u32 htab_map_hash(const void *key, u32 key_len)
{
	return jhash(key, key_len, 0);
}

static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &htab->buckets[hash & (htab->n_buckets - 1)];
}

static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &__select_bucket(htab, hash)->head;
}
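
/* n_buckets is always a power of two (see htab_map_alloc()), so masking with
 * (n_buckets - 1) is equivalent to hash % n_buckets without a division.
 */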
static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash,
					 void *key, u32 key_size)
{
	struct htab_elem *l;

	hlist_for_each_entry_rcu(l, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	return NULL;
}
/* Called from syscall or from eBPF program */
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_head *head;
	struct htab_elem *l;
	u32 hash, key_size;

	/* Must be called with rcu_read_lock. */
	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	l = lookup_elem_raw(head, hash, key, key_size);

	return l;
}
static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return l->key + round_up(map->key_size, 8);

	return NULL;
}

static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return l->key + round_up(map->key_size, 8);
	}

	return NULL;
}
/* It is called from the bpf_lru_list when the LRU needs to delete
 * older elements from the htab.
 */
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
{
	struct bpf_htab *htab = (struct bpf_htab *)arg;
	struct htab_elem *l, *tgt_l;
	struct hlist_head *head;
	unsigned long flags;
	struct bucket *b;

	tgt_l = container_of(node, struct htab_elem, lru_node);
	b = __select_bucket(htab, tgt_l->hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	hlist_for_each_entry_rcu(l, head, hash_node)
		if (l == tgt_l) {
			hlist_del_rcu(&l->hash_node);
			break;
		}

	raw_spin_unlock_irqrestore(&b->lock, flags);

	return l == tgt_l;
}
/* Called from syscall */
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_head *head;
	struct htab_elem *l, *next_l;
	u32 hash, key_size;
	int i;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	/* lookup the key */
	l = lookup_elem_raw(head, hash, key, key_size);

	if (!l) {
		i = 0;
		goto find_first_elem;
	}

	/* key was found, get next key in the same bucket */
	next_l = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&l->hash_node)),
				  struct htab_elem, hash_node);

	if (next_l) {
		/* if next elem in this hash list is non-zero, just return it */
		memcpy(next_key, next_l->key, key_size);
		return 0;
	}

	/* no more elements in this hash list, go to the next bucket */
	i = hash & (htab->n_buckets - 1);
	i++;

find_first_elem:
	/* iterate over buckets */
	for (; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		/* pick first element in the bucket */
		next_l = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					  struct htab_elem, hash_node);
		if (next_l) {
			/* if it's not empty, just return it */
			memcpy(next_key, next_l->key, key_size);
			return 0;
		}
	}

	/* iterated over all buckets and all elements */
	return -ENOENT;
}
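
/* Iteration note: user space walks the whole table by repeatedly invoking
 * the get_next_key operation and feeding back the key returned by the
 * previous call until it returns -ENOENT; a key that is not (or no longer)
 * present simply restarts the scan from the first non-empty bucket.
 */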
static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
{
	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
		free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
	kfree(l);
}
static void htab_elem_free_rcu(struct rcu_head *head)
{
	struct htab_elem *l = container_of(head, struct htab_elem, rcu);
	struct bpf_htab *htab = l->htab;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering while
	 * we're calling kfree, otherwise deadlock is possible if kprobes
	 * are placed somewhere inside of slub
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	htab_elem_free(htab, l);
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();
}
static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
	if (l->state == HTAB_EXTRA_ELEM_USED) {
		l->state = HTAB_EXTRA_ELEM_FREE;
		return;
	}

	if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) {
		pcpu_freelist_push(&htab->freelist, &l->fnode);
	} else {
		atomic_dec(&htab->count);
		l->htab = htab;
		call_rcu(&l->rcu, htab_elem_free_rcu);
	}
}
static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
			    void *value, bool onallcpus)
{
	if (!onallcpus) {
		/* copy true value_size bytes */
		memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
	} else {
		u32 size = round_up(htab->map.value_size, 8);
		int off = 0, cpu;

		for_each_possible_cpu(cpu) {
			bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
					value + off, size);
			off += size;
		}
	}
}
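
/* For per-cpu maps the user-supplied buffer holds one value slot per
 * possible CPU, each slot padded to a multiple of 8 bytes.  With
 * onallcpus == true (syscall path) every CPU's slot is written; otherwise
 * only the current CPU's copy is updated.
 */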
static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
					 void *value, u32 key_size, u32 hash,
					 bool percpu, bool onallcpus,
					 bool old_elem_exists)
{
	u32 size = htab->map.value_size;
	bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC);
	struct htab_elem *l_new;
	void __percpu *pptr;
	int err = 0;

	if (prealloc) {
		l_new = (struct htab_elem *)pcpu_freelist_pop(&htab->freelist);
		if (!l_new)
			err = -E2BIG;
	} else {
		if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
			atomic_dec(&htab->count);
			err = -E2BIG;
		} else {
			l_new = kmalloc(htab->elem_size,
					GFP_ATOMIC | __GFP_NOWARN);
			if (!l_new)
				return ERR_PTR(-ENOMEM);
		}
	}

	if (err) {
		if (!old_elem_exists)
			return ERR_PTR(err);

		/* if we're updating the existing element and the hash table
		 * is full, use per-cpu extra elems
		 */
		l_new = this_cpu_ptr(htab->extra_elems);
		if (l_new->state != HTAB_EXTRA_ELEM_FREE)
			return ERR_PTR(-E2BIG);
		l_new->state = HTAB_EXTRA_ELEM_USED;
	} else {
		l_new->state = HTAB_NOT_AN_EXTRA_ELEM;
	}

	memcpy(l_new->key, key, key_size);
	if (percpu) {
		/* round up value_size to 8 bytes */
		size = round_up(size, 8);

		if (prealloc) {
			pptr = htab_elem_get_ptr(l_new, key_size);
		} else {
			/* alloc_percpu zero-fills */
			pptr = __alloc_percpu_gfp(size, 8,
						  GFP_ATOMIC | __GFP_NOWARN);
			if (!pptr) {
				kfree(l_new);
				return ERR_PTR(-ENOMEM);
			}
		}

		pcpu_copy_value(htab, pptr, value, onallcpus);

		if (!prealloc)
			htab_elem_set_ptr(l_new, key_size, pptr);
	} else {
		memcpy(l_new->key + round_up(key_size, 8), value, size);
	}

	l_new->hash = hash;
	return l_new;
}
static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
		       u64 map_flags)
{
	if (l_old && map_flags == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!l_old && map_flags == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}
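
/* map_flags semantics: BPF_ANY (0) inserts or updates, BPF_NOEXIST (1) only
 * inserts, BPF_EXIST (2) only updates; anything greater than BPF_EXIST is
 * rejected as an unknown flag by the update paths below.
 */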
/* Called from syscall or from eBPF program */
static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
				!!l_old);
	if (IS_ERR(l_new)) {
		/* all pre-allocated elements are in use or memory exhausted */
		ret = PTR_ERR(l_new);
		goto err;
	}

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		hlist_del_rcu(&l_old->hash_node);
		free_htab_elem(htab, l_old);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}
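
/* Update ordering: the new element is linked in before the old one is
 * unlinked, so a concurrent RCU reader always finds either the old or the
 * new value for the key, never a missing entry.
 */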
static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new, *l_old = NULL;
	struct hlist_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because getting free nodes from LRU may need
	 * to remove older elements from htab and this removal
	 * operation will need a bucket lock.
	 */
	l_new = prealloc_lru_pop(htab, key, hash);
	if (!l_new)
		return -ENOMEM;
	memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size);

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		bpf_lru_node_set_ref(&l_new->lru_node);
		hlist_del_rcu(&l_old->hash_node);
	}
	ret = 0;

err:
	raw_spin_unlock_irqrestore(&b->lock, flags);

	if (ret)
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	else if (l_old)
		bpf_lru_push_free(&htab->lru, &l_old->lru_node);

	return ret;
}
static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
					 void *value, u64 map_flags,
					 bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		l_new = alloc_htab_elem(htab, key, value, key_size,
					hash, true, onallcpus, false);
		if (IS_ERR(l_new)) {
			ret = PTR_ERR(l_new);
			goto err;
		}
		hlist_add_head_rcu(&l_new->hash_node, head);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}
static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					     void *value, u64 map_flags,
					     bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because LRU's elem alloc may need
	 * to remove older elem from htab and this removal
	 * operation will need a bucket lock.
	 */
	if (map_flags != BPF_EXIST) {
		l_new = prealloc_lru_pop(htab, key, hash);
		if (!l_new)
			return -ENOMEM;
	}

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		bpf_lru_node_set_ref(&l_old->lru_node);

		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
				value, onallcpus);
		hlist_add_head_rcu(&l_new->hash_node, head);
		l_new = NULL;
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	if (l_new)
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	return ret;
}
static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags)
{
	return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
}

static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					   void *value, u64 map_flags)
{
	return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
						 false);
}
/* Called from syscall or from eBPF program */
static int htab_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_del_rcu(&l->hash_node);
		free_htab_elem(htab, l);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}
static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_del_rcu(&l->hash_node);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	if (l)
		bpf_lru_push_free(&htab->lru, &l->lru_node);
	return ret;
}
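
/* LRU maps are always preallocated, so deleted elements are not kfree'd;
 * they go back to the LRU free lists via bpf_lru_push_free() and get
 * recycled by prealloc_lru_pop() on later updates.
 */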
static void delete_all_elements(struct bpf_htab *htab)
{
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		struct hlist_head *head = select_bucket(htab, i);
		struct hlist_node *n;
		struct htab_elem *l;

		hlist_for_each_entry_safe(l, n, head, hash_node) {
			hlist_del_rcu(&l->hash_node);
			if (l->state != HTAB_EXTRA_ELEM_USED)
				htab_elem_free(htab, l);
		}
	}
}
/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete
	 */
	synchronize_rcu();

	/* some of free_htab_elem() callbacks for elements of this map may
	 * not have executed. Wait for them.
	 */
	rcu_barrier();
	if (htab->map.map_flags & BPF_F_NO_PREALLOC)
		delete_all_elements(htab);
	else
		prealloc_destroy(htab);

	free_percpu(htab->extra_elems);
	kvfree(htab->buckets);
	kfree(htab);
}
static const struct bpf_map_ops htab_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_map_lookup_elem,
	.map_update_elem = htab_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
};

static struct bpf_map_type_list htab_type __read_mostly = {
	.ops = &htab_ops,
	.type = BPF_MAP_TYPE_HASH,
};
static const struct bpf_map_ops htab_lru_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_map_lookup_elem,
	.map_update_elem = htab_lru_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
};

static struct bpf_map_type_list htab_lru_type __read_mostly = {
	.ops = &htab_lru_ops,
	.type = BPF_MAP_TYPE_LRU_HASH,
};
/* Called from eBPF program */
static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	else
		return NULL;
}

static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	}

	return NULL;
}
int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l;
	void __percpu *pptr;
	int ret = -ENOENT;
	int cpu, off = 0;
	u32 size;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	l = __htab_map_lookup_elem(map, key);
	if (!l)
		goto out;
	if (htab_is_lru(htab))
		bpf_lru_node_set_ref(&l->lru_node);
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off,
				per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	int ret;

	rcu_read_lock();
	if (htab_is_lru(htab))
		ret = __htab_lru_percpu_map_update_elem(map, key, value,
							map_flags, true);
	else
		ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
						    true);
	rcu_read_unlock();

	return ret;
}
static const struct bpf_map_ops htab_percpu_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_percpu_map_lookup_elem,
	.map_update_elem = htab_percpu_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
};

static struct bpf_map_type_list htab_percpu_type __read_mostly = {
	.ops = &htab_percpu_ops,
	.type = BPF_MAP_TYPE_PERCPU_HASH,
};

static const struct bpf_map_ops htab_lru_percpu_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
	.map_update_elem = htab_lru_percpu_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
};

static struct bpf_map_type_list htab_lru_percpu_type __read_mostly = {
	.ops = &htab_lru_percpu_ops,
	.type = BPF_MAP_TYPE_LRU_PERCPU_HASH,
};
static int __init register_htab_map(void)
{
	bpf_register_map_type(&htab_type);
	bpf_register_map_type(&htab_percpu_type);
	bpf_register_map_type(&htab_lru_type);
	bpf_register_map_type(&htab_lru_percpu_type);
	return 0;
}
late_initcall(register_htab_map);