/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include "percpu_freelist.h"
struct bucket {
        struct hlist_nulls_head head;
        raw_spinlock_t lock;
};

struct bpf_htab {
        struct bpf_map map;
        struct bucket *buckets;
        void *elems;
        struct pcpu_freelist freelist;
        void __percpu *extra_elems;
        atomic_t count;         /* number of elements in this hashtable */
        u32 n_buckets;          /* number of hash buckets */
        u32 elem_size;          /* size of each element in bytes */
};
enum extra_elem_state {
        HTAB_NOT_AN_EXTRA_ELEM = 0,
        HTAB_EXTRA_ELEM_FREE,
        HTAB_EXTRA_ELEM_USED,
};
/* each htab element is struct htab_elem + key + value */
struct htab_elem {
        union {
                struct hlist_nulls_node hash_node;
                struct {
                        void *padding;
                        union {
                                struct bpf_htab *htab;
                                struct pcpu_freelist_node fnode;
                        };
                };
        };
        union {
                struct rcu_head rcu;
                enum extra_elem_state state;
        };
        u32 hash;
        char key[0] __aligned(8);
};
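
/* For BPF_MAP_TYPE_PERCPU_HASH the bytes following the key do not hold the
 * value itself but a pointer to the per-cpu value storage; the two helpers
 * below store and fetch that pointer.
 */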
static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
                                     void __percpu *pptr)
{
        *(void __percpu **)(l->key + key_size) = pptr;
}
static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
{
        return *(void __percpu **)(l->key + key_size);
}
static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
{
        return (struct htab_elem *) (htab->elems + i * htab->elem_size);
}
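
/* free the preallocated element area; for per-cpu maps each element's
 * per-cpu value storage must be released first
 */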
static void htab_free_elems(struct bpf_htab *htab)
{
        int i;

        if (htab->map.map_type != BPF_MAP_TYPE_PERCPU_HASH)
                goto free_elems;

        for (i = 0; i < htab->map.max_entries; i++) {
                void __percpu *pptr;

                pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
                                         htab->map.key_size);
                free_percpu(pptr);
        }
free_elems:
        bpf_map_area_free(htab->elems);
}
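
/* without BPF_F_NO_PREALLOC all elements are allocated up front and pushed
 * onto a per-cpu freelist, so updates never allocate memory at runtime
 */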
static int prealloc_elems_and_freelist(struct bpf_htab *htab)
{
        int err = -ENOMEM, i;

        htab->elems = bpf_map_area_alloc(htab->elem_size *
                                         htab->map.max_entries);
        if (!htab->elems)
                return -ENOMEM;

        if (htab->map.map_type != BPF_MAP_TYPE_PERCPU_HASH)
                goto skip_percpu_elems;

        for (i = 0; i < htab->map.max_entries; i++) {
                u32 size = round_up(htab->map.value_size, 8);
                void __percpu *pptr;

                pptr = __alloc_percpu_gfp(size, 8, GFP_USER | __GFP_NOWARN);
                if (!pptr)
                        goto free_elems;
                htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
                                  pptr);
        }

skip_percpu_elems:
        err = pcpu_freelist_init(&htab->freelist);
        if (err)
                goto free_elems;

        pcpu_freelist_populate(&htab->freelist,
                               htab->elems + offsetof(struct htab_elem, fnode),
                               htab->elem_size, htab->map.max_entries);

        return 0;

free_elems:
        htab_free_elems(htab);
        return err;
}
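
/* one spare element per possible cpu, used when an existing key is updated
 * while the table is already at max_entries
 */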
static int alloc_extra_elems(struct bpf_htab *htab)
{
        void __percpu *pptr;
        int cpu;

        pptr = __alloc_percpu_gfp(htab->elem_size, 8, GFP_USER | __GFP_NOWARN);
        if (!pptr)
                return -ENOMEM;

        for_each_possible_cpu(cpu) {
                ((struct htab_elem *)per_cpu_ptr(pptr, cpu))->state =
                        HTAB_EXTRA_ELEM_FREE;
        }
        htab->extra_elems = pptr;
        return 0;
}
/* Called from syscall */
static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
        bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_HASH;
        struct bpf_htab *htab;
        int err, i;
        u64 cost;

        BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
                     offsetof(struct htab_elem, hash_node.pprev));
        BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
                     offsetof(struct htab_elem, hash_node.pprev));

        if (attr->map_flags & ~BPF_F_NO_PREALLOC)
                /* reserved bits should not be used */
                return ERR_PTR(-EINVAL);

        htab = kzalloc(sizeof(*htab), GFP_USER);
        if (!htab)
                return ERR_PTR(-ENOMEM);

        /* mandatory map attributes */
        htab->map.map_type = attr->map_type;
        htab->map.key_size = attr->key_size;
        htab->map.value_size = attr->value_size;
        htab->map.max_entries = attr->max_entries;
        htab->map.map_flags = attr->map_flags;

        /* check sanity of attributes.
         * value_size == 0 may be allowed in the future to use map as a set
         */
        err = -EINVAL;
        if (htab->map.max_entries == 0 || htab->map.key_size == 0 ||
            htab->map.value_size == 0)
                goto free_htab;

        /* hash table size must be power of 2 */
        htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);

        err = -E2BIG;
        if (htab->map.key_size > MAX_BPF_STACK)
                /* eBPF programs initialize keys on stack, so they cannot be
                 * larger than max stack size
                 */
                goto free_htab;

        if (htab->map.value_size >= (1 << (KMALLOC_SHIFT_MAX - 1)) -
            MAX_BPF_STACK - sizeof(struct htab_elem))
                /* if value_size is bigger, the user space won't be able to
                 * access the elements via bpf syscall. This check also makes
                 * sure that the elem_size doesn't overflow and it's
                 * kmalloc-able later in htab_map_update_elem()
                 */
                goto free_htab;

        if (percpu && round_up(htab->map.value_size, 8) > PCPU_MIN_UNIT_SIZE)
                /* make sure the size for pcpu_alloc() is reasonable */
                goto free_htab;

        htab->elem_size = sizeof(struct htab_elem) +
                          round_up(htab->map.key_size, 8);
        if (percpu)
                htab->elem_size += sizeof(void *);
        else
                htab->elem_size += round_up(htab->map.value_size, 8);

        /* prevent zero size kmalloc and check for u32 overflow */
        if (htab->n_buckets == 0 ||
            htab->n_buckets > U32_MAX / sizeof(struct bucket))
                goto free_htab;

        cost = (u64) htab->n_buckets * sizeof(struct bucket) +
               (u64) htab->elem_size * htab->map.max_entries;

        if (percpu)
                cost += (u64) round_up(htab->map.value_size, 8) *
                        num_possible_cpus() * htab->map.max_entries;
        else
                cost += (u64) htab->elem_size * num_possible_cpus();

        if (cost >= U32_MAX - PAGE_SIZE)
                /* make sure page count doesn't overflow */
                goto free_htab;

        htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

        /* if map size is larger than memlock limit, reject it early */
        err = bpf_map_precharge_memlock(htab->map.pages);
        if (err)
                goto free_htab;

        err = -ENOMEM;
        htab->buckets = bpf_map_area_alloc(htab->n_buckets *
                                           sizeof(struct bucket));
        if (!htab->buckets)
                goto free_htab;

        for (i = 0; i < htab->n_buckets; i++) {
                INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
                raw_spin_lock_init(&htab->buckets[i].lock);
        }

        if (!percpu) {
                err = alloc_extra_elems(htab);
                if (err)
                        goto free_buckets;
        }

        if (!(attr->map_flags & BPF_F_NO_PREALLOC)) {
                err = prealloc_elems_and_freelist(htab);
                if (err)
                        goto free_extra_elems;
        }

        return &htab->map;

free_extra_elems:
        free_percpu(htab->extra_elems);
free_buckets:
        bpf_map_area_free(htab->buckets);
free_htab:
        kfree(htab);
        return ERR_PTR(err);
}
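
/* keys are hashed with jhash; since n_buckets is a power of two, a bucket
 * is picked by masking the hash with n_buckets - 1
 */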
static inline u32 htab_map_hash(const void *key, u32 key_len)
{
        return jhash(key, key_len, 0);
}
static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
{
        return &htab->buckets[hash & (htab->n_buckets - 1)];
}
static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
        return &__select_bucket(htab, hash)->head;
}
/* this lookup function can only be called with bucket lock taken */
static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
                                         void *key, u32 key_size)
{
        struct hlist_nulls_node *n;
        struct htab_elem *l;

        hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
                if (l->hash == hash && !memcmp(&l->key, key, key_size))
                        return l;

        return NULL;
}
/* can be called without bucket lock. it will repeat the loop in
 * the unlikely event when elements moved from one bucket into another
 * while link list is being walked
 */
static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
                                               u32 hash, void *key,
                                               u32 key_size, u32 n_buckets)
{
        struct hlist_nulls_node *n;
        struct htab_elem *l;

again:
        hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
                if (l->hash == hash && !memcmp(&l->key, key, key_size))
                        return l;

        if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
                goto again;

        return NULL;
}
/* Called from syscall or from eBPF program */
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
        struct hlist_nulls_head *head;
        struct htab_elem *l;
        u32 hash, key_size;

        /* Must be called with rcu_read_lock. */
        WARN_ON_ONCE(!rcu_read_lock_held());

        key_size = map->key_size;

        hash = htab_map_hash(key, key_size);

        head = select_bucket(htab, hash);

        l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

        return l;
}
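
/* returns a pointer to the value, which is stored right after the 8-byte
 * aligned key, or NULL if the key is not present
 */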
static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct htab_elem *l = __htab_map_lookup_elem(map, key);

        if (l)
                return l->key + round_up(map->key_size, 8);

        return NULL;
}
/* Called from syscall */
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
        struct hlist_nulls_head *head;
        struct htab_elem *l, *next_l;
        u32 hash, key_size;
        int i = 0;

        WARN_ON_ONCE(!rcu_read_lock_held());

        key_size = map->key_size;

        if (!key)
                goto find_first_elem;

        hash = htab_map_hash(key, key_size);

        head = select_bucket(htab, hash);

        /* lookup the key */
        l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

        if (!l)
                goto find_first_elem;

        /* key was found, get next key in the same bucket */
        next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
                                        struct htab_elem, hash_node);

        if (next_l) {
                /* if next elem in this hash list is non-zero, just return it */
                memcpy(next_key, next_l->key, key_size);
                return 0;
        }

        /* no more elements in this hash list, go to the next bucket */
        i = hash & (htab->n_buckets - 1);
        i++;

find_first_elem:
        /* iterate over buckets */
        for (; i < htab->n_buckets; i++) {
                head = select_bucket(htab, i);

                /* pick first element in the bucket */
                next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
                                                struct htab_elem, hash_node);
                if (next_l) {
                        /* if it's not empty, just return it */
                        memcpy(next_key, next_l->key, key_size);
                        return 0;
                }
        }

        /* iterated over all buckets and all elements */
        return -ENOENT;
}
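
/* free a single element; per-cpu maps must also release the element's
 * per-cpu value area
 */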
static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
{
        if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
                free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
        kfree(l);
}
static void htab_elem_free_rcu(struct rcu_head *head)
{
        struct htab_elem *l = container_of(head, struct htab_elem, rcu);
        struct bpf_htab *htab = l->htab;

        /* must increment bpf_prog_active to avoid kprobe+bpf triggering while
         * we're calling kfree, otherwise deadlock is possible if kprobes
         * are placed somewhere inside of slub
         */
        preempt_disable();
        __this_cpu_inc(bpf_prog_active);
        htab_elem_free(htab, l);
        __this_cpu_dec(bpf_prog_active);
        preempt_enable();
}
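
/* retire an element: per-cpu extra elems are only marked free, preallocated
 * elems go back to the freelist, kmalloc-ed elems are freed after an RCU
 * grace period
 */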
static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
        if (l->state == HTAB_EXTRA_ELEM_USED) {
                l->state = HTAB_EXTRA_ELEM_FREE;
                return;
        }

        if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) {
                pcpu_freelist_push(&htab->freelist, &l->fnode);
        } else {
                atomic_dec(&htab->count);
                l->htab = htab;
                call_rcu(&l->rcu, htab_elem_free_rcu);
        }
}
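
/* allocate a new element: pop it from the freelist when the map is
 * preallocated, kmalloc it otherwise. If the table is full and an existing
 * element is being replaced, fall back to the per-cpu extra element.
 */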
static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
                                         void *value, u32 key_size, u32 hash,
                                         bool percpu, bool onallcpus,
                                         bool old_elem_exists)
{
        u32 size = htab->map.value_size;
        bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC);
        struct htab_elem *l_new;
        void __percpu *pptr;
        int err = 0;

        if (prealloc) {
                struct pcpu_freelist_node *l;

                l = pcpu_freelist_pop(&htab->freelist);
                if (!l)
                        err = -E2BIG;
                else
                        l_new = container_of(l, struct htab_elem, fnode);
        } else {
                if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
                        atomic_dec(&htab->count);
                        err = -E2BIG;
                } else {
                        l_new = kmalloc(htab->elem_size,
                                        GFP_ATOMIC | __GFP_NOWARN);
                        if (!l_new)
                                return ERR_PTR(-ENOMEM);
                }
        }

        if (err) {
                if (!old_elem_exists)
                        return ERR_PTR(err);

                /* if we're updating the existing element and the hash table
                 * is full, use per-cpu extra elems
                 */
                l_new = this_cpu_ptr(htab->extra_elems);
                if (l_new->state != HTAB_EXTRA_ELEM_FREE)
                        return ERR_PTR(-E2BIG);
                l_new->state = HTAB_EXTRA_ELEM_USED;
        } else {
                l_new->state = HTAB_NOT_AN_EXTRA_ELEM;
        }

        memcpy(l_new->key, key, key_size);
        if (percpu) {
                /* round up value_size to 8 bytes */
                size = round_up(size, 8);

                if (prealloc) {
                        pptr = htab_elem_get_ptr(l_new, key_size);
                } else {
                        /* alloc_percpu zero-fills */
                        pptr = __alloc_percpu_gfp(size, 8,
                                                  GFP_ATOMIC | __GFP_NOWARN);
                        if (!pptr) {
                                kfree(l_new);
                                return ERR_PTR(-ENOMEM);
                        }
                }

                if (!onallcpus) {
                        /* copy true value_size bytes */
                        memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
                } else {
                        int off = 0, cpu;

                        for_each_possible_cpu(cpu) {
                                bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
                                                value + off, size);
                                off += size;
                        }
                }
                if (!prealloc)
                        htab_elem_set_ptr(l_new, key_size, pptr);
        } else {
                memcpy(l_new->key + round_up(key_size, 8), value, size);
        }

        l_new->hash = hash;
        return l_new;
}
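
/* enforce BPF_NOEXIST/BPF_EXIST update flags against the current presence
 * of the element
 */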
static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
                       u64 map_flags)
{
        if (l_old && map_flags == BPF_NOEXIST)
                /* elem already exists */
                return -EEXIST;

        if (!l_old && map_flags == BPF_EXIST)
                /* elem doesn't exist, cannot update it */
                return -ENOENT;

        return 0;
}
/* Called from syscall or from eBPF program */
static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
                                u64 map_flags)
{
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
        struct htab_elem *l_new = NULL, *l_old;
        struct hlist_nulls_head *head;
        unsigned long flags;
        struct bucket *b;
        u32 key_size, hash;
        int ret;

        if (unlikely(map_flags > BPF_EXIST))
                /* unknown flags */
                return -EINVAL;

        WARN_ON_ONCE(!rcu_read_lock_held());

        key_size = map->key_size;

        hash = htab_map_hash(key, key_size);

        b = __select_bucket(htab, hash);
        head = &b->head;

        /* bpf_map_update_elem() can be called in_irq() */
        raw_spin_lock_irqsave(&b->lock, flags);

        l_old = lookup_elem_raw(head, hash, key, key_size);

        ret = check_flags(htab, l_old, map_flags);
        if (ret)
                goto err;

        l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
                                !!l_old);
        if (IS_ERR(l_new)) {
                /* all pre-allocated elements are in use or memory exhausted */
                ret = PTR_ERR(l_new);
                goto err;
        }

        /* add new element to the head of the list, so that
         * concurrent search will find it before old elem
         */
        hlist_nulls_add_head_rcu(&l_new->hash_node, head);
        if (l_old) {
                hlist_nulls_del_rcu(&l_old->hash_node);
                free_htab_elem(htab, l_old);
        }
        ret = 0;
err:
        raw_spin_unlock_irqrestore(&b->lock, flags);
        return ret;
}
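
/* common update path for per-cpu hash maps; onallcpus is true on the
 * syscall path, where the caller supplies one value per possible cpu
 */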
static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
                                         void *value, u64 map_flags,
                                         bool onallcpus)
{
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
        struct htab_elem *l_new = NULL, *l_old;
        struct hlist_nulls_head *head;
        unsigned long flags;
        struct bucket *b;
        u32 key_size, hash;
        int ret;

        if (unlikely(map_flags > BPF_EXIST))
                /* unknown flags */
                return -EINVAL;

        WARN_ON_ONCE(!rcu_read_lock_held());

        key_size = map->key_size;

        hash = htab_map_hash(key, key_size);

        b = __select_bucket(htab, hash);
        head = &b->head;

        /* bpf_map_update_elem() can be called in_irq() */
        raw_spin_lock_irqsave(&b->lock, flags);

        l_old = lookup_elem_raw(head, hash, key, key_size);

        ret = check_flags(htab, l_old, map_flags);
        if (ret)
                goto err;

        if (l_old) {
                void __percpu *pptr = htab_elem_get_ptr(l_old, key_size);
                u32 size = htab->map.value_size;

                /* per-cpu hash map can update value in-place */
                if (!onallcpus) {
                        memcpy(this_cpu_ptr(pptr), value, size);
                } else {
                        int off = 0, cpu;

                        size = round_up(size, 8);
                        for_each_possible_cpu(cpu) {
                                bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
                                                value + off, size);
                                off += size;
                        }
                }
        } else {
                l_new = alloc_htab_elem(htab, key, value, key_size,
                                        hash, true, onallcpus, false);
                if (IS_ERR(l_new)) {
                        ret = PTR_ERR(l_new);
                        goto err;
                }
                hlist_nulls_add_head_rcu(&l_new->hash_node, head);
        }
        ret = 0;
err:
        raw_spin_unlock_irqrestore(&b->lock, flags);
        return ret;
}
static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
                                       void *value, u64 map_flags)
{
        return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
}
/* Called from syscall or from eBPF program */
static int htab_map_delete_elem(struct bpf_map *map, void *key)
{
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
        struct hlist_nulls_head *head;
        struct htab_elem *l;
        unsigned long flags;
        struct bucket *b;
        u32 hash, key_size;
        int ret = -ENOENT;

        WARN_ON_ONCE(!rcu_read_lock_held());

        key_size = map->key_size;

        hash = htab_map_hash(key, key_size);
        b = __select_bucket(htab, hash);
        head = &b->head;

        raw_spin_lock_irqsave(&b->lock, flags);

        l = lookup_elem_raw(head, hash, key, key_size);

        if (l) {
                hlist_nulls_del_rcu(&l->hash_node);
                free_htab_elem(htab, l);
                ret = 0;
        }

        raw_spin_unlock_irqrestore(&b->lock, flags);
        return ret;
}
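
/* drop all elements of a non-preallocated map; extra elems are backed by
 * htab->extra_elems and are freed separately in htab_map_free()
 */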
static void delete_all_elements(struct bpf_htab *htab)
{
        int i;

        for (i = 0; i < htab->n_buckets; i++) {
                struct hlist_nulls_head *head = select_bucket(htab, i);
                struct hlist_nulls_node *n;
                struct htab_elem *l;

                hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
                        hlist_nulls_del_rcu(&l->hash_node);
                        if (l->state != HTAB_EXTRA_ELEM_USED)
                                htab_elem_free(htab, l);
                }
        }
}
/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void htab_map_free(struct bpf_map *map)
{
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);

        /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
         * so the programs (can be more than one that used this map) were
         * disconnected from events. Wait for outstanding critical sections in
         * these programs to complete
         */
        synchronize_rcu();

        /* some of free_htab_elem() callbacks for elements of this map may
         * not have executed. Wait for them.
         */
        rcu_barrier();
        if (htab->map.map_flags & BPF_F_NO_PREALLOC) {
                delete_all_elements(htab);
        } else {
                htab_free_elems(htab);
                pcpu_freelist_destroy(&htab->freelist);
        }
        free_percpu(htab->extra_elems);
        bpf_map_area_free(htab->buckets);
        kfree(htab);
}
static const struct bpf_map_ops htab_ops = {
        .map_alloc = htab_map_alloc,
        .map_free = htab_map_free,
        .map_get_next_key = htab_map_get_next_key,
        .map_lookup_elem = htab_map_lookup_elem,
        .map_update_elem = htab_map_update_elem,
        .map_delete_elem = htab_map_delete_elem,
};
static struct bpf_map_type_list htab_type __read_mostly = {
        .ops = &htab_ops,
        .type = BPF_MAP_TYPE_HASH,
};
/* Called from eBPF program */
static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct htab_elem *l = __htab_map_lookup_elem(map, key);

        if (l)
                return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
        else
                return NULL;
}
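
/* Called from syscall: copy the value of every possible cpu into one flat
 * user-supplied buffer, value_size rounded up to 8 bytes per cpu
 */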
int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
{
        struct htab_elem *l;
        void __percpu *pptr;
        int ret = -ENOENT;
        int cpu, off = 0;
        u32 size;

        /* per_cpu areas are zero-filled and bpf programs can only
         * access 'value_size' of them, so copying rounded areas
         * will not leak any kernel data
         */
        size = round_up(map->value_size, 8);
        rcu_read_lock();
        l = __htab_map_lookup_elem(map, key);
        if (!l)
                goto out;
        pptr = htab_elem_get_ptr(l, map->key_size);
        for_each_possible_cpu(cpu) {
                bpf_long_memcpy(value + off,
                                per_cpu_ptr(pptr, cpu), size);
                off += size;
        }
        ret = 0;
out:
        rcu_read_unlock();
        return ret;
}
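
/* Called from syscall: the caller supplies one value per possible cpu,
 * hence onallcpus == true
 */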
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
                           u64 map_flags)
{
        int ret;

        rcu_read_lock();
        ret = __htab_percpu_map_update_elem(map, key, value, map_flags, true);
        rcu_read_unlock();

        return ret;
}
static const struct bpf_map_ops htab_percpu_ops = {
        .map_alloc = htab_map_alloc,
        .map_free = htab_map_free,
        .map_get_next_key = htab_map_get_next_key,
        .map_lookup_elem = htab_percpu_map_lookup_elem,
        .map_update_elem = htab_percpu_map_update_elem,
        .map_delete_elem = htab_map_delete_elem,
};
static struct bpf_map_type_list htab_percpu_type __read_mostly = {
        .ops = &htab_percpu_ops,
        .type = BPF_MAP_TYPE_PERCPU_HASH,
};
static int __init register_htab_map(void)
{
        bpf_register_map_type(&htab_type);
        bpf_register_map_type(&htab_percpu_type);
        return 0;
}
late_initcall(register_htab_map);