/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include "percpu_freelist.h"
struct bucket {
	struct hlist_nulls_head head;
	raw_spinlock_t lock;
};
struct bpf_htab {
	struct bpf_map map;
	struct bucket *buckets;
	void *elems;
	struct pcpu_freelist freelist;
	void __percpu *extra_elems;
	atomic_t count;	/* number of elements in this hashtable */
	u32 n_buckets;	/* number of hash buckets */
	u32 elem_size;	/* size of each element in bytes */
};
enum extra_elem_state {
	HTAB_NOT_AN_EXTRA_ELEM = 0,
	HTAB_EXTRA_ELEM_FREE,
	HTAB_EXTRA_ELEM_USED,
};
/* each htab element is struct htab_elem + key + value */
struct htab_elem {
	union {
		struct hlist_nulls_node hash_node;
		struct {
			void *padding;
			union {
				struct bpf_htab *htab;
				struct pcpu_freelist_node fnode;
			};
		};
	};
	union {
		struct rcu_head rcu;
		enum extra_elem_state state;
	};
	u32 hash;
	char key[0] __aligned(8);
};
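/* Element layout, as used by the helpers below and by htab_map_alloc():
 * the lookup key is stored starting at 'key'; a regular hash map keeps the
 * value at key + round_up(key_size, 8), while a per-cpu hash map instead
 * stores a void __percpu pointer to the per-cpu value area right after the
 * key (see htab_elem_set_ptr()/htab_elem_get_ptr()).
 */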
static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
				     void __percpu *pptr)
{
	*(void __percpu **)(l->key + key_size) = pptr;
}
static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
{
	return *(void __percpu **)(l->key + key_size);
}
static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
{
	return (struct htab_elem *) (htab->elems + i * htab->elem_size);
}
static void htab_free_elems(struct bpf_htab *htab)
{
	int i;

	if (htab->map.map_type != BPF_MAP_TYPE_PERCPU_HASH)
		goto free_elems;

	for (i = 0; i < htab->map.max_entries; i++) {
		void __percpu *pptr;

		pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
					 htab->map.key_size);
		free_percpu(pptr);
	}
free_elems:
	bpf_map_area_free(htab->elems);
}
static int prealloc_elems_and_freelist(struct bpf_htab *htab)
{
	int err = -ENOMEM, i;

	htab->elems = bpf_map_area_alloc(htab->elem_size *
					 htab->map.max_entries);
	if (!htab->elems)
		return -ENOMEM;

	if (htab->map.map_type != BPF_MAP_TYPE_PERCPU_HASH)
		goto skip_percpu_elems;

	for (i = 0; i < htab->map.max_entries; i++) {
		u32 size = round_up(htab->map.value_size, 8);
		void __percpu *pptr;

		pptr = __alloc_percpu_gfp(size, 8, GFP_USER | __GFP_NOWARN);
		if (!pptr)
			goto free_elems;
		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
				  pptr);
	}

skip_percpu_elems:
	err = pcpu_freelist_init(&htab->freelist);
	if (err)
		goto free_elems;

	pcpu_freelist_populate(&htab->freelist,
			       htab->elems + offsetof(struct htab_elem, fnode),
			       htab->elem_size, htab->map.max_entries);

	return 0;

free_elems:
	htab_free_elems(htab);
	return err;
}
static int alloc_extra_elems(struct bpf_htab *htab)
{
	void __percpu *pptr;
	int cpu;

	pptr = __alloc_percpu_gfp(htab->elem_size, 8, GFP_USER | __GFP_NOWARN);
	if (!pptr)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		((struct htab_elem *)per_cpu_ptr(pptr, cpu))->state =
			HTAB_EXTRA_ELEM_FREE;
	}
	htab->extra_elems = pptr;
	return 0;
}
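/* These spare per-cpu elements are consumed in alloc_htab_elem(): when the
 * table is already at max_entries but the update is replacing an existing
 * key, the current CPU's extra element is used instead of failing the
 * update with -E2BIG.
 */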
/* Called from syscall */
static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_HASH;
	struct bpf_htab *htab;
	int err, i;
	u64 cost;

	BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
		     offsetof(struct htab_elem, hash_node.pprev));
	BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
		     offsetof(struct htab_elem, hash_node.pprev));

	if (attr->map_flags & ~BPF_F_NO_PREALLOC)
		/* reserved bits should not be used */
		return ERR_PTR(-EINVAL);

	htab = kzalloc(sizeof(*htab), GFP_USER);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	/* mandatory map attributes */
	htab->map.map_type = attr->map_type;
	htab->map.key_size = attr->key_size;
	htab->map.value_size = attr->value_size;
	htab->map.max_entries = attr->max_entries;
	htab->map.map_flags = attr->map_flags;

	/* check sanity of attributes.
	 * value_size == 0 may be allowed in the future to use map as a set
	 */
	err = -EINVAL;
	if (htab->map.max_entries == 0 || htab->map.key_size == 0 ||
	    htab->map.value_size == 0)
		goto free_htab;

	/* hash table size must be power of 2 */
	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);

	err = -E2BIG;
	if (htab->map.key_size > MAX_BPF_STACK)
		/* eBPF programs initialize keys on stack, so they cannot be
		 * larger than max stack size
		 */
		goto free_htab;

	if (htab->map.value_size >= (1 << (KMALLOC_SHIFT_MAX - 1)) -
	    MAX_BPF_STACK - sizeof(struct htab_elem))
		/* if value_size is bigger, the user space won't be able to
		 * access the elements via bpf syscall. This check also makes
		 * sure that the elem_size doesn't overflow and it's
		 * kmalloc-able later in htab_map_update_elem()
		 */
		goto free_htab;

	if (percpu && round_up(htab->map.value_size, 8) > PCPU_MIN_UNIT_SIZE)
		/* make sure the size for pcpu_alloc() is reasonable */
		goto free_htab;

	htab->elem_size = sizeof(struct htab_elem) +
			  round_up(htab->map.key_size, 8);
	if (percpu)
		htab->elem_size += sizeof(void *);
	else
		htab->elem_size += round_up(htab->map.value_size, 8);

	/* prevent zero size kmalloc and check for u32 overflow */
	if (htab->n_buckets == 0 ||
	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
		goto free_htab;

	cost = (u64) htab->n_buckets * sizeof(struct bucket) +
	       (u64) htab->elem_size * htab->map.max_entries;

	if (percpu)
		cost += (u64) round_up(htab->map.value_size, 8) *
			num_possible_cpus() * htab->map.max_entries;
	else
		cost += (u64) htab->elem_size * num_possible_cpus();

	if (cost >= U32_MAX - PAGE_SIZE)
		/* make sure page count doesn't overflow */
		goto free_htab;

	htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(htab->map.pages);
	if (err)
		goto free_htab;

	err = -ENOMEM;
	htab->buckets = bpf_map_area_alloc(htab->n_buckets *
					   sizeof(struct bucket));
	if (!htab->buckets)
		goto free_htab;

	for (i = 0; i < htab->n_buckets; i++) {
		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
		raw_spin_lock_init(&htab->buckets[i].lock);
	}

	if (!percpu) {
		err = alloc_extra_elems(htab);
		if (err)
			goto free_buckets;
	}

	if (!(attr->map_flags & BPF_F_NO_PREALLOC)) {
		err = prealloc_elems_and_freelist(htab);
		if (err)
			goto free_extra_elems;
	}

	return &htab->map;

free_extra_elems:
	free_percpu(htab->extra_elems);
free_buckets:
	bpf_map_area_free(htab->buckets);
free_htab:
	kfree(htab);
	return ERR_PTR(err);
}
static inline u32 htab_map_hash(const void *key, u32 key_len)
{
	return jhash(key, key_len, 0);
}
static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &htab->buckets[hash & (htab->n_buckets - 1)];
}
static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &__select_bucket(htab, hash)->head;
}
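/* n_buckets is rounded up to a power of two in htab_map_alloc(), so
 * 'hash & (n_buckets - 1)' above is equivalent to 'hash % n_buckets'
 * without a division; e.g. with n_buckets == 8 the low three bits of the
 * hash select the bucket.
 */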
/* this lookup function can only be called with bucket lock taken */
static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
					 void *key, u32 key_size)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	return NULL;
}
/* can be called without bucket lock. it will repeat the loop in
 * the unlikely event when elements moved from one bucket into another
 * while link list is being walked
 */
static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
					       u32 hash, void *key,
					       u32 key_size, u32 n_buckets)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

again:
	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
		goto again;

	return NULL;
}
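/* The retry above relies on the nulls marker: each bucket's list is
 * terminated with a nulls value equal to its bucket index (see
 * INIT_HLIST_NULLS_HEAD(..., i) in htab_map_alloc()). If a lockless walker
 * finishes on a marker that belongs to a different bucket, an element it
 * was traversing was freed and reused in another bucket, so the lookup is
 * restarted.
 */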
/* Called from syscall or from eBPF program */
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	u32 hash, key_size;

	/* Must be called with rcu_read_lock. */
	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	return l;
}
static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return l->key + round_up(map->key_size, 8);

	return NULL;
}
/* Called from syscall */
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l, *next_l;
	u32 hash, key_size;
	int i = 0;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	if (!key)
		goto find_first_elem;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	/* lookup the key */
	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	if (!l)
		goto find_first_elem;

	/* key was found, get next key in the same bucket */
	next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
					struct htab_elem, hash_node);

	if (next_l) {
		/* if next elem in this hash list is non-zero, just return it */
		memcpy(next_key, next_l->key, key_size);
		return 0;
	}

	/* no more elements in this hash list, go to the next bucket */
	i = hash & (htab->n_buckets - 1);
	i++;

find_first_elem:
	/* iterate over buckets */
	for (; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		/* pick first element in the bucket */
		next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
						struct htab_elem, hash_node);
		if (next_l) {
			/* if it's not empty, just return it */
			memcpy(next_key, next_l->key, key_size);
			return 0;
		}
	}

	/* iterated over all buckets and all elements */
	return -ENOENT;
}
static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
{
	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
		free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
	kfree(l);
}
static void htab_elem_free_rcu(struct rcu_head *head)
{
	struct htab_elem *l = container_of(head, struct htab_elem, rcu);
	struct bpf_htab *htab = l->htab;

	htab_elem_free(htab, l);
}
static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
	if (l->state == HTAB_EXTRA_ELEM_USED) {
		l->state = HTAB_EXTRA_ELEM_FREE;
		return;
	}

	if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) {
		pcpu_freelist_push(&htab->freelist, &l->fnode);
	} else {
		atomic_dec(&htab->count);
		l->htab = htab;
		call_rcu(&l->rcu, htab_elem_free_rcu);
	}
}
static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
					 void *value, u32 key_size, u32 hash,
					 bool percpu, bool onallcpus,
					 bool old_elem_exists)
{
	u32 size = htab->map.value_size;
	bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC);
	struct htab_elem *l_new;
	void __percpu *pptr;
	int err = 0;

	if (prealloc) {
		struct pcpu_freelist_node *l;

		l = pcpu_freelist_pop(&htab->freelist);
		if (!l)
			err = -E2BIG;
		else
			l_new = container_of(l, struct htab_elem, fnode);
	} else {
		if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
			atomic_dec(&htab->count);
			err = -E2BIG;
		} else {
			l_new = kmalloc(htab->elem_size,
					GFP_ATOMIC | __GFP_NOWARN);
			if (!l_new)
				return ERR_PTR(-ENOMEM);
		}
	}

	if (err) {
		if (!old_elem_exists)
			return ERR_PTR(err);

		/* if we're updating the existing element and the hash table
		 * is full, use per-cpu extra elems
		 */
		l_new = this_cpu_ptr(htab->extra_elems);
		if (l_new->state != HTAB_EXTRA_ELEM_FREE)
			return ERR_PTR(-E2BIG);
		l_new->state = HTAB_EXTRA_ELEM_USED;
	} else {
		l_new->state = HTAB_NOT_AN_EXTRA_ELEM;
	}

	memcpy(l_new->key, key, key_size);
	if (percpu) {
		/* round up value_size to 8 bytes */
		size = round_up(size, 8);

		if (prealloc) {
			pptr = htab_elem_get_ptr(l_new, key_size);
		} else {
			/* alloc_percpu zero-fills */
			pptr = __alloc_percpu_gfp(size, 8,
						  GFP_ATOMIC | __GFP_NOWARN);
			if (!pptr) {
				kfree(l_new);
				return ERR_PTR(-ENOMEM);
			}
		}

		if (!onallcpus) {
			/* copy true value_size bytes */
			memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
		} else {
			int off = 0, cpu;

			for_each_possible_cpu(cpu) {
				bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
						value + off, size);
				off += size;
			}
		}
		if (!prealloc)
			htab_elem_set_ptr(l_new, key_size, pptr);
	} else {
		memcpy(l_new->key + round_up(key_size, 8), value, size);
	}

	l_new->hash = hash;
	return l_new;
}
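/* Allocation strategy: preallocated maps (the default) pop a free element
 * from the per-cpu freelist and never allocate at update time, while maps
 * created with BPF_F_NO_PREALLOC kmalloc each element with GFP_ATOMIC and
 * bound the total via htab->count. Per-cpu values live in a separate
 * __percpu area referenced from the element.
 */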
static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
		       u64 map_flags)
{
	if (l_old && map_flags == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!l_old && map_flags == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}
/* Called from syscall or from eBPF program */
static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
				!!l_old);
	if (IS_ERR(l_new)) {
		/* all pre-allocated elements are in use or memory exhausted */
		ret = PTR_ERR(l_new);
		goto err;
	}

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		hlist_nulls_del_rcu(&l_old->hash_node);
		free_htab_elem(htab, l_old);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}
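/* Replacement order matters for lockless readers: the new element is linked
 * at the head of the bucket before the old one is unlinked, so a concurrent
 * RCU lookup always observes either the old or the new value for the key,
 * never neither.
 */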
static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
					 void *value, u64 map_flags,
					 bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		void __percpu *pptr = htab_elem_get_ptr(l_old, key_size);
		u32 size = htab->map.value_size;

		/* per-cpu hash map can update value in-place */
		if (!onallcpus) {
			memcpy(this_cpu_ptr(pptr), value, size);
		} else {
			int off = 0, cpu;

			size = round_up(size, 8);
			for_each_possible_cpu(cpu) {
				bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
						value + off, size);
				off += size;
			}
		}
	} else {
		l_new = alloc_htab_elem(htab, key, value, key_size,
					hash, true, onallcpus, false);
		if (IS_ERR(l_new)) {
			ret = PTR_ERR(l_new);
			goto err;
		}
		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}
static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags)
{
	return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
}
/* Called from syscall or from eBPF program */
static int htab_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_nulls_del_rcu(&l->hash_node);
		free_htab_elem(htab, l);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}
static void delete_all_elements(struct bpf_htab *htab)
{
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		struct hlist_nulls_head *head = select_bucket(htab, i);
		struct hlist_nulls_node *n;
		struct htab_elem *l;

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			hlist_nulls_del_rcu(&l->hash_node);
			if (l->state != HTAB_EXTRA_ELEM_USED)
				htab_elem_free(htab, l);
		}
	}
}
/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete
	 */
	synchronize_rcu();

	/* some of free_htab_elem() callbacks for elements of this map may
	 * not have executed. Wait for them.
	 */
	rcu_barrier();
	if (htab->map.map_flags & BPF_F_NO_PREALLOC) {
		delete_all_elements(htab);
	} else {
		htab_free_elems(htab);
		pcpu_freelist_destroy(&htab->freelist);
	}
	free_percpu(htab->extra_elems);
	bpf_map_area_free(htab->buckets);
	kfree(htab);
}
static const struct bpf_map_ops htab_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_map_lookup_elem,
	.map_update_elem = htab_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
};
static struct bpf_map_type_list htab_type __read_mostly = {
	.ops = &htab_ops,
	.type = BPF_MAP_TYPE_HASH,
};
/* Called from eBPF program */
static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	else
		return NULL;
}
int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
{
	struct htab_elem *l;
	void __percpu *pptr;
	int ret = -ENOENT;
	int cpu, off = 0;
	u32 size;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	l = __htab_map_lookup_elem(map, key);
	if (!l)
		goto out;
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off,
				per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 map_flags)
{
	int ret;

	rcu_read_lock();
	ret = __htab_percpu_map_update_elem(map, key, value, map_flags, true);
	rcu_read_unlock();

	return ret;
}
static const struct bpf_map_ops htab_percpu_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_percpu_map_lookup_elem,
	.map_update_elem = htab_percpu_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
};
static struct bpf_map_type_list htab_percpu_type __read_mostly = {
	.ops = &htab_percpu_ops,
	.type = BPF_MAP_TYPE_PERCPU_HASH,
};
static int __init register_htab_map(void)
{
	bpf_register_map_type(&htab_type);
	bpf_register_map_type(&htab_percpu_type);
	return 0;
}
late_initcall(register_htab_map);
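/* Illustrative sketch of how the two map types registered above are used
 * from user space via the bpf(2) syscall; the wrapper names below
 * (bpf_create_map, bpf_update_elem, bpf_lookup_elem) are the helpers from
 * samples/bpf/libbpf.h, not part of this file:
 *
 *	int fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(__u32),
 *				sizeof(long), 256, 0);
 *	__u32 key = 1;
 *	long val = 42;
 *
 *	bpf_update_elem(fd, &key, &val, BPF_ANY);   // htab_map_update_elem()
 *	bpf_lookup_elem(fd, &key, &val);            // htab_map_lookup_elem()
 *
 * For BPF_MAP_TYPE_PERCPU_HASH the value buffer passed from user space must
 * hold num_possible_cpus() copies of the value, each rounded up to 8 bytes
 * (see bpf_percpu_hash_copy()/bpf_percpu_hash_update() above).
 */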