Linux 4.9.243
kernel/bpf/hashtab.c
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include "percpu_freelist.h"
struct bucket {
        struct hlist_nulls_head head;
        raw_spinlock_t lock;
};

struct bpf_htab {
        struct bpf_map map;
        struct bucket *buckets;
        void *elems;
        struct pcpu_freelist freelist;
        void __percpu *extra_elems;
        atomic_t count; /* number of elements in this hashtable */
        u32 n_buckets;  /* number of hash buckets */
        u32 elem_size;  /* size of each element in bytes */
};

enum extra_elem_state {
        HTAB_NOT_AN_EXTRA_ELEM = 0,
        HTAB_EXTRA_ELEM_FREE,
        HTAB_EXTRA_ELEM_USED
};

/* each htab element is struct htab_elem + key + value */
struct htab_elem {
        union {
                struct hlist_nulls_node hash_node;
                struct {
                        void *padding;
                        union {
                                struct bpf_htab *htab;
                                struct pcpu_freelist_node fnode;
                        };
                };
        };
        union {
                struct rcu_head rcu;
                enum extra_elem_state state;
        };
        u32 hash;
        char key[0] __aligned(8);
};
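
/* Worked layout example (editorial sketch, not part of the original
 * source): for a plain BPF_MAP_TYPE_HASH map with key_size = 6 and
 * value_size = 20, htab_map_alloc() below computes
 *
 *   elem_size = sizeof(struct htab_elem)   // header above, incl. 'hash'
 *             + round_up(6, 8)             // key padded to 8
 *             + round_up(20, 8)            // value padded to 24
 *
 * For BPF_MAP_TYPE_PERCPU_HASH the inline value is replaced by a single
 * pointer-sized slot holding the __percpu pointer, accessed via
 * htab_elem_set_ptr()/htab_elem_get_ptr() below.
 */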
static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
                                     void __percpu *pptr)
{
        *(void __percpu **)(l->key + key_size) = pptr;
}

static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
{
        return *(void __percpu **)(l->key + key_size);
}

static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
{
        return (struct htab_elem *) (htab->elems + i * htab->elem_size);
}
static void htab_free_elems(struct bpf_htab *htab)
{
        int i;

        if (htab->map.map_type != BPF_MAP_TYPE_PERCPU_HASH)
                goto free_elems;

        for (i = 0; i < htab->map.max_entries; i++) {
                void __percpu *pptr;

                pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
                                         htab->map.key_size);
                free_percpu(pptr);
        }
free_elems:
        bpf_map_area_free(htab->elems);
}
static int prealloc_elems_and_freelist(struct bpf_htab *htab)
{
        int err = -ENOMEM, i;

        htab->elems = bpf_map_area_alloc(htab->elem_size *
                                         htab->map.max_entries);
        if (!htab->elems)
                return -ENOMEM;

        if (htab->map.map_type != BPF_MAP_TYPE_PERCPU_HASH)
                goto skip_percpu_elems;

        for (i = 0; i < htab->map.max_entries; i++) {
                u32 size = round_up(htab->map.value_size, 8);
                void __percpu *pptr;

                pptr = __alloc_percpu_gfp(size, 8, GFP_USER | __GFP_NOWARN);
                if (!pptr)
                        goto free_elems;
                htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
                                  pptr);
        }

skip_percpu_elems:
        err = pcpu_freelist_init(&htab->freelist);
        if (err)
                goto free_elems;

        pcpu_freelist_populate(&htab->freelist,
                               htab->elems + offsetof(struct htab_elem, fnode),
                               htab->elem_size, htab->map.max_entries);

        return 0;

free_elems:
        htab_free_elems(htab);
        return err;
}
static int alloc_extra_elems(struct bpf_htab *htab)
{
        void __percpu *pptr;
        int cpu;

        pptr = __alloc_percpu_gfp(htab->elem_size, 8, GFP_USER | __GFP_NOWARN);
        if (!pptr)
                return -ENOMEM;

        for_each_possible_cpu(cpu) {
                ((struct htab_elem *)per_cpu_ptr(pptr, cpu))->state =
                        HTAB_EXTRA_ELEM_FREE;
        }
        htab->extra_elems = pptr;
        return 0;
}
/* Called from syscall */
static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
        bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_HASH;
        struct bpf_htab *htab;
        int err, i;
        u64 cost;

        BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
                     offsetof(struct htab_elem, hash_node.pprev));
        BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
                     offsetof(struct htab_elem, hash_node.pprev));

        if (attr->map_flags & ~BPF_F_NO_PREALLOC)
                /* reserved bits should not be used */
                return ERR_PTR(-EINVAL);

        htab = kzalloc(sizeof(*htab), GFP_USER);
        if (!htab)
                return ERR_PTR(-ENOMEM);

        /* mandatory map attributes */
        htab->map.map_type = attr->map_type;
        htab->map.key_size = attr->key_size;
        htab->map.value_size = attr->value_size;
        htab->map.max_entries = attr->max_entries;
        htab->map.map_flags = attr->map_flags;

        /* check sanity of attributes.
         * value_size == 0 may be allowed in the future to use map as a set
         */
        err = -EINVAL;
        if (htab->map.max_entries == 0 || htab->map.key_size == 0 ||
            htab->map.value_size == 0)
                goto free_htab;

        /* hash table size must be power of 2 */
        htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);

        err = -E2BIG;
        if (htab->map.key_size > MAX_BPF_STACK)
                /* eBPF programs initialize keys on stack, so they cannot be
                 * larger than max stack size
                 */
                goto free_htab;

        if (htab->map.value_size >= (1 << (KMALLOC_SHIFT_MAX - 1)) -
            MAX_BPF_STACK - sizeof(struct htab_elem))
                /* if value_size is bigger, the user space won't be able to
                 * access the elements via bpf syscall. This check also makes
                 * sure that the elem_size doesn't overflow and it's
                 * kmalloc-able later in htab_map_update_elem()
                 */
                goto free_htab;

        if (percpu && round_up(htab->map.value_size, 8) > PCPU_MIN_UNIT_SIZE)
                /* make sure the size for pcpu_alloc() is reasonable */
                goto free_htab;

        htab->elem_size = sizeof(struct htab_elem) +
                          round_up(htab->map.key_size, 8);
        if (percpu)
                htab->elem_size += sizeof(void *);
        else
                htab->elem_size += round_up(htab->map.value_size, 8);

        /* prevent zero size kmalloc and check for u32 overflow */
        if (htab->n_buckets == 0 ||
            htab->n_buckets > U32_MAX / sizeof(struct bucket))
                goto free_htab;

        cost = (u64) htab->n_buckets * sizeof(struct bucket) +
               (u64) htab->elem_size * htab->map.max_entries;

        if (percpu)
                cost += (u64) round_up(htab->map.value_size, 8) *
                        num_possible_cpus() * htab->map.max_entries;
        else
                cost += (u64) htab->elem_size * num_possible_cpus();

        if (cost >= U32_MAX - PAGE_SIZE)
                /* make sure page count doesn't overflow */
                goto free_htab;

        htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

        /* if map size is larger than memlock limit, reject it early */
        err = bpf_map_precharge_memlock(htab->map.pages);
        if (err)
                goto free_htab;

        err = -ENOMEM;
        htab->buckets = bpf_map_area_alloc(htab->n_buckets *
                                           sizeof(struct bucket));
        if (!htab->buckets)
                goto free_htab;

        for (i = 0; i < htab->n_buckets; i++) {
                INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
                raw_spin_lock_init(&htab->buckets[i].lock);
        }

        if (!percpu) {
                err = alloc_extra_elems(htab);
                if (err)
                        goto free_buckets;
        }

        if (!(attr->map_flags & BPF_F_NO_PREALLOC)) {
                err = prealloc_elems_and_freelist(htab);
                if (err)
                        goto free_extra_elems;
        }

        return &htab->map;

free_extra_elems:
        free_percpu(htab->extra_elems);
free_buckets:
        bpf_map_area_free(htab->buckets);
free_htab:
        kfree(htab);
        return ERR_PTR(err);
}
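
/* Editorial sketch of the user-space side of this allocation path,
 * assuming the raw bpf(2) syscall (no libbpf); create_hash_map() is a
 * hypothetical helper. With max_entries = 100, htab_map_alloc() above
 * sizes the table at n_buckets = roundup_pow_of_two(100) = 128 and
 * preallocates all elements, since map_flags lacks BPF_F_NO_PREALLOC:
 *
 *   #include <linux/bpf.h>
 *   #include <string.h>
 *   #include <sys/syscall.h>
 *   #include <unistd.h>
 *
 *   static int create_hash_map(void)
 *   {
 *           union bpf_attr attr;
 *
 *           memset(&attr, 0, sizeof(attr));
 *           attr.map_type    = BPF_MAP_TYPE_HASH;
 *           attr.key_size    = 4;     // e.g. a u32 key
 *           attr.value_size  = 8;     // e.g. a u64 counter
 *           attr.max_entries = 100;
 *           attr.map_flags   = 0;     // preallocated elements
 *
 *           return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *   }
 */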
static inline u32 htab_map_hash(const void *key, u32 key_len)
{
        return jhash(key, key_len, 0);
}

static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
{
        return &htab->buckets[hash & (htab->n_buckets - 1)];
}

static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
        return &__select_bucket(htab, hash)->head;
}
/* this lookup function can only be called with bucket lock taken */
static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
                                         void *key, u32 key_size)
{
        struct hlist_nulls_node *n;
        struct htab_elem *l;

        hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
                if (l->hash == hash && !memcmp(&l->key, key, key_size))
                        return l;

        return NULL;
}
/* can be called without bucket lock. it will repeat the loop in
 * the unlikely event when elements moved from one bucket into another
 * while link list is being walked
 */
static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
                                               u32 hash, void *key,
                                               u32 key_size, u32 n_buckets)
{
        struct hlist_nulls_node *n;
        struct htab_elem *l;

again:
        hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
                if (l->hash == hash && !memcmp(&l->key, key, key_size))
                        return l;

        if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
                goto again;

        return NULL;
}
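
/* Editorial note on the retry above: each bucket list is terminated by
 * a "nulls" marker that encodes the owning bucket index (see
 * INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i) in htab_map_alloc()).
 * With preallocated maps an element can be freed and immediately reused
 * in a different bucket while a lockless walker still traverses it, so
 * the walk can end on the wrong bucket's nulls value. In that case
 * get_nulls_value(n) != (hash & (n_buckets - 1)) and the loop restarts
 * from the original head, so a key that stayed in this bucket is never
 * mistakenly reported as absent.
 */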
/* Called from syscall or from eBPF program */
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
        struct hlist_nulls_head *head;
        struct htab_elem *l;
        u32 hash, key_size;

        /* Must be called with rcu_read_lock. */
        WARN_ON_ONCE(!rcu_read_lock_held());

        key_size = map->key_size;

        hash = htab_map_hash(key, key_size);

        head = select_bucket(htab, hash);

        l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

        return l;
}
static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct htab_elem *l = __htab_map_lookup_elem(map, key);

        if (l)
                return l->key + round_up(map->key_size, 8);

        return NULL;
}
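
/* Program-side usage sketch (editorial; 'my_map' and the calling
 * context are assumptions). The pointer returned above,
 * l->key + round_up(key_size, 8), is exactly what the
 * bpf_map_lookup_elem() helper hands back to an eBPF program:
 *
 *   u32 key = 1;
 *   u64 *val = bpf_map_lookup_elem(&my_map, &key);
 *
 *   if (val)
 *           __sync_fetch_and_add(val, 1);  // direct access to value memory
 */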
/* Called from syscall */
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
        struct hlist_nulls_head *head;
        struct htab_elem *l, *next_l;
        u32 hash, key_size;
        int i = 0;

        WARN_ON_ONCE(!rcu_read_lock_held());

        key_size = map->key_size;

        if (!key)
                goto find_first_elem;

        hash = htab_map_hash(key, key_size);

        head = select_bucket(htab, hash);

        /* lookup the key */
        l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

        if (!l)
                goto find_first_elem;

        /* key was found, get next key in the same bucket */
        next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
                                        struct htab_elem, hash_node);

        if (next_l) {
                /* if next elem in this hash list is non-zero, just return it */
                memcpy(next_key, next_l->key, key_size);
                return 0;
        }

        /* no more elements in this hash list, go to the next bucket */
        i = hash & (htab->n_buckets - 1);
        i++;

find_first_elem:
        /* iterate over buckets */
        for (; i < htab->n_buckets; i++) {
                head = select_bucket(htab, i);

                /* pick first element in the bucket */
                next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
                                                struct htab_elem, hash_node);
                if (next_l) {
                        /* if it's not empty, just return it */
                        memcpy(next_key, next_l->key, key_size);
                        return 0;
                }
        }

        /* iterated over all buckets and all elements */
        return -ENOENT;
}
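
/* User-space iteration sketch (editorial; map_fd and KEY_SIZE are
 * assumptions, bpf_map_get_next_key() is the usual wrapper around the
 * BPF_MAP_GET_NEXT_KEY command). Passing a key that is not in the map
 * falls through to find_first_elem above, so a full scan looks like:
 *
 *   char key[KEY_SIZE], next_key[KEY_SIZE];
 *
 *   memset(key, 0xff, sizeof(key));       // assumed not to be a live key
 *   while (!bpf_map_get_next_key(map_fd, key, next_key)) {
 *           // ... consume next_key ...
 *           memcpy(key, next_key, sizeof(key));
 *   }
 *   // the call fails with ENOENT once all buckets have been visited
 */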
static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
{
        if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
                free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
        kfree(l);
}

static void htab_elem_free_rcu(struct rcu_head *head)
{
        struct htab_elem *l = container_of(head, struct htab_elem, rcu);
        struct bpf_htab *htab = l->htab;

        htab_elem_free(htab, l);
}

static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
        if (l->state == HTAB_EXTRA_ELEM_USED) {
                l->state = HTAB_EXTRA_ELEM_FREE;
                return;
        }

        if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) {
                pcpu_freelist_push(&htab->freelist, &l->fnode);
        } else {
                atomic_dec(&htab->count);
                l->htab = htab;
                call_rcu(&l->rcu, htab_elem_free_rcu);
        }
}
static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
                                         void *value, u32 key_size, u32 hash,
                                         bool percpu, bool onallcpus,
                                         bool old_elem_exists)
{
        u32 size = htab->map.value_size;
        bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC);
        struct htab_elem *l_new;
        void __percpu *pptr;
        int err = 0;

        if (prealloc) {
                struct pcpu_freelist_node *l;

                l = pcpu_freelist_pop(&htab->freelist);
                if (!l)
                        err = -E2BIG;
                else
                        l_new = container_of(l, struct htab_elem, fnode);
        } else {
                if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
                        atomic_dec(&htab->count);
                        err = -E2BIG;
                } else {
                        l_new = kmalloc(htab->elem_size,
                                        GFP_ATOMIC | __GFP_NOWARN);
                        if (!l_new)
                                return ERR_PTR(-ENOMEM);
                }
        }

        if (err) {
                if (!old_elem_exists)
                        return ERR_PTR(err);

                /* if we're updating the existing element and the hash table
                 * is full, use per-cpu extra elems
                 */
                l_new = this_cpu_ptr(htab->extra_elems);
                if (l_new->state != HTAB_EXTRA_ELEM_FREE)
                        return ERR_PTR(-E2BIG);
                l_new->state = HTAB_EXTRA_ELEM_USED;
        } else {
                l_new->state = HTAB_NOT_AN_EXTRA_ELEM;
        }

        memcpy(l_new->key, key, key_size);
        if (percpu) {
                /* round up value_size to 8 bytes */
                size = round_up(size, 8);

                if (prealloc) {
                        pptr = htab_elem_get_ptr(l_new, key_size);
                } else {
                        /* alloc_percpu zero-fills */
                        pptr = __alloc_percpu_gfp(size, 8,
                                                  GFP_ATOMIC | __GFP_NOWARN);
                        if (!pptr) {
                                kfree(l_new);
                                return ERR_PTR(-ENOMEM);
                        }
                }

                if (!onallcpus) {
                        /* copy true value_size bytes */
                        memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
                } else {
                        int off = 0, cpu;

                        for_each_possible_cpu(cpu) {
                                bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
                                                value + off, size);
                                off += size;
                        }
                }
                if (!prealloc)
                        htab_elem_set_ptr(l_new, key_size, pptr);
        } else {
                memcpy(l_new->key + round_up(key_size, 8), value, size);
        }

        l_new->hash = hash;
        return l_new;
}
static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
                       u64 map_flags)
{
        if (l_old && map_flags == BPF_NOEXIST)
                /* elem already exists */
                return -EEXIST;

        if (!l_old && map_flags == BPF_EXIST)
                /* elem doesn't exist, cannot update it */
                return -ENOENT;

        return 0;
}
/* Called from syscall or from eBPF program */
static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
                                u64 map_flags)
{
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
        struct htab_elem *l_new = NULL, *l_old;
        struct hlist_nulls_head *head;
        unsigned long flags;
        struct bucket *b;
        u32 key_size, hash;
        int ret;

        if (unlikely(map_flags > BPF_EXIST))
                /* unknown flags */
                return -EINVAL;

        WARN_ON_ONCE(!rcu_read_lock_held());

        key_size = map->key_size;

        hash = htab_map_hash(key, key_size);

        b = __select_bucket(htab, hash);
        head = &b->head;

        /* bpf_map_update_elem() can be called in_irq() */
        raw_spin_lock_irqsave(&b->lock, flags);

        l_old = lookup_elem_raw(head, hash, key, key_size);

        ret = check_flags(htab, l_old, map_flags);
        if (ret)
                goto err;

        l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
                                !!l_old);
        if (IS_ERR(l_new)) {
                /* all pre-allocated elements are in use or memory exhausted */
                ret = PTR_ERR(l_new);
                goto err;
        }

        /* add new element to the head of the list, so that
         * concurrent search will find it before old elem
         */
        hlist_nulls_add_head_rcu(&l_new->hash_node, head);
        if (l_old) {
                hlist_nulls_del_rcu(&l_old->hash_node);
                free_htab_elem(htab, l_old);
        }
        ret = 0;
err:
        raw_spin_unlock_irqrestore(&b->lock, flags);
        return ret;
}
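
/* Flag semantics sketch, as enforced by check_flags() above (editorial;
 * map_fd/key/val are placeholders for a user-space caller):
 *
 *   bpf_map_update_elem(map_fd, &key, &val, BPF_NOEXIST); // create only;
 *                                                         // EEXIST if present
 *   bpf_map_update_elem(map_fd, &key, &val, BPF_EXIST);   // update only;
 *                                                         // ENOENT if absent
 *   bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);     // create or update
 *
 * Note the replace order above: the new element is linked in before the
 * old one is unlinked, so concurrent lockless lookups always observe
 * either the old or the new value, never a temporarily missing key.
 */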
static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
                                         void *value, u64 map_flags,
                                         bool onallcpus)
{
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
        struct htab_elem *l_new = NULL, *l_old;
        struct hlist_nulls_head *head;
        unsigned long flags;
        struct bucket *b;
        u32 key_size, hash;
        int ret;

        if (unlikely(map_flags > BPF_EXIST))
                /* unknown flags */
                return -EINVAL;

        WARN_ON_ONCE(!rcu_read_lock_held());

        key_size = map->key_size;

        hash = htab_map_hash(key, key_size);

        b = __select_bucket(htab, hash);
        head = &b->head;

        /* bpf_map_update_elem() can be called in_irq() */
        raw_spin_lock_irqsave(&b->lock, flags);

        l_old = lookup_elem_raw(head, hash, key, key_size);

        ret = check_flags(htab, l_old, map_flags);
        if (ret)
                goto err;

        if (l_old) {
                void __percpu *pptr = htab_elem_get_ptr(l_old, key_size);
                u32 size = htab->map.value_size;

                /* per-cpu hash map can update value in-place */
                if (!onallcpus) {
                        memcpy(this_cpu_ptr(pptr), value, size);
                } else {
                        int off = 0, cpu;

                        size = round_up(size, 8);
                        for_each_possible_cpu(cpu) {
                                bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
                                                value + off, size);
                                off += size;
                        }
                }
        } else {
                l_new = alloc_htab_elem(htab, key, value, key_size,
                                        hash, true, onallcpus, false);
                if (IS_ERR(l_new)) {
                        ret = PTR_ERR(l_new);
                        goto err;
                }
                hlist_nulls_add_head_rcu(&l_new->hash_node, head);
        }
        ret = 0;
err:
        raw_spin_unlock_irqrestore(&b->lock, flags);
        return ret;
}
static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
                                       void *value, u64 map_flags)
{
        return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
}
/* Called from syscall or from eBPF program */
static int htab_map_delete_elem(struct bpf_map *map, void *key)
{
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
        struct hlist_nulls_head *head;
        struct bucket *b;
        struct htab_elem *l;
        unsigned long flags;
        u32 hash, key_size;
        int ret = -ENOENT;

        WARN_ON_ONCE(!rcu_read_lock_held());

        key_size = map->key_size;

        hash = htab_map_hash(key, key_size);
        b = __select_bucket(htab, hash);
        head = &b->head;

        raw_spin_lock_irqsave(&b->lock, flags);

        l = lookup_elem_raw(head, hash, key, key_size);

        if (l) {
                hlist_nulls_del_rcu(&l->hash_node);
                free_htab_elem(htab, l);
                ret = 0;
        }

        raw_spin_unlock_irqrestore(&b->lock, flags);
        return ret;
}
static void delete_all_elements(struct bpf_htab *htab)
{
        int i;

        for (i = 0; i < htab->n_buckets; i++) {
                struct hlist_nulls_head *head = select_bucket(htab, i);
                struct hlist_nulls_node *n;
                struct htab_elem *l;

                hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
                        hlist_nulls_del_rcu(&l->hash_node);
                        if (l->state != HTAB_EXTRA_ELEM_USED)
                                htab_elem_free(htab, l);
                }
        }
}
/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void htab_map_free(struct bpf_map *map)
{
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);

        /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
         * so the programs (can be more than one that used this map) were
         * disconnected from events. Wait for outstanding critical sections in
         * these programs to complete
         */
        synchronize_rcu();

        /* some of free_htab_elem() callbacks for elements of this map may
         * not have executed. Wait for them.
         */
        rcu_barrier();
        if (htab->map.map_flags & BPF_F_NO_PREALLOC) {
                delete_all_elements(htab);
        } else {
                htab_free_elems(htab);
                pcpu_freelist_destroy(&htab->freelist);
        }
        free_percpu(htab->extra_elems);
        bpf_map_area_free(htab->buckets);
        kfree(htab);
}
static const struct bpf_map_ops htab_ops = {
        .map_alloc = htab_map_alloc,
        .map_free = htab_map_free,
        .map_get_next_key = htab_map_get_next_key,
        .map_lookup_elem = htab_map_lookup_elem,
        .map_update_elem = htab_map_update_elem,
        .map_delete_elem = htab_map_delete_elem,
};

static struct bpf_map_type_list htab_type __read_mostly = {
        .ops = &htab_ops,
        .type = BPF_MAP_TYPE_HASH,
};
/* Called from eBPF program */
static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct htab_elem *l = __htab_map_lookup_elem(map, key);

        if (l)
                return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
        else
                return NULL;
}
int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
{
        struct htab_elem *l;
        void __percpu *pptr;
        int ret = -ENOENT;
        int cpu, off = 0;
        u32 size;

        /* per_cpu areas are zero-filled and bpf programs can only
         * access 'value_size' of them, so copying rounded areas
         * will not leak any kernel data
         */
        size = round_up(map->value_size, 8);
        rcu_read_lock();
        l = __htab_map_lookup_elem(map, key);
        if (!l)
                goto out;
        pptr = htab_elem_get_ptr(l, map->key_size);
        for_each_possible_cpu(cpu) {
                bpf_long_memcpy(value + off,
                                per_cpu_ptr(pptr, cpu), size);
                off += size;
        }
        ret = 0;
out:
        rcu_read_unlock();
        return ret;
}
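
/* User-space sketch for consuming the per-cpu copy above (editorial;
 * get_nr_possible_cpus() is a hypothetical helper, e.g. backed by
 * /sys/devices/system/cpu/possible). The value buffer must provide one
 * round_up(value_size, 8) slot per possible CPU:
 *
 *   unsigned int ncpus = get_nr_possible_cpus();
 *   __u64 *vals = calloc(ncpus, sizeof(__u64)); // value_size == 8 here
 *   __u64 sum = 0;
 *
 *   if (vals && !bpf_map_lookup_elem(map_fd, &key, vals)) {
 *           for (unsigned int i = 0; i < ncpus; i++)
 *                   sum += vals[i];             // aggregate per-cpu counters
 *   }
 */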
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
                           u64 map_flags)
{
        int ret;

        rcu_read_lock();
        ret = __htab_percpu_map_update_elem(map, key, value, map_flags, true);
        rcu_read_unlock();

        return ret;
}
static const struct bpf_map_ops htab_percpu_ops = {
        .map_alloc = htab_map_alloc,
        .map_free = htab_map_free,
        .map_get_next_key = htab_map_get_next_key,
        .map_lookup_elem = htab_percpu_map_lookup_elem,
        .map_update_elem = htab_percpu_map_update_elem,
        .map_delete_elem = htab_map_delete_elem,
};

static struct bpf_map_type_list htab_percpu_type __read_mostly = {
        .ops = &htab_percpu_ops,
        .type = BPF_MAP_TYPE_PERCPU_HASH,
};
static int __init register_htab_map(void)
{
        bpf_register_map_type(&htab_type);
        bpf_register_map_type(&htab_percpu_type);
        return 0;
}
late_initcall(register_htab_map);