kernel/bpf/devmap.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 */
/* A devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
 * spent some effort to ensure the datapath with redirect maps does not use
 * any locking. This is a quick note on the details.
 *
 * We have three possible paths to get into the devmap control plane: bpf
 * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall
 * will invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side, xchg() is used to modify the
 * netdev_map array. Then, because the datapath does a lookup into the
 * netdev_map array (read-only) from an RCU critical section, we use call_rcu()
 * to wait for an rcu grace period before freeing the old data structures. This
 * ensures the datapath always has a valid copy. However, the datapath does a
 * "flush" operation that pushes any pending packets in the driver outside the
 * RCU critical section. Each bpf_dtab_netdev tracks these pending operations
 * using a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed
 * until this list is empty, indicating that all outstanding flush operations
 * have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search the map structure for entries that contain
 * a reference to the net device being removed and delete them. This is a two
 * step process: (a) dereference the bpf_dtab_netdev object in netdev_map and
 * (b) check whether its ifindex is the same as that of the net_device being
 * removed. When removing the dev, a cmpxchg() is used to ensure the correct
 * dev is removed; in the case of a concurrent update or delete operation it is
 * possible that the initially referenced dev is no longer in the map. As the
 * notifier hook walks the map we know that new dev references can not be
 * added by the user because core infrastructure ensures dev_get_by_index()
 * calls will fail at this point.
 *
 * The devmap_hash type is a map type which interprets keys as ifindexes and
 * indexes these using a hashmap. This allows maps that use ifindex as key to
 * be densely packed instead of having holes in the lookup array for unused
 * ifindexes. The setup and packet enqueue/send code is shared between the two
 * types of devmap; only the lookup and insertion is different.
 */
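/* For orientation only (not part of the original file): a minimal sketch of
 * how a BPF program typically uses a DEVMAP from the XDP side. The map name,
 * sizes and section names below are illustrative assumptions, not taken from
 * this file, and the sketch assumes the libbpf headers <linux/bpf.h> and
 * <bpf/bpf_helpers.h>:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(max_entries, 64);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));	// value is an ifindex
 *	} tx_ports SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_redirect_example(struct xdp_md *ctx)
 *	{
 *		__u32 slot = 0;		// slot chosen by the program
 *
 *		// Returns XDP_REDIRECT on success; the driver then hands the
 *		// frame to the redirect core, which reaches dev_map_enqueue()
 *		// below.
 *		return bpf_redirect_map(&tx_ports, slot, 0);
 *	}
 */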
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

#define DEV_MAP_BULK_SIZE 16
struct bpf_dtab_netdev;

struct xdp_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct net_device *dev_rx;
	struct bpf_dtab_netdev *obj;
	unsigned int count;
};

struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct hlist_node index_hlist;
	struct bpf_dtab *dtab;
	struct xdp_bulk_queue __percpu *bulkq;
	struct rcu_head rcu;
	unsigned int idx; /* keep track of map index for tracepoint */
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */
	struct list_head __percpu *flush_list;
	struct list_head list;

	/* these are only used for DEVMAP_HASH type maps */
	struct hlist_head *dev_index_head;
	spinlock_t index_lock;
	unsigned int items;
	u32 n_buckets;
};

static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);
static struct hlist_head *dev_map_create_hash(unsigned int entries,
					      int numa_node)
{
	int i;
	struct hlist_head *hash;

	hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node);
	if (hash != NULL)
		for (i = 0; i < entries; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
						    int idx)
{
	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
}
static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
	int err, cpu;
	u64 cost;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return -EINVAL;

	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
	 * verifier prevents writes from the BPF side
	 */
	attr->map_flags |= BPF_F_RDONLY_PROG;

	bpf_map_init_from_attr(&dtab->map, attr);

	/* make sure page count doesn't overflow */
	cost = (u64) sizeof(struct list_head) * num_possible_cpus();

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);

		if (!dtab->n_buckets) /* Overflow check */
			return -EINVAL;
		cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets;
	} else {
		cost += (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
	}

	/* if map size is larger than memlock limit, reject it */
	err = bpf_map_charge_init(&dtab->map.memory, cost);
	if (err)
		return -EINVAL;

	dtab->flush_list = alloc_percpu(struct list_head);
	if (!dtab->flush_list)
		goto free_charge;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(per_cpu_ptr(dtab->flush_list, cpu));

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
							   dtab->map.numa_node);
		if (!dtab->dev_index_head)
			goto free_percpu;

		spin_lock_init(&dtab->index_lock);
	} else {
		dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
						      sizeof(struct bpf_dtab_netdev *),
						      dtab->map.numa_node);
		if (!dtab->netdev_map)
			goto free_percpu;
	}

	return 0;

free_percpu:
	free_percpu(dtab->flush_list);
free_charge:
	bpf_map_charge_finish(&dtab->map.memory);
	return -ENOMEM;
}
static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	dtab = kzalloc(sizeof(*dtab), GFP_USER);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	err = dev_map_init_map(dtab, attr);
	if (err) {
		kfree(dtab);
		return ERR_PTR(err);
	}

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
}
static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i, cpu;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete. The rcu critical section only guarantees
	 * no further reads against netdev_map. It does __not__ ensure pending
	 * flush operations (if any) are complete.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	bpf_clear_redirect_map(map);
	synchronize_rcu();

	/* Make sure prior calls to __dev_map_entry_free() have completed. */
	rcu_barrier();

	/* To ensure all pending flush operations have completed wait for flush
	 * list to empty on _all_ cpus.
	 * Because the above synchronize_rcu() ensures the map is disconnected
	 * from the program we can assume no new items will be added.
	 */
	for_each_online_cpu(cpu) {
		struct list_head *flush_list = per_cpu_ptr(dtab->flush_list, cpu);

		while (!list_empty(flush_list))
			cond_resched();
	}

	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		for (i = 0; i < dtab->n_buckets; i++) {
			struct bpf_dtab_netdev *dev;
			struct hlist_head *head;
			struct hlist_node *next;

			head = dev_map_index_hash(dtab, i);

			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
				hlist_del_rcu(&dev->index_hlist);
				free_percpu(dev->bulkq);
				dev_put(dev->dev);
				kfree(dev);
			}
		}

		bpf_map_area_free(dtab->dev_index_head);
	} else {
		for (i = 0; i < dtab->map.max_entries; i++) {
			struct bpf_dtab_netdev *dev;

			dev = dtab->netdev_map[i];
			if (!dev)
				continue;

			free_percpu(dev->bulkq);
			dev_put(dev->dev);
			kfree(dev);
		}

		bpf_map_area_free(dtab->netdev_map);
	}

	free_percpu(dtab->flush_list);
	kfree(dtab);
}
static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}
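/* For illustration (not part of the original file): from user space the key
 * space of a DEVMAP is usually walked with the libbpf syscall wrappers, which
 * end up in dev_map_get_next_key() above. A hedged sketch, assuming "map_fd"
 * refers to an already created DEVMAP and that <bpf/bpf.h> and <stdio.h> are
 * included:
 *
 *	__u32 key, next_key, ifindex;
 *
 *	// A NULL key (or an out-of-range one) starts iteration at slot 0.
 *	if (bpf_map_get_next_key(map_fd, NULL, &next_key) == 0) {
 *		do {
 *			key = next_key;
 *			if (bpf_map_lookup_elem(map_fd, &key, &ifindex) == 0)
 *				printf("slot %u -> ifindex %u\n", key, ifindex);
 *		} while (bpf_map_get_next_key(map_fd, &key, &next_key) == 0);
 *	}
 */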
struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct hlist_head *head = dev_map_index_hash(dtab, key);
	struct bpf_dtab_netdev *dev;

	hlist_for_each_entry_rcu(dev, head, index_hlist,
				 lockdep_is_held(&dtab->index_lock))
		if (dev->idx == key)
			return dev;

	return NULL;
}

static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
				     void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 idx, *next = next_key;
	struct bpf_dtab_netdev *dev, *next_dev;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first;

	idx = *(u32 *)key;

	dev = __dev_map_hash_lookup_elem(map, idx);
	if (!dev)
		goto find_first;

	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
				    struct bpf_dtab_netdev, index_hlist);

	if (next_dev) {
		*next = next_dev->idx;
		return 0;
	}

	i = idx & (dtab->n_buckets - 1);
	i++;

 find_first:
	for (; i < dtab->n_buckets; i++) {
		head = dev_map_index_hash(dtab, i);

		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					    struct bpf_dtab_netdev,
					    index_hlist);
		if (next_dev) {
			*next = next_dev->idx;
			return 0;
		}
	}

	return -ENOENT;
}
static int bq_xmit_all(struct xdp_bulk_queue *bq, u32 flags,
		       bool in_napi_ctx)
{
	struct bpf_dtab_netdev *obj = bq->obj;
	struct net_device *dev = obj->dev;
	int sent = 0, drops = 0, err = 0;
	int i;

	if (unlikely(!bq->count))
		return 0;

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
	if (sent < 0) {
		err = sent;
		sent = 0;
		goto error;
	}
	drops = bq->count - sent;
out:
	bq->count = 0;

	trace_xdp_devmap_xmit(&obj->dtab->map, obj->idx,
			      sent, drops, bq->dev_rx, dev, err);
	bq->dev_rx = NULL;
	__list_del_clearprev(&bq->flush_node);
	return 0;
error:
	/* If ndo_xdp_xmit fails with an errno, no frames have been
	 * xmit'ed and it's our responsibility to free them all.
	 */
	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		/* RX path under NAPI protection, can return frames faster */
		if (likely(in_napi_ctx))
			xdp_return_frame_rx_napi(xdpf);
		else
			xdp_return_frame(xdpf);
		drops++;
	}
	goto out;
}
/* __dev_map_flush is called from xdp_do_flush_map() which _must_ be signaled
 * from the driver before returning from its napi->poll() routine. The poll()
 * routine is called either from busy_poll context or net_rx_action signaled
 * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
 * net device can be torn down. On devmap tear down we ensure the flush list
 * is empty before completing to ensure all flush operations have completed.
 */
void __dev_map_flush(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct list_head *flush_list = this_cpu_ptr(dtab->flush_list);
	struct xdp_bulk_queue *bq, *tmp;

	rcu_read_lock();
	list_for_each_entry_safe(bq, tmp, flush_list, flush_node)
		bq_xmit_all(bq, XDP_XMIT_FLUSH, true);
	rcu_read_unlock();
}
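/* For illustration (not part of the original file): a condensed sketch of the
 * driver side contract described above. The "example_*" structures and the
 * example_rx_frame() helper are made-up assumptions; the point is only the
 * ordering of xdp_do_redirect() inside the RX loop and xdp_do_flush_map()
 * before napi_complete_done():
 *
 *	static int example_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct example_ring *ring = container_of(napi,
 *							 struct example_ring,
 *							 napi);
 *		struct xdp_buff xdp;
 *		int done = 0;
 *
 *		while (done < budget && example_rx_frame(ring, &xdp)) {
 *			if (bpf_prog_run_xdp(ring->xdp_prog, &xdp) ==
 *			    XDP_REDIRECT)
 *				xdp_do_redirect(ring->netdev, &xdp,
 *						ring->xdp_prog);
 *			done++;
 *		}
 *		xdp_do_flush_map();	// drain this cpu's devmap flush list
 *		if (done < budget)
 *			napi_complete_done(napi, done);
 *		return done;
 *	}
 */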
/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or
 * update happens in parallel here a dev_put won't happen until after reading
 * the ifindex.
 */
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = READ_ONCE(dtab->netdev_map[key]);
	return obj;
}
/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
		      struct net_device *dev_rx)
{
	struct list_head *flush_list = this_cpu_ptr(obj->dtab->flush_list);
	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(bq, 0, true);

	/* Ingress dev_rx will be the same for all xdp_frame's in
	 * bulk_queue, because bq is stored per-CPU and must be flushed
	 * at the end of the net_device driver's NAPI function.
	 */
	if (!bq->dev_rx)
		bq->dev_rx = dev_rx;

	bq->q[bq->count++] = xdpf;

	if (!bq->flush_node.prev)
		list_add(&bq->flush_node, flush_list);

	return 0;
}
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;
	struct xdp_frame *xdpf;
	int err;

	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
	if (unlikely(err))
		return err;

	xdpf = convert_to_xdp_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	return bq_enqueue(dst, xdpf, dev_rx);
}
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog)
{
	int err;

	err = xdp_ok_fwd_dev(dst->dev, skb->len);
	if (unlikely(err))
		return err;
	skb->dev = dst->dev;
	generic_xdp_tx(skb, xdp_prog);

	return 0;
}

static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
	struct net_device *dev = obj ? obj->dev : NULL;

	return dev ? &dev->ifindex : NULL;
}

static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
								 *(u32 *)key);
	struct net_device *dev = obj ? obj->dev : NULL;

	return dev ? &dev->ifindex : NULL;
}
static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
{
	if (dev->dev->netdev_ops->ndo_xdp_xmit) {
		struct xdp_bulk_queue *bq;
		int cpu;

		rcu_read_lock();
		for_each_online_cpu(cpu) {
			bq = per_cpu_ptr(dev->bulkq, cpu);
			bq_xmit_all(bq, XDP_XMIT_FLUSH, false);
		}
		rcu_read_unlock();
	}
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	dev_map_flush_old(dev);
	free_percpu(dev->bulkq);
	dev_put(dev->dev);
	kfree(dev);
}
static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	/* Use call_rcu() here to ensure any rcu critical sections have
	 * completed, but this does not guarantee a flush has happened
	 * yet, because the driver side rcu_read_lock/unlock only protects
	 * the running XDP program. However, for pending flush operations
	 * the dev and ctx are stored in another per cpu map. Additionally,
	 * the driver tear down ensures all soft irqs are complete before
	 * removing the net device once its refcount drops to zero.
	 */
	old_dev = xchg(&dtab->netdev_map[k], NULL);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	return 0;
}
static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, k);
	if (old_dev) {
		dtab->items--;
		hlist_del_init_rcu(&old_dev->index_hlist);
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
		ret = 0;
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	return ret;
}
static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
						    struct bpf_dtab *dtab,
						    u32 ifindex,
						    unsigned int idx)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
	struct bpf_dtab_netdev *dev;
	struct xdp_bulk_queue *bq;
	int cpu;

	dev = kmalloc_node(sizeof(*dev), gfp, dtab->map.numa_node);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->bulkq = __alloc_percpu_gfp(sizeof(*dev->bulkq),
					sizeof(void *), gfp);
	if (!dev->bulkq) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}

	for_each_possible_cpu(cpu) {
		bq = per_cpu_ptr(dev->bulkq, cpu);
		bq->obj = dev;
	}

	dev->dev = dev_get_by_index(net, ifindex);
	if (!dev->dev) {
		free_percpu(dev->bulkq);
		kfree(dev);
		return ERR_PTR(-EINVAL);
	}

	dev->idx = idx;
	dev->dtab = dtab;

	return dev;
}
static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	u32 ifindex = *(u32 *)value;
	u32 i = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	if (!ifindex) {
		dev = NULL;
	} else {
		dev = __dev_map_alloc_node(net, dtab, ifindex, i);
		if (IS_ERR(dev))
			return PTR_ERR(dev);
	}

	/* Use call_rcu() here to ensure rcu critical sections have completed.
	 * Remember that the driver side flush operation will happen before the
	 * net device is removed.
	 */
	old_dev = xchg(&dtab->netdev_map[i], dev);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}
static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	return __dev_map_update_elem(current->nsproxy->net_ns,
				     map, key, value, map_flags);
}
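/* For illustration (not part of the original file): user space populates a
 * DEVMAP through the regular BPF_MAP_UPDATE_ELEM syscall path, which lands in
 * dev_map_update_elem() above. A minimal sketch using the libbpf wrappers;
 * "map_fd" and the interface name are assumptions, and <net/if.h>, <stdio.h>
 * and <bpf/bpf.h> are expected to be included:
 *
 *	__u32 key = 0;					// devmap slot
 *	__u32 ifindex = if_nametoindex("eth1");		// egress device
 *
 *	if (!ifindex ||
 *	    bpf_map_update_elem(map_fd, &key, &ifindex, BPF_ANY) < 0)
 *		perror("devmap update");
 *
 * Updating a slot with ifindex 0, or deleting the key, clears the entry
 * (see the !ifindex branch in __dev_map_update_elem() above).
 */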
static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
				      void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	u32 ifindex = *(u32 *)value;
	u32 idx = *(u32 *)key;
	unsigned long flags;
	int err = -EEXIST;

	if (unlikely(map_flags > BPF_EXIST || !ifindex))
		return -EINVAL;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, idx);
	if (old_dev && (map_flags & BPF_NOEXIST))
		goto out_err;

	dev = __dev_map_alloc_node(net, dtab, ifindex, idx);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		goto out_err;
	}

	if (old_dev) {
		hlist_del_rcu(&old_dev->index_hlist);
	} else {
		if (dtab->items >= dtab->map.max_entries) {
			spin_unlock_irqrestore(&dtab->index_lock, flags);
			call_rcu(&dev->rcu, __dev_map_entry_free);
			return -E2BIG;
		}
		dtab->items++;
	}

	hlist_add_head_rcu(&dev->index_hlist,
			   dev_map_index_hash(dtab, idx));
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;

out_err:
	spin_unlock_irqrestore(&dtab->index_lock, flags);
	return err;
}
static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
					  map, key, value, map_flags);
}

const struct bpf_map_ops dev_map_ops = {
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
	.map_check_btf = map_check_no_btf,
};

const struct bpf_map_ops dev_map_hash_ops = {
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_hash_get_next_key,
	.map_lookup_elem = dev_map_hash_lookup_elem,
	.map_update_elem = dev_map_hash_update_elem,
	.map_delete_elem = dev_map_hash_delete_elem,
	.map_check_btf = map_check_no_btf,
};
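/* For illustration (not part of the original file): both ops tables above are
 * reached through the generic map syscalls; the only user-visible difference
 * is the map type and how the key is interpreted. A hedged sketch of creating
 * each kind with the libbpf bpf_create_map() helper; the entry counts are
 * arbitrary:
 *
 *	// DEVMAP: array-style, the key is the slot index
 *	int devmap_fd = bpf_create_map(BPF_MAP_TYPE_DEVMAP,
 *				       sizeof(__u32), sizeof(__u32), 64, 0);
 *
 *	// DEVMAP_HASH: the key is typically the ifindex itself
 *	int devmap_hash_fd = bpf_create_map(BPF_MAP_TYPE_DEVMAP_HASH,
 *					    sizeof(__u32), sizeof(__u32), 64, 0);
 */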
static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
				       struct net_device *netdev)
{
	unsigned long flags;
	u32 i;

	spin_lock_irqsave(&dtab->index_lock, flags);
	for (i = 0; i < dtab->n_buckets; i++) {
		struct bpf_dtab_netdev *dev;
		struct hlist_head *head;
		struct hlist_node *next;

		head = dev_map_index_hash(dtab, i);

		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
			if (netdev != dev->dev)
				continue;

			dtab->items--;
			hlist_del_rcu(&dev->index_hlist);
			call_rcu(&dev->rcu, __dev_map_entry_free);
		}
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);
}
static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i;

	switch (event) {
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed because
		 * dev_map_list is an RCU list AND to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
				dev_map_hash_remove_netdev(dtab, netdev);
				continue;
			}

			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = READ_ONCE(dtab->netdev_map[i]);
				if (!dev || netdev != dev->dev)
					continue;
				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);
	return 0;
}

subsys_initcall(dev_map_init);