// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 */

/* Devmaps' primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
 * spent some effort to ensure the datapath with redirect maps does not use
 * any locking. This is a quick note on the details.
 *
 * We have three possible paths to get into the devmap control plane:
 * bpf syscalls, bpf programs, and driver side xmit/flush operations. A bpf
 * syscall will invoke an update, delete, or lookup operation. To ensure
 * updates and deletes appear atomic from the datapath side, xchg() is used
 * to modify the netdev_map array. Then, because the datapath does a lookup
 * into the netdev_map array (read-only) from an RCU critical section, we use
 * call_rcu() to wait for an RCU grace period before freeing the old data
 * structures. This ensures the datapath always has a valid copy. However,
 * the datapath does a "flush" operation that pushes any pending packets in
 * the driver outside the RCU critical section. Each bpf_dtab_netdev tracks
 * these pending operations using a per-cpu flush list. The bpf_dtab_netdev
 * object will not be destroyed until this list is empty, indicating that all
 * outstanding flush operations have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search the map structure for entries that contain
 * a reference to the net device and remove them. This is a two step process:
 * (a) dereference the bpf_dtab_netdev object in netdev_map and (b) check
 * whether the ifindex is the same as that of the net_device being removed.
 * When removing the dev, a cmpxchg() is used to ensure the correct dev is
 * removed; in the case of a concurrent update or delete operation it is
 * possible that the initially referenced dev is no longer in the map. While
 * the notifier hook walks the map we know that new dev references cannot be
 * added by the user because core infrastructure ensures dev_get_by_index()
 * calls will fail at this point.
 *
 * The devmap_hash type is a map type which interprets keys as ifindexes and
 * indexes these using a hashmap. This allows maps that use ifindex as key to
 * be densely packed instead of having holes in the lookup array for unused
 * ifindexes. The setup and packet enqueue/send code is shared between the
 * two types of devmap; only the lookup and insertion is different.
 */
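
/* A minimal sketch (not part of this file) of how the map is typically used
 * from the BPF side: an XDP program created against a DEVMAP redirects
 * packets with bpf_redirect_map(). The map name "tx_port", the section names
 * and the fixed key 0 below are illustrative assumptions, not kernel API.
 *
 *	struct bpf_map_def SEC("maps") tx_port = {
 *		.type		= BPF_MAP_TYPE_DEVMAP,
 *		.key_size	= sizeof(__u32),
 *		.value_size	= sizeof(__u32),
 *		.max_entries	= 64,
 *	};
 *
 *	SEC("xdp_redirect_map")
 *	int xdp_redirect_map_prog(struct xdp_md *ctx)
 *	{
 *		__u32 key = 0;	// egress slot, populated from user space
 *
 *		return bpf_redirect_map(&tx_port, key, 0);
 *	}
 */
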
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

#define DEV_MAP_BULK_SIZE 16
struct bpf_dtab_netdev;

struct xdp_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct net_device *dev_rx;
	struct bpf_dtab_netdev *obj;
	unsigned int count;
};

struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct hlist_node index_hlist;
	struct bpf_dtab *dtab;
	struct xdp_bulk_queue __percpu *bulkq;
	struct rcu_head rcu;
	unsigned int idx; /* keep track of map index for tracepoint */
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */
	struct list_head __percpu *flush_list;
	struct list_head list;

	/* these are only used for DEVMAP_HASH type maps */
	struct hlist_head *dev_index_head;
	spinlock_t index_lock;
	unsigned int items;
	u32 n_buckets;
};

static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static struct hlist_head *dev_map_create_hash(unsigned int entries)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc_array(entries, sizeof(*hash), GFP_KERNEL);
	if (hash)
		for (i = 0; i < entries; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
						    int idx)
{
	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
}

static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
	int err, cpu;
	u64 cost;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return -EINVAL;

	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
	 * verifier prevents writes from the BPF side
	 */
	attr->map_flags |= BPF_F_RDONLY_PROG;

	bpf_map_init_from_attr(&dtab->map, attr);

	/* make sure page count doesn't overflow */
	cost = (u64) sizeof(struct list_head) * num_possible_cpus();

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);

		if (!dtab->n_buckets) /* Overflow check */
			return -EINVAL;
		cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets;
	} else {
		cost += (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
	}

	/* if map size is larger than memlock limit, reject it */
	err = bpf_map_charge_init(&dtab->map.memory, cost);
	if (err)
		return -EINVAL;

	dtab->flush_list = alloc_percpu(struct list_head);
	if (!dtab->flush_list)
		goto free_charge;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(per_cpu_ptr(dtab->flush_list, cpu));

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets);
		if (!dtab->dev_index_head)
			goto free_percpu;

		spin_lock_init(&dtab->index_lock);
	} else {
		dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
						      sizeof(struct bpf_dtab_netdev *),
						      dtab->map.numa_node);
		if (!dtab->netdev_map)
			goto free_percpu;
	}

	return 0;

free_percpu:
	free_percpu(dtab->flush_list);
free_charge:
	bpf_map_charge_finish(&dtab->map.memory);
	return -ENOMEM;
}

static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	dtab = kzalloc(sizeof(*dtab), GFP_USER);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	err = dev_map_init_map(dtab, attr);
	if (err) {
		kfree(dtab);
		return ERR_PTR(err);
	}

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
}

static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i, cpu;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map)
	 * were disconnected from events. Wait for outstanding critical
	 * sections in these programs to complete. The rcu critical section
	 * only guarantees no further reads against netdev_map. It does
	 * __not__ ensure pending flush operations (if any) are complete.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	bpf_clear_redirect_map(map);
	synchronize_rcu();

	/* Make sure prior __dev_map_entry_free() calls have completed. */
	rcu_barrier();

	/* To ensure all pending flush operations have completed wait for the
	 * flush list to empty on _all_ cpus.
	 * Because the above synchronize_rcu() ensures the map is disconnected
	 * from the program we can assume no new items will be added.
	 */
	for_each_online_cpu(cpu) {
		struct list_head *flush_list = per_cpu_ptr(dtab->flush_list, cpu);

		while (!list_empty(flush_list))
			cond_resched();
	}

	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		for (i = 0; i < dtab->n_buckets; i++) {
			struct bpf_dtab_netdev *dev;
			struct hlist_head *head;
			struct hlist_node *next;

			head = dev_map_index_hash(dtab, i);

			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
				hlist_del_rcu(&dev->index_hlist);
				free_percpu(dev->bulkq);
				dev_put(dev->dev);
				kfree(dev);
			}
		}

		kfree(dtab->dev_index_head);
	} else {
		for (i = 0; i < dtab->map.max_entries; i++) {
			struct bpf_dtab_netdev *dev;

			dev = dtab->netdev_map[i];
			if (!dev)
				continue;

			free_percpu(dev->bulkq);
			dev_put(dev->dev);
			kfree(dev);
		}

		bpf_map_area_free(dtab->netdev_map);
	}

	free_percpu(dtab->flush_list);
	kfree(dtab);
}

static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct hlist_head *head = dev_map_index_hash(dtab, key);
	struct bpf_dtab_netdev *dev;

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->idx == key)
			return dev;

	return NULL;
}

static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
				     void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 idx, *next = next_key;
	struct bpf_dtab_netdev *dev, *next_dev;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first;

	idx = *(u32 *)key;

	dev = __dev_map_hash_lookup_elem(map, idx);
	if (!dev)
		goto find_first;

	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
				    struct bpf_dtab_netdev, index_hlist);

	if (next_dev) {
		*next = next_dev->idx;
		return 0;
	}

	i = idx & (dtab->n_buckets - 1);
	i++;

 find_first:
	for (; i < dtab->n_buckets; i++) {
		head = dev_map_index_hash(dtab, i);

		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					    struct bpf_dtab_netdev,
					    index_hlist);
		if (next_dev) {
			*next = next_dev->idx;
			return 0;
		}
	}

	return -ENOENT;
}

static int bq_xmit_all(struct xdp_bulk_queue *bq, u32 flags,
		       bool in_napi_ctx)
{
	struct bpf_dtab_netdev *obj = bq->obj;
	struct net_device *dev = obj->dev;
	int sent = 0, drops = 0, err = 0;
	int i;

	if (unlikely(!bq->count))
		return 0;

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
	if (sent < 0) {
		err = sent;
		sent = 0;
		goto error;
	}
	drops = bq->count - sent;
out:
	bq->count = 0;

	trace_xdp_devmap_xmit(&obj->dtab->map, obj->idx,
			      sent, drops, bq->dev_rx, dev, err);
	bq->dev_rx = NULL;
	__list_del_clearprev(&bq->flush_node);
	return 0;
error:
	/* If ndo_xdp_xmit fails with an errno, no frames have been
	 * xmit'ed and it's our responsibility to free them all.
	 */
	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		/* RX path under NAPI protection, can return frames faster */
		if (likely(in_napi_ctx))
			xdp_return_frame_rx_napi(xdpf);
		else
			xdp_return_frame(xdpf);
		drops++;
	}
	goto out;
}

/* __dev_map_flush is called from xdp_do_flush_map() which _must_ be signaled
 * from the driver before returning from its napi->poll() routine. The poll()
 * routine is called either from busy_poll context or net_rx_action signaled
 * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
 * net device can be torn down. On devmap tear down we ensure the flush list
 * is empty before completing to ensure all flush operations have completed.
 */
void __dev_map_flush(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct list_head *flush_list = this_cpu_ptr(dtab->flush_list);
	struct xdp_bulk_queue *bq, *tmp;

	rcu_read_lock();
	list_for_each_entry_safe(bq, tmp, flush_list, flush_node)
		bq_xmit_all(bq, XDP_XMIT_FLUSH, true);
	rcu_read_unlock();
}
/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete
 * and/or update happens in parallel here a dev_put won't happen until after
 * reading the ifindex.
 */
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = READ_ONCE(dtab->netdev_map[key]);
	return obj;
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
		      struct net_device *dev_rx)
{
	struct list_head *flush_list = this_cpu_ptr(obj->dtab->flush_list);
	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(bq, 0, true);

	/* Ingress dev_rx will be the same for all xdp_frame's in the
	 * bulk_queue, because the bq is stored per-CPU and must be flushed
	 * from the net_device driver's NAPI handler before it returns.
	 */
	if (!bq->dev_rx)
		bq->dev_rx = dev_rx;

	bq->q[bq->count++] = xdpf;

	if (!bq->flush_node.prev)
		list_add(&bq->flush_node, flush_list);

	return 0;
}

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;
	struct xdp_frame *xdpf;
	int err;

	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
	if (unlikely(err))
		return err;

	xdpf = convert_to_xdp_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	return bq_enqueue(dst, xdpf, dev_rx);
}

int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog)
{
	int err;

	err = xdp_ok_fwd_dev(dst->dev, skb->len);
	if (unlikely(err))
		return err;
	skb->dev = dst->dev;
	generic_xdp_tx(skb, xdp_prog);

	return 0;
}

static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
	struct net_device *dev = obj ? obj->dev : NULL;

	return dev ? &dev->ifindex : NULL;
}

static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
								 *(u32 *)key);
	struct net_device *dev = obj ? obj->dev : NULL;

	return dev ? &dev->ifindex : NULL;
}

static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
{
	if (dev->dev->netdev_ops->ndo_xdp_xmit) {
		struct xdp_bulk_queue *bq;
		int cpu;

		rcu_read_lock();
		for_each_online_cpu(cpu) {
			bq = per_cpu_ptr(dev->bulkq, cpu);
			bq_xmit_all(bq, XDP_XMIT_FLUSH, false);
		}
		rcu_read_unlock();
	}
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	dev_map_flush_old(dev);
	free_percpu(dev->bulkq);
	dev_put(dev->dev);
	kfree(dev);
}

static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	/* Use call_rcu() here to ensure any rcu critical sections have
	 * completed, but this does not guarantee a flush has happened
	 * yet, because the driver-side rcu_read_lock/unlock only protects
	 * the running XDP program. However, for pending flush operations
	 * the dev and ctx are stored in another per-cpu map. Additionally,
	 * the driver tear down ensures all soft irqs are complete before
	 * removing the net device once its refcount drops to zero.
	 */
	old_dev = xchg(&dtab->netdev_map[k], NULL);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	return 0;
}

static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, k);
	if (old_dev) {
		dtab->items--;
		hlist_del_init_rcu(&old_dev->index_hlist);
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
		ret = 0;
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	return ret;
}

static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
						    struct bpf_dtab *dtab,
						    u32 ifindex,
						    unsigned int idx)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
	struct bpf_dtab_netdev *dev;
	struct xdp_bulk_queue *bq;
	int cpu;

	dev = kmalloc_node(sizeof(*dev), gfp, dtab->map.numa_node);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->bulkq = __alloc_percpu_gfp(sizeof(*dev->bulkq),
					sizeof(void *), gfp);
	if (!dev->bulkq) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}

	for_each_possible_cpu(cpu) {
		bq = per_cpu_ptr(dev->bulkq, cpu);
		bq->obj = dev;
	}

	dev->dev = dev_get_by_index(net, ifindex);
	if (!dev->dev) {
		free_percpu(dev->bulkq);
		kfree(dev);
		return ERR_PTR(-EINVAL);
	}

	dev->idx = idx;
	dev->dtab = dtab;

	return dev;
}

static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	u32 ifindex = *(u32 *)value;
	u32 i = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	if (!ifindex) {
		dev = NULL;
	} else {
		dev = __dev_map_alloc_node(net, dtab, ifindex, i);
		if (IS_ERR(dev))
			return PTR_ERR(dev);
	}

	/* Use call_rcu() here to ensure rcu critical sections have completed,
	 * remembering that the driver-side flush operation will happen before
	 * the net device is removed.
	 */
	old_dev = xchg(&dtab->netdev_map[i], dev);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}

static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	return __dev_map_update_elem(current->nsproxy->net_ns,
				     map, key, value, map_flags);
}

static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
				      void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	u32 ifindex = *(u32 *)value;
	u32 idx = *(u32 *)key;
	unsigned long flags;
	int err = -EEXIST;

	if (unlikely(map_flags > BPF_EXIST || !ifindex))
		return -EINVAL;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, idx);
	if (old_dev && (map_flags & BPF_NOEXIST))
		goto out_err;

	dev = __dev_map_alloc_node(net, dtab, ifindex, idx);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		goto out_err;
	}

	if (old_dev) {
		hlist_del_rcu(&old_dev->index_hlist);
	} else {
		if (dtab->items >= dtab->map.max_entries) {
			spin_unlock_irqrestore(&dtab->index_lock, flags);
			call_rcu(&dev->rcu, __dev_map_entry_free);
			return -E2BIG;
		}
		dtab->items++;
	}

	hlist_add_head_rcu(&dev->index_hlist,
			   dev_map_index_hash(dtab, idx));
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;

out_err:
	spin_unlock_irqrestore(&dtab->index_lock, flags);
	return err;
}

static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
					  map, key, value, map_flags);
}
const struct bpf_map_ops dev_map_ops = {
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
	.map_check_btf = map_check_no_btf,
};

const struct bpf_map_ops dev_map_hash_ops = {
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_hash_get_next_key,
	.map_lookup_elem = dev_map_hash_lookup_elem,
	.map_update_elem = dev_map_hash_update_elem,
	.map_delete_elem = dev_map_hash_delete_elem,
	.map_check_btf = map_check_no_btf,
};

static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
				       struct net_device *netdev)
{
	unsigned long flags;
	u32 i;

	spin_lock_irqsave(&dtab->index_lock, flags);
	for (i = 0; i < dtab->n_buckets; i++) {
		struct bpf_dtab_netdev *dev;
		struct hlist_head *head;
		struct hlist_node *next;

		head = dev_map_index_hash(dtab, i);

		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
			if (netdev != dev->dev)
				continue;

			dtab->items--;
			hlist_del_rcu(&dev->index_hlist);
			call_rcu(&dev->rcu, __dev_map_entry_free);
		}
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);
}

static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i;

	switch (event) {
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed because
		 * dev_map_list is an RCU list AND to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
				dev_map_hash_remove_netdev(dtab, netdev);
				continue;
			}

			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = READ_ONCE(dtab->netdev_map[i]);
				if (!dev || netdev != dev->dev)
					continue;
				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);
	return 0;
}

subsys_initcall(dev_map_init);