// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 */

/* The devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance, we
 * spent some effort ensuring the datapath with redirect maps does not use
 * any locking. This is a quick note on the details.
 *
 * We have three possible paths into the devmap control plane: bpf syscalls,
 * bpf programs, and driver-side xmit/flush operations. A bpf syscall will
 * invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side, xchg() is used to modify the
 * netdev_map array. Then, because the datapath does a lookup into the
 * netdev_map array (read-only) from an RCU critical section, we use
 * call_rcu() to wait for an RCU grace period before freeing the old data
 * structures. This ensures the datapath always has a valid copy. However,
 * the datapath does a "flush" operation that pushes any pending packets in
 * the driver outside the RCU critical section. Each bpf_dtab_netdev tracks
 * these pending operations using a per-cpu flush list. The bpf_dtab_netdev
 * object will not be destroyed until this list is empty, indicating that all
 * outstanding flush operations have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search the map structure for entries that hold a
 * reference to the net device being removed and delete them. This is a
 * two-step process: (a) dereference the bpf_dtab_netdev object in netdev_map
 * and (b) check whether its ifindex is the same as that of the net_device
 * being removed. When removing the dev, a cmpxchg() is used to ensure the
 * correct dev is removed; in the case of a concurrent update or delete
 * operation it is possible that the initially referenced dev is no longer in
 * the map. While the notifier hook walks the map we know that no new dev
 * references can be added by the user, because core infrastructure ensures
 * that dev_get_by_index() calls will fail at this point.
 *
 * The devmap_hash type is a map type which interprets keys as ifindexes and
 * indexes these using a hashmap. This allows maps that use ifindex as key to
 * be densely packed instead of having holes in the lookup array for unused
 * ifindexes. The setup and packet enqueue/send code is shared between the two
 * types of devmap; only the lookup and insertion are different.
 */

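/* A minimal sketch of the BPF-program side that this map type serves. It is
 * illustrative only: the map name, program name and section names below are
 * made up, and the helper's flags argument is simply left at 0. The packet is
 * bulk-queued for the net_device stored at slot 0 and transmitted when the
 * driver flushes at the end of its NAPI poll:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(struct bpf_devmap_val));
 *		__uint(max_entries, 64);
 *	} tx_ports SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_redirect_example(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&tx_ports, 0, 0);
 *	}
 */
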
#include <linux/bpf.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

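/* Per-CPU, per-net_device TX bulk queue. Redirected frames destined for the
 * same device are collected here (up to DEV_MAP_BULK_SIZE) and handed to
 * ndo_xdp_xmit() in one batch when the driver flushes at the end of its NAPI
 * poll via xdp_do_flush().
 */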
struct xdp_dev_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct net_device *dev;
	struct net_device *dev_rx;
	unsigned int count;
};

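/* A single devmap entry: the target net_device (with a reference held), its
 * key/index in the map, and an optional per-entry XDP program that is run on
 * the egress side before the frame is enqueued.
 */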
struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct hlist_node index_hlist;
	struct bpf_dtab *dtab;
	struct bpf_prog *xdp_prog;
	struct rcu_head rcu;
	unsigned int idx;
	struct bpf_devmap_val val;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */
	struct list_head list;

	/* these are only used for DEVMAP_HASH type maps */
	struct hlist_head *dev_index_head;
	spinlock_t index_lock;
	unsigned int items;
	u32 n_buckets;
};

static DEFINE_PER_CPU(struct list_head, dev_flush_list);
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static struct hlist_head *dev_map_create_hash(unsigned int entries,
					      int numa_node)
{
	int i;
	struct hlist_head *hash;

	hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node);
	if (hash)
		for (i = 0; i < entries; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

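/* n_buckets is always a power of two (see dev_map_init_map()), so masking
 * with (n_buckets - 1) is a cheap modulo for selecting the hash bucket.
 */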
static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
						    int idx)
{
	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
}

static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
	u32 valsize = attr->value_size;

	/* check sanity of attributes. 2 value sizes supported:
	 * 4 bytes: ifindex
	 * 8 bytes: ifindex + prog fd
	 */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
	     valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
	    attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return -EINVAL;

	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
	 * verifier prevents writes from the BPF side
	 */
	attr->map_flags |= BPF_F_RDONLY_PROG;

	bpf_map_init_from_attr(&dtab->map, attr);

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);

		if (!dtab->n_buckets) /* Overflow check */
			return -EINVAL;
	}

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
							   dtab->map.numa_node);
		if (!dtab->dev_index_head)
			return -ENOMEM;

		spin_lock_init(&dtab->index_lock);
	} else {
		dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
						      sizeof(struct bpf_dtab_netdev *),
						      dtab->map.numa_node);
		if (!dtab->netdev_map)
			return -ENOMEM;
	}

	return 0;
}

static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	dtab = kzalloc(sizeof(*dtab), GFP_USER | __GFP_ACCOUNT);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	err = dev_map_init_map(dtab, attr);
	if (err) {
		kfree(dtab);
		return ERR_PTR(err);
	}

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
}

static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. The following synchronize_rcu() guarantees
	 * that both rcu read critical sections complete and waits for
	 * preempt-disable regions (NAPI being the relevant context here), so
	 * we are certain there will be no further reads against the netdev_map
	 * and all flush operations are complete. Flush operations can only be
	 * done from NAPI context for this reason.
	 */
	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	bpf_clear_redirect_map(map);
	synchronize_rcu();

	/* Make sure prior __dev_map_entry_free() calls have completed. */
	rcu_barrier();

	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		for (i = 0; i < dtab->n_buckets; i++) {
			struct bpf_dtab_netdev *dev;
			struct hlist_head *head;
			struct hlist_node *next;

			head = dev_map_index_hash(dtab, i);

			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
				hlist_del_rcu(&dev->index_hlist);
				if (dev->xdp_prog)
					bpf_prog_put(dev->xdp_prog);
				dev_put(dev->dev);
				kfree(dev);
			}
		}

		bpf_map_area_free(dtab->dev_index_head);
	} else {
		for (i = 0; i < dtab->map.max_entries; i++) {
			struct bpf_dtab_netdev *dev;

			dev = dtab->netdev_map[i];
			if (!dev)
				continue;

			if (dev->xdp_prog)
				bpf_prog_put(dev->xdp_prog);
			dev_put(dev->dev);
			kfree(dev);
		}

		bpf_map_area_free(dtab->netdev_map);
	}

	kfree(dtab);
}

static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

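/* Datapath lookup for the DEVMAP_HASH type: walk the bucket the key hashes
 * to and return the entry whose idx matches. Callers either run under the
 * RCU read lock (datapath) or hold dtab->index_lock (syscall side).
 */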
struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct hlist_head *head = dev_map_index_hash(dtab, key);
	struct bpf_dtab_netdev *dev;

	hlist_for_each_entry_rcu(dev, head, index_hlist,
				 lockdep_is_held(&dtab->index_lock))
		if (dev->idx == key)
			return dev;

	return NULL;
}

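/* get_next_key for the DEVMAP_HASH type: first try the next entry in the
 * current key's bucket, then fall through to the first entry of each
 * following bucket. -ENOENT signals the end of the iteration.
 */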
static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
				     void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 idx, *next = next_key;
	struct bpf_dtab_netdev *dev, *next_dev;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first;

	idx = *(u32 *)key;

	dev = __dev_map_hash_lookup_elem(map, idx);
	if (!dev)
		goto find_first;

	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
				    struct bpf_dtab_netdev, index_hlist);

	if (next_dev) {
		*next = next_dev->idx;
		return 0;
	}

	i = idx & (dtab->n_buckets - 1);
	i++;

 find_first:
	for (; i < dtab->n_buckets; i++) {
		head = dev_map_index_hash(dtab, i);

		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					    struct bpf_dtab_netdev,
					    index_hlist);
		if (next_dev) {
			*next = next_dev->idx;
			return 0;
		}
	}

	return -ENOENT;
}

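/* Per-entry programs are only possible when the map was created with the
 * 8-byte value layout (ifindex + prog fd); see dev_map_init_map().
 */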
bool dev_map_can_have_prog(struct bpf_map *map)
{
	if ((map->map_type == BPF_MAP_TYPE_DEVMAP ||
	     map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) &&
	    map->value_size != offsetofend(struct bpf_devmap_val, ifindex))
		return true;

	return false;
}

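/* Push all frames queued in @bq to the driver via ndo_xdp_xmit(). If the
 * call fails outright no frames were transmitted, so every queued frame is
 * freed here; a partial send is only accounted as drops in the tracepoint.
 */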
static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
{
	struct net_device *dev = bq->dev;
	int sent = 0, drops = 0, err = 0;
	int i;

	if (unlikely(!bq->count))
		return;

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
	if (sent < 0) {
		err = sent;
		sent = 0;
		goto error;
	}
	drops = bq->count - sent;
out:
	bq->count = 0;

	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, drops, err);
	bq->dev_rx = NULL;
	__list_del_clearprev(&bq->flush_node);
	return;
error:
	/* If ndo_xdp_xmit fails with an errno, no frames have been
	 * xmit'ed and it's our responsibility to free them all.
	 */
	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		xdp_return_frame_rx_napi(xdpf);
		drops++;
	}
	goto out;
}

/* __dev_flush is called from xdp_do_flush() which _must_ be signaled
 * from the driver before returning from its napi->poll() routine. The poll()
 * routine is called either from busy_poll context or net_rx_action signaled
 * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
 * net device can be torn down. On devmap tear down we ensure the flush list
 * is empty before completing to ensure all flush operations have completed.
 * When drivers update the bpf program they may need to ensure any flush ops
 * are also complete. Using synchronize_rcu or call_rcu will suffice for this
 * because both wait for napi context to exit.
 */
void __dev_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node)
		bq_xmit_all(bq, XDP_XMIT_FLUSH);
}

/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete
 * and/or update happens in parallel here, a dev_put won't happen until after
 * reading the ifindex.
 */
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = READ_ONCE(dtab->netdev_map[key]);
	return obj;
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		       struct net_device *dev_rx)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(bq, 0);

	/* Ingress dev_rx will be the same for all xdp_frame's in bulk_queue,
	 * because bq is stored per-CPU and must be flushed from the
	 * net_device driver's NAPI function.
	 */
	if (!bq->dev_rx)
		bq->dev_rx = dev_rx;

	bq->q[bq->count++] = xdpf;

	if (!bq->flush_node.prev)
		list_add(&bq->flush_node, flush_list);
}

static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
				struct net_device *dev_rx)
{
	struct xdp_frame *xdpf;
	int err;

	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
	if (unlikely(err))
		return err;

	xdpf = xdp_convert_buff_to_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	bq_enqueue(dev, xdpf, dev_rx);
	return 0;
}

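/* Run the XDP program attached to this devmap entry on the egress side.
 * Only an XDP_PASS verdict lets the buffer continue towards the target
 * device; every other verdict frees the buffer and the frame is dropped.
 */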
static struct xdp_buff *dev_map_run_prog(struct net_device *dev,
					 struct xdp_buff *xdp,
					 struct bpf_prog *xdp_prog)
{
	struct xdp_txq_info txq = { .dev = dev };
	u32 act;

	xdp_set_data_meta_invalid(xdp);
	xdp->txq = &txq;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		return xdp;
	case XDP_DROP:
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(dev, xdp_prog, act);
		break;
	}

	xdp_return_buff(xdp);
	return NULL;
}

int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return __xdp_enqueue(dev, xdp, dev_rx);
}

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;

	if (dst->xdp_prog) {
		xdp = dev_map_run_prog(dev, xdp, dst->xdp_prog);
		if (!xdp)
			return 0;
	}
	return __xdp_enqueue(dev, xdp, dev_rx);
}

int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog)
{
	int err;

	err = xdp_ok_fwd_dev(dst->dev, skb->len);
	if (unlikely(err))
		return err;
	skb->dev = dst->dev;
	generic_xdp_tx(skb, xdp_prog);

	return 0;
}

static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);

	return obj ? &obj->val : NULL;
}

static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
								 *(u32 *)key);

	return obj ? &obj->val : NULL;
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	if (dev->xdp_prog)
		bpf_prog_put(dev->xdp_prog);
	dev_put(dev->dev);
	kfree(dev);
}

static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	/* Use call_rcu() here to ensure any rcu critical sections have
	 * completed as well as any flush operations, because call_rcu will
	 * wait for the preempt-disable region to complete, NAPI in this
	 * context. Additionally, the driver tear down ensures all soft irqs
	 * are complete before the net device is removed once its refcount
	 * drops to zero.
	 */
	old_dev = xchg(&dtab->netdev_map[k], NULL);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	return 0;
}

static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, k);
	if (old_dev) {
		dtab->items--;
		hlist_del_init_rcu(&old_dev->index_hlist);
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
		ret = 0;
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	return ret;
}

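/* Allocate and initialise one devmap entry: take a reference on the target
 * net_device and, when a program fd is supplied in the value, require an XDP
 * program with expected_attach_type == BPF_XDP_DEVMAP.
 */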
static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
						    struct bpf_dtab *dtab,
						    struct bpf_devmap_val *val,
						    unsigned int idx)
{
	struct bpf_prog *prog = NULL;
	struct bpf_dtab_netdev *dev;

	dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
				   GFP_ATOMIC | __GFP_NOWARN,
				   dtab->map.numa_node);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->dev = dev_get_by_index(net, val->ifindex);
	if (!dev->dev)
		goto err_out;

	if (val->bpf_prog.fd > 0) {
		prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
					     BPF_PROG_TYPE_XDP, false);
		if (IS_ERR(prog))
			goto err_put_dev;
		if (prog->expected_attach_type != BPF_XDP_DEVMAP)
			goto err_put_prog;
	}

	dev->idx = idx;
	dev->dtab = dtab;
	if (prog) {
		dev->xdp_prog = prog;
		dev->val.bpf_prog.id = prog->aux->id;
	} else {
		dev->xdp_prog = NULL;
		dev->val.bpf_prog.id = 0;
	}
	dev->val.ifindex = val->ifindex;

	return dev;
err_put_prog:
	bpf_prog_put(prog);
err_put_dev:
	dev_put(dev->dev);
err_out:
	kfree(dev);
	return ERR_PTR(-EINVAL);
}

static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 i = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (!val.ifindex) {
		dev = NULL;
		/* cannot specify fd if ifindex is 0 */
		if (val.bpf_prog.fd > 0)
			return -EINVAL;
	} else {
		dev = __dev_map_alloc_node(net, dtab, &val, i);
		if (IS_ERR(dev))
			return PTR_ERR(dev);
	}

	/* Use call_rcu() here to ensure rcu critical sections have completed.
	 * Remember: the driver side flush operation will happen before the
	 * net device is removed.
	 */
	old_dev = xchg(&dtab->netdev_map[i], dev);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}

static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	return __dev_map_update_elem(current->nsproxy->net_ns,
				     map, key, value, map_flags);
}

static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
				      void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 idx = *(u32 *)key;
	unsigned long flags;
	int err = -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
		return -EINVAL;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, idx);
	if (old_dev && (map_flags & BPF_NOEXIST))
		goto out_err;

	dev = __dev_map_alloc_node(net, dtab, &val, idx);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		goto out_err;
	}

	if (old_dev) {
		hlist_del_rcu(&old_dev->index_hlist);
	} else {
		if (dtab->items >= dtab->map.max_entries) {
			spin_unlock_irqrestore(&dtab->index_lock, flags);
			call_rcu(&dev->rcu, __dev_map_entry_free);
			return -E2BIG;
		}
		dtab->items++;
	}

	hlist_add_head_rcu(&dev->index_hlist,
			   dev_map_index_hash(dtab, idx));
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;

out_err:
	spin_unlock_irqrestore(&dtab->index_lock, flags);
	return err;
}

static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
					  map, key, value, map_flags);
}

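/* A user-space sketch of populating an entry (illustrative only; "map_fd",
 * "key" and "prog_fd" are placeholders and bpf_map_update_elem() is the
 * libbpf wrapper). The key is an array index for DEVMAP and typically the
 * ifindex for DEVMAP_HASH; a prog fd <= 0 means no per-entry program:
 *
 *	struct bpf_devmap_val val = {
 *		.ifindex = ifindex,
 *		.bpf_prog.fd = prog_fd,
 *	};
 *	bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
 */
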
static int dev_map_btf_id;
const struct bpf_map_ops dev_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_dtab",
	.map_btf_id = &dev_map_btf_id,
};

static int dev_map_hash_map_btf_id;
const struct bpf_map_ops dev_map_hash_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_hash_get_next_key,
	.map_lookup_elem = dev_map_hash_lookup_elem,
	.map_update_elem = dev_map_hash_update_elem,
	.map_delete_elem = dev_map_hash_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_dtab",
	.map_btf_id = &dev_map_hash_map_btf_id,
};

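/* Drop every DEVMAP_HASH entry that points at the unregistering netdev.
 * Entries are unhashed under index_lock and freed via call_rcu(), exactly
 * like a normal delete.
 */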
static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
				       struct net_device *netdev)
{
	unsigned long flags;
	u32 i;

	spin_lock_irqsave(&dtab->index_lock, flags);
	for (i = 0; i < dtab->n_buckets; i++) {
		struct bpf_dtab_netdev *dev;
		struct hlist_head *head;
		struct hlist_node *next;

		head = dev_map_index_hash(dtab, i);

		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
			if (netdev != dev->dev)
				continue;

			dtab->items--;
			hlist_del_rcu(&dev->index_hlist);
			call_rcu(&dev->rcu, __dev_map_entry_free);
		}
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);
}

static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i, cpu;

	switch (event) {
	case NETDEV_REGISTER:
		if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
			break;

		/* will be freed in free_netdev() */
		netdev->xdp_bulkq = __alloc_percpu_gfp(sizeof(struct xdp_dev_bulk_queue),
						       sizeof(void *), GFP_ATOMIC);
		if (!netdev->xdp_bulkq)
			return NOTIFY_BAD;

		for_each_possible_cpu(cpu)
			per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
		break;
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed because
		 * dev_map_list is an RCU list AND to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
				dev_map_hash_remove_netdev(dtab, netdev);
				continue;
			}

			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = READ_ONCE(dtab->netdev_map[i]);
				if (!dev || netdev != dev->dev)
					continue;
				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	int cpu;

	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
	return 0;
}

subsys_initcall(dev_map_init);