// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 */
/* A devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
 * spent some effort to ensure the datapath with redirect maps does not use
 * any locking. This is a quick note on the details.
 *
 * We have three possible paths to get into the devmap control plane: bpf
 * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall
 * will invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side, xchg() is used to modify the
 * netdev_map array. Then because the datapath does a lookup into the netdev_map
 * array (read-only) from an RCU critical section, we use call_rcu() to wait for
 * an RCU grace period before freeing the old data structures. This ensures the
 * datapath always has a valid copy. However, the datapath does a "flush"
 * operation that pushes any pending packets in the driver outside the RCU
 * critical section. Each bpf_dtab_netdev tracks these pending operations using
 * a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed until
 * this list is empty, indicating all outstanding flush operations have
 * completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search the map structure for entries that contain
 * a reference to the net device being removed. This is a two step process:
 * (a) dereference the bpf_dtab_netdev object in netdev_map and (b) check
 * whether its ifindex is the same as the net_device being removed. When
 * removing the dev, a cmpxchg() is used to ensure the correct dev is removed;
 * in the case of a concurrent update or delete operation it is possible that
 * the initially referenced dev is no longer in the map. As the notifier hook
 * walks the map we know that new dev references cannot be added by the user
 * because core infrastructure ensures dev_get_by_index() calls will fail at
 * this point.
 *
 * The devmap_hash type is a map type which interprets keys as ifindexes and
 * indexes these using a hashmap. This allows maps that use ifindex as key to
 * be densely packed instead of having holes in the lookup array for unused
 * ifindexes. The setup and packet enqueue/send code is shared between the two
 * types of devmap; only the lookup and insertion is different.
 */
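/* For illustration, a minimal BPF-side user of this map type. This is a
 * sketch assuming libbpf BTF-style map definitions; "tx_ports" and the fixed
 * key are hypothetical and not part of this file:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));
 *		__uint(max_entries, 64);
 *	} tx_ports SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_redirect_prog(struct xdp_md *ctx)
 *	{
 *		__u32 key = 0;	// slot filled in by the control plane
 *
 *		return bpf_redirect_map(&tx_ports, key, 0);
 *	}
 */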
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>
#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
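/* These flags are supplied at map creation time. A raw bpf(2) sketch from
 * user space (illustrative values; needs <unistd.h>, <sys/syscall.h> and
 * <linux/bpf.h>; error handling omitted):
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_DEVMAP,
 *		.key_size    = 4,		// u32 slot index
 *		.value_size  = 4,		// u32 ifindex
 *		.max_entries = 64,
 *		.map_flags   = BPF_F_NUMA_NODE,
 *		.numa_node   = 0,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */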
struct xdp_dev_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct net_device *dev;
	struct net_device *dev_rx;
	unsigned int count;
};
struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct hlist_node index_hlist;
	struct bpf_dtab *dtab;
	struct bpf_prog *xdp_prog;
	struct rcu_head rcu;
	unsigned int idx;
	struct bpf_devmap_val val;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */
	struct list_head list;

	/* these are only used for DEVMAP_HASH type maps */
	struct hlist_head *dev_index_head;
	spinlock_t index_lock;
	unsigned int items;
	u32 n_buckets;
};
static DEFINE_PER_CPU(struct list_head, dev_flush_list);
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);
static struct hlist_head *dev_map_create_hash(unsigned int entries,
					      int numa_node)
{
	int i;
	struct hlist_head *hash;

	hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node);
	if (hash != NULL)
		for (i = 0; i < entries; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}
static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
						    int idx)
{
	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
}
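/* Since n_buckets is always a power of two (see dev_map_init_map() below),
 * "idx % n_buckets" reduces to the "idx & (n_buckets - 1)" mask used above.
 * A worked example with illustrative numbers: for max_entries = 100,
 * roundup_pow_of_two(100) = 128, so key 300 lands in bucket 300 & 127 = 44.
 */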
static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
	u32 valsize = attr->value_size;
	u64 cost = 0;
	int err;

	/* check sanity of attributes. 2 value sizes supported:
	 * 4 bytes: ifindex
	 * 8 bytes: ifindex + prog fd
	 */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
	     valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
	    attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return -EINVAL;

	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
	 * verifier prevents writes from the BPF side
	 */
	attr->map_flags |= BPF_F_RDONLY_PROG;

	bpf_map_init_from_attr(&dtab->map, attr);

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);

		if (!dtab->n_buckets) /* Overflow check */
			return -EINVAL;
		cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets;
	} else {
		cost += (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
	}

	/* if map size is larger than memlock limit, reject it */
	err = bpf_map_charge_init(&dtab->map.memory, cost);
	if (err)
		return -EINVAL;

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
							   dtab->map.numa_node);
		if (!dtab->dev_index_head)
			goto free_charge;

		spin_lock_init(&dtab->index_lock);
	} else {
		dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
						      sizeof(struct bpf_dtab_netdev *),
						      dtab->map.numa_node);
		if (!dtab->netdev_map)
			goto free_charge;
	}

	return 0;

free_charge:
	bpf_map_charge_finish(&dtab->map.memory);
	return -ENOMEM;
}
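/* For reference, the two accepted value sizes correspond to the UAPI layout
 * of struct bpf_devmap_val (include/uapi/linux/bpf.h):
 *
 *	struct bpf_devmap_val {
 *		__u32 ifindex;	// device index
 *		union {
 *			int   fd;	// prog fd on map write
 *			__u32 id;	// prog id on map read
 *		} bpf_prog;
 *	};
 *
 * offsetofend(struct bpf_devmap_val, ifindex) == 4 selects the ifindex-only
 * format; offsetofend(struct bpf_devmap_val, bpf_prog.fd) == 8 additionally
 * carries a per-entry program fd.
 */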
static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	dtab = kzalloc(sizeof(*dtab), GFP_USER);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	err = dev_map_init_map(dtab, attr);
	if (err) {
		kfree(dtab);
		return ERR_PTR(err);
	}

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
}
static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map)
	 * were disconnected from events. The following synchronize_rcu()
	 * guarantees that both rcu read critical sections complete and waits
	 * for preempt-disable regions (NAPI being the relevant context here)
	 * so we are certain there will be no further reads against the
	 * netdev_map and all flush operations are complete. Flush operations
	 * can only be done from NAPI context for this reason.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	bpf_clear_redirect_map(map);
	synchronize_rcu();

	/* Make sure prior __dev_map_entry_free() calls have completed. */
	rcu_barrier();

	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		for (i = 0; i < dtab->n_buckets; i++) {
			struct bpf_dtab_netdev *dev;
			struct hlist_head *head;
			struct hlist_node *next;

			head = dev_map_index_hash(dtab, i);

			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
				hlist_del_rcu(&dev->index_hlist);
				if (dev->xdp_prog)
					bpf_prog_put(dev->xdp_prog);
				dev_put(dev->dev);
				kfree(dev);
			}
		}

		bpf_map_area_free(dtab->dev_index_head);
	} else {
		for (i = 0; i < dtab->map.max_entries; i++) {
			struct bpf_dtab_netdev *dev;

			dev = dtab->netdev_map[i];
			if (!dev)
				continue;

			if (dev->xdp_prog)
				bpf_prog_put(dev->xdp_prog);
			dev_put(dev->dev);
			kfree(dev);
		}

		bpf_map_area_free(dtab->netdev_map);
	}

	kfree(dtab);
}
static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}
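/* A user space sketch of walking the keys via the operation above (assuming
 * a libbpf environment and a devmap fd obtained elsewhere; names are
 * hypothetical). Passing a NULL key yields the first key:
 *
 *	__u32 key, next;
 *	int err = bpf_map_get_next_key(map_fd, NULL, &next);
 *
 *	while (!err) {
 *		key = next;
 *		// ... use key, e.g. bpf_map_lookup_elem(map_fd, &key, &val)
 *		err = bpf_map_get_next_key(map_fd, &key, &next);
 *	}
 */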
struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct hlist_head *head = dev_map_index_hash(dtab, key);
	struct bpf_dtab_netdev *dev;

	hlist_for_each_entry_rcu(dev, head, index_hlist,
				 lockdep_is_held(&dtab->index_lock))
		if (dev->idx == key)
			return dev;

	return NULL;
}
static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
				     void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 idx, *next = next_key;
	struct bpf_dtab_netdev *dev, *next_dev;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first;

	idx = *(u32 *)key;

	dev = __dev_map_hash_lookup_elem(map, idx);
	if (!dev)
		goto find_first;

	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
				    struct bpf_dtab_netdev, index_hlist);

	if (next_dev) {
		*next = next_dev->idx;
		return 0;
	}

	i = idx & (dtab->n_buckets - 1);
	i++;

find_first:
	for (; i < dtab->n_buckets; i++) {
		head = dev_map_index_hash(dtab, i);

		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					    struct bpf_dtab_netdev,
					    index_hlist);
		if (next_dev) {
			*next = next_dev->idx;
			return 0;
		}
	}

	return -ENOENT;
}
bool dev_map_can_have_prog(struct bpf_map *map)
{
	if ((map->map_type == BPF_MAP_TYPE_DEVMAP ||
	     map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) &&
	    map->value_size != offsetofend(struct bpf_devmap_val, ifindex))
		return true;

	return false;
}
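/* A control plane sketch of installing such a per-entry program (hypothetical
 * fds; the program must be loaded as BPF_PROG_TYPE_XDP with
 * expected_attach_type BPF_XDP_DEVMAP, see __dev_map_alloc_node() below):
 *
 *	struct bpf_devmap_val val = {
 *		.ifindex     = out_ifindex,
 *		.bpf_prog.fd = devmap_prog_fd,
 *	};
 *	__u32 key = 0;
 *
 *	bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
 */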
static int bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
{
	struct net_device *dev = bq->dev;
	int sent = 0, drops = 0, err = 0;
	int i;

	if (unlikely(!bq->count))
		return 0;

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
	if (sent < 0) {
		err = sent;
		sent = 0;
		goto error;
	}
	drops = bq->count - sent;
out:
	bq->count = 0;

	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, drops, err);
	bq->dev_rx = NULL;
	__list_del_clearprev(&bq->flush_node);
	return 0;
error:
	/* If ndo_xdp_xmit fails with an errno, no frames have been
	 * xmit'ed and it's our responsibility to free them all.
	 */
	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		xdp_return_frame_rx_napi(xdpf);
		drops++;
	}
	goto out;
}
/* __dev_flush is called from xdp_do_flush(), which _must_ be called by the
 * driver before returning from its napi->poll() routine. The poll() routine
 * is called either from busy_poll context or net_rx_action signaled from
 * NET_RX_SOFTIRQ. Either way the poll routine must complete before the net
 * device can be torn down. On devmap tear down we ensure the flush list is
 * empty before completing, so that all flush operations have finished.
 * When drivers update the bpf program they may need to ensure any flush ops
 * are also complete. Using synchronize_rcu or call_rcu will suffice for this
 * because both wait for NAPI context to exit.
 */
void __dev_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node)
		bq_xmit_all(bq, XDP_XMIT_FLUSH);
}
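/* A condensed sketch of the driver contract described above (hypothetical
 * driver names; not a real driver): every napi->poll() that may have
 * redirected frames into a devmap must call xdp_do_flush() before returning,
 * so the per-cpu bulk queues drained here never outlive the NAPI run.
 *
 *	static int mydrv_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = mydrv_clean_rx(napi, budget); // may bq_enqueue()
 *
 *		xdp_do_flush();	// drains dev_flush_list for this CPU
 *		if (work < budget)
 *			napi_complete_done(napi, work);
 *		return work;
 *	}
 */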
/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete
 * and/or update happens in parallel here, a dev_put won't happen until after
 * reading the ifindex.
 */
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = READ_ONCE(dtab->netdev_map[key]);
	return obj;
}
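/* BPF-side lookups reach this path too; since the map is forced to
 * BPF_F_RDONLY_PROG at creation (see dev_map_init_map()), the value is
 * read-only there. A sketch for kernels where devmap lookups from BPF
 * programs return the bpf_devmap_val (map "tx_ports" is hypothetical):
 *
 *	SEC("xdp")
 *	int xdp_check_and_redirect(struct xdp_md *ctx)
 *	{
 *		struct bpf_devmap_val *val;
 *		__u32 key = 0;
 *
 *		val = bpf_map_lookup_elem(&tx_ports, &key);
 *		if (val && val->ifindex)
 *			return bpf_redirect_map(&tx_ports, key, 0);
 *		return XDP_PASS;
 *	}
 */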
/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static int bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		      struct net_device *dev_rx)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(bq, 0);

	/* Ingress dev_rx will be the same for all xdp_frame's in bulk_queue,
	 * because bq is stored per-CPU and must be flushed from the
	 * net_device driver's NAPI handler before it returns.
	 */
	if (!bq->dev_rx)
		bq->dev_rx = dev_rx;

	bq->q[bq->count++] = xdpf;

	if (!bq->flush_node.prev)
		list_add(&bq->flush_node, flush_list);

	return 0;
}
static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
				struct net_device *dev_rx)
{
	struct xdp_frame *xdpf;
	int err;

	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
	if (unlikely(err))
		return err;

	xdpf = xdp_convert_buff_to_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	return bq_enqueue(dev, xdpf, dev_rx);
}
static struct xdp_buff *dev_map_run_prog(struct net_device *dev,
					 struct xdp_buff *xdp,
					 struct bpf_prog *xdp_prog)
{
	struct xdp_txq_info txq = { .dev = dev };
	u32 act;

	xdp_set_data_meta_invalid(xdp);
	xdp->txq = &txq;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		return xdp;
	case XDP_DROP:
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(dev, xdp_prog, act);
		break;
	}

	xdp_return_buff(xdp);
	return NULL;
}
int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return __xdp_enqueue(dev, xdp, dev_rx);
}
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;

	if (dst->xdp_prog) {
		xdp = dev_map_run_prog(dev, xdp, dst->xdp_prog);
		if (!xdp)
			return 0;
	}
	return __xdp_enqueue(dev, xdp, dev_rx);
}
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog)
{
	int err;

	err = xdp_ok_fwd_dev(dst->dev, skb->len);
	if (unlikely(err))
		return err;
	skb->dev = dst->dev;
	generic_xdp_tx(skb, xdp_prog);

	return 0;
}
static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);

	return obj ? &obj->val : NULL;
}
static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
								 *(u32 *)key);
	return obj ? &obj->val : NULL;
}
static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	if (dev->xdp_prog)
		bpf_prog_put(dev->xdp_prog);
	dev_put(dev->dev);
	kfree(dev);
}
static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	/* Use call_rcu() here to ensure any rcu critical sections have
	 * completed as well as any flush operations, because call_rcu will
	 * wait for the preempt-disable region to complete, NAPI in this
	 * context. Additionally, the driver tear down ensures all soft irqs
	 * are complete before the net device is removed once the final
	 * dev_put() drops the refcount to zero.
	 */
	old_dev = xchg(&dtab->netdev_map[k], NULL);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	return 0;
}
static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, k);
	if (old_dev) {
		dtab->items--;
		hlist_del_init_rcu(&old_dev->index_hlist);
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
		ret = 0;
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	return ret;
}
static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
						    struct bpf_dtab *dtab,
						    struct bpf_devmap_val *val,
						    unsigned int idx)
{
	struct bpf_prog *prog = NULL;
	struct bpf_dtab_netdev *dev;

	dev = kmalloc_node(sizeof(*dev), GFP_ATOMIC | __GFP_NOWARN,
			   dtab->map.numa_node);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->dev = dev_get_by_index(net, val->ifindex);
	if (!dev->dev)
		goto err_out;

	if (val->bpf_prog.fd > 0) {
		prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
					     BPF_PROG_TYPE_XDP, false);
		if (IS_ERR(prog))
			goto err_put_dev;
		if (prog->expected_attach_type != BPF_XDP_DEVMAP)
			goto err_put_prog;
	}

	dev->idx = idx;
	dev->dtab = dtab;
	if (prog) {
		dev->xdp_prog = prog;
		dev->val.bpf_prog.id = prog->aux->id;
	} else {
		dev->xdp_prog = NULL;
		dev->val.bpf_prog.id = 0;
	}
	dev->val.ifindex = val->ifindex;

	return dev;
err_put_prog:
	bpf_prog_put(prog);
err_put_dev:
	dev_put(dev->dev);
err_out:
	kfree(dev);
	return ERR_PTR(-EINVAL);
}
static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 i = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (!val.ifindex) {
		dev = NULL;
		/* can not specify fd if ifindex is 0 */
		if (val.bpf_prog.fd > 0)
			return -EINVAL;
	} else {
		dev = __dev_map_alloc_node(net, dtab, &val, i);
		if (IS_ERR(dev))
			return PTR_ERR(dev);
	}

	/* Use call_rcu() here to ensure rcu critical sections have completed,
	 * remembering that the driver side flush operation will happen before
	 * the net device is removed.
	 */
	old_dev = xchg(&dtab->netdev_map[i], dev);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}
static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	return __dev_map_update_elem(current->nsproxy->net_ns,
				     map, key, value, map_flags);
}
static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
				      void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 idx = *(u32 *)key;
	unsigned long flags;
	int err = -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
		return -EINVAL;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, idx);
	if (old_dev && (map_flags & BPF_NOEXIST))
		goto out_err;

	dev = __dev_map_alloc_node(net, dtab, &val, idx);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		goto out_err;
	}

	if (old_dev) {
		hlist_del_rcu(&old_dev->index_hlist);
	} else {
		if (dtab->items >= dtab->map.max_entries) {
			spin_unlock_irqrestore(&dtab->index_lock, flags);
			call_rcu(&dev->rcu, __dev_map_entry_free);
			return -E2BIG;
		}
		dtab->items++;
	}

	hlist_add_head_rcu(&dev->index_hlist,
			   dev_map_index_hash(dtab, idx));
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;

out_err:
	spin_unlock_irqrestore(&dtab->index_lock, flags);
	return err;
}
static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
					  map, key, value, map_flags);
}
const struct bpf_map_ops dev_map_ops = {
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
	.map_check_btf = map_check_no_btf,
};
const struct bpf_map_ops dev_map_hash_ops = {
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_hash_get_next_key,
	.map_lookup_elem = dev_map_hash_lookup_elem,
	.map_update_elem = dev_map_hash_update_elem,
	.map_delete_elem = dev_map_hash_delete_elem,
	.map_check_btf = map_check_no_btf,
};
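/* A user space sketch (hypothetical fds and ifindexes) of why DEVMAP_HASH
 * exists: entries can be keyed directly by ifindex, so a sparse set of large
 * ifindexes wastes no slots, unlike the array-based DEVMAP.
 *
 *	__u32 ifindex = if_nametoindex("eth3");	// e.g. 17
 *	struct bpf_devmap_val val = { .ifindex = ifindex };
 *
 *	// key == ifindex, no dense slot numbering required
 *	bpf_map_update_elem(hash_map_fd, &ifindex, &val, BPF_ANY);
 */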
static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
				       struct net_device *netdev)
{
	unsigned long flags;
	u32 i;

	spin_lock_irqsave(&dtab->index_lock, flags);
	for (i = 0; i < dtab->n_buckets; i++) {
		struct bpf_dtab_netdev *dev;
		struct hlist_head *head;
		struct hlist_node *next;

		head = dev_map_index_hash(dtab, i);

		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
			if (netdev != dev->dev)
				continue;

			dtab->items--;
			hlist_del_rcu(&dev->index_hlist);
			call_rcu(&dev->rcu, __dev_map_entry_free);
		}
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);
}
static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i, cpu;

	switch (event) {
	case NETDEV_REGISTER:
		if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
			break;

		/* will be freed in free_netdev() */
		netdev->xdp_bulkq =
			__alloc_percpu_gfp(sizeof(struct xdp_dev_bulk_queue),
					   sizeof(void *), GFP_ATOMIC);
		if (!netdev->xdp_bulkq)
			return NOTIFY_BAD;

		for_each_possible_cpu(cpu)
			per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
		break;
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed because
		 * dev_map_list is an RCU list AND to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
				dev_map_hash_remove_netdev(dtab, netdev);
				continue;
			}

			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = READ_ONCE(dtab->netdev_map[i]);
				if (!dev || netdev != dev->dev)
					continue;
				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};
static int __init dev_map_init(void)
{
	int cpu;

	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
	return 0;
}

subsys_initcall(dev_map_init);