/* Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/proc_ns.h>
#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
#include <linux/rwsem.h>
#include <net/xdp.h>

/* Protects offdevs, members of bpf_offload_netdev and offload members
 * of all progs.
 * RTNL lock cannot be taken when holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);

struct bpf_offload_dev {
	const struct bpf_prog_offload_ops *ops;
	struct list_head netdevs;
	void *priv;
};

struct bpf_offload_netdev {
	struct rhash_head l;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev; /* NULL when bound-only */
	struct list_head progs;
	struct list_head maps;
	struct list_head offdev_netdevs;
};
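
/* All offload-capable or dev-bound netdevs are tracked in the offdevs
 * hashtable below, keyed by the struct net_device pointer itself.
 */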

static const struct rhashtable_params offdevs_params = {
	.key_len		= sizeof(struct net_device *),
	.key_offset		= offsetof(struct bpf_offload_netdev, netdev),
	.head_offset		= offsetof(struct bpf_offload_netdev, l),
	.automatic_shrinking	= true,
};

static struct rhashtable offdevs;

static int bpf_dev_offload_check(struct net_device *netdev)
{
	if (!netdev)
		return -EINVAL;
	if (!netdev->netdev_ops->ndo_bpf)
		return -EOPNOTSUPP;
	return 0;
}

static struct bpf_offload_netdev *
bpf_offload_find_netdev(struct net_device *netdev)
{
	lockdep_assert_held(&bpf_devs_lock);

	return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
}

static int __bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
					     struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev;
	int err;

	ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
	if (!ondev)
		return -ENOMEM;

	ondev->netdev = netdev;
	ondev->offdev = offdev;
	INIT_LIST_HEAD(&ondev->progs);
	INIT_LIST_HEAD(&ondev->maps);

	err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);
	if (err) {
		netdev_warn(netdev, "failed to register for BPF offload\n");
		goto err_free;
	}

	if (offdev)
		list_add(&ondev->offdev_netdevs, &offdev->netdevs);
	return 0;

err_free:
	kfree(ondev);
	return err;
}
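
/* Callers hold bpf_devs_lock for writing; the driver's ->destroy()
 * callback is only invoked if ->prepare() previously succeeded
 * (dev_state is set on successful verifier prep).
 */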

static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload = prog->aux->offload;

	if (offload->dev_state)
		offload->offdev->ops->destroy(prog);

	list_del_init(&offload->offloads);
	kfree(offload);
	prog->aux->offload = NULL;
}

static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
			       enum bpf_netdev_command cmd)
{
	struct netdev_bpf data = {};
	struct net_device *netdev;

	ASSERT_RTNL();

	data.command = cmd;
	data.offmap = offmap;
	/* Caller must make sure netdev is valid */
	netdev = offmap->netdev;

	return netdev->netdev_ops->ndo_bpf(netdev, &data);
}

static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{
	WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
	bpf_map_free_id(&offmap->map);
	list_del_init(&offmap->offloads);
	offmap->netdev = NULL;
}

static void __bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
						struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev, *altdev = NULL;
	struct bpf_offloaded_map *offmap, *mtmp;
	struct bpf_prog_offload *offload, *ptmp;

	ASSERT_RTNL();

	ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
	if (WARN_ON(!ondev))
		return;

	WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));

	/* Try to move the objects to another netdev of the device */
	if (offdev) {
		list_del(&ondev->offdev_netdevs);
		altdev = list_first_entry_or_null(&offdev->netdevs,
						  struct bpf_offload_netdev,
						  offdev_netdevs);
	}
	if (altdev) {
		list_for_each_entry(offload, &ondev->progs, offloads)
			offload->netdev = altdev->netdev;
		list_splice_init(&ondev->progs, &altdev->progs);

		list_for_each_entry(offmap, &ondev->maps, offloads)
			offmap->netdev = altdev->netdev;
		list_splice_init(&ondev->maps, &altdev->maps);
	} else {
		list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
			__bpf_prog_offload_destroy(offload->prog);
		list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
			__bpf_map_offload_destroy(offmap);
	}

	WARN_ON(!list_empty(&ondev->progs));
	WARN_ON(!list_empty(&ondev->maps));
	kfree(ondev);
}
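
/* Common setup for offloaded and dev-bound-only programs; called with
 * bpf_devs_lock held for writing.
 */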

static int __bpf_prog_dev_bound_init(struct bpf_prog *prog, struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev;
	struct bpf_prog_offload *offload;
	int err;

	offload = kzalloc(sizeof(*offload), GFP_USER);
	if (!offload)
		return -ENOMEM;

	offload->prog = prog;
	offload->netdev = netdev;

	ondev = bpf_offload_find_netdev(offload->netdev);
	/* When program is offloaded require presence of "true"
	 * bpf_offload_netdev, avoid the one created for !ondev case below.
	 */
	if (bpf_prog_is_offloaded(prog->aux) && (!ondev || !ondev->offdev)) {
		err = -EINVAL;
		goto err_free;
	}
	if (!ondev) {
		/* When only binding to the device, explicitly
		 * create an entry in the hashtable.
		 */
		err = __bpf_offload_dev_netdev_register(NULL, offload->netdev);
		if (err)
			goto err_free;
		ondev = bpf_offload_find_netdev(offload->netdev);
	}
	offload->offdev = ondev->offdev;
	prog->aux->offload = offload;
	list_add_tail(&offload->offloads, &ondev->progs);

	return 0;
err_free:
	kfree(offload);
	return err;
}
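
/* Entry point from the BPF_PROG_LOAD command for programs loaded with a
 * target ifindex: validates the flags, resolves the netdev and records
 * whether full offload (as opposed to dev-bound-only) was requested.
 */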

int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr)
{
	struct net_device *netdev;
	int err;

	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_type != BPF_PROG_TYPE_XDP)
		return -EINVAL;

	if (attr->prog_flags & ~(BPF_F_XDP_DEV_BOUND_ONLY | BPF_F_XDP_HAS_FRAGS))
		return -EINVAL;

	/* Frags are allowed only if program is dev-bound-only, but not
	 * if it is requesting bpf offload.
	 */
	if (attr->prog_flags & BPF_F_XDP_HAS_FRAGS &&
	    !(attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY))
		return -EINVAL;

	if (attr->prog_type == BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY)
		return -EINVAL;

	netdev = dev_get_by_index(current->nsproxy->net_ns, attr->prog_ifindex);
	if (!netdev)
		return -EINVAL;

	err = bpf_dev_offload_check(netdev);
	if (err)
		goto out;

	prog->aux->offload_requested = !(attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY);

	down_write(&bpf_devs_lock);
	err = __bpf_prog_dev_bound_init(prog, netdev);
	up_write(&bpf_devs_lock);

out:
	dev_put(netdev);
	return err;
}

int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog)
{
	int err;

	if (!bpf_prog_is_dev_bound(old_prog->aux))
		return 0;

	if (bpf_prog_is_offloaded(old_prog->aux))
		return -EINVAL;

	new_prog->aux->dev_bound = old_prog->aux->dev_bound;
	new_prog->aux->offload_requested = old_prog->aux->offload_requested;

	down_write(&bpf_devs_lock);
	if (!old_prog->aux->offload) {
		err = -EINVAL;
		goto out;
	}

	err = __bpf_prog_dev_bound_init(new_prog, old_prog->aux->offload->netdev);

out:
	up_write(&bpf_devs_lock);
	return err;
}
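
/* The verifier callbacks below run with bpf_devs_lock held for reading,
 * so the netdev cannot be unregistered from under the driver callbacks.
 */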

int bpf_prog_offload_verifier_prep(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload) {
		ret = offload->offdev->ops->prepare(prog);
		offload->dev_state = !ret;
	}
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->insn_hook(env, insn_idx,
						      prev_insn_idx);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (offload->offdev->ops->finalize)
			ret = offload->offdev->ops->finalize(env);
		else
			ret = 0;
	}
	up_read(&bpf_devs_lock);

	return ret;
}

void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn)
{
	const struct bpf_prog_offload_ops *ops;
	struct bpf_prog_offload *offload;
	int ret = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		ops = offload->offdev->ops;
		if (!offload->opt_failed && ops->replace_insn)
			ret = ops->replace_insn(env, off, insn);
		offload->opt_failed |= ret;
	}
	up_read(&bpf_devs_lock);
}

void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{
	struct bpf_prog_offload *offload;
	int ret = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (!offload->opt_failed && offload->offdev->ops->remove_insns)
			ret = offload->offdev->ops->remove_insns(env, off, cnt);
		offload->opt_failed |= ret;
	}
	up_read(&bpf_devs_lock);
}

void bpf_prog_dev_bound_destroy(struct bpf_prog *prog)
{
	struct bpf_offload_netdev *ondev;
	struct net_device *netdev;

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (prog->aux->offload) {
		list_del_init(&prog->aux->offload->offloads);

		netdev = prog->aux->offload->netdev;
		__bpf_prog_offload_destroy(prog);

		ondev = bpf_offload_find_netdev(netdev);
		if (!ondev->offdev && list_empty(&ondev->progs))
			__bpf_offload_dev_netdev_unregister(NULL, netdev);
	}
	up_write(&bpf_devs_lock);
	rtnl_unlock();
}

static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->translate(prog);
	up_read(&bpf_devs_lock);

	return ret;
}
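
/* Stub bpf_func installed for offloaded programs: the translated image
 * only exists on the device, so executing it on the host is a bug.
 */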

static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}

int bpf_prog_offload_compile(struct bpf_prog *prog)
{
	prog->bpf_func = bpf_prog_warn_on_exec;

	return bpf_prog_offload_translate(prog);
}

struct ns_get_path_bpf_prog_args {
	struct bpf_prog *prog;
	struct bpf_prog_info *info;
};

static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_prog_args *args = private_data;
	struct bpf_prog_aux *aux = args->prog->aux;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (aux->offload) {
		args->info->ifindex = aux->offload->netdev->ifindex;
		net = dev_net(aux->offload->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog)
{
	struct ns_get_path_bpf_prog_args args = {
		.prog	= prog,
		.info	= info,
	};
	struct bpf_prog_aux *aux = prog->aux;
	struct inode *ns_inode;
	struct path ns_path;
	char __user *uinsns;
	int res;
	u32 ulen;

	res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
	if (res) {
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	down_read(&bpf_devs_lock);

	if (!aux->offload) {
		up_read(&bpf_devs_lock);
		return -ENODEV;
	}

	ulen = info->jited_prog_len;
	info->jited_prog_len = aux->offload->jited_len;
	if (info->jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info->jited_prog_insns);
		ulen = min_t(u32, info->jited_prog_len, ulen);
		if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
			up_read(&bpf_devs_lock);
			return -EFAULT;
		}
	}

	up_read(&bpf_devs_lock);

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}
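
/* No host callbacks (e.g. test_run) are provided for offloaded programs. */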

const struct bpf_prog_ops bpf_offload_prog_ops = {
};

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	struct net *net = current->nsproxy->net_ns;
	struct bpf_offload_netdev *ondev;
	struct bpf_offloaded_map *offmap;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_type != BPF_MAP_TYPE_HASH)
		return ERR_PTR(-EINVAL);

	offmap = bpf_map_area_alloc(sizeof(*offmap), NUMA_NO_NODE);
	if (!offmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&offmap->map, attr);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
	err = bpf_dev_offload_check(offmap->netdev);
	if (err)
		goto err_unlock;

	ondev = bpf_offload_find_netdev(offmap->netdev);
	if (!ondev) {
		err = -EINVAL;
		goto err_unlock;
	}

	err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
	if (err)
		goto err_unlock;

	list_add_tail(&offmap->offloads, &ondev->maps);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	return &offmap->map;

err_unlock:
	up_write(&bpf_devs_lock);
	rtnl_unlock();
	bpf_map_area_free(offmap);
	return ERR_PTR(err);
}

void bpf_map_offload_map_free(struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (offmap->netdev)
		__bpf_map_offload_destroy(offmap);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	bpf_map_area_free(offmap);
}

u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map)
{
	/* The memory dynamically allocated in netdev dev_ops is not counted */
	return sizeof(struct bpf_offloaded_map);
}

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_update_elem(offmap, key, value,
						       flags);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_delete_elem(offmap, key);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
	up_read(&bpf_devs_lock);

	return ret;
}

struct ns_get_path_bpf_map_args {
	struct bpf_offloaded_map *offmap;
	struct bpf_map_info *info;
};

static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_map_args *args = private_data;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (args->offmap->netdev) {
		args->info->ifindex = args->offmap->netdev->ifindex;
		net = dev_net(args->offmap->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
	struct ns_get_path_bpf_map_args args = {
		.offmap	= map_to_offmap(map),
		.info	= info,
	};
	struct inode *ns_inode;
	struct path ns_path;
	int res;

	res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
	if (res) {
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

static bool __bpf_offload_dev_match(struct bpf_prog *prog,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev1, *ondev2;
	struct bpf_prog_offload *offload;

	if (!bpf_prog_is_dev_bound(prog->aux))
		return false;

	offload = prog->aux->offload;
	if (!offload)
		return false;
	if (offload->netdev == netdev)
		return true;

	ondev1 = bpf_offload_find_netdev(offload->netdev);
	ondev2 = bpf_offload_find_netdev(netdev);

	return ondev1 && ondev2 && ondev1->offdev == ondev2->offdev;
}
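
/* A program and a netdev "match" if the program's netdev and the target
 * netdev belong to the same offload device (e.g. two ports of one NIC).
 */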

bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev)
{
	bool ret;

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, netdev);
	up_read(&bpf_devs_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_match);

bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs)
{
	bool ret;

	if (bpf_prog_is_offloaded(lhs->aux) != bpf_prog_is_offloaded(rhs->aux))
		return false;

	down_read(&bpf_devs_lock);
	ret = lhs->aux->offload && rhs->aux->offload &&
	      lhs->aux->offload->netdev &&
	      lhs->aux->offload->netdev == rhs->aux->offload->netdev;
	up_read(&bpf_devs_lock);

	return ret;
}

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap;
	bool ret;

	if (!bpf_map_is_offloaded(map))
		return bpf_map_offload_neutral(map);
	offmap = map_to_offmap(map);

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, offmap->netdev);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev)
{
	int err;

	down_write(&bpf_devs_lock);
	err = __bpf_offload_dev_netdev_register(offdev, netdev);
	up_write(&bpf_devs_lock);

	return err;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);

void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev)
{
	down_write(&bpf_devs_lock);
	__bpf_offload_dev_netdev_unregister(offdev, netdev);
	up_write(&bpf_devs_lock);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv)
{
	struct bpf_offload_dev *offdev;

	offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
	if (!offdev)
		return ERR_PTR(-ENOMEM);

	offdev->ops = ops;
	offdev->priv = priv;
	INIT_LIST_HEAD(&offdev->netdevs);

	return offdev;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_create);

void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
{
	WARN_ON(!list_empty(&offdev->netdevs));
	kfree(offdev);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);

void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev)
{
	return offdev->priv;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_priv);

void bpf_dev_bound_netdev_unregister(struct net_device *dev)
{
	struct bpf_offload_netdev *ondev;

	ASSERT_RTNL();

	down_write(&bpf_devs_lock);
	ondev = bpf_offload_find_netdev(dev);
	if (ondev && !ondev->offdev)
		__bpf_offload_dev_netdev_unregister(NULL, ondev->netdev);
	up_write(&bpf_devs_lock);
}

int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
			      struct bpf_prog_aux *prog_aux)
{
	if (!bpf_prog_is_dev_bound(prog_aux)) {
		bpf_log(log, "metadata kfuncs require device-bound program\n");
		return -EINVAL;
	}

	if (bpf_prog_is_offloaded(prog_aux)) {
		bpf_log(log, "metadata kfuncs can't be offloaded\n");
		return -EINVAL;
	}

	return 0;
}
*prog
, u32 func_id
)
841 const struct xdp_metadata_ops
*ops
;
844 /* We don't hold bpf_devs_lock while resolving several
845 * kfuncs and can race with the unregister_netdevice().
846 * We rely on bpf_dev_bound_match() check at attach
847 * to render this program unusable.
849 down_read(&bpf_devs_lock
);
850 if (!prog
->aux
->offload
)
853 ops
= prog
->aux
->offload
->netdev
->xdp_metadata_ops
;
857 #define XDP_METADATA_KFUNC(name, _, __, xmo) \
858 if (func_id == bpf_xdp_metadata_kfunc_id(name)) p = ops->xmo;
859 XDP_METADATA_KFUNC_xxx
860 #undef XDP_METADATA_KFUNC
863 up_read(&bpf_devs_lock
);
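
/* Set up the offdevs hashtable early (core_initcall) so that netdev
 * registration and program load can rely on it.
 */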

static int __init bpf_offload_init(void)
{
	return rhashtable_init(&offdevs, &offdevs_params);
}

core_initcall(bpf_offload_init);