/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General Public License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/proc_ns.h>
#include <linux/rtnetlink.h>
#include <linux/rwsem.h>

/* Protects bpf_prog_offload_devs, bpf_map_offload_devs and offload members
 * of all progs.
 * RTNL lock cannot be taken when holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);
static LIST_HEAD(bpf_prog_offload_devs);
static LIST_HEAD(bpf_map_offload_devs);

static int bpf_dev_offload_check(struct net_device *netdev)
{
	if (!netdev)
		return -EINVAL;
	if (!netdev->netdev_ops->ndo_bpf)
		return -EOPNOTSUPP;
	return 0;
}

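/* Set up offload state when a device-bound program is loaded: resolve
 * attr->prog_ifindex in the loading task's netns, make sure the device
 * supports ->ndo_bpf, and link the program onto bpf_prog_offload_devs.
 */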
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
{
	struct bpf_prog_offload *offload;
	int err;

	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_type != BPF_PROG_TYPE_XDP)
		return -EINVAL;

	if (attr->prog_flags)
		return -EINVAL;

	offload = kzalloc(sizeof(*offload), GFP_USER);
	if (!offload)
		return -ENOMEM;

	offload->prog = prog;

	offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
					   attr->prog_ifindex);
	err = bpf_dev_offload_check(offload->netdev);
	if (err)
		goto err_maybe_put;

	down_write(&bpf_devs_lock);
	if (offload->netdev->reg_state != NETREG_REGISTERED) {
		err = -EINVAL;
		goto err_unlock;
	}
	prog->aux->offload = offload;
	list_add_tail(&offload->offloads, &bpf_prog_offload_devs);
	dev_put(offload->netdev);
	up_write(&bpf_devs_lock);

	return 0;
err_unlock:
	up_write(&bpf_devs_lock);
err_maybe_put:
	if (offload->netdev)
		dev_put(offload->netdev);
	kfree(offload);
	return err;
}

static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
			     struct netdev_bpf *data)
{
	struct bpf_prog_offload *offload = prog->aux->offload;
	struct net_device *netdev;

	ASSERT_RTNL();

	if (!offload)
		return -ENODEV;
	netdev = offload->netdev;

	data->command = cmd;

	return netdev->netdev_ops->ndo_bpf(netdev, data);
}

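/* Fetch the device's verifier callbacks before verification begins.
 * dev_state records that the device has accepted the program, so the
 * destroy path knows a BPF_OFFLOAD_DESTROY call is required.
 */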
int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
{
	struct netdev_bpf data = {};
	int err;

	data.verifier.prog = env->prog;

	rtnl_lock();
	err = __bpf_offload_ndo(env->prog, BPF_OFFLOAD_VERIFIER_PREP, &data);
	if (err)
		goto exit_unlock;

	env->prog->aux->offload->dev_ops = data.verifier.ops;
	env->prog->aux->offload->dev_state = true;
exit_unlock:
	rtnl_unlock();
	return err;
}

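/* Per-instruction verifier hook; runs under bpf_devs_lock so the offload
 * cannot be torn down by device unregistration while the callback runs.
 */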
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload)
		ret = offload->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
	up_read(&bpf_devs_lock);

	return ret;
}

static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload = prog->aux->offload;
	struct netdev_bpf data = {};

	data.offload.prog = prog;

	if (offload->dev_state)
		WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data));

	/* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
	bpf_prog_free_id(prog, true);

	list_del_init(&offload->offloads);
	kfree(offload);
	prog->aux->offload = NULL;
}

void bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (prog->aux->offload)
		__bpf_prog_offload_destroy(prog);
	up_write(&bpf_devs_lock);
	rtnl_unlock();
}

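/* Once the verifier is done the image is built by the device itself;
 * the host never executes the program, so compile() points ->bpf_func
 * at a stub that only warns.
 */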
static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
	struct netdev_bpf data = {};
	int ret;

	data.offload.prog = prog;

	rtnl_lock();
	ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
	rtnl_unlock();

	return ret;
}

static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}

int bpf_prog_offload_compile(struct bpf_prog *prog)
{
	prog->bpf_func = bpf_prog_warn_on_exec;

	return bpf_prog_offload_translate(prog);
}

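/* User space identifies the offload device by ifindex plus network
 * namespace; the fill_ns() callbacks below resolve the netns via
 * ns_get_path_cb() so info_fill() can report it as a
 * (netns_dev, netns_ino) pair.
 */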
struct ns_get_path_bpf_prog_args {
	struct bpf_prog *prog;
	struct bpf_prog_info *info;
};

static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_prog_args *args = private_data;
	struct bpf_prog_aux *aux = args->prog->aux;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (aux->offload) {
		args->info->ifindex = aux->offload->netdev->ifindex;
		net = dev_net(aux->offload->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog)
{
	struct ns_get_path_bpf_prog_args args = {
		.prog	= prog,
		.info	= info,
	};
	struct bpf_prog_aux *aux = prog->aux;
	struct inode *ns_inode;
	struct path ns_path;
	char __user *uinsns;
	void *res;
	u32 ulen;

	res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
	if (IS_ERR(res)) {
		if (!info->ifindex)
			return -ENODEV;
		return PTR_ERR(res);
	}

	down_read(&bpf_devs_lock);

	if (!aux->offload) {
		up_read(&bpf_devs_lock);
		return -ENODEV;
	}

	ulen = info->jited_prog_len;
	info->jited_prog_len = aux->offload->jited_len;
	if (info->jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info->jited_prog_insns);
		ulen = min_t(u32, info->jited_prog_len, ulen);
		if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
			up_read(&bpf_devs_lock);
			return -EFAULT;
		}
	}

	up_read(&bpf_devs_lock);

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

const struct bpf_prog_ops bpf_offload_prog_ops = {
};

static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
			       enum bpf_netdev_command cmd)
{
	struct netdev_bpf data = {};
	struct net_device *netdev;

	ASSERT_RTNL();

	data.command = cmd;
	data.offmap = offmap;
	/* Caller must make sure netdev is valid */
	netdev = offmap->netdev;

	return netdev->netdev_ops->ndo_bpf(netdev, &data);
}

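/* A device-bound map keeps its data on the device; the host-side
 * struct bpf_offloaded_map is only a handle, and all accesses go
 * through ->ndo_bpf and the dev_ops the driver installs when it
 * services BPF_OFFLOAD_MAP_ALLOC.
 */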
struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	struct net *net = current->nsproxy->net_ns;
	struct bpf_offloaded_map *offmap;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_type != BPF_MAP_TYPE_HASH)
		return ERR_PTR(-EINVAL);

	offmap = kzalloc(sizeof(*offmap), GFP_USER);
	if (!offmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&offmap->map, attr);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
	err = bpf_dev_offload_check(offmap->netdev);
	if (err)
		goto err_unlock;

	err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
	if (err)
		goto err_unlock;

	list_add_tail(&offmap->offloads, &bpf_map_offload_devs);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	return &offmap->map;

err_unlock:
	up_write(&bpf_devs_lock);
	rtnl_unlock();
	kfree(offmap);
	return ERR_PTR(err);
}

static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{
	WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
	bpf_map_free_id(&offmap->map, true);
	list_del_init(&offmap->offloads);
	offmap->netdev = NULL;
}

void bpf_map_offload_map_free(struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (offmap->netdev)
		__bpf_map_offload_destroy(offmap);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	kfree(offmap);
}

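/* Element accessors proxy to the device under bpf_devs_lock;
 * offmap->netdev is cleared on unregister, so operations on an
 * orphaned map fail with -ENODEV.
 */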
int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_update_elem(offmap, key, value,
						       flags);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_delete_elem(offmap, key);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
	up_read(&bpf_devs_lock);

	return ret;
}

struct ns_get_path_bpf_map_args {
	struct bpf_offloaded_map *offmap;
	struct bpf_map_info *info;
};

static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_map_args *args = private_data;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (args->offmap->netdev) {
		args->info->ifindex = args->offmap->netdev->ifindex;
		net = dev_net(args->offmap->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
	struct ns_get_path_bpf_map_args args = {
		.offmap	= map_to_offmap(map),
		.info	= info,
	};
	struct inode *ns_inode;
	struct path ns_path;
	void *res;

	res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
	if (IS_ERR(res)) {
		if (!info->ifindex)
			return -ENODEV;
		return PTR_ERR(res);
	}

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap;
	struct bpf_prog_offload *offload;
	bool ret;

	if (!bpf_prog_is_dev_bound(prog->aux) || !bpf_map_is_dev_bound(map))
		return false;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	offmap = map_to_offmap(map);

	ret = offload && offload->netdev == offmap->netdev;
	up_read(&bpf_devs_lock);

	return ret;
}

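/* On NETDEV_UNREGISTER every program and map bound to the disappearing
 * device is orphaned; the walks below run with bpf_devs_lock held for
 * writing.
 */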
static void bpf_offload_orphan_all_progs(struct net_device *netdev)
{
	struct bpf_prog_offload *offload, *tmp;

	list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs, offloads)
		if (offload->netdev == netdev)
			__bpf_prog_offload_destroy(offload->prog);
}

static void bpf_offload_orphan_all_maps(struct net_device *netdev)
{
	struct bpf_offloaded_map *offmap, *tmp;

	list_for_each_entry_safe(offmap, tmp, &bpf_map_offload_devs, offloads)
		if (offmap->netdev == netdev)
			__bpf_map_offload_destroy(offmap);
}

static int bpf_offload_notification(struct notifier_block *notifier,
				    ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_UNREGISTER:
		/* ignore namespace changes */
		if (netdev->reg_state != NETREG_UNREGISTERING)
			break;

		down_write(&bpf_devs_lock);
		bpf_offload_orphan_all_progs(netdev);
		bpf_offload_orphan_all_maps(netdev);
		up_write(&bpf_devs_lock);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block bpf_offload_notifier = {
	.notifier_call = bpf_offload_notification,
};

static int __init bpf_offload_init(void)
{
	register_netdevice_notifier(&bpf_offload_notifier);

	return 0;
}

subsys_initcall(bpf_offload_init);