/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/rtnetlink.h>
#include <net/pkt_cls.h>

#include "netdevsim.h"
#define pr_vlog(env, fmt, ...)	\
	bpf_verifier_log_write(env, "[netdevsim] " fmt, ##__VA_ARGS__)
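
/*
 * Per-program verifier/offload state, kept while a program is bound to the
 * simulated device and exposed under the bpf_bound_progs debugfs directory.
 */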
struct nsim_bpf_bound_prog {
	struct nsim_dev *nsim_dev;
	struct bpf_prog *prog;
	struct dentry *ddir;
	const char *state;
	bool is_loaded;
	struct list_head l;
};

#define NSIM_BPF_MAX_KEYS		2
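
/*
 * Emulated offloaded map: a tiny key/value store of at most NSIM_BPF_MAX_KEYS
 * entries, protected by a mutex and linked on the device's bpf_bound_maps list.
 */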
struct nsim_bpf_bound_map {
	struct netdevsim *ns;
	struct bpf_offloaded_map *map;
	struct mutex mutex;
	struct nsim_map_entry {
		void *key;
		void *value;
	} entry[NSIM_BPF_MAX_KEYS];
	struct list_head l;
};

static int nsim_bpf_string_show(struct seq_file *file, void *data)
{
	const char **str = file->private;

	if (*str)
		seq_printf(file, "%s\n", *str);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(nsim_bpf_string);
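
/*
 * Verifier per-instruction callback: optionally sleeps on the first
 * instruction (bpf_bind_verifier_delay), logs a message on the last one and
 * fails verification if bpf_bind_verifier_accept is cleared in debugfs.
 */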
static int
nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn)
{
	struct nsim_bpf_bound_prog *state;
	int ret = 0;

	state = env->prog->aux->offload->dev_priv;
	if (state->nsim_dev->bpf_bind_verifier_delay && !insn_idx)
		msleep(state->nsim_dev->bpf_bind_verifier_delay);

	if (insn_idx == env->prog->len - 1) {
		pr_vlog(env, "Hello from netdevsim!\n");

		if (!state->nsim_dev->bpf_bind_verifier_accept)
			ret = -EOPNOTSUPP;
	}

	return ret;
}

static int nsim_bpf_finalize(struct bpf_verifier_env *env)
{
	return 0;
}

static bool nsim_xdp_offload_active(struct netdevsim *ns)
{
	return ns->xdp_hw.prog;
}

static void nsim_prog_set_loaded(struct bpf_prog *prog, bool loaded)
{
	struct nsim_bpf_bound_prog *state;

	if (!prog || !prog->aux->offload)
		return;

	state = prog->aux->offload->dev_priv;
	state->is_loaded = loaded;
}
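
/*
 * Transfer the "offloaded" role from the currently bound program to @prog,
 * keeping the per-program is_loaded flag and the debugfs-visible id in sync.
 */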
static int
nsim_bpf_offload(struct netdevsim *ns, struct bpf_prog *prog, bool oldprog)
{
	nsim_prog_set_loaded(ns->bpf_offloaded, false);

	WARN(!!ns->bpf_offloaded != oldprog,
	     "bad offload state, expected offload %sto be active",
	     oldprog ? "" : "not ");
	ns->bpf_offloaded = prog;
	ns->bpf_offloaded_id = prog ? prog->aux->id : 0;
	nsim_prog_set_loaded(prog, true);

	return 0;
}
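
/*
 * tc cls_bpf offload callback: validates the request against the debugfs
 * policy knobs before handing the program to nsim_bpf_offload().
 */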
int nsim_bpf_setup_tc_block_cb(enum tc_setup_type type,
			       void *type_data, void *cb_priv)
{
	struct tc_cls_bpf_offload *cls_bpf = type_data;
	struct bpf_prog *prog = cls_bpf->prog;
	struct netdevsim *ns = cb_priv;
	struct bpf_prog *oldprog;

	if (type != TC_SETUP_CLSBPF) {
		NSIM_EA(cls_bpf->common.extack,
			"only offload of BPF classifiers supported");
		return -EOPNOTSUPP;
	}

	if (!tc_cls_can_offload_and_chain0(ns->netdev, &cls_bpf->common))
		return -EOPNOTSUPP;

	if (cls_bpf->common.protocol != htons(ETH_P_ALL)) {
		NSIM_EA(cls_bpf->common.extack,
			"only ETH_P_ALL supported as filter protocol");
		return -EOPNOTSUPP;
	}

	if (!ns->bpf_tc_accept) {
		NSIM_EA(cls_bpf->common.extack,
			"netdevsim configured to reject BPF TC offload");
		return -EOPNOTSUPP;
	}
	/* Note: progs without skip_sw will probably not be dev bound */
	if (prog && !prog->aux->offload && !ns->bpf_tc_non_bound_accept) {
		NSIM_EA(cls_bpf->common.extack,
			"netdevsim configured to reject unbound programs");
		return -EOPNOTSUPP;
	}

	if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
		return -EOPNOTSUPP;

	oldprog = cls_bpf->oldprog;

	/* Don't remove if oldprog doesn't match driver's state */
	if (ns->bpf_offloaded != oldprog) {
		oldprog = NULL;
		if (!cls_bpf->prog)
			return 0;
		if (ns->bpf_offloaded) {
			NSIM_EA(cls_bpf->common.extack,
				"driver and netdev offload states mismatch");
			return -EBUSY;
		}
	}

	return nsim_bpf_offload(ns, cls_bpf->prog, oldprog);
}

int nsim_bpf_disable_tc(struct netdevsim *ns)
{
	if (ns->bpf_offloaded && !nsim_xdp_offload_active(ns))
		return -EBUSY;
	return 0;
}

static int nsim_xdp_offload_prog(struct netdevsim *ns, struct netdev_bpf *bpf)
{
	if (!nsim_xdp_offload_active(ns) && !bpf->prog)
		return 0;
	if (!nsim_xdp_offload_active(ns) && bpf->prog && ns->bpf_offloaded) {
		NSIM_EA(bpf->extack, "TC program is already loaded");
		return -EBUSY;
	}

	return nsim_bpf_offload(ns, bpf->prog, nsim_xdp_offload_active(ns));
}
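
/*
 * Attach an XDP program in driver or HW-offload mode, honouring the
 * bpf_xdpdrv_accept/bpf_xdpoffload_accept debugfs switches.
 */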
static int
nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf,
		  struct xdp_attachment_info *xdp)
{
	int err;

	if (bpf->command == XDP_SETUP_PROG && !ns->bpf_xdpdrv_accept) {
		NSIM_EA(bpf->extack, "driver XDP disabled in DebugFS");
		return -EOPNOTSUPP;
	}
	if (bpf->command == XDP_SETUP_PROG_HW && !ns->bpf_xdpoffload_accept) {
		NSIM_EA(bpf->extack, "XDP offload disabled in DebugFS");
		return -EOPNOTSUPP;
	}

	if (bpf->command == XDP_SETUP_PROG_HW) {
		err = nsim_xdp_offload_prog(ns, bpf);
		if (err)
			return err;
	}

	xdp_attachment_setup(xdp, bpf);

	return 0;
}
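
/*
 * Allocate verifier-time state for a program being bound to the device and
 * publish it under a fresh, numbered debugfs directory.
 */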
static int nsim_bpf_create_prog(struct nsim_dev *nsim_dev,
				struct bpf_prog *prog)
{
	struct nsim_bpf_bound_prog *state;
	char name[16];
	int ret;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->nsim_dev = nsim_dev;
	state->prog = prog;
	state->state = "verify";

	/* Program id is not populated yet when we create the state. */
	sprintf(name, "%u", nsim_dev->prog_id_gen++);
	state->ddir = debugfs_create_dir(name, nsim_dev->ddir_bpf_bound_progs);
	if (IS_ERR(state->ddir)) {
		ret = PTR_ERR(state->ddir);
		kfree(state);
		return ret;
	}

	debugfs_create_u32("id", 0400, state->ddir, &prog->aux->id);
	debugfs_create_file("state", 0400, state->ddir,
			    &state->state, &nsim_bpf_string_fops);
	debugfs_create_bool("loaded", 0400, state->ddir, &state->is_loaded);

	list_add_tail(&state->l, &nsim_dev->bpf_bound_progs);

	prog->aux->offload->dev_priv = state;

	return 0;
}

static int nsim_bpf_verifier_prep(struct bpf_prog *prog)
{
	struct nsim_dev *nsim_dev =
			bpf_offload_dev_priv(prog->aux->offload->offdev);

	if (!nsim_dev->bpf_bind_accept)
		return -EOPNOTSUPP;

	return nsim_bpf_create_prog(nsim_dev, prog);
}

static int nsim_bpf_translate(struct bpf_prog *prog)
{
	struct nsim_bpf_bound_prog *state = prog->aux->offload->dev_priv;

	state->state = "xlated";
	return 0;
}

static void nsim_bpf_destroy_prog(struct bpf_prog *prog)
{
	struct nsim_bpf_bound_prog *state;

	state = prog->aux->offload->dev_priv;
	WARN(state->is_loaded,
	     "offload state destroyed while program still bound");
	debugfs_remove_recursive(state->ddir);
	list_del(&state->l);
	kfree(state);
}

static const struct bpf_prog_offload_ops nsim_bpf_dev_ops = {
	.insn_hook	= nsim_bpf_verify_insn,
	.finalize	= nsim_bpf_finalize,
	.prepare	= nsim_bpf_verifier_prep,
	.translate	= nsim_bpf_translate,
	.destroy	= nsim_bpf_destroy_prog,
};

static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
{
	if (bpf->prog && bpf->prog->aux->offload) {
		NSIM_EA(bpf->extack, "attempt to load offloaded prog to drv");
		return -EINVAL;
	}
	if (ns->netdev->mtu > NSIM_XDP_MAX_MTU) {
		NSIM_EA(bpf->extack, "MTU too large w/ XDP enabled");
		return -EINVAL;
	}
	return 0;
}

static int
nsim_setup_prog_hw_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
{
	struct nsim_bpf_bound_prog *state;

	if (!bpf->prog)
		return 0;

	if (!bpf->prog->aux->offload) {
		NSIM_EA(bpf->extack, "xdpoffload of non-bound program");
		return -EINVAL;
	}
	if (!bpf_offload_dev_match(bpf->prog, ns->netdev)) {
		NSIM_EA(bpf->extack, "program bound to different dev");
		return -EINVAL;
	}

	state = bpf->prog->aux->offload->dev_priv;
	if (WARN_ON(strcmp(state->state, "xlated"))) {
		NSIM_EA(bpf->extack, "offloading program in bad state");
		return -EINVAL;
	}
	return 0;
}
static bool
nsim_map_key_match(struct bpf_map *map, struct nsim_map_entry *e, void *key)
{
	return e->key && !memcmp(key, e->key, map->key_size);
}

static int nsim_map_key_find(struct bpf_offloaded_map *offmap, void *key)
{
	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(nmap->entry); i++)
		if (nsim_map_key_match(&offmap->map, &nmap->entry[i], key))
			return i;

	return -ENOENT;
}

static int
nsim_map_alloc_elem(struct bpf_offloaded_map *offmap, unsigned int idx)
{
	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;

	nmap->entry[idx].key = kmalloc(offmap->map.key_size, GFP_USER);
	if (!nmap->entry[idx].key)
		return -ENOMEM;
	nmap->entry[idx].value = kmalloc(offmap->map.value_size, GFP_USER);
	if (!nmap->entry[idx].value) {
		kfree(nmap->entry[idx].key);
		nmap->entry[idx].key = NULL;
		return -ENOMEM;
	}

	return 0;
}
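
/*
 * BPF_MAP_GET_NEXT_KEY emulation: an unknown or NULL key restarts iteration
 * from the first occupied slot, otherwise the slot after @key is returned.
 */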
static int
nsim_map_get_next_key(struct bpf_offloaded_map *offmap,
		      void *key, void *next_key)
{
	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
	int idx = -ENOENT;

	mutex_lock(&nmap->mutex);

	if (key)
		idx = nsim_map_key_find(offmap, key);
	if (idx == -ENOENT)
		idx = 0;
	else
		idx++;

	for (; idx < ARRAY_SIZE(nmap->entry); idx++) {
		if (nmap->entry[idx].key) {
			memcpy(next_key, nmap->entry[idx].key,
			       offmap->map.key_size);
			break;
		}
	}

	mutex_unlock(&nmap->mutex);

	if (idx == ARRAY_SIZE(nmap->entry))
		return -ENOENT;
	return 0;
}

static int
nsim_map_lookup_elem(struct bpf_offloaded_map *offmap, void *key, void *value)
{
	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
	int idx;

	mutex_lock(&nmap->mutex);

	idx = nsim_map_key_find(offmap, key);
	if (idx >= 0)
		memcpy(value, nmap->entry[idx].value, offmap->map.value_size);

	mutex_unlock(&nmap->mutex);

	return idx < 0 ? idx : 0;
}
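
/*
 * BPF_MAP_UPDATE_ELEM emulation: BPF_EXIST requires the key to be present,
 * BPF_NOEXIST requires it to be absent; new keys take the first free slot.
 */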
static int
nsim_map_update_elem(struct bpf_offloaded_map *offmap,
		     void *key, void *value, u64 flags)
{
	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
	int idx, err = 0;

	mutex_lock(&nmap->mutex);

	idx = nsim_map_key_find(offmap, key);
	if (idx < 0 && flags == BPF_EXIST) {
		err = idx;
		goto exit_unlock;
	}
	if (idx >= 0 && flags == BPF_NOEXIST) {
		err = -EEXIST;
		goto exit_unlock;
	}

	if (idx < 0) {
		for (idx = 0; idx < ARRAY_SIZE(nmap->entry); idx++)
			if (!nmap->entry[idx].key)
				break;
		if (idx == ARRAY_SIZE(nmap->entry)) {
			err = -E2BIG;
			goto exit_unlock;
		}

		err = nsim_map_alloc_elem(offmap, idx);
		if (err)
			goto exit_unlock;
	}

	memcpy(nmap->entry[idx].key, key, offmap->map.key_size);
	memcpy(nmap->entry[idx].value, value, offmap->map.value_size);
exit_unlock:
	mutex_unlock(&nmap->mutex);

	return err;
}

static int nsim_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
{
	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
	int idx;

	if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
		return -EINVAL;

	mutex_lock(&nmap->mutex);

	idx = nsim_map_key_find(offmap, key);
	if (idx >= 0) {
		kfree(nmap->entry[idx].key);
		kfree(nmap->entry[idx].value);
		memset(&nmap->entry[idx], 0, sizeof(nmap->entry[idx]));
	}

	mutex_unlock(&nmap->mutex);

	return idx < 0 ? idx : 0;
}

static const struct bpf_map_dev_ops nsim_bpf_map_ops = {
	.map_get_next_key	= nsim_map_get_next_key,
	.map_lookup_elem	= nsim_map_lookup_elem,
	.map_update_elem	= nsim_map_update_elem,
	.map_delete_elem	= nsim_map_delete_elem,
};
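
/*
 * Accept only small array/hash maps without flags; array maps are
 * pre-populated so every index has a backing key/value pair.
 */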
static int
nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap)
{
	struct nsim_bpf_bound_map *nmap;
	int i, err;

	if (WARN_ON(offmap->map.map_type != BPF_MAP_TYPE_ARRAY &&
		    offmap->map.map_type != BPF_MAP_TYPE_HASH))
		return -EINVAL;
	if (offmap->map.max_entries > NSIM_BPF_MAX_KEYS)
		return -ENOMEM;
	if (offmap->map.map_flags)
		return -EINVAL;

	nmap = kzalloc(sizeof(*nmap), GFP_USER);
	if (!nmap)
		return -ENOMEM;

	offmap->dev_priv = nmap;
	nmap->ns = ns;
	nmap->map = offmap;
	mutex_init(&nmap->mutex);

	if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY) {
		for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) {
			u32 *key;

			err = nsim_map_alloc_elem(offmap, i);
			if (err)
				goto err_free;
			key = nmap->entry[i].key;
			*key = i;
			memset(nmap->entry[i].value, 0, offmap->map.value_size);
		}
	}

	offmap->dev_ops = &nsim_bpf_map_ops;
	list_add_tail(&nmap->l, &ns->nsim_dev->bpf_bound_maps);

	return 0;

err_free:
	while (--i >= 0) {
		kfree(nmap->entry[i].key);
		kfree(nmap->entry[i].value);
	}
	kfree(nmap);
	return err;
}

static void nsim_bpf_map_free(struct bpf_offloaded_map *offmap)
{
	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) {
		kfree(nmap->entry[i].key);
		kfree(nmap->entry[i].value);
	}
	list_del_init(&nmap->l);
	mutex_destroy(&nmap->mutex);
	kfree(nmap);
}
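
/* ndo_bpf entry point: dispatch XDP attach and offloaded-map commands. */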
int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct netdevsim *ns = netdev_priv(dev);
	int err;

	ASSERT_RTNL();

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		err = nsim_setup_prog_checks(ns, bpf);
		if (err)
			return err;

		return nsim_xdp_set_prog(ns, bpf, &ns->xdp);
	case XDP_SETUP_PROG_HW:
		err = nsim_setup_prog_hw_checks(ns, bpf);
		if (err)
			return err;

		return nsim_xdp_set_prog(ns, bpf, &ns->xdp_hw);
	case BPF_OFFLOAD_MAP_ALLOC:
		if (!ns->bpf_map_accept)
			return -EOPNOTSUPP;

		return nsim_bpf_map_alloc(ns, bpf->offmap);
	case BPF_OFFLOAD_MAP_FREE:
		nsim_bpf_map_free(bpf->offmap);
		return 0;
	default:
		return -EINVAL;
	}
}
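
/*
 * Per-device init: create the bound-programs debugfs directory, register the
 * offload device and expose the bind/verifier policy knobs in debugfs.
 */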
int nsim_bpf_dev_init(struct nsim_dev *nsim_dev)
{
	int err;

	INIT_LIST_HEAD(&nsim_dev->bpf_bound_progs);
	INIT_LIST_HEAD(&nsim_dev->bpf_bound_maps);

	nsim_dev->ddir_bpf_bound_progs = debugfs_create_dir("bpf_bound_progs",
							    nsim_dev->ddir);
	if (IS_ERR(nsim_dev->ddir_bpf_bound_progs))
		return PTR_ERR(nsim_dev->ddir_bpf_bound_progs);

	nsim_dev->bpf_dev = bpf_offload_dev_create(&nsim_bpf_dev_ops, nsim_dev);
	err = PTR_ERR_OR_ZERO(nsim_dev->bpf_dev);
	if (err)
		return err;

	nsim_dev->bpf_bind_accept = true;
	debugfs_create_bool("bpf_bind_accept", 0600, nsim_dev->ddir,
			    &nsim_dev->bpf_bind_accept);
	debugfs_create_u32("bpf_bind_verifier_delay", 0600, nsim_dev->ddir,
			   &nsim_dev->bpf_bind_verifier_delay);
	nsim_dev->bpf_bind_verifier_accept = true;
	debugfs_create_bool("bpf_bind_verifier_accept", 0600, nsim_dev->ddir,
			    &nsim_dev->bpf_bind_verifier_accept);
	return 0;
}

void nsim_bpf_dev_exit(struct nsim_dev *nsim_dev)
{
	WARN_ON(!list_empty(&nsim_dev->bpf_bound_progs));
	WARN_ON(!list_empty(&nsim_dev->bpf_bound_maps));
	bpf_offload_dev_destroy(nsim_dev->bpf_dev);
}
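
/*
 * Per-netdev init: register the netdev with the offload device and create the
 * per-port debugfs switches that control which BPF features are accepted.
 */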
int nsim_bpf_init(struct netdevsim *ns)
{
	struct dentry *ddir = ns->nsim_dev_port->ddir;
	int err;

	err = bpf_offload_dev_netdev_register(ns->nsim_dev->bpf_dev,
					      ns->netdev);
	if (err)
		return err;

	debugfs_create_u32("bpf_offloaded_id", 0400, ddir,
			   &ns->bpf_offloaded_id);

	ns->bpf_tc_accept = true;
	debugfs_create_bool("bpf_tc_accept", 0600, ddir,
			    &ns->bpf_tc_accept);
	debugfs_create_bool("bpf_tc_non_bound_accept", 0600, ddir,
			    &ns->bpf_tc_non_bound_accept);
	ns->bpf_xdpdrv_accept = true;
	debugfs_create_bool("bpf_xdpdrv_accept", 0600, ddir,
			    &ns->bpf_xdpdrv_accept);
	ns->bpf_xdpoffload_accept = true;
	debugfs_create_bool("bpf_xdpoffload_accept", 0600, ddir,
			    &ns->bpf_xdpoffload_accept);

	ns->bpf_map_accept = true;
	debugfs_create_bool("bpf_map_accept", 0600, ddir,
			    &ns->bpf_map_accept);

	return 0;
}

void nsim_bpf_uninit(struct netdevsim *ns)
{
	WARN_ON(ns->xdp.prog);
	WARN_ON(ns->xdp_hw.prog);
	WARN_ON(ns->bpf_offloaded);
	bpf_offload_dev_netdev_unregister(ns->nsim_dev->bpf_dev, ns->netdev);
}