/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/rtnetlink.h>
#include <net/pkt_cls.h>

#include "netdevsim.h"

#define pr_vlog(env, fmt, ...)	\
	bpf_verifier_log_write(env, "[netdevsim] " fmt, ##__VA_ARGS__)
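
/* State kept for each BPF program bound to the simulated device. */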
struct nsim_bpf_bound_prog {
	struct netdevsim *ns;
	struct bpf_prog *prog;
	struct dentry *ddir;
	const char *state;
	bool is_loaded;
	struct list_head l;
};

#define NSIM_BPF_MAX_KEYS	2
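
/*
 * Shadow of an offloaded map: up to NSIM_BPF_MAX_KEYS key/value pairs kept
 * in kernel memory, protected by a mutex.
 */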
struct nsim_bpf_bound_map {
	struct bpf_offloaded_map *map;
	struct mutex mutex;
	struct nsim_map_entry {
		void *key;
		void *value;
	} entry[NSIM_BPF_MAX_KEYS];
	struct list_head l;
};
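
/* Helpers backing the per-program "state" file in debugfs. */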
static int nsim_debugfs_bpf_string_read(struct seq_file *file, void *data)
{
	const char **str = file->private;

	seq_printf(file, "%s\n", *str);

	return 0;
}

static int nsim_debugfs_bpf_string_open(struct inode *inode, struct file *f)
{
	return single_open(f, nsim_debugfs_bpf_string_read, inode->i_private);
}

static const struct file_operations nsim_bpf_string_fops = {
	.owner = THIS_MODULE,
	.open = nsim_debugfs_bpf_string_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek
};
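
/*
 * Verifier per-instruction callback: optionally sleep on the first
 * instruction to simulate slow verification, and write a message to the
 * verifier log when the last instruction is reached.
 */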
static int
nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn)
{
	struct nsim_bpf_bound_prog *state;

	state = env->prog->aux->offload->dev_priv;
	if (state->ns->bpf_bind_verifier_delay && !insn_idx)
		msleep(state->ns->bpf_bind_verifier_delay);

	if (insn_idx == env->prog->len - 1)
		pr_vlog(env, "Hello from netdevsim!\n");

	return 0;
}

static int nsim_bpf_finalize(struct bpf_verifier_env *env)
{
	return 0;
}

static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops = {
	.insn_hook	= nsim_bpf_verify_insn,
	.finalize	= nsim_bpf_finalize,
};

static bool nsim_xdp_offload_active(struct netdevsim *ns)
{
	return ns->xdp_hw.prog;
}

static void nsim_prog_set_loaded(struct bpf_prog *prog, bool loaded)
{
	struct nsim_bpf_bound_prog *state;

	if (!prog || !prog->aux->offload)
		return;

	state = prog->aux->offload->dev_priv;
	state->is_loaded = loaded;
}
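
/*
 * Record @prog as the program offloaded to the device, updating the
 * "loaded" debugfs flag of both the old and the new program.
 */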
static int
nsim_bpf_offload(struct netdevsim *ns, struct bpf_prog *prog, bool oldprog)
{
	nsim_prog_set_loaded(ns->bpf_offloaded, false);

	WARN(!!ns->bpf_offloaded != oldprog,
	     "bad offload state, expected offload %sto be active",
	     oldprog ? "" : "not ");
	ns->bpf_offloaded = prog;
	ns->bpf_offloaded_id = prog ? prog->aux->id : 0;
	nsim_prog_set_loaded(prog, true);

	return 0;
}
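
/* TC block callback: validate and apply offload of a cls_bpf classifier. */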
int nsim_bpf_setup_tc_block_cb(enum tc_setup_type type,
			       void *type_data, void *cb_priv)
{
	struct tc_cls_bpf_offload *cls_bpf = type_data;
	struct bpf_prog *prog = cls_bpf->prog;
	struct netdevsim *ns = cb_priv;
	struct bpf_prog *oldprog;

	if (type != TC_SETUP_CLSBPF) {
		NSIM_EA(cls_bpf->common.extack,
			"only offload of BPF classifiers supported");
		return -EOPNOTSUPP;
	}

	if (!tc_cls_can_offload_and_chain0(ns->netdev, &cls_bpf->common))
		return -EOPNOTSUPP;

	if (cls_bpf->common.protocol != htons(ETH_P_ALL)) {
		NSIM_EA(cls_bpf->common.extack,
			"only ETH_P_ALL supported as filter protocol");
		return -EOPNOTSUPP;
	}

	if (!ns->bpf_tc_accept) {
		NSIM_EA(cls_bpf->common.extack,
			"netdevsim configured to reject BPF TC offload");
		return -EOPNOTSUPP;
	}
	/* Note: progs without skip_sw will probably not be dev bound */
	if (prog && !prog->aux->offload && !ns->bpf_tc_non_bound_accept) {
		NSIM_EA(cls_bpf->common.extack,
			"netdevsim configured to reject unbound programs");
		return -EOPNOTSUPP;
	}

	if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
		return -EOPNOTSUPP;

	oldprog = cls_bpf->oldprog;

	/* Don't remove if oldprog doesn't match driver's state */
	if (ns->bpf_offloaded != oldprog) {
		oldprog = NULL;
		if (!cls_bpf->prog)
			return 0;
		if (ns->bpf_offloaded) {
			NSIM_EA(cls_bpf->common.extack,
				"driver and netdev offload states mismatch");
			return -EBUSY;
		}
	}

	return nsim_bpf_offload(ns, cls_bpf->prog, oldprog);
}

int nsim_bpf_disable_tc(struct netdevsim *ns)
{
	if (ns->bpf_offloaded && !nsim_xdp_offload_active(ns))
		return -EBUSY;
	return 0;
}

static int nsim_xdp_offload_prog(struct netdevsim *ns, struct netdev_bpf *bpf)
{
	if (!nsim_xdp_offload_active(ns) && !bpf->prog)
		return 0;
	if (!nsim_xdp_offload_active(ns) && bpf->prog &&
	    ns->bpf_offloaded) {
		NSIM_EA(bpf->extack, "TC program is already loaded");
		return -EBUSY;
	}

	return nsim_bpf_offload(ns, bpf->prog, nsim_xdp_offload_active(ns));
}
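
/*
 * Common XDP attach path for driver and HW mode; enforces the debugfs
 * accept knobs and the recorded attachment flags.
 */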
static int
nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf,
		  struct xdp_attachment_info *xdp)
{
	int err;

	if (!xdp_attachment_flags_ok(xdp, bpf))
		return -EBUSY;

	if (bpf->command == XDP_SETUP_PROG && !ns->bpf_xdpdrv_accept) {
		NSIM_EA(bpf->extack, "driver XDP disabled in DebugFS");
		return -EOPNOTSUPP;
	}
	if (bpf->command == XDP_SETUP_PROG_HW && !ns->bpf_xdpoffload_accept) {
		NSIM_EA(bpf->extack, "XDP offload disabled in DebugFS");
		return -EOPNOTSUPP;
	}

	if (bpf->command == XDP_SETUP_PROG_HW) {
		err = nsim_xdp_offload_prog(ns, bpf);
		if (err)
			return err;
	}

	xdp_attachment_setup(xdp, bpf);

	return 0;
}
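
/*
 * Allocate bound-program state, expose it under bpf_bound_progs/ in debugfs
 * and link it into the program's offload private data.
 */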
static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
{
	struct nsim_bpf_bound_prog *state;
	char name[16];

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->ns = ns;
	state->prog = prog;
	state->state = "verify";

	/* Program id is not populated yet when we create the state. */
	sprintf(name, "%u", ns->sdev->prog_id_gen++);
	state->ddir = debugfs_create_dir(name, ns->sdev->ddir_bpf_bound_progs);
	if (IS_ERR_OR_NULL(state->ddir)) {
		kfree(state);
		return -ENOMEM;
	}

	debugfs_create_u32("id", 0400, state->ddir, &prog->aux->id);
	debugfs_create_file("state", 0400, state->ddir,
			    &state->state, &nsim_bpf_string_fops);
	debugfs_create_bool("loaded", 0400, state->ddir, &state->is_loaded);

	list_add_tail(&state->l, &ns->sdev->bpf_bound_progs);

	prog->aux->offload->dev_priv = state;

	return 0;
}

static void nsim_bpf_destroy_prog(struct bpf_prog *prog)
{
	struct nsim_bpf_bound_prog *state;

	state = prog->aux->offload->dev_priv;
	WARN(state->is_loaded,
	     "offload state destroyed while program still bound");
	debugfs_remove_recursive(state->ddir);
	list_del(&state->l);
	kfree(state);
}

static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
{
	if (bpf->prog && bpf->prog->aux->offload) {
		NSIM_EA(bpf->extack, "attempt to load offloaded prog to drv");
		return -EINVAL;
	}
	if (ns->netdev->mtu > NSIM_XDP_MAX_MTU) {
		NSIM_EA(bpf->extack, "MTU too large w/ XDP enabled");
		return -EINVAL;
	}
	return 0;
}

static int
nsim_setup_prog_hw_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
{
	struct nsim_bpf_bound_prog *state;

	if (!bpf->prog)
		return 0;

	if (!bpf->prog->aux->offload) {
		NSIM_EA(bpf->extack, "xdpoffload of non-bound program");
		return -EINVAL;
	}
	if (!bpf_offload_dev_match(bpf->prog, ns->netdev)) {
		NSIM_EA(bpf->extack, "program bound to different dev");
		return -EINVAL;
	}

	state = bpf->prog->aux->offload->dev_priv;
	if (WARN_ON(strcmp(state->state, "xlated"))) {
		NSIM_EA(bpf->extack, "offloading program in bad state");
		return -EINVAL;
	}
	return 0;
}
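
/* Emulated offloaded maps: entries are plain kernel allocations matched by memcmp(). */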
static bool
nsim_map_key_match(struct bpf_map *map, struct nsim_map_entry *e, void *key)
{
	return e->key && !memcmp(key, e->key, map->key_size);
}

static int nsim_map_key_find(struct bpf_offloaded_map *offmap, void *key)
{
	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(nmap->entry); i++)
		if (nsim_map_key_match(&offmap->map, &nmap->entry[i], key))
			return i;

	return -ENOENT;
}

static int
nsim_map_alloc_elem(struct bpf_offloaded_map *offmap, unsigned int idx)
{
	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;

	nmap->entry[idx].key = kmalloc(offmap->map.key_size, GFP_USER);
	if (!nmap->entry[idx].key)
		return -ENOMEM;
	nmap->entry[idx].value = kmalloc(offmap->map.value_size, GFP_USER);
	if (!nmap->entry[idx].value) {
		kfree(nmap->entry[idx].key);
		nmap->entry[idx].key = NULL;
		return -ENOMEM;
	}

	return 0;
}
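
/*
 * get_next_key: start after the given key (or from slot 0 when the key is
 * NULL or unknown) and report the first occupied slot.
 */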
static int
nsim_map_get_next_key(struct bpf_offloaded_map *offmap,
		      void *key, void *next_key)
{
	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
	int idx = -ENOENT;

	mutex_lock(&nmap->mutex);

	if (key)
		idx = nsim_map_key_find(offmap, key);
	if (idx == -ENOENT)
		idx = 0;
	else
		idx++;

	for (; idx < ARRAY_SIZE(nmap->entry); idx++) {
		if (nmap->entry[idx].key) {
			memcpy(next_key, nmap->entry[idx].key,
			       offmap->map.key_size);
			break;
		}
	}

	mutex_unlock(&nmap->mutex);

	if (idx == ARRAY_SIZE(nmap->entry))
		return -ENOENT;

	return 0;
}

static int
nsim_map_lookup_elem(struct bpf_offloaded_map *offmap, void *key, void *value)
{
	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
	int idx;

	mutex_lock(&nmap->mutex);

	idx = nsim_map_key_find(offmap, key);
	if (idx >= 0)
		memcpy(value, nmap->entry[idx].value, offmap->map.value_size);

	mutex_unlock(&nmap->mutex);

	return idx < 0 ? idx : 0;
}
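
/*
 * Update honours BPF_EXIST/BPF_NOEXIST and allocates a free slot for new
 * keys; the map is full once all NSIM_BPF_MAX_KEYS slots are taken.
 */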
static int
nsim_map_update_elem(struct bpf_offloaded_map *offmap,
		     void *key, void *value, u64 flags)
{
	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
	int idx, err = 0;

	mutex_lock(&nmap->mutex);

	idx = nsim_map_key_find(offmap, key);
	if (idx < 0 && flags == BPF_EXIST) {
		err = idx;
		goto exit_unlock;
	}
	if (idx >= 0 && flags == BPF_NOEXIST) {
		err = -EEXIST;
		goto exit_unlock;
	}

	if (idx < 0) {
		for (idx = 0; idx < ARRAY_SIZE(nmap->entry); idx++)
			if (!nmap->entry[idx].key)
				break;
		if (idx == ARRAY_SIZE(nmap->entry)) {
			err = -E2BIG;
			goto exit_unlock;
		}

		err = nsim_map_alloc_elem(offmap, idx);
		if (err)
			goto exit_unlock;
	}

	memcpy(nmap->entry[idx].key, key, offmap->map.key_size);
	memcpy(nmap->entry[idx].value, value, offmap->map.value_size);

exit_unlock:
	mutex_unlock(&nmap->mutex);

	return err;
}

static int nsim_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
{
	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
	int idx;

	if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
		return -EINVAL;

	mutex_lock(&nmap->mutex);

	idx = nsim_map_key_find(offmap, key);
	if (idx >= 0) {
		kfree(nmap->entry[idx].key);
		kfree(nmap->entry[idx].value);
		memset(&nmap->entry[idx], 0, sizeof(nmap->entry[idx]));
	}

	mutex_unlock(&nmap->mutex);

	return idx < 0 ? idx : 0;
}

static const struct bpf_map_dev_ops nsim_bpf_map_ops = {
	.map_get_next_key	= nsim_map_get_next_key,
	.map_lookup_elem	= nsim_map_lookup_elem,
	.map_update_elem	= nsim_map_update_elem,
	.map_delete_elem	= nsim_map_delete_elem,
};
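
/*
 * Bind an offloaded array or hash map. Array maps are pre-populated with
 * their index as key and zeroed values.
 */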
static int
nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap)
{
	struct nsim_bpf_bound_map *nmap;
	int i, err;

	if (WARN_ON(offmap->map.map_type != BPF_MAP_TYPE_ARRAY &&
		    offmap->map.map_type != BPF_MAP_TYPE_HASH))
		return -EINVAL;
	if (offmap->map.max_entries > NSIM_BPF_MAX_KEYS)
		return -ENOMEM;
	if (offmap->map.map_flags)
		return -EINVAL;

	nmap = kzalloc(sizeof(*nmap), GFP_USER);
	if (!nmap)
		return -ENOMEM;

	offmap->dev_priv = nmap;
	nmap->map = offmap;
	mutex_init(&nmap->mutex);

	if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY) {
		for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) {
			u32 *key;

			err = nsim_map_alloc_elem(offmap, i);
			if (err)
				goto err_free;
			key = nmap->entry[i].key;
			*key = i;
			memset(nmap->entry[i].value, 0, offmap->map.value_size);
		}
	}

	offmap->dev_ops = &nsim_bpf_map_ops;
	list_add_tail(&nmap->l, &ns->sdev->bpf_bound_maps);

	return 0;

err_free:
	while (--i >= 0) {
		kfree(nmap->entry[i].key);
		kfree(nmap->entry[i].value);
	}
	kfree(nmap);
	return err;
}

static void nsim_bpf_map_free(struct bpf_offloaded_map *offmap)
{
	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) {
		kfree(nmap->entry[i].key);
		kfree(nmap->entry[i].value);
	}
	list_del_init(&nmap->l);
	mutex_destroy(&nmap->mutex);
	kfree(nmap);
}
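
/*
 * ndo_bpf entry point: dispatches verifier prep, translate/destroy,
 * XDP query/setup and map alloc/free commands.
 */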
int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct netdevsim *ns = netdev_priv(dev);
	struct nsim_bpf_bound_prog *state;
	int err;

	ASSERT_RTNL();

	switch (bpf->command) {
	case BPF_OFFLOAD_VERIFIER_PREP:
		if (!ns->bpf_bind_accept)
			return -EOPNOTSUPP;

		err = nsim_bpf_create_prog(ns, bpf->verifier.prog);
		if (err)
			return err;

		bpf->verifier.ops = &nsim_bpf_analyzer_ops;
		return 0;
	case BPF_OFFLOAD_TRANSLATE:
		state = bpf->offload.prog->aux->offload->dev_priv;

		state->state = "xlated";
		return 0;
	case BPF_OFFLOAD_DESTROY:
		nsim_bpf_destroy_prog(bpf->offload.prog);
		return 0;
	case XDP_QUERY_PROG:
		return xdp_attachment_query(&ns->xdp, bpf);
	case XDP_QUERY_PROG_HW:
		return xdp_attachment_query(&ns->xdp_hw, bpf);
	case XDP_SETUP_PROG:
		err = nsim_setup_prog_checks(ns, bpf);
		if (err)
			return err;

		return nsim_xdp_set_prog(ns, bpf, &ns->xdp);
	case XDP_SETUP_PROG_HW:
		err = nsim_setup_prog_hw_checks(ns, bpf);
		if (err)
			return err;

		return nsim_xdp_set_prog(ns, bpf, &ns->xdp_hw);
	case BPF_OFFLOAD_MAP_ALLOC:
		if (!ns->bpf_map_accept)
			return -EOPNOTSUPP;

		return nsim_bpf_map_alloc(ns, bpf->offmap);
	case BPF_OFFLOAD_MAP_FREE:
		nsim_bpf_map_free(bpf->offmap);
		return 0;
	default:
		return -EINVAL;
	}
}
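
/*
 * Per-port init: the first port of a shared device creates the bound
 * prog/map lists and the offload device, then the netdev is registered and
 * the accept/delay knobs are exposed in debugfs.
 */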
int nsim_bpf_init(struct netdevsim *ns)
{
	int err;

	if (ns->sdev->refcnt == 1) {
		INIT_LIST_HEAD(&ns->sdev->bpf_bound_progs);
		INIT_LIST_HEAD(&ns->sdev->bpf_bound_maps);

		ns->sdev->ddir_bpf_bound_progs =
			debugfs_create_dir("bpf_bound_progs", ns->sdev->ddir);
		if (IS_ERR_OR_NULL(ns->sdev->ddir_bpf_bound_progs))
			return -ENOMEM;

		ns->sdev->bpf_dev = bpf_offload_dev_create();
		err = PTR_ERR_OR_ZERO(ns->sdev->bpf_dev);
		if (err)
			return err;
	}

	err = bpf_offload_dev_netdev_register(ns->sdev->bpf_dev, ns->netdev);
	if (err)
		goto err_destroy_bdev;

	debugfs_create_u32("bpf_offloaded_id", 0400, ns->ddir,
			   &ns->bpf_offloaded_id);

	ns->bpf_bind_accept = true;
	debugfs_create_bool("bpf_bind_accept", 0600, ns->ddir,
			    &ns->bpf_bind_accept);
	debugfs_create_u32("bpf_bind_verifier_delay", 0600, ns->ddir,
			   &ns->bpf_bind_verifier_delay);

	ns->bpf_tc_accept = true;
	debugfs_create_bool("bpf_tc_accept", 0600, ns->ddir,
			    &ns->bpf_tc_accept);
	debugfs_create_bool("bpf_tc_non_bound_accept", 0600, ns->ddir,
			    &ns->bpf_tc_non_bound_accept);

	ns->bpf_xdpdrv_accept = true;
	debugfs_create_bool("bpf_xdpdrv_accept", 0600, ns->ddir,
			    &ns->bpf_xdpdrv_accept);

	ns->bpf_xdpoffload_accept = true;
	debugfs_create_bool("bpf_xdpoffload_accept", 0600, ns->ddir,
			    &ns->bpf_xdpoffload_accept);

	ns->bpf_map_accept = true;
	debugfs_create_bool("bpf_map_accept", 0600, ns->ddir,
			    &ns->bpf_map_accept);

	return 0;

err_destroy_bdev:
	if (ns->sdev->refcnt == 1)
		bpf_offload_dev_destroy(ns->sdev->bpf_dev);
	return err;
}

void nsim_bpf_uninit(struct netdevsim *ns)
{
	WARN_ON(ns->xdp.prog);
	WARN_ON(ns->xdp_hw.prog);
	WARN_ON(ns->bpf_offloaded);
	bpf_offload_dev_netdev_unregister(ns->sdev->bpf_dev, ns->netdev);

	if (ns->sdev->refcnt == 1) {
		WARN_ON(!list_empty(&ns->sdev->bpf_bound_progs));
		WARN_ON(!list_empty(&ns->sdev->bpf_bound_maps));
		bpf_offload_dev_destroy(ns->sdev->bpf_dev);
	}
}