/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/rtnetlink.h>
#include <net/pkt_cls.h>

#include "netdevsim.h"

#define pr_vlog(env, fmt, ...)	\
	bpf_verifier_log_write(env, "[netdevsim] " fmt, ##__VA_ARGS__)
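
/* State kept for each program bound to the simulated device; it is
 * exposed under a per-program debugfs directory while the bind lasts.
 */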
struct nsim_bpf_bound_prog {
	struct netdevsim *ns;
	struct bpf_prog *prog;
	struct dentry *ddir;
	const char *state;
	bool is_loaded;
	struct list_head l;
};

#define NSIM_BPF_MAX_KEYS	2
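
/* Emulated offloaded map: at most NSIM_BPF_MAX_KEYS key/value pairs are
 * kept in host memory, protected by a per-map mutex.
 */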
struct nsim_bpf_bound_map {
	struct netdevsim *ns;
	struct bpf_offloaded_map *map;
	struct mutex mutex;
	struct nsim_map_entry {
		void *key;
		void *value;
	} entry[NSIM_BPF_MAX_KEYS];
	struct list_head l;
};

static int nsim_debugfs_bpf_string_read(struct seq_file *file, void *data)
{
	const char **str = file->private;

	seq_printf(file, "%s\n", *str);

	return 0;
}

static int nsim_debugfs_bpf_string_open(struct inode *inode, struct file *f)
{
	return single_open(f, nsim_debugfs_bpf_string_read, inode->i_private);
}

static const struct file_operations nsim_bpf_string_fops = {
	.owner = THIS_MODULE,
	.open = nsim_debugfs_bpf_string_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek
};
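
/* Per-instruction verifier callback: optionally sleep on the first
 * instruction (bpf_bind_verifier_delay) and write a greeting to the
 * verifier log when the last instruction is reached.
 */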
static int
nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn)
{
	struct nsim_bpf_bound_prog *state;

	state = env->prog->aux->offload->dev_priv;
	if (state->ns->bpf_bind_verifier_delay && !insn_idx)
		msleep(state->ns->bpf_bind_verifier_delay);

	if (insn_idx == env->prog->len - 1)
		pr_vlog(env, "Hello from netdevsim!\n");

	return 0;
}

static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops = {
	.insn_hook = nsim_bpf_verify_insn,
};

static bool nsim_xdp_offload_active(struct netdevsim *ns)
{
	return ns->xdp_hw.prog;
}

static void nsim_prog_set_loaded(struct bpf_prog *prog, bool loaded)
{
	struct nsim_bpf_bound_prog *state;

	if (!prog || !prog->aux->offload)
		return;

	state = prog->aux->offload->dev_priv;
	state->is_loaded = loaded;
}
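
/* Record @prog as the program currently offloaded to the device and
 * update the "loaded" debugfs state of both the old and the new program.
 */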
static int
nsim_bpf_offload(struct netdevsim *ns, struct bpf_prog *prog, bool oldprog)
{
	nsim_prog_set_loaded(ns->bpf_offloaded, false);

	WARN(!!ns->bpf_offloaded != oldprog,
	     "bad offload state, expected offload %sto be active",
	     oldprog ? "" : "not ");
	ns->bpf_offloaded = prog;
	ns->bpf_offloaded_id = prog ? prog->aux->id : 0;
	nsim_prog_set_loaded(prog, true);

	return 0;
}
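
/* TC block callback: accept cls_bpf offload requests which pass the
 * debugfs-configured policy checks and hand the program to
 * nsim_bpf_offload().
 */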
int nsim_bpf_setup_tc_block_cb(enum tc_setup_type type,
			       void *type_data, void *cb_priv)
{
	struct tc_cls_bpf_offload *cls_bpf = type_data;
	struct bpf_prog *prog = cls_bpf->prog;
	struct netdevsim *ns = cb_priv;
	struct bpf_prog *oldprog;

	if (type != TC_SETUP_CLSBPF) {
		NSIM_EA(cls_bpf->common.extack,
			"only offload of BPF classifiers supported");
		return -EOPNOTSUPP;
	}

	if (!tc_cls_can_offload_and_chain0(ns->netdev, &cls_bpf->common))
		return -EOPNOTSUPP;

	if (cls_bpf->common.protocol != htons(ETH_P_ALL)) {
		NSIM_EA(cls_bpf->common.extack,
			"only ETH_P_ALL supported as filter protocol");
		return -EOPNOTSUPP;
	}

	if (!ns->bpf_tc_accept) {
		NSIM_EA(cls_bpf->common.extack,
			"netdevsim configured to reject BPF TC offload");
		return -EOPNOTSUPP;
	}
	/* Note: progs without skip_sw will probably not be dev bound */
	if (prog && !prog->aux->offload && !ns->bpf_tc_non_bound_accept) {
		NSIM_EA(cls_bpf->common.extack,
			"netdevsim configured to reject unbound programs");
		return -EOPNOTSUPP;
	}

	if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
		return -EOPNOTSUPP;

	oldprog = cls_bpf->oldprog;

	/* Don't remove if oldprog doesn't match driver's state */
	if (ns->bpf_offloaded != oldprog) {
		oldprog = NULL;
		if (!cls_bpf->prog)
			return 0;
		if (ns->bpf_offloaded) {
			NSIM_EA(cls_bpf->common.extack,
				"driver and netdev offload states mismatch");
			return -EBUSY;
		}
	}

	return nsim_bpf_offload(ns, cls_bpf->prog, oldprog);
}

int nsim_bpf_disable_tc(struct netdevsim *ns)
{
	if (ns->bpf_offloaded && !nsim_xdp_offload_active(ns))
		return -EBUSY;
	return 0;
}

static int nsim_xdp_offload_prog(struct netdevsim *ns, struct netdev_bpf *bpf)
{
	if (!nsim_xdp_offload_active(ns) && !bpf->prog)
		return 0;
	if (!nsim_xdp_offload_active(ns) && bpf->prog && ns->bpf_offloaded) {
		NSIM_EA(bpf->extack, "TC program is already loaded");
		return -EBUSY;
	}

	return nsim_bpf_offload(ns, bpf->prog, nsim_xdp_offload_active(ns));
}
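
/* Attach an XDP program in driver or HW mode, honouring the
 * bpf_xdpdrv_accept/bpf_xdpoffload_accept debugfs switches.
 */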
static int
nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf,
		  struct xdp_attachment_info *xdp)
{
	int err;

	if (!xdp_attachment_flags_ok(xdp, bpf))
		return -EBUSY;

	if (bpf->command == XDP_SETUP_PROG && !ns->bpf_xdpdrv_accept) {
		NSIM_EA(bpf->extack, "driver XDP disabled in DebugFS");
		return -EOPNOTSUPP;
	}
	if (bpf->command == XDP_SETUP_PROG_HW && !ns->bpf_xdpoffload_accept) {
		NSIM_EA(bpf->extack, "XDP offload disabled in DebugFS");
		return -EOPNOTSUPP;
	}

	if (bpf->command == XDP_SETUP_PROG_HW) {
		err = nsim_xdp_offload_prog(ns, bpf);
		if (err)
			return err;
	}

	xdp_attachment_setup(xdp, bpf);

	return 0;
}
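
/* Allocate bound-program state and publish it under a fresh debugfs
 * directory named after a driver-local id (the BPF program id is not
 * populated yet at this point).
 */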
static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
{
	struct nsim_bpf_bound_prog *state;
	char name[16];

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->ns = ns;
	state->prog = prog;
	state->state = "verify";

	/* Program id is not populated yet when we create the state. */
	sprintf(name, "%u", ns->sdev->prog_id_gen++);
	state->ddir = debugfs_create_dir(name, ns->sdev->ddir_bpf_bound_progs);
	if (IS_ERR_OR_NULL(state->ddir)) {
		kfree(state);
		return -ENOMEM;
	}

	debugfs_create_u32("id", 0400, state->ddir, &prog->aux->id);
	debugfs_create_file("state", 0400, state->ddir,
			    &state->state, &nsim_bpf_string_fops);
	debugfs_create_bool("loaded", 0400, state->ddir, &state->is_loaded);

	list_add_tail(&state->l, &ns->sdev->bpf_bound_progs);

	prog->aux->offload->dev_priv = state;

	return 0;
}

static void nsim_bpf_destroy_prog(struct bpf_prog *prog)
{
	struct nsim_bpf_bound_prog *state;

	state = prog->aux->offload->dev_priv;
	WARN(state->is_loaded,
	     "offload state destroyed while program still bound");
	debugfs_remove_recursive(state->ddir);
	list_del(&state->l);
	kfree(state);
}

static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
{
	if (bpf->prog && bpf->prog->aux->offload) {
		NSIM_EA(bpf->extack, "attempt to load offloaded prog to drv");
		return -EINVAL;
	}
	if (ns->netdev->mtu > NSIM_XDP_MAX_MTU) {
		NSIM_EA(bpf->extack, "MTU too large w/ XDP enabled");
		return -EINVAL;
	}
	return 0;
}

static int
nsim_setup_prog_hw_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
{
	struct nsim_bpf_bound_prog *state;

	if (!bpf->prog)
		return 0;

	if (!bpf->prog->aux->offload) {
		NSIM_EA(bpf->extack, "xdpoffload of non-bound program");
		return -EINVAL;
	}
	if (!bpf_offload_dev_match(bpf->prog, ns->netdev)) {
		NSIM_EA(bpf->extack, "program bound to different dev");
		return -EINVAL;
	}

	state = bpf->prog->aux->offload->dev_priv;
	if (WARN_ON(strcmp(state->state, "xlated"))) {
		NSIM_EA(bpf->extack, "offloading program in bad state");
		return -EINVAL;
	}
	return 0;
}

static bool
nsim_map_key_match(struct bpf_map *map, struct nsim_map_entry *e, void *key)
{
	return e->key && !memcmp(key, e->key, map->key_size);
}
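
/* Return the index of the entry matching @key, or -ENOENT. */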
static int nsim_map_key_find(struct bpf_offloaded_map *offmap, void *key)
{
	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(nmap->entry); i++)
		if (nsim_map_key_match(&offmap->map, &nmap->entry[i], key))
			return i;

	return -ENOENT;
}

static int
nsim_map_alloc_elem(struct bpf_offloaded_map *offmap, unsigned int idx)
{
	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;

	nmap->entry[idx].key = kmalloc(offmap->map.key_size, GFP_USER);
	if (!nmap->entry[idx].key)
		return -ENOMEM;
	nmap->entry[idx].value = kmalloc(offmap->map.value_size, GFP_USER);
	if (!nmap->entry[idx].value) {
		kfree(nmap->entry[idx].key);
		nmap->entry[idx].key = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int
nsim_map_get_next_key(struct bpf_offloaded_map *offmap,
		      void *key, void *next_key)
{
	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
	int idx = -ENOENT;

	mutex_lock(&nmap->mutex);

	if (key)
		idx = nsim_map_key_find(offmap, key);
	if (idx == -ENOENT)
		idx = 0;
	else
		idx++;

	for (; idx < ARRAY_SIZE(nmap->entry); idx++) {
		if (nmap->entry[idx].key) {
			memcpy(next_key, nmap->entry[idx].key,
			       offmap->map.key_size);
			break;
		}
	}

	mutex_unlock(&nmap->mutex);

	if (idx == ARRAY_SIZE(nmap->entry))
		return -ENOENT;
	return 0;
}

static int
nsim_map_lookup_elem(struct bpf_offloaded_map *offmap, void *key, void *value)
{
	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
	int idx;

	mutex_lock(&nmap->mutex);

	idx = nsim_map_key_find(offmap, key);
	if (idx >= 0)
		memcpy(value, nmap->entry[idx].value, offmap->map.value_size);

	mutex_unlock(&nmap->mutex);

	return idx < 0 ? idx : 0;
}
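
/* Update or insert an entry, respecting the BPF_EXIST/BPF_NOEXIST flags;
 * new entries take the first free slot in the fixed-size entry array.
 */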
static int
nsim_map_update_elem(struct bpf_offloaded_map *offmap,
		     void *key, void *value, u64 flags)
{
	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
	int idx, err = 0;

	mutex_lock(&nmap->mutex);

	idx = nsim_map_key_find(offmap, key);
	if (idx < 0 && flags == BPF_EXIST) {
		err = idx;
		goto exit_unlock;
	}
	if (idx >= 0 && flags == BPF_NOEXIST) {
		err = -EEXIST;
		goto exit_unlock;
	}

	if (idx < 0) {
		for (idx = 0; idx < ARRAY_SIZE(nmap->entry); idx++)
			if (!nmap->entry[idx].key)
				break;
		if (idx == ARRAY_SIZE(nmap->entry)) {
			err = -E2BIG;
			goto exit_unlock;
		}

		err = nsim_map_alloc_elem(offmap, idx);
		if (err)
			goto exit_unlock;
	}

	memcpy(nmap->entry[idx].key, key, offmap->map.key_size);
	memcpy(nmap->entry[idx].value, value, offmap->map.value_size);

exit_unlock:
	mutex_unlock(&nmap->mutex);

	return err;
}

static int nsim_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
{
	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
	int idx;

	if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
		return -EINVAL;

	mutex_lock(&nmap->mutex);

	idx = nsim_map_key_find(offmap, key);
	if (idx >= 0) {
		kfree(nmap->entry[idx].key);
		kfree(nmap->entry[idx].value);
		memset(&nmap->entry[idx], 0, sizeof(nmap->entry[idx]));
	}

	mutex_unlock(&nmap->mutex);

	return idx < 0 ? idx : 0;
}

static const struct bpf_map_dev_ops nsim_bpf_map_ops = {
	.map_get_next_key	= nsim_map_get_next_key,
	.map_lookup_elem	= nsim_map_lookup_elem,
	.map_update_elem	= nsim_map_update_elem,
	.map_delete_elem	= nsim_map_delete_elem,
};
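
/* Accept only small array and hash maps created without flags; array
 * maps get all their entries pre-allocated up front.
 */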
static int
nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap)
{
	struct nsim_bpf_bound_map *nmap;
	int i, err;

	if (WARN_ON(offmap->map.map_type != BPF_MAP_TYPE_ARRAY &&
		    offmap->map.map_type != BPF_MAP_TYPE_HASH))
		return -EINVAL;
	if (offmap->map.max_entries > NSIM_BPF_MAX_KEYS)
		return -ENOMEM;
	if (offmap->map.map_flags)
		return -EINVAL;

	nmap = kzalloc(sizeof(*nmap), GFP_USER);
	if (!nmap)
		return -ENOMEM;

	offmap->dev_priv = nmap;
	nmap->ns = ns;
	nmap->map = offmap;
	mutex_init(&nmap->mutex);

	if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY) {
		for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) {
			u32 *key;

			err = nsim_map_alloc_elem(offmap, i);
			if (err)
				goto err_free;
			key = nmap->entry[i].key;
			*key = i;
			memset(nmap->entry[i].value, 0, offmap->map.value_size);
		}
	}

	offmap->dev_ops = &nsim_bpf_map_ops;
	list_add_tail(&nmap->l, &ns->sdev->bpf_bound_maps);

	return 0;

err_free:
	while (--i >= 0) {
		kfree(nmap->entry[i].key);
		kfree(nmap->entry[i].value);
	}
	kfree(nmap);
	return err;
}

static void nsim_bpf_map_free(struct bpf_offloaded_map *offmap)
{
	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) {
		kfree(nmap->entry[i].key);
		kfree(nmap->entry[i].value);
	}

	list_del_init(&nmap->l);
	mutex_destroy(&nmap->mutex);
	kfree(nmap);
}
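
/* ndo_bpf entry point: dispatch verifier prep/translate/destroy, XDP
 * query and setup, and offloaded map alloc/free commands.
 */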
int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct netdevsim *ns = netdev_priv(dev);
	struct nsim_bpf_bound_prog *state;
	int err;

	ASSERT_RTNL();

	switch (bpf->command) {
	case BPF_OFFLOAD_VERIFIER_PREP:
		if (!ns->bpf_bind_accept)
			return -EOPNOTSUPP;

		err = nsim_bpf_create_prog(ns, bpf->verifier.prog);
		if (err)
			return err;

		bpf->verifier.ops = &nsim_bpf_analyzer_ops;
		return 0;
	case BPF_OFFLOAD_TRANSLATE:
		state = bpf->offload.prog->aux->offload->dev_priv;

		state->state = "xlated";
		return 0;
	case BPF_OFFLOAD_DESTROY:
		nsim_bpf_destroy_prog(bpf->offload.prog);
		return 0;
	case XDP_QUERY_PROG:
		return xdp_attachment_query(&ns->xdp, bpf);
	case XDP_QUERY_PROG_HW:
		return xdp_attachment_query(&ns->xdp_hw, bpf);
	case XDP_SETUP_PROG:
		err = nsim_setup_prog_checks(ns, bpf);
		if (err)
			return err;

		return nsim_xdp_set_prog(ns, bpf, &ns->xdp);
	case XDP_SETUP_PROG_HW:
		err = nsim_setup_prog_hw_checks(ns, bpf);
		if (err)
			return err;

		return nsim_xdp_set_prog(ns, bpf, &ns->xdp_hw);
	case BPF_OFFLOAD_MAP_ALLOC:
		if (!ns->bpf_map_accept)
			return -EOPNOTSUPP;

		return nsim_bpf_map_alloc(ns, bpf->offmap);
	case BPF_OFFLOAD_MAP_FREE:
		nsim_bpf_map_free(bpf->offmap);
		return 0;
	default:
		return -EINVAL;
	}
}
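
/* Set up shared BPF offload state on first use of the shared device and
 * create the debugfs knobs controlling what the simulator accepts.
 */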
int nsim_bpf_init(struct netdevsim *ns)
{
	int err;

	if (ns->sdev->refcnt == 1) {
		INIT_LIST_HEAD(&ns->sdev->bpf_bound_progs);
		INIT_LIST_HEAD(&ns->sdev->bpf_bound_maps);

		ns->sdev->ddir_bpf_bound_progs =
			debugfs_create_dir("bpf_bound_progs", ns->sdev->ddir);
		if (IS_ERR_OR_NULL(ns->sdev->ddir_bpf_bound_progs))
			return -ENOMEM;

		ns->sdev->bpf_dev = bpf_offload_dev_create();
		err = PTR_ERR_OR_ZERO(ns->sdev->bpf_dev);
		if (err)
			return err;
	}

	err = bpf_offload_dev_netdev_register(ns->sdev->bpf_dev, ns->netdev);
	if (err)
		goto err_destroy_bdev;

	debugfs_create_u32("bpf_offloaded_id", 0400, ns->ddir,
			   &ns->bpf_offloaded_id);

	ns->bpf_bind_accept = true;
	debugfs_create_bool("bpf_bind_accept", 0600, ns->ddir,
			    &ns->bpf_bind_accept);
	debugfs_create_u32("bpf_bind_verifier_delay", 0600, ns->ddir,
			   &ns->bpf_bind_verifier_delay);

	ns->bpf_tc_accept = true;
	debugfs_create_bool("bpf_tc_accept", 0600, ns->ddir,
			    &ns->bpf_tc_accept);
	debugfs_create_bool("bpf_tc_non_bound_accept", 0600, ns->ddir,
			    &ns->bpf_tc_non_bound_accept);

	ns->bpf_xdpdrv_accept = true;
	debugfs_create_bool("bpf_xdpdrv_accept", 0600, ns->ddir,
			    &ns->bpf_xdpdrv_accept);

	ns->bpf_xdpoffload_accept = true;
	debugfs_create_bool("bpf_xdpoffload_accept", 0600, ns->ddir,
			    &ns->bpf_xdpoffload_accept);

	ns->bpf_map_accept = true;
	debugfs_create_bool("bpf_map_accept", 0600, ns->ddir,
			    &ns->bpf_map_accept);

	return 0;

err_destroy_bdev:
	if (ns->sdev->refcnt == 1)
		bpf_offload_dev_destroy(ns->sdev->bpf_dev);
	return err;
}

void nsim_bpf_uninit(struct netdevsim *ns)
{
	WARN_ON(ns->xdp.prog);
	WARN_ON(ns->xdp_hw.prog);
	WARN_ON(ns->bpf_offloaded);
	bpf_offload_dev_netdev_unregister(ns->sdev->bpf_dev, ns->netdev);

	if (ns->sdev->refcnt == 1) {
		WARN_ON(!list_empty(&ns->sdev->bpf_bound_progs));
		WARN_ON(!list_empty(&ns->sdev->bpf_bound_maps));
		bpf_offload_dev_destroy(ns->sdev->bpf_dev);
	}
}