/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/rtnetlink.h>
#include <net/pkt_cls.h>

#include "netdevsim.h"

#define pr_vlog(env, fmt, ...)	\
	bpf_verifier_log_write(env, "[netdevsim] " fmt, ##__VA_ARGS__)

struct nsim_bpf_bound_prog {
	struct nsim_dev *nsim_dev;
	struct bpf_prog *prog;
	struct dentry *ddir;
	const char *state;
	bool is_loaded;
	struct list_head l;
};

#define NSIM_BPF_MAX_KEYS		2

struct nsim_bpf_bound_map {
	struct netdevsim *ns;
	struct bpf_offloaded_map *map;
	struct mutex mutex;
	struct nsim_map_entry {
		void *key;
		void *value;
	} entry[NSIM_BPF_MAX_KEYS];
	struct list_head l;
};

static int nsim_bpf_string_show(struct seq_file *file, void *data)
{
	const char **str = file->private;

	if (*str)
		seq_printf(file, "%s\n", *str);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(nsim_bpf_string);

static int
nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn)
{
	struct nsim_bpf_bound_prog *state;

	state = env->prog->aux->offload->dev_priv;
	if (state->nsim_dev->bpf_bind_verifier_delay && !insn_idx)
		msleep(state->nsim_dev->bpf_bind_verifier_delay);

	if (insn_idx == env->prog->len - 1)
		pr_vlog(env, "Hello from netdevsim!\n");

	return 0;
}

static int nsim_bpf_finalize(struct bpf_verifier_env *env)
{
	return 0;
}

static bool nsim_xdp_offload_active(struct netdevsim *ns)
{
	return ns->xdp_hw.prog;
}

static void nsim_prog_set_loaded(struct bpf_prog *prog, bool loaded)
{
	struct nsim_bpf_bound_prog *state;

	if (!prog || !prog->aux->offload)
		return;

	state = prog->aux->offload->dev_priv;
	state->is_loaded = loaded;
}

static int
nsim_bpf_offload(struct netdevsim *ns, struct bpf_prog *prog, bool oldprog)
{
	nsim_prog_set_loaded(ns->bpf_offloaded, false);

	WARN(!!ns->bpf_offloaded != oldprog,
	     "bad offload state, expected offload %sto be active",
	     oldprog ? "" : "not ");
	ns->bpf_offloaded = prog;
	ns->bpf_offloaded_id = prog ? prog->aux->id : 0;
	nsim_prog_set_loaded(prog, true);

	return 0;
}

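/* TC cls_bpf offload callback: validates the request and, if the simulated
 * device is configured to accept it, records the program as the currently
 * offloaded one. Every reject path sets an extack message for tests.
 */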
int nsim_bpf_setup_tc_block_cb(enum tc_setup_type type,
			       void *type_data, void *cb_priv)
{
	struct tc_cls_bpf_offload *cls_bpf = type_data;
	struct bpf_prog *prog = cls_bpf->prog;
	struct netdevsim *ns = cb_priv;
	struct bpf_prog *oldprog;

	if (type != TC_SETUP_CLSBPF) {
		NSIM_EA(cls_bpf->common.extack,
			"only offload of BPF classifiers supported");
		return -EOPNOTSUPP;
	}

	if (!tc_cls_can_offload_and_chain0(ns->netdev, &cls_bpf->common))
		return -EOPNOTSUPP;

	if (cls_bpf->common.protocol != htons(ETH_P_ALL)) {
		NSIM_EA(cls_bpf->common.extack,
			"only ETH_P_ALL supported as filter protocol");
		return -EOPNOTSUPP;
	}

	if (!ns->bpf_tc_accept) {
		NSIM_EA(cls_bpf->common.extack,
			"netdevsim configured to reject BPF TC offload");
		return -EOPNOTSUPP;
	}
	/* Note: progs without skip_sw will probably not be dev bound */
	if (prog && !prog->aux->offload && !ns->bpf_tc_non_bound_accept) {
		NSIM_EA(cls_bpf->common.extack,
			"netdevsim configured to reject unbound programs");
		return -EOPNOTSUPP;
	}

	if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
		return -EOPNOTSUPP;

	oldprog = cls_bpf->oldprog;

	/* Don't remove if oldprog doesn't match driver's state */
	if (ns->bpf_offloaded != oldprog) {
		oldprog = NULL;
		if (!cls_bpf->prog)
			return 0;
		if (ns->bpf_offloaded) {
			NSIM_EA(cls_bpf->common.extack,
				"driver and netdev offload states mismatch");
			return -EBUSY;
		}
	}

	return nsim_bpf_offload(ns, cls_bpf->prog, oldprog);
}

int nsim_bpf_disable_tc(struct netdevsim *ns)
{
	if (ns->bpf_offloaded && !nsim_xdp_offload_active(ns))
		return -EBUSY;
	return 0;
}

static int nsim_xdp_offload_prog(struct netdevsim *ns, struct netdev_bpf *bpf)
{
	if (!nsim_xdp_offload_active(ns) && !bpf->prog)
		return 0;
	if (!nsim_xdp_offload_active(ns) && bpf->prog && ns->bpf_offloaded) {
		NSIM_EA(bpf->extack, "TC program is already loaded");
		return -EBUSY;
	}

	return nsim_bpf_offload(ns, bpf->prog, nsim_xdp_offload_active(ns));
}

static int
nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf,
		  struct xdp_attachment_info *xdp)
{
	int err;

	if (!xdp_attachment_flags_ok(xdp, bpf))
		return -EBUSY;

	if (bpf->command == XDP_SETUP_PROG && !ns->bpf_xdpdrv_accept) {
		NSIM_EA(bpf->extack, "driver XDP disabled in DebugFS");
		return -EOPNOTSUPP;
	}
	if (bpf->command == XDP_SETUP_PROG_HW && !ns->bpf_xdpoffload_accept) {
		NSIM_EA(bpf->extack, "XDP offload disabled in DebugFS");
		return -EOPNOTSUPP;
	}

	if (bpf->command == XDP_SETUP_PROG_HW) {
		err = nsim_xdp_offload_prog(ns, bpf);
		if (err)
			return err;
	}

	xdp_attachment_setup(xdp, bpf);

	return 0;
}

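/* Each bound program gets a DebugFS directory (named after a local id) with
 * "id", "state" and "loaded" entries so user space can observe it.
 */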
static int nsim_bpf_create_prog(struct nsim_dev *nsim_dev,
				struct bpf_prog *prog)
{
	struct nsim_bpf_bound_prog *state;
	char name[16];

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->nsim_dev = nsim_dev;
	state->prog = prog;
	state->state = "verify";

	/* Program id is not populated yet when we create the state. */
	sprintf(name, "%u", nsim_dev->prog_id_gen++);
	state->ddir = debugfs_create_dir(name, nsim_dev->ddir_bpf_bound_progs);
	if (IS_ERR_OR_NULL(state->ddir)) {
		kfree(state);
		return -ENOMEM;
	}

	debugfs_create_u32("id", 0400, state->ddir, &prog->aux->id);
	debugfs_create_file("state", 0400, state->ddir,
			    &state->state, &nsim_bpf_string_fops);
	debugfs_create_bool("loaded", 0400, state->ddir, &state->is_loaded);

	list_add_tail(&state->l, &nsim_dev->bpf_bound_progs);

	prog->aux->offload->dev_priv = state;

	return 0;
}

static int nsim_bpf_verifier_prep(struct bpf_prog *prog)
{
	struct nsim_dev *nsim_dev =
			bpf_offload_dev_priv(prog->aux->offload->offdev);

	if (!nsim_dev->bpf_bind_accept)
		return -EOPNOTSUPP;

	return nsim_bpf_create_prog(nsim_dev, prog);
}

static int nsim_bpf_translate(struct bpf_prog *prog)
{
	struct nsim_bpf_bound_prog *state = prog->aux->offload->dev_priv;

	state->state = "xlated";
	return 0;
}

static void nsim_bpf_destroy_prog(struct bpf_prog *prog)
{
	struct nsim_bpf_bound_prog *state;

	state = prog->aux->offload->dev_priv;
	WARN(state->is_loaded,
	     "offload state destroyed while program still bound");
	debugfs_remove_recursive(state->ddir);
	list_del(&state->l);
	kfree(state);
}

static const struct bpf_prog_offload_ops nsim_bpf_dev_ops = {
	.insn_hook	= nsim_bpf_verify_insn,
	.finalize	= nsim_bpf_finalize,
	.prepare	= nsim_bpf_verifier_prep,
	.translate	= nsim_bpf_translate,
	.destroy	= nsim_bpf_destroy_prog,
};

static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
{
	if (bpf->prog && bpf->prog->aux->offload) {
		NSIM_EA(bpf->extack, "attempt to load offloaded prog to drv");
		return -EINVAL;
	}
	if (ns->netdev->mtu > NSIM_XDP_MAX_MTU) {
		NSIM_EA(bpf->extack, "MTU too large w/ XDP enabled");
		return -EINVAL;
	}
	return 0;
}

static int
nsim_setup_prog_hw_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
{
	struct nsim_bpf_bound_prog *state;

	if (!bpf->prog)
		return 0;

	if (!bpf->prog->aux->offload) {
		NSIM_EA(bpf->extack, "xdpoffload of non-bound program");
		return -EINVAL;
	}
	if (!bpf_offload_dev_match(bpf->prog, ns->netdev)) {
		NSIM_EA(bpf->extack, "program bound to different dev");
		return -EINVAL;
	}

	state = bpf->prog->aux->offload->dev_priv;
	if (WARN_ON(strcmp(state->state, "xlated"))) {
		NSIM_EA(bpf->extack, "offloading program in bad state");
		return -EINVAL;
	}
	return 0;
}

static bool
nsim_map_key_match(struct bpf_map *map, struct nsim_map_entry *e, void *key)
{
	return e->key && !memcmp(key, e->key, map->key_size);
}

static int nsim_map_key_find(struct bpf_offloaded_map *offmap, void *key)
{
	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(nmap->entry); i++)
		if (nsim_map_key_match(&offmap->map, &nmap->entry[i], key))
			return i;

	return -ENOENT;
}

static int
nsim_map_alloc_elem(struct bpf_offloaded_map *offmap, unsigned int idx)
{
	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;

	nmap->entry[idx].key = kmalloc(offmap->map.key_size, GFP_USER);
	if (!nmap->entry[idx].key)
		return -ENOMEM;
	nmap->entry[idx].value = kmalloc(offmap->map.value_size, GFP_USER);
	if (!nmap->entry[idx].value) {
		kfree(nmap->entry[idx].key);
		nmap->entry[idx].key = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int
nsim_map_get_next_key(struct bpf_offloaded_map *offmap,
		      void *key, void *next_key)
{
	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
	int idx = -ENOENT;

	mutex_lock(&nmap->mutex);

	if (key)
		idx = nsim_map_key_find(offmap, key);
	if (idx == -ENOENT)
		idx = 0;
	else
		idx++;

	for (; idx < ARRAY_SIZE(nmap->entry); idx++) {
		if (nmap->entry[idx].key) {
			memcpy(next_key, nmap->entry[idx].key,
			       offmap->map.key_size);
			break;
		}
	}

	mutex_unlock(&nmap->mutex);

	if (idx == ARRAY_SIZE(nmap->entry))
		return -ENOENT;
	return 0;
}

static int
nsim_map_lookup_elem(struct bpf_offloaded_map *offmap, void *key, void *value)
{
	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
	int idx;

	mutex_lock(&nmap->mutex);

	idx = nsim_map_key_find(offmap, key);
	if (idx >= 0)
		memcpy(value, nmap->entry[idx].value, offmap->map.value_size);

	mutex_unlock(&nmap->mutex);

	return idx < 0 ? idx : 0;
}

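/* Update honours BPF_EXIST/BPF_NOEXIST: updating a missing key fails under
 * BPF_EXIST, updating a present key fails under BPF_NOEXIST, and a new key
 * is placed in the first free slot (or the update fails once the map is full).
 */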
static int
nsim_map_update_elem(struct bpf_offloaded_map *offmap,
		     void *key, void *value, u64 flags)
{
	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
	int idx, err = 0;

	mutex_lock(&nmap->mutex);

	idx = nsim_map_key_find(offmap, key);
	if (idx < 0 && flags == BPF_EXIST) {
		err = idx;
		goto exit_unlock;
	}
	if (idx >= 0 && flags == BPF_NOEXIST) {
		err = -EEXIST;
		goto exit_unlock;
	}

	if (idx < 0) {
		for (idx = 0; idx < ARRAY_SIZE(nmap->entry); idx++)
			if (!nmap->entry[idx].key)
				break;
		if (idx == ARRAY_SIZE(nmap->entry)) {
			err = -E2BIG;
			goto exit_unlock;
		}

		err = nsim_map_alloc_elem(offmap, idx);
		if (err)
			goto exit_unlock;
	}

	memcpy(nmap->entry[idx].key, key, offmap->map.key_size);
	memcpy(nmap->entry[idx].value, value, offmap->map.value_size);
exit_unlock:
	mutex_unlock(&nmap->mutex);

	return err;
}

static int nsim_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
{
	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
	int idx;

	if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
		return -EINVAL;

	mutex_lock(&nmap->mutex);

	idx = nsim_map_key_find(offmap, key);
	if (idx >= 0) {
		kfree(nmap->entry[idx].key);
		kfree(nmap->entry[idx].value);
		memset(&nmap->entry[idx], 0, sizeof(nmap->entry[idx]));
	}

	mutex_unlock(&nmap->mutex);

	return idx < 0 ? idx : 0;
}

static const struct bpf_map_dev_ops nsim_bpf_map_ops = {
	.map_get_next_key	= nsim_map_get_next_key,
	.map_lookup_elem	= nsim_map_lookup_elem,
	.map_update_elem	= nsim_map_update_elem,
	.map_delete_elem	= nsim_map_delete_elem,
};

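/* Emulate an offloaded array or hash map with at most NSIM_BPF_MAX_KEYS
 * entries; array maps get backing storage for every index up front.
 */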
static int
nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap)
{
	struct nsim_bpf_bound_map *nmap;
	int i, err;

	if (WARN_ON(offmap->map.map_type != BPF_MAP_TYPE_ARRAY &&
		    offmap->map.map_type != BPF_MAP_TYPE_HASH))
		return -EINVAL;
	if (offmap->map.max_entries > NSIM_BPF_MAX_KEYS)
		return -ENOMEM;
	if (offmap->map.map_flags)
		return -EINVAL;

	nmap = kzalloc(sizeof(*nmap), GFP_USER);
	if (!nmap)
		return -ENOMEM;

	offmap->dev_priv = nmap;
	nmap->ns = ns;
	nmap->map = offmap;
	mutex_init(&nmap->mutex);

	if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY) {
		for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) {
			u32 *key;

			err = nsim_map_alloc_elem(offmap, i);
			if (err)
				goto err_free;
			key = nmap->entry[i].key;
			*key = i;
			memset(nmap->entry[i].value, 0, offmap->map.value_size);
		}
	}

	offmap->dev_ops = &nsim_bpf_map_ops;
	list_add_tail(&nmap->l, &ns->nsim_dev->bpf_bound_maps);

	return 0;

err_free:
	while (--i >= 0) {
		kfree(nmap->entry[i].key);
		kfree(nmap->entry[i].value);
	}
	kfree(nmap);
	return err;
}

static void nsim_bpf_map_free(struct bpf_offloaded_map *offmap)
{
	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) {
		kfree(nmap->entry[i].key);
		kfree(nmap->entry[i].value);
	}
	list_del_init(&nmap->l);
	mutex_destroy(&nmap->mutex);
	kfree(nmap);
}

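/* ndo_bpf entry point: dispatches XDP queries, driver/HW program setup and
 * offloaded map allocation/free to the helpers above.
 */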
int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct netdevsim *ns = netdev_priv(dev);
	int err;

	ASSERT_RTNL();

	switch (bpf->command) {
	case XDP_QUERY_PROG:
		return xdp_attachment_query(&ns->xdp, bpf);
	case XDP_QUERY_PROG_HW:
		return xdp_attachment_query(&ns->xdp_hw, bpf);
	case XDP_SETUP_PROG:
		err = nsim_setup_prog_checks(ns, bpf);
		if (err)
			return err;

		return nsim_xdp_set_prog(ns, bpf, &ns->xdp);
	case XDP_SETUP_PROG_HW:
		err = nsim_setup_prog_hw_checks(ns, bpf);
		if (err)
			return err;

		return nsim_xdp_set_prog(ns, bpf, &ns->xdp_hw);
	case BPF_OFFLOAD_MAP_ALLOC:
		if (!ns->bpf_map_accept)
			return -EOPNOTSUPP;

		return nsim_bpf_map_alloc(ns, bpf->offmap);
	case BPF_OFFLOAD_MAP_FREE:
		nsim_bpf_map_free(bpf->offmap);
		return 0;
	default:
		return -EINVAL;
	}
}

int nsim_bpf_dev_init(struct nsim_dev *nsim_dev)
{
	int err;

	INIT_LIST_HEAD(&nsim_dev->bpf_bound_progs);
	INIT_LIST_HEAD(&nsim_dev->bpf_bound_maps);

	nsim_dev->ddir_bpf_bound_progs = debugfs_create_dir("bpf_bound_progs",
							    nsim_dev->ddir);
	if (IS_ERR_OR_NULL(nsim_dev->ddir_bpf_bound_progs))
		return -ENOMEM;

	nsim_dev->bpf_dev = bpf_offload_dev_create(&nsim_bpf_dev_ops, nsim_dev);
	err = PTR_ERR_OR_ZERO(nsim_dev->bpf_dev);
	if (err)
		return err;

	nsim_dev->bpf_bind_accept = true;
	debugfs_create_bool("bpf_bind_accept", 0600, nsim_dev->ddir,
			    &nsim_dev->bpf_bind_accept);
	debugfs_create_u32("bpf_bind_verifier_delay", 0600, nsim_dev->ddir,
			   &nsim_dev->bpf_bind_verifier_delay);
	return 0;
}

void nsim_bpf_dev_exit(struct nsim_dev *nsim_dev)
{
	WARN_ON(!list_empty(&nsim_dev->bpf_bound_progs));
	WARN_ON(!list_empty(&nsim_dev->bpf_bound_maps));
	bpf_offload_dev_destroy(nsim_dev->bpf_dev);
}

int nsim_bpf_init(struct netdevsim *ns)
{
	struct dentry *ddir = ns->nsim_dev_port->ddir;
	int err;

	err = bpf_offload_dev_netdev_register(ns->nsim_dev->bpf_dev,
					      ns->netdev);
	if (err)
		return err;

	debugfs_create_u32("bpf_offloaded_id", 0400, ddir,
			   &ns->bpf_offloaded_id);

	ns->bpf_tc_accept = true;
	debugfs_create_bool("bpf_tc_accept", 0600, ddir,
			    &ns->bpf_tc_accept);
	debugfs_create_bool("bpf_tc_non_bound_accept", 0600, ddir,
			    &ns->bpf_tc_non_bound_accept);
	ns->bpf_xdpdrv_accept = true;
	debugfs_create_bool("bpf_xdpdrv_accept", 0600, ddir,
			    &ns->bpf_xdpdrv_accept);
	ns->bpf_xdpoffload_accept = true;
	debugfs_create_bool("bpf_xdpoffload_accept", 0600, ddir,
			    &ns->bpf_xdpoffload_accept);

	ns->bpf_map_accept = true;
	debugfs_create_bool("bpf_map_accept", 0600, ddir,
			    &ns->bpf_map_accept);

	return 0;
}

void nsim_bpf_uninit(struct netdevsim *ns)
{
	WARN_ON(ns->xdp.prog);
	WARN_ON(ns->xdp_hw.prog);
	WARN_ON(ns->bpf_offloaded);
	bpf_offload_dev_netdev_unregister(ns->nsim_dev->bpf_dev, ns->netdev);
}