/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */
16 #include <linux/bpf.h>
17 #include <linux/bpf_verifier.h>
18 #include <linux/debugfs.h>
19 #include <linux/kernel.h>
20 #include <linux/mutex.h>
21 #include <linux/rtnetlink.h>
22 #include <net/pkt_cls.h>
24 #include "netdevsim.h"
/* Prefix verifier-log messages with "[netdevsim]" so they are attributable. */
#define pr_vlog(env, fmt, ...)	\
	bpf_verifier_log_write(env, "[netdevsim] " fmt, ##__VA_ARGS__)
29 struct nsim_bpf_bound_prog
{
31 struct bpf_prog
*prog
;
38 #define NSIM_BPF_MAX_KEYS 2
40 struct nsim_bpf_bound_map
{
42 struct bpf_offloaded_map
*map
;
44 struct nsim_map_entry
{
47 } entry
[NSIM_BPF_MAX_KEYS
];
51 static int nsim_debugfs_bpf_string_read(struct seq_file
*file
, void *data
)
53 const char **str
= file
->private;
56 seq_printf(file
, "%s\n", *str
);
61 static int nsim_debugfs_bpf_string_open(struct inode
*inode
, struct file
*f
)
63 return single_open(f
, nsim_debugfs_bpf_string_read
, inode
->i_private
);
66 static const struct file_operations nsim_bpf_string_fops
= {
68 .open
= nsim_debugfs_bpf_string_open
,
69 .release
= single_release
,
75 nsim_bpf_verify_insn(struct bpf_verifier_env
*env
, int insn_idx
, int prev_insn
)
77 struct nsim_bpf_bound_prog
*state
;
79 state
= env
->prog
->aux
->offload
->dev_priv
;
80 if (state
->ns
->bpf_bind_verifier_delay
&& !insn_idx
)
81 msleep(state
->ns
->bpf_bind_verifier_delay
);
83 if (insn_idx
== env
->prog
->len
- 1)
84 pr_vlog(env
, "Hello from netdevsim!\n");
89 static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops
= {
90 .insn_hook
= nsim_bpf_verify_insn
,
93 static bool nsim_xdp_offload_active(struct netdevsim
*ns
)
95 return ns
->xdp_prog_mode
== XDP_ATTACHED_HW
;
98 static void nsim_prog_set_loaded(struct bpf_prog
*prog
, bool loaded
)
100 struct nsim_bpf_bound_prog
*state
;
102 if (!prog
|| !prog
->aux
->offload
)
105 state
= prog
->aux
->offload
->dev_priv
;
106 state
->is_loaded
= loaded
;
110 nsim_bpf_offload(struct netdevsim
*ns
, struct bpf_prog
*prog
, bool oldprog
)
112 nsim_prog_set_loaded(ns
->bpf_offloaded
, false);
114 WARN(!!ns
->bpf_offloaded
!= oldprog
,
115 "bad offload state, expected offload %sto be active",
116 oldprog
? "" : "not ");
117 ns
->bpf_offloaded
= prog
;
118 ns
->bpf_offloaded_id
= prog
? prog
->aux
->id
: 0;
119 nsim_prog_set_loaded(prog
, true);
124 int nsim_bpf_setup_tc_block_cb(enum tc_setup_type type
,
125 void *type_data
, void *cb_priv
)
127 struct tc_cls_bpf_offload
*cls_bpf
= type_data
;
128 struct bpf_prog
*prog
= cls_bpf
->prog
;
129 struct netdevsim
*ns
= cb_priv
;
130 struct bpf_prog
*oldprog
;
132 if (type
!= TC_SETUP_CLSBPF
) {
133 NSIM_EA(cls_bpf
->common
.extack
,
134 "only offload of BPF classifiers supported");
138 if (!tc_cls_can_offload_and_chain0(ns
->netdev
, &cls_bpf
->common
))
141 if (cls_bpf
->common
.protocol
!= htons(ETH_P_ALL
)) {
142 NSIM_EA(cls_bpf
->common
.extack
,
143 "only ETH_P_ALL supported as filter protocol");
147 if (!ns
->bpf_tc_accept
) {
148 NSIM_EA(cls_bpf
->common
.extack
,
149 "netdevsim configured to reject BPF TC offload");
152 /* Note: progs without skip_sw will probably not be dev bound */
153 if (prog
&& !prog
->aux
->offload
&& !ns
->bpf_tc_non_bound_accept
) {
154 NSIM_EA(cls_bpf
->common
.extack
,
155 "netdevsim configured to reject unbound programs");
159 if (cls_bpf
->command
!= TC_CLSBPF_OFFLOAD
)
162 oldprog
= cls_bpf
->oldprog
;
164 /* Don't remove if oldprog doesn't match driver's state */
165 if (ns
->bpf_offloaded
!= oldprog
) {
169 if (ns
->bpf_offloaded
) {
170 NSIM_EA(cls_bpf
->common
.extack
,
171 "driver and netdev offload states mismatch");
176 return nsim_bpf_offload(ns
, cls_bpf
->prog
, oldprog
);
179 int nsim_bpf_disable_tc(struct netdevsim
*ns
)
181 if (ns
->bpf_offloaded
&& !nsim_xdp_offload_active(ns
))
186 static int nsim_xdp_offload_prog(struct netdevsim
*ns
, struct netdev_bpf
*bpf
)
188 if (!nsim_xdp_offload_active(ns
) && !bpf
->prog
)
190 if (!nsim_xdp_offload_active(ns
) && bpf
->prog
&& ns
->bpf_offloaded
) {
191 NSIM_EA(bpf
->extack
, "TC program is already loaded");
195 return nsim_bpf_offload(ns
, bpf
->prog
, nsim_xdp_offload_active(ns
));
198 static int nsim_xdp_set_prog(struct netdevsim
*ns
, struct netdev_bpf
*bpf
)
202 if (ns
->xdp_prog
&& (bpf
->flags
^ ns
->xdp_flags
) & XDP_FLAGS_MODES
) {
203 NSIM_EA(bpf
->extack
, "program loaded with different flags");
207 if (bpf
->command
== XDP_SETUP_PROG
&& !ns
->bpf_xdpdrv_accept
) {
208 NSIM_EA(bpf
->extack
, "driver XDP disabled in DebugFS");
211 if (bpf
->command
== XDP_SETUP_PROG_HW
&& !ns
->bpf_xdpoffload_accept
) {
212 NSIM_EA(bpf
->extack
, "XDP offload disabled in DebugFS");
216 if (bpf
->command
== XDP_SETUP_PROG_HW
) {
217 err
= nsim_xdp_offload_prog(ns
, bpf
);
223 bpf_prog_put(ns
->xdp_prog
);
225 ns
->xdp_prog
= bpf
->prog
;
226 ns
->xdp_flags
= bpf
->flags
;
229 ns
->xdp_prog_mode
= XDP_ATTACHED_NONE
;
230 else if (bpf
->command
== XDP_SETUP_PROG
)
231 ns
->xdp_prog_mode
= XDP_ATTACHED_DRV
;
233 ns
->xdp_prog_mode
= XDP_ATTACHED_HW
;
238 static int nsim_bpf_create_prog(struct netdevsim
*ns
, struct bpf_prog
*prog
)
240 struct nsim_bpf_bound_prog
*state
;
243 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
249 state
->state
= "verify";
251 /* Program id is not populated yet when we create the state. */
252 sprintf(name
, "%u", ns
->prog_id_gen
++);
253 state
->ddir
= debugfs_create_dir(name
, ns
->ddir_bpf_bound_progs
);
254 if (IS_ERR_OR_NULL(state
->ddir
)) {
259 debugfs_create_u32("id", 0400, state
->ddir
, &prog
->aux
->id
);
260 debugfs_create_file("state", 0400, state
->ddir
,
261 &state
->state
, &nsim_bpf_string_fops
);
262 debugfs_create_bool("loaded", 0400, state
->ddir
, &state
->is_loaded
);
264 list_add_tail(&state
->l
, &ns
->bpf_bound_progs
);
266 prog
->aux
->offload
->dev_priv
= state
;
271 static void nsim_bpf_destroy_prog(struct bpf_prog
*prog
)
273 struct nsim_bpf_bound_prog
*state
;
275 state
= prog
->aux
->offload
->dev_priv
;
276 WARN(state
->is_loaded
,
277 "offload state destroyed while program still bound");
278 debugfs_remove_recursive(state
->ddir
);
283 static int nsim_setup_prog_checks(struct netdevsim
*ns
, struct netdev_bpf
*bpf
)
285 if (bpf
->prog
&& bpf
->prog
->aux
->offload
) {
286 NSIM_EA(bpf
->extack
, "attempt to load offloaded prog to drv");
289 if (ns
->netdev
->mtu
> NSIM_XDP_MAX_MTU
) {
290 NSIM_EA(bpf
->extack
, "MTU too large w/ XDP enabled");
293 if (nsim_xdp_offload_active(ns
)) {
294 NSIM_EA(bpf
->extack
, "xdp offload active, can't load drv prog");
301 nsim_setup_prog_hw_checks(struct netdevsim
*ns
, struct netdev_bpf
*bpf
)
303 struct nsim_bpf_bound_prog
*state
;
308 if (!bpf
->prog
->aux
->offload
) {
309 NSIM_EA(bpf
->extack
, "xdpoffload of non-bound program");
312 if (bpf
->prog
->aux
->offload
->netdev
!= ns
->netdev
) {
313 NSIM_EA(bpf
->extack
, "program bound to different dev");
317 state
= bpf
->prog
->aux
->offload
->dev_priv
;
318 if (WARN_ON(strcmp(state
->state
, "xlated"))) {
319 NSIM_EA(bpf
->extack
, "offloading program in bad state");
326 nsim_map_key_match(struct bpf_map
*map
, struct nsim_map_entry
*e
, void *key
)
328 return e
->key
&& !memcmp(key
, e
->key
, map
->key_size
);
331 static int nsim_map_key_find(struct bpf_offloaded_map
*offmap
, void *key
)
333 struct nsim_bpf_bound_map
*nmap
= offmap
->dev_priv
;
336 for (i
= 0; i
< ARRAY_SIZE(nmap
->entry
); i
++)
337 if (nsim_map_key_match(&offmap
->map
, &nmap
->entry
[i
], key
))
344 nsim_map_alloc_elem(struct bpf_offloaded_map
*offmap
, unsigned int idx
)
346 struct nsim_bpf_bound_map
*nmap
= offmap
->dev_priv
;
348 nmap
->entry
[idx
].key
= kmalloc(offmap
->map
.key_size
, GFP_USER
);
349 if (!nmap
->entry
[idx
].key
)
351 nmap
->entry
[idx
].value
= kmalloc(offmap
->map
.value_size
, GFP_USER
);
352 if (!nmap
->entry
[idx
].value
) {
353 kfree(nmap
->entry
[idx
].key
);
354 nmap
->entry
[idx
].key
= NULL
;
362 nsim_map_get_next_key(struct bpf_offloaded_map
*offmap
,
363 void *key
, void *next_key
)
365 struct nsim_bpf_bound_map
*nmap
= offmap
->dev_priv
;
368 mutex_lock(&nmap
->mutex
);
371 idx
= nsim_map_key_find(offmap
, key
);
377 for (; idx
< ARRAY_SIZE(nmap
->entry
); idx
++) {
378 if (nmap
->entry
[idx
].key
) {
379 memcpy(next_key
, nmap
->entry
[idx
].key
,
380 offmap
->map
.key_size
);
385 mutex_unlock(&nmap
->mutex
);
387 if (idx
== ARRAY_SIZE(nmap
->entry
))
393 nsim_map_lookup_elem(struct bpf_offloaded_map
*offmap
, void *key
, void *value
)
395 struct nsim_bpf_bound_map
*nmap
= offmap
->dev_priv
;
398 mutex_lock(&nmap
->mutex
);
400 idx
= nsim_map_key_find(offmap
, key
);
402 memcpy(value
, nmap
->entry
[idx
].value
, offmap
->map
.value_size
);
404 mutex_unlock(&nmap
->mutex
);
406 return idx
< 0 ? idx
: 0;
410 nsim_map_update_elem(struct bpf_offloaded_map
*offmap
,
411 void *key
, void *value
, u64 flags
)
413 struct nsim_bpf_bound_map
*nmap
= offmap
->dev_priv
;
416 mutex_lock(&nmap
->mutex
);
418 idx
= nsim_map_key_find(offmap
, key
);
419 if (idx
< 0 && flags
== BPF_EXIST
) {
423 if (idx
>= 0 && flags
== BPF_NOEXIST
) {
429 for (idx
= 0; idx
< ARRAY_SIZE(nmap
->entry
); idx
++)
430 if (!nmap
->entry
[idx
].key
)
432 if (idx
== ARRAY_SIZE(nmap
->entry
)) {
437 err
= nsim_map_alloc_elem(offmap
, idx
);
442 memcpy(nmap
->entry
[idx
].key
, key
, offmap
->map
.key_size
);
443 memcpy(nmap
->entry
[idx
].value
, value
, offmap
->map
.value_size
);
445 mutex_unlock(&nmap
->mutex
);
450 static int nsim_map_delete_elem(struct bpf_offloaded_map
*offmap
, void *key
)
452 struct nsim_bpf_bound_map
*nmap
= offmap
->dev_priv
;
455 if (offmap
->map
.map_type
== BPF_MAP_TYPE_ARRAY
)
458 mutex_lock(&nmap
->mutex
);
460 idx
= nsim_map_key_find(offmap
, key
);
462 kfree(nmap
->entry
[idx
].key
);
463 kfree(nmap
->entry
[idx
].value
);
464 memset(&nmap
->entry
[idx
], 0, sizeof(nmap
->entry
[idx
]));
467 mutex_unlock(&nmap
->mutex
);
469 return idx
< 0 ? idx
: 0;
472 static const struct bpf_map_dev_ops nsim_bpf_map_ops
= {
473 .map_get_next_key
= nsim_map_get_next_key
,
474 .map_lookup_elem
= nsim_map_lookup_elem
,
475 .map_update_elem
= nsim_map_update_elem
,
476 .map_delete_elem
= nsim_map_delete_elem
,
480 nsim_bpf_map_alloc(struct netdevsim
*ns
, struct bpf_offloaded_map
*offmap
)
482 struct nsim_bpf_bound_map
*nmap
;
485 if (WARN_ON(offmap
->map
.map_type
!= BPF_MAP_TYPE_ARRAY
&&
486 offmap
->map
.map_type
!= BPF_MAP_TYPE_HASH
))
488 if (offmap
->map
.max_entries
> NSIM_BPF_MAX_KEYS
)
490 if (offmap
->map
.map_flags
)
493 nmap
= kzalloc(sizeof(*nmap
), GFP_USER
);
497 offmap
->dev_priv
= nmap
;
500 mutex_init(&nmap
->mutex
);
502 if (offmap
->map
.map_type
== BPF_MAP_TYPE_ARRAY
) {
503 for (i
= 0; i
< ARRAY_SIZE(nmap
->entry
); i
++) {
506 err
= nsim_map_alloc_elem(offmap
, i
);
509 key
= nmap
->entry
[i
].key
;
514 offmap
->dev_ops
= &nsim_bpf_map_ops
;
515 list_add_tail(&nmap
->l
, &ns
->bpf_bound_maps
);
521 kfree(nmap
->entry
[i
].key
);
522 kfree(nmap
->entry
[i
].value
);
528 static void nsim_bpf_map_free(struct bpf_offloaded_map
*offmap
)
530 struct nsim_bpf_bound_map
*nmap
= offmap
->dev_priv
;
533 for (i
= 0; i
< ARRAY_SIZE(nmap
->entry
); i
++) {
534 kfree(nmap
->entry
[i
].key
);
535 kfree(nmap
->entry
[i
].value
);
537 list_del_init(&nmap
->l
);
538 mutex_destroy(&nmap
->mutex
);
542 int nsim_bpf(struct net_device
*dev
, struct netdev_bpf
*bpf
)
544 struct netdevsim
*ns
= netdev_priv(dev
);
545 struct nsim_bpf_bound_prog
*state
;
550 switch (bpf
->command
) {
551 case BPF_OFFLOAD_VERIFIER_PREP
:
552 if (!ns
->bpf_bind_accept
)
555 err
= nsim_bpf_create_prog(ns
, bpf
->verifier
.prog
);
559 bpf
->verifier
.ops
= &nsim_bpf_analyzer_ops
;
561 case BPF_OFFLOAD_TRANSLATE
:
562 state
= bpf
->offload
.prog
->aux
->offload
->dev_priv
;
564 state
->state
= "xlated";
566 case BPF_OFFLOAD_DESTROY
:
567 nsim_bpf_destroy_prog(bpf
->offload
.prog
);
570 bpf
->prog_attached
= ns
->xdp_prog_mode
;
571 bpf
->prog_id
= ns
->xdp_prog
? ns
->xdp_prog
->aux
->id
: 0;
572 bpf
->prog_flags
= ns
->xdp_prog
? ns
->xdp_flags
: 0;
575 err
= nsim_setup_prog_checks(ns
, bpf
);
579 return nsim_xdp_set_prog(ns
, bpf
);
580 case XDP_SETUP_PROG_HW
:
581 err
= nsim_setup_prog_hw_checks(ns
, bpf
);
585 return nsim_xdp_set_prog(ns
, bpf
);
586 case BPF_OFFLOAD_MAP_ALLOC
:
587 if (!ns
->bpf_map_accept
)
590 return nsim_bpf_map_alloc(ns
, bpf
->offmap
);
591 case BPF_OFFLOAD_MAP_FREE
:
592 nsim_bpf_map_free(bpf
->offmap
);
599 int nsim_bpf_init(struct netdevsim
*ns
)
601 INIT_LIST_HEAD(&ns
->bpf_bound_progs
);
602 INIT_LIST_HEAD(&ns
->bpf_bound_maps
);
604 debugfs_create_u32("bpf_offloaded_id", 0400, ns
->ddir
,
605 &ns
->bpf_offloaded_id
);
607 ns
->bpf_bind_accept
= true;
608 debugfs_create_bool("bpf_bind_accept", 0600, ns
->ddir
,
609 &ns
->bpf_bind_accept
);
610 debugfs_create_u32("bpf_bind_verifier_delay", 0600, ns
->ddir
,
611 &ns
->bpf_bind_verifier_delay
);
612 ns
->ddir_bpf_bound_progs
=
613 debugfs_create_dir("bpf_bound_progs", ns
->ddir
);
614 if (IS_ERR_OR_NULL(ns
->ddir_bpf_bound_progs
))
617 ns
->bpf_tc_accept
= true;
618 debugfs_create_bool("bpf_tc_accept", 0600, ns
->ddir
,
620 debugfs_create_bool("bpf_tc_non_bound_accept", 0600, ns
->ddir
,
621 &ns
->bpf_tc_non_bound_accept
);
622 ns
->bpf_xdpdrv_accept
= true;
623 debugfs_create_bool("bpf_xdpdrv_accept", 0600, ns
->ddir
,
624 &ns
->bpf_xdpdrv_accept
);
625 ns
->bpf_xdpoffload_accept
= true;
626 debugfs_create_bool("bpf_xdpoffload_accept", 0600, ns
->ddir
,
627 &ns
->bpf_xdpoffload_accept
);
629 ns
->bpf_map_accept
= true;
630 debugfs_create_bool("bpf_map_accept", 0600, ns
->ddir
,
631 &ns
->bpf_map_accept
);
636 void nsim_bpf_uninit(struct netdevsim
*ns
)
638 WARN_ON(!list_empty(&ns
->bpf_bound_progs
));
639 WARN_ON(!list_empty(&ns
->bpf_bound_maps
));
640 WARN_ON(ns
->xdp_prog
);
641 WARN_ON(ns
->bpf_offloaded
);