// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <net/pkt_cls.h>

#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nffw.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"
#include "fw.h"
#include "main.h"
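
/* Parameters for the table of "neutral" (host-side) maps referenced by
 * offloaded programs; entries are keyed by the kernel's BPF map ID.
 */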
const struct rhashtable_params nfp_bpf_maps_neutral_params = {
	.nelem_hint		= 4,
	.key_len		= sizeof_field(struct bpf_map, id),
	.key_offset		= offsetof(struct nfp_bpf_neutral_map, map_id),
	.head_offset		= offsetof(struct nfp_bpf_neutral_map, l),
	.automatic_shrinking	= true,
};
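
/* Offload is only possible on little-endian hosts, and only when the
 * firmware advertises the ABI version the driver settled on at init time.
 */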
static bool nfp_net_ebpf_capable(struct nfp_net *nn)
{
#ifdef __LITTLE_ENDIAN
	struct nfp_app_bpf *bpf = nn->app->priv;

	return nn->cap & NFP_NET_CFG_CTRL_BPF &&
	       bpf->abi_version &&
	       nn_readb(nn, NFP_NET_CFG_BPF_ABI) == bpf->abi_version;
#else
	return false;
#endif
}
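
/* Install @prog as the hardware XDP program.  Refuse to replace a program
 * which was installed via TC (-EBUSY), and do nothing if asked to remove
 * a program while none is running via XDP.
 */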
static int
nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
		    struct bpf_prog *prog, struct netlink_ext_ack *extack)
{
	bool running, xdp_running;

	if (!nfp_net_ebpf_capable(nn))
		return -EINVAL;

	running = nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF;
	xdp_running = running && nn->xdp_hw.prog;

	if (!prog && !xdp_running)
		return 0;
	if (prog && running && !xdp_running)
		return -EBUSY;

	return nfp_net_bpf_offload(nn, prog, running, extack);
}
static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
{
	return nfp_net_ebpf_capable(nn) ? "BPF" : "";
}
static int
nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
{
	struct nfp_pf *pf = app->pf;
	struct nfp_bpf_vnic *bv;
	int err;

	if (!pf->eth_tbl) {
		nfp_err(pf->cpp, "No ETH table\n");
		return -EINVAL;
	}
	if (pf->max_data_vnics != pf->eth_tbl->count) {
		nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n",
			pf->max_data_vnics, pf->eth_tbl->count);
		return -EINVAL;
	}

	bv = kzalloc(sizeof(*bv), GFP_KERNEL);
	if (!bv)
		return -ENOMEM;
	nn->app_priv = bv;

	err = nfp_app_nic_vnic_alloc(app, nn, id);
	if (err)
		goto err_free_priv;

	bv->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
	bv->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);

	return 0;
err_free_priv:
	kfree(nn->app_priv);
	return err;
}
static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_bpf_vnic *bv = nn->app_priv;

	WARN_ON(bv->tc_prog);
	kfree(bv);
}
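
/* TC block callback.  Only direct-action cls_bpf programs on chain 0 with
 * protocol ETH_P_ALL and no legacy actions can be offloaded.
 */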
static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
				     void *type_data, void *cb_priv)
{
	struct tc_cls_bpf_offload *cls_bpf = type_data;
	struct nfp_net *nn = cb_priv;
	struct bpf_prog *oldprog;
	struct nfp_bpf_vnic *bv;
	int err;

	if (type != TC_SETUP_CLSBPF) {
		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
				   "only offload of BPF classifiers supported");
		return -EOPNOTSUPP;
	}
	if (!tc_cls_can_offload_and_chain0(nn->dp.netdev, &cls_bpf->common))
		return -EOPNOTSUPP;
	if (!nfp_net_ebpf_capable(nn)) {
		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
				   "NFP firmware does not support eBPF offload");
		return -EOPNOTSUPP;
	}
	if (cls_bpf->common.protocol != htons(ETH_P_ALL)) {
		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
				   "only ETH_P_ALL supported as filter protocol");
		return -EOPNOTSUPP;
	}

	/* Only support TC direct action */
	if (!cls_bpf->exts_integrated ||
	    tcf_exts_has_actions(cls_bpf->exts)) {
		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
				   "only direct action with no legacy actions supported");
		return -EOPNOTSUPP;
	}

	if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
		return -EOPNOTSUPP;

	bv = nn->app_priv;
	oldprog = cls_bpf->oldprog;

	/* Don't remove if oldprog doesn't match driver's state */
	if (bv->tc_prog != oldprog) {
		oldprog = NULL;
		if (!cls_bpf->prog)
			return 0;
	}

	err = nfp_net_bpf_offload(nn, cls_bpf->prog, oldprog,
				  cls_bpf->common.extack);
	if (err)
		return err;

	bv->tc_prog = cls_bpf->prog;
	nn->port->tc_offload_cnt = !!bv->tc_prog;
	return 0;
}
static LIST_HEAD(nfp_bpf_block_cb_list);
static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
			    enum tc_setup_type type, void *type_data)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &nfp_bpf_block_cb_list,
						  nfp_bpf_setup_tc_block_cb,
						  nn, nn, true);
	default:
		return -EOPNOTSUPP;
	}
}
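
/* Reject MTU changes which could let the currently offloaded program access
 * packet data beyond the hardware packet buffer.
 */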
static int
nfp_bpf_check_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
{
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_bpf_vnic *bv;
	struct bpf_prog *prog;

	if (~nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
		return 0;

	if (nn->xdp_hw.prog) {
		prog = nn->xdp_hw.prog;
	} else {
		bv = nn->app_priv;
		prog = bv->tc_prog;
	}

	if (nfp_bpf_offload_check_mtu(nn, prog, new_mtu)) {
		nn_info(nn, "BPF offload active, potential packet access beyond hardware packet boundary");
		return -EBUSY;
	}
	return 0;
}
static int
nfp_bpf_parse_cap_adjust_head(struct nfp_app_bpf *bpf, void __iomem *value,
			      u32 length)
{
	struct nfp_bpf_cap_tlv_adjust_head __iomem *cap = value;
	struct nfp_cpp *cpp = bpf->app->pf->cpp;

	if (length < sizeof(*cap)) {
		nfp_err(cpp, "truncated adjust_head TLV: %d\n", length);
		return -EINVAL;
	}

	bpf->adjust_head.flags = readl(&cap->flags);
	bpf->adjust_head.off_min = readl(&cap->off_min);
	bpf->adjust_head.off_max = readl(&cap->off_max);
	bpf->adjust_head.guaranteed_sub = readl(&cap->guaranteed_sub);
	bpf->adjust_head.guaranteed_add = readl(&cap->guaranteed_add);

	if (bpf->adjust_head.off_min > bpf->adjust_head.off_max) {
		nfp_err(cpp, "invalid adjust_head TLV: min > max\n");
		return -EINVAL;
	}
	if (!FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_min) ||
	    !FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_max)) {
		nfp_warn(cpp, "disabling adjust_head - driver expects min/max to fit in as immediates\n");
		memset(&bpf->adjust_head, 0, sizeof(bpf->adjust_head));
		return 0;
	}

	return 0;
}
static int
nfp_bpf_parse_cap_func(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
{
	struct nfp_bpf_cap_tlv_func __iomem *cap = value;

	if (length < sizeof(*cap)) {
		nfp_err(bpf->app->cpp, "truncated function TLV: %d\n", length);
		return -EINVAL;
	}

	switch (readl(&cap->func_id)) {
	case BPF_FUNC_map_lookup_elem:
		bpf->helpers.map_lookup = readl(&cap->func_addr);
		break;
	case BPF_FUNC_map_update_elem:
		bpf->helpers.map_update = readl(&cap->func_addr);
		break;
	case BPF_FUNC_map_delete_elem:
		bpf->helpers.map_delete = readl(&cap->func_addr);
		break;
	case BPF_FUNC_perf_event_output:
		bpf->helpers.perf_event_output = readl(&cap->func_addr);
		break;
	}

	return 0;
}
static int
nfp_bpf_parse_cap_maps(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
{
	struct nfp_bpf_cap_tlv_maps __iomem *cap = value;

	if (length < sizeof(*cap)) {
		nfp_err(bpf->app->cpp, "truncated maps TLV: %d\n", length);
		return -EINVAL;
	}

	bpf->maps.types = readl(&cap->types);
	bpf->maps.max_maps = readl(&cap->max_maps);
	bpf->maps.max_elems = readl(&cap->max_elems);
	bpf->maps.max_key_sz = readl(&cap->max_key_sz);
	bpf->maps.max_val_sz = readl(&cap->max_val_sz);
	bpf->maps.max_elem_sz = readl(&cap->max_elem_sz);

	return 0;
}
static int
nfp_bpf_parse_cap_random(struct nfp_app_bpf *bpf, void __iomem *value,
			 u32 length)
{
	bpf->pseudo_random = true;
	return 0;
}
static int
nfp_bpf_parse_cap_qsel(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
{
	bpf->queue_select = true;
	return 0;
}
static int
nfp_bpf_parse_cap_adjust_tail(struct nfp_app_bpf *bpf, void __iomem *value,
			      u32 length)
{
	bpf->adjust_tail = true;
	return 0;
}
static int
nfp_bpf_parse_cap_cmsg_multi_ent(struct nfp_app_bpf *bpf, void __iomem *value,
				 u32 length)
{
	bpf->cmsg_multi_ent = true;
	return 0;
}
static int
nfp_bpf_parse_cap_abi_version(struct nfp_app_bpf *bpf, void __iomem *value,
			      u32 length)
{
	if (length < 4) {
		nfp_err(bpf->app->cpp, "truncated ABI version TLV: %d\n",
			length);
		return -EINVAL;
	}

	bpf->abi_version = readl(value);
	if (bpf->abi_version < 2 || bpf->abi_version > 3) {
		nfp_warn(bpf->app->cpp, "unsupported BPF ABI version: %d\n",
			 bpf->abi_version);
		bpf->abi_version = 0;
	}

	return 0;
}
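
/* Walk the firmware's capability area.  Capabilities are a stream of TLVs:
 * a 32-bit type word, a 32-bit length word, then @length bytes of value.
 * The running pointer is advanced past each TLV and checked against the
 * mapped area size so a corrupt length cannot walk off the end.
 */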
static int nfp_bpf_parse_capabilities(struct nfp_app *app)
{
	struct nfp_cpp *cpp = app->pf->cpp;
	struct nfp_cpp_area *area;
	u8 __iomem *mem, *start;

	mem = nfp_rtsym_map(app->pf->rtbl, "_abi_bpf_capabilities", "bpf.cap",
			    8, &area);
	if (IS_ERR(mem))
		return PTR_ERR(mem) == -ENOENT ? 0 : PTR_ERR(mem);

	start = mem;
	while (mem - start + 8 <= nfp_cpp_area_size(area)) {
		u8 __iomem *value;
		u32 type, length;

		type = readl(mem);
		length = readl(mem + 4);
		value = mem + 8;

		mem += 8 + length;
		if (mem - start > nfp_cpp_area_size(area))
			goto err_release_free;

		switch (type) {
		case NFP_BPF_CAP_TYPE_FUNC:
			if (nfp_bpf_parse_cap_func(app->priv, value, length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_ADJUST_HEAD:
			if (nfp_bpf_parse_cap_adjust_head(app->priv, value,
							  length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_MAPS:
			if (nfp_bpf_parse_cap_maps(app->priv, value, length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_RANDOM:
			if (nfp_bpf_parse_cap_random(app->priv, value, length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_QUEUE_SELECT:
			if (nfp_bpf_parse_cap_qsel(app->priv, value, length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_ADJUST_TAIL:
			if (nfp_bpf_parse_cap_adjust_tail(app->priv, value,
							  length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_ABI_VERSION:
			if (nfp_bpf_parse_cap_abi_version(app->priv, value,
							  length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_CMSG_MULTI_ENT:
			if (nfp_bpf_parse_cap_cmsg_multi_ent(app->priv, value,
							     length))
				goto err_release_free;
			break;
		default:
			nfp_dbg(cpp, "unknown BPF capability: %d\n", type);
			break;
		}
	}
	if (mem - start != nfp_cpp_area_size(area)) {
		nfp_err(cpp, "BPF capabilities left after parsing, parsed:%zd total length:%zu\n",
			mem - start, nfp_cpp_area_size(area));
		goto err_release_free;
	}

	nfp_cpp_area_release_free(area);

	return 0;

err_release_free:
	nfp_err(cpp, "invalid BPF capabilities at offset:%zd\n", mem - start);
	nfp_cpp_area_release_free(area);
	return -EINVAL;
}
static void nfp_bpf_init_capabilities(struct nfp_app_bpf *bpf)
{
	bpf->abi_version = 2; /* Original BPF ABI version */
}
static int nfp_bpf_ndo_init(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_app_bpf *bpf = app->priv;

	return bpf_offload_dev_netdev_register(bpf->bpf_dev, netdev);
}
static void nfp_bpf_ndo_uninit(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_app_bpf *bpf = app->priv;

	bpf_offload_dev_netdev_unregister(bpf->bpf_dev, netdev);
}
static int nfp_bpf_start(struct nfp_app *app)
{
	struct nfp_app_bpf *bpf = app->priv;

	if (app->ctrl->dp.mtu < nfp_bpf_ctrl_cmsg_min_mtu(bpf)) {
		nfp_err(bpf->app->cpp,
			"ctrl channel MTU below min required %u < %u\n",
			app->ctrl->dp.mtu, nfp_bpf_ctrl_cmsg_min_mtu(bpf));
		return -EINVAL;
	}

	if (bpf->cmsg_multi_ent)
		bpf->cmsg_cache_cnt = nfp_bpf_ctrl_cmsg_cache_cnt(bpf);
	else
		bpf->cmsg_cache_cnt = 1;

	return 0;
}
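
/* App init: allocate driver state, set up the control message mechanism and
 * the neutral maps table, then parse firmware capabilities.  From ABI
 * version 3 on, map key/value sizes in control messages follow the
 * advertised maxima rather than the fixed ABI 2 layout.
 */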
static int nfp_bpf_init(struct nfp_app *app)
{
	struct nfp_app_bpf *bpf;
	int err;

	bpf = kzalloc(sizeof(*bpf), GFP_KERNEL);
	if (!bpf)
		return -ENOMEM;
	bpf->app = app;
	app->priv = bpf;

	INIT_LIST_HEAD(&bpf->map_list);

	err = nfp_ccm_init(&bpf->ccm, app);
	if (err)
		goto err_free_bpf;

	err = rhashtable_init(&bpf->maps_neutral, &nfp_bpf_maps_neutral_params);
	if (err)
		goto err_clean_ccm;

	nfp_bpf_init_capabilities(bpf);

	err = nfp_bpf_parse_capabilities(app);
	if (err)
		goto err_free_neutral_maps;

	if (bpf->abi_version < 3) {
		bpf->cmsg_key_sz = CMSG_MAP_KEY_LW * 4;
		bpf->cmsg_val_sz = CMSG_MAP_VALUE_LW * 4;
	} else {
		bpf->cmsg_key_sz = bpf->maps.max_key_sz;
		bpf->cmsg_val_sz = bpf->maps.max_val_sz;
		app->ctrl_mtu = nfp_bpf_ctrl_cmsg_mtu(bpf);
	}

	bpf->bpf_dev = bpf_offload_dev_create(&nfp_bpf_dev_ops, bpf);
	err = PTR_ERR_OR_ZERO(bpf->bpf_dev);
	if (err)
		goto err_free_neutral_maps;

	return 0;

err_free_neutral_maps:
	rhashtable_destroy(&bpf->maps_neutral);
err_clean_ccm:
	nfp_ccm_clean(&bpf->ccm);
err_free_bpf:
	kfree(bpf);
	return err;
}
static void nfp_bpf_clean(struct nfp_app *app)
{
	struct nfp_app_bpf *bpf = app->priv;

	bpf_offload_dev_destroy(bpf->bpf_dev);
	nfp_ccm_clean(&bpf->ccm);
	WARN_ON(!list_empty(&bpf->map_list));
	WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use);
	rhashtable_free_and_destroy(&bpf->maps_neutral,
				    nfp_check_rhashtable_empty, NULL);
	kfree(bpf);
}
const struct nfp_app_type app_bpf = {
	.id		= NFP_APP_BPF_NIC,
	.name		= "ebpf",

	.ctrl_cap_mask	= 0,

	.init		= nfp_bpf_init,
	.clean		= nfp_bpf_clean,
	.start		= nfp_bpf_start,

	.check_mtu	= nfp_bpf_check_mtu,

	.extra_cap	= nfp_bpf_extra_cap,

	.ndo_init	= nfp_bpf_ndo_init,
	.ndo_uninit	= nfp_bpf_ndo_uninit,

	.vnic_alloc	= nfp_bpf_vnic_alloc,
	.vnic_free	= nfp_bpf_vnic_free,

	.ctrl_msg_rx		= nfp_bpf_ctrl_msg_rx,
	.ctrl_msg_rx_raw	= nfp_bpf_ctrl_msg_rx_raw,

	.setup_tc	= nfp_bpf_setup_tc,
	.bpf		= nfp_ndo_bpf,
	.xdp_offload	= nfp_bpf_xdp_offload,
};