// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <net/pkt_cls.h>

#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nffw.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"
#include "fw.h"
#include "main.h"

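/* Parameters for the hash table of "neutral" maps, i.e. kernel BPF
 * maps referenced by offloaded programs, keyed by the ID the kernel
 * assigned in struct bpf_map.
 */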
const struct rhashtable_params nfp_bpf_maps_neutral_params = {
	.key_len		= sizeof_field(struct bpf_map, id),
	.key_offset		= offsetof(struct nfp_bpf_neutral_map, map_id),
	.head_offset		= offsetof(struct nfp_bpf_neutral_map, l),
	.automatic_shrinking	= true,
};

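/* BPF offload is only supported on little-endian hosts; the vNIC must
 * advertise the BPF capability and report the firmware ABI version the
 * driver parsed from the capability TLVs.
 */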
static bool nfp_net_ebpf_capable(struct nfp_net *nn)
{
#ifdef __LITTLE_ENDIAN
	struct nfp_app_bpf *bpf = nn->app->priv;

	return nn->cap & NFP_NET_CFG_CTRL_BPF &&
	       nn_readb(nn, NFP_NET_CFG_BPF_ABI) == bpf->abi_version;
#else
	return false;
#endif
}

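/* Offload an XDP program to the vNIC.  A program loaded through
 * another interface (TC) must not be silently replaced, so installing
 * over a running non-XDP program is refused.
 */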
static int
nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
		    struct bpf_prog *prog, struct netlink_ext_ack *extack)
{
	bool running, xdp_running;

	if (!nfp_net_ebpf_capable(nn))
		return -EINVAL;

	running = nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF;
	xdp_running = running && nn->xdp_hw.prog;

	if (!prog && !xdp_running)
		return 0;
	if (prog && running && !xdp_running)
		return -EBUSY;

	return nfp_net_bpf_offload(nn, prog, running, extack);
}

static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
{
	return nfp_net_ebpf_capable(nn) ? "BPF" : "";
}

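/* Per-vNIC setup: verify the ETH table is consistent, allocate BPF
 * vNIC state and read the firmware's program start offset and exit
 * target.
 */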
static int
nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
{
	struct nfp_pf *pf = app->pf;
	struct nfp_bpf_vnic *bv;
	int err;

	if (!pf->eth_tbl) {
		nfp_err(pf->cpp, "No ETH table\n");
		return -EINVAL;
	}
	if (pf->max_data_vnics != pf->eth_tbl->count) {
		nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n",
			pf->max_data_vnics, pf->eth_tbl->count);
		return -EINVAL;
	}

	bv = kzalloc(sizeof(*bv), GFP_KERNEL);
	if (!bv)
		return -ENOMEM;
	nn->app_priv = bv;

	err = nfp_app_nic_vnic_alloc(app, nn, id);
	if (err)
		goto err_free_priv;

	bv->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
	bv->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);

	return 0;

err_free_priv:
	kfree(nn->app_priv);
	return err;
}

static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_bpf_vnic *bv = nn->app_priv;

	WARN_ON(bv->tc_prog);
	kfree(bv);
}

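/* Callback invoked for classifiers added to a bound TC block.  Only
 * direct-action clsbpf on protocol ETH_P_ALL, chain 0, is accepted for
 * offload.
 */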
static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
				     void *type_data, void *cb_priv)
{
	struct tc_cls_bpf_offload *cls_bpf = type_data;
	struct nfp_net *nn = cb_priv;
	struct bpf_prog *oldprog;
	struct nfp_bpf_vnic *bv;
	int err;

	if (type != TC_SETUP_CLSBPF) {
		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
				   "only offload of BPF classifiers supported");
		return -EOPNOTSUPP;
	}
	if (!tc_cls_can_offload_and_chain0(nn->dp.netdev, &cls_bpf->common))
		return -EOPNOTSUPP;
	if (!nfp_net_ebpf_capable(nn)) {
		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
				   "NFP firmware does not support eBPF offload");
		return -EOPNOTSUPP;
	}
	if (cls_bpf->common.protocol != htons(ETH_P_ALL)) {
		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
				   "only ETH_P_ALL supported as filter protocol");
		return -EOPNOTSUPP;
	}

	/* Only support TC direct action */
	if (!cls_bpf->exts_integrated ||
	    tcf_exts_has_actions(cls_bpf->exts)) {
		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
				   "only direct action with no legacy actions supported");
		return -EOPNOTSUPP;
	}

	if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
		return -EOPNOTSUPP;

	bv = nn->app_priv;
	oldprog = cls_bpf->oldprog;

	/* Don't remove if oldprog doesn't match driver's state */
	if (bv->tc_prog != oldprog) {
		oldprog = NULL;
		if (!cls_bpf->prog)
			return 0;
	}

	err = nfp_net_bpf_offload(nn, cls_bpf->prog, oldprog,
				  cls_bpf->common.extack);
	if (err)
		return err;

	bv->tc_prog = cls_bpf->prog;
	nn->port->tc_offload_cnt = !!bv->tc_prog;
	return 0;
}

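/* TC blocks bound by this driver, shared across all BPF vNICs. */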
static LIST_HEAD(nfp_bpf_block_cb_list);

static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
			    enum tc_setup_type type, void *type_data)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &nfp_bpf_block_cb_list,
						  nfp_bpf_setup_tc_block_cb,
						  nn, nn, true);
	default:
		return -EOPNOTSUPP;
	}
}

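/* While a BPF program is loaded the firmware bounds the usable MTU by
 * its inline packet buffer size (NFP_NET_CFG_BPF_INL_MTU, reported in
 * 64-byte units).
 */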
static int
nfp_bpf_check_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
{
	struct nfp_net *nn = netdev_priv(netdev);
	unsigned int max_mtu;

	if (~nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
		return 0;

	max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
	if (new_mtu > max_mtu) {
		nn_info(nn, "BPF offload active, MTU over %u not supported\n",
			max_mtu);
		return -EBUSY;
	}

	return 0;
}

static int
nfp_bpf_parse_cap_adjust_head(struct nfp_app_bpf *bpf, void __iomem *value,
			      u32 length)
{
	struct nfp_bpf_cap_tlv_adjust_head __iomem *cap = value;
	struct nfp_cpp *cpp = bpf->app->pf->cpp;

	if (length < sizeof(*cap)) {
		nfp_err(cpp, "truncated adjust_head TLV: %d\n", length);
		return -EINVAL;
	}

	bpf->adjust_head.flags = readl(&cap->flags);
	bpf->adjust_head.off_min = readl(&cap->off_min);
	bpf->adjust_head.off_max = readl(&cap->off_max);
	bpf->adjust_head.guaranteed_sub = readl(&cap->guaranteed_sub);
	bpf->adjust_head.guaranteed_add = readl(&cap->guaranteed_add);

	if (bpf->adjust_head.off_min > bpf->adjust_head.off_max) {
		nfp_err(cpp, "invalid adjust_head TLV: min > max\n");
		return -EINVAL;
	}
	if (!FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_min) ||
	    !FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_max)) {
		nfp_warn(cpp, "disabling adjust_head - driver expects min/max to fit in as immediates\n");
		memset(&bpf->adjust_head, 0, sizeof(bpf->adjust_head));
	}

	return 0;
}

static int
nfp_bpf_parse_cap_func(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
{
	struct nfp_bpf_cap_tlv_func __iomem *cap = value;

	if (length < sizeof(*cap)) {
		nfp_err(bpf->app->cpp, "truncated function TLV: %d\n", length);
		return -EINVAL;
	}

	switch (readl(&cap->func_id)) {
	case BPF_FUNC_map_lookup_elem:
		bpf->helpers.map_lookup = readl(&cap->func_addr);
		break;
	case BPF_FUNC_map_update_elem:
		bpf->helpers.map_update = readl(&cap->func_addr);
		break;
	case BPF_FUNC_map_delete_elem:
		bpf->helpers.map_delete = readl(&cap->func_addr);
		break;
	case BPF_FUNC_perf_event_output:
		bpf->helpers.perf_event_output = readl(&cap->func_addr);
		break;
	}

	return 0;
}

static int
nfp_bpf_parse_cap_maps(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
{
	struct nfp_bpf_cap_tlv_maps __iomem *cap = value;

	if (length < sizeof(*cap)) {
		nfp_err(bpf->app->cpp, "truncated maps TLV: %d\n", length);
		return -EINVAL;
	}

	bpf->maps.types = readl(&cap->types);
	bpf->maps.max_maps = readl(&cap->max_maps);
	bpf->maps.max_elems = readl(&cap->max_elems);
	bpf->maps.max_key_sz = readl(&cap->max_key_sz);
	bpf->maps.max_val_sz = readl(&cap->max_val_sz);
	bpf->maps.max_elem_sz = readl(&cap->max_elem_sz);

	return 0;
}

static int
nfp_bpf_parse_cap_random(struct nfp_app_bpf *bpf, void __iomem *value,
			 u32 length)
{
	bpf->pseudo_random = true;
	return 0;
}

static int
nfp_bpf_parse_cap_qsel(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
{
	bpf->queue_select = true;
	return 0;
}

static int
nfp_bpf_parse_cap_adjust_tail(struct nfp_app_bpf *bpf, void __iomem *value,
			      u32 length)
{
	bpf->adjust_tail = true;
	return 0;
}

static int
nfp_bpf_parse_cap_cmsg_multi_ent(struct nfp_app_bpf *bpf, void __iomem *value,
				 u32 length)
{
	bpf->cmsg_multi_ent = true;
	return 0;
}

static int
nfp_bpf_parse_cap_abi_version(struct nfp_app_bpf *bpf, void __iomem *value,
			      u32 length)
{
	if (length < 4) {
		nfp_err(bpf->app->cpp, "truncated ABI version TLV: %d\n",
			length);
		return -EINVAL;
	}

	bpf->abi_version = readl(value);
	if (bpf->abi_version < 2 || bpf->abi_version > 3) {
		nfp_warn(bpf->app->cpp, "unsupported BPF ABI version: %d\n",
			 bpf->abi_version);
		bpf->abi_version = 0;
	}

	return 0;
}

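/* Walk the "_abi_bpf_capabilities" run-time symbol, a sequence of
 * (type, length) headers each followed by a value, and dispatch every
 * TLV to its parser.  Unknown types are only logged so newer firmware
 * keeps working with older drivers.
 */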
static int nfp_bpf_parse_capabilities(struct nfp_app *app)
{
	struct nfp_cpp *cpp = app->pf->cpp;
	struct nfp_cpp_area *area;
	u8 __iomem *mem, *start;

	mem = nfp_rtsym_map(app->pf->rtbl, "_abi_bpf_capabilities", "bpf.cap",
			    8, &area);
	if (IS_ERR(mem))
		return PTR_ERR(mem) == -ENOENT ? 0 : PTR_ERR(mem);

	start = mem;
	while (mem - start + 8 <= nfp_cpp_area_size(area)) {
		u8 __iomem *value;
		u32 type, length;

		type = readl(mem);
		length = readl(mem + 4);
		value = mem + 8;

		mem += 8 + length;
		if (mem - start > nfp_cpp_area_size(area))
			goto err_release_free;

		switch (type) {
		case NFP_BPF_CAP_TYPE_FUNC:
			if (nfp_bpf_parse_cap_func(app->priv, value, length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_ADJUST_HEAD:
			if (nfp_bpf_parse_cap_adjust_head(app->priv, value,
							  length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_MAPS:
			if (nfp_bpf_parse_cap_maps(app->priv, value, length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_RANDOM:
			if (nfp_bpf_parse_cap_random(app->priv, value, length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_QUEUE_SELECT:
			if (nfp_bpf_parse_cap_qsel(app->priv, value, length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_ADJUST_TAIL:
			if (nfp_bpf_parse_cap_adjust_tail(app->priv, value,
							  length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_ABI_VERSION:
			if (nfp_bpf_parse_cap_abi_version(app->priv, value,
							  length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_CMSG_MULTI_ENT:
			if (nfp_bpf_parse_cap_cmsg_multi_ent(app->priv, value,
							     length))
				goto err_release_free;
			break;
		default:
			nfp_dbg(cpp, "unknown BPF capability: %d\n", type);
			break;
		}
	}
	if (mem - start != nfp_cpp_area_size(area)) {
		nfp_err(cpp, "BPF capabilities left after parsing, parsed:%zd total length:%zu\n",
			mem - start, nfp_cpp_area_size(area));
		goto err_release_free;
	}

	nfp_cpp_area_release_free(area);

	return 0;

err_release_free:
	nfp_err(cpp, "invalid BPF capabilities at offset:%zd\n", mem - start);
	nfp_cpp_area_release_free(area);
	return -EINVAL;
}

static void nfp_bpf_init_capabilities(struct nfp_app_bpf *bpf)
{
	bpf->abi_version = 2; /* Original BPF ABI version */
}

static int nfp_bpf_ndo_init(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_app_bpf *bpf = app->priv;

	return bpf_offload_dev_netdev_register(bpf->bpf_dev, netdev);
}

static void nfp_bpf_ndo_uninit(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_app_bpf *bpf = app->priv;

	bpf_offload_dev_netdev_unregister(bpf->bpf_dev, netdev);
}

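/* Check the control channel MTU can carry map control messages and
 * size the control message cache before the app starts.
 */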
static int nfp_bpf_start(struct nfp_app *app)
{
	struct nfp_app_bpf *bpf = app->priv;

	if (app->ctrl->dp.mtu < nfp_bpf_ctrl_cmsg_min_mtu(bpf)) {
		nfp_err(bpf->app->cpp,
			"ctrl channel MTU below min required %u < %u\n",
			app->ctrl->dp.mtu, nfp_bpf_ctrl_cmsg_min_mtu(bpf));
		return -EINVAL;
	}

	if (bpf->cmsg_multi_ent)
		bpf->cmsg_cache_cnt = nfp_bpf_ctrl_cmsg_cache_cnt(bpf);
	else
		bpf->cmsg_cache_cnt = 1;

	return 0;
}

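/* App init: allocate driver state, bring up the control message
 * channel, parse firmware capabilities and register an offload device
 * with the kernel BPF core.
 */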
static int nfp_bpf_init(struct nfp_app *app)
{
	struct nfp_app_bpf *bpf;
	int err;

	bpf = kzalloc(sizeof(*bpf), GFP_KERNEL);
	if (!bpf)
		return -ENOMEM;
	bpf->app = app;
	app->priv = bpf;

	INIT_LIST_HEAD(&bpf->map_list);

	err = nfp_ccm_init(&bpf->ccm, app);
	if (err)
		goto err_free_bpf;

	err = rhashtable_init(&bpf->maps_neutral, &nfp_bpf_maps_neutral_params);
	if (err)
		goto err_clean_ccm;

	nfp_bpf_init_capabilities(bpf);

	err = nfp_bpf_parse_capabilities(app);
	if (err)
		goto err_free_neutral_maps;

	if (bpf->abi_version < 3) {
		bpf->cmsg_key_sz = CMSG_MAP_KEY_LW * 4;
		bpf->cmsg_val_sz = CMSG_MAP_VALUE_LW * 4;
	} else {
		bpf->cmsg_key_sz = bpf->maps.max_key_sz;
		bpf->cmsg_val_sz = bpf->maps.max_val_sz;
		app->ctrl_mtu = nfp_bpf_ctrl_cmsg_mtu(bpf);
	}

	bpf->bpf_dev = bpf_offload_dev_create(&nfp_bpf_dev_ops, bpf);
	err = PTR_ERR_OR_ZERO(bpf->bpf_dev);
	if (err)
		goto err_free_neutral_maps;

	return 0;

err_free_neutral_maps:
	rhashtable_destroy(&bpf->maps_neutral);
err_clean_ccm:
	nfp_ccm_clean(&bpf->ccm);
err_free_bpf:
	kfree(bpf);
	return err;
}

static void nfp_bpf_clean(struct nfp_app *app)
{
	struct nfp_app_bpf *bpf = app->priv;

	bpf_offload_dev_destroy(bpf->bpf_dev);
	nfp_ccm_clean(&bpf->ccm);
	WARN_ON(!list_empty(&bpf->map_list));
	WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use);
	rhashtable_free_and_destroy(&bpf->maps_neutral,
				    nfp_check_rhashtable_empty, NULL);
	kfree(bpf);
}

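/* App-type descriptor wiring the BPF callbacks into the core NFP
 * driver.
 */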
const struct nfp_app_type app_bpf = {
	.id		= NFP_APP_BPF_NIC,

	.init		= nfp_bpf_init,
	.clean		= nfp_bpf_clean,
	.start		= nfp_bpf_start,

	.check_mtu	= nfp_bpf_check_mtu,

	.extra_cap	= nfp_bpf_extra_cap,

	.ndo_init	= nfp_bpf_ndo_init,
	.ndo_uninit	= nfp_bpf_ndo_uninit,

	.vnic_alloc	= nfp_bpf_vnic_alloc,
	.vnic_free	= nfp_bpf_vnic_free,

	.ctrl_msg_rx	= nfp_bpf_ctrl_msg_rx,
	.ctrl_msg_rx_raw	= nfp_bpf_ctrl_msg_rx_raw,

	.setup_tc	= nfp_bpf_setup_tc,

	.xdp_offload	= nfp_bpf_xdp_offload,
};