// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2016-2018 Netronome Systems, Inc. */

/*
 * Netronome network device driver: TC offload functions for PF and VF
 */

#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/bpf.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/mm.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "main.h"
#include "../ccm.h"
#include "../nfp_app.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"

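/* Programs may share host-resident ("neutral") maps such as perf event
 * arrays.  The helpers below keep a refcounted record of each such map in
 * bpf->maps_neutral, keyed by map id, so events coming back from the FW
 * can be matched to the host-side map.
 */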
static int
nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
		   struct bpf_map *map)
{
	struct nfp_bpf_neutral_map *record;
	int err;

	/* Reuse path - other offloaded program is already tracking this map. */
	record = rhashtable_lookup_fast(&bpf->maps_neutral, &map->id,
					nfp_bpf_maps_neutral_params);
	if (record) {
		nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;
		record->count++;
		return 0;
	}

	/* Grab a single ref to the map for our record.  The prog destroy ndo
	 * happens after free_used_maps().
	 */
	bpf_map_inc(map);

	record = kmalloc(sizeof(*record), GFP_KERNEL);
	if (!record) {
		err = -ENOMEM;
		goto err_map_put;
	}

	record->ptr = map;
	record->map_id = map->id;
	record->count = 1;

	err = rhashtable_insert_fast(&bpf->maps_neutral, &record->l,
				     nfp_bpf_maps_neutral_params);
	if (err)
		goto err_free_rec;

	nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;

	return 0;

err_free_rec:
	kfree(record);
err_map_put:
	bpf_map_put(map);
	return err;
}

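/* Drop the records taken by nfp_map_ptr_record() when a program goes away. */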
static void
nfp_map_ptrs_forget(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog)
{
	bool freed = false;
	int i;

	for (i = 0; i < nfp_prog->map_records_cnt; i++) {
		if (--nfp_prog->map_records[i]->count) {
			nfp_prog->map_records[i] = NULL;
			continue;
		}

		WARN_ON(rhashtable_remove_fast(&bpf->maps_neutral,
					       &nfp_prog->map_records[i]->l,
					       nfp_bpf_maps_neutral_params));
		freed = true;
	}

	if (freed) {
		synchronize_rcu();

		for (i = 0; i < nfp_prog->map_records_cnt; i++)
			if (nfp_prog->map_records[i]) {
				bpf_map_put(nfp_prog->map_records[i]->ptr);
				kfree(nfp_prog->map_records[i]);
			}
	}

	kfree(nfp_prog->map_records);
	nfp_prog->map_records = NULL;
	nfp_prog->map_records_cnt = 0;
}

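/* Walk the program's used_maps[] and record every map shared with the host. */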
static int
nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
		    struct bpf_prog *prog)
{
	int i, cnt, err = 0;

	mutex_lock(&prog->aux->used_maps_mutex);

	/* Quickly count the maps we will have to remember */
	cnt = 0;
	for (i = 0; i < prog->aux->used_map_cnt; i++)
		if (bpf_map_offload_neutral(prog->aux->used_maps[i]))
			cnt++;
	if (!cnt)
		goto out;

	nfp_prog->map_records = kmalloc_array(cnt,
					      sizeof(nfp_prog->map_records[0]),
					      GFP_KERNEL);
	if (!nfp_prog->map_records) {
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < prog->aux->used_map_cnt; i++)
		if (bpf_map_offload_neutral(prog->aux->used_maps[i])) {
			err = nfp_map_ptr_record(bpf, nfp_prog,
						 prog->aux->used_maps[i]);
			if (err) {
				nfp_map_ptrs_forget(bpf, nfp_prog);
				goto out;
			}
		}
	WARN_ON(cnt != nfp_prog->map_records_cnt);

out:
	mutex_unlock(&prog->aux->used_maps_mutex);
	return err;
}

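/* Copy the BPF instructions into per-instruction metadata nodes which the
 * verifier hooks and the JIT operate on.
 */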
static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
		 unsigned int cnt)
{
	struct nfp_insn_meta *meta;
	unsigned int i;

	for (i = 0; i < cnt; i++) {
		meta = kzalloc(sizeof(*meta), GFP_KERNEL);
		if (!meta)
			return -ENOMEM;

		meta->insn = prog[i];
		meta->n = i;
		if (is_mbpf_alu(meta)) {
			meta->umin_src = U64_MAX;
			meta->umin_dst = U64_MAX;
		}

		list_add_tail(&meta->l, &nfp_prog->insns);
	}
	nfp_prog->n_insns = cnt;

	nfp_bpf_jit_prepare(nfp_prog);

	return 0;
}

static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *tmp;

	kfree(nfp_prog->subprog);

	list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
		list_del(&meta->l);
		kfree(meta);
	}
	kfree(nfp_prog);
}

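/* ->prepare() callback: allocate the driver-private program state before
 * the kernel verifier starts calling our per-instruction hooks.
 */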
static int nfp_bpf_verifier_prep(struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog;
	int ret;

	nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
	if (!nfp_prog)
		return -ENOMEM;
	prog->aux->offload->dev_priv = nfp_prog;

	INIT_LIST_HEAD(&nfp_prog->insns);
	nfp_prog->type = prog->type;
	nfp_prog->bpf = bpf_offload_dev_priv(prog->aux->offload->offdev);

	ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
	if (ret)
		goto err_free;

	nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);

	return 0;

err_free:
	nfp_prog_free(nfp_prog);

	return ret;
}

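/* ->translate() callback: JIT the verified program into NFP instructions. */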
static int nfp_bpf_translate(struct bpf_prog *prog)
{
	struct nfp_net *nn = netdev_priv(prog->aux->offload->netdev);
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
	unsigned int max_instr;
	int err;

	/* We depend on dead code elimination succeeding */
	if (prog->aux->offload->opt_failed)
		return -EINVAL;

	max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
	nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);

	nfp_prog->prog = kvmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
	if (!nfp_prog->prog)
		return -ENOMEM;

	err = nfp_bpf_jit(nfp_prog);
	if (err)
		return err;

	prog->aux->offload->jited_len = nfp_prog->prog_len * sizeof(u64);
	prog->aux->offload->jited_image = nfp_prog->prog;

	return nfp_map_ptrs_record(nfp_prog->bpf, nfp_prog, prog);
}

static void nfp_bpf_destroy(struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;

	kvfree(nfp_prog->prog);
	nfp_map_ptrs_forget(nfp_prog->bpf, nfp_prog);
	nfp_prog_free(nfp_prog);
}

/* Atomic engine requires values to be in big endian, we need to byte swap
 * the value words used with xadd.
 */
static void nfp_map_bpf_byte_swap(struct nfp_bpf_map *nfp_map, void *value)
{
	u32 *word = value;
	unsigned int i;

	for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
		if (nfp_map->use_map[i].type == NFP_MAP_USE_ATOMIC_CNT)
			word[i] = (__force u32)cpu_to_be32(word[i]);
}

/* Mark value as unsafely initialized in case it becomes atomic later
 * and we didn't byte swap something non-byte swap neutral.
 */
static void
nfp_map_bpf_byte_swap_record(struct nfp_bpf_map *nfp_map, void *value)
{
	u32 *word = value;
	unsigned int i;

	for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
		if (nfp_map->use_map[i].type == NFP_MAP_UNUSED &&
		    word[i] != (__force u32)cpu_to_be32(word[i]))
			nfp_map->use_map[i].non_zero_update = 1;
}

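/* Offloaded map ops: proxy lookups, updates and deletes to the FW via
 * control messages, byte swapping atomic counter words as described above.
 */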
static int
nfp_bpf_map_lookup_entry(struct bpf_offloaded_map *offmap,
			 void *key, void *value)
{
	int err;

	err = nfp_bpf_ctrl_lookup_entry(offmap, key, value);
	if (err)
		return err;

	nfp_map_bpf_byte_swap(offmap->dev_priv, value);
	return 0;
}

static int
nfp_bpf_map_update_entry(struct bpf_offloaded_map *offmap,
			 void *key, void *value, u64 flags)
{
	nfp_map_bpf_byte_swap(offmap->dev_priv, value);
	nfp_map_bpf_byte_swap_record(offmap->dev_priv, value);
	return nfp_bpf_ctrl_update_entry(offmap, key, value, flags);
}

static int
nfp_bpf_map_get_next_key(struct bpf_offloaded_map *offmap,
			 void *key, void *next_key)
{
	if (!key)
		return nfp_bpf_ctrl_getfirst_entry(offmap, next_key);
	return nfp_bpf_ctrl_getnext_entry(offmap, key, next_key);
}

static int
nfp_bpf_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
{
	if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
		return -EINVAL;
	return nfp_bpf_ctrl_del_entry(offmap, key);
}

static const struct bpf_map_dev_ops nfp_bpf_map_ops = {
	.map_get_next_key	= nfp_bpf_map_get_next_key,
	.map_lookup_elem	= nfp_bpf_map_lookup_entry,
	.map_update_elem	= nfp_bpf_map_update_entry,
	.map_delete_elem	= nfp_bpf_map_delete_elem,
};

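/* Validate an offloaded map request against the FW-advertised capabilities
 * and, if acceptable, allocate the map on the device.
 */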
static int
nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
	struct nfp_bpf_map *nfp_map;
	unsigned int use_map_size;
	long long int res;

	if (!bpf->maps.types)
		return -EOPNOTSUPP;

	if (offmap->map.map_flags ||
	    offmap->map.numa_node != NUMA_NO_NODE) {
		pr_info("map flags are not supported\n");
		return -EINVAL;
	}

	if (!(bpf->maps.types & 1 << offmap->map.map_type)) {
		pr_info("map type not supported\n");
		return -EOPNOTSUPP;
	}
	if (bpf->maps.max_maps == bpf->maps_in_use) {
		pr_info("too many maps for a device\n");
		return -ENOMEM;
	}
	if (bpf->maps.max_elems - bpf->map_elems_in_use <
	    offmap->map.max_entries) {
		pr_info("map with too many elements: %u, left: %u\n",
			offmap->map.max_entries,
			bpf->maps.max_elems - bpf->map_elems_in_use);
		return -ENOMEM;
	}

	if (round_up(offmap->map.key_size, 8) +
	    round_up(offmap->map.value_size, 8) > bpf->maps.max_elem_sz) {
		pr_info("map elements too large: %u, FW max element size (key+value): %u\n",
			round_up(offmap->map.key_size, 8) +
			round_up(offmap->map.value_size, 8),
			bpf->maps.max_elem_sz);
		return -ENOMEM;
	}
	if (offmap->map.key_size > bpf->maps.max_key_sz) {
		pr_info("map key size %u, FW max is %u\n",
			offmap->map.key_size, bpf->maps.max_key_sz);
		return -ENOMEM;
	}
	if (offmap->map.value_size > bpf->maps.max_val_sz) {
		pr_info("map value size %u, FW max is %u\n",
			offmap->map.value_size, bpf->maps.max_val_sz);
		return -ENOMEM;
	}

	use_map_size = DIV_ROUND_UP(offmap->map.value_size, 4) *
		       sizeof_field(struct nfp_bpf_map, use_map[0]);

	nfp_map = kzalloc(sizeof(*nfp_map) + use_map_size, GFP_USER);
	if (!nfp_map)
		return -ENOMEM;

	offmap->dev_priv = nfp_map;
	nfp_map->offmap = offmap;
	nfp_map->bpf = bpf;
	spin_lock_init(&nfp_map->cache_lock);

	res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map);
	if (res < 0) {
		kfree(nfp_map);
		return res;
	}

	nfp_map->tid = res;
	offmap->dev_ops = &nfp_bpf_map_ops;
	bpf->maps_in_use++;
	bpf->map_elems_in_use += offmap->map.max_entries;
	list_add_tail(&nfp_map->l, &bpf->map_list);

	return 0;
}

static int
nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
	struct nfp_bpf_map *nfp_map = offmap->dev_priv;

	nfp_bpf_ctrl_free_map(bpf, nfp_map);
	dev_consume_skb_any(nfp_map->cache);
	WARN_ON_ONCE(nfp_map->cache_blockers);
	list_del_init(&nfp_map->l);
	bpf->map_elems_in_use -= offmap->map.max_entries;
	bpf->maps_in_use--;
	kfree(nfp_map);

	return 0;
}

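/* ndo_bpf entry point for map offload commands. */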
int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case BPF_OFFLOAD_MAP_ALLOC:
		return nfp_bpf_map_alloc(app->priv, bpf->offmap);
	case BPF_OFFLOAD_MAP_FREE:
		return nfp_bpf_map_free(app->priv, bpf->offmap);
	default:
		return -EINVAL;
	}
}

static unsigned long
nfp_bpf_perf_event_copy(void *dst, const void *src,
			unsigned long off, unsigned long len)
{
	memcpy(dst, src + off, len);
	return 0;
}

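/* Deliver a perf event received from the FW to the host-side perf ring.
 * The map id carried in the message is looked up in maps_neutral.
 */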
int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
			 unsigned int len)
{
	struct cmsg_bpf_event *cbe = (void *)data;
	struct nfp_bpf_neutral_map *record;
	u32 pkt_size, data_size, map_id;
	u64 map_id_full;

	if (len < sizeof(struct cmsg_bpf_event))
		return -EINVAL;

	pkt_size = be32_to_cpu(cbe->pkt_size);
	data_size = be32_to_cpu(cbe->data_size);
	map_id_full = be64_to_cpu(cbe->map_ptr);
	map_id = map_id_full;

	if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
		return -EINVAL;
	if (cbe->hdr.ver != NFP_CCM_ABI_VERSION)
		return -EINVAL;

	rcu_read_lock();
	record = rhashtable_lookup(&bpf->maps_neutral, &map_id,
				   nfp_bpf_maps_neutral_params);
	if (!record || map_id_full > U32_MAX) {
		rcu_read_unlock();
		cmsg_warn(bpf, "perf event: map id %lld (0x%llx) not recognized, dropping event\n",
			  map_id_full, map_id_full);
		return -EINVAL;
	}

	bpf_event_output(record->ptr, be32_to_cpu(cbe->cpu_id),
			 &cbe->data[round_up(pkt_size, 4)], data_size,
			 cbe->data, pkt_size, nfp_bpf_perf_event_copy);

	rcu_read_unlock();

	return 0;
}

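/* DMA the JITed image to the device and ask the FW to load it.  The FW
 * advertises its limits (packet split boundary, stack size, max program
 * length) through the control BAR.
 */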
static int
nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
		 struct netlink_ext_ack *extack)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
	unsigned int fw_mtu, pkt_off, max_stack, max_prog_len;
	dma_addr_t dma_addr;
	void *img;
	int err;

	fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
	pkt_off = min(prog->aux->max_pkt_offset, nn->dp.netdev->mtu);
	if (fw_mtu < pkt_off) {
		NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with potential packet access beyond HW packet split boundary");
		return -EOPNOTSUPP;
	}

	max_stack = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
	if (nfp_prog->stack_size > max_stack) {
		NL_SET_ERR_MSG_MOD(extack, "stack too large");
		return -EOPNOTSUPP;
	}

	max_prog_len = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
	if (nfp_prog->prog_len > max_prog_len) {
		NL_SET_ERR_MSG_MOD(extack, "program too long");
		return -EOPNOTSUPP;
	}

	img = nfp_bpf_relo_for_vnic(nfp_prog, nn->app_priv);
	if (IS_ERR(img))
		return PTR_ERR(img);

	dma_addr = dma_map_single(nn->dp.dev, img,
				  nfp_prog->prog_len * sizeof(u64),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(nn->dp.dev, dma_addr)) {
		kfree(img);
		return -ENOMEM;
	}

	nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
	nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);

	/* Load up the JITed code */
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
	if (err)
		NL_SET_ERR_MSG_MOD(extack,
				   "FW command error while loading BPF");

	dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
			 DMA_TO_DEVICE);
	kfree(img);

	return err;
}

static void
nfp_net_bpf_start(struct nfp_net *nn, struct netlink_ext_ack *extack)
{
	int err;

	/* Enable passing packets through BPF function */
	nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
	if (err)
		NL_SET_ERR_MSG_MOD(extack,
				   "FW command error while enabling BPF");
}

static int nfp_net_bpf_stop(struct nfp_net *nn)
{
	if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
		return 0;

	nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);

	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}

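/* Attach, replace or detach the offloaded program on a vNIC. */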
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
			bool old_prog, struct netlink_ext_ack *extack)
{
	int err;

	if (prog && !bpf_offload_dev_match(prog, nn->dp.netdev))
		return -EINVAL;

	if (prog && old_prog) {
		u8 cap;

		cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP);
		if (!(cap & NFP_NET_BPF_CAP_RELO)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "FW does not support live reload");
			return -EBUSY;
		}
	}

	/* Something else is loaded, different program type? */
	if (!old_prog && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
		return -EBUSY;

	if (old_prog && !prog)
		return nfp_net_bpf_stop(nn);

	err = nfp_net_bpf_load(nn, prog, extack);
	if (err)
		return err;

	if (!old_prog)
		nfp_net_bpf_start(nn, extack);

	return 0;
}

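/* Callbacks registered with the kernel's BPF offload infrastructure. */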
const struct bpf_prog_offload_ops nfp_bpf_dev_ops = {
	.insn_hook	= nfp_verify_insn,
	.finalize	= nfp_bpf_finalize,
	.replace_insn	= nfp_bpf_opt_replace_insn,
	.remove_insns	= nfp_bpf_opt_remove_insns,
	.prepare	= nfp_bpf_verifier_prep,
	.translate	= nfp_bpf_translate,
	.destroy	= nfp_bpf_destroy,
};