drivers/net/ethernet/netronome/nfp/bpf/offload.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2016-2018 Netronome Systems, Inc. */

/*
 * nfp_net_offload.c
 * Netronome network device driver: TC offload functions for PF and VF
 */

#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/bpf.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/mm.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "main.h"
#include "../ccm.h"
#include "../nfp_app.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"

static int
nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
		   struct bpf_map *map)
{
	struct nfp_bpf_neutral_map *record;
	int err;

	/* Reuse path - other offloaded program is already tracking this map. */
	record = rhashtable_lookup_fast(&bpf->maps_neutral, &map->id,
					nfp_bpf_maps_neutral_params);
	if (record) {
		nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;
		record->count++;
		return 0;
	}

	/* Grab a single ref to the map for our record.  The prog destroy ndo
	 * happens after free_used_maps().
	 */
	bpf_map_inc(map);

	record = kmalloc(sizeof(*record), GFP_KERNEL);
	if (!record) {
		err = -ENOMEM;
		goto err_map_put;
	}

	record->ptr = map;
	record->map_id = map->id;
	record->count = 1;

	err = rhashtable_insert_fast(&bpf->maps_neutral, &record->l,
				     nfp_bpf_maps_neutral_params);
	if (err)
		goto err_free_rec;

	nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;

	return 0;

err_free_rec:
	kfree(record);
err_map_put:
	bpf_map_put(map);
	return err;
}

static void
nfp_map_ptrs_forget(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog)
{
	bool freed = false;
	int i;

	for (i = 0; i < nfp_prog->map_records_cnt; i++) {
		if (--nfp_prog->map_records[i]->count) {
			nfp_prog->map_records[i] = NULL;
			continue;
		}

		WARN_ON(rhashtable_remove_fast(&bpf->maps_neutral,
					       &nfp_prog->map_records[i]->l,
					       nfp_bpf_maps_neutral_params));
		freed = true;
	}

	if (freed) {
		synchronize_rcu();

		for (i = 0; i < nfp_prog->map_records_cnt; i++)
			if (nfp_prog->map_records[i]) {
				bpf_map_put(nfp_prog->map_records[i]->ptr);
				kfree(nfp_prog->map_records[i]);
			}
	}

	kfree(nfp_prog->map_records);
	nfp_prog->map_records = NULL;
	nfp_prog->map_records_cnt = 0;
}

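/* Walk the program's used maps and record the offload-neutral ones (e.g. perf
 * event arrays), taking a reference on each so that FW events can be matched
 * back to host maps for the lifetime of the offloaded program.
 */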
static int
nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
		    struct bpf_prog *prog)
{
	int i, cnt, err = 0;

	mutex_lock(&prog->aux->used_maps_mutex);

	/* Quickly count the maps we will have to remember */
	cnt = 0;
	for (i = 0; i < prog->aux->used_map_cnt; i++)
		if (bpf_map_offload_neutral(prog->aux->used_maps[i]))
			cnt++;
	if (!cnt)
		goto out;

	nfp_prog->map_records = kmalloc_array(cnt,
					      sizeof(nfp_prog->map_records[0]),
					      GFP_KERNEL);
	if (!nfp_prog->map_records) {
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < prog->aux->used_map_cnt; i++)
		if (bpf_map_offload_neutral(prog->aux->used_maps[i])) {
			err = nfp_map_ptr_record(bpf, nfp_prog,
						 prog->aux->used_maps[i]);
			if (err) {
				nfp_map_ptrs_forget(bpf, nfp_prog);
				goto out;
			}
		}
	WARN_ON(cnt != nfp_prog->map_records_cnt);

out:
	mutex_unlock(&prog->aux->used_maps_mutex);
	return err;
}

static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
		 unsigned int cnt)
{
	struct nfp_insn_meta *meta;
	unsigned int i;

	for (i = 0; i < cnt; i++) {
		meta = kzalloc(sizeof(*meta), GFP_KERNEL);
		if (!meta)
			return -ENOMEM;

		meta->insn = prog[i];
		meta->n = i;
		if (is_mbpf_alu(meta)) {
			meta->umin_src = U64_MAX;
			meta->umin_dst = U64_MAX;
		}

		list_add_tail(&meta->l, &nfp_prog->insns);
	}
	nfp_prog->n_insns = cnt;

	nfp_bpf_jit_prepare(nfp_prog);

	return 0;
}

static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *tmp;

	kfree(nfp_prog->subprog);

	list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
		list_del(&meta->l);
		kfree(meta);
	}
	kfree(nfp_prog);
}

static int nfp_bpf_verifier_prep(struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog;
	int ret;

	nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
	if (!nfp_prog)
		return -ENOMEM;
	prog->aux->offload->dev_priv = nfp_prog;

	INIT_LIST_HEAD(&nfp_prog->insns);
	nfp_prog->type = prog->type;
	nfp_prog->bpf = bpf_offload_dev_priv(prog->aux->offload->offdev);

	ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
	if (ret)
		goto err_free;

	nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);

	return 0;

err_free:
	nfp_prog_free(nfp_prog);

	return ret;
}

static int nfp_bpf_translate(struct bpf_prog *prog)
{
	struct nfp_net *nn = netdev_priv(prog->aux->offload->netdev);
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
	unsigned int max_instr;
	int err;

	/* We depend on dead code elimination succeeding */
	if (prog->aux->offload->opt_failed)
		return -EINVAL;

	max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
	nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);

	nfp_prog->prog = kvmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
	if (!nfp_prog->prog)
		return -ENOMEM;

	err = nfp_bpf_jit(nfp_prog);
	if (err)
		return err;

	prog->aux->offload->jited_len = nfp_prog->prog_len * sizeof(u64);
	prog->aux->offload->jited_image = nfp_prog->prog;

	return nfp_map_ptrs_record(nfp_prog->bpf, nfp_prog, prog);
}

static void nfp_bpf_destroy(struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;

	kvfree(nfp_prog->prog);
	nfp_map_ptrs_forget(nfp_prog->bpf, nfp_prog);
	nfp_prog_free(nfp_prog);
}

/* Atomic engine requires values to be in big endian, we need to byte swap
 * the value words used with xadd.
 */
static void nfp_map_bpf_byte_swap(struct nfp_bpf_map *nfp_map, void *value)
{
	u32 *word = value;
	unsigned int i;

	for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
		if (nfp_map->use_map[i].type == NFP_MAP_USE_ATOMIC_CNT)
			word[i] = (__force u32)cpu_to_be32(word[i]);
}

/* Mark value as unsafely initialized in case it becomes atomic later
 * and we didn't byte swap something non-byte swap neutral.
 */
static void
nfp_map_bpf_byte_swap_record(struct nfp_bpf_map *nfp_map, void *value)
{
	u32 *word = value;
	unsigned int i;

	for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
		if (nfp_map->use_map[i].type == NFP_MAP_UNUSED &&
		    word[i] != (__force u32)cpu_to_be32(word[i]))
			nfp_map->use_map[i].non_zero_update = 1;
}

static int
nfp_bpf_map_lookup_entry(struct bpf_offloaded_map *offmap,
			 void *key, void *value)
{
	int err;

	err = nfp_bpf_ctrl_lookup_entry(offmap, key, value);
	if (err)
		return err;

	nfp_map_bpf_byte_swap(offmap->dev_priv, value);
	return 0;
}

static int
nfp_bpf_map_update_entry(struct bpf_offloaded_map *offmap,
			 void *key, void *value, u64 flags)
{
	nfp_map_bpf_byte_swap(offmap->dev_priv, value);
	nfp_map_bpf_byte_swap_record(offmap->dev_priv, value);
	return nfp_bpf_ctrl_update_entry(offmap, key, value, flags);
}

static int
nfp_bpf_map_get_next_key(struct bpf_offloaded_map *offmap,
			 void *key, void *next_key)
{
	if (!key)
		return nfp_bpf_ctrl_getfirst_entry(offmap, next_key);
	return nfp_bpf_ctrl_getnext_entry(offmap, key, next_key);
}

static int
nfp_bpf_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
{
	if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
		return -EINVAL;
	return nfp_bpf_ctrl_del_entry(offmap, key);
}

static const struct bpf_map_dev_ops nfp_bpf_map_ops = {
	.map_get_next_key	= nfp_bpf_map_get_next_key,
	.map_lookup_elem	= nfp_bpf_map_lookup_entry,
	.map_update_elem	= nfp_bpf_map_update_entry,
	.map_delete_elem	= nfp_bpf_map_delete_elem,
};

static int
nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
	struct nfp_bpf_map *nfp_map;
	unsigned int use_map_size;
	long long int res;

	if (!bpf->maps.types)
		return -EOPNOTSUPP;

	if (offmap->map.map_flags ||
	    offmap->map.numa_node != NUMA_NO_NODE) {
		pr_info("map flags are not supported\n");
		return -EINVAL;
	}

	if (!(bpf->maps.types & 1 << offmap->map.map_type)) {
		pr_info("map type not supported\n");
		return -EOPNOTSUPP;
	}
	if (bpf->maps.max_maps == bpf->maps_in_use) {
		pr_info("too many maps for a device\n");
		return -ENOMEM;
	}
	if (bpf->maps.max_elems - bpf->map_elems_in_use <
	    offmap->map.max_entries) {
		pr_info("map with too many elements: %u, left: %u\n",
			offmap->map.max_entries,
			bpf->maps.max_elems - bpf->map_elems_in_use);
		return -ENOMEM;
	}

	if (round_up(offmap->map.key_size, 8) +
	    round_up(offmap->map.value_size, 8) > bpf->maps.max_elem_sz) {
		pr_info("map elements too large: %u, FW max element size (key+value): %u\n",
			round_up(offmap->map.key_size, 8) +
			round_up(offmap->map.value_size, 8),
			bpf->maps.max_elem_sz);
		return -ENOMEM;
	}
	if (offmap->map.key_size > bpf->maps.max_key_sz) {
		pr_info("map key size %u, FW max is %u\n",
			offmap->map.key_size, bpf->maps.max_key_sz);
		return -ENOMEM;
	}
	if (offmap->map.value_size > bpf->maps.max_val_sz) {
		pr_info("map value size %u, FW max is %u\n",
			offmap->map.value_size, bpf->maps.max_val_sz);
		return -ENOMEM;
	}

	use_map_size = DIV_ROUND_UP(offmap->map.value_size, 4) *
		       sizeof_field(struct nfp_bpf_map, use_map[0]);

	nfp_map = kzalloc(sizeof(*nfp_map) + use_map_size, GFP_USER);
	if (!nfp_map)
		return -ENOMEM;

	offmap->dev_priv = nfp_map;
	nfp_map->offmap = offmap;
	nfp_map->bpf = bpf;
	spin_lock_init(&nfp_map->cache_lock);

	res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map);
	if (res < 0) {
		kfree(nfp_map);
		return res;
	}

	nfp_map->tid = res;
	offmap->dev_ops = &nfp_bpf_map_ops;
	bpf->maps_in_use++;
	bpf->map_elems_in_use += offmap->map.max_entries;
	list_add_tail(&nfp_map->l, &bpf->map_list);

	return 0;
}

static int
nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
	struct nfp_bpf_map *nfp_map = offmap->dev_priv;

	nfp_bpf_ctrl_free_map(bpf, nfp_map);
	dev_consume_skb_any(nfp_map->cache);
	WARN_ON_ONCE(nfp_map->cache_blockers);
	list_del_init(&nfp_map->l);
	bpf->map_elems_in_use -= offmap->map.max_entries;
	bpf->maps_in_use--;
	kfree(nfp_map);

	return 0;
}

int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case BPF_OFFLOAD_MAP_ALLOC:
		return nfp_bpf_map_alloc(app->priv, bpf->offmap);
	case BPF_OFFLOAD_MAP_FREE:
		return nfp_bpf_map_free(app->priv, bpf->offmap);
	default:
		return -EINVAL;
	}
}

static unsigned long
nfp_bpf_perf_event_copy(void *dst, const void *src,
			unsigned long off, unsigned long len)
{
	memcpy(dst, src + off, len);
	return 0;
}

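/* Forward a perf event control message from FW to the host perf ring of the
 * map it references; the map id carried in the message is matched against the
 * offload-neutral map records kept in bpf->maps_neutral.
 */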
int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
			 unsigned int len)
{
	struct cmsg_bpf_event *cbe = (void *)data;
	struct nfp_bpf_neutral_map *record;
	u32 pkt_size, data_size, map_id;
	u64 map_id_full;

	if (len < sizeof(struct cmsg_bpf_event))
		return -EINVAL;

	pkt_size = be32_to_cpu(cbe->pkt_size);
	data_size = be32_to_cpu(cbe->data_size);
	map_id_full = be64_to_cpu(cbe->map_ptr);
	map_id = map_id_full;

	if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
		return -EINVAL;
	if (cbe->hdr.ver != NFP_CCM_ABI_VERSION)
		return -EINVAL;

	rcu_read_lock();
	record = rhashtable_lookup(&bpf->maps_neutral, &map_id,
				   nfp_bpf_maps_neutral_params);
	if (!record || map_id_full > U32_MAX) {
		rcu_read_unlock();
		cmsg_warn(bpf, "perf event: map id %lld (0x%llx) not recognized, dropping event\n",
			  map_id_full, map_id_full);
		return -EINVAL;
	}

	bpf_event_output(record->ptr, be32_to_cpu(cbe->cpu_id),
			 &cbe->data[round_up(pkt_size, 4)], data_size,
			 cbe->data, pkt_size, nfp_bpf_perf_event_copy);
	rcu_read_unlock();

	return 0;
}

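/* Check FW limits (packet offset, stack and program size), relocate the JITed
 * image for this vNIC, DMA-map it and ask FW to load it via a BPF reconfig.
 */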
static int
nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
		 struct netlink_ext_ack *extack)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
	unsigned int fw_mtu, pkt_off, max_stack, max_prog_len;
	dma_addr_t dma_addr;
	void *img;
	int err;

	fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
	pkt_off = min(prog->aux->max_pkt_offset, nn->dp.netdev->mtu);
	if (fw_mtu < pkt_off) {
		NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with potential packet access beyond HW packet split boundary");
		return -EOPNOTSUPP;
	}

	max_stack = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
	if (nfp_prog->stack_size > max_stack) {
		NL_SET_ERR_MSG_MOD(extack, "stack too large");
		return -EOPNOTSUPP;
	}

	max_prog_len = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
	if (nfp_prog->prog_len > max_prog_len) {
		NL_SET_ERR_MSG_MOD(extack, "program too long");
		return -EOPNOTSUPP;
	}

	img = nfp_bpf_relo_for_vnic(nfp_prog, nn->app_priv);
	if (IS_ERR(img))
		return PTR_ERR(img);

	dma_addr = dma_map_single(nn->dp.dev, img,
				  nfp_prog->prog_len * sizeof(u64),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(nn->dp.dev, dma_addr)) {
		kfree(img);
		return -ENOMEM;
	}

	nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
	nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);

	/* Load up the JITed code */
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
	if (err)
		NL_SET_ERR_MSG_MOD(extack,
				   "FW command error while loading BPF");

	dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
			 DMA_TO_DEVICE);
	kfree(img);

	return err;
}

static void
nfp_net_bpf_start(struct nfp_net *nn, struct netlink_ext_ack *extack)
{
	int err;

	/* Enable passing packets through BPF function */
	nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
	if (err)
		NL_SET_ERR_MSG_MOD(extack,
				   "FW command error while enabling BPF");
}

static int nfp_net_bpf_stop(struct nfp_net *nn)
{
	if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
		return 0;

	nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);

	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}

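/* Top-level entry point for attaching, replacing or detaching an offloaded
 * program on a vNIC: checks device match and live-reload capability, then
 * loads the new image and enables or disables BPF processing as needed.
 */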
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
			bool old_prog, struct netlink_ext_ack *extack)
{
	int err;

	if (prog && !bpf_offload_dev_match(prog, nn->dp.netdev))
		return -EINVAL;

	if (prog && old_prog) {
		u8 cap;

		cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP);
		if (!(cap & NFP_NET_BPF_CAP_RELO)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "FW does not support live reload");
			return -EBUSY;
		}
	}

	/* Something else is loaded, different program type? */
	if (!old_prog && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
		return -EBUSY;

	if (old_prog && !prog)
		return nfp_net_bpf_stop(nn);

	err = nfp_net_bpf_load(nn, prog, extack);
	if (err)
		return err;

	if (!old_prog)
		nfp_net_bpf_start(nn, extack);

	return 0;
}

const struct bpf_prog_offload_ops nfp_bpf_dev_ops = {
	.insn_hook	= nfp_verify_insn,
	.finalize	= nfp_bpf_finalize,
	.replace_insn	= nfp_bpf_opt_replace_insn,
	.remove_insns	= nfp_bpf_opt_remove_insns,
	.prepare	= nfp_bpf_verifier_prep,
	.translate	= nfp_bpf_translate,
	.destroy	= nfp_bpf_destroy,
};