/*
 * Copyright (C) 2016-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below. You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*
 * Netronome network device driver: BPF offload functions for PF and VF
 */
#define pr_fmt(fmt)     "NFP net bpf: " fmt

#include <linux/bpf.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/mm.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "main.h"
#include "../nfp_app.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"
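
/* Build the driver-private instruction list: allocate one nfp_insn_meta
 * per eBPF instruction so later passes can annotate and translate it,
 * then let the JIT run its preparation pass over the whole program.
 */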
static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
                 unsigned int cnt)
{
        struct nfp_insn_meta *meta;
        unsigned int i;

        for (i = 0; i < cnt; i++) {
                meta = kzalloc(sizeof(*meta), GFP_KERNEL);
                if (!meta)
                        return -ENOMEM;

                meta->insn = prog[i];
                meta->n = i;

                list_add_tail(&meta->l, &nfp_prog->insns);
        }

        nfp_bpf_jit_prepare(nfp_prog, cnt);

        return 0;
}
static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
        struct nfp_insn_meta *meta, *tmp;

        list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
                list_del(&meta->l);
                kfree(meta);
        }
        kfree(nfp_prog);
}
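
/* Called for BPF_OFFLOAD_VERIFIER_PREP: allocate the nfp_prog that
 * shadows this bpf_prog for the whole offload lifetime and point the
 * kernel verifier at the driver's analyzer callbacks.
 */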
static int
nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
                      struct netdev_bpf *bpf)
{
        struct bpf_prog *prog = bpf->verifier.prog;
        struct nfp_prog *nfp_prog;
        int ret;

        nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
        if (!nfp_prog)
                return -ENOMEM;
        prog->aux->offload->dev_priv = nfp_prog;

        INIT_LIST_HEAD(&nfp_prog->insns);
        nfp_prog->type = prog->type;
        nfp_prog->bpf = app->priv;

        ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
        if (ret)
                goto err_free;

        nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);
        bpf->verifier.ops = &nfp_bpf_analyzer_ops;

        return 0;

err_free:
        nfp_prog_free(nfp_prog);

        return ret;
}
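
/* Called for BPF_OFFLOAD_TRANSLATE: check the program against the
 * stack and length limits the firmware advertises, then JIT it into
 * the NFP instruction buffer.
 */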
static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
{
        struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
        unsigned int stack_size;
        unsigned int max_instr;
        int err;

        stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
        if (prog->aux->stack_depth > stack_size) {
                nn_info(nn, "stack too large: program %dB > FW stack %dB\n",
                        prog->aux->stack_depth, stack_size);
                return -EOPNOTSUPP;
        }
        nfp_prog->stack_depth = round_up(prog->aux->stack_depth, 4);

        max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
        nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);

        nfp_prog->prog = kvmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
        if (!nfp_prog->prog)
                return -ENOMEM;

        err = nfp_bpf_jit(nfp_prog);
        if (err)
                return err;

        prog->aux->offload->jited_len = nfp_prog->prog_len * sizeof(u64);
        prog->aux->offload->jited_image = nfp_prog->prog;

        return 0;
}
static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
{
        struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;

        kvfree(nfp_prog->prog);
        nfp_prog_free(nfp_prog);

        return 0;
}
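
/* A NULL key asks for the first entry; otherwise return the entry
 * following @key, matching BPF_MAP_GET_NEXT_KEY semantics.
 */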
static int
nfp_bpf_map_get_next_key(struct bpf_offloaded_map *offmap,
                         void *key, void *next_key)
{
        if (!key)
                return nfp_bpf_ctrl_getfirst_entry(offmap, next_key);
        return nfp_bpf_ctrl_getnext_entry(offmap, key, next_key);
}
static int
nfp_bpf_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
{
        /* Array map elements cannot be deleted, only overwritten */
        if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
                return -EINVAL;
        return nfp_bpf_ctrl_del_entry(offmap, key);
}
static const struct bpf_map_dev_ops nfp_bpf_map_ops = {
        .map_get_next_key       = nfp_bpf_map_get_next_key,
        .map_lookup_elem        = nfp_bpf_ctrl_lookup_entry,
        .map_update_elem        = nfp_bpf_ctrl_update_entry,
        .map_delete_elem        = nfp_bpf_map_delete_elem,
};
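
/* Called for BPF_OFFLOAD_MAP_ALLOC: validate the requested map against
 * the capabilities the firmware advertised (supported map types, map
 * and element counts, key/value sizes) before asking the device to
 * allocate it.
 */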
static int
nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
        struct nfp_bpf_map *nfp_map;
        long long int res;

        if (!bpf->maps.types)
                return -EOPNOTSUPP;

        if (offmap->map.map_flags ||
            offmap->map.numa_node != NUMA_NO_NODE) {
                pr_info("map flags are not supported\n");
                return -EINVAL;
        }

        if (!(bpf->maps.types & 1 << offmap->map.map_type)) {
                pr_info("map type not supported\n");
                return -EOPNOTSUPP;
        }
        if (bpf->maps.max_maps == bpf->maps_in_use) {
                pr_info("too many maps for a device\n");
                return -ENOMEM;
        }
        if (bpf->maps.max_elems - bpf->map_elems_in_use <
            offmap->map.max_entries) {
                pr_info("map with too many elements: %u, left: %u\n",
                        offmap->map.max_entries,
                        bpf->maps.max_elems - bpf->map_elems_in_use);
                return -ENOMEM;
        }
        if (offmap->map.key_size > bpf->maps.max_key_sz ||
            offmap->map.value_size > bpf->maps.max_val_sz ||
            round_up(offmap->map.key_size, 8) +
            round_up(offmap->map.value_size, 8) > bpf->maps.max_elem_sz) {
                pr_info("elements don't fit in device constraints\n");
                return -ENOMEM;
        }

        nfp_map = kzalloc(sizeof(*nfp_map), GFP_USER);
        if (!nfp_map)
                return -ENOMEM;

        offmap->dev_priv = nfp_map;
        nfp_map->offmap = offmap;
        nfp_map->bpf = bpf;

        res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map);
        if (res < 0) {
                kfree(nfp_map);
                return res;
        }

        nfp_map->tid = res;
        offmap->dev_ops = &nfp_bpf_map_ops;
        bpf->maps_in_use++;
        bpf->map_elems_in_use += offmap->map.max_entries;
        list_add_tail(&nfp_map->l, &bpf->map_list);

        return 0;
}
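
/* Called for BPF_OFFLOAD_MAP_FREE: release the device-side map and
 * return its entries to the per-device accounting.
 */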
static int
nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
        struct nfp_bpf_map *nfp_map = offmap->dev_priv;

        nfp_bpf_ctrl_free_map(bpf, nfp_map);
        list_del_init(&nfp_map->l);
        bpf->map_elems_in_use -= offmap->map.max_entries;
        bpf->maps_in_use--;
        kfree(nfp_map);

        return 0;
}
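
/* Single entry point for the netdev_bpf commands the stack may issue;
 * dispatch to the program and map handlers above.
 */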
int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
{
        switch (bpf->command) {
        case BPF_OFFLOAD_VERIFIER_PREP:
                return nfp_bpf_verifier_prep(app, nn, bpf);
        case BPF_OFFLOAD_TRANSLATE:
                return nfp_bpf_translate(nn, bpf->offload.prog);
        case BPF_OFFLOAD_DESTROY:
                return nfp_bpf_destroy(nn, bpf->offload.prog);
        case BPF_OFFLOAD_MAP_ALLOC:
                return nfp_bpf_map_alloc(app->priv, bpf->offmap);
        case BPF_OFFLOAD_MAP_FREE:
                return nfp_bpf_map_free(app->priv, bpf->offmap);
        default:
                return -EINVAL;
        }
}
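
/* DMA-map the relocated program image and tell the firmware to load
 * it. The buffer is only needed for the duration of the reconfig
 * command, so it is unmapped and freed again before returning.
 */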
static int
nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
                 struct netlink_ext_ack *extack)
{
        struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
        unsigned int max_mtu;
        dma_addr_t dma_addr;
        void *img;
        int err;

        max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
        if (max_mtu < nn->dp.netdev->mtu) {
                NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with MTU larger than HW packet split boundary");
                return -EOPNOTSUPP;
        }

        img = nfp_bpf_relo_for_vnic(nfp_prog, nn->app_priv);
        if (IS_ERR(img))
                return PTR_ERR(img);

        dma_addr = dma_map_single(nn->dp.dev, img,
                                  nfp_prog->prog_len * sizeof(u64),
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(nn->dp.dev, dma_addr)) {
                kfree(img);
                return -ENOMEM;
        }

        nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
        nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);

        /* Load up the JITed code */
        err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
        if (err)
                NL_SET_ERR_MSG_MOD(extack,
                                   "FW command error while loading BPF");

        dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
                         DMA_TO_DEVICE);
        kfree(img);

        return err;
}
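
/* Flip the BPF capability bit in the control word to direct packets
 * through (or, in the stop path below, around) the loaded program.
 */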
static void
nfp_net_bpf_start(struct nfp_net *nn, struct netlink_ext_ack *extack)
{
        int err;

        /* Enable passing packets through BPF function */
        nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
        nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
        err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
        if (err)
                NL_SET_ERR_MSG_MOD(extack,
                                   "FW command error while enabling BPF");
}
static int nfp_net_bpf_stop(struct nfp_net *nn)
{
        if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
                return 0;

        nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
        nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);

        return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}
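
/* Top-level offload entry point: check the program was prepared for
 * this netdev, honour the firmware's live-reload capability, and
 * sequence the load/start/stop steps.
 */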
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
                        bool old_prog, struct netlink_ext_ack *extack)
{
        int err;

        if (prog) {
                struct bpf_prog_offload *offload = prog->aux->offload;

                if (!offload)
                        return -EINVAL;
                if (offload->netdev != nn->dp.netdev)
                        return -EINVAL;
        }

        if (prog && old_prog) {
                u8 cap;

                cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP);
                if (!(cap & NFP_NET_BPF_CAP_RELO)) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "FW does not support live reload");
                        return -EBUSY;
                }
        }

        /* Something else is loaded, different program type? */
        if (!old_prog && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
                return -EBUSY;

        if (old_prog && !prog)
                return nfp_net_bpf_stop(nn);

        err = nfp_net_bpf_load(nn, prog, extack);
        if (err)
                return err;

        if (!old_prog)
                nfp_net_bpf_start(nn, extack);

        return 0;
}