/*
 * Copyright (C) 2016-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below.  You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_net_offload.c
 * Netronome network device driver: TC offload functions for PF and VF
 */

#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/bpf.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/mm.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "main.h"
#include "../nfp_app.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"
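
/* Build the driver's per-instruction metadata list: one nfp_insn_meta is
 * allocated per BPF instruction, recording the instruction and its index,
 * then nfp_bpf_jit_prepare() runs over the resulting list.  On allocation
 * failure the partially built list is released by the caller through
 * nfp_prog_free().
 */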
static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
                 unsigned int cnt)
{
        struct nfp_insn_meta *meta;
        unsigned int i;

        for (i = 0; i < cnt; i++) {
                meta = kzalloc(sizeof(*meta), GFP_KERNEL);
                if (!meta)
                        return -ENOMEM;

                meta->insn = prog[i];
                meta->n = i;

                list_add_tail(&meta->l, &nfp_prog->insns);
        }

        nfp_bpf_jit_prepare(nfp_prog, cnt);

        return 0;
}

static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
        struct nfp_insn_meta *meta, *tmp;

        list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
                list_del(&meta->l);
                kfree(meta);
        }
        kfree(nfp_prog);
}
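
/* BPF_OFFLOAD_VERIFIER_PREP: allocate the driver-private program state,
 * copy the instructions into it and point the kernel verifier at the
 * driver's callbacks (nfp_bpf_analyzer_ops) so device-specific checks run
 * alongside generic verification.
 */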
static int
nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
                      struct netdev_bpf *bpf)
{
        struct bpf_prog *prog = bpf->verifier.prog;
        struct nfp_prog *nfp_prog;
        int ret;

        nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
        if (!nfp_prog)
                return -ENOMEM;
        prog->aux->offload->dev_priv = nfp_prog;

        INIT_LIST_HEAD(&nfp_prog->insns);
        nfp_prog->type = prog->type;
        nfp_prog->bpf = app->priv;

        ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
        if (ret)
                goto err_free;

        nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);
        bpf->verifier.ops = &nfp_bpf_analyzer_ops;

        return 0;

err_free:
        nfp_prog_free(nfp_prog);

        return ret;
}
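
/* BPF_OFFLOAD_TRANSLATE: check the program's stack usage against the
 * firmware-advertised limit (NFP_NET_CFG_BPF_STACK_SZ is in units of 64
 * bytes), size the output buffer from the firmware's maximum instruction
 * count and run the NFP JIT.  The resulting image and its length are
 * recorded in the program's offload state.
 */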
static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
{
        struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
        unsigned int stack_size;
        unsigned int max_instr;
        int err;

        stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
        if (prog->aux->stack_depth > stack_size) {
                nn_info(nn, "stack too large: program %dB > FW stack %dB\n",
                        prog->aux->stack_depth, stack_size);
                return -EOPNOTSUPP;
        }
        nfp_prog->stack_depth = round_up(prog->aux->stack_depth, 4);

        max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
        nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);

        nfp_prog->prog = kvmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
        if (!nfp_prog->prog)
                return -ENOMEM;

        err = nfp_bpf_jit(nfp_prog);
        if (err)
                return err;

        prog->aux->offload->jited_len = nfp_prog->prog_len * sizeof(u64);
        prog->aux->offload->jited_image = nfp_prog->prog;

        return 0;
}

static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
{
        struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;

        kvfree(nfp_prog->prog);
        nfp_prog_free(nfp_prog);

        return 0;
}
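
/* Offloaded map operations below are backed by the nfp_bpf_ctrl_* control
 * message helpers; get_next_key treats a NULL key as "get first entry",
 * and deletes are rejected for array maps since array elements cannot be
 * removed.
 */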
static int
nfp_bpf_map_get_next_key(struct bpf_offloaded_map *offmap,
                         void *key, void *next_key)
{
        if (!key)
                return nfp_bpf_ctrl_getfirst_entry(offmap, next_key);
        return nfp_bpf_ctrl_getnext_entry(offmap, key, next_key);
}

static int
nfp_bpf_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
{
        if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
                return -EINVAL;
        return nfp_bpf_ctrl_del_entry(offmap, key);
}

static const struct bpf_map_dev_ops nfp_bpf_map_ops = {
        .map_get_next_key       = nfp_bpf_map_get_next_key,
        .map_lookup_elem        = nfp_bpf_ctrl_lookup_entry,
        .map_update_elem        = nfp_bpf_ctrl_update_entry,
        .map_delete_elem        = nfp_bpf_map_delete_elem,
};
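
/* BPF_OFFLOAD_MAP_ALLOC: validate the requested map against the
 * capabilities advertised by the firmware (supported map types, map and
 * element counts, key/value sizes), then ask the firmware to allocate the
 * map; the returned table id is stored in nfp_map->tid.
 */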
static int
nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
        struct nfp_bpf_map *nfp_map;
        long long int res;

        if (!bpf->maps.types)
                return -EOPNOTSUPP;

        if (offmap->map.map_flags ||
            offmap->map.numa_node != NUMA_NO_NODE) {
                pr_info("map flags are not supported\n");
                return -EINVAL;
        }

        if (!(bpf->maps.types & 1 << offmap->map.map_type)) {
                pr_info("map type not supported\n");
                return -EOPNOTSUPP;
        }
        if (bpf->maps.max_maps == bpf->maps_in_use) {
                pr_info("too many maps for a device\n");
                return -ENOMEM;
        }
        if (bpf->maps.max_elems - bpf->map_elems_in_use <
            offmap->map.max_entries) {
                pr_info("map with too many elements: %u, left: %u\n",
                        offmap->map.max_entries,
                        bpf->maps.max_elems - bpf->map_elems_in_use);
                return -ENOMEM;
        }
        if (offmap->map.key_size > bpf->maps.max_key_sz ||
            offmap->map.value_size > bpf->maps.max_val_sz ||
            round_up(offmap->map.key_size, 8) +
            round_up(offmap->map.value_size, 8) > bpf->maps.max_elem_sz) {
                pr_info("elements don't fit in device constraints\n");
                return -ENOMEM;
        }

        nfp_map = kzalloc(sizeof(*nfp_map), GFP_USER);
        if (!nfp_map)
                return -ENOMEM;

        offmap->dev_priv = nfp_map;
        nfp_map->offmap = offmap;
        nfp_map->bpf = bpf;

        res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map);
        if (res < 0) {
                kfree(nfp_map);
                return res;
        }

        nfp_map->tid = res;
        offmap->dev_ops = &nfp_bpf_map_ops;
        bpf->maps_in_use++;
        bpf->map_elems_in_use += offmap->map.max_entries;
        list_add_tail(&nfp_map->l, &bpf->map_list);

        return 0;
}

static int
nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
        struct nfp_bpf_map *nfp_map = offmap->dev_priv;

        nfp_bpf_ctrl_free_map(bpf, nfp_map);
        list_del_init(&nfp_map->l);
        bpf->map_elems_in_use -= offmap->map.max_entries;
        bpf->maps_in_use--;
        kfree(nfp_map);

        return 0;
}
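
/* Entry point for netdev_bpf (ndo_bpf) commands routed to the BPF app:
 * program verification prep, translation and teardown plus offloaded map
 * allocation and freeing are dispatched to the handlers above.
 */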
int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
{
        switch (bpf->command) {
        case BPF_OFFLOAD_VERIFIER_PREP:
                return nfp_bpf_verifier_prep(app, nn, bpf);
        case BPF_OFFLOAD_TRANSLATE:
                return nfp_bpf_translate(nn, bpf->offload.prog);
        case BPF_OFFLOAD_DESTROY:
                return nfp_bpf_destroy(nn, bpf->offload.prog);
        case BPF_OFFLOAD_MAP_ALLOC:
                return nfp_bpf_map_alloc(app->priv, bpf->offmap);
        case BPF_OFFLOAD_MAP_FREE:
                return nfp_bpf_map_free(app->priv, bpf->offmap);
        default:
                return -EINVAL;
        }
}
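
/* Copy the JITed image to the device: relocate it for this vNIC, DMA-map
 * the buffer, write its size and address to the config BAR and kick a
 * NFP_NET_CFG_UPDATE_BPF reconfig so the firmware loads it.  The MTU check
 * rejects netdev MTUs beyond the HW packet split boundary, computed as
 * NFP_NET_CFG_BPF_INL_MTU * 64 - 32 bytes.
 */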
static int
nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
                 struct netlink_ext_ack *extack)
{
        struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
        unsigned int max_mtu;
        dma_addr_t dma_addr;
        void *img;
        int err;

        max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
        if (max_mtu < nn->dp.netdev->mtu) {
                NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with MTU larger than HW packet split boundary");
                return -EOPNOTSUPP;
        }

        img = nfp_bpf_relo_for_vnic(nfp_prog, nn->app_priv);
        if (IS_ERR(img))
                return PTR_ERR(img);

        dma_addr = dma_map_single(nn->dp.dev, img,
                                  nfp_prog->prog_len * sizeof(u64),
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(nn->dp.dev, dma_addr)) {
                kfree(img);
                return -ENOMEM;
        }

        nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
        nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);

        /* Load up the JITed code */
        err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
        if (err)
                NL_SET_ERR_MSG_MOD(extack,
                                   "FW command error while loading BPF");

        dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
                         DMA_TO_DEVICE);
        kfree(img);

        return err;
}

static void
nfp_net_bpf_start(struct nfp_net *nn, struct netlink_ext_ack *extack)
{
        int err;

        /* Enable passing packets through BPF function */
        nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
        nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
        err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
        if (err)
                NL_SET_ERR_MSG_MOD(extack,
                                   "FW command error while enabling BPF");
}

static int nfp_net_bpf_stop(struct nfp_net *nn)
{
        if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
                return 0;

        nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
        nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);

        return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}
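
/* Attach, replace or remove the offloaded program on a vNIC.  The program
 * must have been prepared for offload against this netdev; replacing a
 * running program additionally requires the firmware's live reload
 * capability (NFP_NET_BPF_CAP_RELO).  BPF is enabled in the control word
 * only when no program was running before.
 */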
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
                        bool old_prog, struct netlink_ext_ack *extack)
{
        int err;

        if (prog) {
                struct bpf_prog_offload *offload = prog->aux->offload;

                if (!offload)
                        return -EINVAL;
                if (offload->netdev != nn->dp.netdev)
                        return -EINVAL;
        }

        if (prog && old_prog) {
                u8 cap;

                cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP);
                if (!(cap & NFP_NET_BPF_CAP_RELO)) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "FW does not support live reload");
                        return -EBUSY;
                }
        }

        /* Something else is loaded, different program type? */
        if (!old_prog && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
                return -EBUSY;

        if (old_prog && !prog)
                return nfp_net_bpf_stop(nn);

        err = nfp_net_bpf_load(nn, prog, extack);
        if (err)
                return err;

        if (!old_prog)
                nfp_net_bpf_start(nn, extack);

        return 0;
}