Linux 4.16.11
drivers/net/ethernet/netronome/nfp/bpf/cmsg.c

/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bpf.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/wait.h>

#include "../nfp_app.h"
#include "../nfp_net.h"
#include "fw.h"
#include "main.h"

#define cmsg_warn(bpf, msg...)	nn_dp_warn(&(bpf)->app->ctrl->dp, msg)

#define NFP_BPF_TAG_ALLOC_SPAN	(U16_MAX / 4)
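
/* Message tags come from a free-running u16 counter; a tag is busy from
 * allocation until its reply is consumed or dropped.  The unsigned
 * subtraction below handles counter wraparound, and comparing against
 * NFP_BPF_TAG_ALLOC_SPAN caps in-flight requests at a quarter of the
 * tag space so a timed-out tag is not reused too soon.
 */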
static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf)
{
        u16 used_tags;

        used_tags = bpf->tag_alloc_next - bpf->tag_alloc_last;

        return used_tags > NFP_BPF_TAG_ALLOC_SPAN;
}

static int nfp_bpf_alloc_tag(struct nfp_app_bpf *bpf)
{
        /* All FW communication for BPF is request-reply.  To make sure we
         * don't reuse the message ID too early after timeout - limit the
         * number of requests in flight.
         */
        if (nfp_bpf_all_tags_busy(bpf)) {
                cmsg_warn(bpf, "all FW request contexts busy!\n");
                return -EAGAIN;
        }

        WARN_ON(__test_and_set_bit(bpf->tag_alloc_next, bpf->tag_allocator));
        return bpf->tag_alloc_next++;
}
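
/* Clear the tag's bit in the allocator bitmap, then advance
 * tag_alloc_last past any tags already freed out of order, keeping the
 * busy span computed by nfp_bpf_all_tags_busy() tight.  Called with the
 * control message lock held.
 */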
static void nfp_bpf_free_tag(struct nfp_app_bpf *bpf, u16 tag)
{
        WARN_ON(!__test_and_clear_bit(tag, bpf->tag_allocator));

        while (!test_bit(bpf->tag_alloc_last, bpf->tag_allocator) &&
               bpf->tag_alloc_last != bpf->tag_alloc_next)
                bpf->tag_alloc_last++;
}

static struct sk_buff *
nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
{
        struct sk_buff *skb;

        skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
        if (!skb)
                return NULL;
        skb_put(skb, size);

        return skb;
}
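
/* Size a map operation request for @n key/value pairs: the fixed
 * request header plus n variable-length cmsg_key_value_pair elements.
 */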
static struct sk_buff *
nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
{
        unsigned int size;

        size = sizeof(struct cmsg_req_map_op);
        size += sizeof(struct cmsg_key_value_pair) * n;

        return nfp_bpf_cmsg_alloc(bpf, size);
}

static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb)
{
        struct cmsg_hdr *hdr;

        hdr = (struct cmsg_hdr *)skb->data;

        return be16_to_cpu(hdr->tag);
}
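
/* Search the reply queue for a message carrying @tag.  On a match the
 * tag is released and the skb is unlinked and handed to the caller.
 * Callers must hold the control message lock - the double-underscore
 * variant does no locking of its own.
 */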
static struct sk_buff *__nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
{
        unsigned int msg_tag;
        struct sk_buff *skb;

        skb_queue_walk(&bpf->cmsg_replies, skb) {
                msg_tag = nfp_bpf_cmsg_get_tag(skb);
                if (msg_tag == tag) {
                        nfp_bpf_free_tag(bpf, tag);
                        __skb_unlink(skb, &bpf->cmsg_replies);
                        return skb;
                }
        }

        return NULL;
}

static struct sk_buff *nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
{
        struct sk_buff *skb;

        nfp_ctrl_lock(bpf->app->ctrl);
        skb = __nfp_bpf_reply(bpf, tag);
        nfp_ctrl_unlock(bpf->app->ctrl);

        return skb;
}

static struct sk_buff *nfp_bpf_reply_drop_tag(struct nfp_app_bpf *bpf, u16 tag)
{
        struct sk_buff *skb;

        nfp_ctrl_lock(bpf->app->ctrl);
        skb = __nfp_bpf_reply(bpf, tag);
        if (!skb)
                nfp_bpf_free_tag(bpf, tag);
        nfp_ctrl_unlock(bpf->app->ctrl);

        return skb;
}
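
/* Wait for the reply to request @tag.  Replies usually arrive quickly,
 * so first busy-poll for up to 50 * 4 usecs, then fall back to an
 * interruptible sleep of up to 5 seconds.  Whatever the outcome, the
 * tag is released - either by matching the reply or by dropping it.
 */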
static struct sk_buff *
nfp_bpf_cmsg_wait_reply(struct nfp_app_bpf *bpf, enum nfp_bpf_cmsg_type type,
                        int tag)
{
        struct sk_buff *skb;
        int i, err;

        for (i = 0; i < 50; i++) {
                udelay(4);
                skb = nfp_bpf_reply(bpf, tag);
                if (skb)
                        return skb;
        }

        err = wait_event_interruptible_timeout(bpf->cmsg_wq,
                                               skb = nfp_bpf_reply(bpf, tag),
                                               msecs_to_jiffies(5000));
        /* We didn't get a response - try one last time and atomically drop
         * the tag even if no response is matched.
         */
        if (!skb)
                skb = nfp_bpf_reply_drop_tag(bpf, tag);
        if (err < 0) {
                cmsg_warn(bpf, "%s waiting for response to 0x%02x: %d\n",
                          err == -ERESTARTSYS ? "interrupted" : "error",
                          type, err);
                return ERR_PTR(err);
        }
        if (!skb) {
                cmsg_warn(bpf, "timeout waiting for response to 0x%02x\n",
                          type);
                return ERR_PTR(-ETIMEDOUT);
        }

        return skb;
}
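
/* Send request @skb and wait for the firmware's reply.  Consumes @skb
 * on all paths.  Returns the reply skb on success or an ERR_PTR();
 * replies with an unexpected type, or an unexpected length when
 * @reply_size is non-zero, are dropped as -EIO.
 */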
static struct sk_buff *
nfp_bpf_cmsg_communicate(struct nfp_app_bpf *bpf, struct sk_buff *skb,
                         enum nfp_bpf_cmsg_type type, unsigned int reply_size)
{
        struct cmsg_hdr *hdr;
        int tag;

        nfp_ctrl_lock(bpf->app->ctrl);
        tag = nfp_bpf_alloc_tag(bpf);
        if (tag < 0) {
                nfp_ctrl_unlock(bpf->app->ctrl);
                dev_kfree_skb_any(skb);
                return ERR_PTR(tag);
        }

        hdr = (void *)skb->data;
        hdr->ver = CMSG_MAP_ABI_VERSION;
        hdr->type = type;
        hdr->tag = cpu_to_be16(tag);

        __nfp_app_ctrl_tx(bpf->app, skb);

        nfp_ctrl_unlock(bpf->app->ctrl);

        skb = nfp_bpf_cmsg_wait_reply(bpf, type, tag);
        if (IS_ERR(skb))
                return skb;

        hdr = (struct cmsg_hdr *)skb->data;
        /* 0 reply_size means caller will do the validation */
        if (reply_size && skb->len != reply_size) {
                cmsg_warn(bpf, "cmsg drop - wrong size %d != %d!\n",
                          skb->len, reply_size);
                goto err_free;
        }
        if (hdr->type != __CMSG_REPLY(type)) {
                cmsg_warn(bpf, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
                          hdr->type, __CMSG_REPLY(type));
                goto err_free;
        }

        return skb;
err_free:
        dev_kfree_skb_any(skb);
        return ERR_PTR(-EIO);
}
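
/* Translate the firmware's numeric return code into a negative errno.
 * Codes beyond the table are reported as -EIO, since they mean the
 * driver and firmware disagree about the ABI.
 */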
static int
nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf,
                         struct cmsg_reply_map_simple *reply)
{
        static const int res_table[] = {
                [CMSG_RC_SUCCESS]       = 0,
                [CMSG_RC_ERR_MAP_FD]    = -EBADFD,
                [CMSG_RC_ERR_MAP_NOENT] = -ENOENT,
                [CMSG_RC_ERR_MAP_ERR]   = -EINVAL,
                [CMSG_RC_ERR_MAP_PARSE] = -EIO,
                [CMSG_RC_ERR_MAP_EXIST] = -EEXIST,
                [CMSG_RC_ERR_MAP_NOMEM] = -ENOMEM,
                [CMSG_RC_ERR_MAP_E2BIG] = -E2BIG,
        };
        u32 rc;

        rc = be32_to_cpu(reply->rc);
        if (rc >= ARRAY_SIZE(res_table)) {
                cmsg_warn(bpf, "FW responded with invalid status: %u\n", rc);
                return -EIO;
        }

        return res_table[rc];
}
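
/* Ask the firmware to allocate a device table for @map.  Returns the
 * table id (tid) on success or a negative errno; the long long return
 * type lets a full u32 tid coexist with negative error codes.
 */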
long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map)
{
        struct cmsg_reply_map_alloc_tbl *reply;
        struct cmsg_req_map_alloc_tbl *req;
        struct sk_buff *skb;
        u32 tid;
        int err;

        skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
        if (!skb)
                return -ENOMEM;

        req = (void *)skb->data;
        req->key_size = cpu_to_be32(map->key_size);
        req->value_size = cpu_to_be32(map->value_size);
        req->max_entries = cpu_to_be32(map->max_entries);
        req->map_type = cpu_to_be32(map->map_type);
        req->map_flags = 0;

        skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_ALLOC,
                                       sizeof(*reply));
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        reply = (void *)skb->data;
        err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
        if (err)
                goto err_free;

        tid = be32_to_cpu(reply->tid);
        dev_consume_skb_any(skb);

        return tid;
err_free:
        dev_kfree_skb_any(skb);
        return err;
}
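
/* Release the device table backing @nfp_map.  This is best effort: if
 * the message cannot be allocated or the firmware reports an error,
 * the table is leaked on the device and a warning is logged.
 */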
void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
{
        struct cmsg_reply_map_free_tbl *reply;
        struct cmsg_req_map_free_tbl *req;
        struct sk_buff *skb;
        int err;

        skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
        if (!skb) {
                cmsg_warn(bpf, "leaking map - failed to allocate msg\n");
                return;
        }

        req = (void *)skb->data;
        req->tid = cpu_to_be32(nfp_map->tid);

        skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_FREE,
                                       sizeof(*reply));
        if (IS_ERR(skb)) {
                cmsg_warn(bpf, "leaking map - I/O error\n");
                return;
        }

        reply = (void *)skb->data;
        err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
        if (err)
                cmsg_warn(bpf, "leaking map - FW responded with: %d\n", err);

        dev_consume_skb_any(skb);
}
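
/* Common worker for all map element operations.  Requests and replies
 * always carry exactly one key/value element; @key, @value, @out_key
 * and @out_value may each be NULL when the operation does not use the
 * corresponding field.
 */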
static int
nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
                      enum nfp_bpf_cmsg_type op,
                      u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
{
        struct nfp_bpf_map *nfp_map = offmap->dev_priv;
        struct nfp_app_bpf *bpf = nfp_map->bpf;
        struct bpf_map *map = &offmap->map;
        struct cmsg_reply_map_op *reply;
        struct cmsg_req_map_op *req;
        struct sk_buff *skb;
        int err;

        /* FW messages have no space for more than 32 bits of flags */
        if (flags >> 32)
                return -EOPNOTSUPP;

        skb = nfp_bpf_cmsg_map_req_alloc(bpf, 1);
        if (!skb)
                return -ENOMEM;

        req = (void *)skb->data;
        req->tid = cpu_to_be32(nfp_map->tid);
        req->count = cpu_to_be32(1);
        req->flags = cpu_to_be32(flags);

        /* Copy inputs */
        if (key)
                memcpy(&req->elem[0].key, key, map->key_size);
        if (value)
                memcpy(&req->elem[0].value, value, map->value_size);

        skb = nfp_bpf_cmsg_communicate(bpf, skb, op,
                                       sizeof(*reply) + sizeof(*reply->elem));
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        reply = (void *)skb->data;
        err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
        if (err)
                goto err_free;

        /* Copy outputs */
        if (out_key)
                memcpy(out_key, &reply->elem[0].key, map->key_size);
        if (out_value)
                memcpy(out_value, &reply->elem[0].value, map->value_size);

        dev_consume_skb_any(skb);

        return 0;
err_free:
        dev_kfree_skb_any(skb);
        return err;
}

int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
                              void *key, void *value, u64 flags)
{
        return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_UPDATE,
                                     key, value, flags, NULL, NULL);
}

int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key)
{
        return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_DELETE,
                                     key, NULL, 0, NULL, NULL);
}

int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
                              void *key, void *value)
{
        return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_LOOKUP,
                                     key, NULL, 0, NULL, value);
}

int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
                                void *next_key)
{
        return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETFIRST,
                                     NULL, NULL, 0, next_key, NULL);
}

int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
                               void *key, void *next_key)
{
        return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETNEXT,
                                     key, NULL, 0, next_key, NULL);
}
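
/* RX handler for BPF control messages.  Too-short messages and tags
 * nobody is waiting for are dropped; otherwise the reply is queued on
 * cmsg_replies and all sleepers on cmsg_wq are woken so the matching
 * waiter can claim it.
 */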
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
{
        struct nfp_app_bpf *bpf = app->priv;
        unsigned int tag;

        if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) {
                cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len);
                goto err_free;
        }

        nfp_ctrl_lock(bpf->app->ctrl);

        tag = nfp_bpf_cmsg_get_tag(skb);
        if (unlikely(!test_bit(tag, bpf->tag_allocator))) {
                cmsg_warn(bpf, "cmsg drop - no one is waiting for tag %u!\n",
                          tag);
                goto err_unlock;
        }

        __skb_queue_tail(&bpf->cmsg_replies, skb);
        wake_up_interruptible_all(&bpf->cmsg_wq);

        nfp_ctrl_unlock(bpf->app->ctrl);

        return;
err_unlock:
        nfp_ctrl_unlock(bpf->app->ctrl);
err_free:
        dev_kfree_skb_any(skb);
}