/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/bpf.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/wait.h>

#include "../nfp_app.h"
#include "../nfp_net.h"
#include "fw.h"
#include "main.h"
/* Datapath warning helper - prints via the app's control vNIC datapath. */
#define cmsg_warn(bpf, msg...)	nn_dp_warn(&(bpf)->app->ctrl->dp, msg)

/* Only a quarter of the 16-bit tag space may be in flight at once, so a
 * tag that timed out is not recycled too soon after its request.
 */
#define NFP_BPF_TAG_ALLOC_SPAN	(U16_MAX / 4)
50 static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf
*bpf
)
54 used_tags
= bpf
->tag_alloc_next
- bpf
->tag_alloc_last
;
56 return used_tags
> NFP_BPF_TAG_ALLOC_SPAN
;
59 static int nfp_bpf_alloc_tag(struct nfp_app_bpf
*bpf
)
61 /* All FW communication for BPF is request-reply. To make sure we
62 * don't reuse the message ID too early after timeout - limit the
63 * number of requests in flight.
65 if (nfp_bpf_all_tags_busy(bpf
)) {
66 cmsg_warn(bpf
, "all FW request contexts busy!\n");
70 WARN_ON(__test_and_set_bit(bpf
->tag_alloc_next
, bpf
->tag_allocator
));
71 return bpf
->tag_alloc_next
++;
74 static void nfp_bpf_free_tag(struct nfp_app_bpf
*bpf
, u16 tag
)
76 WARN_ON(!__test_and_clear_bit(tag
, bpf
->tag_allocator
));
78 while (!test_bit(bpf
->tag_alloc_last
, bpf
->tag_allocator
) &&
79 bpf
->tag_alloc_last
!= bpf
->tag_alloc_next
)
80 bpf
->tag_alloc_last
++;
83 static struct sk_buff
*
84 nfp_bpf_cmsg_alloc(struct nfp_app_bpf
*bpf
, unsigned int size
)
88 skb
= nfp_app_ctrl_msg_alloc(bpf
->app
, size
, GFP_KERNEL
);
94 static struct sk_buff
*
95 nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf
*bpf
, unsigned int n
)
99 size
= sizeof(struct cmsg_req_map_op
);
100 size
+= sizeof(struct cmsg_key_value_pair
) * n
;
102 return nfp_bpf_cmsg_alloc(bpf
, size
);
105 static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff
*skb
)
107 struct cmsg_hdr
*hdr
;
109 hdr
= (struct cmsg_hdr
*)skb
->data
;
111 return be16_to_cpu(hdr
->tag
);
114 static struct sk_buff
*__nfp_bpf_reply(struct nfp_app_bpf
*bpf
, u16 tag
)
116 unsigned int msg_tag
;
119 skb_queue_walk(&bpf
->cmsg_replies
, skb
) {
120 msg_tag
= nfp_bpf_cmsg_get_tag(skb
);
121 if (msg_tag
== tag
) {
122 nfp_bpf_free_tag(bpf
, tag
);
123 __skb_unlink(skb
, &bpf
->cmsg_replies
);
131 static struct sk_buff
*nfp_bpf_reply(struct nfp_app_bpf
*bpf
, u16 tag
)
135 nfp_ctrl_lock(bpf
->app
->ctrl
);
136 skb
= __nfp_bpf_reply(bpf
, tag
);
137 nfp_ctrl_unlock(bpf
->app
->ctrl
);
142 static struct sk_buff
*nfp_bpf_reply_drop_tag(struct nfp_app_bpf
*bpf
, u16 tag
)
146 nfp_ctrl_lock(bpf
->app
->ctrl
);
147 skb
= __nfp_bpf_reply(bpf
, tag
);
149 nfp_bpf_free_tag(bpf
, tag
);
150 nfp_ctrl_unlock(bpf
->app
->ctrl
);
155 static struct sk_buff
*
156 nfp_bpf_cmsg_wait_reply(struct nfp_app_bpf
*bpf
, enum nfp_bpf_cmsg_type type
,
162 for (i
= 0; i
< 50; i
++) {
164 skb
= nfp_bpf_reply(bpf
, tag
);
169 err
= wait_event_interruptible_timeout(bpf
->cmsg_wq
,
170 skb
= nfp_bpf_reply(bpf
, tag
),
171 msecs_to_jiffies(5000));
172 /* We didn't get a response - try last time and atomically drop
173 * the tag even if no response is matched.
176 skb
= nfp_bpf_reply_drop_tag(bpf
, tag
);
178 cmsg_warn(bpf
, "%s waiting for response to 0x%02x: %d\n",
179 err
== ERESTARTSYS
? "interrupted" : "error",
184 cmsg_warn(bpf
, "timeout waiting for response to 0x%02x\n",
186 return ERR_PTR(-ETIMEDOUT
);
192 static struct sk_buff
*
193 nfp_bpf_cmsg_communicate(struct nfp_app_bpf
*bpf
, struct sk_buff
*skb
,
194 enum nfp_bpf_cmsg_type type
, unsigned int reply_size
)
196 struct cmsg_hdr
*hdr
;
199 nfp_ctrl_lock(bpf
->app
->ctrl
);
200 tag
= nfp_bpf_alloc_tag(bpf
);
202 nfp_ctrl_unlock(bpf
->app
->ctrl
);
203 dev_kfree_skb_any(skb
);
207 hdr
= (void *)skb
->data
;
208 hdr
->ver
= CMSG_MAP_ABI_VERSION
;
210 hdr
->tag
= cpu_to_be16(tag
);
212 __nfp_app_ctrl_tx(bpf
->app
, skb
);
214 nfp_ctrl_unlock(bpf
->app
->ctrl
);
216 skb
= nfp_bpf_cmsg_wait_reply(bpf
, type
, tag
);
220 hdr
= (struct cmsg_hdr
*)skb
->data
;
221 /* 0 reply_size means caller will do the validation */
222 if (reply_size
&& skb
->len
!= reply_size
) {
223 cmsg_warn(bpf
, "cmsg drop - wrong size %d != %d!\n",
224 skb
->len
, reply_size
);
227 if (hdr
->type
!= __CMSG_REPLY(type
)) {
228 cmsg_warn(bpf
, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
229 hdr
->type
, __CMSG_REPLY(type
));
235 dev_kfree_skb_any(skb
);
236 return ERR_PTR(-EIO
);
240 nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf
*bpf
,
241 struct cmsg_reply_map_simple
*reply
)
243 static const int res_table
[] = {
244 [CMSG_RC_SUCCESS
] = 0,
245 [CMSG_RC_ERR_MAP_FD
] = -EBADFD
,
246 [CMSG_RC_ERR_MAP_NOENT
] = -ENOENT
,
247 [CMSG_RC_ERR_MAP_ERR
] = -EINVAL
,
248 [CMSG_RC_ERR_MAP_PARSE
] = -EIO
,
249 [CMSG_RC_ERR_MAP_EXIST
] = -EEXIST
,
250 [CMSG_RC_ERR_MAP_NOMEM
] = -ENOMEM
,
251 [CMSG_RC_ERR_MAP_E2BIG
] = -E2BIG
,
255 rc
= be32_to_cpu(reply
->rc
);
256 if (rc
>= ARRAY_SIZE(res_table
)) {
257 cmsg_warn(bpf
, "FW responded with invalid status: %u\n", rc
);
261 return res_table
[rc
];
265 nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf
*bpf
, struct bpf_map
*map
)
267 struct cmsg_reply_map_alloc_tbl
*reply
;
268 struct cmsg_req_map_alloc_tbl
*req
;
273 skb
= nfp_bpf_cmsg_alloc(bpf
, sizeof(*req
));
277 req
= (void *)skb
->data
;
278 req
->key_size
= cpu_to_be32(map
->key_size
);
279 req
->value_size
= cpu_to_be32(map
->value_size
);
280 req
->max_entries
= cpu_to_be32(map
->max_entries
);
281 req
->map_type
= cpu_to_be32(map
->map_type
);
284 skb
= nfp_bpf_cmsg_communicate(bpf
, skb
, CMSG_TYPE_MAP_ALLOC
,
289 reply
= (void *)skb
->data
;
290 err
= nfp_bpf_ctrl_rc_to_errno(bpf
, &reply
->reply_hdr
);
294 tid
= be32_to_cpu(reply
->tid
);
295 dev_consume_skb_any(skb
);
299 dev_kfree_skb_any(skb
);
303 void nfp_bpf_ctrl_free_map(struct nfp_app_bpf
*bpf
, struct nfp_bpf_map
*nfp_map
)
305 struct cmsg_reply_map_free_tbl
*reply
;
306 struct cmsg_req_map_free_tbl
*req
;
310 skb
= nfp_bpf_cmsg_alloc(bpf
, sizeof(*req
));
312 cmsg_warn(bpf
, "leaking map - failed to allocate msg\n");
316 req
= (void *)skb
->data
;
317 req
->tid
= cpu_to_be32(nfp_map
->tid
);
319 skb
= nfp_bpf_cmsg_communicate(bpf
, skb
, CMSG_TYPE_MAP_FREE
,
322 cmsg_warn(bpf
, "leaking map - I/O error\n");
326 reply
= (void *)skb
->data
;
327 err
= nfp_bpf_ctrl_rc_to_errno(bpf
, &reply
->reply_hdr
);
329 cmsg_warn(bpf
, "leaking map - FW responded with: %d\n", err
);
331 dev_consume_skb_any(skb
);
335 nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map
*offmap
,
336 enum nfp_bpf_cmsg_type op
,
337 u8
*key
, u8
*value
, u64 flags
, u8
*out_key
, u8
*out_value
)
339 struct nfp_bpf_map
*nfp_map
= offmap
->dev_priv
;
340 struct nfp_app_bpf
*bpf
= nfp_map
->bpf
;
341 struct bpf_map
*map
= &offmap
->map
;
342 struct cmsg_reply_map_op
*reply
;
343 struct cmsg_req_map_op
*req
;
347 /* FW messages have no space for more than 32 bits of flags */
351 skb
= nfp_bpf_cmsg_map_req_alloc(bpf
, 1);
355 req
= (void *)skb
->data
;
356 req
->tid
= cpu_to_be32(nfp_map
->tid
);
357 req
->count
= cpu_to_be32(1);
358 req
->flags
= cpu_to_be32(flags
);
362 memcpy(&req
->elem
[0].key
, key
, map
->key_size
);
364 memcpy(&req
->elem
[0].value
, value
, map
->value_size
);
366 skb
= nfp_bpf_cmsg_communicate(bpf
, skb
, op
,
367 sizeof(*reply
) + sizeof(*reply
->elem
));
371 reply
= (void *)skb
->data
;
372 err
= nfp_bpf_ctrl_rc_to_errno(bpf
, &reply
->reply_hdr
);
378 memcpy(out_key
, &reply
->elem
[0].key
, map
->key_size
);
380 memcpy(out_value
, &reply
->elem
[0].value
, map
->value_size
);
382 dev_consume_skb_any(skb
);
386 dev_kfree_skb_any(skb
);
390 int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map
*offmap
,
391 void *key
, void *value
, u64 flags
)
393 return nfp_bpf_ctrl_entry_op(offmap
, CMSG_TYPE_MAP_UPDATE
,
394 key
, value
, flags
, NULL
, NULL
);
397 int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map
*offmap
, void *key
)
399 return nfp_bpf_ctrl_entry_op(offmap
, CMSG_TYPE_MAP_DELETE
,
400 key
, NULL
, 0, NULL
, NULL
);
403 int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map
*offmap
,
404 void *key
, void *value
)
406 return nfp_bpf_ctrl_entry_op(offmap
, CMSG_TYPE_MAP_LOOKUP
,
407 key
, NULL
, 0, NULL
, value
);
410 int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map
*offmap
,
413 return nfp_bpf_ctrl_entry_op(offmap
, CMSG_TYPE_MAP_GETFIRST
,
414 NULL
, NULL
, 0, next_key
, NULL
);
417 int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map
*offmap
,
418 void *key
, void *next_key
)
420 return nfp_bpf_ctrl_entry_op(offmap
, CMSG_TYPE_MAP_GETNEXT
,
421 key
, NULL
, 0, next_key
, NULL
);
424 void nfp_bpf_ctrl_msg_rx(struct nfp_app
*app
, struct sk_buff
*skb
)
426 struct nfp_app_bpf
*bpf
= app
->priv
;
429 if (unlikely(skb
->len
< sizeof(struct cmsg_reply_map_simple
))) {
430 cmsg_warn(bpf
, "cmsg drop - too short %d!\n", skb
->len
);
434 nfp_ctrl_lock(bpf
->app
->ctrl
);
436 tag
= nfp_bpf_cmsg_get_tag(skb
);
437 if (unlikely(!test_bit(tag
, bpf
->tag_allocator
))) {
438 cmsg_warn(bpf
, "cmsg drop - no one is waiting for tag %u!\n",
443 __skb_queue_tail(&bpf
->cmsg_replies
, skb
);
444 wake_up_interruptible_all(&bpf
->cmsg_wq
);
446 nfp_ctrl_unlock(bpf
->app
->ctrl
);
450 nfp_ctrl_unlock(bpf
->app
->ctrl
);
452 dev_kfree_skb_any(skb
);