1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2017-2018 Netronome Systems, Inc. */
5 #include <linux/bitops.h>
7 #include <linux/jiffies.h>
8 #include <linux/skbuff.h>
9 #include <linux/timekeeping.h>
12 #include "../nfp_app.h"
13 #include "../nfp_net.h"
17 static struct sk_buff
*
18 nfp_bpf_cmsg_alloc(struct nfp_app_bpf
*bpf
, unsigned int size
)
22 skb
= nfp_app_ctrl_msg_alloc(bpf
->app
, size
, GFP_KERNEL
);
29 nfp_bpf_cmsg_map_req_size(struct nfp_app_bpf
*bpf
, unsigned int n
)
33 size
= sizeof(struct cmsg_req_map_op
);
34 size
+= (bpf
->cmsg_key_sz
+ bpf
->cmsg_val_sz
) * n
;
/* Allocate an skb sized for a map-op request with @n entries. */
static struct sk_buff *
nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
{
	return nfp_bpf_cmsg_alloc(bpf, nfp_bpf_cmsg_map_req_size(bpf, n));
}
46 nfp_bpf_cmsg_map_reply_size(struct nfp_app_bpf
*bpf
, unsigned int n
)
50 size
= sizeof(struct cmsg_reply_map_op
);
51 size
+= (bpf
->cmsg_key_sz
+ bpf
->cmsg_val_sz
) * n
;
57 nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf
*bpf
,
58 struct cmsg_reply_map_simple
*reply
)
60 static const int res_table
[] = {
61 [CMSG_RC_SUCCESS
] = 0,
62 [CMSG_RC_ERR_MAP_FD
] = -EBADFD
,
63 [CMSG_RC_ERR_MAP_NOENT
] = -ENOENT
,
64 [CMSG_RC_ERR_MAP_ERR
] = -EINVAL
,
65 [CMSG_RC_ERR_MAP_PARSE
] = -EIO
,
66 [CMSG_RC_ERR_MAP_EXIST
] = -EEXIST
,
67 [CMSG_RC_ERR_MAP_NOMEM
] = -ENOMEM
,
68 [CMSG_RC_ERR_MAP_E2BIG
] = -E2BIG
,
72 rc
= be32_to_cpu(reply
->rc
);
73 if (rc
>= ARRAY_SIZE(res_table
)) {
74 cmsg_warn(bpf
, "FW responded with invalid status: %u\n", rc
);
82 nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf
*bpf
, struct bpf_map
*map
)
84 struct cmsg_reply_map_alloc_tbl
*reply
;
85 struct cmsg_req_map_alloc_tbl
*req
;
90 skb
= nfp_bpf_cmsg_alloc(bpf
, sizeof(*req
));
94 req
= (void *)skb
->data
;
95 req
->key_size
= cpu_to_be32(map
->key_size
);
96 req
->value_size
= cpu_to_be32(map
->value_size
);
97 req
->max_entries
= cpu_to_be32(map
->max_entries
);
98 req
->map_type
= cpu_to_be32(map
->map_type
);
101 skb
= nfp_ccm_communicate(&bpf
->ccm
, skb
, NFP_CCM_TYPE_BPF_MAP_ALLOC
,
106 reply
= (void *)skb
->data
;
107 err
= nfp_bpf_ctrl_rc_to_errno(bpf
, &reply
->reply_hdr
);
111 tid
= be32_to_cpu(reply
->tid
);
112 dev_consume_skb_any(skb
);
116 dev_kfree_skb_any(skb
);
120 void nfp_bpf_ctrl_free_map(struct nfp_app_bpf
*bpf
, struct nfp_bpf_map
*nfp_map
)
122 struct cmsg_reply_map_free_tbl
*reply
;
123 struct cmsg_req_map_free_tbl
*req
;
127 skb
= nfp_bpf_cmsg_alloc(bpf
, sizeof(*req
));
129 cmsg_warn(bpf
, "leaking map - failed to allocate msg\n");
133 req
= (void *)skb
->data
;
134 req
->tid
= cpu_to_be32(nfp_map
->tid
);
136 skb
= nfp_ccm_communicate(&bpf
->ccm
, skb
, NFP_CCM_TYPE_BPF_MAP_FREE
,
139 cmsg_warn(bpf
, "leaking map - I/O error\n");
143 reply
= (void *)skb
->data
;
144 err
= nfp_bpf_ctrl_rc_to_errno(bpf
, &reply
->reply_hdr
);
146 cmsg_warn(bpf
, "leaking map - FW responded with: %d\n", err
);
148 dev_consume_skb_any(skb
);
152 nfp_bpf_ctrl_req_key(struct nfp_app_bpf
*bpf
, struct cmsg_req_map_op
*req
,
155 return &req
->data
[bpf
->cmsg_key_sz
* n
+ bpf
->cmsg_val_sz
* n
];
159 nfp_bpf_ctrl_req_val(struct nfp_app_bpf
*bpf
, struct cmsg_req_map_op
*req
,
162 return &req
->data
[bpf
->cmsg_key_sz
* (n
+ 1) + bpf
->cmsg_val_sz
* n
];
166 nfp_bpf_ctrl_reply_key(struct nfp_app_bpf
*bpf
, struct cmsg_reply_map_op
*reply
,
169 return &reply
->data
[bpf
->cmsg_key_sz
* n
+ bpf
->cmsg_val_sz
* n
];
173 nfp_bpf_ctrl_reply_val(struct nfp_app_bpf
*bpf
, struct cmsg_reply_map_op
*reply
,
176 return &reply
->data
[bpf
->cmsg_key_sz
* (n
+ 1) + bpf
->cmsg_val_sz
* n
];
179 static bool nfp_bpf_ctrl_op_cache_invalidate(enum nfp_ccm_type op
)
181 return op
== NFP_CCM_TYPE_BPF_MAP_UPDATE
||
182 op
== NFP_CCM_TYPE_BPF_MAP_DELETE
;
185 static bool nfp_bpf_ctrl_op_cache_capable(enum nfp_ccm_type op
)
187 return op
== NFP_CCM_TYPE_BPF_MAP_LOOKUP
||
188 op
== NFP_CCM_TYPE_BPF_MAP_GETNEXT
;
191 static bool nfp_bpf_ctrl_op_cache_fill(enum nfp_ccm_type op
)
193 return op
== NFP_CCM_TYPE_BPF_MAP_GETFIRST
||
194 op
== NFP_CCM_TYPE_BPF_MAP_GETNEXT
;
198 nfp_bpf_ctrl_op_cache_get(struct nfp_bpf_map
*nfp_map
, enum nfp_ccm_type op
,
199 const u8
*key
, u8
*out_key
, u8
*out_value
,
202 struct bpf_map
*map
= &nfp_map
->offmap
->map
;
203 struct nfp_app_bpf
*bpf
= nfp_map
->bpf
;
204 unsigned int i
, count
, n_entries
;
205 struct cmsg_reply_map_op
*reply
;
207 n_entries
= nfp_bpf_ctrl_op_cache_fill(op
) ? bpf
->cmsg_cache_cnt
: 1;
209 spin_lock(&nfp_map
->cache_lock
);
210 *cache_gen
= nfp_map
->cache_gen
;
211 if (nfp_map
->cache_blockers
)
214 if (nfp_bpf_ctrl_op_cache_invalidate(op
))
216 if (!nfp_bpf_ctrl_op_cache_capable(op
))
221 if (nfp_map
->cache_to
< ktime_get_ns())
222 goto exit_invalidate
;
224 reply
= (void *)nfp_map
->cache
->data
;
225 count
= be32_to_cpu(reply
->count
);
227 for (i
= 0; i
< count
; i
++) {
230 cached_key
= nfp_bpf_ctrl_reply_key(bpf
, reply
, i
);
231 if (memcmp(cached_key
, key
, map
->key_size
))
234 if (op
== NFP_CCM_TYPE_BPF_MAP_LOOKUP
)
235 memcpy(out_value
, nfp_bpf_ctrl_reply_val(bpf
, reply
, i
),
237 if (op
== NFP_CCM_TYPE_BPF_MAP_GETNEXT
) {
242 nfp_bpf_ctrl_reply_key(bpf
, reply
, i
+ 1),
252 nfp_map
->cache_blockers
++;
254 dev_consume_skb_any(nfp_map
->cache
);
255 nfp_map
->cache
= NULL
;
257 spin_unlock(&nfp_map
->cache_lock
);
262 nfp_bpf_ctrl_op_cache_put(struct nfp_bpf_map
*nfp_map
, enum nfp_ccm_type op
,
263 struct sk_buff
*skb
, u32 cache_gen
)
265 bool blocker
, filler
;
267 blocker
= nfp_bpf_ctrl_op_cache_invalidate(op
);
268 filler
= nfp_bpf_ctrl_op_cache_fill(op
);
269 if (blocker
|| filler
) {
273 to
= ktime_get_ns() + NFP_BPF_MAP_CACHE_TIME_NS
;
275 spin_lock(&nfp_map
->cache_lock
);
277 nfp_map
->cache_blockers
--;
278 nfp_map
->cache_gen
++;
280 if (filler
&& !nfp_map
->cache_blockers
&&
281 nfp_map
->cache_gen
== cache_gen
) {
282 nfp_map
->cache_to
= to
;
283 swap(nfp_map
->cache
, skb
);
285 spin_unlock(&nfp_map
->cache_lock
);
288 dev_consume_skb_any(skb
);
292 nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map
*offmap
, enum nfp_ccm_type op
,
293 u8
*key
, u8
*value
, u64 flags
, u8
*out_key
, u8
*out_value
)
295 struct nfp_bpf_map
*nfp_map
= offmap
->dev_priv
;
296 unsigned int n_entries
, reply_entries
, count
;
297 struct nfp_app_bpf
*bpf
= nfp_map
->bpf
;
298 struct bpf_map
*map
= &offmap
->map
;
299 struct cmsg_reply_map_op
*reply
;
300 struct cmsg_req_map_op
*req
;
305 /* FW messages have no space for more than 32 bits of flags */
309 /* Handle op cache */
310 n_entries
= nfp_bpf_ctrl_op_cache_get(nfp_map
, op
, key
, out_key
,
311 out_value
, &cache_gen
);
315 skb
= nfp_bpf_cmsg_map_req_alloc(bpf
, 1);
321 req
= (void *)skb
->data
;
322 req
->tid
= cpu_to_be32(nfp_map
->tid
);
323 req
->count
= cpu_to_be32(n_entries
);
324 req
->flags
= cpu_to_be32(flags
);
328 memcpy(nfp_bpf_ctrl_req_key(bpf
, req
, 0), key
, map
->key_size
);
330 memcpy(nfp_bpf_ctrl_req_val(bpf
, req
, 0), value
,
333 skb
= nfp_ccm_communicate(&bpf
->ccm
, skb
, op
, 0);
339 if (skb
->len
< sizeof(*reply
)) {
340 cmsg_warn(bpf
, "cmsg drop - type 0x%02x too short %d!\n",
346 reply
= (void *)skb
->data
;
347 count
= be32_to_cpu(reply
->count
);
348 err
= nfp_bpf_ctrl_rc_to_errno(bpf
, &reply
->reply_hdr
);
349 /* FW responds with message sized to hold the good entries,
350 * plus one extra entry if there was an error.
352 reply_entries
= count
+ !!err
;
353 if (n_entries
> 1 && count
)
358 if (skb
->len
!= nfp_bpf_cmsg_map_reply_size(bpf
, reply_entries
)) {
359 cmsg_warn(bpf
, "cmsg drop - type 0x%02x too short %d for %d entries!\n",
360 op
, skb
->len
, reply_entries
);
367 memcpy(out_key
, nfp_bpf_ctrl_reply_key(bpf
, reply
, 0),
370 memcpy(out_value
, nfp_bpf_ctrl_reply_val(bpf
, reply
, 0),
373 nfp_bpf_ctrl_op_cache_put(nfp_map
, op
, skb
, cache_gen
);
377 dev_kfree_skb_any(skb
);
379 nfp_bpf_ctrl_op_cache_put(nfp_map
, op
, NULL
, cache_gen
);
383 int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map
*offmap
,
384 void *key
, void *value
, u64 flags
)
386 return nfp_bpf_ctrl_entry_op(offmap
, NFP_CCM_TYPE_BPF_MAP_UPDATE
,
387 key
, value
, flags
, NULL
, NULL
);
390 int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map
*offmap
, void *key
)
392 return nfp_bpf_ctrl_entry_op(offmap
, NFP_CCM_TYPE_BPF_MAP_DELETE
,
393 key
, NULL
, 0, NULL
, NULL
);
396 int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map
*offmap
,
397 void *key
, void *value
)
399 return nfp_bpf_ctrl_entry_op(offmap
, NFP_CCM_TYPE_BPF_MAP_LOOKUP
,
400 key
, NULL
, 0, NULL
, value
);
403 int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map
*offmap
,
406 return nfp_bpf_ctrl_entry_op(offmap
, NFP_CCM_TYPE_BPF_MAP_GETFIRST
,
407 NULL
, NULL
, 0, next_key
, NULL
);
410 int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map
*offmap
,
411 void *key
, void *next_key
)
413 return nfp_bpf_ctrl_entry_op(offmap
, NFP_CCM_TYPE_BPF_MAP_GETNEXT
,
414 key
, NULL
, 0, next_key
, NULL
);
/* Minimum control channel MTU: must fit a single-entry request and reply. */
unsigned int nfp_bpf_ctrl_cmsg_min_mtu(struct nfp_app_bpf *bpf)
{
	return max(nfp_bpf_cmsg_map_req_size(bpf, 1),
		   nfp_bpf_cmsg_map_reply_size(bpf, 1));
}
423 unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf
*bpf
)
425 return max3(NFP_NET_DEFAULT_MTU
,
426 nfp_bpf_cmsg_map_req_size(bpf
, NFP_BPF_MAP_CACHE_CNT
),
427 nfp_bpf_cmsg_map_reply_size(bpf
, NFP_BPF_MAP_CACHE_CNT
));
430 unsigned int nfp_bpf_ctrl_cmsg_cache_cnt(struct nfp_app_bpf
*bpf
)
432 unsigned int mtu
, req_max
, reply_max
, entry_sz
;
434 mtu
= bpf
->app
->ctrl
->dp
.mtu
;
435 entry_sz
= bpf
->cmsg_key_sz
+ bpf
->cmsg_val_sz
;
436 req_max
= (mtu
- sizeof(struct cmsg_req_map_op
)) / entry_sz
;
437 reply_max
= (mtu
- sizeof(struct cmsg_reply_map_op
)) / entry_sz
;
439 return min3(req_max
, reply_max
, NFP_BPF_MAP_CACHE_CNT
);
442 void nfp_bpf_ctrl_msg_rx(struct nfp_app
*app
, struct sk_buff
*skb
)
444 struct nfp_app_bpf
*bpf
= app
->priv
;
446 if (unlikely(skb
->len
< sizeof(struct cmsg_reply_map_simple
))) {
447 cmsg_warn(bpf
, "cmsg drop - too short %d!\n", skb
->len
);
448 dev_kfree_skb_any(skb
);
452 if (nfp_ccm_get_type(skb
) == NFP_CCM_TYPE_BPF_BPF_EVENT
) {
453 if (!nfp_bpf_event_output(bpf
, skb
->data
, skb
->len
))
454 dev_consume_skb_any(skb
);
456 dev_kfree_skb_any(skb
);
459 nfp_ccm_rx(&bpf
->ccm
, skb
);
463 nfp_bpf_ctrl_msg_rx_raw(struct nfp_app
*app
, const void *data
, unsigned int len
)
465 const struct nfp_ccm_hdr
*hdr
= data
;
466 struct nfp_app_bpf
*bpf
= app
->priv
;
468 if (unlikely(len
< sizeof(struct cmsg_reply_map_simple
))) {
469 cmsg_warn(bpf
, "cmsg drop - too short %d!\n", len
);
473 if (hdr
->type
== NFP_CCM_TYPE_BPF_BPF_EVENT
)
474 nfp_bpf_event_output(bpf
, data
, len
);
476 cmsg_warn(bpf
, "cmsg drop - msg type %d with raw buffer!\n",