// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/hash.h>
#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/math64.h>
#include <linux/vmalloc.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_app.h"
struct nfp_mask_id_table {
        struct hlist_node link;
        u32 hash_key;
        u32 ref_cnt;
        u8 mask_id;
};

struct nfp_fl_flow_table_cmp_arg {
        struct net_device *netdev;
        unsigned long cookie;
};

struct nfp_fl_stats_ctx_to_flow {
        struct rhash_head ht_node;
        u32 stats_cxt;
        struct nfp_fl_payload *flow;
};
static const struct rhashtable_params stats_ctx_table_params = {
        .key_offset     = offsetof(struct nfp_fl_stats_ctx_to_flow, stats_cxt),
        .head_offset    = offsetof(struct nfp_fl_stats_ctx_to_flow, ht_node),
        .key_len        = sizeof(u32),
};
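
/* Released stats context IDs are recycled through a circular buffer
 * (priv->stats_ids.free_list): freed IDs are pushed at the head and
 * reallocated from the tail.
 */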
static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
{
        struct nfp_flower_priv *priv = app->priv;
        struct circ_buf *ring;

        ring = &priv->stats_ids.free_list;
        /* Check if buffer is full. */
        if (!CIRC_SPACE(ring->head, ring->tail,
                        priv->stats_ring_size * NFP_FL_STATS_ELEM_RS -
                        NFP_FL_STATS_ELEM_RS + 1))
                return -ENOBUFS;

        memcpy(&ring->buf[ring->head], &stats_context_id, NFP_FL_STATS_ELEM_RS);
        ring->head = (ring->head + NFP_FL_STATS_ELEM_RS) %
                     (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);

        return 0;
}
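
/* Allocate a stats context ID. Never-used IDs are handed out first,
 * cycling through the memory units for each context index; the ID packs
 * both fields:
 *
 *   id = FIELD_PREP(NFP_FL_STAT_ID_STAT, ctx) |
 *        FIELD_PREP(NFP_FL_STAT_ID_MU_NUM, mem_unit);
 *
 * Once the unallocated pool is exhausted, IDs are recycled from the
 * free-list ring.
 */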
static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
{
        struct nfp_flower_priv *priv = app->priv;
        u32 freed_stats_id, temp_stats_id;
        struct circ_buf *ring;

        ring = &priv->stats_ids.free_list;
        freed_stats_id = priv->stats_ring_size;
        /* Check for unallocated entries first. */
        if (priv->stats_ids.init_unalloc > 0) {
                *stats_context_id =
                        FIELD_PREP(NFP_FL_STAT_ID_STAT,
                                   priv->stats_ids.init_unalloc - 1) |
                        FIELD_PREP(NFP_FL_STAT_ID_MU_NUM,
                                   priv->active_mem_unit);

                if (++priv->active_mem_unit == priv->total_mem_units) {
                        priv->stats_ids.init_unalloc--;
                        priv->active_mem_unit = 0;
                }

                return 0;
        }

        /* Check if buffer is empty. */
        if (ring->head == ring->tail) {
                *stats_context_id = freed_stats_id;
                return -ENOENT;
        }

        memcpy(&temp_stats_id, &ring->buf[ring->tail], NFP_FL_STATS_ELEM_RS);
        *stats_context_id = temp_stats_id;
        memcpy(&ring->buf[ring->tail], &freed_stats_id, NFP_FL_STATS_ELEM_RS);
        ring->tail = (ring->tail + NFP_FL_STATS_ELEM_RS) %
                     (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);

        return 0;
}
/* Must be called with either RTNL or rcu_read_lock */
struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
                           struct net_device *netdev)
{
        struct nfp_fl_flow_table_cmp_arg flower_cmp_arg;
        struct nfp_flower_priv *priv = app->priv;

        flower_cmp_arg.netdev = netdev;
        flower_cmp_arg.cookie = tc_flower_cookie;

        return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg,
                                      nfp_flower_table_params);
}
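
/* Process a flow-stats control message from the firmware: each
 * nfp_fl_stats_frame carries a stats context ID plus packet and byte
 * deltas, accumulated into priv->stats[] under priv->stats_lock.
 */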
void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb)
{
        unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_stats_frame *stats;
        unsigned char *msg;
        u32 ctx_id;
        int i;

        msg = nfp_flower_cmsg_get_data(skb);

        spin_lock(&priv->stats_lock);
        for (i = 0; i < msg_len / sizeof(*stats); i++) {
                stats = (struct nfp_fl_stats_frame *)msg + i;
                ctx_id = be32_to_cpu(stats->stats_con_id);
                priv->stats[ctx_id].pkts += be32_to_cpu(stats->pkt_count);
                priv->stats[ctx_id].bytes += be64_to_cpu(stats->byte_count);
                priv->stats[ctx_id].used = jiffies;
        }
        spin_unlock(&priv->stats_lock);
}
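
/* Return a mask ID to the free-list ring and timestamp it so that
 * nfp_mask_alloc() can defer re-use until NFP_FL_MASK_REUSE_TIME_NS
 * has elapsed.
 */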
static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
{
        struct nfp_flower_priv *priv = app->priv;
        struct circ_buf *ring;

        ring = &priv->mask_ids.mask_id_free_list;
        /* Checking if buffer is full. */
        if (CIRC_SPACE(ring->head, ring->tail, NFP_FLOWER_MASK_ENTRY_RS) == 0)
                return -ENOBUFS;

        memcpy(&ring->buf[ring->head], &mask_id, NFP_FLOWER_MASK_ELEMENT_RS);
        ring->head = (ring->head + NFP_FLOWER_MASK_ELEMENT_RS) %
                     (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);

        priv->mask_ids.last_used[mask_id] = ktime_get();

        return 0;
}
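
/* Allocate a mask ID: hand out never-used IDs first, then fall back to
 * the oldest recycled entry, but only once its reuse timeout has expired.
 */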
static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id)
{
        struct nfp_flower_priv *priv = app->priv;
        ktime_t reuse_timeout;
        struct circ_buf *ring;
        u8 temp_id, freed_id;

        ring = &priv->mask_ids.mask_id_free_list;
        freed_id = NFP_FLOWER_MASK_ENTRY_RS - 1;
        /* Checking for unallocated entries first. */
        if (priv->mask_ids.init_unallocated > 0) {
                *mask_id = priv->mask_ids.init_unallocated;
                priv->mask_ids.init_unallocated--;
                return 0;
        }

        /* Checking if buffer is empty. */
        if (ring->head == ring->tail)
                goto err_not_found;

        memcpy(&temp_id, &ring->buf[ring->tail], NFP_FLOWER_MASK_ELEMENT_RS);
        *mask_id = temp_id;

        reuse_timeout = ktime_add_ns(priv->mask_ids.last_used[*mask_id],
                                     NFP_FL_MASK_REUSE_TIME_NS);

        if (ktime_before(ktime_get(), reuse_timeout))
                goto err_not_found;

        memcpy(&ring->buf[ring->tail], &freed_id, NFP_FLOWER_MASK_ELEMENT_RS);
        ring->tail = (ring->tail + NFP_FLOWER_MASK_ELEMENT_RS) %
                     (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);

        return 0;

err_not_found:
        *mask_id = freed_id;
        return -ENOENT;
}
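
/* Allocate a mask ID and add a table entry keyed on the jhash of the mask
 * bytes. Returns the mask ID (a u8 widened to int) or a negative errno.
 */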
static int
nfp_add_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_mask_id_table *mask_entry;
        unsigned long hash_key;
        u8 mask_id;

        if (nfp_mask_alloc(app, &mask_id))
                return -ENOENT;

        mask_entry = kmalloc(sizeof(*mask_entry), GFP_KERNEL);
        if (!mask_entry) {
                nfp_release_mask_id(app, mask_id);
                return -ENOMEM;
        }

        INIT_HLIST_NODE(&mask_entry->link);
        mask_entry->mask_id = mask_id;
        hash_key = jhash(mask_data, mask_len, priv->mask_id_seed);
        mask_entry->hash_key = hash_key;
        mask_entry->ref_cnt = 1;
        hash_add(priv->mask_table, &mask_entry->link, hash_key);

        return mask_id;
}
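
/* Look up a mask entry by the jhash of its bytes. Note that the match is
 * on the 32-bit hash value alone, so two distinct masks that happen to
 * collide would share an entry.
 */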
static struct nfp_mask_id_table *
nfp_search_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_mask_id_table *mask_entry;
        unsigned long hash_key;

        hash_key = jhash(mask_data, mask_len, priv->mask_id_seed);

        hash_for_each_possible(priv->mask_table, mask_entry, link, hash_key)
                if (mask_entry->hash_key == hash_key)
                        return mask_entry;

        return NULL;
}
static int
nfp_find_in_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
        struct nfp_mask_id_table *mask_entry;

        mask_entry = nfp_search_mask_table(app, mask_data, mask_len);
        if (!mask_entry)
                return -ENOENT;

        mask_entry->ref_cnt++;

        /* Casting u8 to int for later use. */
        return mask_entry->mask_id;
}
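
/* Find or create a mask ID for mask_data, taking a reference either way.
 * NFP_FL_META_FLAG_MANAGE_MASK is set in *meta_flags only when a new
 * entry had to be created. Returns true on success.
 */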
static bool
nfp_check_mask_add(struct nfp_app *app, char *mask_data, u32 mask_len,
                   u8 *meta_flags, u8 *mask_id)
{
        int id;

        id = nfp_find_in_mask_table(app, mask_data, mask_len);
        if (id < 0) {
                id = nfp_add_mask_table(app, mask_data, mask_len);
                if (id < 0)
                        return false;
                *meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
        }
        *mask_id = id;

        return true;
}
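
/* Drop a reference on the mask entry matching mask_data; the last
 * reference frees the entry and releases its mask ID. Returns false if
 * no matching entry exists.
 */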
static bool
nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
                      u8 *meta_flags, u8 *mask_id)
{
        struct nfp_mask_id_table *mask_entry;

        mask_entry = nfp_search_mask_table(app, mask_data, mask_len);
        if (!mask_entry)
                return false;

        *mask_id = mask_entry->mask_id;
        mask_entry->ref_cnt--;
        if (!mask_entry->ref_cnt) {
                hash_del(&mask_entry->link);
                nfp_release_mask_id(app, *mask_id);
                kfree(mask_entry);
                if (meta_flags)
                        *meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
        }

        return true;
}
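
/* Allocate all per-flow metadata for a new offload: a stats context, the
 * stats-ctx-to-flow mapping, and a mask ID. Each failure path unwinds
 * the steps already taken via the err_* labels below.
 */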
int nfp_compile_flow_metadata(struct nfp_app *app,
                              struct flow_cls_offload *flow,
                              struct nfp_fl_payload *nfp_flow,
                              struct net_device *netdev,
                              struct netlink_ext_ack *extack)
{
        struct nfp_fl_stats_ctx_to_flow *ctx_entry;
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_payload *check_entry;
        u8 new_mask_id;
        u32 stats_cxt;
        int err;

        err = nfp_get_stats_entry(app, &stats_cxt);
        if (err) {
                NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate new stats context");
                return err;
        }

        nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
        nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
        nfp_flow->ingress_dev = netdev;

        ctx_entry = kzalloc(sizeof(*ctx_entry), GFP_KERNEL);
        if (!ctx_entry) {
                err = -ENOMEM;
                goto err_release_stats;
        }

        ctx_entry->stats_cxt = stats_cxt;
        ctx_entry->flow = nfp_flow;

        if (rhashtable_insert_fast(&priv->stats_ctx_table, &ctx_entry->ht_node,
                                   stats_ctx_table_params)) {
                err = -ENOMEM;
                goto err_free_ctx_entry;
        }

        if (!nfp_check_mask_add(app, nfp_flow->mask_data,
                                nfp_flow->meta.mask_len,
                                &nfp_flow->meta.flags, &new_mask_id)) {
                NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate a new mask id");
                if (nfp_release_stats_entry(app, stats_cxt)) {
                        NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release stats context");
                        err = -EINVAL;
                        goto err_remove_rhash;
                }
                err = -ENOENT;
                goto err_remove_rhash;
        }

        nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
        priv->flower_version++;

        /* Update flow payload with mask ids. */
        nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
        priv->stats[stats_cxt].pkts = 0;
        priv->stats[stats_cxt].bytes = 0;
        priv->stats[stats_cxt].used = jiffies;

        check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev);
        if (check_entry) {
                NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot offload duplicate flow entry");
                if (nfp_release_stats_entry(app, stats_cxt)) {
                        NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release stats context");
                        err = -EINVAL;
                        goto err_remove_mask;
                }

                if (!nfp_check_mask_remove(app, nfp_flow->mask_data,
                                           nfp_flow->meta.mask_len,
                                           NULL, &new_mask_id)) {
                        NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release mask id");
                        err = -EINVAL;
                        goto err_remove_mask;
                }

                err = -EEXIST;
                goto err_remove_mask;
        }

        return 0;

err_remove_mask:
        nfp_check_mask_remove(app, nfp_flow->mask_data, nfp_flow->meta.mask_len,
                              NULL, &new_mask_id);
err_remove_rhash:
        WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
                                            &ctx_entry->ht_node,
                                            stats_ctx_table_params));
err_free_ctx_entry:
        kfree(ctx_entry);
err_release_stats:
        nfp_release_stats_entry(app, stats_cxt);

        return err;
}
void __nfp_modify_flow_metadata(struct nfp_flower_priv *priv,
                                struct nfp_fl_payload *nfp_flow)
{
        nfp_flow->meta.flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;
        nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
        priv->flower_version++;
}
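
/* Drop this flow's mask reference, then release its stats context ID and
 * the corresponding stats-ctx-to-flow table entry.
 */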
int nfp_modify_flow_metadata(struct nfp_app *app,
                             struct nfp_fl_payload *nfp_flow)
{
        struct nfp_fl_stats_ctx_to_flow *ctx_entry;
        struct nfp_flower_priv *priv = app->priv;
        u8 new_mask_id = 0;
        u32 temp_ctx_id;

        __nfp_modify_flow_metadata(priv, nfp_flow);

        nfp_check_mask_remove(app, nfp_flow->mask_data,
                              nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
                              &new_mask_id);

        /* Update flow payload with mask ids. */
        nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;

        /* Release the stats ctx id and ctx to flow table entry. */
        temp_ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

        ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &temp_ctx_id,
                                           stats_ctx_table_params);
        if (!ctx_entry)
                return -ENOENT;

        WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
                                            &ctx_entry->ht_node,
                                            stats_ctx_table_params));
        kfree(ctx_entry);

        return nfp_release_stats_entry(app, temp_ctx_id);
}
struct nfp_fl_payload *
nfp_flower_get_fl_payload_from_ctx(struct nfp_app *app, u32 ctx_id)
{
        struct nfp_fl_stats_ctx_to_flow *ctx_entry;
        struct nfp_flower_priv *priv = app->priv;

        ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &ctx_id,
                                           stats_ctx_table_params);
        if (!ctx_entry)
                return NULL;

        return ctx_entry->flow;
}
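
/* The flow table is effectively keyed on (ingress netdev, TC cookie):
 * both hash functions hash the cookie only, while the compare function
 * also checks the netdev, so equal cookies on different devices hash to
 * the same bucket but remain distinct entries.
 */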
static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
                            const void *obj)
{
        const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key;
        const struct nfp_fl_payload *flow_entry = obj;

        if (flow_entry->ingress_dev == cmp_arg->netdev)
                return flow_entry->tc_flower_cookie != cmp_arg->cookie;

        return 1;
}
static u32 nfp_fl_obj_hashfn(const void *data, u32 len, u32 seed)
{
        const struct nfp_fl_payload *flower_entry = data;

        return jhash2((u32 *)&flower_entry->tc_flower_cookie,
                      sizeof(flower_entry->tc_flower_cookie) / sizeof(u32),
                      seed);
}
static u32 nfp_fl_key_hashfn(const void *data, u32 len, u32 seed)
{
        const struct nfp_fl_flow_table_cmp_arg *cmp_arg = data;

        return jhash2((u32 *)&cmp_arg->cookie,
                      sizeof(cmp_arg->cookie) / sizeof(u32), seed);
}
const struct rhashtable_params nfp_flower_table_params = {
        .head_offset            = offsetof(struct nfp_fl_payload, fl_node),
        .hashfn                 = nfp_fl_key_hashfn,
        .obj_cmpfn              = nfp_fl_obj_cmpfn,
        .obj_hashfn             = nfp_fl_obj_hashfn,
        .automatic_shrinking    = true,
};
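
/* Set up all metadata state: the flow and stats-ctx rhashtables, the mask
 * ID free list with its reuse timestamps, the stats ID free list, and the
 * stats array sized for host_ctx_count contexts across host_num_mems
 * memory units.
 */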
int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
                             unsigned int host_num_mems)
{
        struct nfp_flower_priv *priv = app->priv;
        int err, stats_size;

        hash_init(priv->mask_table);

        err = rhashtable_init(&priv->flow_table, &nfp_flower_table_params);
        if (err)
                return err;

        err = rhashtable_init(&priv->stats_ctx_table, &stats_ctx_table_params);
        if (err)
                goto err_free_flow_table;

        get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));

        /* Init ring buffer and unallocated mask_ids. */
        priv->mask_ids.mask_id_free_list.buf =
                kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
                              NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
        if (!priv->mask_ids.mask_id_free_list.buf)
                goto err_free_stats_ctx_table;

        priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;

        /* Init timestamps for mask id. */
        priv->mask_ids.last_used =
                kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
                              sizeof(*priv->mask_ids.last_used), GFP_KERNEL);
        if (!priv->mask_ids.last_used)
                goto err_free_mask_id;

        /* Init ring buffer and unallocated stats_ids. */
        priv->stats_ids.free_list.buf =
                vmalloc(array_size(NFP_FL_STATS_ELEM_RS,
                                   priv->stats_ring_size));
        if (!priv->stats_ids.free_list.buf)
                goto err_free_last_used;

        priv->stats_ids.init_unalloc = div_u64(host_ctx_count, host_num_mems);

        stats_size = FIELD_PREP(NFP_FL_STAT_ID_STAT, host_ctx_count) |
                     FIELD_PREP(NFP_FL_STAT_ID_MU_NUM, host_num_mems - 1);
        priv->stats = kvmalloc_array(stats_size, sizeof(struct nfp_fl_stats),
                                     GFP_KERNEL);
        if (!priv->stats)
                goto err_free_ring_buf;

        spin_lock_init(&priv->stats_lock);

        return 0;

err_free_ring_buf:
        vfree(priv->stats_ids.free_list.buf);
err_free_last_used:
        kfree(priv->mask_ids.last_used);
err_free_mask_id:
        kfree(priv->mask_ids.mask_id_free_list.buf);
err_free_stats_ctx_table:
        rhashtable_destroy(&priv->stats_ctx_table);
err_free_flow_table:
        rhashtable_destroy(&priv->flow_table);
        return -ENOMEM;
}
void nfp_flower_metadata_cleanup(struct nfp_app *app)
{
        struct nfp_flower_priv *priv = app->priv;

        if (!priv)
                return;

        rhashtable_free_and_destroy(&priv->flow_table,
                                    nfp_check_rhashtable_empty, NULL);
        rhashtable_free_and_destroy(&priv->stats_ctx_table,
                                    nfp_check_rhashtable_empty, NULL);
        kvfree(priv->stats);
        kfree(priv->mask_ids.mask_id_free_list.buf);
        kfree(priv->mask_ids.last_used);
        vfree(priv->stats_ids.free_list.buf);
}