1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
4 #include <linux/kernel.h>
5 #include <linux/list.h>
6 #include <linux/netdevice.h>
8 #include "spectrum_mr_tcam.h"
11 #include "core_acl_flex_actions.h"
12 #include "spectrum_mr.h"
/* Per-driver-instance state of the multicast router TCAM layer; holds the
 * ops-specific private area allocated in mlxsw_sp_mr_tcam_init() and passed
 * to every ops callback as mr_tcam->priv.
 */
struct mlxsw_sp_mr_tcam {
	void *priv;
};
18 /* This struct maps to one RIGR2 register entry */
19 struct mlxsw_sp_mr_erif_sublist
{
20 struct list_head list
;
23 u16 erif_indices
[MLXSW_REG_RIGR2_MAX_ERIFS
];
27 struct mlxsw_sp_mr_tcam_erif_list
{
28 struct list_head erif_sublists
;
33 mlxsw_sp_mr_erif_sublist_full(struct mlxsw_sp
*mlxsw_sp
,
34 struct mlxsw_sp_mr_erif_sublist
*erif_sublist
)
36 int erif_list_entries
= MLXSW_CORE_RES_GET(mlxsw_sp
->core
,
37 MC_ERIF_LIST_ENTRIES
);
39 return erif_sublist
->num_erifs
== erif_list_entries
;
43 mlxsw_sp_mr_erif_list_init(struct mlxsw_sp_mr_tcam_erif_list
*erif_list
)
45 INIT_LIST_HEAD(&erif_list
->erif_sublists
);
48 static struct mlxsw_sp_mr_erif_sublist
*
49 mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp
*mlxsw_sp
,
50 struct mlxsw_sp_mr_tcam_erif_list
*erif_list
)
52 struct mlxsw_sp_mr_erif_sublist
*erif_sublist
;
55 erif_sublist
= kzalloc(sizeof(*erif_sublist
), GFP_KERNEL
);
57 return ERR_PTR(-ENOMEM
);
58 err
= mlxsw_sp_kvdl_alloc(mlxsw_sp
, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR
,
59 1, &erif_sublist
->rigr2_kvdl_index
);
65 list_add_tail(&erif_sublist
->list
, &erif_list
->erif_sublists
);
70 mlxsw_sp_mr_erif_sublist_destroy(struct mlxsw_sp
*mlxsw_sp
,
71 struct mlxsw_sp_mr_erif_sublist
*erif_sublist
)
73 list_del(&erif_sublist
->list
);
74 mlxsw_sp_kvdl_free(mlxsw_sp
, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR
,
75 1, erif_sublist
->rigr2_kvdl_index
);
80 mlxsw_sp_mr_erif_list_add(struct mlxsw_sp
*mlxsw_sp
,
81 struct mlxsw_sp_mr_tcam_erif_list
*erif_list
,
84 struct mlxsw_sp_mr_erif_sublist
*sublist
;
86 /* If either there is no erif_entry or the last one is full, allocate a
89 if (list_empty(&erif_list
->erif_sublists
)) {
90 sublist
= mlxsw_sp_mr_erif_sublist_create(mlxsw_sp
, erif_list
);
92 return PTR_ERR(sublist
);
93 erif_list
->kvdl_index
= sublist
->rigr2_kvdl_index
;
95 sublist
= list_last_entry(&erif_list
->erif_sublists
,
96 struct mlxsw_sp_mr_erif_sublist
,
98 sublist
->synced
= false;
99 if (mlxsw_sp_mr_erif_sublist_full(mlxsw_sp
, sublist
)) {
100 sublist
= mlxsw_sp_mr_erif_sublist_create(mlxsw_sp
,
103 return PTR_ERR(sublist
);
107 /* Add the eRIF to the last entry's last index */
108 sublist
->erif_indices
[sublist
->num_erifs
++] = erif_index
;
113 mlxsw_sp_mr_erif_list_flush(struct mlxsw_sp
*mlxsw_sp
,
114 struct mlxsw_sp_mr_tcam_erif_list
*erif_list
)
116 struct mlxsw_sp_mr_erif_sublist
*erif_sublist
, *tmp
;
118 list_for_each_entry_safe(erif_sublist
, tmp
, &erif_list
->erif_sublists
,
120 mlxsw_sp_mr_erif_sublist_destroy(mlxsw_sp
, erif_sublist
);
124 mlxsw_sp_mr_erif_list_commit(struct mlxsw_sp
*mlxsw_sp
,
125 struct mlxsw_sp_mr_tcam_erif_list
*erif_list
)
127 struct mlxsw_sp_mr_erif_sublist
*curr_sublist
;
128 char rigr2_pl
[MLXSW_REG_RIGR2_LEN
];
132 list_for_each_entry(curr_sublist
, &erif_list
->erif_sublists
, list
) {
133 if (curr_sublist
->synced
)
136 /* If the sublist is not the last one, pack the next index */
137 if (list_is_last(&curr_sublist
->list
,
138 &erif_list
->erif_sublists
)) {
139 mlxsw_reg_rigr2_pack(rigr2_pl
,
140 curr_sublist
->rigr2_kvdl_index
,
143 struct mlxsw_sp_mr_erif_sublist
*next_sublist
;
145 next_sublist
= list_next_entry(curr_sublist
, list
);
146 mlxsw_reg_rigr2_pack(rigr2_pl
,
147 curr_sublist
->rigr2_kvdl_index
,
149 next_sublist
->rigr2_kvdl_index
);
152 /* Pack all the erifs */
153 for (i
= 0; i
< curr_sublist
->num_erifs
; i
++) {
154 u16 erif_index
= curr_sublist
->erif_indices
[i
];
156 mlxsw_reg_rigr2_erif_entry_pack(rigr2_pl
, i
, true,
160 /* Write the entry */
161 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(rigr2
),
164 /* No need of a rollback here because this
165 * hardware entry should not be pointed yet.
168 curr_sublist
->synced
= true;
173 static void mlxsw_sp_mr_erif_list_move(struct mlxsw_sp_mr_tcam_erif_list
*to
,
174 struct mlxsw_sp_mr_tcam_erif_list
*from
)
176 list_splice(&from
->erif_sublists
, &to
->erif_sublists
);
177 to
->kvdl_index
= from
->kvdl_index
;
180 struct mlxsw_sp_mr_tcam_route
{
181 struct mlxsw_sp_mr_tcam_erif_list erif_list
;
182 struct mlxsw_afa_block
*afa_block
;
184 enum mlxsw_sp_mr_route_action action
;
185 struct mlxsw_sp_mr_route_key key
;
191 static struct mlxsw_afa_block
*
192 mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp
*mlxsw_sp
,
193 enum mlxsw_sp_mr_route_action route_action
,
194 u16 irif_index
, u32 counter_index
,
196 struct mlxsw_sp_mr_tcam_erif_list
*erif_list
)
198 struct mlxsw_afa_block
*afa_block
;
201 afa_block
= mlxsw_afa_block_create(mlxsw_sp
->afa
);
203 return ERR_PTR(-ENOMEM
);
205 err
= mlxsw_afa_block_append_allocated_counter(afa_block
,
210 switch (route_action
) {
211 case MLXSW_SP_MR_ROUTE_ACTION_TRAP
:
212 err
= mlxsw_afa_block_append_trap(afa_block
,
217 case MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD
:
218 case MLXSW_SP_MR_ROUTE_ACTION_FORWARD
:
219 /* If we are about to append a multicast router action, commit
222 err
= mlxsw_sp_mr_erif_list_commit(mlxsw_sp
, erif_list
);
226 err
= mlxsw_afa_block_append_mcrouter(afa_block
, irif_index
,
228 erif_list
->kvdl_index
);
232 if (route_action
== MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD
) {
233 err
= mlxsw_afa_block_append_trap_and_forward(afa_block
,
244 err
= mlxsw_afa_block_commit(afa_block
);
249 mlxsw_afa_block_destroy(afa_block
);
/* Release a flexible action block created by
 * mlxsw_sp_mr_tcam_afa_block_create().
 */
static void
mlxsw_sp_mr_tcam_afa_block_destroy(struct mlxsw_afa_block *afa_block)
{
	mlxsw_afa_block_destroy(afa_block);
}
260 mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp
*mlxsw_sp
,
261 struct mlxsw_sp_mr_tcam_erif_list
*erif_list
,
262 struct mlxsw_sp_mr_route_info
*route_info
)
267 for (i
= 0; i
< route_info
->erif_num
; i
++) {
268 u16 erif_index
= route_info
->erif_indices
[i
];
270 err
= mlxsw_sp_mr_erif_list_add(mlxsw_sp
, erif_list
,
279 mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp
*mlxsw_sp
, void *priv
,
281 struct mlxsw_sp_mr_route_params
*route_params
)
283 const struct mlxsw_sp_mr_tcam_ops
*ops
= mlxsw_sp
->mr_tcam_ops
;
284 struct mlxsw_sp_mr_tcam_route
*route
= route_priv
;
285 struct mlxsw_sp_mr_tcam
*mr_tcam
= priv
;
288 route
->key
= route_params
->key
;
289 route
->irif_index
= route_params
->value
.irif_index
;
290 route
->min_mtu
= route_params
->value
.min_mtu
;
291 route
->action
= route_params
->value
.route_action
;
293 /* Create the egress RIFs list */
294 mlxsw_sp_mr_erif_list_init(&route
->erif_list
);
295 err
= mlxsw_sp_mr_tcam_erif_populate(mlxsw_sp
, &route
->erif_list
,
296 &route_params
->value
);
298 goto err_erif_populate
;
300 /* Create the flow counter */
301 err
= mlxsw_sp_flow_counter_alloc(mlxsw_sp
, &route
->counter_index
);
303 goto err_counter_alloc
;
305 /* Create the flexible action block */
306 route
->afa_block
= mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp
,
309 route
->counter_index
,
312 if (IS_ERR(route
->afa_block
)) {
313 err
= PTR_ERR(route
->afa_block
);
314 goto err_afa_block_create
;
317 route
->priv
= kzalloc(ops
->route_priv_size
, GFP_KERNEL
);
320 goto err_route_priv_alloc
;
323 /* Write the route to the TCAM */
324 err
= ops
->route_create(mlxsw_sp
, mr_tcam
->priv
, route
->priv
,
325 &route
->key
, route
->afa_block
,
328 goto err_route_create
;
333 err_route_priv_alloc
:
334 mlxsw_sp_mr_tcam_afa_block_destroy(route
->afa_block
);
335 err_afa_block_create
:
336 mlxsw_sp_flow_counter_free(mlxsw_sp
, route
->counter_index
);
339 mlxsw_sp_mr_erif_list_flush(mlxsw_sp
, &route
->erif_list
);
343 static void mlxsw_sp_mr_tcam_route_destroy(struct mlxsw_sp
*mlxsw_sp
,
344 void *priv
, void *route_priv
)
346 const struct mlxsw_sp_mr_tcam_ops
*ops
= mlxsw_sp
->mr_tcam_ops
;
347 struct mlxsw_sp_mr_tcam_route
*route
= route_priv
;
348 struct mlxsw_sp_mr_tcam
*mr_tcam
= priv
;
350 ops
->route_destroy(mlxsw_sp
, mr_tcam
->priv
, route
->priv
, &route
->key
);
352 mlxsw_sp_mr_tcam_afa_block_destroy(route
->afa_block
);
353 mlxsw_sp_flow_counter_free(mlxsw_sp
, route
->counter_index
);
354 mlxsw_sp_mr_erif_list_flush(mlxsw_sp
, &route
->erif_list
);
357 static int mlxsw_sp_mr_tcam_route_stats(struct mlxsw_sp
*mlxsw_sp
,
358 void *route_priv
, u64
*packets
,
361 struct mlxsw_sp_mr_tcam_route
*route
= route_priv
;
363 return mlxsw_sp_flow_counter_get(mlxsw_sp
, route
->counter_index
,
368 mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp
*mlxsw_sp
,
370 enum mlxsw_sp_mr_route_action route_action
)
372 const struct mlxsw_sp_mr_tcam_ops
*ops
= mlxsw_sp
->mr_tcam_ops
;
373 struct mlxsw_sp_mr_tcam_route
*route
= route_priv
;
374 struct mlxsw_afa_block
*afa_block
;
377 /* Create a new flexible action block */
378 afa_block
= mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp
, route_action
,
380 route
->counter_index
,
383 if (IS_ERR(afa_block
))
384 return PTR_ERR(afa_block
);
386 /* Update the TCAM route entry */
387 err
= ops
->route_update(mlxsw_sp
, route
->priv
, &route
->key
, afa_block
);
391 /* Delete the old one */
392 mlxsw_sp_mr_tcam_afa_block_destroy(route
->afa_block
);
393 route
->afa_block
= afa_block
;
394 route
->action
= route_action
;
397 mlxsw_sp_mr_tcam_afa_block_destroy(afa_block
);
401 static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp
*mlxsw_sp
,
402 void *route_priv
, u16 min_mtu
)
404 const struct mlxsw_sp_mr_tcam_ops
*ops
= mlxsw_sp
->mr_tcam_ops
;
405 struct mlxsw_sp_mr_tcam_route
*route
= route_priv
;
406 struct mlxsw_afa_block
*afa_block
;
409 /* Create a new flexible action block */
410 afa_block
= mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp
,
413 route
->counter_index
,
416 if (IS_ERR(afa_block
))
417 return PTR_ERR(afa_block
);
419 /* Update the TCAM route entry */
420 err
= ops
->route_update(mlxsw_sp
, route
->priv
, &route
->key
, afa_block
);
424 /* Delete the old one */
425 mlxsw_sp_mr_tcam_afa_block_destroy(route
->afa_block
);
426 route
->afa_block
= afa_block
;
427 route
->min_mtu
= min_mtu
;
430 mlxsw_sp_mr_tcam_afa_block_destroy(afa_block
);
434 static int mlxsw_sp_mr_tcam_route_irif_update(struct mlxsw_sp
*mlxsw_sp
,
435 void *route_priv
, u16 irif_index
)
437 struct mlxsw_sp_mr_tcam_route
*route
= route_priv
;
439 if (route
->action
!= MLXSW_SP_MR_ROUTE_ACTION_TRAP
)
441 route
->irif_index
= irif_index
;
445 static int mlxsw_sp_mr_tcam_route_erif_add(struct mlxsw_sp
*mlxsw_sp
,
446 void *route_priv
, u16 erif_index
)
448 struct mlxsw_sp_mr_tcam_route
*route
= route_priv
;
451 err
= mlxsw_sp_mr_erif_list_add(mlxsw_sp
, &route
->erif_list
,
456 /* Commit the action only if the route action is not TRAP */
457 if (route
->action
!= MLXSW_SP_MR_ROUTE_ACTION_TRAP
)
458 return mlxsw_sp_mr_erif_list_commit(mlxsw_sp
,
463 static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp
*mlxsw_sp
,
464 void *route_priv
, u16 erif_index
)
466 const struct mlxsw_sp_mr_tcam_ops
*ops
= mlxsw_sp
->mr_tcam_ops
;
467 struct mlxsw_sp_mr_tcam_route
*route
= route_priv
;
468 struct mlxsw_sp_mr_erif_sublist
*erif_sublist
;
469 struct mlxsw_sp_mr_tcam_erif_list erif_list
;
470 struct mlxsw_afa_block
*afa_block
;
474 /* Create a copy of the original erif_list without the deleted entry */
475 mlxsw_sp_mr_erif_list_init(&erif_list
);
476 list_for_each_entry(erif_sublist
, &route
->erif_list
.erif_sublists
, list
) {
477 for (i
= 0; i
< erif_sublist
->num_erifs
; i
++) {
478 u16 curr_erif
= erif_sublist
->erif_indices
[i
];
480 if (curr_erif
== erif_index
)
482 err
= mlxsw_sp_mr_erif_list_add(mlxsw_sp
, &erif_list
,
485 goto err_erif_list_add
;
489 /* Create the flexible action block pointing to the new erif_list */
490 afa_block
= mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp
, route
->action
,
492 route
->counter_index
,
495 if (IS_ERR(afa_block
)) {
496 err
= PTR_ERR(afa_block
);
497 goto err_afa_block_create
;
500 /* Update the TCAM route entry */
501 err
= ops
->route_update(mlxsw_sp
, route
->priv
, &route
->key
, afa_block
);
503 goto err_route_write
;
505 mlxsw_sp_mr_tcam_afa_block_destroy(route
->afa_block
);
506 mlxsw_sp_mr_erif_list_flush(mlxsw_sp
, &route
->erif_list
);
507 route
->afa_block
= afa_block
;
508 mlxsw_sp_mr_erif_list_move(&route
->erif_list
, &erif_list
);
512 mlxsw_sp_mr_tcam_afa_block_destroy(afa_block
);
513 err_afa_block_create
:
515 mlxsw_sp_mr_erif_list_flush(mlxsw_sp
, &erif_list
);
520 mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp
*mlxsw_sp
, void *route_priv
,
521 struct mlxsw_sp_mr_route_info
*route_info
)
523 const struct mlxsw_sp_mr_tcam_ops
*ops
= mlxsw_sp
->mr_tcam_ops
;
524 struct mlxsw_sp_mr_tcam_route
*route
= route_priv
;
525 struct mlxsw_sp_mr_tcam_erif_list erif_list
;
526 struct mlxsw_afa_block
*afa_block
;
529 /* Create a new erif_list */
530 mlxsw_sp_mr_erif_list_init(&erif_list
);
531 err
= mlxsw_sp_mr_tcam_erif_populate(mlxsw_sp
, &erif_list
, route_info
);
533 goto err_erif_populate
;
535 /* Create the flexible action block pointing to the new erif_list */
536 afa_block
= mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp
,
537 route_info
->route_action
,
538 route_info
->irif_index
,
539 route
->counter_index
,
542 if (IS_ERR(afa_block
)) {
543 err
= PTR_ERR(afa_block
);
544 goto err_afa_block_create
;
547 /* Update the TCAM route entry */
548 err
= ops
->route_update(mlxsw_sp
, route
->priv
, &route
->key
, afa_block
);
550 goto err_route_write
;
552 mlxsw_sp_mr_tcam_afa_block_destroy(route
->afa_block
);
553 mlxsw_sp_mr_erif_list_flush(mlxsw_sp
, &route
->erif_list
);
554 route
->afa_block
= afa_block
;
555 mlxsw_sp_mr_erif_list_move(&route
->erif_list
, &erif_list
);
556 route
->action
= route_info
->route_action
;
557 route
->irif_index
= route_info
->irif_index
;
558 route
->min_mtu
= route_info
->min_mtu
;
562 mlxsw_sp_mr_tcam_afa_block_destroy(afa_block
);
563 err_afa_block_create
:
565 mlxsw_sp_mr_erif_list_flush(mlxsw_sp
, &erif_list
);
569 static int mlxsw_sp_mr_tcam_init(struct mlxsw_sp
*mlxsw_sp
, void *priv
)
571 const struct mlxsw_sp_mr_tcam_ops
*ops
= mlxsw_sp
->mr_tcam_ops
;
572 struct mlxsw_sp_mr_tcam
*mr_tcam
= priv
;
575 if (!MLXSW_CORE_RES_VALID(mlxsw_sp
->core
, MC_ERIF_LIST_ENTRIES
))
578 mr_tcam
->priv
= kzalloc(ops
->priv_size
, GFP_KERNEL
);
582 err
= ops
->init(mlxsw_sp
, mr_tcam
->priv
);
588 kfree(mr_tcam
->priv
);
592 static void mlxsw_sp_mr_tcam_fini(struct mlxsw_sp
*mlxsw_sp
, void *priv
)
594 const struct mlxsw_sp_mr_tcam_ops
*ops
= mlxsw_sp
->mr_tcam_ops
;
595 struct mlxsw_sp_mr_tcam
*mr_tcam
= priv
;
597 ops
->fini(mr_tcam
->priv
);
598 kfree(mr_tcam
->priv
);
601 const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops
= {
602 .priv_size
= sizeof(struct mlxsw_sp_mr_tcam
),
603 .route_priv_size
= sizeof(struct mlxsw_sp_mr_tcam_route
),
604 .init
= mlxsw_sp_mr_tcam_init
,
605 .route_create
= mlxsw_sp_mr_tcam_route_create
,
606 .route_update
= mlxsw_sp_mr_tcam_route_update
,
607 .route_stats
= mlxsw_sp_mr_tcam_route_stats
,
608 .route_action_update
= mlxsw_sp_mr_tcam_route_action_update
,
609 .route_min_mtu_update
= mlxsw_sp_mr_tcam_route_min_mtu_update
,
610 .route_irif_update
= mlxsw_sp_mr_tcam_route_irif_update
,
611 .route_erif_add
= mlxsw_sp_mr_tcam_route_erif_add
,
612 .route_erif_del
= mlxsw_sp_mr_tcam_route_erif_del
,
613 .route_destroy
= mlxsw_sp_mr_tcam_route_destroy
,
614 .fini
= mlxsw_sp_mr_tcam_fini
,