// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <net/net_namespace.h>
#include <net/tc_act/tc_vlan.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
#include "spectrum_acl_tcam.h"
struct mlxsw_sp_acl {
        struct mlxsw_sp *mlxsw_sp;
        struct mlxsw_afk *afk;
        struct mlxsw_sp_fid *dummy_fid;
        struct rhashtable ruleset_ht;
        struct list_head rules;
        struct mutex rules_lock; /* Protects rules list */
        struct {
                struct delayed_work dw;
                unsigned long interval; /* ms */
#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
        } rule_activity_update;
        struct mlxsw_sp_acl_tcam tcam;
};
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
{
        return acl->afk;
}
struct mlxsw_sp_acl_block_binding {
        struct list_head list;
        struct net_device *dev;
        struct mlxsw_sp_port *mlxsw_sp_port;
        bool ingress;
};

struct mlxsw_sp_acl_ruleset_ht_key {
        struct mlxsw_sp_acl_block *block;
        u32 chain_index;
        const struct mlxsw_sp_acl_profile_ops *ops;
};

struct mlxsw_sp_acl_ruleset {
        struct rhash_head ht_node; /* Member of acl HT */
        struct mlxsw_sp_acl_ruleset_ht_key ht_key;
        struct rhashtable rule_ht;
        unsigned int ref_count;
        unsigned long priv[];
        /* priv has to be always the last item */
};

struct mlxsw_sp_acl_rule {
        struct rhash_head ht_node; /* Member of rule HT */
        struct list_head list;
        unsigned long cookie; /* HT key */
        struct mlxsw_sp_acl_ruleset *ruleset;
        struct mlxsw_sp_acl_rule_info *rulei;
        u64 last_used;
        u64 last_packets;
        u64 last_bytes;
        unsigned long priv[];
        /* priv has to be always the last item */
};
static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params = {
        .key_len = sizeof(struct mlxsw_sp_acl_ruleset_ht_key),
        .key_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_key),
        .head_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_node),
        .automatic_shrinking = true,
};

static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = {
        .key_len = sizeof(unsigned long),
        .key_offset = offsetof(struct mlxsw_sp_acl_rule, cookie),
        .head_offset = offsetof(struct mlxsw_sp_acl_rule, ht_node),
        .automatic_shrinking = true,
};
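/* For illustration (not part of the driver): rulesets are hashed by the
 * (block, chain_index, ops) triple, while rules within a ruleset are hashed
 * directly by their TC cookie, so a hypothetical caller holding a
 * flow_cls_offload "f" could look a rule up with nothing but the cookie:
 *
 *      unsigned long cookie = f->cookie;
 *      rule = rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
 *                                    mlxsw_sp_acl_rule_ht_params);
 *
 * which is what mlxsw_sp_acl_rule_lookup() below does.
 */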
struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
{
        return mlxsw_sp->acl->dummy_fid;
}

struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block)
{
        return block->mlxsw_sp;
}

unsigned int
mlxsw_sp_acl_block_rule_count(const struct mlxsw_sp_acl_block *block)
{
        return block ? block->rule_count : 0;
}
void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block)
{
        if (block)
                block->disable_count++;
}

void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block)
{
        if (block)
                block->disable_count--;
}
bool mlxsw_sp_acl_block_disabled(const struct mlxsw_sp_acl_block *block)
{
        return block->disable_count;
}

bool mlxsw_sp_acl_block_is_egress_bound(const struct mlxsw_sp_acl_block *block)
{
        return block->egress_binding_count;
}

bool mlxsw_sp_acl_block_is_ingress_bound(const struct mlxsw_sp_acl_block *block)
{
        return block->ingress_binding_count;
}

bool mlxsw_sp_acl_block_is_mixed_bound(const struct mlxsw_sp_acl_block *block)
{
        return block->ingress_binding_count &&
               block->egress_binding_count;
}
static bool
mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
{
        /* We hold a reference on ruleset ourselves */
        return ruleset->ref_count == 2;
}
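/* Note: a ruleset is created with ref_count == 1 and each
 * mlxsw_sp_acl_ruleset_get() caller adds one more reference, so a count of
 * exactly two means the current reference holder is the ruleset's only user.
 */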
static int
mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
                          struct mlxsw_sp_acl_block *block,
                          struct mlxsw_sp_acl_block_binding *binding)
{
        struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
        const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

        return ops->ruleset_bind(mlxsw_sp, ruleset->priv,
                                 binding->mlxsw_sp_port, binding->ingress);
}

static void
mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
                            struct mlxsw_sp_acl_block *block,
                            struct mlxsw_sp_acl_block_binding *binding)
{
        struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
        const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

        ops->ruleset_unbind(mlxsw_sp, ruleset->priv,
                            binding->mlxsw_sp_port, binding->ingress);
}

static bool
mlxsw_sp_acl_ruleset_block_bound(const struct mlxsw_sp_acl_block *block)
{
        return block->ruleset_zero;
}
static int
mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_ruleset *ruleset,
                                struct mlxsw_sp_acl_block *block)
{
        struct mlxsw_sp_acl_block_binding *binding;
        int err;

        block->ruleset_zero = ruleset;
        list_for_each_entry(binding, &block->binding_list, list) {
                err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
                if (err)
                        goto rollback;
        }
        return 0;

rollback:
        list_for_each_entry_continue_reverse(binding, &block->binding_list,
                                             list)
                mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
        block->ruleset_zero = NULL;

        return err;
}
static void
mlxsw_sp_acl_ruleset_block_unbind(struct mlxsw_sp *mlxsw_sp,
                                  struct mlxsw_sp_acl_ruleset *ruleset,
                                  struct mlxsw_sp_acl_block *block)
{
        struct mlxsw_sp_acl_block_binding *binding;

        list_for_each_entry(binding, &block->binding_list, list)
                mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
        block->ruleset_zero = NULL;
}
struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
                                                     struct net *net)
{
        struct mlxsw_sp_acl_block *block;

        block = kzalloc(sizeof(*block), GFP_KERNEL);
        if (!block)
                return NULL;
        INIT_LIST_HEAD(&block->binding_list);
        block->mlxsw_sp = mlxsw_sp;
        block->net = net;
        return block;
}

void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block)
{
        WARN_ON(!list_empty(&block->binding_list));
        kfree(block);
}
static struct mlxsw_sp_acl_block_binding *
mlxsw_sp_acl_block_lookup(struct mlxsw_sp_acl_block *block,
                          struct mlxsw_sp_port *mlxsw_sp_port, bool ingress)
{
        struct mlxsw_sp_acl_block_binding *binding;

        list_for_each_entry(binding, &block->binding_list, list)
                if (binding->mlxsw_sp_port == mlxsw_sp_port &&
                    binding->ingress == ingress)
                        return binding;
        return NULL;
}
int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
                            struct mlxsw_sp_acl_block *block,
                            struct mlxsw_sp_port *mlxsw_sp_port,
                            bool ingress,
                            struct netlink_ext_ack *extack)
{
        struct mlxsw_sp_acl_block_binding *binding;
        int err;

        if (WARN_ON(mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress)))
                return -EEXIST;

        if (ingress && block->ingress_blocker_rule_count) {
                NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to ingress because it contains unsupported rules");
                return -EOPNOTSUPP;
        }

        if (!ingress && block->egress_blocker_rule_count) {
                NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to egress because it contains unsupported rules");
                return -EOPNOTSUPP;
        }

        binding = kzalloc(sizeof(*binding), GFP_KERNEL);
        if (!binding)
                return -ENOMEM;
        binding->mlxsw_sp_port = mlxsw_sp_port;
        binding->ingress = ingress;

        if (mlxsw_sp_acl_ruleset_block_bound(block)) {
                err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
                if (err)
                        goto err_ruleset_bind;
        }

        if (ingress)
                block->ingress_binding_count++;
        else
                block->egress_binding_count++;
        list_add(&binding->list, &block->binding_list);
        return 0;

err_ruleset_bind:
        kfree(binding);
        return err;
}
int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
                              struct mlxsw_sp_acl_block *block,
                              struct mlxsw_sp_port *mlxsw_sp_port,
                              bool ingress)
{
        struct mlxsw_sp_acl_block_binding *binding;

        binding = mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress);
        if (!binding)
                return -ENOENT;

        list_del(&binding->list);

        if (ingress)
                block->ingress_binding_count--;
        else
                block->egress_binding_count--;

        if (mlxsw_sp_acl_ruleset_block_bound(block))
                mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);

        kfree(binding);
        return 0;
}
static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
                            struct mlxsw_sp_acl_block *block, u32 chain_index,
                            const struct mlxsw_sp_acl_profile_ops *ops,
                            struct mlxsw_afk_element_usage *tmplt_elusage)
{
        struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
        struct mlxsw_sp_acl_ruleset *ruleset;
        size_t alloc_size;
        int err;

        alloc_size = sizeof(*ruleset) + ops->ruleset_priv_size;
        ruleset = kzalloc(alloc_size, GFP_KERNEL);
        if (!ruleset)
                return ERR_PTR(-ENOMEM);
        ruleset->ref_count = 1;
        ruleset->ht_key.block = block;
        ruleset->ht_key.chain_index = chain_index;
        ruleset->ht_key.ops = ops;

        err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
        if (err)
                goto err_rhashtable_init;

        err = ops->ruleset_add(mlxsw_sp, &acl->tcam, ruleset->priv,
                               tmplt_elusage);
        if (err)
                goto err_ops_ruleset_add;

        err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
                                     mlxsw_sp_acl_ruleset_ht_params);
        if (err)
                goto err_ht_insert;

        return ruleset;

err_ht_insert:
        ops->ruleset_del(mlxsw_sp, ruleset->priv);
err_ops_ruleset_add:
        rhashtable_destroy(&ruleset->rule_ht);
err_rhashtable_init:
        kfree(ruleset);
        return ERR_PTR(err);
}
static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
                                         struct mlxsw_sp_acl_ruleset *ruleset)
{
        const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
        struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

        rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
                               mlxsw_sp_acl_ruleset_ht_params);
        ops->ruleset_del(mlxsw_sp, ruleset->priv);
        rhashtable_destroy(&ruleset->rule_ht);
        kfree(ruleset);
}
static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
{
        ruleset->ref_count++;
}

static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
                                         struct mlxsw_sp_acl_ruleset *ruleset)
{
        if (--ruleset->ref_count)
                return;
        mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}
static struct mlxsw_sp_acl_ruleset *
__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl,
                              struct mlxsw_sp_acl_block *block, u32 chain_index,
                              const struct mlxsw_sp_acl_profile_ops *ops)
{
        struct mlxsw_sp_acl_ruleset_ht_key ht_key;

        memset(&ht_key, 0, sizeof(ht_key));
        ht_key.block = block;
        ht_key.chain_index = chain_index;
        ht_key.ops = ops;
        return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
                                      mlxsw_sp_acl_ruleset_ht_params);
}
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
                            struct mlxsw_sp_acl_block *block, u32 chain_index,
                            enum mlxsw_sp_acl_profile profile)
{
        const struct mlxsw_sp_acl_profile_ops *ops;
        struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
        struct mlxsw_sp_acl_ruleset *ruleset;

        ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
        if (!ops)
                return ERR_PTR(-EINVAL);
        ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
        if (!ruleset)
                return ERR_PTR(-ENOENT);
        return ruleset;
}
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
                         struct mlxsw_sp_acl_block *block, u32 chain_index,
                         enum mlxsw_sp_acl_profile profile,
                         struct mlxsw_afk_element_usage *tmplt_elusage)
{
        const struct mlxsw_sp_acl_profile_ops *ops;
        struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
        struct mlxsw_sp_acl_ruleset *ruleset;

        ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
        if (!ops)
                return ERR_PTR(-EINVAL);
        ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
        if (ruleset) {
                mlxsw_sp_acl_ruleset_ref_inc(ruleset);
                return ruleset;
        }
        return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops,
                                           tmplt_elusage);
}
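/* Sketch of typical usage (illustrative only; mirrors what the flower
 * offload code is expected to do, assuming a block and chain_index at hand):
 *
 *      ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block, chain_index,
 *                                         MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
 *      if (IS_ERR(ruleset))
 *              return PTR_ERR(ruleset);
 *      ...
 *      mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
 *
 * Every successful get() must be paired with a put(); the last put()
 * destroys the ruleset via mlxsw_sp_acl_ruleset_ref_dec().
 */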
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
                              struct mlxsw_sp_acl_ruleset *ruleset)
{
        mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset)
{
        const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

        return ops->ruleset_group_id(ruleset->priv);
}
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
                          struct mlxsw_afa_block *afa_block)
{
        struct mlxsw_sp_acl_rule_info *rulei;
        int err;

        rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
        if (!rulei)
                return ERR_PTR(-ENOMEM);

        if (afa_block) {
                rulei->act_block = afa_block;
                return rulei;
        }

        rulei->act_block = mlxsw_afa_block_create(acl->mlxsw_sp->afa);
        if (IS_ERR(rulei->act_block)) {
                err = PTR_ERR(rulei->act_block);
                goto err_afa_block_create;
        }
        rulei->action_created = 1;
        return rulei;

err_afa_block_create:
        kfree(rulei);
        return ERR_PTR(err);
}
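/* Ownership note: when a caller passes a preexisting afa_block,
 * action_created stays zero and mlxsw_sp_acl_rulei_destroy() will not free
 * the action block; the caller keeps owning it. Only an action block
 * created here on the rulei's behalf is destroyed together with the rulei.
 */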
void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei)
{
        if (rulei->action_created)
                mlxsw_afa_block_destroy(rulei->act_block);
        kfree(rulei);
}

int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
{
        return mlxsw_afa_block_commit(rulei->act_block);
}
void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
                                 unsigned int priority)
{
        rulei->priority = priority;
}

void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
                                    enum mlxsw_afk_element element,
                                    u32 key_value, u32 mask_value)
{
        mlxsw_afk_values_add_u32(&rulei->values, element,
                                 key_value, mask_value);
}

void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
                                    enum mlxsw_afk_element element,
                                    const char *key_value,
                                    const char *mask_value, unsigned int len)
{
        mlxsw_afk_values_add_buf(&rulei->values, element,
                                 key_value, mask_value, len);
}

int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei)
{
        return mlxsw_afa_block_continue(rulei->act_block);
}
int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
                                u16 group_id)
{
        return mlxsw_afa_block_jump(rulei->act_block, group_id);
}

int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei)
{
        return mlxsw_afa_block_terminate(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei,
                                bool ingress,
                                const struct flow_action_cookie *fa_cookie,
                                struct netlink_ext_ack *extack)
{
        return mlxsw_afa_block_append_drop(rulei->act_block, ingress,
                                           fa_cookie, extack);
}

int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei)
{
        return mlxsw_afa_block_append_trap(rulei->act_block,
                                           MLXSW_TRAP_ID_ACL0);
}
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_acl_rule_info *rulei,
                               struct net_device *out_dev,
                               struct netlink_ext_ack *extack)
{
        struct mlxsw_sp_port *mlxsw_sp_port;
        u8 local_port;
        bool in_port;

        if (out_dev) {
                if (!mlxsw_sp_port_dev_check(out_dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
                        return -EINVAL;
                }
                mlxsw_sp_port = netdev_priv(out_dev);
                if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp) {
                        NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
                        return -EINVAL;
                }
                local_port = mlxsw_sp_port->local_port;
                in_port = false;
        } else {
                /* If out_dev is NULL, the caller wants to
                 * set forward to ingress port.
                 */
                local_port = 0;
                in_port = true;
        }
        return mlxsw_afa_block_append_fwd(rulei->act_block,
                                          local_port, in_port, extack);
}
int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
                                  struct mlxsw_sp_acl_rule_info *rulei,
                                  struct mlxsw_sp_acl_block *block,
                                  struct net_device *out_dev,
                                  struct netlink_ext_ack *extack)
{
        struct mlxsw_sp_acl_block_binding *binding;
        struct mlxsw_sp_port *in_port;

        if (!list_is_singular(&block->binding_list)) {
                NL_SET_ERR_MSG_MOD(extack, "Only a single mirror source is allowed");
                return -EOPNOTSUPP;
        }
        binding = list_first_entry(&block->binding_list,
                                   struct mlxsw_sp_acl_block_binding, list);
        in_port = binding->mlxsw_sp_port;

        return mlxsw_afa_block_append_mirror(rulei->act_block,
                                             in_port->local_port,
                                             out_dev,
                                             binding->ingress,
                                             extack);
}
int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_rule_info *rulei,
                                u32 action, u16 vid, u16 proto, u8 prio,
                                struct netlink_ext_ack *extack)
{
        u8 ethertype;

        if (action == FLOW_ACTION_VLAN_MANGLE) {
                switch (proto) {
                case ETH_P_8021Q:
                        ethertype = 0;
                        break;
                case ETH_P_8021AD:
                        ethertype = 1;
                        break;
                default:
                        NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN protocol");
                        dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n",
                                proto);
                        return -EINVAL;
                }

                return mlxsw_afa_block_append_vlan_modify(rulei->act_block,
                                                          vid, prio, ethertype,
                                                          extack);
        } else {
                NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN action");
                dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n");
                return -EINVAL;
        }
}
int mlxsw_sp_acl_rulei_act_priority(struct mlxsw_sp *mlxsw_sp,
                                    struct mlxsw_sp_acl_rule_info *rulei,
                                    u32 prio, struct netlink_ext_ack *extack)
{
        /* Even though both Linux and Spectrum switches support 16 priorities,
         * spectrum_qdisc only processes the first eight priomap elements, and
         * the DCB and PFC features are tied to 8 priorities as well. Therefore
         * bounce attempts to prioritize packets to higher priorities.
         */
        if (prio >= IEEE_8021QAZ_MAX_TCS) {
                NL_SET_ERR_MSG_MOD(extack, "Only priorities 0..7 are supported");
                return -EINVAL;
        }
        return mlxsw_afa_block_append_qos_switch_prio(rulei->act_block, prio,
                                                      extack);
}
enum mlxsw_sp_acl_mangle_field {
        MLXSW_SP_ACL_MANGLE_FIELD_IP_DSFIELD,
        MLXSW_SP_ACL_MANGLE_FIELD_IP_DSCP,
        MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN,
};

struct mlxsw_sp_acl_mangle_action {
        enum flow_action_mangle_base htype;
        /* Offset is u32-aligned. */
        u32 offset;
        /* Mask bits are unset for the modified field. */
        u32 mask;
        /* Shift required to extract the set value. */
        u32 shift;
        enum mlxsw_sp_acl_mangle_field field;
};
#define MLXSW_SP_ACL_MANGLE_ACTION(_htype, _offset, _mask, _shift, _field) \
        {                                                               \
                .htype = _htype,                                        \
                .offset = _offset,                                      \
                .mask = _mask,                                          \
                .shift = _shift,                                        \
                .field = MLXSW_SP_ACL_MANGLE_FIELD_##_field,            \
        }

#define MLXSW_SP_ACL_MANGLE_ACTION_IP4(_offset, _mask, _shift, _field) \
        MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_IP4,       \
                                   _offset, _mask, _shift, _field)

#define MLXSW_SP_ACL_MANGLE_ACTION_IP6(_offset, _mask, _shift, _field) \
        MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_IP6,       \
                                   _offset, _mask, _shift, _field)

static struct mlxsw_sp_acl_mangle_action mlxsw_sp_acl_mangle_actions[] = {
        MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xff00ffff, 16, IP_DSFIELD),
        MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xff03ffff, 18, IP_DSCP),
        MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xfffcffff, 16, IP_ECN),
        MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xf00fffff, 20, IP_DSFIELD),
        MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xf03fffff, 22, IP_DSCP),
        MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xffcfffff, 20, IP_ECN),
};
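/* Worked example: in the IPv4 IP_DSCP entry above, the bits left unset by
 * the mask are ~0xff03ffff == 0x00fc0000, i.e. bits 18..23 of the first
 * 32-bit word of the IP header as pedit sees it. Shifting the packed value
 * right by 18 therefore extracts the 6-bit DSCP to be written.
 */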
static int
mlxsw_sp_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
                                    struct mlxsw_sp_acl_rule_info *rulei,
                                    struct mlxsw_sp_acl_mangle_action *mact,
                                    u32 val, struct netlink_ext_ack *extack)
{
        switch (mact->field) {
        case MLXSW_SP_ACL_MANGLE_FIELD_IP_DSFIELD:
                return mlxsw_afa_block_append_qos_dsfield(rulei->act_block,
                                                          val, extack);
        case MLXSW_SP_ACL_MANGLE_FIELD_IP_DSCP:
                return mlxsw_afa_block_append_qos_dscp(rulei->act_block,
                                                       val, extack);
        case MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN:
                return mlxsw_afa_block_append_qos_ecn(rulei->act_block,
                                                      val, extack);
        }

        /* We shouldn't have gotten a match in the first place! */
        WARN_ONCE(1, "Unhandled mangle field");
        return -EINVAL;
}
int mlxsw_sp_acl_rulei_act_mangle(struct mlxsw_sp *mlxsw_sp,
                                  struct mlxsw_sp_acl_rule_info *rulei,
                                  enum flow_action_mangle_base htype,
                                  u32 offset, u32 mask, u32 val,
                                  struct netlink_ext_ack *extack)
{
        struct mlxsw_sp_acl_mangle_action *mact;
        size_t i;

        for (i = 0; i < ARRAY_SIZE(mlxsw_sp_acl_mangle_actions); ++i) {
                mact = &mlxsw_sp_acl_mangle_actions[i];
                if (mact->htype == htype &&
                    mact->offset == offset &&
                    mact->mask == mask) {
                        val >>= mact->shift;
                        return mlxsw_sp_acl_rulei_act_mangle_field(mlxsw_sp,
                                                                   rulei, mact,
                                                                   val, extack);
                }
        }

        NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field");
        return -EINVAL;
}
int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_rule_info *rulei,
                                 struct netlink_ext_ack *extack)
{
        int err;

        err = mlxsw_afa_block_append_counter(rulei->act_block,
                                             &rulei->counter_index, extack);
        if (err)
                return err;
        rulei->counter_valid = true;
        return 0;
}

int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
                                   struct mlxsw_sp_acl_rule_info *rulei,
                                   u16 fid, struct netlink_ext_ack *extack)
{
        return mlxsw_afa_block_append_fid_set(rulei->act_block, fid, extack);
}
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
                         struct mlxsw_sp_acl_ruleset *ruleset,
                         unsigned long cookie,
                         struct mlxsw_afa_block *afa_block,
                         struct netlink_ext_ack *extack)
{
        const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
        struct mlxsw_sp_acl_rule *rule;
        int err;

        mlxsw_sp_acl_ruleset_ref_inc(ruleset);
        rule = kzalloc(sizeof(*rule) + ops->rule_priv_size,
                       GFP_KERNEL);
        if (!rule) {
                err = -ENOMEM;
                goto err_alloc;
        }
        rule->cookie = cookie;
        rule->ruleset = ruleset;

        rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl, afa_block);
        if (IS_ERR(rule->rulei)) {
                err = PTR_ERR(rule->rulei);
                goto err_rulei_create;
        }

        return rule;

err_rulei_create:
        kfree(rule);
err_alloc:
        mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
        return ERR_PTR(err);
}

void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_acl_rule *rule)
{
        struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;

        mlxsw_sp_acl_rulei_destroy(rule->rulei);
        kfree(rule);
        mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}
int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
                          struct mlxsw_sp_acl_rule *rule)
{
        struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
        const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
        struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
        int err;

        err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
        if (err)
                return err;

        err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node,
                                     mlxsw_sp_acl_rule_ht_params);
        if (err)
                goto err_rhashtable_insert;

        if (!ruleset->ht_key.chain_index &&
            mlxsw_sp_acl_ruleset_is_singular(ruleset)) {
                /* We only need ruleset with chain index 0, the implicit
                 * one, to be directly bound to device. The rest of the
                 * rulesets are bound by "Goto action set".
                 */
                err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block);
                if (err)
                        goto err_ruleset_block_bind;
        }

        mutex_lock(&mlxsw_sp->acl->rules_lock);
        list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
        mutex_unlock(&mlxsw_sp->acl->rules_lock);
        block->rule_count++;
        block->ingress_blocker_rule_count += rule->rulei->ingress_bind_blocker;
        block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker;
        return 0;

err_ruleset_block_bind:
        rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
                               mlxsw_sp_acl_rule_ht_params);
err_rhashtable_insert:
        ops->rule_del(mlxsw_sp, rule->priv);
        return err;
}
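/* Illustrative call sequence (a sketch of how an offload path such as
 * flower is expected to drive this API; "f" is a hypothetical
 * flow_cls_offload, not code from this file):
 *
 *      rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie, NULL,
 *                                      extack);
 *      if (IS_ERR(rule))
 *              return PTR_ERR(rule);
 *      ...fill the rulei and call mlxsw_sp_acl_rulei_commit()...
 *      err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
 *
 * Teardown is symmetric: mlxsw_sp_acl_rule_del() followed by
 * mlxsw_sp_acl_rule_destroy().
 */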
void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
                           struct mlxsw_sp_acl_rule *rule)
{
        struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
        const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
        struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;

        block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
        block->ingress_blocker_rule_count -= rule->rulei->ingress_bind_blocker;
        block->rule_count--;
        mutex_lock(&mlxsw_sp->acl->rules_lock);
        list_del(&rule->list);
        mutex_unlock(&mlxsw_sp->acl->rules_lock);
        if (!ruleset->ht_key.chain_index &&
            mlxsw_sp_acl_ruleset_is_singular(ruleset))
                mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block);
        rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
                               mlxsw_sp_acl_rule_ht_params);
        ops->rule_del(mlxsw_sp, rule->priv);
}
int mlxsw_sp_acl_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
                                     struct mlxsw_sp_acl_rule *rule,
                                     struct mlxsw_afa_block *afa_block)
{
        struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
        const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
        struct mlxsw_sp_acl_rule_info *rulei;

        rulei = mlxsw_sp_acl_rule_rulei(rule);
        rulei->act_block = afa_block;

        return ops->rule_action_replace(mlxsw_sp, rule->priv, rule->rulei);
}
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
                         struct mlxsw_sp_acl_ruleset *ruleset,
                         unsigned long cookie)
{
        return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
                                      mlxsw_sp_acl_rule_ht_params);
}

struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
{
        return rule->rulei;
}
static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp,
                                             struct mlxsw_sp_acl_rule *rule)
{
        struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
        const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
        bool active;
        int err;

        err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active);
        if (err)
                return err;
        if (active)
                rule->last_used = jiffies;
        return 0;
}
static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
{
        struct mlxsw_sp_acl_rule *rule;
        int err;

        mutex_lock(&acl->rules_lock);
        list_for_each_entry(rule, &acl->rules, list) {
                err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
                                                        rule);
                if (err)
                        goto err_rule_update;
        }
        mutex_unlock(&acl->rules_lock);
        return 0;

err_rule_update:
        mutex_unlock(&acl->rules_lock);
        return err;
}
static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
{
        unsigned long interval = acl->rule_activity_update.interval;

        mlxsw_core_schedule_dw(&acl->rule_activity_update.dw,
                               msecs_to_jiffies(interval));
}

static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work)
{
        struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
                                                rule_activity_update.dw.work);
        int err;

        err = mlxsw_sp_acl_rules_activity_update(acl);
        if (err)
                dev_err(acl->mlxsw_sp->bus_info->dev, "Could not update acl activity");

        mlxsw_sp_acl_rule_activity_work_schedule(acl);
}
int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_rule *rule,
                                u64 *packets, u64 *bytes, u64 *last_use,
                                enum flow_action_hw_stats *used_hw_stats)
{
        struct mlxsw_sp_acl_rule_info *rulei;
        u64 current_packets = 0;
        u64 current_bytes = 0;
        int err;

        rulei = mlxsw_sp_acl_rule_rulei(rule);
        if (rulei->counter_valid) {
                err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
                                                &current_packets,
                                                &current_bytes);
                if (err)
                        return err;
                *used_hw_stats = FLOW_ACTION_HW_STATS_IMMEDIATE;
        }
        *packets = current_packets - rule->last_packets;
        *bytes = current_bytes - rule->last_bytes;
        *last_use = rule->last_used;

        rule->last_bytes = current_bytes;
        rule->last_packets = current_packets;

        return 0;
}
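/* Note: hardware counters are cumulative; the function above returns the
 * delta since the previous call and then re-bases last_packets/last_bytes,
 * so each call reports only traffic seen since the last query. Two
 * back-to-back calls with no traffic in between yield *packets == 0 on the
 * second call.
 */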
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
        struct mlxsw_sp_fid *fid;
        struct mlxsw_sp_acl *acl;
        size_t alloc_size;
        int err;

        alloc_size = sizeof(*acl) + mlxsw_sp_acl_tcam_priv_size(mlxsw_sp);
        acl = kzalloc(alloc_size, GFP_KERNEL);
        if (!acl)
                return -ENOMEM;
        mlxsw_sp->acl = acl;
        acl->mlxsw_sp = mlxsw_sp;
        acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
                                                       ACL_FLEX_KEYS),
                                    mlxsw_sp->afk_ops);
        if (!acl->afk) {
                err = -ENOMEM;
                goto err_afk_create;
        }

        err = rhashtable_init(&acl->ruleset_ht,
                              &mlxsw_sp_acl_ruleset_ht_params);
        if (err)
                goto err_rhashtable_init;

        fid = mlxsw_sp_fid_dummy_get(mlxsw_sp);
        if (IS_ERR(fid)) {
                err = PTR_ERR(fid);
                goto err_fid_get;
        }
        acl->dummy_fid = fid;

        INIT_LIST_HEAD(&acl->rules);
        mutex_init(&acl->rules_lock);
        err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
        if (err)
                goto err_acl_ops_init;

        /* Create the delayed work for the rule activity_update */
        INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
                          mlxsw_sp_acl_rule_activity_update_work);
        acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
        mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
        return 0;

err_acl_ops_init:
        mutex_destroy(&acl->rules_lock);
        mlxsw_sp_fid_put(fid);
err_fid_get:
        rhashtable_destroy(&acl->ruleset_ht);
err_rhashtable_init:
        mlxsw_afk_destroy(acl->afk);
err_afk_create:
        kfree(acl);
        return err;
}
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
{
        struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

        cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
        mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
        mutex_destroy(&acl->rules_lock);
        WARN_ON(!list_empty(&acl->rules));
        mlxsw_sp_fid_put(acl->dummy_fid);
        rhashtable_destroy(&acl->ruleset_ht);
        mlxsw_afk_destroy(acl->afk);
        kfree(acl);
}
u32 mlxsw_sp_acl_region_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp)
{
        struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

        return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(mlxsw_sp,
                                                           &acl->tcam);
}

int mlxsw_sp_acl_region_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, u32 val)
{
        struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

        return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(mlxsw_sp,
                                                           &acl->tcam, val);
}