// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Broadcom Starfighter 2 DSA switch CFP support
 *
 * Copyright (C) 2016, Broadcom
 */

#include <linux/list.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/netdevice.h>
#include <net/dsa.h>
#include <linux/bitmap.h>
#include <net/flow_offload.h>
#include <net/switchdev.h>
#include <uapi/linux/if_bridge.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"

struct cfp_rule {
	int port;
	struct ethtool_rx_flow_spec fs;
	struct list_head next;
};

struct cfp_udf_slice_layout {
	u8 slices[UDFS_PER_SLICE];
	u32 mask_value;
	u32 base_offset;
};

struct cfp_udf_layout {
	struct cfp_udf_slice_layout udfs[UDF_NUM_SLICES];
};

static const u8 zero_slice[UDFS_PER_SLICE] = { };

/* UDF slices layout for a TCPv4/UDPv4 specification */
static const struct cfp_udf_layout udf_tcpip4_layout = {
	.udfs = {
		[1] = {
			.slices = {
				/* End of L2, byte offset 12, src IP[0:15] */
				CFG_UDF_EOL2 | 6,
				/* End of L2, byte offset 14, src IP[16:31] */
				CFG_UDF_EOL2 | 7,
				/* End of L2, byte offset 16, dst IP[0:15] */
				CFG_UDF_EOL2 | 8,
				/* End of L2, byte offset 18, dst IP[16:31] */
				CFG_UDF_EOL2 | 9,
				/* End of L3, byte offset 0, src port */
				CFG_UDF_EOL3 | 0,
				/* End of L3, byte offset 2, dst port */
				CFG_UDF_EOL3 | 1,
				0, 0, 0
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_A_0_8_PORT_0 + UDF_SLICE_OFFSET,
		},
	},
};

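/* A note on the encoding above (illustrative, derived from the byte
 * offsets in the comments): each UDF matches 16 bits, so "End of L2,
 * byte offset 12" is programmed as CFG_UDF_EOL2 | (12 / 2) =
 * CFG_UDF_EOL2 | 6, and "End of L3, byte offset 2" as CFG_UDF_EOL3 | 1.
 */
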
/* UDF slices layout for a TCPv6/UDPv6 specification */
static const struct cfp_udf_layout udf_tcpip6_layout = {
	.udfs = {
		[0] = {
			.slices = {
				/* End of L2, byte offset 8, src IP[0:15] */
				CFG_UDF_EOL2 | 4,
				/* End of L2, byte offset 10, src IP[16:31] */
				CFG_UDF_EOL2 | 5,
				/* End of L2, byte offset 12, src IP[32:47] */
				CFG_UDF_EOL2 | 6,
				/* End of L2, byte offset 14, src IP[48:63] */
				CFG_UDF_EOL2 | 7,
				/* End of L2, byte offset 16, src IP[64:79] */
				CFG_UDF_EOL2 | 8,
				/* End of L2, byte offset 18, src IP[80:95] */
				CFG_UDF_EOL2 | 9,
				/* End of L2, byte offset 20, src IP[96:111] */
				CFG_UDF_EOL2 | 10,
				/* End of L2, byte offset 22, src IP[112:127] */
				CFG_UDF_EOL2 | 11,
				/* End of L3, byte offset 0, src port */
				CFG_UDF_EOL3 | 0,
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_B_0_8_PORT_0,
		},
		[3] = {
			.slices = {
				/* End of L2, byte offset 24, dst IP[0:15] */
				CFG_UDF_EOL2 | 12,
				/* End of L2, byte offset 26, dst IP[16:31] */
				CFG_UDF_EOL2 | 13,
				/* End of L2, byte offset 28, dst IP[32:47] */
				CFG_UDF_EOL2 | 14,
				/* End of L2, byte offset 30, dst IP[48:63] */
				CFG_UDF_EOL2 | 15,
				/* End of L2, byte offset 32, dst IP[64:79] */
				CFG_UDF_EOL2 | 16,
				/* End of L2, byte offset 34, dst IP[80:95] */
				CFG_UDF_EOL2 | 17,
				/* End of L2, byte offset 36, dst IP[96:111] */
				CFG_UDF_EOL2 | 18,
				/* End of L2, byte offset 38, dst IP[112:127] */
				CFG_UDF_EOL2 | 19,
				/* End of L3, byte offset 2, dst port */
				CFG_UDF_EOL3 | 1,
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_D_0_11_PORT_0,
		},
	},
};

static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
{
	unsigned int i, count = 0;

	for (i = 0; i < UDFS_PER_SLICE; i++) {
		if (layout[i] != 0)
			count++;
	}

	return count;
}

static inline u32 udf_upper_bits(int num_udf)
{
	return GENMASK(num_udf - 1, 0) >> (UDFS_PER_SLICE - 1);
}

static inline u32 udf_lower_bits(int num_udf)
{
	return (u8)GENMASK(num_udf - 1, 0);
}

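/* Worked example for the helpers above (UDFS_PER_SLICE is assumed to be
 * 9, as defined in bcm_sf2_regs.h): for the IPv4 layout, num_udf = 6, so
 * udf_lower_bits(6) = (u8)GENMASK(5, 0) = 0x3f and udf_upper_bits(6) =
 * GENMASK(5, 0) >> 8 = 0. For the IPv6 slices, num_udf = 9, giving
 * udf_lower_bits(9) = 0xff (UDF_Valid[7:0]) and udf_upper_bits(9) = 0x1
 * (UDF_Valid[8]).
 */
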
static unsigned int bcm_sf2_get_slice_number(const struct cfp_udf_layout *l,
					     unsigned int start)
{
	const struct cfp_udf_slice_layout *slice_layout;
	unsigned int slice_idx;

	for (slice_idx = start; slice_idx < UDF_NUM_SLICES; slice_idx++) {
		slice_layout = &l->udfs[slice_idx];
		if (memcmp(slice_layout->slices, zero_slice,
			   sizeof(zero_slice)))
			break;
	}

	return slice_idx;
}

static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv,
				const struct cfp_udf_layout *layout,
				unsigned int slice_num)
{
	u32 offset = layout->udfs[slice_num].base_offset;
	unsigned int i;

	for (i = 0; i < UDFS_PER_SLICE; i++)
		core_writel(priv, layout->udfs[slice_num].slices[i],
			    offset + i * 4);
}

static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
	reg |= OP_STR_DONE | op;
	core_writel(priv, reg, CORE_CFP_ACC);

	/* Poll for the operation to complete */
	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & OP_STR_DONE))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}

static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
					     unsigned int addr)
{
	u32 reg;

	WARN_ON(addr >= priv->num_cfp_rules);

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
	reg |= addr << XCESS_ADDR_SHIFT;
	core_writel(priv, reg, CORE_CFP_ACC);
}

static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
{
	/* Entry #0 is reserved */
	return priv->num_cfp_rules - 1;
}

static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
				   unsigned int rule_index,
				   int src_port,
				   unsigned int port_num,
				   unsigned int queue_num,
				   bool fwd_map_change)
{
	int ret;
	u32 reg;

	/* Replace ARL derived destination with DST_MAP derived, define
	 * which port and queue this should be forwarded to.
	 */
	if (fwd_map_change)
		reg = CHANGE_FWRD_MAP_IB_REP_ARL |
		      BIT(port_num + DST_MAP_IB_SHIFT) |
		      CHANGE_TC | queue_num << NEW_TC_SHIFT;
	else
		reg = 0;

	/* Enable looping back to the original port */
	if (src_port == port_num)
		reg |= LOOP_BK_EN;

	core_writel(priv, reg, CORE_ACT_POL_DATA0);

	/* Set classification ID that needs to be put in Broadcom tag */
	core_writel(priv, rule_index << CHAIN_ID_SHIFT, CORE_ACT_POL_DATA1);

	core_writel(priv, 0, CORE_ACT_POL_DATA2);

	/* Configure policer RAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM);
	if (ret) {
		pr_err("Policer entry at %d failed\n", rule_index);
		return ret;
	}

	/* Disable the policer */
	core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0);

	/* Now the rate meter */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM);
	if (ret) {
		pr_err("Meter entry at %d failed\n", rule_index);
		return ret;
	}

	return 0;
}

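/* Illustration of the DATA0 encoding in bcm_sf2_cfp_act_pol_set() above
 * (hypothetical values): for port_num = 1 and queue_num = 2, reg becomes
 * CHANGE_FWRD_MAP_IB_REP_ARL | BIT(1 + DST_MAP_IB_SHIFT) | CHANGE_TC |
 * (2 << NEW_TC_SHIFT), i.e. the DST_MAP bitmap selects egress port 1 and
 * the new TC field selects egress queue 2.
 */
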
static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
				   struct flow_dissector_key_ipv4_addrs *addrs,
				   struct flow_dissector_key_ports *ports,
				   const __be16 vlan_tci,
				   unsigned int slice_num, u8 num_udf,
				   bool mask)
{
	u32 reg, offset;

	/* UDF_Valid[7:0]	[31:24]
	 * S-Tag		[23:8]
	 * C-Tag		[7:0]
	 */
	reg = udf_lower_bits(num_udf) << 24 | be16_to_cpu(vlan_tci) >> 8;
	if (mask)
		core_writel(priv, reg, CORE_CFP_MASK_PORT(5));
	else
		core_writel(priv, reg, CORE_CFP_DATA_PORT(5));

	/* C-Tag		[31:24]
	 * UDF_n_A8		[23:8]
	 * UDF_n_A7		[7:0]
	 */
	reg = (u32)(be16_to_cpu(vlan_tci) & 0xff) << 24;
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	core_writel(priv, reg, offset);

	/* UDF_n_A7		[31:24]
	 * UDF_n_A6		[23:8]
	 * UDF_n_A5		[7:0]
	 */
	reg = be16_to_cpu(ports->dst) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	core_writel(priv, reg, offset);

	/* UDF_n_A5		[31:24]
	 * UDF_n_A4		[23:8]
	 * UDF_n_A3		[7:0]
	 */
	reg = (be16_to_cpu(ports->dst) & 0xff) << 24 |
	      (u32)be16_to_cpu(ports->src) << 8 |
	      (be32_to_cpu(addrs->dst) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	core_writel(priv, reg, offset);

	/* UDF_n_A3		[31:24]
	 * UDF_n_A2		[23:8]
	 * UDF_n_A1		[7:0]
	 */
	reg = (u32)(be32_to_cpu(addrs->dst) & 0xff) << 24 |
	      (u32)(be32_to_cpu(addrs->dst) >> 16) << 8 |
	      (be32_to_cpu(addrs->src) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	core_writel(priv, reg, offset);

	/* UDF_n_A1		[31:24]
	 * UDF_n_A0		[23:8]
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	reg = (u32)(be32_to_cpu(addrs->src) & 0xff) << 24 |
	      (u32)(be32_to_cpu(addrs->src) >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	core_writel(priv, reg, offset);
}

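/* Example of a value straddling two TCAM words above: for a destination
 * port of 8080 (0x1f90), the high byte 0x1f lands in bits [7:0] of
 * word 3 and the low byte 0x90 in bits [31:24] of word 2, matching the
 * UDF_n_A5 upper/lower split described in the comments.
 */
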
static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
{
	__be16 vlan_tci = 0, vlan_m_tci = htons(0xffff);
	struct ethtool_rx_flow_spec_input input = {};
	const struct cfp_udf_layout *layout;
	unsigned int slice_num, rule_index;
	struct ethtool_rx_flow_rule *flow;
	struct flow_match_ipv4_addrs ipv4;
	struct flow_match_ports ports;
	struct flow_match_ip ip;
	u8 ip_proto, ip_frag;
	u8 num_udf;
	u32 reg;
	int ret;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ip_proto = IPPROTO_TCP;
		break;
	case UDP_V4_FLOW:
		ip_proto = IPPROTO_UDP;
		break;
	default:
		return -EINVAL;
	}

	ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);

	/* Extract VLAN TCI */
	if (fs->flow_type & FLOW_EXT) {
		vlan_tci = fs->h_ext.vlan_tci;
		vlan_m_tci = fs->m_ext.vlan_tci;
	}

	/* Locate the first rule available */
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index = find_first_zero_bit(priv->cfp.used,
						 priv->num_cfp_rules);
	else
		rule_index = fs->location;

	if (rule_index > bcm_sf2_cfp_rule_size(priv))
		return -ENOSPC;

	input.fs = fs;
	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	flow_rule_match_ipv4_addrs(flow->rule, &ipv4);
	flow_rule_match_ports(flow->rule, &ports);
	flow_rule_match_ip(flow->rule, &ip);

	layout = &udf_tcpip4_layout;
	/* We only use one UDF slice for now */
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES) {
		ret = -EINVAL;
		goto out_err_flow_rule;
	}

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

	/* S-Tag status		[31:30]
	 * C-Tag status		[29:28]
	 * L2 framing		[27:26]
	 * L3 framing		[25:24]
	 * IP ToS		[23:16]
	 * IP proto		[15:08]
	 * IP Fragm		[7]
	 * Non 1st frag		[6]
	 * IP Authen		[5]
	 * TTL range		[4:3]
	 * PPPoE session	[2]
	 * Reserved		[1]
	 * UDF_Valid[8]		[0]
	 */
	core_writel(priv, ip.key->tos << IPTOS_SHIFT |
		    ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT |
		    udf_upper_bits(num_udf),
		    CORE_CFP_DATA_PORT(6));

	/* Mask with the specific layout for IPv4 packets */
	core_writel(priv, layout->udfs[slice_num].mask_value |
		    udf_upper_bits(num_udf), CORE_CFP_MASK_PORT(6));

	/* Program the match and the mask */
	bcm_sf2_cfp_slice_ipv4(priv, ipv4.key, ports.key, vlan_tci,
			       slice_num, num_udf, false);
	bcm_sf2_cfp_slice_ipv4(priv, ipv4.mask, ports.mask, vlan_m_tci,
			       SLICE_NUM_MASK, num_udf, true);

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index);
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port, port_num,
				      queue_num, true);
	if (ret)
		goto out_err_flow_rule;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg |= BIT(port);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* Flag the rule as being used and return it */
	set_bit(rule_index, priv->cfp.used);
	set_bit(rule_index, priv->cfp.unique);
	fs->location = rule_index;

	return 0;

out_err_flow_rule:
	ethtool_rx_flow_rule_destroy(flow);
	return ret;
}

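/* Sketch of how this function is reached from user-space (the interface
 * name is hypothetical; the ethtool flags are the standard rxnfc ones):
 *
 *	ethtool -N sw0p0 flow-type tcp4 dst-ip 192.168.1.2 dst-port 80 \
 *		action 2
 *
 * The ETHTOOL_SRXCLSRLINS ioctl lands in bcm_sf2_set_rxnfc(), which
 * calls bcm_sf2_cfp_rule_set() and, for TCP_V4_FLOW/UDP_V4_FLOW, ends
 * up here.
 */
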
static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
				   const __be32 *ip6_addr, const __be16 port,
				   const __be16 vlan_tci,
				   unsigned int slice_num, u32 udf_bits,
				   bool mask)
{
	u32 reg, tmp, val, offset;

	/* UDF_Valid[7:0]	[31:24]
	 * S-Tag		[23:8]
	 * C-Tag		[7:0]
	 */
	reg = udf_bits << 24 | be16_to_cpu(vlan_tci) >> 8;
	if (mask)
		core_writel(priv, reg, CORE_CFP_MASK_PORT(5));
	else
		core_writel(priv, reg, CORE_CFP_DATA_PORT(5));

	/* C-Tag		[31:24]
	 * UDF_n_B8		[23:8]	(port)
	 * UDF_n_B7 (upper)	[7:0]	(addr[15:8])
	 */
	reg = be32_to_cpu(ip6_addr[3]);
	val = (u32)be16_to_cpu(port) << 8 | ((reg >> 8) & 0xff);
	val |= (u32)(be16_to_cpu(vlan_tci) & 0xff) << 24;
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	core_writel(priv, val, offset);

	/* UDF_n_B7 (lower)	[31:24]	(addr[7:0])
	 * UDF_n_B6		[23:8]	(addr[31:16])
	 * UDF_n_B5 (upper)	[7:0]	(addr[47:40])
	 */
	tmp = be32_to_cpu(ip6_addr[2]);
	val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
	      ((tmp >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	core_writel(priv, val, offset);

	/* UDF_n_B5 (lower)	[31:24]	(addr[39:32])
	 * UDF_n_B4		[23:8]	(addr[63:48])
	 * UDF_n_B3 (upper)	[7:0]	(addr[79:72])
	 */
	reg = be32_to_cpu(ip6_addr[1]);
	val = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
	      ((reg >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	core_writel(priv, val, offset);

	/* UDF_n_B3 (lower)	[31:24]	(addr[71:64])
	 * UDF_n_B2		[23:8]	(addr[95:80])
	 * UDF_n_B1 (upper)	[7:0]	(addr[111:104])
	 */
	tmp = be32_to_cpu(ip6_addr[0]);
	val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
	      ((tmp >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	core_writel(priv, val, offset);

	/* UDF_n_B1 (lower)	[31:24]	(addr[103:96])
	 * UDF_n_B0		[23:8]	(addr[127:112])
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	reg = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	core_writel(priv, reg, offset);
}

static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv,
					      int port, u32 location)
{
	struct cfp_rule *rule;

	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
		if (rule->port == port && rule->fs.location == location)
			return rule;
	}

	return NULL;
}

static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
				struct ethtool_rx_flow_spec *fs)
{
	struct cfp_rule *rule = NULL;
	size_t fs_size = 0;
	int ret = 1;

	if (list_empty(&priv->cfp.rules_list))
		return ret;

	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
		ret = 1;
		if (rule->port != port)
			continue;

		if (rule->fs.flow_type != fs->flow_type ||
		    rule->fs.ring_cookie != fs->ring_cookie ||
		    rule->fs.h_ext.data[0] != fs->h_ext.data[0])
			continue;

		switch (fs->flow_type & ~FLOW_EXT) {
		case TCP_V6_FLOW:
		case UDP_V6_FLOW:
			fs_size = sizeof(struct ethtool_tcpip6_spec);
			break;
		case TCP_V4_FLOW:
		case UDP_V4_FLOW:
			fs_size = sizeof(struct ethtool_tcpip4_spec);
			break;
		default:
			continue;
		}

		ret = memcmp(&rule->fs.h_u, &fs->h_u, fs_size);
		ret |= memcmp(&rule->fs.m_u, &fs->m_u, fs_size);
		/* Compare VLAN TCI values as well */
		if (rule->fs.flow_type & FLOW_EXT) {
			ret |= rule->fs.h_ext.vlan_tci != fs->h_ext.vlan_tci;
			ret |= rule->fs.m_ext.vlan_tci != fs->m_ext.vlan_tci;
		}
		if (ret == 0)
			break;
	}

	return ret;
}

static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
{
	__be16 vlan_tci = 0, vlan_m_tci = htons(0xffff);
	struct ethtool_rx_flow_spec_input input = {};
	unsigned int slice_num, rule_index[2];
	const struct cfp_udf_layout *layout;
	struct ethtool_rx_flow_rule *flow;
	struct flow_match_ipv6_addrs ipv6;
	struct flow_match_ports ports;
	u8 ip_proto, ip_frag;
	int ret = 0;
	u8 num_udf;
	u32 reg;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V6_FLOW:
		ip_proto = IPPROTO_TCP;
		break;
	case UDP_V6_FLOW:
		ip_proto = IPPROTO_UDP;
		break;
	default:
		return -EINVAL;
	}

	ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);

	/* Extract VLAN TCI */
	if (fs->flow_type & FLOW_EXT) {
		vlan_tci = fs->h_ext.vlan_tci;
		vlan_m_tci = fs->m_ext.vlan_tci;
	}

	layout = &udf_tcpip6_layout;
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES)
		return -EINVAL;

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Negotiate two indexes, one for the second half which we are chained
	 * from, which is what we will return to user-space, and a second one
	 * which is used to store its first half. That first half does not
	 * allow any choice of placement, so it just needs to find the next
	 * available bit. We return the second half as fs->location because
	 * that helps with the rule lookup later on since the second half is
	 * chained from its first half, we can easily identify IPv6 CFP rules
	 * by looking whether they carry a CHAIN_ID.
	 *
	 * We also want the second half to have a lower rule_index than its
	 * first half because the HW search is by incrementing addresses.
	 */
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index[1] = find_first_zero_bit(priv->cfp.used,
						    priv->num_cfp_rules);
	else
		rule_index[1] = fs->location;
	if (rule_index[1] > bcm_sf2_cfp_rule_size(priv))
		return -ENOSPC;

	/* Flag it as used (cleared on error path) such that we can immediately
	 * obtain a second one to chain from.
	 */
	set_bit(rule_index[1], priv->cfp.used);

	rule_index[0] = find_first_zero_bit(priv->cfp.used,
					    priv->num_cfp_rules);
	if (rule_index[0] > bcm_sf2_cfp_rule_size(priv)) {
		ret = -ENOSPC;
		goto out_err;
	}

	input.fs = fs;
	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow)) {
		ret = PTR_ERR(flow);
		goto out_err;
	}

	flow_rule_match_ipv6_addrs(flow->rule, &ipv6);
	flow_rule_match_ports(flow->rule, &ports);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

	/* S-Tag status		[31:30]
	 * C-Tag status		[29:28]
	 * L2 framing		[27:26]
	 * L3 framing		[25:24]
	 * IP ToS		[23:16]
	 * IP proto		[15:08]
	 * IP Fragm		[7]
	 * Non 1st frag		[6]
	 * IP Authen		[5]
	 * TTL range		[4:3]
	 * PPPoE session	[2]
	 * Reserved		[1]
	 * UDF_Valid[8]		[0]
	 */
	reg = 1 << L3_FRAMING_SHIFT | ip_proto << IPPROTO_SHIFT |
		ip_frag << IP_FRAG_SHIFT | udf_upper_bits(num_udf);
	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));

	/* Mask with the specific layout for IPv6 packets including
	 * UDF_Valid[8]
	 */
	reg = layout->udfs[slice_num].mask_value | udf_upper_bits(num_udf);
	core_writel(priv, reg, CORE_CFP_MASK_PORT(6));

	/* Slice the IPv6 source address and port */
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->src.in6_u.u6_addr32,
			       ports.key->src, vlan_tci, slice_num,
			       udf_lower_bits(num_udf), false);
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->src.in6_u.u6_addr32,
			       ports.mask->src, vlan_m_tci, SLICE_NUM_MASK,
			       udf_lower_bits(num_udf), true);

	/* Insert into TCAM now because we need to insert a second rule */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index[0]);
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port, port_num,
				      queue_num, false);
	if (ret)
		goto out_err_flow_rule;

	/* Now deal with the second slice to chain this rule */
	slice_num = bcm_sf2_get_slice_number(layout, slice_num + 1);
	if (slice_num == UDF_NUM_SLICES) {
		ret = -EINVAL;
		goto out_err_flow_rule;
	}

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Chained rule, source port match is coming from the rule we are
	 * chained from.
	 */
	core_writel(priv, 0, CORE_CFP_DATA_PORT(7));
	core_writel(priv, 0, CORE_CFP_MASK_PORT(7));

	/* CHAIN ID		[31:24] chain to previous slice
	 * Reserved		[23:20]
	 * UDF_Valid[11:8]	[19:16]
	 * UDF_Valid[7:0]	[15:8]
	 * UDF_n_D11		[7:0]
	 */
	reg = rule_index[0] << 24 | udf_upper_bits(num_udf) << 16 |
		udf_lower_bits(num_udf) << 8;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));

	/* Mask all except chain ID, UDF Valid[8] and UDF Valid[7:0] */
	reg = XCESS_ADDR_MASK << 24 | udf_upper_bits(num_udf) << 16 |
		udf_lower_bits(num_udf) << 8;
	core_writel(priv, reg, CORE_CFP_MASK_PORT(6));

	bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->dst.in6_u.u6_addr32,
			       ports.key->dst, 0, slice_num,
			       udf_lower_bits(num_udf), false);
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->dst.in6_u.u6_addr32,
			       ports.key->dst, 0, SLICE_NUM_MASK,
			       udf_lower_bits(num_udf), true);

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[1]);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index[1]);
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now, set chain ID to
	 * the one we are chained to
	 */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port, port_num,
				      queue_num, true);
	if (ret)
		goto out_err_flow_rule;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg |= BIT(port);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* Flag the second half rule as being used now, return it as the
	 * location, and flag it as unique while dumping rules
	 */
	set_bit(rule_index[0], priv->cfp.used);
	set_bit(rule_index[1], priv->cfp.unique);
	fs->location = rule_index[1];

	return ret;

out_err_flow_rule:
	ethtool_rx_flow_rule_destroy(flow);
out_err:
	clear_bit(rule_index[1], priv->cfp.used);
	return ret;
}

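/* Illustration of the index negotiation above (hypothetical bitmap
 * state): if the user asked for location 5 and the next free bit after
 * it is 6, then rule_index[1] = 5 holds the second (chained) half that
 * is reported back as fs->location, while rule_index[0] = 6 holds the
 * first half carrying the source address match; DATA_PORT(6) of the
 * second half stores CHAIN_ID = 6 so deletion can find its companion.
 */
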
static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port,
				   struct ethtool_rx_flow_spec *fs)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
	__u64 ring_cookie = fs->ring_cookie;
	struct switchdev_obj_port_vlan vlan;
	unsigned int queue_num, port_num;
	u16 vid;
	int ret;

	/* This rule is a Wake-on-LAN filter and we must specifically
	 * target the CPU port in order for it to be working.
	 */
	if (ring_cookie == RX_CLS_FLOW_WAKE)
		ring_cookie = cpu_port * SF2_NUM_EGRESS_QUEUES;

	/* We do not support discarding packets, check that the
	 * destination port is enabled and that we are within the
	 * number of ports supported by the switch
	 */
	port_num = ring_cookie / SF2_NUM_EGRESS_QUEUES;

	if (ring_cookie == RX_CLS_FLOW_DISC ||
	    !(dsa_is_user_port(ds, port_num) ||
	      dsa_is_cpu_port(ds, port_num)) ||
	    port_num >= priv->hw_params.num_ports)
		return -EINVAL;

	/* If the rule is matching a particular VLAN, make sure that we honor
	 * the matching and have it tagged or untagged on the destination port,
	 * we do this on egress with a VLAN entry. The egress tagging attribute
	 * is expected to be provided in h_ext.data[1] bit 0. A 1 means untagged,
	 * a 0 means tagged.
	 */
	if (fs->flow_type & FLOW_EXT) {
		/* We cannot support matching multiple VLAN IDs yet */
		if ((be16_to_cpu(fs->m_ext.vlan_tci) & VLAN_VID_MASK) !=
		    VLAN_VID_MASK)
			return -EINVAL;

		vid = be16_to_cpu(fs->h_ext.vlan_tci) & VLAN_VID_MASK;
		vlan.vid = vid;
		if (be32_to_cpu(fs->h_ext.data[1]) & 1)
			vlan.flags = BRIDGE_VLAN_INFO_UNTAGGED;
		else
			vlan.flags = 0;

		ret = ds->ops->port_vlan_add(ds, port_num, &vlan, NULL);
		if (ret)
			return ret;
	}

	/* We have a small oddity where Port 6 just does not have a
	 * valid bit here (so we subtract by one).
	 */
	queue_num = ring_cookie % SF2_NUM_EGRESS_QUEUES;
	if (port_num >= 7)
		port_num -= 1;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		ret = bcm_sf2_cfp_ipv4_rule_set(priv, port, port_num,
						queue_num, fs);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		ret = bcm_sf2_cfp_ipv6_rule_set(priv, port, port_num,
						queue_num, fs);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

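/* Worked example for the ring_cookie decoding in bcm_sf2_cfp_rule_insert()
 * above, assuming SF2_NUM_EGRESS_QUEUES == 8 (its value in bcm_sf2.h):
 * a ring_cookie of 10 selects port_num = 10 / 8 = 1 and
 * queue_num = 10 % 8 = 2, i.e. egress queue 2 of switch port 1.
 */
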
static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
				struct ethtool_rx_flow_spec *fs)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct cfp_rule *rule = NULL;
	int ret = -EINVAL;

	/* Check for unsupported extensions */
	if (fs->flow_type & FLOW_MAC_EXT)
		return -EINVAL;

	if (fs->location != RX_CLS_LOC_ANY &&
	    fs->location > bcm_sf2_cfp_rule_size(priv))
		return -EINVAL;

	if ((fs->flow_type & FLOW_EXT) &&
	    !(ds->ops->port_vlan_add || ds->ops->port_vlan_del))
		return -EOPNOTSUPP;

	if (fs->location != RX_CLS_LOC_ANY &&
	    test_bit(fs->location, priv->cfp.used))
		return -EBUSY;

	ret = bcm_sf2_cfp_rule_cmp(priv, port, fs);
	if (ret == 0)
		return -EEXIST;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = bcm_sf2_cfp_rule_insert(ds, port, fs);
	if (ret) {
		kfree(rule);
		return ret;
	}

	rule->port = port;
	memcpy(&rule->fs, fs, sizeof(*fs));
	list_add_tail(&rule->next, &priv->cfp.rules_list);

	return ret;
}

static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
				    u32 loc, u32 *next_loc)
{
	int ret;
	u32 reg;

	/* Indicate which rule we want to read */
	bcm_sf2_cfp_rule_addr_set(priv, loc);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
	if (ret)
		return ret;

	/* Check if this is possibly an IPv6 rule that would
	 * indicate we need to delete its companion rule
	 * as well
	 */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
	if (next_loc)
		*next_loc = (reg >> 24) & CHAIN_ID_MASK;

	/* Clear its valid bits */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
	reg &= ~SLICE_VALID;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(0));

	/* Write back this entry into the TCAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret)
		return ret;

	clear_bit(loc, priv->cfp.used);
	clear_bit(loc, priv->cfp.unique);

	return 0;
}

static int bcm_sf2_cfp_rule_remove(struct bcm_sf2_priv *priv, int port,
				   u32 loc)
{
	u32 next_loc = 0;
	int ret;

	ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
	if (ret)
		return ret;

	/* If this was an IPv6 rule, delete its companion rule too */
	if (next_loc)
		ret = bcm_sf2_cfp_rule_del_one(priv, port, next_loc, NULL);

	return ret;
}

static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
{
	struct cfp_rule *rule;
	int ret;

	if (loc > bcm_sf2_cfp_rule_size(priv))
		return -EINVAL;

	/* Refuse deleting unused rules, and those that are not unique since
	 * that could leave IPv6 rules with one of the chained rule in the
	 * table.
	 */
	if (!test_bit(loc, priv->cfp.unique) || loc == 0)
		return -EINVAL;

	rule = bcm_sf2_cfp_rule_find(priv, port, loc);
	if (!rule)
		return -EINVAL;

	ret = bcm_sf2_cfp_rule_remove(priv, port, loc);
	if (ret)
		return ret;

	list_del(&rule->next);
	kfree(rule);

	return ret;
}

static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
{
	unsigned int i;

	for (i = 0; i < sizeof(flow->m_u); i++)
		flow->m_u.hdata[i] ^= 0xff;

	flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
	flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
	flow->m_ext.data[0] ^= cpu_to_be32(~0);
	flow->m_ext.data[1] ^= cpu_to_be32(~0);
}

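/* Example of the inversion above: ethtool reports masks in the opposite
 * polarity from what the driver stores internally, so every stored mask
 * byte is bit-flipped (0x00 <-> 0xff) before nfc->fs is handed back to
 * user-space by bcm_sf2_cfp_rule_get().
 */
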
static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
				struct ethtool_rxnfc *nfc)
{
	struct cfp_rule *rule;

	rule = bcm_sf2_cfp_rule_find(priv, port, nfc->fs.location);
	if (!rule)
		return -EINVAL;

	memcpy(&nfc->fs, &rule->fs, sizeof(rule->fs));

	bcm_sf2_invert_masks(&nfc->fs);

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);

	return 0;
}

/* We implement the search doing a TCAM search operation */
static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
				    int port, struct ethtool_rxnfc *nfc,
				    u32 *rule_locs)
{
	unsigned int index = 1, rules_cnt = 0;

	for_each_set_bit_from(index, priv->cfp.unique, priv->num_cfp_rules) {
		rule_locs[rules_cnt] = index;
		rules_cnt++;
	}

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);
	nfc->rule_cnt = rules_cnt;

	return 0;
}

int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct net_device *p = dsa_port_to_conduit(dsa_to_port(ds, port));
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret = 0;

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_GRXCLSRLCNT:
		/* Subtract the default, unusable rule */
		nfc->rule_cnt = bitmap_weight(priv->cfp.unique,
					      priv->num_cfp_rules) - 1;
		/* We support specifying rule locations */
		nfc->data |= RX_CLS_LOC_SPECIAL;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = bcm_sf2_cfp_rule_get(priv, port, nfc);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&priv->cfp.lock);

	if (ret)
		return ret;

	/* Pass up the commands to the attached master network device */
	if (p->ethtool_ops->get_rxnfc) {
		ret = p->ethtool_ops->get_rxnfc(p, nfc, rule_locs);
		if (ret == -EOPNOTSUPP)
			ret = 0;
	}

	return ret;
}

int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc)
{
	struct net_device *p = dsa_port_to_conduit(dsa_to_port(ds, port));
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret = 0;

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs);
		break;

	case ETHTOOL_SRXCLSRLDEL:
		ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&priv->cfp.lock);

	if (ret)
		return ret;

	/* Pass up the commands to the attached master network device.
	 * This can fail, so rollback the operation if we need to.
	 */
	if (p->ethtool_ops->set_rxnfc) {
		ret = p->ethtool_ops->set_rxnfc(p, nfc);
		if (ret && ret != -EOPNOTSUPP) {
			mutex_lock(&priv->cfp.lock);
			bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
			mutex_unlock(&priv->cfp.lock);
		} else {
			ret = 0;
		}
	}

	return ret;
}

int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg |= TCAM_RESET;
	core_writel(priv, reg, CORE_CFP_ACC);

	/* Poll for the reset bit to self-clear */
	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & TCAM_RESET))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}

void bcm_sf2_cfp_exit(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct cfp_rule *rule, *n;

	if (list_empty(&priv->cfp.rules_list))
		return;

	list_for_each_entry_safe_reverse(rule, n, &priv->cfp.rules_list, next)
		bcm_sf2_cfp_rule_del(priv, rule->port, rule->fs.location);
}

int bcm_sf2_cfp_resume(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct cfp_rule *rule;
	int ret = 0;
	u32 reg;

	if (list_empty(&priv->cfp.rules_list))
		return ret;

	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg &= ~CFP_EN_MAP_MASK;
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	ret = bcm_sf2_cfp_rst(priv);
	if (ret)
		return ret;

	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
		ret = bcm_sf2_cfp_rule_remove(priv, rule->port,
					      rule->fs.location);
		if (ret) {
			dev_err(ds->dev, "failed to remove rule\n");
			return ret;
		}

		ret = bcm_sf2_cfp_rule_insert(ds, rule->port, &rule->fs);
		if (ret) {
			dev_err(ds->dev, "failed to restore rule\n");
			return ret;
		}
	}

	return ret;
}

static const struct bcm_sf2_cfp_stat {
	unsigned int offset;
	unsigned int ram_loc;
	const char *name;
} bcm_sf2_cfp_stats[] = {
	{
		.offset = CORE_STAT_GREEN_CNTR,
		.ram_loc = GREEN_STAT_RAM,
		.name = "Green"
	},
	{
		.offset = CORE_STAT_YELLOW_CNTR,
		.ram_loc = YELLOW_STAT_RAM,
		.name = "Yellow"
	},
	{
		.offset = CORE_STAT_RED_CNTR,
		.ram_loc = RED_STAT_RAM,
		.name = "Red"
	},
};

void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port,
			     u32 stringset, uint8_t *data)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int i, j;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 1; i < priv->num_cfp_rules; i++)
		for (j = 0; j < ARRAY_SIZE(bcm_sf2_cfp_stats); j++)
			ethtool_sprintf(&data, "CFP%03d_%sCntr", i,
					bcm_sf2_cfp_stats[j].name);
}

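/* Example of the strings emitted above: with the three policer counters
 * per rule, rule index 1 yields "CFP001_GreenCntr", "CFP001_YellowCntr"
 * and "CFP001_RedCntr", continuing up to rule num_cfp_rules - 1.
 */
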
void bcm_sf2_cfp_get_ethtool_stats(struct dsa_switch *ds, int port,
				   u64 *data)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats);
	const struct bcm_sf2_cfp_stat *stat;
	unsigned int i, j, iter;
	struct cfp_rule *rule;
	int ret;

	mutex_lock(&priv->cfp.lock);
	for (i = 1; i < priv->num_cfp_rules; i++) {
		rule = bcm_sf2_cfp_rule_find(priv, port, i);
		if (!rule)
			continue;

		for (j = 0; j < s; j++) {
			stat = &bcm_sf2_cfp_stats[j];

			bcm_sf2_cfp_rule_addr_set(priv, i);
			ret = bcm_sf2_cfp_op(priv, stat->ram_loc | OP_SEL_READ);
			if (ret)
				continue;

			iter = (i - 1) * s + j;
			data[iter] = core_readl(priv, stat->offset);
		}
	}
	mutex_unlock(&priv->cfp.lock);
}

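/* Layout of data[] as filled above: counters are packed at
 * (i - 1) * s + j, so with s == 3 the Yellow counter (j == 1) of rule
 * i == 2 lands in data[4].
 */
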
int bcm_sf2_cfp_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	if (sset != ETH_SS_STATS)
		return 0;

	/* 3 counters per CFP rule */
	return (priv->num_cfp_rules - 1) * ARRAY_SIZE(bcm_sf2_cfp_stats);
}
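
/* Worked example for the count above (device-specific value assumed):
 * with num_cfp_rules = 256 and entry #0 reserved, this reports
 * 255 * 3 = 765 counters, matching the strings generated by
 * bcm_sf2_cfp_get_strings().
 */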