/*
 * Broadcom Starfighter 2 DSA switch CFP support
 *
 * Copyright (C) 2016, Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/list.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/netdevice.h>
#include <net/dsa.h>
#include <linux/bitmap.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
struct cfp_udf_slice_layout {
	u8 slices[UDFS_PER_SLICE];
	u32 mask_value;
	u32 base_offset;
};

struct cfp_udf_layout {
	struct cfp_udf_slice_layout udfs[UDF_NUM_SLICES];
};

static const u8 zero_slice[UDFS_PER_SLICE] = { };
/* UDF slices layout for a TCPv4/UDPv4 specification */
static const struct cfp_udf_layout udf_tcpip4_layout = {
	.udfs = {
		[1] = {
			.slices = {
				/* End of L2, byte offset 12, src IP[0:15] */
				CFG_UDF_EOL2 | 6,
				/* End of L2, byte offset 14, src IP[16:31] */
				CFG_UDF_EOL2 | 7,
				/* End of L2, byte offset 16, dst IP[0:15] */
				CFG_UDF_EOL2 | 8,
				/* End of L2, byte offset 18, dst IP[16:31] */
				CFG_UDF_EOL2 | 9,
				/* End of L3, byte offset 0, src port */
				CFG_UDF_EOL3 | 0,
				/* End of L3, byte offset 2, dst port */
				CFG_UDF_EOL3 | 1,
				0, 0, 0,
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_A_0_8_PORT_0 + UDF_SLICE_OFFSET,
		},
	},
};
/* UDF slices layout for a TCPv6/UDPv6 specification */
static const struct cfp_udf_layout udf_tcpip6_layout = {
	.udfs = {
		[0] = {
			.slices = {
				/* End of L2, byte offset 8, src IP[0:15] */
				CFG_UDF_EOL2 | 4,
				/* End of L2, byte offset 10, src IP[16:31] */
				CFG_UDF_EOL2 | 5,
				/* End of L2, byte offset 12, src IP[32:47] */
				CFG_UDF_EOL2 | 6,
				/* End of L2, byte offset 14, src IP[48:63] */
				CFG_UDF_EOL2 | 7,
				/* End of L2, byte offset 16, src IP[64:79] */
				CFG_UDF_EOL2 | 8,
				/* End of L2, byte offset 18, src IP[80:95] */
				CFG_UDF_EOL2 | 9,
				/* End of L2, byte offset 20, src IP[96:111] */
				CFG_UDF_EOL2 | 10,
				/* End of L2, byte offset 22, src IP[112:127] */
				CFG_UDF_EOL2 | 11,
				/* End of L3, byte offset 0, src port */
				CFG_UDF_EOL3 | 0,
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_B_0_8_PORT_0,
		},
		[3] = {
			.slices = {
				/* End of L2, byte offset 24, dst IP[0:15] */
				CFG_UDF_EOL2 | 12,
				/* End of L2, byte offset 26, dst IP[16:31] */
				CFG_UDF_EOL2 | 13,
				/* End of L2, byte offset 28, dst IP[32:47] */
				CFG_UDF_EOL2 | 14,
				/* End of L2, byte offset 30, dst IP[48:63] */
				CFG_UDF_EOL2 | 15,
				/* End of L2, byte offset 32, dst IP[64:79] */
				CFG_UDF_EOL2 | 16,
				/* End of L2, byte offset 34, dst IP[80:95] */
				CFG_UDF_EOL2 | 17,
				/* End of L2, byte offset 36, dst IP[96:111] */
				CFG_UDF_EOL2 | 18,
				/* End of L2, byte offset 38, dst IP[112:127] */
				CFG_UDF_EOL2 | 19,
				/* End of L3, byte offset 2, dst port */
				CFG_UDF_EOL3 | 1,
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_D_0_11_PORT_0,
		},
	},
};
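/* Reading the layouts above: each UDF extracts one 16-bit word from the
 * packet, anchored at the end of the L2 or L3 header; the low bits of each
 * entry appear to encode the anchor-relative offset in 16-bit words, so
 * CFG_UDF_EOL2 | 6 picks up bytes 12-13 past the Ethernet header (the start
 * of the IPv4 source address). One slice holds UDFS_PER_SLICE such words,
 * which is why a TCPv4/UDPv4 tuple fits in a single slice while the IPv6
 * tuple needs the two chained slices declared above.
 */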
static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
{
	unsigned int i, count = 0;

	for (i = 0; i < UDFS_PER_SLICE; i++) {
		if (layout[i] != 0)
			count++;
	}

	return count;
}
static inline u32 udf_upper_bits(unsigned int num_udf)
{
	return GENMASK(num_udf - 1, 0) >> (UDFS_PER_SLICE - 1);
}
static inline u32 udf_lower_bits(unsigned int num_udf)
{
	return (u8)GENMASK(num_udf - 1, 0);
}
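/* Worked example, assuming UDFS_PER_SLICE is 9 as the layouts above imply:
 * with num_udf = 9, GENMASK(8, 0) = 0x1ff, so udf_upper_bits() returns
 * 0x1ff >> 8 = 0x1 (the UDF_Valid[8] bit) and udf_lower_bits() returns
 * (u8)0x1ff = 0xff (UDF_Valid[7:0]). With num_udf = 6 they return 0 and
 * 0x3f respectively.
 */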
static unsigned int bcm_sf2_get_slice_number(const struct cfp_udf_layout *l,
					     unsigned int start)
{
	const struct cfp_udf_slice_layout *slice_layout;
	unsigned int slice_idx;

	for (slice_idx = start; slice_idx < UDF_NUM_SLICES; slice_idx++) {
		slice_layout = &l->udfs[slice_idx];
		if (memcmp(slice_layout->slices, zero_slice,
			   sizeof(zero_slice)))
			break;
	}

	return slice_idx;
}
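/* A return value of UDF_NUM_SLICES means "no usable slice from 'start'
 * onwards": the loop only breaks early when it finds a slice whose layout
 * differs from zero_slice, and callers are expected to check for that
 * sentinel.
 */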
static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv,
				const struct cfp_udf_layout *layout,
				unsigned int slice_num)
{
	u32 offset = layout->udfs[slice_num].base_offset;
	unsigned int i;

	for (i = 0; i < UDFS_PER_SLICE; i++)
		core_writel(priv, layout->udfs[slice_num].slices[i],
			    offset + i * 4);
}
static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
	reg |= OP_STR_DONE | op;
	core_writel(priv, reg, CORE_CFP_ACC);

	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & OP_STR_DONE))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}
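/* OP_STR_DONE doubles as a busy flag: writing it starts the operation and
 * the hardware is expected to clear it on completion, which is what the
 * polling loop above waits for (bailing out after ~1000 iterations).
 */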
static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
					     unsigned int addr)
{
	u32 reg;

	WARN_ON(addr >= priv->num_cfp_rules);

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
	reg |= addr << XCESS_ADDR_SHIFT;
	core_writel(priv, reg, CORE_CFP_ACC);
}
static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
{
	/* Entry #0 is reserved */
	return priv->num_cfp_rules - 1;
}
static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
				   unsigned int rule_index,
				   unsigned int port_num,
				   unsigned int queue_num,
				   bool fwd_map_change)
{
	int ret;
	u32 reg;

	/* Replace ARL derived destination with DST_MAP derived, define
	 * which port and queue this should be forwarded to.
	 */
	if (fwd_map_change)
		reg = CHANGE_FWRD_MAP_IB_REP_ARL |
		      BIT(port_num + DST_MAP_IB_SHIFT) |
		      CHANGE_TC | queue_num << NEW_TC_SHIFT;
	else
		reg = 0;

	core_writel(priv, reg, CORE_ACT_POL_DATA0);

	/* Set classification ID that needs to be put in Broadcom tag */
	core_writel(priv, rule_index << CHAIN_ID_SHIFT, CORE_ACT_POL_DATA1);

	core_writel(priv, 0, CORE_ACT_POL_DATA2);

	/* Configure policer RAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM);
	if (ret) {
		pr_err("Policer entry at %d failed\n", rule_index);
		return ret;
	}

	/* Disable the policer */
	core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0);

	/* Now the rate meter */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM);
	if (ret) {
		pr_err("Meter entry at %d failed\n", rule_index);
		return ret;
	}

	return 0;
}
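/* Net effect of the action/policer programming above: a TCAM hit overrides
 * the ARL-derived destination with the DST_MAP port and traffic class from
 * DATA0, tags the frame with a classification ID through DATA1, and leaves
 * the rate meter disabled so matching flows are steered but never policed.
 */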
static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
				   struct ethtool_tcpip4_spec *v4_spec,
				   unsigned int slice_num,
				   bool mask)
{
	u32 reg, offset;

	/* C-Tag		[31:24]
	 * UDF_n_A8		[23:8]
	 * UDF_n_A7		[7:0]
	 */
	reg = 0;
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	core_writel(priv, reg, offset);

	/* UDF_n_A7		[31:24]
	 * UDF_n_A6		[23:8]
	 * UDF_n_A5		[7:0]
	 */
	reg = be16_to_cpu(v4_spec->pdst) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	core_writel(priv, reg, offset);

	/* UDF_n_A5		[31:24]
	 * UDF_n_A4		[23:8]
	 * UDF_n_A3		[7:0]
	 */
	reg = (be16_to_cpu(v4_spec->pdst) & 0xff) << 24 |
	      (u32)be16_to_cpu(v4_spec->psrc) << 8 |
	      (be32_to_cpu(v4_spec->ip4dst) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	core_writel(priv, reg, offset);

	/* UDF_n_A3		[31:24]
	 * UDF_n_A2		[23:8]
	 * UDF_n_A1		[7:0]
	 */
	reg = (u32)(be32_to_cpu(v4_spec->ip4dst) & 0xff) << 24 |
	      (u32)(be32_to_cpu(v4_spec->ip4dst) >> 16) << 8 |
	      (be32_to_cpu(v4_spec->ip4src) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	core_writel(priv, reg, offset);

	/* UDF_n_A1		[31:24]
	 * UDF_n_A0		[23:8]
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	reg = (u32)(be32_to_cpu(v4_spec->ip4src) & 0xff) << 24 |
	      (u32)(be32_to_cpu(v4_spec->ip4src) >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	core_writel(priv, reg, offset);
}
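/* Worked example of the packing above, matching dst port 80 (0x0050): the
 * word written at offset 3 carries dst port bits [15:8] in its low byte
 * (0x00 here), while the word at offset 2 carries dst port bits [7:0]
 * (0x50) in bits [31:24], the full src port in [23:8], and dst IP bits
 * [15:8] in [7:0].
 */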
static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *v4_spec, *v4_m_spec;
	const struct cfp_udf_layout *layout;
	unsigned int slice_num, rule_index;
	u8 ip_proto, ip_frag;
	u8 num_udf;
	u32 reg;
	int ret;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ip_proto = IPPROTO_TCP;
		v4_spec = &fs->h_u.tcp_ip4_spec;
		v4_m_spec = &fs->m_u.tcp_ip4_spec;
		break;
	case UDP_V4_FLOW:
		ip_proto = IPPROTO_UDP;
		v4_spec = &fs->h_u.udp_ip4_spec;
		v4_m_spec = &fs->m_u.udp_ip4_spec;
		break;
	default:
		return -EINVAL;
	}

	ip_frag = be32_to_cpu(fs->m_ext.data[0]);

	/* Locate the first rule available */
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index = find_first_zero_bit(priv->cfp.used,
						 bcm_sf2_cfp_rule_size(priv));
	else
		rule_index = fs->location;

	layout = &udf_tcpip4_layout;
	/* We only use one UDF slice for now */
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES)
		return -EINVAL;

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

	/* S-Tag status		[31:30]
	 * C-Tag status		[29:28]
	 * L2 framing		[27:26]
	 * L3 framing		[25:24]
	 * IP ToS		[23:16]
	 * IP proto		[15:08]
	 * IP Fragm		[7]
	 * Non 1st frag		[6]
	 * IP Authen		[5]
	 * TTL range		[4:3]
	 * PPPoE session	[2]
	 * Reserved		[1]
	 * UDF_Valid[8]		[0]
	 */
	core_writel(priv, v4_spec->tos << IPTOS_SHIFT |
		    ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT |
		    udf_upper_bits(num_udf),
		    CORE_CFP_DATA_PORT(6));

	/* Mask with the specific layout for IPv4 packets */
	core_writel(priv, layout->udfs[slice_num].mask_value |
		    udf_upper_bits(num_udf), CORE_CFP_MASK_PORT(6));

	/* UDF_Valid[7:0]	[31:24]
	 * S-Tag		[23:8]
	 * C-Tag		[7:0]
	 */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_DATA_PORT(5));

	/* Mask all but valid UDFs */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));

	/* Program the match and the mask */
	bcm_sf2_cfp_slice_ipv4(priv, v4_spec, slice_num, false);
	bcm_sf2_cfp_slice_ipv4(priv, v4_m_spec, SLICE_NUM_MASK, true);

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index);
		return ret;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port_num,
				      queue_num, true);
	if (ret)
		return ret;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg |= BIT(port);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* Flag the rule as being used and return it */
	set_bit(rule_index, priv->cfp.used);
	set_bit(rule_index, priv->cfp.unique);
	fs->location = rule_index;

	return 0;
}
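/* A hypothetical ethtool invocation exercising this path (eth0 stands in
 * for the DSA user port, and "action" assumes SF2_NUM_EGRESS_QUEUES is 8,
 * i.e. action = port * 8 + queue):
 *
 *   ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.2 dst-port 80 action 16
 *
 * would steer matching frames to port 2, queue 0 via the rule programmed
 * above.
 */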
static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
				   const __be32 *ip6_addr, const __be16 port,
				   unsigned int slice_num,
				   bool mask)
{
	u32 reg, tmp, val, offset;

	/* C-Tag		[31:24]
	 * UDF_n_B8		[23:8]	(port)
	 * UDF_n_B7 (upper)	[7:0]	(addr[15:8])
	 */
	reg = be32_to_cpu(ip6_addr[3]);
	val = (u32)be16_to_cpu(port) << 8 | ((reg >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	core_writel(priv, val, offset);

	/* UDF_n_B7 (lower)	[31:24]	(addr[7:0])
	 * UDF_n_B6		[23:8]	(addr[31:16])
	 * UDF_n_B5 (upper)	[7:0]	(addr[47:40])
	 */
	tmp = be32_to_cpu(ip6_addr[2]);
	val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
	      ((tmp >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	core_writel(priv, val, offset);

	/* UDF_n_B5 (lower)	[31:24]	(addr[39:32])
	 * UDF_n_B4		[23:8]	(addr[63:48])
	 * UDF_n_B3 (upper)	[7:0]	(addr[79:72])
	 */
	reg = be32_to_cpu(ip6_addr[1]);
	val = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
	      ((reg >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	core_writel(priv, val, offset);

	/* UDF_n_B3 (lower)	[31:24]	(addr[71:64])
	 * UDF_n_B2		[23:8]	(addr[95:80])
	 * UDF_n_B1 (upper)	[7:0]	(addr[111:104])
	 */
	tmp = be32_to_cpu(ip6_addr[0]);
	val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
	      ((tmp >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	core_writel(priv, val, offset);

	/* UDF_n_B1 (lower)	[31:24]	(addr[103:96])
	 * UDF_n_B0		[23:8]	(addr[127:112])
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	reg = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	core_writel(priv, reg, offset);
}
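/* The arithmetic works out exactly (assuming UDFS_PER_SLICE is 9, as the
 * layouts above imply): 9 UDFs x 16 bits = 144 bits, which is precisely a
 * 128-bit IPv6 address plus a 16-bit port. Source and destination tuples
 * therefore each consume a full slice, and the two halves are chained
 * together in bcm_sf2_cfp_ipv6_rule_set() below.
 */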
static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip6_spec *v6_spec, *v6_m_spec;
	unsigned int slice_num, rule_index[2];
	const struct cfp_udf_layout *layout;
	u8 ip_proto, ip_frag;
	int ret = 0;
	u8 num_udf;
	u32 reg;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V6_FLOW:
		ip_proto = IPPROTO_TCP;
		v6_spec = &fs->h_u.tcp_ip6_spec;
		v6_m_spec = &fs->m_u.tcp_ip6_spec;
		break;
	case UDP_V6_FLOW:
		ip_proto = IPPROTO_UDP;
		v6_spec = &fs->h_u.udp_ip6_spec;
		v6_m_spec = &fs->m_u.udp_ip6_spec;
		break;
	default:
		return -EINVAL;
	}

	ip_frag = be32_to_cpu(fs->m_ext.data[0]);

	layout = &udf_tcpip6_layout;
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES)
		return -EINVAL;

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Negotiate two indexes, one for the second half which we are chained
	 * from, which is what we will return to user-space, and a second one
	 * which is used to store its first half. That first half does not
	 * allow any choice of placement, so it just needs to find the next
	 * available bit. We return the second half as fs->location because
	 * that helps with the rule lookup later on: since the second half is
	 * chained from its first half, we can easily identify IPv6 CFP rules
	 * by looking at whether they carry a CHAIN_ID.
	 *
	 * We also want the second half to have a lower rule_index than its
	 * first half because the HW search is by incrementing addresses.
	 */
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index[0] = find_first_zero_bit(priv->cfp.used,
						    bcm_sf2_cfp_rule_size(priv));
	else
		rule_index[0] = fs->location;

	/* Flag it as used (cleared on error path) such that we can immediately
	 * obtain a second one to chain from.
	 */
	set_bit(rule_index[0], priv->cfp.used);

	rule_index[1] = find_first_zero_bit(priv->cfp.used,
					    bcm_sf2_cfp_rule_size(priv));
	if (rule_index[1] > bcm_sf2_cfp_rule_size(priv)) {
		ret = -ENOSPC;
		goto out_err;
	}

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

	/* S-Tag status		[31:30]
	 * C-Tag status		[29:28]
	 * L2 framing		[27:26]
	 * L3 framing		[25:24]
	 * IP ToS		[23:16]
	 * IP proto		[15:08]
	 * IP Fragm		[7]
	 * Non 1st frag		[6]
	 * IP Authen		[5]
	 * TTL range		[4:3]
	 * PPPoE session	[2]
	 * Reserved		[1]
	 * UDF_Valid[8]		[0]
	 */
	reg = 1 << L3_FRAMING_SHIFT | ip_proto << IPPROTO_SHIFT |
		ip_frag << IP_FRAG_SHIFT | udf_upper_bits(num_udf);
	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));

	/* Mask with the specific layout for IPv6 packets including
	 * UDF_Valid[8]
	 */
	reg = layout->udfs[slice_num].mask_value | udf_upper_bits(num_udf);
	core_writel(priv, reg, CORE_CFP_MASK_PORT(6));

	/* UDF_Valid[7:0]	[31:24]
	 * S-Tag		[23:8]
	 * C-Tag		[7:0]
	 */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_DATA_PORT(5));

	/* Mask all but valid UDFs */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));

	/* Slice the IPv6 source address and port */
	bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6src, v6_spec->psrc,
			       slice_num, false);
	bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6src, v6_m_spec->psrc,
			       SLICE_NUM_MASK, true);

	/* Insert into TCAM now because we need to insert a second rule */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index[0]);
		goto out_err;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port_num,
				      queue_num, false);
	if (ret)
		goto out_err;

	/* Now deal with the second slice to chain this rule */
	slice_num = bcm_sf2_get_slice_number(layout, slice_num + 1);
	if (slice_num == UDF_NUM_SLICES) {
		ret = -EINVAL;
		goto out_err;
	}

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Chained rule, source port match is coming from the rule we are
	 * chained from.
	 */
	core_writel(priv, 0, CORE_CFP_DATA_PORT(7));
	core_writel(priv, 0, CORE_CFP_MASK_PORT(7));

	/*
	 * CHAIN ID		[31:24] chain to previous slice
	 * Reserved		[23:20]
	 * UDF_Valid[11:8]	[19:16]
	 * UDF_Valid[7:0]	[15:8]
	 * UDF_n_D11		[7:0]
	 */
	reg = rule_index[0] << 24 | udf_upper_bits(num_udf) << 16 |
		udf_lower_bits(num_udf) << 8;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));

	/* Mask all except chain ID, UDF Valid[8] and UDF Valid[7:0] */
	reg = XCESS_ADDR_MASK << 24 | udf_upper_bits(num_udf) << 16 |
		udf_lower_bits(num_udf) << 8;
	core_writel(priv, reg, CORE_CFP_MASK_PORT(6));

	/* Don't care */
	core_writel(priv, 0, CORE_CFP_DATA_PORT(5));

	/* Mask all */
	core_writel(priv, 0, CORE_CFP_MASK_PORT(5));

	/* Slice the IPv6 destination address and port */
	bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6dst, v6_spec->pdst, slice_num,
			       false);
	bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6dst, v6_m_spec->pdst,
			       SLICE_NUM_MASK, true);

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[1]);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index[1]);
		goto out_err;
	}

	/* Insert into Action and policer RAMs now, set chain ID to
	 * the one we are chained to
	 */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port_num,
				      queue_num, true);
	if (ret)
		goto out_err;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg |= BIT(port);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* Flag the second half rule as being used now, return it as the
	 * location, and flag it as unique while dumping rules
	 */
	set_bit(rule_index[1], priv->cfp.used);
	set_bit(rule_index[1], priv->cfp.unique);
	fs->location = rule_index[1];

	return ret;

out_err:
	clear_bit(rule_index[0], priv->cfp.used);
	return ret;
}
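/* A hypothetical IPv6 counterpart of the earlier example, with the same
 * action encoding assumption:
 *
 *   ethtool -N eth0 flow-type tcp6 dst-ip 2001:db8::1 dst-port 443 action 17
 *
 * Note that every IPv6 rule consumes two TCAM entries, and the location
 * reported back to user-space is the second (chained-from) half.
 */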
static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
				struct ethtool_rx_flow_spec *fs)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int queue_num, port_num;
	int ret = -EINVAL;

	/* Check for unsupported extensions */
	if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype ||
	     fs->m_ext.data[1]))
		return -EINVAL;

	if (fs->location != RX_CLS_LOC_ANY &&
	    test_bit(fs->location, priv->cfp.used))
		return -EBUSY;

	if (fs->location != RX_CLS_LOC_ANY &&
	    fs->location > bcm_sf2_cfp_rule_size(priv))
		return -EINVAL;

	/* We do not support discarding packets, check that the
	 * destination port is enabled and that we are within the
	 * number of ports supported by the switch
	 */
	port_num = fs->ring_cookie / SF2_NUM_EGRESS_QUEUES;

	if (fs->ring_cookie == RX_CLS_FLOW_DISC ||
	    !dsa_is_user_port(ds, port_num) ||
	    port_num >= priv->hw_params.num_ports)
		return -EINVAL;
	/*
	 * We have a small oddity where Port 6 just does not have a
	 * valid bit here (so we subtract by one).
	 */
	queue_num = fs->ring_cookie % SF2_NUM_EGRESS_QUEUES;
	if (port_num >= 7)
		port_num -= 1;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		ret = bcm_sf2_cfp_ipv4_rule_set(priv, port, port_num,
						queue_num, fs);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		ret = bcm_sf2_cfp_ipv6_rule_set(priv, port, port_num,
						queue_num, fs);
		break;
	default:
		break;
	}

	return ret;
}
static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
				    u32 loc, u32 *next_loc)
{
	int ret;
	u32 reg;

	/* Refuse deletion of unused rules, and the default reserved rule */
	if (!test_bit(loc, priv->cfp.used) || loc == 0)
		return -EINVAL;

	/* Indicate which rule we want to read */
	bcm_sf2_cfp_rule_addr_set(priv, loc);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
	if (ret)
		return ret;

	/* Check if this is possibly an IPv6 rule that would
	 * indicate we need to delete its companion rule
	 * as well
	 */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
	if (next_loc)
		*next_loc = (reg >> 24) & CHAIN_ID_MASK;

	/* Clear its valid bits */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
	reg &= ~SLICE_VALID;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(0));

	/* Write back this entry into the TCAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret)
		return ret;

	clear_bit(loc, priv->cfp.used);
	clear_bit(loc, priv->cfp.unique);

	return 0;
}
static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
				u32 loc)
{
	u32 next_loc = 0;
	int ret;

	ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
	if (ret)
		return ret;

	/* If this was an IPv6 rule, delete its companion rule too */
	if (next_loc)
		ret = bcm_sf2_cfp_rule_del_one(priv, port, next_loc, NULL);

	return ret;
}
static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
{
	unsigned int i;

	for (i = 0; i < sizeof(flow->m_u); i++)
		flow->m_u.hdata[i] ^= 0xff;

	flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
	flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
	flow->m_ext.data[0] ^= cpu_to_be32(~0);
	flow->m_ext.data[1] ^= cpu_to_be32(~0);
}
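/* The TCAM readback and the ethtool ABI use opposite mask polarities, so
 * every mask bit is flipped before bcm_sf2_cfp_rule_get() hands the rule
 * back to user-space.
 */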
static int bcm_sf2_cfp_unslice_ipv4(struct bcm_sf2_priv *priv,
				    struct ethtool_tcpip4_spec *v4_spec,
				    bool mask)
{
	u32 reg, offset, ipv4;
	u16 src_dst_port;

	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);

	reg = core_readl(priv, offset);
	/* src port [15:8] */
	src_dst_port = reg << 8;

	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);

	reg = core_readl(priv, offset);
	/* src port [7:0] */
	src_dst_port |= (reg >> 24);

	v4_spec->pdst = cpu_to_be16(src_dst_port);
	v4_spec->psrc = cpu_to_be16((u16)(reg >> 8));

	/* IPv4 dst [15:8] */
	ipv4 = (reg & 0xff) << 8;

	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);

	reg = core_readl(priv, offset);
	/* IPv4 dst [31:16] */
	ipv4 |= ((reg >> 8) & 0xffff) << 16;
	/* IPv4 dst [7:0] */
	ipv4 |= (reg >> 24) & 0xff;
	v4_spec->ip4dst = cpu_to_be32(ipv4);

	/* IPv4 src [15:8] */
	ipv4 = (reg & 0xff) << 8;

	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	reg = core_readl(priv, offset);

	/* Once the TCAM is programmed, the mask reflects the slice number
	 * being matched, don't bother checking it when reading back the
	 * mask spec
	 */
	if (!mask && !(reg & SLICE_VALID))
		return -EINVAL;

	/* IPv4 src [7:0] */
	ipv4 |= (reg >> 24) & 0xff;
	/* IPv4 src [31:16] */
	ipv4 |= ((reg >> 8) & 0xffff) << 16;
	v4_spec->ip4src = cpu_to_be32(ipv4);

	return 0;
}
static int bcm_sf2_cfp_ipv4_rule_get(struct bcm_sf2_priv *priv, int port,
				     struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *v4_spec = NULL, *v4_m_spec = NULL;
	u32 reg;
	int ret;

	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));

	switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
	case IPPROTO_TCP:
		fs->flow_type = TCP_V4_FLOW;
		v4_spec = &fs->h_u.tcp_ip4_spec;
		v4_m_spec = &fs->m_u.tcp_ip4_spec;
		break;
	case IPPROTO_UDP:
		fs->flow_type = UDP_V4_FLOW;
		v4_spec = &fs->h_u.udp_ip4_spec;
		v4_m_spec = &fs->m_u.udp_ip4_spec;
		break;
	default:
		return -EINVAL;
	}

	fs->m_ext.data[0] = cpu_to_be32((reg >> IP_FRAG_SHIFT) & 1);
	v4_spec->tos = (reg >> IPTOS_SHIFT) & IPTOS_MASK;

	ret = bcm_sf2_cfp_unslice_ipv4(priv, v4_spec, false);
	if (ret)
		return ret;

	return bcm_sf2_cfp_unslice_ipv4(priv, v4_m_spec, true);
}
static int bcm_sf2_cfp_unslice_ipv6(struct bcm_sf2_priv *priv,
				    __be32 *ip6_addr, __be16 *port,
				    bool mask)
{
	u32 reg, tmp, offset;

	/* C-Tag		[31:24]
	 * UDF_n_B8		[23:8]	(port)
	 * UDF_n_B7 (upper)	[7:0]	(addr[15:8])
	 */
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	reg = core_readl(priv, offset);
	*port = cpu_to_be32(reg) >> 8;
	tmp = (u32)(reg & 0xff) << 8;

	/* UDF_n_B7 (lower)	[31:24]	(addr[7:0])
	 * UDF_n_B6		[23:8]	(addr[31:16])
	 * UDF_n_B5 (upper)	[7:0]	(addr[47:40])
	 */
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	reg = core_readl(priv, offset);
	tmp |= (reg >> 24) & 0xff;
	tmp |= (u32)((reg >> 8) << 16);
	ip6_addr[3] = cpu_to_be32(tmp);
	tmp = (u32)(reg & 0xff) << 8;

	/* UDF_n_B5 (lower)	[31:24]	(addr[39:32])
	 * UDF_n_B4		[23:8]	(addr[63:48])
	 * UDF_n_B3 (upper)	[7:0]	(addr[79:72])
	 */
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	reg = core_readl(priv, offset);
	tmp |= (reg >> 24) & 0xff;
	tmp |= (u32)((reg >> 8) << 16);
	ip6_addr[2] = cpu_to_be32(tmp);
	tmp = (u32)(reg & 0xff) << 8;

	/* UDF_n_B3 (lower)	[31:24]	(addr[71:64])
	 * UDF_n_B2		[23:8]	(addr[95:80])
	 * UDF_n_B1 (upper)	[7:0]	(addr[111:104])
	 */
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	reg = core_readl(priv, offset);
	tmp |= (reg >> 24) & 0xff;
	tmp |= (u32)((reg >> 8) << 16);
	ip6_addr[1] = cpu_to_be32(tmp);
	tmp = (u32)(reg & 0xff) << 8;

	/* UDF_n_B1 (lower)	[31:24]	(addr[103:96])
	 * UDF_n_B0		[23:8]	(addr[127:112])
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	reg = core_readl(priv, offset);
	tmp |= (reg >> 24) & 0xff;
	tmp |= (u32)((reg >> 8) << 16);
	ip6_addr[0] = cpu_to_be32(tmp);

	if (!mask && !(reg & SLICE_VALID))
		return -EINVAL;

	return 0;
}
static int bcm_sf2_cfp_ipv6_rule_get(struct bcm_sf2_priv *priv, int port,
				     struct ethtool_rx_flow_spec *fs,
				     u32 next_loc)
{
	struct ethtool_tcpip6_spec *v6_spec = NULL, *v6_m_spec = NULL;
	u32 reg;
	int ret;

	/* UDPv6 and TCPv6 both use ethtool_tcpip6_spec, so we can safely
	 * use tcp_ip6_spec from the union here.
	 */
	v6_spec = &fs->h_u.tcp_ip6_spec;
	v6_m_spec = &fs->m_u.tcp_ip6_spec;

	/* Read the second half first */
	ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6dst, &v6_spec->pdst,
				       false);
	if (ret)
		return ret;

	ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6dst,
				       &v6_m_spec->pdst, true);
	if (ret)
		return ret;

	/* Read last to avoid next entry clobbering the results during search
	 * operations. We would not have the port enabled for this rule, so
	 * don't bother checking it.
	 */
	(void)core_readl(priv, CORE_CFP_DATA_PORT(7));

	/* The slice number is valid, so read the rule we are chained from now
	 * which is our first half.
	 */
	bcm_sf2_cfp_rule_addr_set(priv, next_loc);
	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
	if (ret)
		return ret;

	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));

	switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
	case IPPROTO_TCP:
		fs->flow_type = TCP_V6_FLOW;
		break;
	case IPPROTO_UDP:
		fs->flow_type = UDP_V6_FLOW;
		break;
	default:
		return -EINVAL;
	}

	ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6src, &v6_spec->psrc,
				       false);
	if (ret)
		return ret;

	return bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6src,
					&v6_m_spec->psrc, true);
}
static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
				struct ethtool_rxnfc *nfc)
{
	u32 reg, ipv4_or_chain_id;
	unsigned int queue_num;
	int ret;

	bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM);
	if (ret)
		return ret;

	reg = core_readl(priv, CORE_ACT_POL_DATA0);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
	if (ret)
		return ret;

	/* Extract the destination port */
	nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) &
				  DST_MAP_IB_MASK) - 1;

	/* There is no Port 6, so we compensate for that here */
	if (nfc->fs.ring_cookie >= 6)
		nfc->fs.ring_cookie++;
	nfc->fs.ring_cookie *= SF2_NUM_EGRESS_QUEUES;

	/* Extract the destination queue */
	queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK;
	nfc->fs.ring_cookie += queue_num;

	/* Extract the L3_FRAMING or CHAIN_ID */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));

	/* With IPv6 rules this would contain a non-zero chain ID since
	 * we reserve entry 0 and it cannot be used. So if we read 0 here
	 * this means an IPv4 rule.
	 */
	ipv4_or_chain_id = (reg >> L3_FRAMING_SHIFT) & 0xff;
	if (ipv4_or_chain_id == 0)
		ret = bcm_sf2_cfp_ipv4_rule_get(priv, port, &nfc->fs);
	else
		ret = bcm_sf2_cfp_ipv6_rule_get(priv, port, &nfc->fs,
						ipv4_or_chain_id);
	if (ret)
		return ret;

	/* Read last to avoid next entry clobbering the results during search
	 * operations
	 */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(7));
	if (!(reg & 1 << port))
		return -EINVAL;

	bcm_sf2_invert_masks(&nfc->fs);

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);

	return 0;
}
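/* The decoding above is the inverse of the ring_cookie encoding on the set
 * path: recover the destination port from the DST_MAP bit position
 * (re-inserting the non-existent Port 6), scale by the number of egress
 * queues, then add back the queue from the NEW_TC field.
 */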
/* We implement the search by walking the bitmap of unique rules */
static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
				    int port, struct ethtool_rxnfc *nfc,
				    u32 *rule_locs)
{
	unsigned int index = 1, rules_cnt = 0;

	for_each_set_bit_from(index, priv->cfp.unique, priv->num_cfp_rules) {
		rule_locs[rules_cnt] = index;
		rules_cnt++;
	}

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);
	nfc->rule_cnt = rules_cnt;

	return 0;
}
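/* index starts at 1 because TCAM entry #0 is the reserved default rule and
 * is never reported. Only the second half of a chained IPv6 rule has its
 * bit set in the unique bitmap, so each IPv6 rule is listed exactly once.
 */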
int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret = 0;

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_GRXCLSRLCNT:
		/* Subtract the default, unusable rule */
		nfc->rule_cnt = bitmap_weight(priv->cfp.unique,
					      priv->num_cfp_rules) - 1;
		/* We support specifying rule locations */
		nfc->data |= RX_CLS_LOC_SPECIAL;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = bcm_sf2_cfp_rule_get(priv, port, nfc);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&priv->cfp.lock);

	return ret;
}
int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret = 0;

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&priv->cfp.lock);

	return ret;
}
int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg |= TCAM_RESET;
	core_writel(priv, reg, CORE_CFP_ACC);

	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & TCAM_RESET))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}