/*
 * Broadcom Starfighter 2 DSA switch CFP support
 *
 * Copyright (C) 2016, Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/list.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/netdevice.h>
#include <net/dsa.h>
#include <linux/bitmap.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"

struct cfp_udf_slice_layout {
	u8 slices[UDFS_PER_SLICE];
	u32 mask_value;
	u32 base_offset;
};

struct cfp_udf_layout {
	struct cfp_udf_slice_layout udfs[UDF_NUM_SLICES];
};

static const u8 zero_slice[UDFS_PER_SLICE] = { };

/* UDF slices layout for a TCPv4/UDPv4 specification */
static const struct cfp_udf_layout udf_tcpip4_layout = {
	.udfs = {
		[1] = {
			.slices = {
				/* End of L2, byte offset 12, src IP[0:15] */
				CFG_UDF_EOL2 | 6,
				/* End of L2, byte offset 14, src IP[16:31] */
				CFG_UDF_EOL2 | 7,
				/* End of L2, byte offset 16, dst IP[0:15] */
				CFG_UDF_EOL2 | 8,
				/* End of L2, byte offset 18, dst IP[16:31] */
				CFG_UDF_EOL2 | 9,
				/* End of L3, byte offset 0, src port */
				CFG_UDF_EOL3 | 0,
				/* End of L3, byte offset 2, dst port */
				CFG_UDF_EOL3 | 1,
				0, 0, 0
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_A_0_8_PORT_0 + UDF_SLICE_OFFSET,
		},
	},
};

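/* A worked reading of the layout above, assuming CFG_UDF_EOL2 means
 * "relative to the end of the L2 header" and the low bits count
 * 16-bit words: CFG_UDF_EOL2 | 6 extracts the halfword at byte offset
 * 6 * 2 = 12 past the Ethernet header, which is src IP[0:15] of an
 * IPv4 header, matching the comment on that entry.
 */
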
/* UDF slices layout for a TCPv6/UDPv6 specification */
static const struct cfp_udf_layout udf_tcpip6_layout = {
	.udfs = {
		[0] = {
			.slices = {
				/* End of L2, byte offset 8, src IP[0:15] */
				CFG_UDF_EOL2 | 4,
				/* End of L2, byte offset 10, src IP[16:31] */
				CFG_UDF_EOL2 | 5,
				/* End of L2, byte offset 12, src IP[32:47] */
				CFG_UDF_EOL2 | 6,
				/* End of L2, byte offset 14, src IP[48:63] */
				CFG_UDF_EOL2 | 7,
				/* End of L2, byte offset 16, src IP[64:79] */
				CFG_UDF_EOL2 | 8,
				/* End of L2, byte offset 18, src IP[80:95] */
				CFG_UDF_EOL2 | 9,
				/* End of L2, byte offset 20, src IP[96:111] */
				CFG_UDF_EOL2 | 10,
				/* End of L2, byte offset 22, src IP[112:127] */
				CFG_UDF_EOL2 | 11,
				/* End of L3, byte offset 0, src port */
				CFG_UDF_EOL3 | 0,
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_B_0_8_PORT_0,
		},
		[3] = {
			.slices = {
				/* End of L2, byte offset 24, dst IP[0:15] */
				CFG_UDF_EOL2 | 12,
				/* End of L2, byte offset 26, dst IP[16:31] */
				CFG_UDF_EOL2 | 13,
				/* End of L2, byte offset 28, dst IP[32:47] */
				CFG_UDF_EOL2 | 14,
				/* End of L2, byte offset 30, dst IP[48:63] */
				CFG_UDF_EOL2 | 15,
				/* End of L2, byte offset 32, dst IP[64:79] */
				CFG_UDF_EOL2 | 16,
				/* End of L2, byte offset 34, dst IP[80:95] */
				CFG_UDF_EOL2 | 17,
				/* End of L2, byte offset 36, dst IP[96:111] */
				CFG_UDF_EOL2 | 18,
				/* End of L2, byte offset 38, dst IP[112:127] */
				CFG_UDF_EOL2 | 19,
				/* End of L3, byte offset 2, dst port */
				CFG_UDF_EOL3 | 1,
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_D_0_11_PORT_0,
		},
	},
};

static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
{
	unsigned int i, count = 0;

	for (i = 0; i < UDFS_PER_SLICE; i++) {
		if (layout[i] != 0)
			count++;
	}

	return count;
}

static inline u32 udf_upper_bits(unsigned int num_udf)
{
	return GENMASK(num_udf - 1, 0) >> (UDFS_PER_SLICE - 1);
}

static inline u32 udf_lower_bits(unsigned int num_udf)
{
	return (u8)GENMASK(num_udf - 1, 0);
}

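/* A quick sanity check of the split, assuming UDFS_PER_SLICE is 9:
 * with num_udf == 9, GENMASK(8, 0) is 0x1ff, so udf_upper_bits()
 * yields 0x1ff >> 8 = 0x1 (the UDF_Valid[8] bit) while
 * udf_lower_bits() yields (u8)0x1ff = 0xff (the UDF_Valid[7:0] byte).
 */
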
static unsigned int bcm_sf2_get_slice_number(const struct cfp_udf_layout *l,
					     unsigned int start)
{
	const struct cfp_udf_slice_layout *slice_layout;
	unsigned int slice_idx;

	for (slice_idx = start; slice_idx < UDF_NUM_SLICES; slice_idx++) {
		slice_layout = &l->udfs[slice_idx];
		if (memcmp(slice_layout->slices, zero_slice,
			   sizeof(zero_slice)))
			break;
	}

	return slice_idx;
}

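/* For udf_tcpip6_layout above, a start of 0 finds slice 0 (the source
 * half) and a subsequent search from slice_num + 1 finds slice 3 (the
 * destination half), which is how the IPv6 rule programming below
 * locates the slice it chains to.
 */
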
static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv,
				const struct cfp_udf_layout *layout,
				unsigned int slice_num)
{
	u32 offset = layout->udfs[slice_num].base_offset;
	unsigned int i;

	for (i = 0; i < UDFS_PER_SLICE; i++)
		core_writel(priv, layout->udfs[slice_num].slices[i],
			    offset + i * 4);
}

static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
	reg |= OP_STR_DONE | op;
	core_writel(priv, reg, CORE_CFP_ACC);

	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & OP_STR_DONE))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}

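/* Typical usage pairs an address selection with an operation, e.g. to
 * read a TCAM entry back (a sketch of what the helpers below do):
 *
 *	bcm_sf2_cfp_rule_addr_set(priv, loc);
 *	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
 *
 * followed by core_readl() of the CORE_CFP_DATA_PORT() registers.
 */
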
static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
					     unsigned int addr)
{
	u32 reg;

	WARN_ON(addr >= priv->num_cfp_rules);

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
	reg |= addr << XCESS_ADDR_SHIFT;
	core_writel(priv, reg, CORE_CFP_ACC);
}

static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
{
	/* Entry #0 is reserved */
	return priv->num_cfp_rules - 1;
}

static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
				   unsigned int rule_index,
				   unsigned int port_num,
				   unsigned int queue_num,
				   bool fwd_map_change)
{
	int ret;
	u32 reg;

	/* Replace ARL derived destination with DST_MAP derived, define
	 * which port and queue this should be forwarded to.
	 */
	if (fwd_map_change)
		reg = CHANGE_FWRD_MAP_IB_REP_ARL |
		      BIT(port_num + DST_MAP_IB_SHIFT) |
		      CHANGE_TC | queue_num << NEW_TC_SHIFT;
	else
		reg = 0;

	core_writel(priv, reg, CORE_ACT_POL_DATA0);

	/* Set classification ID that needs to be put in Broadcom tag */
	core_writel(priv, rule_index << CHAIN_ID_SHIFT, CORE_ACT_POL_DATA1);

	core_writel(priv, 0, CORE_ACT_POL_DATA2);

	/* Configure policer RAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM);
	if (ret) {
		pr_err("Policer entry at %d failed\n", rule_index);
		return ret;
	}

	/* Disable the policer */
	core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0);

	/* Now the rate meter */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM);
	if (ret) {
		pr_err("Meter entry at %d failed\n", rule_index);
		return ret;
	}

	return 0;
}

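/* Note that the policer is always programmed to POLICER_MODE_DISABLE
 * above, so a matching rule classifies and redirects traffic but is
 * never rate-limited.
 */
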
static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
				   struct ethtool_tcpip4_spec *v4_spec,
				   unsigned int slice_num,
				   bool mask)
{
	u32 reg, offset;

	/* C-Tag		[31:24]
	 * UDF_n_A8		[23:8]
	 * UDF_n_A7		[7:0]
	 */
	reg = 0;
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	core_writel(priv, reg, offset);

	/* UDF_n_A7		[31:24]
	 * UDF_n_A6		[23:8]
	 * UDF_n_A5		[7:0]
	 */
	reg = be16_to_cpu(v4_spec->pdst) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	core_writel(priv, reg, offset);

	/* UDF_n_A5		[31:24]
	 * UDF_n_A4		[23:8]
	 * UDF_n_A3		[7:0]
	 */
	reg = (be16_to_cpu(v4_spec->pdst) & 0xff) << 24 |
	      (u32)be16_to_cpu(v4_spec->psrc) << 8 |
	      (be32_to_cpu(v4_spec->ip4dst) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	core_writel(priv, reg, offset);

	/* UDF_n_A3		[31:24]
	 * UDF_n_A2		[23:8]
	 * UDF_n_A1		[7:0]
	 */
	reg = (u32)(be32_to_cpu(v4_spec->ip4dst) & 0xff) << 24 |
	      (u32)(be32_to_cpu(v4_spec->ip4dst) >> 16) << 8 |
	      (be32_to_cpu(v4_spec->ip4src) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	core_writel(priv, reg, offset);

	/* UDF_n_A1		[31:24]
	 * UDF_n_A0		[23:8]
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	reg = (u32)(be32_to_cpu(v4_spec->ip4src) & 0xff) << 24 |
	      (u32)(be32_to_cpu(v4_spec->ip4src) >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	core_writel(priv, reg, offset);
}

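/* Byte-packing example for the slicing above: matching dst port 80
 * (0x0050) spreads pdst across two words: the PORT(3) word gets
 * 0x0050 >> 8 = 0x00 in its low byte, and the PORT(2) word gets
 * (0x0050 & 0xff) << 24 = 0x50000000, mirroring the 16-bit UDF
 * granularity of the layout.
 */
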
static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *v4_spec, *v4_m_spec;
	const struct cfp_udf_layout *layout;
	unsigned int slice_num, rule_index;
	u8 ip_proto, ip_frag;
	u8 num_udf;
	u32 reg;
	int ret;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ip_proto = IPPROTO_TCP;
		v4_spec = &fs->h_u.tcp_ip4_spec;
		v4_m_spec = &fs->m_u.tcp_ip4_spec;
		break;
	case UDP_V4_FLOW:
		ip_proto = IPPROTO_UDP;
		v4_spec = &fs->h_u.udp_ip4_spec;
		v4_m_spec = &fs->m_u.udp_ip4_spec;
		break;
	default:
		return -EINVAL;
	}

	ip_frag = be32_to_cpu(fs->m_ext.data[0]);

	/* Locate the first rule available */
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index = find_first_zero_bit(priv->cfp.used,
						 priv->num_cfp_rules);
	else
		rule_index = fs->location;

	if (rule_index > bcm_sf2_cfp_rule_size(priv))
		return -ENOSPC;

	layout = &udf_tcpip4_layout;
	/* We only use one UDF slice for now */
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES)
		return -EINVAL;

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

	/* S-Tag status		[31:30]
	 * C-Tag status		[29:28]
	 * L2 framing		[27:26]
	 * L3 framing		[25:24]
	 * IP ToS		[23:16]
	 * IP proto		[15:08]
	 * IP Fragm		[7]
	 * Non 1st frag		[6]
	 * IP Authen		[5]
	 * TTL range		[4:3]
	 * PPPoE session	[2]
	 * Reserved		[1]
	 * UDF_Valid[8]		[0]
	 */
	core_writel(priv, v4_spec->tos << IPTOS_SHIFT |
		    ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT |
		    udf_upper_bits(num_udf),
		    CORE_CFP_DATA_PORT(6));

	/* Mask with the specific layout for IPv4 packets */
	core_writel(priv, layout->udfs[slice_num].mask_value |
		    udf_upper_bits(num_udf), CORE_CFP_MASK_PORT(6));

	/* UDF_Valid[7:0]	[31:24]
	 * S-Tag		[23:8]
	 * C-Tag		[7:0]
	 */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_DATA_PORT(5));

	/* Mask all but valid UDFs */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));

	/* Program the match and the mask */
	bcm_sf2_cfp_slice_ipv4(priv, v4_spec, slice_num, false);
	bcm_sf2_cfp_slice_ipv4(priv, v4_m_spec, SLICE_NUM_MASK, true);

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index);
		return ret;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port_num,
				      queue_num, true);
	if (ret)
		return ret;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg |= BIT(port);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* Flag the rule as being used and return it */
	set_bit(rule_index, priv->cfp.used);
	set_bit(rule_index, priv->cfp.unique);
	fs->location = rule_index;

	return 0;
}

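/* A rule like the above would typically be installed from user-space
 * with something along the lines of (hypothetical interface name and
 * addresses):
 *
 *	ethtool -N eth0 flow-type tcp4 src-ip 192.168.1.1 \
 *		dst-ip 192.168.1.2 dst-port 80 action 2
 *
 * where "action" becomes fs->ring_cookie and is decomposed into a
 * destination port and queue by bcm_sf2_cfp_rule_set() below.
 */
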
static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
				   const __be32 *ip6_addr, const __be16 port,
				   unsigned int slice_num,
				   bool mask)
{
	u32 reg, tmp, val, offset;

	/* C-Tag		[31:24]
	 * UDF_n_B8		[23:8]	(port)
	 * UDF_n_B7 (upper)	[7:0]	(addr[15:8])
	 */
	reg = be32_to_cpu(ip6_addr[3]);
	val = (u32)be16_to_cpu(port) << 8 | ((reg >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	core_writel(priv, val, offset);

	/* UDF_n_B7 (lower)	[31:24]	(addr[7:0])
	 * UDF_n_B6		[23:8]	(addr[31:16])
	 * UDF_n_B5 (upper)	[7:0]	(addr[47:40])
	 */
	tmp = be32_to_cpu(ip6_addr[2]);
	val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
	      ((tmp >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	core_writel(priv, val, offset);

	/* UDF_n_B5 (lower)	[31:24]	(addr[39:32])
	 * UDF_n_B4		[23:8]	(addr[63:48])
	 * UDF_n_B3 (upper)	[7:0]	(addr[79:72])
	 */
	reg = be32_to_cpu(ip6_addr[1]);
	val = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
	      ((reg >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	core_writel(priv, val, offset);

	/* UDF_n_B3 (lower)	[31:24]	(addr[71:64])
	 * UDF_n_B2		[23:8]	(addr[95:80])
	 * UDF_n_B1 (upper)	[7:0]	(addr[111:104])
	 */
	tmp = be32_to_cpu(ip6_addr[0]);
	val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
	      ((tmp >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	core_writel(priv, val, offset);

	/* UDF_n_B1 (lower)	[31:24]	(addr[103:96])
	 * UDF_n_B0		[23:8]	(addr[127:112])
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	reg = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	core_writel(priv, reg, offset);
}

static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip6_spec *v6_spec, *v6_m_spec;
	unsigned int slice_num, rule_index[2];
	const struct cfp_udf_layout *layout;
	u8 ip_proto, ip_frag;
	int ret = 0;
	u8 num_udf;
	u32 reg;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V6_FLOW:
		ip_proto = IPPROTO_TCP;
		v6_spec = &fs->h_u.tcp_ip6_spec;
		v6_m_spec = &fs->m_u.tcp_ip6_spec;
		break;
	case UDP_V6_FLOW:
		ip_proto = IPPROTO_UDP;
		v6_spec = &fs->h_u.udp_ip6_spec;
		v6_m_spec = &fs->m_u.udp_ip6_spec;
		break;
	default:
		return -EINVAL;
	}

	ip_frag = be32_to_cpu(fs->m_ext.data[0]);

	layout = &udf_tcpip6_layout;
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES)
		return -EINVAL;

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Negotiate two indexes, one for the second half which we are chained
	 * from, which is what we will return to user-space, and a second one
	 * which is used to store its first half. That first half does not
	 * allow any choice of placement, so it just needs to find the next
	 * available bit. We return the second half as fs->location because
	 * that helps with the rule lookup later on since the second half is
	 * chained from its first half, we can easily identify IPv6 CFP rules
	 * by looking whether they carry a CHAIN_ID.
	 *
	 * We also want the second half to have a lower rule_index than its
	 * first half because the HW search is by incrementing addresses.
	 */
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index[1] = find_first_zero_bit(priv->cfp.used,
						    priv->num_cfp_rules);
	else
		rule_index[1] = fs->location;
	if (rule_index[1] > bcm_sf2_cfp_rule_size(priv))
		return -ENOSPC;

	/* Flag it as used (cleared on error path) such that we can immediately
	 * obtain a second one to chain from.
	 */
	set_bit(rule_index[1], priv->cfp.used);

	rule_index[0] = find_first_zero_bit(priv->cfp.used,
					    priv->num_cfp_rules);
	if (rule_index[0] > bcm_sf2_cfp_rule_size(priv)) {
		ret = -ENOSPC;
		goto out_err;
	}

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

	/* S-Tag status		[31:30]
	 * C-Tag status		[29:28]
	 * L2 framing		[27:26]
	 * L3 framing		[25:24]
	 * IP ToS		[23:16]
	 * IP proto		[15:08]
	 * IP Fragm		[7]
	 * Non 1st frag		[6]
	 * IP Authen		[5]
	 * TTL range		[4:3]
	 * PPPoE session	[2]
	 * Reserved		[1]
	 * UDF_Valid[8]		[0]
	 */
	reg = 1 << L3_FRAMING_SHIFT | ip_proto << IPPROTO_SHIFT |
	      ip_frag << IP_FRAG_SHIFT | udf_upper_bits(num_udf);
	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));

	/* Mask with the specific layout for IPv6 packets including
	 * UDF_Valid[8]
	 */
	reg = layout->udfs[slice_num].mask_value | udf_upper_bits(num_udf);
	core_writel(priv, reg, CORE_CFP_MASK_PORT(6));

	/* UDF_Valid[7:0]	[31:24]
	 * S-Tag		[23:8]
	 * C-Tag		[7:0]
	 */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_DATA_PORT(5));

	/* Mask all but valid UDFs */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));

	/* Slice the IPv6 source address and port */
	bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6src, v6_spec->psrc,
			       slice_num, false);
	bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6src, v6_m_spec->psrc,
			       SLICE_NUM_MASK, true);

	/* Insert into TCAM now because we need to insert a second rule */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index[0]);
		goto out_err;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port_num,
				      queue_num, false);
	if (ret)
		goto out_err;

	/* Now deal with the second slice to chain this rule */
	slice_num = bcm_sf2_get_slice_number(layout, slice_num + 1);
	if (slice_num == UDF_NUM_SLICES) {
		ret = -EINVAL;
		goto out_err;
	}

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Chained rule, source port match is coming from the rule we are
	 * chained from.
	 */
	core_writel(priv, 0, CORE_CFP_DATA_PORT(7));
	core_writel(priv, 0, CORE_CFP_MASK_PORT(7));

	/*
	 * CHAIN ID		[31:24] chain to previous slice
	 * Reserved		[23:20]
	 * UDF_Valid[11:8]	[19:16]
	 * UDF_Valid[7:0]	[15:8]
	 * UDF_n_D11		[7:0]
	 */
	reg = rule_index[0] << 24 | udf_upper_bits(num_udf) << 16 |
	      udf_lower_bits(num_udf) << 8;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));

	/* Mask all except chain ID, UDF Valid[8] and UDF Valid[7:0] */
	reg = XCESS_ADDR_MASK << 24 | udf_upper_bits(num_udf) << 16 |
	      udf_lower_bits(num_udf) << 8;
	core_writel(priv, reg, CORE_CFP_MASK_PORT(6));

	/* Don't care */
	core_writel(priv, 0, CORE_CFP_DATA_PORT(5));

	/* Mask all */
	core_writel(priv, 0, CORE_CFP_MASK_PORT(5));

	bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6dst, v6_spec->pdst, slice_num,
			       false);
	bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6dst, v6_m_spec->pdst,
			       SLICE_NUM_MASK, true);

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[1]);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index[1]);
		goto out_err;
	}

	/* Insert into Action and policer RAMs now, set chain ID to
	 * the one we are chained to
	 */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port_num,
				      queue_num, true);
	if (ret)
		goto out_err;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg |= BIT(port);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* Flag the second half rule as being used now, return it as the
	 * location, and flag it as unique while dumping rules
	 */
	set_bit(rule_index[0], priv->cfp.used);
	set_bit(rule_index[1], priv->cfp.unique);
	fs->location = rule_index[1];

	return ret;

out_err:
	clear_bit(rule_index[1], priv->cfp.used);
	return ret;
}

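/* The net effect of the pairing above: user-space only ever sees
 * rule_index[1], and an IPv6 rule remains recognizable later because
 * its TCAM entry carries the CHAIN_ID of its first half (see
 * bcm_sf2_cfp_rule_del_one() and bcm_sf2_cfp_rule_get()).
 */
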
static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
				struct ethtool_rx_flow_spec *fs)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	s8 cpu_port = ds->ports[port].cpu_dp->index;
	__u64 ring_cookie = fs->ring_cookie;
	unsigned int queue_num, port_num;
	int ret = -EINVAL;

	/* Check for unsupported extensions */
	if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype ||
	     fs->m_ext.data[1]))
		return -EINVAL;

	if (fs->location != RX_CLS_LOC_ANY &&
	    fs->location > bcm_sf2_cfp_rule_size(priv))
		return -EINVAL;

	if (fs->location != RX_CLS_LOC_ANY &&
	    test_bit(fs->location, priv->cfp.used))
		return -EBUSY;

	/* This rule is a Wake-on-LAN filter and we must specifically
	 * target the CPU port in order for it to be working.
	 */
	if (ring_cookie == RX_CLS_FLOW_WAKE)
		ring_cookie = cpu_port * SF2_NUM_EGRESS_QUEUES;

	/* We do not support discarding packets, check that the
	 * destination port is enabled and that we are within the
	 * number of ports supported by the switch
	 */
	port_num = ring_cookie / SF2_NUM_EGRESS_QUEUES;

	if (ring_cookie == RX_CLS_FLOW_DISC ||
	    !(dsa_is_user_port(ds, port_num) ||
	      dsa_is_cpu_port(ds, port_num)) ||
	    port_num >= priv->hw_params.num_ports)
		return -EINVAL;

	/*
	 * We have a small oddity where Port 6 just does not have a
	 * valid bit here (so we subtract by one).
	 */
	queue_num = ring_cookie % SF2_NUM_EGRESS_QUEUES;
	if (port_num >= 7)
		port_num -= 1;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		ret = bcm_sf2_cfp_ipv4_rule_set(priv, port, port_num,
						queue_num, fs);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		ret = bcm_sf2_cfp_ipv6_rule_set(priv, port, port_num,
						queue_num, fs);
		break;
	default:
		break;
	}

	return ret;
}

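/* Worked ring_cookie decomposition, assuming SF2_NUM_EGRESS_QUEUES
 * is 8: a ring_cookie of 18 selects port_num = 18 / 8 = 2 and
 * queue_num = 18 % 8 = 2, i.e. egress queue 2 of port 2.
 */
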
static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
				    u32 loc, u32 *next_loc)
{
	int ret;
	u32 reg;

	/* Indicate which rule we want to read */
	bcm_sf2_cfp_rule_addr_set(priv, loc);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
	if (ret)
		return ret;

	/* Check if this is possibly an IPv6 rule that would
	 * indicate we need to delete its companion rule
	 * as well
	 */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
	if (next_loc)
		*next_loc = (reg >> 24) & CHAIN_ID_MASK;

	/* Clear its valid bits */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
	reg &= ~SLICE_VALID;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(0));

	/* Write back this entry into the TCAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret)
		return ret;

	clear_bit(loc, priv->cfp.used);
	clear_bit(loc, priv->cfp.unique);

	return 0;
}

static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
				u32 loc)
{
	u32 next_loc = 0;
	int ret;

	if (loc > bcm_sf2_cfp_rule_size(priv))
		return -EINVAL;

	/* Refuse deleting unused rules, and those that are not unique since
	 * that could leave IPv6 rules with one of the chained rule in the
	 * table.
	 */
	if (!test_bit(loc, priv->cfp.unique) || loc == 0)
		return -EINVAL;

	ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
	if (ret)
		return ret;

	/* If this was an IPv6 rule, delete its companion rule too */
	if (next_loc)
		ret = bcm_sf2_cfp_rule_del_one(priv, port, next_loc, NULL);

	return ret;
}

static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
{
	unsigned int i;

	for (i = 0; i < sizeof(flow->m_u); i++)
		flow->m_u.hdata[i] ^= 0xff;

	flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
	flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
	flow->m_ext.data[0] ^= cpu_to_be32(~0);
	flow->m_ext.data[1] ^= cpu_to_be32(~0);
}

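/* The XOR with all-ones converts between the mask polarity programmed
 * into the hardware above and the polarity reported back to ethtool,
 * which is presumably why this runs on the read-back path in
 * bcm_sf2_cfp_rule_get() below.
 */
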
static int bcm_sf2_cfp_unslice_ipv4(struct bcm_sf2_priv *priv,
				    struct ethtool_tcpip4_spec *v4_spec,
				    bool mask)
{
	u32 reg, offset, ipv4;
	u16 src_dst_port;

	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);

	reg = core_readl(priv, offset);
	/* src port [15:8] */
	src_dst_port = reg << 8;

	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);

	reg = core_readl(priv, offset);
	/* src port [7:0] */
	src_dst_port |= (reg >> 24);

	v4_spec->pdst = cpu_to_be16(src_dst_port);
	v4_spec->psrc = cpu_to_be16((u16)(reg >> 8));

	/* IPv4 dst [15:8] */
	ipv4 = (reg & 0xff) << 8;

	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);

	reg = core_readl(priv, offset);
	/* IPv4 dst [31:16] */
	ipv4 |= ((reg >> 8) & 0xffff) << 16;
	/* IPv4 dst [7:0] */
	ipv4 |= (reg >> 24) & 0xff;
	v4_spec->ip4dst = cpu_to_be32(ipv4);

	/* IPv4 src [15:8] */
	ipv4 = (reg & 0xff) << 8;

	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	reg = core_readl(priv, offset);

	/* Once the TCAM is programmed, the mask reflects the slice number
	 * being matched, don't bother checking it when reading back the
	 * mask spec
	 */
	if (!mask && !(reg & SLICE_VALID))
		return -EINVAL;

	/* IPv4 src [7:0] */
	ipv4 |= (reg >> 24) & 0xff;
	/* IPv4 src [31:16] */
	ipv4 |= ((reg >> 8) & 0xffff) << 16;
	v4_spec->ip4src = cpu_to_be32(ipv4);

	return 0;
}

static int bcm_sf2_cfp_ipv4_rule_get(struct bcm_sf2_priv *priv, int port,
				     struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *v4_spec = NULL, *v4_m_spec = NULL;
	u32 reg;
	int ret;

	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));

	switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
	case IPPROTO_TCP:
		fs->flow_type = TCP_V4_FLOW;
		v4_spec = &fs->h_u.tcp_ip4_spec;
		v4_m_spec = &fs->m_u.tcp_ip4_spec;
		break;
	case IPPROTO_UDP:
		fs->flow_type = UDP_V4_FLOW;
		v4_spec = &fs->h_u.udp_ip4_spec;
		v4_m_spec = &fs->m_u.udp_ip4_spec;
		break;
	default:
		return -EINVAL;
	}

	fs->m_ext.data[0] = cpu_to_be32((reg >> IP_FRAG_SHIFT) & 1);
	v4_spec->tos = (reg >> IPTOS_SHIFT) & IPTOS_MASK;

	ret = bcm_sf2_cfp_unslice_ipv4(priv, v4_spec, false);
	if (ret)
		return ret;

	return bcm_sf2_cfp_unslice_ipv4(priv, v4_m_spec, true);
}

static int bcm_sf2_cfp_unslice_ipv6(struct bcm_sf2_priv *priv,
				    __be32 *ip6_addr, __be16 *port,
				    bool mask)
{
	u32 reg, tmp, offset;

	/* C-Tag		[31:24]
	 * UDF_n_B8		[23:8]	(port)
	 * UDF_n_B7 (upper)	[7:0]	(addr[15:8])
	 */
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	reg = core_readl(priv, offset);
	*port = cpu_to_be32(reg) >> 8;
	tmp = (u32)(reg & 0xff) << 8;

	/* UDF_n_B7 (lower)	[31:24]	(addr[7:0])
	 * UDF_n_B6		[23:8]	(addr[31:16])
	 * UDF_n_B5 (upper)	[7:0]	(addr[47:40])
	 */
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	reg = core_readl(priv, offset);
	tmp |= (reg >> 24) & 0xff;
	tmp |= (u32)((reg >> 8) << 16);
	ip6_addr[3] = cpu_to_be32(tmp);
	tmp = (u32)(reg & 0xff) << 8;

	/* UDF_n_B5 (lower)	[31:24]	(addr[39:32])
	 * UDF_n_B4		[23:8]	(addr[63:48])
	 * UDF_n_B3 (upper)	[7:0]	(addr[79:72])
	 */
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	reg = core_readl(priv, offset);
	tmp |= (reg >> 24) & 0xff;
	tmp |= (u32)((reg >> 8) << 16);
	ip6_addr[2] = cpu_to_be32(tmp);
	tmp = (u32)(reg & 0xff) << 8;

	/* UDF_n_B3 (lower)	[31:24]	(addr[71:64])
	 * UDF_n_B2		[23:8]	(addr[95:80])
	 * UDF_n_B1 (upper)	[7:0]	(addr[111:104])
	 */
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	reg = core_readl(priv, offset);
	tmp |= (reg >> 24) & 0xff;
	tmp |= (u32)((reg >> 8) << 16);
	ip6_addr[1] = cpu_to_be32(tmp);
	tmp = (u32)(reg & 0xff) << 8;

	/* UDF_n_B1 (lower)	[31:24]	(addr[103:96])
	 * UDF_n_B0		[23:8]	(addr[127:112])
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	reg = core_readl(priv, offset);
	tmp |= (reg >> 24) & 0xff;
	tmp |= (u32)((reg >> 8) << 16);
	ip6_addr[0] = cpu_to_be32(tmp);

	if (!mask && !(reg & SLICE_VALID))
		return -EINVAL;

	return 0;
}

static int bcm_sf2_cfp_ipv6_rule_get(struct bcm_sf2_priv *priv, int port,
				     struct ethtool_rx_flow_spec *fs,
				     u32 next_loc)
{
	struct ethtool_tcpip6_spec *v6_spec = NULL, *v6_m_spec = NULL;
	u32 reg;
	int ret;

	/* UDPv6 and TCPv6 both use ethtool_tcpip6_spec, so it is fine
	 * to use tcp_ip6_spec here since the two are members of the
	 * same union.
	 */
	v6_spec = &fs->h_u.tcp_ip6_spec;
	v6_m_spec = &fs->m_u.tcp_ip6_spec;

	/* Read the second half first */
	ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6dst, &v6_spec->pdst,
				       false);
	if (ret)
		return ret;

	ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6dst,
				       &v6_m_spec->pdst, true);
	if (ret)
		return ret;

	/* Read last to avoid next entry clobbering the results during search
	 * operations. We would not have the port enabled for this rule, so
	 * don't bother checking it.
	 */
	(void)core_readl(priv, CORE_CFP_DATA_PORT(7));

	/* The slice number is valid, so read the rule we are chained from now
	 * which is our first half.
	 */
	bcm_sf2_cfp_rule_addr_set(priv, next_loc);
	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
	if (ret)
		return ret;

	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));

	switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
	case IPPROTO_TCP:
		fs->flow_type = TCP_V6_FLOW;
		break;
	case IPPROTO_UDP:
		fs->flow_type = UDP_V6_FLOW;
		break;
	default:
		return -EINVAL;
	}

	ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6src, &v6_spec->psrc,
				       false);
	if (ret)
		return ret;

	return bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6src,
					&v6_m_spec->psrc, true);
}

static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
				struct ethtool_rxnfc *nfc)
{
	u32 reg, ipv4_or_chain_id;
	unsigned int queue_num;
	int ret;

	bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM);
	if (ret)
		return ret;

	reg = core_readl(priv, CORE_ACT_POL_DATA0);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
	if (ret)
		return ret;

	/* Extract the destination port */
	nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) &
				  DST_MAP_IB_MASK) - 1;

	/* There is no Port 6, so we compensate for that here */
	if (nfc->fs.ring_cookie >= 6)
		nfc->fs.ring_cookie++;
	nfc->fs.ring_cookie *= SF2_NUM_EGRESS_QUEUES;

	/* Extract the destination queue */
	queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK;
	nfc->fs.ring_cookie += queue_num;

	/* Extract the L3_FRAMING or CHAIN_ID */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));

	/* With IPv6 rules this would contain a non-zero chain ID since
	 * we reserve entry 0 and it cannot be used. So if we read 0 here
	 * this means an IPv4 rule.
	 */
	ipv4_or_chain_id = (reg >> L3_FRAMING_SHIFT) & 0xff;
	if (ipv4_or_chain_id == 0)
		ret = bcm_sf2_cfp_ipv4_rule_get(priv, port, &nfc->fs);
	else
		ret = bcm_sf2_cfp_ipv6_rule_get(priv, port, &nfc->fs,
						ipv4_or_chain_id);
	if (ret)
		return ret;

	/* Read last to avoid next entry clobbering the results during search
	 * operations
	 */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(7));
	if (!(reg & 1 << port))
		return -EINVAL;

	bcm_sf2_invert_masks(&nfc->fs);

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);

	return ret;
}

/* We implement the search by walking the bitmap of unique rules */
static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
				    int port, struct ethtool_rxnfc *nfc,
				    u32 *rule_locs)
{
	unsigned int index = 1, rules_cnt = 0;

	for_each_set_bit_from(index, priv->cfp.unique, priv->num_cfp_rules) {
		rule_locs[rules_cnt] = index;
		rules_cnt++;
	}

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);
	nfc->rule_cnt = rules_cnt;

	return 0;
}

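/* The walk above starts at index 1 because entry #0 is reserved (see
 * bcm_sf2_cfp_rule_size()), so location 0 can never appear in
 * rule_locs.
 */
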
int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct net_device *p = ds->ports[port].cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret = 0;

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_GRXCLSRLCNT:
		/* Subtract the default, unusable rule */
		nfc->rule_cnt = bitmap_weight(priv->cfp.unique,
					      priv->num_cfp_rules) - 1;
		/* We support specifying rule locations */
		nfc->data |= RX_CLS_LOC_SPECIAL;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = bcm_sf2_cfp_rule_get(priv, port, nfc);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&priv->cfp.lock);

	if (ret)
		return ret;

	/* Pass up the commands to the attached master network device */
	if (p->ethtool_ops->get_rxnfc) {
		ret = p->ethtool_ops->get_rxnfc(p, nfc, rule_locs);
		if (ret == -EOPNOTSUPP)
			ret = 0;
	}

	return ret;
}

int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc)
{
	struct net_device *p = ds->ports[port].cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret = 0;

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&priv->cfp.lock);

	if (ret)
		return ret;

	/* Pass up the commands to the attached master network device.
	 * This can fail, so rollback the operation if we need to.
	 */
	if (p->ethtool_ops->set_rxnfc) {
		ret = p->ethtool_ops->set_rxnfc(p, nfc);
		if (ret && ret != -EOPNOTSUPP) {
			mutex_lock(&priv->cfp.lock);
			bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
			mutex_unlock(&priv->cfp.lock);
			return ret;
		}

		ret = 0;
	}

	return ret;
}

int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg |= TCAM_RESET;
	core_writel(priv, reg, CORE_CFP_ACC);

	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & TCAM_RESET))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}