/*
 * Broadcom Starfighter 2 DSA switch driver
 *
 * Copyright (C) 2014, Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/mii.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <net/dsa.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>
#include <linux/brcmphy.h>
#include <linux/etherdevice.h>
#include <net/switchdev.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
/* String, offset, and register size in bytes if different from 4 bytes */
static const struct bcm_sf2_hw_stats bcm_sf2_mib[] = {
        { "TxOctets", 0x000, 8 },
        { "TxDropPkts", 0x020 },
        { "TxQPKTQ0", 0x030 },
        { "TxBroadcastPkts", 0x040 },
        { "TxMulticastPkts", 0x050 },
        { "TxUnicastPKts", 0x060 },
        { "TxCollisions", 0x070 },
        { "TxSingleCollision", 0x080 },
        { "TxMultipleCollision", 0x090 },
        { "TxDeferredCollision", 0x0a0 },
        { "TxLateCollision", 0x0b0 },
        { "TxExcessiveCollision", 0x0c0 },
        { "TxFrameInDisc", 0x0d0 },
        { "TxPausePkts", 0x0e0 },
        { "TxQPKTQ1", 0x0f0 },
        { "TxQPKTQ2", 0x100 },
        { "TxQPKTQ3", 0x110 },
        { "TxQPKTQ4", 0x120 },
        { "TxQPKTQ5", 0x130 },
        { "RxOctets", 0x140, 8 },
        { "RxUndersizePkts", 0x160 },
        { "RxPausePkts", 0x170 },
        { "RxPkts64Octets", 0x180 },
        { "RxPkts65to127Octets", 0x190 },
        { "RxPkts128to255Octets", 0x1a0 },
        { "RxPkts256to511Octets", 0x1b0 },
        { "RxPkts512to1023Octets", 0x1c0 },
        { "RxPkts1024toMaxPktsOctets", 0x1d0 },
        { "RxOversizePkts", 0x1e0 },
        { "RxJabbers", 0x1f0 },
        { "RxAlignmentErrors", 0x200 },
        { "RxFCSErrors", 0x210 },
        { "RxGoodOctets", 0x220, 8 },
        { "RxDropPkts", 0x240 },
        { "RxUnicastPkts", 0x250 },
        { "RxMulticastPkts", 0x260 },
        { "RxBroadcastPkts", 0x270 },
        { "RxSAChanges", 0x280 },
        { "RxFragments", 0x290 },
        { "RxJumboPkt", 0x2a0 },
        { "RxSymblErr", 0x2b0 },
        { "InRangeErrCount", 0x2c0 },
        { "OutRangeErrCount", 0x2d0 },
        { "EEELpiEvent", 0x2e0 },
        { "EEELpiDuration", 0x2f0 },
        { "RxDiscard", 0x300, 8 },
        { "TxQPKTQ6", 0x320 },
        { "TxQPKTQ7", 0x330 },
        { "TxPkts64Octets", 0x340 },
        { "TxPkts65to127Octets", 0x350 },
        { "TxPkts128to255Octets", 0x360 },
        { "TxPkts256to511Ocets", 0x370 },
        { "TxPkts512to1023Ocets", 0x380 },
        { "TxPkts1024toMaxPktOcets", 0x390 },
};

#define BCM_SF2_STATS_SIZE	ARRAY_SIZE(bcm_sf2_mib)
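/*
 * For reference, each row above maps onto struct bcm_sf2_hw_stats declared
 * in bcm_sf2.h. A minimal sketch of the assumed layout (the real header is
 * authoritative):
 *
 *	struct bcm_sf2_hw_stats {
 *		const char	*string;
 *		u16		reg;
 *		u8		sizeof_stat;
 *	};
 *
 * sizeof_stat is left at 0 (a normal 4-byte counter) unless the row
 * specifies 8, in which case a latched 64-bit read is performed.
 */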
static void bcm_sf2_sw_get_strings(struct dsa_switch *ds,
                                   int port, uint8_t *data)
{
        unsigned int i;

        for (i = 0; i < BCM_SF2_STATS_SIZE; i++)
                memcpy(data + i * ETH_GSTRING_LEN,
                       bcm_sf2_mib[i].string, ETH_GSTRING_LEN);
}
static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds,
                                         int port, uint64_t *data)
{
        struct bcm_sf2_priv *priv = ds_to_priv(ds);
        const struct bcm_sf2_hw_stats *s;
        unsigned int i;
        u64 val = 0;
        u32 offset;

        mutex_lock(&priv->stats_mutex);

        /* Now fetch the per-port counters */
        for (i = 0; i < BCM_SF2_STATS_SIZE; i++) {
                s = &bcm_sf2_mib[i];

                /* Do a latched 64-bit read if needed */
                offset = s->reg + CORE_P_MIB_OFFSET(port);
                if (s->sizeof_stat == 8)
                        val = core_readq(priv, offset);
                else
                        val = core_readl(priv, offset);

                data[i] = val;
        }

        mutex_unlock(&priv->stats_mutex);
}
static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds)
{
        return BCM_SF2_STATS_SIZE;
}
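/*
 * Note: the three callbacks above (get_strings, get_ethtool_stats and
 * get_sset_count) are how DSA exposes the per-port MIB counters through
 * ethtool, roughly what "ethtool -S <port-netdev>" reports for one of the
 * switch port net_devices.
 */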
static const char *bcm_sf2_sw_drv_probe(struct device *dsa_dev,
                                        struct device *host_dev, int sw_addr,
                                        void **_priv)
{
        struct bcm_sf2_priv *priv;

        priv = devm_kzalloc(dsa_dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return NULL;

        *_priv = priv;

        return "Broadcom Starfighter 2";
}
static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
{
        struct bcm_sf2_priv *priv = ds_to_priv(ds);
        unsigned int i;
        u32 reg;

        /* Enable the IMP Port to be in the same VLAN as the other ports
         * on a per-port basis such that we only have Port i and IMP in
         * the same VLAN.
         */
        for (i = 0; i < priv->hw_params.num_ports; i++) {
                if (!((1 << i) & ds->enabled_port_mask))
                        continue;

                reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
                reg |= (1 << cpu_port);
                core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
        }
}
static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
        struct bcm_sf2_priv *priv = ds_to_priv(ds);
        u32 reg, val;

        /* Enable the port memories */
        reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
        reg &= ~P_TXQ_PSM_VDD(port);
        core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

        /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
        reg = core_readl(priv, CORE_IMP_CTL);
        reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
        reg &= ~(RX_DIS | TX_DIS);
        core_writel(priv, reg, CORE_IMP_CTL);

        /* Enable forwarding */
        core_writel(priv, SW_FWDG_EN, CORE_SWMODE);

        /* Enable IMP port in dumb mode */
        reg = core_readl(priv, CORE_SWITCH_CTRL);
        reg |= MII_DUMB_FWDG_EN;
        core_writel(priv, reg, CORE_SWITCH_CTRL);

        /* Resolve which bit controls the Broadcom tag */
        switch (port) {
        case 8:
                val = BRCM_HDR_EN_P8;
                break;
        case 7:
                val = BRCM_HDR_EN_P7;
                break;
        case 5:
                val = BRCM_HDR_EN_P5;
                break;
        default:
                val = 0;
                break;
        }

        /* Enable Broadcom tags for IMP port */
        reg = core_readl(priv, CORE_BRCM_HDR_CTRL);
        reg |= val;
        core_writel(priv, reg, CORE_BRCM_HDR_CTRL);

        /* Enable reception Broadcom tag for CPU TX (switch RX) to
         * allow us to tag outgoing frames
         */
        reg = core_readl(priv, CORE_BRCM_HDR_RX_DIS);
        reg &= ~(1 << port);
        core_writel(priv, reg, CORE_BRCM_HDR_RX_DIS);

        /* Enable transmission of Broadcom tags from the switch (CPU RX) to
         * allow delivering frames to the per-port net_devices
         */
        reg = core_readl(priv, CORE_BRCM_HDR_TX_DIS);
        reg &= ~(1 << port);
        core_writel(priv, reg, CORE_BRCM_HDR_TX_DIS);

        /* Force link status for IMP port */
        reg = core_readl(priv, CORE_STS_OVERRIDE_IMP);
        reg |= (MII_SW_OR | LINK_STS);
        core_writel(priv, reg, CORE_STS_OVERRIDE_IMP);
}
static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
{
        struct bcm_sf2_priv *priv = ds_to_priv(ds);
        u32 reg;

        reg = core_readl(priv, CORE_EEE_EN_CTRL);
        if (enable)
                reg |= 1 << port;
        else
                reg &= ~(1 << port);
        core_writel(priv, reg, CORE_EEE_EN_CTRL);
}
static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
{
        struct bcm_sf2_priv *priv = ds_to_priv(ds);
        u32 reg;

        reg = reg_readl(priv, REG_SPHY_CNTRL);
        if (enable) {
                reg |= PHY_RESET;
                reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS);
                reg_writel(priv, reg, REG_SPHY_CNTRL);
                udelay(21);
                reg = reg_readl(priv, REG_SPHY_CNTRL);
                reg &= ~PHY_RESET;
        } else {
                reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
                reg_writel(priv, reg, REG_SPHY_CNTRL);
                mdelay(1);
                reg |= CK25_DIS;
        }
        reg_writel(priv, reg, REG_SPHY_CNTRL);

        /* Use PHY-driven LED signaling */
        if (!enable) {
                reg = reg_readl(priv, REG_LED_CNTRL(0));
                reg |= SPDLNK_SRC_SEL;
                reg_writel(priv, reg, REG_LED_CNTRL(0));
        }
}
static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
                                            int port)
{
        unsigned int off;

        switch (port) {
        case 7:
                intrl2_1_mask_clear(priv, P_IRQ_MASK(P7_IRQ_OFF));
                break;
        case 0:
                /* Port 0 interrupts are located on the first bank */
                intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
                break;
        default:
                off = P_IRQ_OFF(port);
                intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
                break;
        }
}
static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
                                             int port)
{
        unsigned int off;

        switch (port) {
        case 7:
                intrl2_1_mask_set(priv, P_IRQ_MASK(P7_IRQ_OFF));
                intrl2_1_writel(priv, P_IRQ_MASK(P7_IRQ_OFF), INTRL2_CPU_CLEAR);
                break;
        case 0:
                /* Port 0 interrupts are located on the first bank */
                intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
                intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
                break;
        default:
                off = P_IRQ_OFF(port);
                intrl2_1_mask_set(priv, P_IRQ_MASK(off));
                intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
                break;
        }
}
static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
                              struct phy_device *phy)
{
        struct bcm_sf2_priv *priv = ds_to_priv(ds);
        s8 cpu_port = ds->dst[ds->index].cpu_port;
        u32 reg;

        /* Clear the memory power down */
        reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
        reg &= ~P_TXQ_PSM_VDD(port);
        core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

        /* Clear the Rx and Tx disable bits and set to no spanning tree */
        core_writel(priv, 0, CORE_G_PCTL_PORT(port));

        /* Re-enable the GPHY and re-apply workarounds */
        if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
                bcm_sf2_gphy_enable_set(ds, true);
                if (phy) {
                        /* if phy_stop() has been called before, phy
                         * will be in halted state, and phy_start()
                         * will call resume.
                         *
                         * the resume path does not configure back
                         * autoneg settings, and since we hard reset
                         * the phy manually here, we need to reset the
                         * state machine also.
                         */
                        phy->state = PHY_READY;
                        phy_init_hw(phy);
                }
        }

        /* Enable MoCA port interrupts to get notified */
        if (port == priv->moca_port)
                bcm_sf2_port_intr_enable(priv, port);

        /* Set this port, and only this one to be in the default VLAN,
         * if member of a bridge, restore its membership prior to
         * bringing down this port.
         */
        reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
        reg &= ~PORT_VLAN_CTRL_MASK;
        reg |= (1 << port);
        reg |= priv->port_sts[port].vlan_ctl_mask;
        core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port));

        bcm_sf2_imp_vlan_setup(ds, cpu_port);

        /* If EEE was enabled, restore it */
        if (priv->port_sts[port].eee.eee_enabled)
                bcm_sf2_eee_enable_set(ds, port, true);

        return 0;
}
static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
                                 struct phy_device *phy)
{
        struct bcm_sf2_priv *priv = ds_to_priv(ds);
        u32 off, reg;

        if (priv->wol_ports_mask & (1 << port))
                return;

        if (port == priv->moca_port)
                bcm_sf2_port_intr_disable(priv, port);

        if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
                bcm_sf2_gphy_enable_set(ds, false);

        if (dsa_is_cpu_port(ds, port))
                off = CORE_IMP_CTL;
        else
                off = CORE_G_PCTL_PORT(port);

        reg = core_readl(priv, off);
        reg |= RX_DIS | TX_DIS;
        core_writel(priv, reg, off);

        /* Power down the port memory */
        reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
        reg |= P_TXQ_PSM_VDD(port);
        core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
}
/* Returns 0 if EEE was not enabled, or 1 otherwise */
static int bcm_sf2_eee_init(struct dsa_switch *ds, int port,
                            struct phy_device *phy)
{
        struct bcm_sf2_priv *priv = ds_to_priv(ds);
        struct ethtool_eee *p = &priv->port_sts[port].eee;
        int ret;

        p->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full);

        ret = phy_init_eee(phy, 0);
        if (ret)
                return 0;

        bcm_sf2_eee_enable_set(ds, port, true);

        return 1;
}
static int bcm_sf2_sw_get_eee(struct dsa_switch *ds, int port,
                              struct ethtool_eee *e)
{
        struct bcm_sf2_priv *priv = ds_to_priv(ds);
        struct ethtool_eee *p = &priv->port_sts[port].eee;
        u32 reg;

        reg = core_readl(priv, CORE_EEE_LPI_INDICATE);
        e->eee_enabled = p->eee_enabled;
        e->eee_active = !!(reg & (1 << port));

        return 0;
}
static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port,
                              struct phy_device *phydev,
                              struct ethtool_eee *e)
{
        struct bcm_sf2_priv *priv = ds_to_priv(ds);
        struct ethtool_eee *p = &priv->port_sts[port].eee;

        p->eee_enabled = e->eee_enabled;

        if (!p->eee_enabled) {
                bcm_sf2_eee_enable_set(ds, port, false);
        } else {
                p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev);
                if (!p->eee_enabled)
                        return -EOPNOTSUPP;
        }

        return 0;
}
static int bcm_sf2_fast_age_op(struct bcm_sf2_priv *priv)
{
        unsigned int timeout = 1000;
        u32 reg;

        reg = core_readl(priv, CORE_FAST_AGE_CTRL);
        reg |= EN_AGE_PORT | EN_AGE_VLAN | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE;
        core_writel(priv, reg, CORE_FAST_AGE_CTRL);

        do {
                reg = core_readl(priv, CORE_FAST_AGE_CTRL);
                if (!(reg & FAST_AGE_STR_DONE))
                        break;

                cpu_relax();
        } while (timeout--);

        if (!timeout)
                return -ETIMEDOUT;

        core_writel(priv, 0, CORE_FAST_AGE_CTRL);

        return 0;
}
/* Fast-ageing of ARL entries for a given port, equivalent to an ARL
 * flush for that port.
 */
static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
{
        struct bcm_sf2_priv *priv = ds_to_priv(ds);

        core_writel(priv, port, CORE_FAST_AGE_PORT);

        return bcm_sf2_fast_age_op(priv);
}
static int bcm_sf2_sw_fast_age_vlan(struct bcm_sf2_priv *priv, u16 vid)
{
        core_writel(priv, vid, CORE_FAST_AGE_VID);

        return bcm_sf2_fast_age_op(priv);
}
static int bcm_sf2_vlan_op_wait(struct bcm_sf2_priv *priv)
{
        unsigned int timeout = 10;
        u32 reg;

        do {
                reg = core_readl(priv, CORE_ARLA_VTBL_RWCTRL);
                if (!(reg & ARLA_VTBL_STDN))
                        return 0;

                usleep_range(1000, 2000);
        } while (timeout--);

        return -ETIMEDOUT;
}
static int bcm_sf2_vlan_op(struct bcm_sf2_priv *priv, u8 op)
{
        core_writel(priv, ARLA_VTBL_STDN | op, CORE_ARLA_VTBL_RWCTRL);

        return bcm_sf2_vlan_op_wait(priv);
}
static void bcm_sf2_set_vlan_entry(struct bcm_sf2_priv *priv, u16 vid,
                                   struct bcm_sf2_vlan *vlan)
{
        int ret;

        core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR);
        core_writel(priv, vlan->untag << UNTAG_MAP_SHIFT | vlan->members,
                    CORE_ARLA_VTBL_ENTRY);

        ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_WRITE);
        if (ret)
                pr_err("failed to write VLAN entry\n");
}
static int bcm_sf2_get_vlan_entry(struct bcm_sf2_priv *priv, u16 vid,
                                  struct bcm_sf2_vlan *vlan)
{
        u32 entry;
        int ret;

        core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR);

        ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_READ);
        if (ret)
                return ret;

        entry = core_readl(priv, CORE_ARLA_VTBL_ENTRY);
        vlan->members = entry & FWD_MAP_MASK;
        vlan->untag = (entry >> UNTAG_MAP_SHIFT) & UNTAG_MAP_MASK;

        return 0;
}
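/*
 * For illustration only (a sketch based on the two helpers above, not taken
 * from the original source): a VLAN whose members are port 1 and CPU port 8,
 * with port 1 egressing untagged, would be programmed roughly as:
 *
 *	vl->members = BIT(1) | BIT(8);
 *	vl->untag   = BIT(1);
 *	bcm_sf2_set_vlan_entry(priv, vid, vl);
 *
 * i.e. the hardware 802.1Q table entry packs untag << UNTAG_MAP_SHIFT in the
 * upper bits and the member (forward) port map in the lower bits.
 */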
static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port,
                              struct net_device *bridge)
{
        struct bcm_sf2_priv *priv = ds_to_priv(ds);
        s8 cpu_port = ds->dst->cpu_port;
        unsigned int i;
        u32 reg, p_ctl;

        /* Make this port leave the all VLANs join since we will have proper
         * VLAN entries from now on
         */
        reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN);
        reg &= ~BIT(port);
        if ((reg & BIT(cpu_port)) == BIT(cpu_port))
                reg &= ~BIT(cpu_port);
        core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN);

        priv->port_sts[port].bridge_dev = bridge;
        p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));

        for (i = 0; i < priv->hw_params.num_ports; i++) {
                if (priv->port_sts[i].bridge_dev != bridge)
                        continue;

                /* Add this local port to the remote port VLAN control
                 * membership and update the remote port bitmask
                 */
                reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
                reg |= 1 << port;
                core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
                priv->port_sts[i].vlan_ctl_mask = reg;

                p_ctl |= 1 << i;
        }

        /* Configure the local port VLAN control membership to include
         * remote ports and update the local port bitmask
         */
        core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
        priv->port_sts[port].vlan_ctl_mask = p_ctl;

        return 0;
}
static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port)
{
        struct bcm_sf2_priv *priv = ds_to_priv(ds);
        struct net_device *bridge = priv->port_sts[port].bridge_dev;
        s8 cpu_port = ds->dst->cpu_port;
        unsigned int i;
        u32 reg, p_ctl;

        p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));

        for (i = 0; i < priv->hw_params.num_ports; i++) {
                /* Don't touch the remaining ports */
                if (priv->port_sts[i].bridge_dev != bridge)
                        continue;

                reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
                reg &= ~(1 << port);
                core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
                priv->port_sts[port].vlan_ctl_mask = reg;

                /* Prevent self removal to preserve isolation */
                if (port != i)
                        p_ctl &= ~(1 << i);
        }

        core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
        priv->port_sts[port].vlan_ctl_mask = p_ctl;
        priv->port_sts[port].bridge_dev = NULL;

        /* Make this port join all VLANs without VLAN entries */
        reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN);
        reg |= BIT(port);
        if (!(reg & BIT(cpu_port)))
                reg |= BIT(cpu_port);
        core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN);
}
static void bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
                                        u8 state)
{
        struct bcm_sf2_priv *priv = ds_to_priv(ds);
        u8 hw_state, cur_hw_state;
        u32 reg;

        reg = core_readl(priv, CORE_G_PCTL_PORT(port));
        cur_hw_state = reg & (G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);

        switch (state) {
        case BR_STATE_DISABLED:
                hw_state = G_MISTP_DIS_STATE;
                break;
        case BR_STATE_LISTENING:
                hw_state = G_MISTP_LISTEN_STATE;
                break;
        case BR_STATE_LEARNING:
                hw_state = G_MISTP_LEARN_STATE;
                break;
        case BR_STATE_FORWARDING:
                hw_state = G_MISTP_FWD_STATE;
                break;
        case BR_STATE_BLOCKING:
                hw_state = G_MISTP_BLOCK_STATE;
                break;
        default:
                pr_err("%s: invalid STP state: %d\n", __func__, state);
                return;
        }

        /* Fast-age ARL entries if we are moving a port from Learning or
         * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening
         * state (hw_state)
         */
        if (cur_hw_state != hw_state) {
                if (cur_hw_state >= G_MISTP_LEARN_STATE &&
                    hw_state <= G_MISTP_LISTEN_STATE) {
                        if (bcm_sf2_sw_fast_age_port(ds, port)) {
                                pr_err("%s: fast-ageing failed\n", __func__);
                                return;
                        }
                }
        }

        reg = core_readl(priv, CORE_G_PCTL_PORT(port));
        reg &= ~(G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
        reg |= hw_state;
        core_writel(priv, reg, CORE_G_PCTL_PORT(port));
}
/* Address Resolution Logic routines */
static int bcm_sf2_arl_op_wait(struct bcm_sf2_priv *priv)
{
        unsigned int timeout = 10;
        u32 reg;

        do {
                reg = core_readl(priv, CORE_ARLA_RWCTL);
                if (!(reg & ARL_STRTDN))
                        return 0;

                usleep_range(1000, 2000);
        } while (timeout--);

        return -ETIMEDOUT;
}
static int bcm_sf2_arl_rw_op(struct bcm_sf2_priv *priv, unsigned int op)
{
        u32 cmd;

        if (op > ARL_RW)
                return -EINVAL;

        cmd = core_readl(priv, CORE_ARLA_RWCTL);
        cmd &= ~IVL_SVL_SELECT;
        cmd |= ARL_STRTDN;
        if (op)
                cmd |= ARL_RW;
        else
                cmd &= ~ARL_RW;
        core_writel(priv, cmd, CORE_ARLA_RWCTL);

        return bcm_sf2_arl_op_wait(priv);
}
static int bcm_sf2_arl_read(struct bcm_sf2_priv *priv, u64 mac,
                            u16 vid, struct bcm_sf2_arl_entry *ent, u8 *idx,
                            bool is_valid)
{
        unsigned int i;
        int ret;

        ret = bcm_sf2_arl_op_wait(priv);
        if (ret)
                return ret;

        /* Read the 4 bins */
        for (i = 0; i < 4; i++) {
                u64 mac_vid;
                u32 fwd_entry;

                mac_vid = core_readq(priv, CORE_ARLA_MACVID_ENTRY(i));
                fwd_entry = core_readl(priv, CORE_ARLA_FWD_ENTRY(i));
                bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry);

                if (ent->is_valid && is_valid) {
                        *idx = i;
                        return 0;
                }

                /* This is the MAC we just deleted */
                if (!is_valid && (mac_vid & mac))
                        return 0;
        }

        return -ENOENT;
}
static int bcm_sf2_arl_op(struct bcm_sf2_priv *priv, int op, int port,
                          const unsigned char *addr, u16 vid, bool is_valid)
{
        struct bcm_sf2_arl_entry ent;
        u32 fwd_entry;
        u64 mac, mac_vid = 0;
        u8 idx = 0;
        int ret;

        /* Convert the array into a 64-bit MAC */
        mac = bcm_sf2_mac_to_u64(addr);

        /* Perform a read for the given MAC and VID */
        core_writeq(priv, mac, CORE_ARLA_MAC);
        core_writel(priv, vid, CORE_ARLA_VID);

        /* Issue a read operation for this MAC */
        ret = bcm_sf2_arl_rw_op(priv, 1);
        if (ret)
                return ret;

        ret = bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid);
        /* If this is a read, just finish now */
        if (op)
                return ret;

        /* We could not find a matching MAC, so reset to a new entry */
        if (ret && !is_valid)
                return ret;

        memset(&ent, 0, sizeof(ent));
        ent.port = port;
        ent.is_valid = is_valid;
        ent.vid = vid;
        ent.is_static = true;
        memcpy(ent.mac, addr, ETH_ALEN);
        bcm_sf2_arl_from_entry(&mac_vid, &fwd_entry, &ent);

        core_writeq(priv, mac_vid, CORE_ARLA_MACVID_ENTRY(idx));
        core_writel(priv, fwd_entry, CORE_ARLA_FWD_ENTRY(idx));

        ret = bcm_sf2_arl_rw_op(priv, 0);
        if (ret)
                return ret;

        /* Re-read the entry to check */
        return bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid);
}
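/*
 * A rough sketch of how the switchdev FDB callbacks below drive this helper
 * (conventions inferred from the code above, not an authoritative summary):
 *
 *	bcm_sf2_arl_op(priv, 0, port, addr, vid, true);	  - install a static entry
 *	bcm_sf2_arl_op(priv, 0, port, addr, vid, false);  - invalidate the entry
 *
 * A non-zero op only performs the ARL lookup and returns its status.
 */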
static int bcm_sf2_sw_fdb_prepare(struct dsa_switch *ds, int port,
                                  const struct switchdev_obj_port_fdb *fdb,
                                  struct switchdev_trans *trans)
{
        /* We do not need to do anything specific here yet */
        return 0;
}
static void bcm_sf2_sw_fdb_add(struct dsa_switch *ds, int port,
                               const struct switchdev_obj_port_fdb *fdb,
                               struct switchdev_trans *trans)
{
        struct bcm_sf2_priv *priv = ds_to_priv(ds);

        if (bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, true))
                pr_err("%s: failed to add MAC address\n", __func__);
}
static int bcm_sf2_sw_fdb_del(struct dsa_switch *ds, int port,
                              const struct switchdev_obj_port_fdb *fdb)
{
        struct bcm_sf2_priv *priv = ds_to_priv(ds);

        return bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, false);
}
static int bcm_sf2_arl_search_wait(struct bcm_sf2_priv *priv)
{
        unsigned int timeout = 1000;
        u32 reg;

        do {
                reg = core_readl(priv, CORE_ARLA_SRCH_CTL);
                if (!(reg & ARLA_SRCH_STDN))
                        return 0;

                if (reg & ARLA_SRCH_VLID)
                        return 0;

                usleep_range(1000, 2000);
        } while (timeout--);

        return -ETIMEDOUT;
}
static void bcm_sf2_arl_search_rd(struct bcm_sf2_priv *priv, u8 idx,
                                  struct bcm_sf2_arl_entry *ent)
{
        u64 mac_vid;
        u32 fwd_entry;

        mac_vid = core_readq(priv, CORE_ARLA_SRCH_RSLT_MACVID(idx));
        fwd_entry = core_readl(priv, CORE_ARLA_SRCH_RSLT(idx));
        bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry);
}
static int bcm_sf2_sw_fdb_copy(struct net_device *dev, int port,
                               const struct bcm_sf2_arl_entry *ent,
                               struct switchdev_obj_port_fdb *fdb,
                               int (*cb)(struct switchdev_obj *obj))
{
        if (!ent->is_valid)
                return 0;

        if (port != ent->port)
                return 0;

        ether_addr_copy(fdb->addr, ent->mac);
        fdb->vid = ent->vid;
        fdb->ndm_state = ent->is_static ? NUD_NOARP : NUD_REACHABLE;

        return cb(&fdb->obj);
}
static int bcm_sf2_sw_fdb_dump(struct dsa_switch *ds, int port,
                               struct switchdev_obj_port_fdb *fdb,
                               int (*cb)(struct switchdev_obj *obj))
{
        struct bcm_sf2_priv *priv = ds_to_priv(ds);
        struct net_device *dev = ds->ports[port].netdev;
        struct bcm_sf2_arl_entry results[2];
        unsigned int count = 0;
        int ret;

        /* Start search operation */
        core_writel(priv, ARLA_SRCH_STDN, CORE_ARLA_SRCH_CTL);

        do {
                ret = bcm_sf2_arl_search_wait(priv);
                if (ret)
                        return ret;

                /* Read both entries, then return their values back */
                bcm_sf2_arl_search_rd(priv, 0, &results[0]);
                ret = bcm_sf2_sw_fdb_copy(dev, port, &results[0], fdb, cb);
                if (ret)
                        return ret;

                bcm_sf2_arl_search_rd(priv, 1, &results[1]);
                ret = bcm_sf2_sw_fdb_copy(dev, port, &results[1], fdb, cb);
                if (ret)
                        return ret;

                if (!results[0].is_valid && !results[1].is_valid)
                        break;

        } while (count++ < CORE_ARLA_NUM_ENTRIES);

        return 0;
}
static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
                               int regnum, u16 val)
{
        int ret = 0;
        u32 reg;

        reg = reg_readl(priv, REG_SWITCH_CNTRL);
        reg |= MDIO_MASTER_SEL;
        reg_writel(priv, reg, REG_SWITCH_CNTRL);

        /* Page << 8 | offset */
        reg = 0x70;
        reg <<= 2;
        core_writel(priv, addr, reg);

        /* Page << 8 | offset */
        reg = 0x80 << 8 | regnum << 1;
        reg <<= 2;

        if (op)
                ret = core_readl(priv, reg);
        else
                core_writel(priv, val, reg);

        reg = reg_readl(priv, REG_SWITCH_CNTRL);
        reg &= ~MDIO_MASTER_SEL;
        reg_writel(priv, reg, REG_SWITCH_CNTRL);

        return ret;
}
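/*
 * For illustration only (hypothetical values, based on the encoding visible
 * above): reading MII register MII_PHYSID1 (0x02) of the pseudo-PHY first
 * latches the PHY address, then performs the access at core offset
 * (0x80 << 8 | 0x02 << 1) << 2 while MDIO_MASTER_SEL is held in
 * REG_SWITCH_CNTRL:
 *
 *	ret = bcm_sf2_sw_indir_rw(priv, 1, BRCM_PSEUDO_PHY_ADDR, MII_PHYSID1, 0);
 */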
static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
        struct bcm_sf2_priv *priv = bus->priv;

        /* Intercept reads from Broadcom pseudo-PHY address, else, send
         * them to our master MDIO bus controller
         */
        if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
                return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);

        return mdiobus_read(priv->master_mii_bus, addr, regnum);
}
static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
                                 u16 val)
{
        struct bcm_sf2_priv *priv = bus->priv;

        /* Intercept writes to the Broadcom pseudo-PHY address, else,
         * send them to our master MDIO bus controller
         */
        if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
                bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
        else
                mdiobus_write(priv->master_mii_bus, addr, regnum, val);

        return 0;
}
static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
{
        struct bcm_sf2_priv *priv = dev_id;

        priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
                                ~priv->irq0_mask;
        intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

        return IRQ_HANDLED;
}
static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
{
        struct bcm_sf2_priv *priv = dev_id;

        priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
                                ~priv->irq1_mask;
        intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

        if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF))
                priv->port_sts[7].link = 1;
        if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF))
                priv->port_sts[7].link = 0;

        return IRQ_HANDLED;
}
static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
{
        unsigned int timeout = 1000;
        u32 reg;

        reg = core_readl(priv, CORE_WATCHDOG_CTRL);
        reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
        core_writel(priv, reg, CORE_WATCHDOG_CTRL);

        do {
                reg = core_readl(priv, CORE_WATCHDOG_CTRL);
                if (!(reg & SOFTWARE_RESET))
                        break;

                usleep_range(1000, 2000);
        } while (timeout-- > 0);

        if (timeout == 0)
                return -ETIMEDOUT;

        return 0;
}
static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
{
        intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
        intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
        intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
        intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
        intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
        intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
}
static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
                                   struct device_node *dn)
{
        struct device_node *port;
        const char *phy_mode_str;
        int mode;
        unsigned int port_num;
        int ret;

        priv->moca_port = -1;

        for_each_available_child_of_node(dn, port) {
                if (of_property_read_u32(port, "reg", &port_num))
                        continue;

                /* Internal PHYs get assigned a specific 'phy-mode' property
                 * value: "internal" to help flag them before MDIO probing
                 * has completed, since they might be turned off at that
                 * point
                 */
                mode = of_get_phy_mode(port);
                if (mode < 0) {
                        ret = of_property_read_string(port, "phy-mode",
                                                      &phy_mode_str);
                        if (ret < 0)
                                continue;

                        if (!strcasecmp(phy_mode_str, "internal"))
                                priv->int_phy_mask |= 1 << port_num;
                }

                if (mode == PHY_INTERFACE_MODE_MOCA)
                        priv->moca_port = port_num;
        }
}
static int bcm_sf2_mdio_register(struct dsa_switch *ds)
{
        struct bcm_sf2_priv *priv = ds_to_priv(ds);
        struct device_node *dn;
        static int index;
        int err;

        /* Find our integrated MDIO bus node */
        dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
        priv->master_mii_bus = of_mdio_find_bus(dn);
        if (!priv->master_mii_bus)
                return -EPROBE_DEFER;

        get_device(&priv->master_mii_bus->dev);
        priv->master_mii_dn = dn;

        priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
        if (!priv->slave_mii_bus)
                return -ENOMEM;

        priv->slave_mii_bus->priv = priv;
        priv->slave_mii_bus->name = "sf2 slave mii";
        priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read;
        priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write;
        snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
                 index++);
        priv->slave_mii_bus->dev.of_node = dn;

        /* Include the pseudo-PHY address to divert reads towards our
         * workaround. This is only required for 7445D0, since 7445E0
         * disconnects the internal switch pseudo-PHY such that we can use the
         * regular SWITCH_MDIO master controller instead.
         *
         * Here we flag the pseudo PHY as needing special treatment and would
         * otherwise make all other PHY read/writes go to the master MDIO bus
         * controller that comes with this switch backed by the "mdio-unimac"
         * driver.
         */
        if (of_machine_is_compatible("brcm,bcm7445d0"))
                priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR);
        else
                priv->indir_phy_mask = 0;

        ds->phys_mii_mask = priv->indir_phy_mask;
        ds->slave_mii_bus = priv->slave_mii_bus;
        priv->slave_mii_bus->parent = ds->dev->parent;
        priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;

        if (dn)
                err = of_mdiobus_register(priv->slave_mii_bus, dn);
        else
                err = mdiobus_register(priv->slave_mii_bus);

        if (err)
                of_node_put(dn);

        return err;
}
static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
{
        mdiobus_unregister(priv->slave_mii_bus);
        if (priv->master_mii_dn)
                of_node_put(priv->master_mii_dn);
}
static int bcm_sf2_sw_set_addr(struct dsa_switch *ds, u8 *addr)
{
        return 0;
}
static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
{
        struct bcm_sf2_priv *priv = ds_to_priv(ds);

        /* The BCM7xxx PHY driver expects to find the integrated PHY revision
         * in bits 15:8 and the patch level in bits 7:0 which is exactly what
         * the REG_PHY_REVISION register layout is.
         */
        return priv->hw_params.gphy_rev;
}
static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
                                   struct phy_device *phydev)
{
        struct bcm_sf2_priv *priv = ds_to_priv(ds);
        u32 id_mode_dis = 0, port_mode;
        const char *str = NULL;
        u32 reg;

        switch (phydev->interface) {
        case PHY_INTERFACE_MODE_RGMII:
                str = "RGMII (no delay)";
                id_mode_dis = 1;
        case PHY_INTERFACE_MODE_RGMII_TXID:
                if (!str)
                        str = "RGMII (TX delay)";
                port_mode = EXT_GPHY;
                break;
        case PHY_INTERFACE_MODE_MII:
                str = "MII";
                port_mode = EXT_EPHY;
                break;
        case PHY_INTERFACE_MODE_REVMII:
                str = "Reverse MII";
                port_mode = EXT_REVMII;
                break;
        default:
                /* All other PHYs: internal and MoCA */
                goto force_link;
        }

        /* If the link is down, just disable the interface to conserve power */
        if (!phydev->link) {
                reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
                reg &= ~RGMII_MODE_EN;
                reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
                goto force_link;
        }

        /* Clear id_mode_dis bit, and the existing port mode, but
         * make sure we enable the RGMII block for data to pass
         */
        reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
        reg &= ~ID_MODE_DIS;
        reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
        reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);

        reg |= port_mode | RGMII_MODE_EN;
        if (id_mode_dis)
                reg |= ID_MODE_DIS;

        if (phydev->pause) {
                if (phydev->asym_pause)
                        reg |= TX_PAUSE_EN;
                reg |= RX_PAUSE_EN;
        }

        reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));

        pr_info("Port %d configured for %s\n", port, str);

force_link:
        /* Force link settings detected from the PHY */
        reg = SW_OVERRIDE;
        switch (phydev->speed) {
        case SPEED_1000:
                reg |= SPDSTS_1000 << SPEED_SHIFT;
                break;
        case SPEED_100:
                reg |= SPDSTS_100 << SPEED_SHIFT;
                break;
        }

        if (phydev->link)
                reg |= LINK_STS;
        if (phydev->duplex == DUPLEX_FULL)
                reg |= DUPLX_MODE;

        core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
}
static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
                                         struct fixed_phy_status *status)
{
        struct bcm_sf2_priv *priv = ds_to_priv(ds);
        u32 duplex, pause;
        u32 reg;

        duplex = core_readl(priv, CORE_DUPSTS);
        pause = core_readl(priv, CORE_PAUSESTS);

        status->link = 0;

        /* MoCA port is special as we do not get link status from CORE_LNKSTS,
         * which means that we need to force the link at the port override
         * level to get the data to flow. We do use what the interrupt handler
         * did determine before.
         *
         * For the other ports, we just force the link status, since this is
         * a fixed PHY device.
         */
        if (port == priv->moca_port) {
                status->link = priv->port_sts[port].link;
                /* For MoCA interfaces, also force a link down notification
                 * since some version of the user-space daemon (mocad) use
                 * cmd->autoneg to force the link, which messes up the PHY
                 * state machine and make it go in PHY_FORCING state instead.
                 */
                if (!status->link)
                        netif_carrier_off(ds->ports[port].netdev);
                status->duplex = 1;
        } else {
                status->link = 1;
                status->duplex = !!(duplex & (1 << port));
        }

        reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(port));
        reg |= SW_OVERRIDE;
        if (status->link)
                reg |= LINK_STS;
        else
                reg &= ~LINK_STS;
        core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));

        if ((pause & (1 << port)) &&
            (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
                status->asym_pause = 1;
                status->pause = 1;
        }

        if (pause & (1 << port))
                status->pause = 1;
}
*ds
)
1301 struct bcm_sf2_priv
*priv
= ds_to_priv(ds
);
1304 bcm_sf2_intr_disable(priv
);
1306 /* Disable all ports physically present including the IMP
1307 * port, the other ones have already been disabled during
1310 for (port
= 0; port
< DSA_MAX_PORTS
; port
++) {
1311 if ((1 << port
) & ds
->enabled_port_mask
||
1312 dsa_is_cpu_port(ds
, port
))
1313 bcm_sf2_port_disable(ds
, port
, NULL
);
1319 static int bcm_sf2_sw_resume(struct dsa_switch
*ds
)
1321 struct bcm_sf2_priv
*priv
= ds_to_priv(ds
);
1325 ret
= bcm_sf2_sw_rst(priv
);
1327 pr_err("%s: failed to software reset switch\n", __func__
);
1331 if (priv
->hw_params
.num_gphy
== 1)
1332 bcm_sf2_gphy_enable_set(ds
, true);
1334 for (port
= 0; port
< DSA_MAX_PORTS
; port
++) {
1335 if ((1 << port
) & ds
->enabled_port_mask
)
1336 bcm_sf2_port_setup(ds
, port
, NULL
);
1337 else if (dsa_is_cpu_port(ds
, port
))
1338 bcm_sf2_imp_setup(ds
, port
);
1344 static void bcm_sf2_sw_get_wol(struct dsa_switch
*ds
, int port
,
1345 struct ethtool_wolinfo
*wol
)
1347 struct net_device
*p
= ds
->dst
[ds
->index
].master_netdev
;
1348 struct bcm_sf2_priv
*priv
= ds_to_priv(ds
);
1349 struct ethtool_wolinfo pwol
;
1351 /* Get the parent device WoL settings */
1352 p
->ethtool_ops
->get_wol(p
, &pwol
);
1354 /* Advertise the parent device supported settings */
1355 wol
->supported
= pwol
.supported
;
1356 memset(&wol
->sopass
, 0, sizeof(wol
->sopass
));
1358 if (pwol
.wolopts
& WAKE_MAGICSECURE
)
1359 memcpy(&wol
->sopass
, pwol
.sopass
, sizeof(wol
->sopass
));
1361 if (priv
->wol_ports_mask
& (1 << port
))
1362 wol
->wolopts
= pwol
.wolopts
;
static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
                              struct ethtool_wolinfo *wol)
{
        struct net_device *p = ds->dst[ds->index].master_netdev;
        struct bcm_sf2_priv *priv = ds_to_priv(ds);
        s8 cpu_port = ds->dst[ds->index].cpu_port;
        struct ethtool_wolinfo pwol;

        p->ethtool_ops->get_wol(p, &pwol);
        if (wol->wolopts & ~pwol.supported)
                return -EINVAL;

        if (wol->wolopts)
                priv->wol_ports_mask |= (1 << port);
        else
                priv->wol_ports_mask &= ~(1 << port);

        /* If we have at least one port enabled, make sure the CPU port
         * is also enabled. If the CPU port is the last one enabled, we disable
         * it since this configuration does not make sense.
         */
        if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
                priv->wol_ports_mask |= (1 << cpu_port);
        else
                priv->wol_ports_mask &= ~(1 << cpu_port);

        return p->ethtool_ops->set_wol(p, wol);
}
static void bcm_sf2_enable_vlan(struct bcm_sf2_priv *priv, bool enable)
{
        u32 mgmt, vc0, vc1, vc4, vc5;

        mgmt = core_readl(priv, CORE_SWMODE);
        vc0 = core_readl(priv, CORE_VLAN_CTRL0);
        vc1 = core_readl(priv, CORE_VLAN_CTRL1);
        vc4 = core_readl(priv, CORE_VLAN_CTRL4);
        vc5 = core_readl(priv, CORE_VLAN_CTRL5);

        mgmt &= ~SW_FWDG_MODE;

        if (enable) {
                vc0 |= VLAN_EN | VLAN_LEARN_MODE_IVL;
                vc1 |= EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP;
                vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT);
                vc4 |= INGR_VID_CHK_DROP;
                vc5 |= DROP_VTABLE_MISS | EN_VID_FFF_FWD;
        } else {
                vc0 &= ~(VLAN_EN | VLAN_LEARN_MODE_IVL);
                vc1 &= ~(EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP);
                vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT);
                vc5 &= ~(DROP_VTABLE_MISS | EN_VID_FFF_FWD);
                vc4 |= INGR_VID_CHK_VID_VIOL_IMP;
        }

        core_writel(priv, vc0, CORE_VLAN_CTRL0);
        core_writel(priv, vc1, CORE_VLAN_CTRL1);
        core_writel(priv, 0, CORE_VLAN_CTRL3);
        core_writel(priv, vc4, CORE_VLAN_CTRL4);
        core_writel(priv, vc5, CORE_VLAN_CTRL5);
        core_writel(priv, mgmt, CORE_SWMODE);
}
static void bcm_sf2_sw_configure_vlan(struct dsa_switch *ds)
{
        struct bcm_sf2_priv *priv = ds_to_priv(ds);
        unsigned int port;

        /* Clear all VLANs */
        bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_CLEAR);

        for (port = 0; port < priv->hw_params.num_ports; port++) {
                if (!((1 << port) & ds->enabled_port_mask))
                        continue;

                core_writel(priv, 1, CORE_DEFAULT_1Q_TAG_P(port));
        }
}
static int bcm_sf2_sw_vlan_filtering(struct dsa_switch *ds, int port,
                                     bool vlan_filtering)
{
        return 0;
}
static int bcm_sf2_sw_vlan_prepare(struct dsa_switch *ds, int port,
                                   const struct switchdev_obj_port_vlan *vlan,
                                   struct switchdev_trans *trans)
{
        struct bcm_sf2_priv *priv = ds_to_priv(ds);

        bcm_sf2_enable_vlan(priv, true);

        return 0;
}
static void bcm_sf2_sw_vlan_add(struct dsa_switch *ds, int port,
                                const struct switchdev_obj_port_vlan *vlan,
                                struct switchdev_trans *trans)
{
        struct bcm_sf2_priv *priv = ds_to_priv(ds);
        bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
        bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
        s8 cpu_port = ds->dst->cpu_port;
        struct bcm_sf2_vlan *vl;
        u16 vid;

        for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
                vl = &priv->vlans[vid];

                bcm_sf2_get_vlan_entry(priv, vid, vl);

                vl->members |= BIT(port) | BIT(cpu_port);
                if (untagged)
                        vl->untag |= BIT(port) | BIT(cpu_port);
                else
                        vl->untag &= ~(BIT(port) | BIT(cpu_port));

                bcm_sf2_set_vlan_entry(priv, vid, vl);
                bcm_sf2_sw_fast_age_vlan(priv, vid);
        }

        if (pvid) {
                core_writel(priv, vlan->vid_end, CORE_DEFAULT_1Q_TAG_P(port));
                core_writel(priv, vlan->vid_end,
                            CORE_DEFAULT_1Q_TAG_P(cpu_port));
                bcm_sf2_sw_fast_age_vlan(priv, vid);
        }
}
static int bcm_sf2_sw_vlan_del(struct dsa_switch *ds, int port,
                               const struct switchdev_obj_port_vlan *vlan)
{
        struct bcm_sf2_priv *priv = ds_to_priv(ds);
        bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
        s8 cpu_port = ds->dst->cpu_port;
        struct bcm_sf2_vlan *vl;
        u16 vid, pvid;
        int ret;

        pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port));

        for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
                vl = &priv->vlans[vid];

                ret = bcm_sf2_get_vlan_entry(priv, vid, vl);
                if (ret)
                        return ret;

                vl->members &= ~BIT(port);
                if ((vl->members & BIT(cpu_port)) == BIT(cpu_port))
                        vl->members = 0;
                if (pvid == vid)
                        pvid = 0;
                if (untagged) {
                        vl->untag &= ~BIT(port);
                        if ((vl->untag & BIT(port)) == BIT(cpu_port))
                                vl->untag = 0;
                }

                bcm_sf2_set_vlan_entry(priv, vid, vl);
                bcm_sf2_sw_fast_age_vlan(priv, vid);
        }

        core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(port));
        core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(cpu_port));
        bcm_sf2_sw_fast_age_vlan(priv, vid);

        return 0;
}
static int bcm_sf2_sw_vlan_dump(struct dsa_switch *ds, int port,
                                struct switchdev_obj_port_vlan *vlan,
                                int (*cb)(struct switchdev_obj *obj))
{
        struct bcm_sf2_priv *priv = ds_to_priv(ds);
        struct bcm_sf2_port_status *p = &priv->port_sts[port];
        struct bcm_sf2_vlan *vl;
        u16 vid, pvid;
        int err = 0;

        pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port));

        for (vid = 0; vid < VLAN_N_VID; vid++) {
                vl = &priv->vlans[vid];

                if (!(vl->members & BIT(port)))
                        continue;

                vlan->vid_begin = vlan->vid_end = vid;
                vlan->flags = 0;

                if (vl->untag & BIT(port))
                        vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
                if (pvid == vid)
                        vlan->flags |= BRIDGE_VLAN_INFO_PVID;

                err = cb(&vlan->obj);
                if (err)
                        break;
        }

        return err;
}
static int bcm_sf2_sw_setup(struct dsa_switch *ds)
{
        const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
        struct bcm_sf2_priv *priv = ds_to_priv(ds);
        struct device_node *dn;
        void __iomem **base;
        unsigned int port;
        unsigned int i;
        u32 reg, rev;
        int ret;

        spin_lock_init(&priv->indir_lock);
        mutex_init(&priv->stats_mutex);

        /* All the interesting properties are at the parent device_node
         * level
         */
        dn = ds->cd->of_node->parent;
        bcm_sf2_identify_ports(priv, ds->cd->of_node);

        priv->irq0 = irq_of_parse_and_map(dn, 0);
        priv->irq1 = irq_of_parse_and_map(dn, 1);

        base = &priv->core;
        for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
                *base = of_iomap(dn, i);
                if (*base == NULL) {
                        pr_err("unable to find register: %s\n", reg_names[i]);
                        ret = -ENOMEM;
                        goto out_unmap;
                }
                base++;
        }

        ret = bcm_sf2_sw_rst(priv);
        if (ret) {
                pr_err("unable to software reset switch: %d\n", ret);
                goto out_unmap;
        }

        ret = bcm_sf2_mdio_register(ds);
        if (ret) {
                pr_err("failed to register MDIO bus\n");
                goto out_unmap;
        }

        /* Disable all interrupts and request them */
        bcm_sf2_intr_disable(priv);

        ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0,
                          "switch_0", priv);
        if (ret < 0) {
                pr_err("failed to request switch_0 IRQ\n");
                goto out_mdio;
        }

        ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0,
                          "switch_1", priv);
        if (ret < 0) {
                pr_err("failed to request switch_1 IRQ\n");
                goto out_free_irq0;
        }

        /* Reset the MIB counters */
        reg = core_readl(priv, CORE_GMNCFGCFG);
        reg |= RST_MIB_CNT;
        core_writel(priv, reg, CORE_GMNCFGCFG);
        reg &= ~RST_MIB_CNT;
        core_writel(priv, reg, CORE_GMNCFGCFG);

        /* Get the maximum number of ports for this switch */
        priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
        if (priv->hw_params.num_ports > DSA_MAX_PORTS)
                priv->hw_params.num_ports = DSA_MAX_PORTS;

        /* Assume a single GPHY setup if we can't read that property */
        if (of_property_read_u32(dn, "brcm,num-gphy",
                                 &priv->hw_params.num_gphy))
                priv->hw_params.num_gphy = 1;

        /* Enable all valid ports and disable those unused */
        for (port = 0; port < priv->hw_params.num_ports; port++) {
                /* IMP port receives special treatment */
                if ((1 << port) & ds->enabled_port_mask)
                        bcm_sf2_port_setup(ds, port, NULL);
                else if (dsa_is_cpu_port(ds, port))
                        bcm_sf2_imp_setup(ds, port);
                else
                        bcm_sf2_port_disable(ds, port, NULL);
        }

        bcm_sf2_sw_configure_vlan(ds);

        rev = reg_readl(priv, REG_SWITCH_REVISION);
        priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
                                        SWITCH_TOP_REV_MASK;
        priv->hw_params.core_rev = (rev & SF2_REV_MASK);

        rev = reg_readl(priv, REG_PHY_REVISION);
        priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;

        pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
                priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
                priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
                priv->core, priv->irq0, priv->irq1);

        return 0;

out_free_irq0:
        free_irq(priv->irq0, priv);
out_mdio:
        bcm_sf2_mdio_unregister(priv);
out_unmap:
        base = &priv->core;
        for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
                if (*base)
                        iounmap(*base);
                base++;
        }
        return ret;
}
static struct dsa_switch_driver bcm_sf2_switch_driver = {
        .tag_protocol           = DSA_TAG_PROTO_BRCM,
        .probe                  = bcm_sf2_sw_drv_probe,
        .setup                  = bcm_sf2_sw_setup,
        .set_addr               = bcm_sf2_sw_set_addr,
        .get_phy_flags          = bcm_sf2_sw_get_phy_flags,
        .get_strings            = bcm_sf2_sw_get_strings,
        .get_ethtool_stats      = bcm_sf2_sw_get_ethtool_stats,
        .get_sset_count         = bcm_sf2_sw_get_sset_count,
        .adjust_link            = bcm_sf2_sw_adjust_link,
        .fixed_link_update      = bcm_sf2_sw_fixed_link_update,
        .suspend                = bcm_sf2_sw_suspend,
        .resume                 = bcm_sf2_sw_resume,
        .get_wol                = bcm_sf2_sw_get_wol,
        .set_wol                = bcm_sf2_sw_set_wol,
        .port_enable            = bcm_sf2_port_setup,
        .port_disable           = bcm_sf2_port_disable,
        .get_eee                = bcm_sf2_sw_get_eee,
        .set_eee                = bcm_sf2_sw_set_eee,
        .port_bridge_join       = bcm_sf2_sw_br_join,
        .port_bridge_leave      = bcm_sf2_sw_br_leave,
        .port_stp_state_set     = bcm_sf2_sw_br_set_stp_state,
        .port_fdb_prepare       = bcm_sf2_sw_fdb_prepare,
        .port_fdb_add           = bcm_sf2_sw_fdb_add,
        .port_fdb_del           = bcm_sf2_sw_fdb_del,
        .port_fdb_dump          = bcm_sf2_sw_fdb_dump,
        .port_vlan_filtering    = bcm_sf2_sw_vlan_filtering,
        .port_vlan_prepare      = bcm_sf2_sw_vlan_prepare,
        .port_vlan_add          = bcm_sf2_sw_vlan_add,
        .port_vlan_del          = bcm_sf2_sw_vlan_del,
        .port_vlan_dump         = bcm_sf2_sw_vlan_dump,
};
static int __init bcm_sf2_init(void)
{
        register_switch_driver(&bcm_sf2_switch_driver);

        return 0;
}
module_init(bcm_sf2_init);
static void __exit bcm_sf2_exit(void)
{
        unregister_switch_driver(&bcm_sf2_switch_driver);
}
module_exit(bcm_sf2_exit);
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:brcm-sf2");