/*
 * Broadcom Starfighter 2 DSA switch driver
 *
 * Copyright (C) 2014, Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/phylink.h>
#include <linux/mii.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <net/dsa.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>
#include <linux/brcmphy.h>
#include <linux/etherdevice.h>
#include <linux/platform_data/b53.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
#include "b53/b53_priv.h"
#include "b53/b53_regs.h"
static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int i;
	u32 reg, offset;

	/* Enable the port memories */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable forwarding */
	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);

	/* Enable IMP port in dumb mode */
	reg = core_readl(priv, CORE_SWITCH_CTRL);
	reg |= MII_DUMB_FWDG_EN;
	core_writel(priv, reg, CORE_SWITCH_CTRL);

	/* Configure Traffic Class to QoS mapping, allow each priority to map
	 * to a different queue number
	 */
	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
		reg |= i << (PRT_TO_QID_SHIFT * i);
	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));

	b53_brcm_hdr_setup(ds, port);

	if (priv->type == BCM7445_DEVICE_ID)
		offset = CORE_STS_OVERRIDE_IMP;
	else
		offset = CORE_STS_OVERRIDE_IMP2;

	/* Force link status for IMP port */
	reg = core_readl(priv, offset);
	reg |= (MII_SW_OR | LINK_STS);
	reg &= ~GMII_SPEED_UP_2G;
	core_writel(priv, reg, offset);

	/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
	reg = core_readl(priv, CORE_IMP_CTL);
	reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
	reg &= ~(RX_DIS | TX_DIS);
	core_writel(priv, reg, CORE_IMP_CTL);

	reg = core_readl(priv, CORE_G_PCTL_PORT(port));
	reg &= ~(RX_DIS | TX_DIS);
	core_writel(priv, reg, CORE_G_PCTL_PORT(port));
}
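/* Power the integrated GPHY up or down. The power-up path clears the
 * power-down/IDDQ bits and releases the PHY from reset; the power-down
 * path asserts reset and the power-down bits, then gates the CK25 clock
 * and hands LED control back to the PHY (SPDLNK_SRC_SEL).
 */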
static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	reg = reg_readl(priv, REG_SPHY_CNTRL);
	if (enable) {
		reg |= PHY_RESET;
		reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | IDDQ_GLOBAL_PWR | CK25_DIS);
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		udelay(21);
		reg = reg_readl(priv, REG_SPHY_CNTRL);
		reg &= ~PHY_RESET;
	} else {
		reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		mdelay(1);
		reg |= CK25_DIS;
	}
	reg_writel(priv, reg, REG_SPHY_CNTRL);

	/* Use PHY-driven LED signaling */
	if (!enable) {
		reg = reg_readl(priv, REG_LED_CNTRL(0));
		reg |= SPDLNK_SRC_SEL;
		reg_writel(priv, reg, REG_LED_CNTRL(0));
	}
}
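/* Port 0 interrupts live in the first interrupt controller bank (INTRL2_0);
 * interrupts for the remaining ports, including the MoCA port 7, live in
 * the second bank (INTRL2_1) at P_IRQ_OFF(port).
 */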
static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
					    int port)
{
	unsigned int off;

	switch (port) {
	case 7:
		off = P7_IRQ_OFF;
		break;
	case 0:
		/* Port 0 interrupts are located on the first bank */
		intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
		return;
	default:
		off = P_IRQ_OFF(port);
		break;
	}

	intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
}
static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
					     int port)
{
	unsigned int off;

	switch (port) {
	case 7:
		off = P7_IRQ_OFF;
		break;
	case 0:
		/* Port 0 interrupts are located on the first bank */
		intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
		intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
		return;
	default:
		off = P_IRQ_OFF(port);
		break;
	}

	intrl2_1_mask_set(priv, P_IRQ_MASK(off));
	intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
}
static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
			      struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int i;
	u32 reg;

	/* Clear the memory power down */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable learning */
	reg = core_readl(priv, CORE_DIS_LEARN);
	reg &= ~BIT(port);
	core_writel(priv, reg, CORE_DIS_LEARN);

	/* Enable Broadcom tags for that port if requested */
	if (priv->brcm_tag_mask & BIT(port))
		b53_brcm_hdr_setup(ds, port);

	/* Configure Traffic Class to QoS mapping, allow each priority to map
	 * to a different queue number
	 */
	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
		reg |= i << (PRT_TO_QID_SHIFT * i);
	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));

	/* Re-enable the GPHY and re-apply workarounds */
	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
		bcm_sf2_gphy_enable_set(ds, true);
		if (phy) {
			/* if phy_stop() has been called before, phy
			 * will be in halted state, and phy_start()
			 * will call resume.
			 *
			 * the resume path does not configure back
			 * autoneg settings, and since we hard reset
			 * the phy manually here, we need to reset the
			 * state machine also.
			 */
			phy->state = PHY_READY;
			phy_init_hw(phy);
		}
	}

	/* Enable MoCA port interrupts to get notified */
	if (port == priv->moca_port)
		bcm_sf2_port_intr_enable(priv, port);

	/* Set per-queue pause threshold to 32 */
	core_writel(priv, 32, CORE_TXQ_THD_PAUSE_QN_PORT(port));

	/* Set ACB threshold to 24 */
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++) {
		reg = acb_readl(priv, ACB_QUEUE_CFG(port *
						    SF2_NUM_EGRESS_QUEUES + i));
		reg &= ~XOFF_THRESHOLD_MASK;
		reg |= 24;
		acb_writel(priv, reg, ACB_QUEUE_CFG(port *
						    SF2_NUM_EGRESS_QUEUES + i));
	}

	return b53_enable_port(ds, port, phy);
}
static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
				 struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	/* Disable learning while in WoL mode */
	if (priv->wol_ports_mask & (1 << port)) {
		reg = core_readl(priv, CORE_DIS_LEARN);
		reg |= BIT(port);
		core_writel(priv, reg, CORE_DIS_LEARN);
		return;
	}

	if (port == priv->moca_port)
		bcm_sf2_port_intr_disable(priv, port);

	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, false);

	b53_disable_port(ds, port, phy);

	/* Power down the port memory */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg |= P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
}
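/* Indirect access to the integrated switch pseudo-PHY through the
 * SWITCH_MDIO master: MDIO_MASTER_SEL is temporarily set, the PHY address
 * and register number are written as page/offset accesses into the core
 * register space, and for reads (op != 0) the result is returned in the
 * low 16 bits.
 */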
static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
			       int regnum, u16 val)
{
	int ret = 0;
	u32 reg;

	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg |= MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	/* Page << 8 | offset */
	reg = 0x70;
	reg <<= 2;
	core_writel(priv, addr, reg);

	/* Page << 8 | offset */
	reg = 0x80 << 8 | regnum << 1;
	reg <<= 2;

	if (op)
		ret = core_readl(priv, reg);
	else
		core_writel(priv, val, reg);

	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg &= ~MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	return ret & 0xffff;
}
static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	struct bcm_sf2_priv *priv = bus->priv;

	/* Intercept reads from Broadcom pseudo-PHY address, else, send
	 * them to our master MDIO bus controller
	 */
	if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
		return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
	else
		return mdiobus_read_nested(priv->master_mii_bus, addr, regnum);
}
static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
				 u16 val)
{
	struct bcm_sf2_priv *priv = bus->priv;

	/* Intercept writes to the Broadcom pseudo-PHY address, else,
	 * send them to our master MDIO bus controller
	 */
	if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
		return bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
	else
		return mdiobus_write_nested(priv->master_mii_bus, addr,
					    regnum, val);
}
static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
{
	struct dsa_switch *ds = dev_id;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
				~priv->irq0_mask;
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	return IRQ_HANDLED;
}
static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
{
	struct dsa_switch *ds = dev_id;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
				~priv->irq1_mask;
	intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

	if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF)) {
		priv->port_sts[7].link = true;
		dsa_port_phylink_mac_change(ds, 7, true);
	}
	if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF)) {
		priv->port_sts[7].link = false;
		dsa_port_phylink_mac_change(ds, 7, false);
	}

	return IRQ_HANDLED;
}
static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_WATCHDOG_CTRL);
	reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
	core_writel(priv, reg, CORE_WATCHDOG_CTRL);

	do {
		reg = core_readl(priv, CORE_WATCHDOG_CTRL);
		if (!(reg & SOFTWARE_RESET))
			break;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	if (timeout == 0)
		return -ETIMEDOUT;

	return 0;
}
static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
{
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_mask_set(priv, 0xffffffff);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
}
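/* Port properties come from the "ports" child node in the device tree.
 * Illustrative fragment only (node and property names follow the standard
 * DSA binding; the values below are examples):
 *
 *	port@7 {
 *		reg = <7>;
 *		phy-mode = "moca";
 *		brcm,use-bcm-hdr;
 *	};
 *
 * "internal" flags an integrated PHY, "moca" selects the MoCA port, and
 * "brcm,use-bcm-hdr" requests Broadcom tags on that port.
 */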
static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
				   struct device_node *dn)
{
	struct device_node *port;
	int mode;
	unsigned int port_num;

	priv->moca_port = -1;

	for_each_available_child_of_node(dn, port) {
		if (of_property_read_u32(port, "reg", &port_num))
			continue;

		/* Internal PHYs get assigned a specific 'phy-mode' property
		 * value: "internal" to help flag them before MDIO probing
		 * has completed, since they might be turned off at that
		 * time
		 */
		mode = of_get_phy_mode(port);
		if (mode < 0)
			continue;

		if (mode == PHY_INTERFACE_MODE_INTERNAL)
			priv->int_phy_mask |= 1 << port_num;

		if (mode == PHY_INTERFACE_MODE_MOCA)
			priv->moca_port = port_num;

		if (of_property_read_bool(port, "brcm,use-bcm-hdr"))
			priv->brcm_tag_mask |= 1 << port_num;
	}
}
static int bcm_sf2_mdio_register(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct device_node *dn;
	static int index;
	int err;

	/* Find our integrated MDIO bus node */
	dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
	priv->master_mii_bus = of_mdio_find_bus(dn);
	if (!priv->master_mii_bus)
		return -EPROBE_DEFER;

	get_device(&priv->master_mii_bus->dev);
	priv->master_mii_dn = dn;

	priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
	if (!priv->slave_mii_bus)
		return -ENOMEM;

	priv->slave_mii_bus->priv = priv;
	priv->slave_mii_bus->name = "sf2 slave mii";
	priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read;
	priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write;
	snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
		 index++);
	priv->slave_mii_bus->dev.of_node = dn;

	/* Include the pseudo-PHY address to divert reads towards our
	 * workaround. This is only required for 7445D0, since 7445E0
	 * disconnects the internal switch pseudo-PHY such that we can use the
	 * regular SWITCH_MDIO master controller instead.
	 *
	 * Here we flag the pseudo PHY as needing special treatment and would
	 * otherwise make all other PHY read/writes go to the master MDIO bus
	 * controller that comes with this switch backed by the "mdio-unimac"
	 * driver.
	 */
	if (of_machine_is_compatible("brcm,bcm7445d0"))
		priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR);
	else
		priv->indir_phy_mask = 0;

	ds->phys_mii_mask = priv->indir_phy_mask;
	ds->slave_mii_bus = priv->slave_mii_bus;
	priv->slave_mii_bus->parent = ds->dev->parent;
	priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;

	err = mdiobus_register(priv->slave_mii_bus);
	if (err && dn)
		of_node_put(dn);

	return err;
}
static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
{
	mdiobus_unregister(priv->slave_mii_bus);
	if (priv->master_mii_dn)
		of_node_put(priv->master_mii_dn);
}
static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	/* The BCM7xxx PHY driver expects to find the integrated PHY revision
	 * in bits 15:8 and the patch level in bits 7:0 which is exactly what
	 * the REG_PHY_REVISION register layout is.
	 */
	return priv->hw_params.gphy_rev;
}
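/* phylink callback: reject interface modes the switch cannot drive (anything
 * that is not RGMII, MII, Reverse MII, GMII, internal or MoCA) and trim the
 * supported/advertised link-mode masks accordingly. Gigabit modes are left
 * out for MII and Reverse MII since those interfaces top out at 100 Mbit/s.
 */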
static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
				unsigned long *supported,
				struct phylink_link_state *state)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	if (!phy_interface_mode_is_rgmii(state->interface) &&
	    state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_REVMII &&
	    state->interface != PHY_INTERFACE_MODE_GMII &&
	    state->interface != PHY_INTERFACE_MODE_INTERNAL &&
	    state->interface != PHY_INTERFACE_MODE_MOCA) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		dev_err(ds->dev,
			"Unsupported interface: %d\n", state->interface);
		return;
	}

	/* Allow all the expected bits */
	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);
	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	/* With the exclusion of MII and Reverse MII, we support Gigabit,
	 * including Half duplex
	 */
	if (state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_REVMII) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseT_Half);
	}

	phylink_set(mask, 10baseT_Half);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Half);
	phylink_set(mask, 100baseT_Full);

	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}
static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
				  unsigned int mode,
				  const struct phylink_link_state *state)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 id_mode_dis = 0, port_mode;
	u32 reg, offset;

	if (priv->type == BCM7445_DEVICE_ID)
		offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
	else
		offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		id_mode_dis = 1;
		/* fallthrough */
	case PHY_INTERFACE_MODE_RGMII_TXID:
		port_mode = EXT_GPHY;
		break;
	case PHY_INTERFACE_MODE_MII:
		port_mode = EXT_EPHY;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		port_mode = EXT_REVMII;
		break;
	default:
		/* all other PHYs: internal and MoCA */
		goto force_link;
	}

	/* Clear id_mode_dis bit, and the existing port mode, let
	 * RGMII_MODE_EN be set by mac_link_{up,down}
	 */
	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
	reg &= ~ID_MODE_DIS;
	reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
	reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);

	reg |= port_mode;
	if (id_mode_dis)
		reg |= ID_MODE_DIS;

	if (state->pause & MLO_PAUSE_TXRX_MASK) {
		if (state->pause & MLO_PAUSE_TX)
			reg |= TX_PAUSE_EN;
		reg |= RX_PAUSE_EN;
	}

	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));

force_link:
	/* Force link settings detected from the PHY */
	reg = SW_OVERRIDE;
	switch (state->speed) {
	case SPEED_1000:
		reg |= SPDSTS_1000 << SPEED_SHIFT;
		break;
	case SPEED_100:
		reg |= SPDSTS_100 << SPEED_SHIFT;
		break;
	}

	if (state->link)
		reg |= LINK_STS;
	if (state->duplex == DUPLEX_FULL)
		reg |= DUPLX_MODE;

	core_writel(priv, reg, offset);
}
static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port,
				    phy_interface_t interface, bool link)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	if (!phy_interface_mode_is_rgmii(interface) &&
	    interface != PHY_INTERFACE_MODE_MII &&
	    interface != PHY_INTERFACE_MODE_REVMII)
		return;

	/* If the link is down, just disable the interface to conserve power */
	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
	if (link)
		reg |= RGMII_MODE_EN;
	else
		reg &= ~RGMII_MODE_EN;
	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
}
static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
				     unsigned int mode,
				     phy_interface_t interface)
{
	bcm_sf2_sw_mac_link_set(ds, port, interface, false);
}
static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
				   unsigned int mode,
				   phy_interface_t interface,
				   struct phy_device *phydev)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_eee *p = &priv->dev->ports[port].eee;

	bcm_sf2_sw_mac_link_set(ds, port, interface, true);

	if (mode == MLO_AN_PHY && phydev)
		p->eee_enabled = b53_eee_init(ds, port, phydev);
}
static void bcm_sf2_sw_fixed_state(struct dsa_switch *ds, int port,
				   struct phylink_link_state *status)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	status->link = false;

	/* MoCA port is special as we do not get link status from CORE_LNKSTS,
	 * which means that we need to force the link at the port override
	 * level to get the data to flow. We do use what the interrupt handler
	 * did determine before.
	 *
	 * For the other ports, we just force the link status, since this is
	 * a fixed PHY device.
	 */
	if (port == priv->moca_port) {
		status->link = priv->port_sts[port].link;
		/* For MoCA interfaces, also force a link down notification
		 * since some version of the user-space daemon (mocad) use
		 * cmd->autoneg to force the link, which messes up the PHY
		 * state machine and make it go in PHY_FORCING state instead.
		 */
		if (!status->link)
			netif_carrier_off(ds->ports[port].slave);
		status->duplex = DUPLEX_FULL;
	} else {
		status->link = true;
	}
}
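/* Globally enable ACB: request a flush of all queues first (ACB_FLUSH_MASK),
 * then clear the flush bits and set ACB_EN together with the algorithm
 * selection bit in a second write to ACB_CONTROL.
 */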
static void bcm_sf2_enable_acb(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	/* Enable ACB globally */
	reg = acb_readl(priv, ACB_CONTROL);
	reg |= (ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
	acb_writel(priv, reg, ACB_CONTROL);
	reg &= ~(ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
	reg |= ACB_EN | ACB_ALGORITHM;
	acb_writel(priv, reg, ACB_CONTROL);
}
static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port;

	bcm_sf2_intr_disable(priv);

	/* Disable all ports physically present including the IMP
	 * port, the other ones have already been disabled during
	 * bcm_sf2_sw_setup
	 */
	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port))
			bcm_sf2_port_disable(ds, port, NULL);
	}

	return 0;
}
static int bcm_sf2_sw_resume(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret;

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("%s: failed to software reset switch\n", __func__);
		return ret;
	}

	if (priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, true);

	ds->ops->setup(ds);

	return 0;
}
static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
			       struct ethtool_wolinfo *wol)
{
	struct net_device *p = ds->ports[port].cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_wolinfo pwol = { };

	/* Get the parent device WoL settings */
	if (p->ethtool_ops->get_wol)
		p->ethtool_ops->get_wol(p, &pwol);

	/* Advertise the parent device supported settings */
	wol->supported = pwol.supported;
	memset(&wol->sopass, 0, sizeof(wol->sopass));

	if (pwol.wolopts & WAKE_MAGICSECURE)
		memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass));

	if (priv->wol_ports_mask & (1 << port))
		wol->wolopts = pwol.wolopts;
	else
		wol->wolopts = 0;
}
static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
			      struct ethtool_wolinfo *wol)
{
	struct net_device *p = ds->ports[port].cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	s8 cpu_port = ds->ports[port].cpu_dp->index;
	struct ethtool_wolinfo pwol = { };

	if (p->ethtool_ops->get_wol)
		p->ethtool_ops->get_wol(p, &pwol);
	if (wol->wolopts & ~pwol.supported)
		return -EINVAL;

	if (wol->wolopts)
		priv->wol_ports_mask |= (1 << port);
	else
		priv->wol_ports_mask &= ~(1 << port);

	/* If we have at least one port enabled, make sure the CPU port
	 * is also enabled. If the CPU port is the last one enabled, we disable
	 * it since this configuration does not make sense.
	 */
	if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
		priv->wol_ports_mask |= (1 << cpu_port);
	else
		priv->wol_ports_mask &= ~(1 << cpu_port);

	return p->ethtool_ops->set_wol(p, wol);
}
static int bcm_sf2_sw_setup(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port;

	/* Enable all valid ports and disable those unused */
	for (port = 0; port < priv->hw_params.num_ports; port++) {
		/* IMP port receives special treatment */
		if (dsa_is_user_port(ds, port))
			bcm_sf2_port_setup(ds, port, NULL);
		else if (dsa_is_cpu_port(ds, port))
			bcm_sf2_imp_setup(ds, port);
		else
			bcm_sf2_port_disable(ds, port, NULL);
	}

	b53_configure_vlan(ds);
	bcm_sf2_enable_acb(ds);

	return 0;
}
/* The SWITCH_CORE register space is managed by b53 but operates on a page +
 * register basis so we need to translate that into an address that the
 * bus-glue understands.
 */
#define SF2_PAGE_REG_MKADDR(page, reg)	((page) << 10 | (reg) << 2)
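/* Example (illustrative values): page 0x28, register 0x10 maps to
 * (0x28 << 10) | (0x10 << 2) = 0xa000 | 0x40 = 0xa040, the offset passed
 * to core_readl()/core_writel() by the accessors below.
 */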
static int bcm_sf2_core_read8(struct b53_device *dev, u8 page, u8 reg,
			      u8 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_read16(struct b53_device *dev, u8 page, u8 reg,
			       u16 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_read32(struct b53_device *dev, u8 page, u8 reg,
			       u32 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_read64(struct b53_device *dev, u8 page, u8 reg,
			       u64 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readq(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write8(struct b53_device *dev, u8 page, u8 reg,
			       u8 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write16(struct b53_device *dev, u8 page, u8 reg,
				u16 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write32(struct b53_device *dev, u8 page, u8 reg,
				u32 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write64(struct b53_device *dev, u8 page, u8 reg,
				u64 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writeq(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}
static const struct b53_io_ops bcm_sf2_io_ops = {
	.read8	= bcm_sf2_core_read8,
	.read16	= bcm_sf2_core_read16,
	.read32	= bcm_sf2_core_read32,
	.read48	= bcm_sf2_core_read64,
	.read64	= bcm_sf2_core_read64,
	.write8	= bcm_sf2_core_write8,
	.write16 = bcm_sf2_core_write16,
	.write32 = bcm_sf2_core_write32,
	.write48 = bcm_sf2_core_write64,
	.write64 = bcm_sf2_core_write64,
};
static const struct dsa_switch_ops bcm_sf2_ops = {
	.get_tag_protocol	= b53_get_tag_protocol,
	.setup			= bcm_sf2_sw_setup,
	.get_strings		= b53_get_strings,
	.get_ethtool_stats	= b53_get_ethtool_stats,
	.get_sset_count		= b53_get_sset_count,
	.get_ethtool_phy_stats	= b53_get_ethtool_phy_stats,
	.get_phy_flags		= bcm_sf2_sw_get_phy_flags,
	.phylink_validate	= bcm_sf2_sw_validate,
	.phylink_mac_config	= bcm_sf2_sw_mac_config,
	.phylink_mac_link_down	= bcm_sf2_sw_mac_link_down,
	.phylink_mac_link_up	= bcm_sf2_sw_mac_link_up,
	.phylink_fixed_state	= bcm_sf2_sw_fixed_state,
	.suspend		= bcm_sf2_sw_suspend,
	.resume			= bcm_sf2_sw_resume,
	.get_wol		= bcm_sf2_sw_get_wol,
	.set_wol		= bcm_sf2_sw_set_wol,
	.port_enable		= bcm_sf2_port_setup,
	.port_disable		= bcm_sf2_port_disable,
	.get_mac_eee		= b53_get_mac_eee,
	.set_mac_eee		= b53_set_mac_eee,
	.port_bridge_join	= b53_br_join,
	.port_bridge_leave	= b53_br_leave,
	.port_stp_state_set	= b53_br_set_stp_state,
	.port_fast_age		= b53_br_fast_age,
	.port_vlan_filtering	= b53_vlan_filtering,
	.port_vlan_prepare	= b53_vlan_prepare,
	.port_vlan_add		= b53_vlan_add,
	.port_vlan_del		= b53_vlan_del,
	.port_fdb_dump		= b53_fdb_dump,
	.port_fdb_add		= b53_fdb_add,
	.port_fdb_del		= b53_fdb_del,
	.get_rxnfc		= bcm_sf2_get_rxnfc,
	.set_rxnfc		= bcm_sf2_set_rxnfc,
	.port_mirror_add	= b53_mirror_add,
	.port_mirror_del	= b53_mirror_del,
};
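/* Per-compatible configuration: device ID handed to b53, SWITCH_REG register
 * offsets, SWITCH_CORE register alignment and the number of CFP rules
 * supported by the chip.
 */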
struct bcm_sf2_of_data {
	u32 type;
	const u16 *reg_offsets;
	unsigned int core_reg_align;
	unsigned int num_cfp_rules;
};
/* Register offsets for the SWITCH_REG_* block */
static const u16 bcm_sf2_7445_reg_offsets[] = {
	[REG_SWITCH_CNTRL]	= 0x00,
	[REG_SWITCH_STATUS]	= 0x04,
	[REG_DIR_DATA_WRITE]	= 0x08,
	[REG_DIR_DATA_READ]	= 0x0C,
	[REG_SWITCH_REVISION]	= 0x18,
	[REG_PHY_REVISION]	= 0x1C,
	[REG_SPHY_CNTRL]	= 0x2C,
	[REG_RGMII_0_CNTRL]	= 0x34,
	[REG_RGMII_1_CNTRL]	= 0x40,
	[REG_RGMII_2_CNTRL]	= 0x4c,
	[REG_LED_0_CNTRL]	= 0x90,
	[REG_LED_1_CNTRL]	= 0x94,
	[REG_LED_2_CNTRL]	= 0x98,
};
static const struct bcm_sf2_of_data bcm_sf2_7445_data = {
	.type		= BCM7445_DEVICE_ID,
	.core_reg_align	= 0,
	.reg_offsets	= bcm_sf2_7445_reg_offsets,
	.num_cfp_rules	= 256,
};
static const u16 bcm_sf2_7278_reg_offsets[] = {
	[REG_SWITCH_CNTRL]	= 0x00,
	[REG_SWITCH_STATUS]	= 0x04,
	[REG_DIR_DATA_WRITE]	= 0x08,
	[REG_DIR_DATA_READ]	= 0x0c,
	[REG_SWITCH_REVISION]	= 0x10,
	[REG_PHY_REVISION]	= 0x14,
	[REG_SPHY_CNTRL]	= 0x24,
	[REG_RGMII_0_CNTRL]	= 0xe0,
	[REG_RGMII_1_CNTRL]	= 0xec,
	[REG_RGMII_2_CNTRL]	= 0xf8,
	[REG_LED_0_CNTRL]	= 0x40,
	[REG_LED_1_CNTRL]	= 0x4c,
	[REG_LED_2_CNTRL]	= 0x58,
};
static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
	.type		= BCM7278_DEVICE_ID,
	.core_reg_align	= 1,
	.reg_offsets	= bcm_sf2_7278_reg_offsets,
	.num_cfp_rules	= 128,
};
static const struct of_device_id bcm_sf2_of_match[] = {
	{ .compatible = "brcm,bcm7445-switch-v4.0",
	  .data = &bcm_sf2_7445_data
	},
	{ .compatible = "brcm,bcm7278-switch-v4.0",
	  .data = &bcm_sf2_7278_data
	},
	{ .compatible = "brcm,bcm7278-switch-v4.8",
	  .data = &bcm_sf2_7278_data
	},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);
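/* Probe order matters below: the switch is taken out of software reset
 * before the MDIO buses are registered, the GPHY is temporarily powered up
 * so the internal PHYs can be probed and then powered back down, interrupts
 * are masked and cleared before being requested, and b53_switch_register()
 * runs last once the hardware parameters (port count, revisions) have been
 * read back from the chip.
 */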
static int bcm_sf2_sw_probe(struct platform_device *pdev)
{
	const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
	struct device_node *dn = pdev->dev.of_node;
	const struct of_device_id *of_id = NULL;
	const struct bcm_sf2_of_data *data;
	struct b53_platform_data *pdata;
	struct dsa_switch_ops *ops;
	struct device_node *ports;
	struct bcm_sf2_priv *priv;
	struct b53_device *dev;
	struct dsa_switch *ds;
	void __iomem **base;
	struct resource *r;
	unsigned int i;
	u32 reg, rev;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	ops = devm_kzalloc(&pdev->dev, sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	dev = b53_switch_alloc(&pdev->dev, &bcm_sf2_io_ops, priv);
	if (!dev)
		return -ENOMEM;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	of_id = of_match_node(bcm_sf2_of_match, dn);
	if (!of_id || !of_id->data)
		return -EINVAL;

	data = of_id->data;

	/* Set SWITCH_REG register offsets and SWITCH_CORE align factor */
	priv->type = data->type;
	priv->reg_offsets = data->reg_offsets;
	priv->core_reg_align = data->core_reg_align;
	priv->num_cfp_rules = data->num_cfp_rules;

	/* Auto-detection using standard registers will not work, so
	 * provide an indication of what kind of device we are for
	 * b53_common to work with
	 */
	pdata->chip_id = priv->type;
	dev->pdata = pdata;

	priv->dev = dev;
	ds = dev->ds;
	ds->ops = &bcm_sf2_ops;

	/* Advertise the 8 egress queues */
	ds->num_tx_queues = SF2_NUM_EGRESS_QUEUES;

	dev_set_drvdata(&pdev->dev, priv);

	spin_lock_init(&priv->indir_lock);
	mutex_init(&priv->stats_mutex);
	mutex_init(&priv->cfp.lock);

	/* CFP rule #0 cannot be used for specific classifications, flag it as
	 * permanently used
	 */
	set_bit(0, priv->cfp.used);
	set_bit(0, priv->cfp.unique);

	ports = of_find_node_by_name(dn, "ports");
	if (ports) {
		bcm_sf2_identify_ports(priv, ports);
		of_node_put(ports);
	}

	priv->irq0 = irq_of_parse_and_map(dn, 0);
	priv->irq1 = irq_of_parse_and_map(dn, 1);

	base = &priv->core;
	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
		r = platform_get_resource(pdev, IORESOURCE_MEM, i);
		*base = devm_ioremap_resource(&pdev->dev, r);
		if (IS_ERR(*base)) {
			pr_err("unable to find register: %s\n", reg_names[i]);
			return PTR_ERR(*base);
		}
		base++;
	}

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("unable to software reset switch: %d\n", ret);
		return ret;
	}

	bcm_sf2_gphy_enable_set(priv->dev->ds, true);

	ret = bcm_sf2_mdio_register(ds);
	if (ret) {
		pr_err("failed to register MDIO bus\n");
		return ret;
	}

	bcm_sf2_gphy_enable_set(priv->dev->ds, false);

	ret = bcm_sf2_cfp_rst(priv);
	if (ret) {
		pr_err("failed to reset CFP\n");
		goto out_mdio;
	}

	/* Disable all interrupts and request them */
	bcm_sf2_intr_disable(priv);

	ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0,
			       "switch_0", ds);
	if (ret < 0) {
		pr_err("failed to request switch_0 IRQ\n");
		goto out_mdio;
	}

	ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0,
			       "switch_1", ds);
	if (ret < 0) {
		pr_err("failed to request switch_1 IRQ\n");
		goto out_mdio;
	}

	/* Reset the MIB counters */
	reg = core_readl(priv, CORE_GMNCFGCFG);
	reg |= RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);
	reg &= ~RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);

	/* Get the maximum number of ports for this switch */
	priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
	if (priv->hw_params.num_ports > DSA_MAX_PORTS)
		priv->hw_params.num_ports = DSA_MAX_PORTS;

	/* Assume a single GPHY setup if we can't read that property */
	if (of_property_read_u32(dn, "brcm,num-gphy",
				 &priv->hw_params.num_gphy))
		priv->hw_params.num_gphy = 1;

	rev = reg_readl(priv, REG_SWITCH_REVISION);
	priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
					SWITCH_TOP_REV_MASK;
	priv->hw_params.core_rev = (rev & SF2_REV_MASK);

	rev = reg_readl(priv, REG_PHY_REVISION);
	priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;

	ret = b53_switch_register(dev);
	if (ret)
		goto out_mdio;

	pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
		priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
		priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
		priv->core, priv->irq0, priv->irq1);

	return 0;

out_mdio:
	bcm_sf2_mdio_unregister(priv);
	return ret;
}
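/* Teardown mirrors probe in reverse: unregister the DSA switch first, then
 * force all ports and interrupts off via the suspend path, and finally drop
 * the MDIO buses. WoL is cleared so no port is left powered for wake-up.
 */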
static int bcm_sf2_sw_remove(struct platform_device *pdev)
{
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	priv->wol_ports_mask = 0;
	dsa_unregister_switch(priv->dev->ds);
	/* Disable all ports and interrupts */
	bcm_sf2_sw_suspend(priv->dev->ds);
	bcm_sf2_mdio_unregister(priv);

	return 0;
}
static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
{
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	/* For a kernel about to be kexec'd we want to keep the GPHY on for a
	 * successful MDIO bus scan to occur. If we did turn off the GPHY
	 * before (e.g: port_disable), this will also power it back on.
	 *
	 * Do not rely on kexec_in_progress, just power the PHY on.
	 */
	if (priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(priv->dev->ds, true);
}
#ifdef CONFIG_PM_SLEEP
static int bcm_sf2_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	return dsa_switch_suspend(priv->dev->ds);
}

static int bcm_sf2_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	return dsa_switch_resume(priv->dev->ds);
}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops,
			 bcm_sf2_suspend, bcm_sf2_resume);
static struct platform_driver bcm_sf2_driver = {
	.probe	= bcm_sf2_sw_probe,
	.remove	= bcm_sf2_sw_remove,
	.shutdown = bcm_sf2_sw_shutdown,
	.driver = {
		.name = "brcm-sf2",
		.of_match_table = bcm_sf2_of_match,
		.pm = &bcm_sf2_pm_ops,
	},
};
module_platform_driver(bcm_sf2_driver);
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:brcm-sf2");