/*
 * Broadcom Starfighter 2 DSA switch driver
 *
 * Copyright (C) 2014, Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/mii.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <net/dsa.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>
#include <linux/brcmphy.h>
#include <linux/etherdevice.h>
#include <linux/platform_data/b53.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
#include "b53/b53_priv.h"
#include "b53/b53_regs.h"
static enum dsa_tag_protocol bcm_sf2_sw_get_tag_protocol(struct dsa_switch *ds,
							 int port)
{
	return DSA_TAG_PROTO_BRCM;
}
static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int i;
	u32 reg, offset;

	if (priv->type == BCM7445_DEVICE_ID)
		offset = CORE_STS_OVERRIDE_IMP;
	else
		offset = CORE_STS_OVERRIDE_IMP2;

	/* Enable the port memories */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
	reg = core_readl(priv, CORE_IMP_CTL);
	reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
	reg &= ~(RX_DIS | TX_DIS);
	core_writel(priv, reg, CORE_IMP_CTL);

	/* Enable forwarding */
	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);

	/* Enable IMP port in dumb mode */
	reg = core_readl(priv, CORE_SWITCH_CTRL);
	reg |= MII_DUMB_FWDG_EN;
	core_writel(priv, reg, CORE_SWITCH_CTRL);

	/* Configure Traffic Class to QoS mapping, allow each priority to map
	 * to a different queue number
	 */
	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
		reg |= i << (PRT_TO_QID_SHIFT * i);
	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));

	b53_brcm_hdr_setup(ds, port);

	/* Force link status for IMP port */
	reg = core_readl(priv, offset);
	reg |= (MII_SW_OR | LINK_STS);
	core_writel(priv, reg, offset);
}
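/* Note on the Traffic Class to QoS mapping programmed above: the loop builds
 * an identity TC-to-egress-queue map, packing queue number i into the i-th
 * PRT_TO_QID_SHIFT-wide field of the register. For illustration only, assuming
 * the usual values SF2_NUM_EGRESS_QUEUES == 8 and PRT_TO_QID_SHIFT == 3, the
 * resulting value is:
 *
 *	0 << 0 | 1 << 3 | 2 << 6 | ... | 7 << 21 == 0x00fac688
 *
 * so traffic class N is serviced by egress queue N on that port.
 */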
static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	reg = reg_readl(priv, REG_SPHY_CNTRL);
	if (enable) {
		reg |= PHY_RESET;
		reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | IDDQ_GLOBAL_PWR | CK25_DIS);
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		udelay(21);
		reg = reg_readl(priv, REG_SPHY_CNTRL);
		reg &= ~PHY_RESET;
	} else {
		reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		mdelay(1);
		reg |= CK25_DIS;
	}
	reg_writel(priv, reg, REG_SPHY_CNTRL);
	mdelay(1);

	/* Use PHY-driven LED signaling */
	if (!enable) {
		reg = reg_readl(priv, REG_LED_CNTRL(0));
		reg |= SPDLNK_SRC_SEL;
		reg_writel(priv, reg, REG_LED_CNTRL(0));
	}
}
static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
					    int port)
{
	unsigned int off;

	switch (port) {
	case 7:
		intrl2_1_mask_clear(priv, P_IRQ_MASK(P7_IRQ_OFF));
		break;
	case 0:
		/* Port 0 interrupts are located on the first bank */
		intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
		break;
	default:
		off = P_IRQ_OFF(port);
		intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
		break;
	}
}
static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
					     int port)
{
	unsigned int off;

	switch (port) {
	case 7:
		intrl2_1_mask_set(priv, P_IRQ_MASK(P7_IRQ_OFF));
		intrl2_1_writel(priv, P_IRQ_MASK(P7_IRQ_OFF), INTRL2_CPU_CLEAR);
		break;
	case 0:
		/* Port 0 interrupts are located on the first bank */
		intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
		intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
		break;
	default:
		off = P_IRQ_OFF(port);
		intrl2_1_mask_set(priv, P_IRQ_MASK(off));
		intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
		break;
	}
}
static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
			      struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int i;
	u32 reg;

	/* Clear the memory power down */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable Broadcom tags for that port if requested */
	if (priv->brcm_tag_mask & BIT(port))
		b53_brcm_hdr_setup(ds, port);

	/* Configure Traffic Class to QoS mapping, allow each priority to map
	 * to a different queue number
	 */
	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
		reg |= i << (PRT_TO_QID_SHIFT * i);
	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));

	/* Re-enable the GPHY and re-apply workarounds */
	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
		bcm_sf2_gphy_enable_set(ds, true);
		if (phy) {
			/* if phy_stop() has been called before, phy
			 * will be in halted state, and phy_start()
			 * will call resume.
			 *
			 * the resume path does not configure back
			 * autoneg settings, and since we hard reset
			 * the phy manually here, we need to reset the
			 * state machine also.
			 */
			phy->state = PHY_READY;
			phy_init_hw(phy);
		}
	}

	/* Enable MoCA port interrupts to get notified */
	if (port == priv->moca_port)
		bcm_sf2_port_intr_enable(priv, port);

	/* Set per-queue pause threshold to 32 */
	core_writel(priv, 32, CORE_TXQ_THD_PAUSE_QN_PORT(port));

	/* Set ACB threshold to 24 */
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++) {
		reg = acb_readl(priv, ACB_QUEUE_CFG(port *
						    SF2_NUM_EGRESS_QUEUES + i));
		reg &= ~XOFF_THRESHOLD_MASK;
		reg |= 24;
		acb_writel(priv, reg, ACB_QUEUE_CFG(port *
						    SF2_NUM_EGRESS_QUEUES + i));
	}

	return b53_enable_port(ds, port, phy);
}
static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
				 struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 off, reg;

	if (priv->wol_ports_mask & (1 << port))
		return;

	if (port == priv->moca_port)
		bcm_sf2_port_intr_disable(priv, port);

	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, false);

	if (dsa_is_cpu_port(ds, port))
		off = CORE_IMP_CTL;
	else
		off = CORE_G_PCTL_PORT(port);

	b53_disable_port(ds, port, phy);

	/* Power down the port memory */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg |= P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
}
static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
			       int regnum, u16 val)
{
	int ret = 0;
	u32 reg;

	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg |= MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	/* Page << 8 | offset */
	reg = 0x70;
	reg <<= 2;
	core_writel(priv, addr, reg);

	/* Page << 8 | offset */
	reg = 0x80 << 8 | regnum << 1;
	reg <<= 2;
	if (op)
		ret = core_readl(priv, reg);
	else
		core_writel(priv, val, reg);

	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg &= ~MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	return ret & 0xffff;
}
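/* Worked example for the indirect access above, for illustration only: a read
 * of MII_BMSR (regnum 0x01) through the pseudo-PHY ends up accessing the CORE
 * address ((0x80 << 8 | 0x01 << 1) << 2) == 0x20008, i.e. page 0x80 at offset
 * regnum * 2, with MDIO_MASTER_SEL held across the whole sequence.
 */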
static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	struct bcm_sf2_priv *priv = bus->priv;

	/* Intercept reads from Broadcom pseudo-PHY address, else, send
	 * them to our master MDIO bus controller
	 */
	if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
		return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
	else
		return mdiobus_read_nested(priv->master_mii_bus, addr, regnum);
}
static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
				 u16 val)
{
	struct bcm_sf2_priv *priv = bus->priv;

	/* Intercept writes to the Broadcom pseudo-PHY address, else,
	 * send them to our master MDIO bus controller
	 */
	if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
		bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
	else
		mdiobus_write_nested(priv->master_mii_bus, addr, regnum, val);

	return 0;
}
static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
{
	struct bcm_sf2_priv *priv = dev_id;

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
				~priv->irq0_mask;
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	return IRQ_HANDLED;
}
static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
{
	struct bcm_sf2_priv *priv = dev_id;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
				~priv->irq1_mask;
	intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

	if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF))
		priv->port_sts[7].link = 1;
	if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF))
		priv->port_sts[7].link = 0;

	return IRQ_HANDLED;
}
static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_WATCHDOG_CTRL);
	reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
	core_writel(priv, reg, CORE_WATCHDOG_CTRL);

	do {
		reg = core_readl(priv, CORE_WATCHDOG_CTRL);
		if (!(reg & SOFTWARE_RESET))
			break;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	if (timeout == 0)
		return -ETIMEDOUT;

	return 0;
}
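/* The polling loop above gives the self-clearing SOFTWARE_RESET bit up to
 * roughly 1000 iterations of usleep_range(1000, 2000), i.e. about 1-2 seconds
 * in the worst case, before the reset is reported as timed out.
 */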
static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
{
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_mask_set(priv, 0xffffffff);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
}
static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
				   struct device_node *dn)
{
	struct device_node *port;
	int mode;
	unsigned int port_num;

	priv->moca_port = -1;

	for_each_available_child_of_node(dn, port) {
		if (of_property_read_u32(port, "reg", &port_num))
			continue;

		/* Internal PHYs get assigned a specific 'phy-mode' property
		 * value: "internal" to help flag them before MDIO probing
		 * has completed, since they might be turned off at that
		 * point
		 */
		mode = of_get_phy_mode(port);
		if (mode < 0)
			continue;

		if (mode == PHY_INTERFACE_MODE_INTERNAL)
			priv->int_phy_mask |= 1 << port_num;

		if (mode == PHY_INTERFACE_MODE_MOCA)
			priv->moca_port = port_num;

		if (of_property_read_bool(port, "brcm,use-bcm-hdr"))
			priv->brcm_tag_mask |= 1 << port_num;
	}
}
static int bcm_sf2_mdio_register(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct device_node *dn;
	static int index;
	int err;

	/* Find our integrated MDIO bus node */
	dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
	priv->master_mii_bus = of_mdio_find_bus(dn);
	if (!priv->master_mii_bus)
		return -EPROBE_DEFER;

	get_device(&priv->master_mii_bus->dev);
	priv->master_mii_dn = dn;

	priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
	if (!priv->slave_mii_bus)
		return -ENOMEM;

	priv->slave_mii_bus->priv = priv;
	priv->slave_mii_bus->name = "sf2 slave mii";
	priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read;
	priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write;
	snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
		 index++);
	priv->slave_mii_bus->dev.of_node = dn;

	/* Include the pseudo-PHY address to divert reads towards our
	 * workaround. This is only required for 7445D0, since 7445E0
	 * disconnects the internal switch pseudo-PHY such that we can use the
	 * regular SWITCH_MDIO master controller instead.
	 *
	 * Here we flag the pseudo PHY as needing special treatment and would
	 * otherwise make all other PHY read/writes go to the master MDIO bus
	 * controller that comes with this switch backed by the "mdio-unimac"
	 * driver.
	 */
	if (of_machine_is_compatible("brcm,bcm7445d0"))
		priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR);
	else
		priv->indir_phy_mask = 0;

	ds->phys_mii_mask = priv->indir_phy_mask;
	ds->slave_mii_bus = priv->slave_mii_bus;
	priv->slave_mii_bus->parent = ds->dev->parent;
	priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;

	if (dn)
		err = of_mdiobus_register(priv->slave_mii_bus, dn);
	else
		err = mdiobus_register(priv->slave_mii_bus);

	if (err)
		of_node_put(dn);

	return err;
}
static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
{
	mdiobus_unregister(priv->slave_mii_bus);
	if (priv->master_mii_dn)
		of_node_put(priv->master_mii_dn);
}
static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	/* The BCM7xxx PHY driver expects to find the integrated PHY revision
	 * in bits 15:8 and the patch level in bits 7:0 which is exactly what
	 * the REG_PHY_REVISION register layout is.
	 */
	return priv->hw_params.gphy_rev;
}
static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
				   struct phy_device *phydev)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_eee *p = &priv->dev->ports[port].eee;
	u32 id_mode_dis = 0, port_mode;
	const char *str = NULL;
	u32 reg, offset;

	if (priv->type == BCM7445_DEVICE_ID)
		offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
	else
		offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		str = "RGMII (no delay)";
		id_mode_dis = 1;
		/* fall through */
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (!str)
			str = "RGMII (TX delay)";
		port_mode = EXT_GPHY;
		break;
	case PHY_INTERFACE_MODE_MII:
		str = "MII";
		port_mode = EXT_EPHY;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		str = "Reverse MII";
		port_mode = EXT_REVMII;
		break;
	default:
		/* All other PHYs: internal and MoCA */
		goto force_link;
	}

	/* If the link is down, just disable the interface to conserve power */
	if (!phydev->link) {
		reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
		reg &= ~RGMII_MODE_EN;
		reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
		goto force_link;
	}

	/* Clear id_mode_dis bit, and the existing port mode, but
	 * make sure we enable the RGMII block for data to pass
	 */
	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
	reg &= ~ID_MODE_DIS;
	reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
	reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);

	reg |= port_mode | RGMII_MODE_EN;
	if (id_mode_dis)
		reg |= ID_MODE_DIS;

	if (phydev->pause) {
		if (phydev->asym_pause)
			reg |= TX_PAUSE_EN;
		reg |= RX_PAUSE_EN;
	}

	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));

	pr_info("Port %d configured for %s\n", port, str);

force_link:
	/* Force link settings detected from the PHY */
	reg = SW_OVERRIDE;
	switch (phydev->speed) {
	case SPEED_1000:
		reg |= SPDSTS_1000 << SPEED_SHIFT;
		break;
	case SPEED_100:
		reg |= SPDSTS_100 << SPEED_SHIFT;
		break;
	}

	if (phydev->link)
		reg |= LINK_STS;
	if (phydev->duplex == DUPLEX_FULL)
		reg |= DUPLX_MODE;

	core_writel(priv, reg, offset);

	if (!phydev->is_pseudo_fixed_link)
		p->eee_enabled = b53_eee_init(ds, port, phydev);
}
static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
					 struct fixed_phy_status *status)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 duplex, pause, offset;
	u32 reg;

	if (priv->type == BCM7445_DEVICE_ID)
		offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
	else
		offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);

	duplex = core_readl(priv, CORE_DUPSTS);
	pause = core_readl(priv, CORE_PAUSESTS);

	status->link = 0;

	/* MoCA port is special as we do not get link status from CORE_LNKSTS,
	 * which means that we need to force the link at the port override
	 * level to get the data to flow. We do use what the interrupt handler
	 * did determine before.
	 *
	 * For the other ports, we just force the link status, since this is
	 * a fixed PHY device.
	 */
	if (port == priv->moca_port) {
		status->link = priv->port_sts[port].link;
		/* For MoCA interfaces, also force a link down notification
		 * since some version of the user-space daemon (mocad) use
		 * cmd->autoneg to force the link, which messes up the PHY
		 * state machine and make it go in PHY_FORCING state instead.
		 */
		if (!status->link)
			netif_carrier_off(ds->ports[port].slave);
		status->duplex = 1;
	} else {
		status->link = 1;
		status->duplex = !!(duplex & (1 << port));
	}

	reg = core_readl(priv, offset);
	reg |= SW_OVERRIDE;
	if (status->link)
		reg |= LINK_STS;
	else
		reg &= ~LINK_STS;
	core_writel(priv, reg, offset);

	if ((pause & (1 << port)) &&
	    (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
		status->asym_pause = 1;
		status->pause = 1;
	}

	if (pause & (1 << port))
		status->pause = 1;
}
static void bcm_sf2_enable_acb(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	/* Enable ACB globally */
	reg = acb_readl(priv, ACB_CONTROL);
	reg |= (ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
	acb_writel(priv, reg, ACB_CONTROL);
	reg &= ~(ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
	reg |= ACB_EN | ACB_ALGORITHM;
	acb_writel(priv, reg, ACB_CONTROL);
}
static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port;

	bcm_sf2_intr_disable(priv);

	/* Disable all ports physically present including the IMP
	 * port, the other ones have already been disabled during
	 * bcm_sf2_sw_setup
	 */
	for (port = 0; port < DSA_MAX_PORTS; port++) {
		if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port))
			bcm_sf2_port_disable(ds, port, NULL);
	}

	return 0;
}
static int bcm_sf2_sw_resume(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port;
	int ret;

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("%s: failed to software reset switch\n", __func__);
		return ret;
	}

	if (priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, true);

	for (port = 0; port < DSA_MAX_PORTS; port++) {
		if (dsa_is_user_port(ds, port))
			bcm_sf2_port_setup(ds, port, NULL);
		else if (dsa_is_cpu_port(ds, port))
			bcm_sf2_imp_setup(ds, port);
	}

	bcm_sf2_enable_acb(ds);

	return 0;
}
static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
			       struct ethtool_wolinfo *wol)
{
	struct net_device *p = ds->ports[port].cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_wolinfo pwol;

	/* Get the parent device WoL settings */
	p->ethtool_ops->get_wol(p, &pwol);

	/* Advertise the parent device supported settings */
	wol->supported = pwol.supported;
	memset(&wol->sopass, 0, sizeof(wol->sopass));

	if (pwol.wolopts & WAKE_MAGICSECURE)
		memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass));

	if (priv->wol_ports_mask & (1 << port))
		wol->wolopts = pwol.wolopts;
	else
		wol->wolopts = 0;
}
static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
			      struct ethtool_wolinfo *wol)
{
	struct net_device *p = ds->ports[port].cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	s8 cpu_port = ds->ports[port].cpu_dp->index;
	struct ethtool_wolinfo pwol;

	p->ethtool_ops->get_wol(p, &pwol);
	if (wol->wolopts & ~pwol.supported)
		return -EINVAL;

	if (wol->wolopts)
		priv->wol_ports_mask |= (1 << port);
	else
		priv->wol_ports_mask &= ~(1 << port);

	/* If we have at least one port enabled, make sure the CPU port
	 * is also enabled. If the CPU port is the last one enabled, we disable
	 * it since this configuration does not make sense.
	 */
	if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
		priv->wol_ports_mask |= (1 << cpu_port);
	else
		priv->wol_ports_mask &= ~(1 << cpu_port);

	return p->ethtool_ops->set_wol(p, wol);
}
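/* Example of the WoL bookkeeping above, for illustration only: with user ports
 * 1 and 2 armed for Wake-on-LAN and a CPU port index of 8, wol_ports_mask ends
 * up as BIT(1) | BIT(2) | BIT(8) == 0x106; clearing both user ports afterwards
 * also drops BIT(8), since keeping only the CPU port armed makes no sense.
 */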
static int bcm_sf2_sw_setup(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port;

	/* Enable all valid ports and disable those unused */
	for (port = 0; port < priv->hw_params.num_ports; port++) {
		/* IMP port receives special treatment */
		if (dsa_is_user_port(ds, port))
			bcm_sf2_port_setup(ds, port, NULL);
		else if (dsa_is_cpu_port(ds, port))
			bcm_sf2_imp_setup(ds, port);
		else
			bcm_sf2_port_disable(ds, port, NULL);
	}

	b53_configure_vlan(ds);
	bcm_sf2_enable_acb(ds);

	return 0;
}
/* The SWITCH_CORE register space is managed by b53 but operates on a page +
 * register basis so we need to translate that into an address that the
 * bus-glue understands.
 */
#define SF2_PAGE_REG_MKADDR(page, reg)	((page) << 10 | (reg) << 2)
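/* For illustration only: with this translation, B53 page 0x28 register 0x04
 * becomes CORE offset (0x28 << 10) | (0x04 << 2) == 0xa010, so each page
 * occupies a 1KB window and registers land on 32-bit boundaries within it.
 */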
static int bcm_sf2_core_read8(struct b53_device *dev, u8 page, u8 reg,
			      u8 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_read16(struct b53_device *dev, u8 page, u8 reg,
			       u16 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_read32(struct b53_device *dev, u8 page, u8 reg,
			       u32 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_read64(struct b53_device *dev, u8 page, u8 reg,
			       u64 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readq(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write8(struct b53_device *dev, u8 page, u8 reg,
			       u8 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write16(struct b53_device *dev, u8 page, u8 reg,
				u16 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write32(struct b53_device *dev, u8 page, u8 reg,
				u32 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write64(struct b53_device *dev, u8 page, u8 reg,
				u64 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writeq(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}
static const struct b53_io_ops bcm_sf2_io_ops = {
	.read8	= bcm_sf2_core_read8,
	.read16	= bcm_sf2_core_read16,
	.read32	= bcm_sf2_core_read32,
	.read48	= bcm_sf2_core_read64,
	.read64	= bcm_sf2_core_read64,
	.write8	= bcm_sf2_core_write8,
	.write16 = bcm_sf2_core_write16,
	.write32 = bcm_sf2_core_write32,
	.write48 = bcm_sf2_core_write64,
	.write64 = bcm_sf2_core_write64,
};
static const struct dsa_switch_ops bcm_sf2_ops = {
	.get_tag_protocol	= bcm_sf2_sw_get_tag_protocol,
	.setup			= bcm_sf2_sw_setup,
	.get_strings		= b53_get_strings,
	.get_ethtool_stats	= b53_get_ethtool_stats,
	.get_sset_count		= b53_get_sset_count,
	.get_phy_flags		= bcm_sf2_sw_get_phy_flags,
	.adjust_link		= bcm_sf2_sw_adjust_link,
	.fixed_link_update	= bcm_sf2_sw_fixed_link_update,
	.suspend		= bcm_sf2_sw_suspend,
	.resume			= bcm_sf2_sw_resume,
	.get_wol		= bcm_sf2_sw_get_wol,
	.set_wol		= bcm_sf2_sw_set_wol,
	.port_enable		= bcm_sf2_port_setup,
	.port_disable		= bcm_sf2_port_disable,
	.get_mac_eee		= b53_get_mac_eee,
	.set_mac_eee		= b53_set_mac_eee,
	.port_bridge_join	= b53_br_join,
	.port_bridge_leave	= b53_br_leave,
	.port_stp_state_set	= b53_br_set_stp_state,
	.port_fast_age		= b53_br_fast_age,
	.port_vlan_filtering	= b53_vlan_filtering,
	.port_vlan_prepare	= b53_vlan_prepare,
	.port_vlan_add		= b53_vlan_add,
	.port_vlan_del		= b53_vlan_del,
	.port_fdb_dump		= b53_fdb_dump,
	.port_fdb_add		= b53_fdb_add,
	.port_fdb_del		= b53_fdb_del,
	.get_rxnfc		= bcm_sf2_get_rxnfc,
	.set_rxnfc		= bcm_sf2_set_rxnfc,
	.port_mirror_add	= b53_mirror_add,
	.port_mirror_del	= b53_mirror_del,
};
struct bcm_sf2_of_data {
	u32 type;
	const u16 *reg_offsets;
	unsigned int core_reg_align;
	unsigned int num_cfp_rules;
};
/* Register offsets for the SWITCH_REG_* block */
static const u16 bcm_sf2_7445_reg_offsets[] = {
	[REG_SWITCH_CNTRL]	= 0x00,
	[REG_SWITCH_STATUS]	= 0x04,
	[REG_DIR_DATA_WRITE]	= 0x08,
	[REG_DIR_DATA_READ]	= 0x0C,
	[REG_SWITCH_REVISION]	= 0x18,
	[REG_PHY_REVISION]	= 0x1C,
	[REG_SPHY_CNTRL]	= 0x2C,
	[REG_RGMII_0_CNTRL]	= 0x34,
	[REG_RGMII_1_CNTRL]	= 0x40,
	[REG_RGMII_2_CNTRL]	= 0x4c,
	[REG_LED_0_CNTRL]	= 0x90,
	[REG_LED_1_CNTRL]	= 0x94,
	[REG_LED_2_CNTRL]	= 0x98,
};
static const struct bcm_sf2_of_data bcm_sf2_7445_data = {
	.type		= BCM7445_DEVICE_ID,
	.core_reg_align	= 0,
	.reg_offsets	= bcm_sf2_7445_reg_offsets,
	.num_cfp_rules	= 256,
};
static const u16 bcm_sf2_7278_reg_offsets[] = {
	[REG_SWITCH_CNTRL]	= 0x00,
	[REG_SWITCH_STATUS]	= 0x04,
	[REG_DIR_DATA_WRITE]	= 0x08,
	[REG_DIR_DATA_READ]	= 0x0c,
	[REG_SWITCH_REVISION]	= 0x10,
	[REG_PHY_REVISION]	= 0x14,
	[REG_SPHY_CNTRL]	= 0x24,
	[REG_RGMII_0_CNTRL]	= 0xe0,
	[REG_RGMII_1_CNTRL]	= 0xec,
	[REG_RGMII_2_CNTRL]	= 0xf8,
	[REG_LED_0_CNTRL]	= 0x40,
	[REG_LED_1_CNTRL]	= 0x4c,
	[REG_LED_2_CNTRL]	= 0x58,
};
static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
	.type		= BCM7278_DEVICE_ID,
	.core_reg_align	= 1,
	.reg_offsets	= bcm_sf2_7278_reg_offsets,
	.num_cfp_rules	= 128,
};
static const struct of_device_id bcm_sf2_of_match[] = {
	{ .compatible = "brcm,bcm7445-switch-v4.0",
	  .data = &bcm_sf2_7445_data
	},
	{ .compatible = "brcm,bcm7278-switch-v4.0",
	  .data = &bcm_sf2_7278_data
	},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);
static int bcm_sf2_sw_probe(struct platform_device *pdev)
{
	const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
	struct device_node *dn = pdev->dev.of_node;
	const struct of_device_id *of_id = NULL;
	const struct bcm_sf2_of_data *data;
	struct b53_platform_data *pdata;
	struct dsa_switch_ops *ops;
	struct bcm_sf2_priv *priv;
	struct b53_device *dev;
	struct dsa_switch *ds;
	void __iomem **base;
	struct resource *r;
	unsigned int i;
	u32 reg, rev;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	ops = devm_kzalloc(&pdev->dev, sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	dev = b53_switch_alloc(&pdev->dev, &bcm_sf2_io_ops, priv);
	if (!dev)
		return -ENOMEM;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	of_id = of_match_node(bcm_sf2_of_match, dn);
	if (!of_id || !of_id->data)
		return -EINVAL;

	data = of_id->data;

	/* Set SWITCH_REG register offsets and SWITCH_CORE align factor */
	priv->type = data->type;
	priv->reg_offsets = data->reg_offsets;
	priv->core_reg_align = data->core_reg_align;
	priv->num_cfp_rules = data->num_cfp_rules;

	/* Auto-detection using standard registers will not work, so
	 * provide an indication of what kind of device we are for
	 * b53_common to work with
	 */
	pdata->chip_id = priv->type;
	dev->pdata = pdata;

	priv->dev = dev;
	ds = dev->ds;
	ds->ops = &bcm_sf2_ops;

	/* Advertise the 8 egress queues */
	ds->num_tx_queues = SF2_NUM_EGRESS_QUEUES;

	dev_set_drvdata(&pdev->dev, priv);

	spin_lock_init(&priv->indir_lock);
	mutex_init(&priv->stats_mutex);
	mutex_init(&priv->cfp.lock);

	/* CFP rule #0 cannot be used for specific classifications, flag it as
	 * permanently used
	 */
	set_bit(0, priv->cfp.used);
	set_bit(0, priv->cfp.unique);

	bcm_sf2_identify_ports(priv, dn->child);

	priv->irq0 = irq_of_parse_and_map(dn, 0);
	priv->irq1 = irq_of_parse_and_map(dn, 1);

	base = &priv->core;
	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
		r = platform_get_resource(pdev, IORESOURCE_MEM, i);
		*base = devm_ioremap_resource(&pdev->dev, r);
		if (IS_ERR(*base)) {
			pr_err("unable to find register: %s\n", reg_names[i]);
			return PTR_ERR(*base);
		}
		base++;
	}

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("unable to software reset switch: %d\n", ret);
		return ret;
	}

	ret = bcm_sf2_mdio_register(ds);
	if (ret) {
		pr_err("failed to register MDIO bus\n");
		return ret;
	}

	ret = bcm_sf2_cfp_rst(priv);
	if (ret) {
		pr_err("failed to reset CFP\n");
		goto out_mdio;
	}

	/* Disable all interrupts and request them */
	bcm_sf2_intr_disable(priv);

	ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0,
			       "switch_0", priv);
	if (ret < 0) {
		pr_err("failed to request switch_0 IRQ\n");
		goto out_mdio;
	}

	ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0,
			       "switch_1", priv);
	if (ret < 0) {
		pr_err("failed to request switch_1 IRQ\n");
		goto out_mdio;
	}

	/* Reset the MIB counters */
	reg = core_readl(priv, CORE_GMNCFGCFG);
	reg |= RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);
	reg &= ~RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);

	/* Get the maximum number of ports for this switch */
	priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
	if (priv->hw_params.num_ports > DSA_MAX_PORTS)
		priv->hw_params.num_ports = DSA_MAX_PORTS;

	/* Assume a single GPHY setup if we can't read that property */
	if (of_property_read_u32(dn, "brcm,num-gphy",
				 &priv->hw_params.num_gphy))
		priv->hw_params.num_gphy = 1;

	rev = reg_readl(priv, REG_SWITCH_REVISION);
	priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
					SWITCH_TOP_REV_MASK;
	priv->hw_params.core_rev = (rev & SF2_REV_MASK);

	rev = reg_readl(priv, REG_PHY_REVISION);
	priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;

	ret = b53_switch_register(dev);
	if (ret)
		goto out_mdio;

	pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
		priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
		priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
		priv->core, priv->irq0, priv->irq1);

	return 0;

out_mdio:
	bcm_sf2_mdio_unregister(priv);
	return ret;
}
static int bcm_sf2_sw_remove(struct platform_device *pdev)
{
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	/* Disable all ports and interrupts */
	priv->wol_ports_mask = 0;
	bcm_sf2_sw_suspend(priv->dev->ds);
	dsa_unregister_switch(priv->dev->ds);
	bcm_sf2_mdio_unregister(priv);

	return 0;
}
static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
{
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	/* For a kernel about to be kexec'd we want to keep the GPHY on for a
	 * successful MDIO bus scan to occur. If we did turn off the GPHY
	 * before (e.g: port_disable), this will also power it back on.
	 *
	 * Do not rely on kexec_in_progress, just power the PHY on.
	 */
	if (priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(priv->dev->ds, true);
}
#ifdef CONFIG_PM_SLEEP
static int bcm_sf2_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	return dsa_switch_suspend(priv->dev->ds);
}

static int bcm_sf2_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	return dsa_switch_resume(priv->dev->ds);
}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops,
			 bcm_sf2_suspend, bcm_sf2_resume);
static struct platform_driver bcm_sf2_driver = {
	.probe	= bcm_sf2_sw_probe,
	.remove	= bcm_sf2_sw_remove,
	.shutdown = bcm_sf2_sw_shutdown,
	.driver = {
		.name = "brcm-sf2",
		.of_match_table = bcm_sf2_of_match,
		.pm = &bcm_sf2_pm_ops,
	},
};
module_platform_driver(bcm_sf2_driver);
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:brcm-sf2");