// SPDX-License-Identifier: GPL-2.0
/*
 * Lantiq / Intel GSWIP switch driver for VRX200 SoCs
 *
 * Copyright (C) 2010 Lantiq Deutschland
 * Copyright (C) 2012 John Crispin <john@phrozen.org>
 * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
 *
 * The VLAN and bridge model the GSWIP hardware uses does not directly
 * match the model DSA uses.
 *
 * The hardware has 64 possible table entries for bridges with one VLAN
 * ID, one flow id and a list of ports for each bridge. All entries which
 * match the same flow ID are combined in the mac learning table, they
 * act as one global bridge.
 * The hardware does not support VLAN filtering on the port, but on the
 * bridge; this driver converts the DSA model to the hardware model.
 *
 * The CPU gets all the exception frames which do not match any forwarding
 * rule and the CPU port is also added to all bridges. This makes it possible
 * to handle all the special cases easily in software.
 * At initialization the driver allocates one bridge table entry for
 * each switch port which is used when the port is used without an
 * explicit bridge. This prevents the frames from being forwarded
 * between all LAN ports by default.
 */
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <net/dsa.h>
#include <dt-bindings/mips/lantiq_rcu_gphy.h>

#include "lantiq_pce.h"
/* GSWIP MDIO Registers */
#define GSWIP_MDIO_GLOB			0x00
#define  GSWIP_MDIO_GLOB_ENABLE		BIT(15)
#define GSWIP_MDIO_CTRL			0x08
#define  GSWIP_MDIO_CTRL_BUSY		BIT(12)
#define  GSWIP_MDIO_CTRL_RD		BIT(11)
#define  GSWIP_MDIO_CTRL_WR		BIT(10)
#define  GSWIP_MDIO_CTRL_PHYAD_MASK	0x1f
#define  GSWIP_MDIO_CTRL_PHYAD_SHIFT	5
#define  GSWIP_MDIO_CTRL_REGAD_MASK	0x1f
#define GSWIP_MDIO_READ			0x09
#define GSWIP_MDIO_WRITE		0x0A
#define GSWIP_MDIO_MDC_CFG0		0x0B
#define GSWIP_MDIO_MDC_CFG1		0x0C
#define GSWIP_MDIO_PHYp(p)		(0x15 - (p))
#define  GSWIP_MDIO_PHY_LINK_MASK	0x6000
#define  GSWIP_MDIO_PHY_LINK_AUTO	0x0000
#define  GSWIP_MDIO_PHY_LINK_DOWN	0x4000
#define  GSWIP_MDIO_PHY_LINK_UP		0x2000
#define  GSWIP_MDIO_PHY_SPEED_MASK	0x1800
#define  GSWIP_MDIO_PHY_SPEED_AUTO	0x1800
#define  GSWIP_MDIO_PHY_SPEED_M10	0x0000
#define  GSWIP_MDIO_PHY_SPEED_M100	0x0800
#define  GSWIP_MDIO_PHY_SPEED_G1	0x1000
#define  GSWIP_MDIO_PHY_FDUP_MASK	0x0600
#define  GSWIP_MDIO_PHY_FDUP_AUTO	0x0000
#define  GSWIP_MDIO_PHY_FDUP_EN		0x0200
#define  GSWIP_MDIO_PHY_FDUP_DIS	0x0600
#define  GSWIP_MDIO_PHY_FCONTX_MASK	0x0180
#define  GSWIP_MDIO_PHY_FCONTX_AUTO	0x0000
#define  GSWIP_MDIO_PHY_FCONTX_EN	0x0100
#define  GSWIP_MDIO_PHY_FCONTX_DIS	0x0180
#define  GSWIP_MDIO_PHY_FCONRX_MASK	0x0060
#define  GSWIP_MDIO_PHY_FCONRX_AUTO	0x0000
#define  GSWIP_MDIO_PHY_FCONRX_EN	0x0020
#define  GSWIP_MDIO_PHY_FCONRX_DIS	0x0060
#define  GSWIP_MDIO_PHY_ADDR_MASK	0x001f
#define GSWIP_MDIO_PHY_MASK		(GSWIP_MDIO_PHY_ADDR_MASK | \
					 GSWIP_MDIO_PHY_FCONRX_MASK | \
					 GSWIP_MDIO_PHY_FCONTX_MASK | \
					 GSWIP_MDIO_PHY_LINK_MASK | \
					 GSWIP_MDIO_PHY_SPEED_MASK | \
					 GSWIP_MDIO_PHY_FDUP_MASK)
/* GSWIP MII Registers */
#define GSWIP_MII_CFG0			0x00
#define GSWIP_MII_CFG1			0x02
#define GSWIP_MII_CFG5			0x04
#define  GSWIP_MII_CFG_EN		BIT(14)
#define  GSWIP_MII_CFG_LDCLKDIS		BIT(12)
#define  GSWIP_MII_CFG_MODE_MIIP	0x0
#define  GSWIP_MII_CFG_MODE_MIIM	0x1
#define  GSWIP_MII_CFG_MODE_RMIIP	0x2
#define  GSWIP_MII_CFG_MODE_RMIIM	0x3
#define  GSWIP_MII_CFG_MODE_RGMII	0x4
#define  GSWIP_MII_CFG_MODE_MASK	0xf
#define  GSWIP_MII_CFG_RATE_M2P5	0x00
#define  GSWIP_MII_CFG_RATE_M25		0x10
#define  GSWIP_MII_CFG_RATE_M125	0x20
#define  GSWIP_MII_CFG_RATE_M50		0x30
#define  GSWIP_MII_CFG_RATE_AUTO	0x40
#define  GSWIP_MII_CFG_RATE_MASK	0x70
#define GSWIP_MII_PCDU0			0x01
#define GSWIP_MII_PCDU1			0x03
#define GSWIP_MII_PCDU5			0x05
#define  GSWIP_MII_PCDU_TXDLY_MASK	GENMASK(2, 0)
#define  GSWIP_MII_PCDU_RXDLY_MASK	GENMASK(9, 7)
/* GSWIP Core Registers */
#define GSWIP_SWRES			0x000
#define  GSWIP_SWRES_R1			BIT(1)	/* GSWIP Software reset */
#define  GSWIP_SWRES_R0			BIT(0)	/* GSWIP Hardware reset */
#define GSWIP_VERSION			0x013
#define  GSWIP_VERSION_REV_SHIFT	0
#define  GSWIP_VERSION_REV_MASK		GENMASK(7, 0)
#define  GSWIP_VERSION_MOD_SHIFT	8
#define  GSWIP_VERSION_MOD_MASK		GENMASK(15, 8)
#define   GSWIP_VERSION_2_0		0x100
#define   GSWIP_VERSION_2_1		0x021
#define   GSWIP_VERSION_2_2		0x122
#define   GSWIP_VERSION_2_2_ETC		0x022

#define GSWIP_BM_RAM_VAL(x)		(0x043 - (x))
#define GSWIP_BM_RAM_ADDR		0x044
#define GSWIP_BM_RAM_CTRL		0x045
#define  GSWIP_BM_RAM_CTRL_BAS		BIT(15)
#define  GSWIP_BM_RAM_CTRL_OPMOD	BIT(5)
#define  GSWIP_BM_RAM_CTRL_ADDR_MASK	GENMASK(4, 0)
#define GSWIP_BM_QUEUE_GCTRL		0x04A
#define  GSWIP_BM_QUEUE_GCTRL_GL_MOD	BIT(10)
/* buffer management Port Configuration Register */
#define GSWIP_BM_PCFGp(p)		(0x080 + ((p) * 2))
#define  GSWIP_BM_PCFG_CNTEN		BIT(0)	/* RMON Counter Enable */
#define  GSWIP_BM_PCFG_IGCNT		BIT(1)	/* Ingress Special Tag RMON count */
/* buffer management Port Control Register */
#define GSWIP_BM_RMON_CTRLp(p)		(0x81 + ((p) * 2))
#define  GSWIP_BM_CTRL_RMON_RAM1_RES	BIT(0)	/* Software Reset for RMON RAM 1 */
#define  GSWIP_BM_CTRL_RMON_RAM2_RES	BIT(1)	/* Software Reset for RMON RAM 2 */
/* PCE */
#define GSWIP_PCE_TBL_KEY(x)		(0x447 - (x))
#define GSWIP_PCE_TBL_MASK		0x448
#define GSWIP_PCE_TBL_VAL(x)		(0x44D - (x))
#define GSWIP_PCE_TBL_ADDR		0x44E
#define GSWIP_PCE_TBL_CTRL		0x44F
#define  GSWIP_PCE_TBL_CTRL_BAS		BIT(15)
#define  GSWIP_PCE_TBL_CTRL_TYPE	BIT(13)
#define  GSWIP_PCE_TBL_CTRL_VLD		BIT(12)
#define  GSWIP_PCE_TBL_CTRL_KEYFORM	BIT(11)
#define  GSWIP_PCE_TBL_CTRL_GMAP_MASK	GENMASK(10, 7)
#define  GSWIP_PCE_TBL_CTRL_OPMOD_MASK	GENMASK(6, 5)
#define  GSWIP_PCE_TBL_CTRL_OPMOD_ADRD	0x00
#define  GSWIP_PCE_TBL_CTRL_OPMOD_ADWR	0x20
#define  GSWIP_PCE_TBL_CTRL_OPMOD_KSRD	0x40
#define  GSWIP_PCE_TBL_CTRL_OPMOD_KSWR	0x60
#define  GSWIP_PCE_TBL_CTRL_ADDR_MASK	GENMASK(4, 0)
#define GSWIP_PCE_PMAP1			0x453	/* Monitoring port map */
#define GSWIP_PCE_PMAP2			0x454	/* Default Multicast port map */
#define GSWIP_PCE_PMAP3			0x455	/* Default Unknown Unicast port map */
#define GSWIP_PCE_GCTRL_0		0x456
#define  GSWIP_PCE_GCTRL_0_MTFL		BIT(0)	/* MAC Table Flushing */
#define  GSWIP_PCE_GCTRL_0_MC_VALID	BIT(3)
#define  GSWIP_PCE_GCTRL_0_VLAN		BIT(14)	/* VLAN aware Switching */
#define GSWIP_PCE_GCTRL_1		0x457
#define  GSWIP_PCE_GCTRL_1_MAC_GLOCK	BIT(2)	/* MAC Address table lock */
#define  GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD	BIT(3)	/* MAC address table lock forwarding mode */
#define GSWIP_PCE_PCTRL_0p(p)		(0x480 + ((p) * 0xA))
#define  GSWIP_PCE_PCTRL_0_TVM		BIT(5)	/* Transparent VLAN mode */
#define  GSWIP_PCE_PCTRL_0_VREP		BIT(6)	/* VLAN Replace Mode */
#define  GSWIP_PCE_PCTRL_0_INGRESS	BIT(11)	/* Accept special tag in ingress */
#define  GSWIP_PCE_PCTRL_0_PSTATE_LISTEN	0x0
#define  GSWIP_PCE_PCTRL_0_PSTATE_RX		0x1
#define  GSWIP_PCE_PCTRL_0_PSTATE_TX		0x2
#define  GSWIP_PCE_PCTRL_0_PSTATE_LEARNING	0x3
#define  GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING	0x7
#define  GSWIP_PCE_PCTRL_0_PSTATE_MASK	GENMASK(2, 0)
#define GSWIP_PCE_VCTRL(p)		(0x485 + ((p) * 0xA))
#define  GSWIP_PCE_VCTRL_UVR		BIT(0)	/* Unknown VLAN Rule */
#define  GSWIP_PCE_VCTRL_VIMR		BIT(3)	/* VLAN Ingress Member violation rule */
#define  GSWIP_PCE_VCTRL_VEMR		BIT(4)	/* VLAN Egress Member violation rule */
#define  GSWIP_PCE_VCTRL_VSR		BIT(5)	/* VLAN Security */
#define  GSWIP_PCE_VCTRL_VID0		BIT(6)	/* Priority Tagged Rule */
#define GSWIP_PCE_DEFPVID(p)		(0x486 + ((p) * 0xA))
#define GSWIP_MAC_FLEN			0x8C5
#define GSWIP_MAC_CTRL_2p(p)		(0x905 + ((p) * 0xC))
#define  GSWIP_MAC_CTRL_2_MLEN		BIT(3)	/* Maximum Untagged Frame Length */

/* Ethernet Switch Fetch DMA Port Control Register */
#define GSWIP_FDMA_PCTRLp(p)		(0xA80 + ((p) * 0x6))
#define  GSWIP_FDMA_PCTRL_EN		BIT(0)	/* FDMA Port Enable */
#define  GSWIP_FDMA_PCTRL_STEN		BIT(1)	/* Special Tag Insertion Enable */
#define  GSWIP_FDMA_PCTRL_VLANMOD_MASK	GENMASK(4, 3)	/* VLAN Modification Control */
#define  GSWIP_FDMA_PCTRL_VLANMOD_SHIFT	3	/* VLAN Modification Control */
#define  GSWIP_FDMA_PCTRL_VLANMOD_DIS	(0x0 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
#define  GSWIP_FDMA_PCTRL_VLANMOD_PRIO	(0x1 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
#define  GSWIP_FDMA_PCTRL_VLANMOD_ID	(0x2 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
#define  GSWIP_FDMA_PCTRL_VLANMOD_BOTH	(0x3 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)

/* Ethernet Switch Store DMA Port Control Register */
#define GSWIP_SDMA_PCTRLp(p)		(0xBC0 + ((p) * 0x6))
#define  GSWIP_SDMA_PCTRL_EN		BIT(0)	/* SDMA Port Enable */
#define  GSWIP_SDMA_PCTRL_FCEN		BIT(1)	/* Flow Control Enable */
#define  GSWIP_SDMA_PCTRL_PAUFWD	BIT(1)	/* Pause Frame Forwarding */

#define GSWIP_TABLE_ACTIVE_VLAN		0x01
#define GSWIP_TABLE_VLAN_MAPPING	0x02
#define GSWIP_TABLE_MAC_BRIDGE		0x0b
#define  GSWIP_TABLE_MAC_BRIDGE_STATIC	0x01	/* Static, non-aging entry */

#define XRX200_GPHY_FW_ALIGN	(16 * 1024)
struct gswip_hw_info {
	int max_ports;
	int cpu_port;
};

struct xway_gphy_match_data {
	char *fe_firmware_name;
	char *ge_firmware_name;
};

struct gswip_gphy_fw {
	struct clk *clk_gate;
	struct reset_control *reset;
	u32 fw_addr_offset;
	char *fw_name;
};

struct gswip_vlan {
	struct net_device *bridge;
	u16 vid;
	u8 fid;
};

struct gswip_priv {
	void __iomem *gswip;
	void __iomem *mdio;
	void __iomem *mii;
	const struct gswip_hw_info *hw_info;
	const struct xway_gphy_match_data *gphy_fw_name_cfg;
	struct dsa_switch *ds;
	struct device *dev;
	struct regmap *rcu_regmap;
	struct gswip_vlan vlans[64];
	int num_gphy_fw;
	struct gswip_gphy_fw *gphy_fw;
	u32 port_vlan_filter;
};

struct gswip_pce_table_entry {
	u16 index;      // PCE_TBL_ADDR.ADDR = pData->table_index
	u16 table;      // PCE_TBL_CTRL.ADDR = pData->table
	u16 key[8];
	u16 val[5];
	u16 mask;
	u8 gmap;
	bool type;
	bool valid;
	bool key_mode;
};

struct gswip_rmon_cnt_desc {
	unsigned int size;
	unsigned int offset;
	const char *name;
};

#define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name}
static const struct gswip_rmon_cnt_desc gswip_rmon_cnt[] = {
	/** Receive Packet Count (only packets that are accepted and not discarded). */
	MIB_DESC(1, 0x1F, "RxGoodPkts"),
	MIB_DESC(1, 0x23, "RxUnicastPkts"),
	MIB_DESC(1, 0x22, "RxMulticastPkts"),
	MIB_DESC(1, 0x21, "RxFCSErrorPkts"),
	MIB_DESC(1, 0x1D, "RxUnderSizeGoodPkts"),
	MIB_DESC(1, 0x1E, "RxUnderSizeErrorPkts"),
	MIB_DESC(1, 0x1B, "RxOversizeGoodPkts"),
	MIB_DESC(1, 0x1C, "RxOversizeErrorPkts"),
	MIB_DESC(1, 0x20, "RxGoodPausePkts"),
	MIB_DESC(1, 0x1A, "RxAlignErrorPkts"),
	MIB_DESC(1, 0x12, "Rx64BytePkts"),
	MIB_DESC(1, 0x13, "Rx127BytePkts"),
	MIB_DESC(1, 0x14, "Rx255BytePkts"),
	MIB_DESC(1, 0x15, "Rx511BytePkts"),
	MIB_DESC(1, 0x16, "Rx1023BytePkts"),
	/** Receive Size 1024-1522 (or more, if configured) Packet Count. */
	MIB_DESC(1, 0x17, "RxMaxBytePkts"),
	MIB_DESC(1, 0x18, "RxDroppedPkts"),
	MIB_DESC(1, 0x19, "RxFilteredPkts"),
	MIB_DESC(2, 0x24, "RxGoodBytes"),
	MIB_DESC(2, 0x26, "RxBadBytes"),
	MIB_DESC(1, 0x11, "TxAcmDroppedPkts"),
	MIB_DESC(1, 0x0C, "TxGoodPkts"),
	MIB_DESC(1, 0x06, "TxUnicastPkts"),
	MIB_DESC(1, 0x07, "TxMulticastPkts"),
	MIB_DESC(1, 0x00, "Tx64BytePkts"),
	MIB_DESC(1, 0x01, "Tx127BytePkts"),
	MIB_DESC(1, 0x02, "Tx255BytePkts"),
	MIB_DESC(1, 0x03, "Tx511BytePkts"),
	MIB_DESC(1, 0x04, "Tx1023BytePkts"),
	/** Transmit Size 1024-1522 (or more, if configured) Packet Count. */
	MIB_DESC(1, 0x05, "TxMaxBytePkts"),
	MIB_DESC(1, 0x08, "TxSingleCollCount"),
	MIB_DESC(1, 0x09, "TxMultCollCount"),
	MIB_DESC(1, 0x0A, "TxLateCollCount"),
	MIB_DESC(1, 0x0B, "TxExcessCollCount"),
	MIB_DESC(1, 0x0D, "TxPauseCount"),
	MIB_DESC(1, 0x10, "TxDroppedPkts"),
	MIB_DESC(2, 0x0E, "TxGoodBytes"),
};
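/* The register offsets used with the accessors below are register numbers,
 * not byte offsets; each register occupies one 32-bit slot in the memory
 * map, so the accessors multiply the offset by 4 before the MMIO access.
 */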
static u32 gswip_switch_r(struct gswip_priv *priv, u32 offset)
{
	return __raw_readl(priv->gswip + (offset * 4));
}

static void gswip_switch_w(struct gswip_priv *priv, u32 val, u32 offset)
{
	__raw_writel(val, priv->gswip + (offset * 4));
}

static void gswip_switch_mask(struct gswip_priv *priv, u32 clear, u32 set,
			      u32 offset)
{
	u32 val = gswip_switch_r(priv, offset);

	val &= ~(clear);
	val |= set;
	gswip_switch_w(priv, val, offset);
}

static u32 gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset,
				  u32 cleared)
{
	u32 val;

	return readx_poll_timeout(__raw_readl, priv->gswip + (offset * 4), val,
				  (val & cleared) == 0, 20, 50000);
}

static u32 gswip_mdio_r(struct gswip_priv *priv, u32 offset)
{
	return __raw_readl(priv->mdio + (offset * 4));
}

static void gswip_mdio_w(struct gswip_priv *priv, u32 val, u32 offset)
{
	__raw_writel(val, priv->mdio + (offset * 4));
}

static void gswip_mdio_mask(struct gswip_priv *priv, u32 clear, u32 set,
			    u32 offset)
{
	u32 val = gswip_mdio_r(priv, offset);

	val &= ~(clear);
	val |= set;
	gswip_mdio_w(priv, val, offset);
}

static u32 gswip_mii_r(struct gswip_priv *priv, u32 offset)
{
	return __raw_readl(priv->mii + (offset * 4));
}

static void gswip_mii_w(struct gswip_priv *priv, u32 val, u32 offset)
{
	__raw_writel(val, priv->mii + (offset * 4));
}

static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
			   u32 offset)
{
	u32 val = gswip_mii_r(priv, offset);

	val &= ~(clear);
	val |= set;
	gswip_mii_w(priv, val, offset);
}
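/* Only ports 0, 1 and 5 have xMII configuration registers (MII_CFG0/1/5 and
 * PCDU0/1/5), so the two helpers below only act on those ports and silently
 * ignore every other port number.
 */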
static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
			       int port)
{
	switch (port) {
	case 0:
		gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG0);
		break;
	case 1:
		gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG1);
		break;
	case 5:
		gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG5);
		break;
	}
}

static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
				int port)
{
	switch (port) {
	case 0:
		gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU0);
		break;
	case 1:
		gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU1);
		break;
	case 5:
		gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU5);
		break;
	}
}
static int gswip_mdio_poll(struct gswip_priv *priv)
{
	int cnt = 100;

	while (likely(cnt--)) {
		u32 ctrl = gswip_mdio_r(priv, GSWIP_MDIO_CTRL);

		if ((ctrl & GSWIP_MDIO_CTRL_BUSY) == 0)
			return 0;
		usleep_range(20, 40);
	}

	return -ETIMEDOUT;
}

static int gswip_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct gswip_priv *priv = bus->priv;
	int err;

	err = gswip_mdio_poll(priv);
	if (err) {
		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
		return err;
	}

	gswip_mdio_w(priv, val, GSWIP_MDIO_WRITE);
	gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_WR |
		((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
		(reg & GSWIP_MDIO_CTRL_REGAD_MASK),
		GSWIP_MDIO_CTRL);

	return 0;
}

static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
{
	struct gswip_priv *priv = bus->priv;
	int err;

	err = gswip_mdio_poll(priv);
	if (err) {
		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
		return err;
	}

	gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD |
		((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
		(reg & GSWIP_MDIO_CTRL_REGAD_MASK),
		GSWIP_MDIO_CTRL);

	err = gswip_mdio_poll(priv);
	if (err) {
		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
		return err;
	}

	return gswip_mdio_r(priv, GSWIP_MDIO_READ);
}

static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
{
	struct dsa_switch *ds = priv->ds;

	ds->slave_mii_bus = devm_mdiobus_alloc(priv->dev);
	if (!ds->slave_mii_bus)
		return -ENOMEM;

	ds->slave_mii_bus->priv = priv;
	ds->slave_mii_bus->read = gswip_mdio_rd;
	ds->slave_mii_bus->write = gswip_mdio_wr;
	ds->slave_mii_bus->name = "lantiq,xrx200-mdio";
	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "%s-mii",
		 dev_name(priv->dev));
	ds->slave_mii_bus->parent = priv->dev;
	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;

	return of_mdiobus_register(ds->slave_mii_bus, mdio_np);
}
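/* The PCE tables (active VLAN, VLAN mapping, MAC bridge) are accessed
 * indirectly: wait until the BAS bit in GSWIP_PCE_TBL_CTRL is clear, program
 * the entry address and operation mode, start the access by setting BAS
 * again, then transfer the key/value/mask registers.
 */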
static int gswip_pce_table_entry_read(struct gswip_priv *priv,
				      struct gswip_pce_table_entry *tbl)
{
	int i;
	int err;
	u16 crtl;
	u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD :
					GSWIP_PCE_TBL_CTRL_OPMOD_ADRD;

	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
				     GSWIP_PCE_TBL_CTRL_BAS);
	if (err)
		return err;

	gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
				GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
			  tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS,
			  GSWIP_PCE_TBL_CTRL);

	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
				     GSWIP_PCE_TBL_CTRL_BAS);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
		tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i));

	for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
		tbl->val[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_VAL(i));

	tbl->mask = gswip_switch_r(priv, GSWIP_PCE_TBL_MASK);

	crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);

	tbl->type = !!(crtl & GSWIP_PCE_TBL_CTRL_TYPE);
	tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD);
	tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7;

	return 0;
}

static int gswip_pce_table_entry_write(struct gswip_priv *priv,
				       struct gswip_pce_table_entry *tbl)
{
	int i;
	int err;
	u16 crtl;
	u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR :
					GSWIP_PCE_TBL_CTRL_OPMOD_ADWR;

	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
				     GSWIP_PCE_TBL_CTRL_BAS);
	if (err)
		return err;

	gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
				GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
			  tbl->table | addr_mode,
			  GSWIP_PCE_TBL_CTRL);

	for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
		gswip_switch_w(priv, tbl->key[i], GSWIP_PCE_TBL_KEY(i));

	for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
		gswip_switch_w(priv, tbl->val[i], GSWIP_PCE_TBL_VAL(i));

	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
				GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
			  tbl->table | addr_mode,
			  GSWIP_PCE_TBL_CTRL);

	gswip_switch_w(priv, tbl->mask, GSWIP_PCE_TBL_MASK);

	crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
	crtl &= ~(GSWIP_PCE_TBL_CTRL_TYPE | GSWIP_PCE_TBL_CTRL_VLD |
		  GSWIP_PCE_TBL_CTRL_GMAP_MASK);
	if (tbl->type)
		crtl |= GSWIP_PCE_TBL_CTRL_TYPE;
	if (tbl->valid)
		crtl |= GSWIP_PCE_TBL_CTRL_VLD;
	crtl |= (tbl->gmap << 7) & GSWIP_PCE_TBL_CTRL_GMAP_MASK;
	crtl |= GSWIP_PCE_TBL_CTRL_BAS;
	gswip_switch_w(priv, crtl, GSWIP_PCE_TBL_CTRL);

	return gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
				      GSWIP_PCE_TBL_CTRL_BAS);
}
/* Add the LAN port into a bridge with the CPU port by
 * default. This prevents automatic forwarding of
 * packets between the LAN ports when no explicit
 * bridge is configured.
 */
static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
{
	struct gswip_pce_table_entry vlan_active = {0,};
	struct gswip_pce_table_entry vlan_mapping = {0,};
	unsigned int cpu_port = priv->hw_info->cpu_port;
	unsigned int max_ports = priv->hw_info->max_ports;
	int err;

	if (port >= max_ports) {
		dev_err(priv->dev, "single port bridge not supported for port %i\n",
			port);
		return -EIO;
	}

	vlan_active.index = port + 1;
	vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
	vlan_active.key[0] = 0; /* vid */
	vlan_active.val[0] = port + 1 /* fid */;
	vlan_active.valid = add;
	err = gswip_pce_table_entry_write(priv, &vlan_active);
	if (err) {
		dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
		return err;
	}

	if (!add)
		return 0;

	vlan_mapping.index = port + 1;
	vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
	vlan_mapping.val[0] = 0 /* vid */;
	vlan_mapping.val[1] = BIT(port) | BIT(cpu_port);
	vlan_mapping.val[2] = 0;
	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
		return err;
	}

	return 0;
}
static int gswip_port_enable(struct dsa_switch *ds, int port,
			     struct phy_device *phydev)
{
	struct gswip_priv *priv = ds->priv;
	int err;

	if (!dsa_is_user_port(ds, port))
		return 0;

	if (!dsa_is_cpu_port(ds, port)) {
		err = gswip_add_single_port_br(priv, port, true);
		if (err)
			return err;
	}

	/* RMON Counter Enable for port */
	gswip_switch_w(priv, GSWIP_BM_PCFG_CNTEN, GSWIP_BM_PCFGp(port));

	/* enable port fetch/store dma & VLAN Modification */
	gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_EN |
				   GSWIP_FDMA_PCTRL_VLANMOD_BOTH,
			  GSWIP_FDMA_PCTRLp(port));
	gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
			  GSWIP_SDMA_PCTRLp(port));

	if (!dsa_is_cpu_port(ds, port)) {
		u32 macconf = GSWIP_MDIO_PHY_LINK_AUTO |
			      GSWIP_MDIO_PHY_SPEED_AUTO |
			      GSWIP_MDIO_PHY_FDUP_AUTO |
			      GSWIP_MDIO_PHY_FCONTX_AUTO |
			      GSWIP_MDIO_PHY_FCONRX_AUTO |
			      (phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK);

		gswip_mdio_w(priv, macconf, GSWIP_MDIO_PHYp(port));
		/* Activate MDIO auto polling */
		gswip_mdio_mask(priv, 0, BIT(port), GSWIP_MDIO_MDC_CFG0);
	}

	return 0;
}

static void gswip_port_disable(struct dsa_switch *ds, int port)
{
	struct gswip_priv *priv = ds->priv;

	if (!dsa_is_user_port(ds, port))
		return;

	if (!dsa_is_cpu_port(ds, port)) {
		gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_DOWN,
				GSWIP_MDIO_PHY_LINK_MASK,
				GSWIP_MDIO_PHYp(port));
		/* Deactivate MDIO auto polling */
		gswip_mdio_mask(priv, BIT(port), 0, GSWIP_MDIO_MDC_CFG0);
	}

	gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0,
			  GSWIP_FDMA_PCTRLp(port));
	gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
			  GSWIP_SDMA_PCTRLp(port));
}
static int gswip_pce_load_microcode(struct gswip_priv *priv)
{
	int i;
	int err;

	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
				GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
			  GSWIP_PCE_TBL_CTRL_OPMOD_ADWR, GSWIP_PCE_TBL_CTRL);
	gswip_switch_w(priv, 0, GSWIP_PCE_TBL_MASK);

	for (i = 0; i < ARRAY_SIZE(gswip_pce_microcode); i++) {
		gswip_switch_w(priv, i, GSWIP_PCE_TBL_ADDR);
		gswip_switch_w(priv, gswip_pce_microcode[i].val_0,
			       GSWIP_PCE_TBL_VAL(0));
		gswip_switch_w(priv, gswip_pce_microcode[i].val_1,
			       GSWIP_PCE_TBL_VAL(1));
		gswip_switch_w(priv, gswip_pce_microcode[i].val_2,
			       GSWIP_PCE_TBL_VAL(2));
		gswip_switch_w(priv, gswip_pce_microcode[i].val_3,
			       GSWIP_PCE_TBL_VAL(3));

		/* start the table access: */
		gswip_switch_mask(priv, 0, GSWIP_PCE_TBL_CTRL_BAS,
				  GSWIP_PCE_TBL_CTRL);
		err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
					     GSWIP_PCE_TBL_CTRL_BAS);
		if (err)
			return err;
	}

	/* tell the switch that the microcode is loaded */
	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MC_VALID,
			  GSWIP_PCE_GCTRL_0);

	return 0;
}
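/* With VLAN filtering enabled the port leaves transparent VLAN mode (TVM)
 * and the unknown VLAN and ingress/egress membership violation rules are
 * activated; without filtering the port runs in transparent mode and only
 * the VLAN security rule stays active.
 */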
static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
				     bool vlan_filtering)
{
	struct gswip_priv *priv = ds->priv;
	struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;

	/* Do not allow changing the VLAN filtering options while in bridge */
	if (!!(priv->port_vlan_filter & BIT(port)) != vlan_filtering && bridge)
		return -EIO;

	if (vlan_filtering) {
		/* Use tag based VLAN */
		gswip_switch_mask(priv,
				  GSWIP_PCE_VCTRL_VSR,
				  GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
				  GSWIP_PCE_VCTRL_VEMR,
				  GSWIP_PCE_VCTRL(port));
		gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_TVM, 0,
				  GSWIP_PCE_PCTRL_0p(port));
	} else {
		/* Use port based VLAN tag */
		gswip_switch_mask(priv,
				  GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
				  GSWIP_PCE_VCTRL_VEMR,
				  GSWIP_PCE_VCTRL_VSR,
				  GSWIP_PCE_VCTRL(port));
		gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_TVM,
				  GSWIP_PCE_PCTRL_0p(port));
	}

	return 0;
}
static int gswip_setup(struct dsa_switch *ds)
{
	struct gswip_priv *priv = ds->priv;
	unsigned int cpu_port = priv->hw_info->cpu_port;
	int i;
	int err;

	gswip_switch_w(priv, GSWIP_SWRES_R0, GSWIP_SWRES);
	usleep_range(5000, 10000);
	gswip_switch_w(priv, 0, GSWIP_SWRES);

	/* disable port fetch/store dma on all ports */
	for (i = 0; i < priv->hw_info->max_ports; i++) {
		gswip_port_disable(ds, i);
		gswip_port_vlan_filtering(ds, i, false);
	}

	/* enable Switch */
	gswip_mdio_mask(priv, 0, GSWIP_MDIO_GLOB_ENABLE, GSWIP_MDIO_GLOB);

	err = gswip_pce_load_microcode(priv);
	if (err) {
		dev_err(priv->dev, "writing PCE microcode failed, %i", err);
		return err;
	}

	/* Default unknown Broadcast/Multicast/Unicast port maps */
	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP1);
	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2);
	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3);

	/* disable PHY auto polling */
	gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0);
	/* Configure the MDIO Clock 2.5 MHz */
	gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);

	/* Disable the xMII link */
	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 0);
	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 1);
	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 5);

	/* enable special tag insertion on cpu port */
	gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
			  GSWIP_FDMA_PCTRLp(cpu_port));

	/* accept special tag in ingress direction */
	gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
			  GSWIP_PCE_PCTRL_0p(cpu_port));

	gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
			  GSWIP_MAC_CTRL_2p(cpu_port));
	gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8, GSWIP_MAC_FLEN);
	gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
			  GSWIP_BM_QUEUE_GCTRL);

	/* VLAN aware Switching */
	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_VLAN, GSWIP_PCE_GCTRL_0);

	/* Flush MAC Table */
	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MTFL, GSWIP_PCE_GCTRL_0);

	err = gswip_switch_r_timeout(priv, GSWIP_PCE_GCTRL_0,
				     GSWIP_PCE_GCTRL_0_MTFL);
	if (err) {
		dev_err(priv->dev, "MAC flushing didn't finish\n");
		return err;
	}

	gswip_port_enable(ds, cpu_port, NULL);
	return 0;
}
static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
						    int port,
						    enum dsa_tag_protocol mp)
{
	return DSA_TAG_PROTO_GSWIP;
}
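/* Bookkeeping for the 64 entry active VLAN table: entry port + 1 is used for
 * the single-port bridge of each user port, while the Linux bridges allocate
 * their entries dynamically from index max_ports upwards via the helpers
 * below.
 */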
static int gswip_vlan_active_create(struct gswip_priv *priv,
				    struct net_device *bridge,
				    int fid, u16 vid)
{
	struct gswip_pce_table_entry vlan_active = {0,};
	unsigned int max_ports = priv->hw_info->max_ports;
	int idx = -1;
	int err;
	int i;

	/* Look for a free slot */
	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
		if (!priv->vlans[i].bridge) {
			idx = i;
			break;
		}
	}

	if (idx == -1)
		return -ENOSPC;

	if (fid == -1)
		fid = idx;

	vlan_active.index = idx;
	vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
	vlan_active.key[0] = vid;
	vlan_active.val[0] = fid;
	vlan_active.valid = true;

	err = gswip_pce_table_entry_write(priv, &vlan_active);
	if (err) {
		dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
		return err;
	}

	priv->vlans[idx].bridge = bridge;
	priv->vlans[idx].vid = vid;
	priv->vlans[idx].fid = fid;

	return idx;
}
static int gswip_vlan_active_remove(struct gswip_priv *priv, int idx)
{
	struct gswip_pce_table_entry vlan_active = {0,};
	int err;

	vlan_active.index = idx;
	vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
	vlan_active.valid = false;
	err = gswip_pce_table_entry_write(priv, &vlan_active);
	if (err)
		dev_err(priv->dev, "failed to delete active VLAN: %d\n", err);
	priv->vlans[idx].bridge = NULL;

	return err;
}
static int gswip_vlan_add_unaware(struct gswip_priv *priv,
				  struct net_device *bridge, int port)
{
	struct gswip_pce_table_entry vlan_mapping = {0,};
	unsigned int max_ports = priv->hw_info->max_ports;
	unsigned int cpu_port = priv->hw_info->cpu_port;
	bool active_vlan_created = false;
	int idx = -1;
	int i;
	int err;

	/* Check if there is already a page for this bridge */
	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
		if (priv->vlans[i].bridge == bridge) {
			idx = i;
			break;
		}
	}

	/* If this bridge is not programmed yet, add a Active VLAN table
	 * entry in a free slot and prepare the VLAN mapping table entry.
	 */
	if (idx == -1) {
		idx = gswip_vlan_active_create(priv, bridge, -1, 0);
		if (idx < 0)
			return idx;
		active_vlan_created = true;

		vlan_mapping.index = idx;
		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
		/* VLAN ID byte, maps to the VLAN ID of vlan active table */
		vlan_mapping.val[0] = 0;
	} else {
		/* Read the existing VLAN mapping entry from the switch */
		vlan_mapping.index = idx;
		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
		err = gswip_pce_table_entry_read(priv, &vlan_mapping);
		if (err) {
			dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
				err);
			return err;
		}
	}

	/* Update the VLAN mapping entry and write it to the switch */
	vlan_mapping.val[1] |= BIT(cpu_port);
	vlan_mapping.val[1] |= BIT(port);
	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
		/* In case an Active VLAN was created delete it again */
		if (active_vlan_created)
			gswip_vlan_active_remove(priv, idx);
		return err;
	}

	gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));
	return 0;
}
static int gswip_vlan_add_aware(struct gswip_priv *priv,
				struct net_device *bridge, int port,
				u16 vid, bool untagged,
				bool pvid)
{
	struct gswip_pce_table_entry vlan_mapping = {0,};
	unsigned int max_ports = priv->hw_info->max_ports;
	unsigned int cpu_port = priv->hw_info->cpu_port;
	bool active_vlan_created = false;
	int idx = -1;
	int fid = -1;
	int i;
	int err;

	/* Check if there is already a page for this bridge */
	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
		if (priv->vlans[i].bridge == bridge) {
			if (fid != -1 && fid != priv->vlans[i].fid)
				dev_err(priv->dev, "one bridge with multiple flow ids\n");
			fid = priv->vlans[i].fid;
			if (priv->vlans[i].vid == vid) {
				idx = i;
				break;
			}
		}
	}

	/* If this bridge is not programmed yet, add a Active VLAN table
	 * entry in a free slot and prepare the VLAN mapping table entry.
	 */
	if (idx == -1) {
		idx = gswip_vlan_active_create(priv, bridge, fid, vid);
		if (idx < 0)
			return idx;
		active_vlan_created = true;

		vlan_mapping.index = idx;
		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
		/* VLAN ID byte, maps to the VLAN ID of vlan active table */
		vlan_mapping.val[0] = vid;
	} else {
		/* Read the existing VLAN mapping entry from the switch */
		vlan_mapping.index = idx;
		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
		err = gswip_pce_table_entry_read(priv, &vlan_mapping);
		if (err) {
			dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
				err);
			return err;
		}
	}

	vlan_mapping.val[0] = vid;
	/* Update the VLAN mapping entry and write it to the switch */
	vlan_mapping.val[1] |= BIT(cpu_port);
	vlan_mapping.val[2] |= BIT(cpu_port);
	vlan_mapping.val[1] |= BIT(port);
	if (untagged)
		vlan_mapping.val[2] &= ~BIT(port);
	else
		vlan_mapping.val[2] |= BIT(port);
	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
		/* In case an Active VLAN was created delete it again */
		if (active_vlan_created)
			gswip_vlan_active_remove(priv, idx);
		return err;
	}

	if (pvid)
		gswip_switch_w(priv, idx, GSWIP_PCE_DEFPVID(port));

	return 0;
}
static int gswip_vlan_remove(struct gswip_priv *priv,
			     struct net_device *bridge, int port,
			     u16 vid, bool pvid, bool vlan_aware)
{
	struct gswip_pce_table_entry vlan_mapping = {0,};
	unsigned int max_ports = priv->hw_info->max_ports;
	unsigned int cpu_port = priv->hw_info->cpu_port;
	int idx = -1;
	int i;
	int err;

	/* Check if there is already a page for this bridge */
	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
		if (priv->vlans[i].bridge == bridge &&
		    (!vlan_aware || priv->vlans[i].vid == vid)) {
			idx = i;
			break;
		}
	}

	if (idx == -1) {
		dev_err(priv->dev, "bridge to leave does not exist\n");
		return -ENOENT;
	}

	vlan_mapping.index = idx;
	vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
	err = gswip_pce_table_entry_read(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to read VLAN mapping: %d\n", err);
		return err;
	}

	vlan_mapping.val[1] &= ~BIT(port);
	vlan_mapping.val[2] &= ~BIT(port);
	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
		return err;
	}

	/* In case all ports are removed from the bridge, remove the VLAN */
	if ((vlan_mapping.val[1] & ~BIT(cpu_port)) == 0) {
		err = gswip_vlan_active_remove(priv, idx);
		if (err) {
			dev_err(priv->dev, "failed to write active VLAN: %d\n",
				err);
			return err;
		}
	}

	/* GSWIP 2.2 (GRX300) and later program here the VID directly. */
	if (pvid)
		gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));

	return 0;
}
static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
				  struct net_device *bridge)
{
	struct gswip_priv *priv = ds->priv;
	int err;

	/* When the bridge uses VLAN filtering we have to configure VLAN
	 * specific bridges. No bridge is configured here.
	 */
	if (!br_vlan_enabled(bridge)) {
		err = gswip_vlan_add_unaware(priv, bridge, port);
		if (err)
			return err;
		priv->port_vlan_filter &= ~BIT(port);
	} else {
		priv->port_vlan_filter |= BIT(port);
	}
	return gswip_add_single_port_br(priv, port, false);
}

static void gswip_port_bridge_leave(struct dsa_switch *ds, int port,
				    struct net_device *bridge)
{
	struct gswip_priv *priv = ds->priv;

	gswip_add_single_port_br(priv, port, true);

	/* When the bridge uses VLAN filtering we have to configure VLAN
	 * specific bridges. No bridge is configured here.
	 */
	if (!br_vlan_enabled(bridge))
		gswip_vlan_remove(priv, bridge, port, 0, true, false);
}
static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	struct gswip_priv *priv = ds->priv;
	struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
	unsigned int max_ports = priv->hw_info->max_ports;
	u16 vid;
	int i;
	int pos = max_ports;

	/* We only support VLAN filtering on bridges */
	if (!dsa_is_cpu_port(ds, port) && !bridge)
		return -EOPNOTSUPP;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
		int idx = -1;

		/* Check if there is already a page for this VLAN */
		for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
			if (priv->vlans[i].bridge == bridge &&
			    priv->vlans[i].vid == vid) {
				idx = i;
				break;
			}
		}

		/* If this VLAN is not programmed yet, we have to reserve
		 * one entry in the VLAN table. Make sure the next search
		 * starts at the following position.
		 */
		if (idx == -1) {
			/* Look for a free slot */
			for (; pos < ARRAY_SIZE(priv->vlans); pos++) {
				if (!priv->vlans[pos].bridge) {
					idx = pos;
					pos++;
					break;
				}
			}

			if (idx == -1)
				return -ENOSPC;
		}
	}

	return 0;
}
static void gswip_port_vlan_add(struct dsa_switch *ds, int port,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct gswip_priv *priv = ds->priv;
	struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	u16 vid;

	/* We have to receive all packets on the CPU port and should not
	 * do any VLAN filtering here. This is also called with bridge
	 * NULL and then we do not know for which bridge to configure
	 * this.
	 */
	if (dsa_is_cpu_port(ds, port))
		return;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
		gswip_vlan_add_aware(priv, bridge, port, vid, untagged, pvid);
}
static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
			       const struct switchdev_obj_port_vlan *vlan)
{
	struct gswip_priv *priv = ds->priv;
	struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	u16 vid;
	int err;

	/* We have to receive all packets on the CPU port and should not
	 * do any VLAN filtering here. This is also called with bridge
	 * NULL and then we do not know for which bridge to configure
	 * this.
	 */
	if (dsa_is_cpu_port(ds, port))
		return 0;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
		err = gswip_vlan_remove(priv, bridge, port, vid, pvid, true);
		if (err)
			return err;
	}

	return 0;
}
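/* The MAC bridge (learning) table has 2048 entries. Fast ageing walks the
 * whole table and invalidates every dynamically learned entry whose port
 * field matches the given port; static entries are left untouched.
 */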
static void gswip_port_fast_age(struct dsa_switch *ds, int port)
{
	struct gswip_priv *priv = ds->priv;
	struct gswip_pce_table_entry mac_bridge = {0,};
	int i;
	int err;

	for (i = 0; i < 2048; i++) {
		mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
		mac_bridge.index = i;

		err = gswip_pce_table_entry_read(priv, &mac_bridge);
		if (err) {
			dev_err(priv->dev, "failed to read mac bridge: %d\n",
				err);
			return;
		}

		if (!mac_bridge.valid)
			continue;

		if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC)
			continue;

		if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) != port)
			continue;

		mac_bridge.valid = false;
		err = gswip_pce_table_entry_write(priv, &mac_bridge);
		if (err) {
			dev_err(priv->dev, "failed to write mac bridge: %d\n",
				err);
			return;
		}
	}
}
static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
	struct gswip_priv *priv = ds->priv;
	u32 stp_state;

	switch (state) {
	case BR_STATE_DISABLED:
		gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
				  GSWIP_SDMA_PCTRLp(port));
		return;
	case BR_STATE_BLOCKING:
	case BR_STATE_LISTENING:
		stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LISTEN;
		break;
	case BR_STATE_LEARNING:
		stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LEARNING;
		break;
	case BR_STATE_FORWARDING:
		stp_state = GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING;
		break;
	default:
		dev_err(priv->dev, "invalid STP state: %d\n", state);
		return;
	}

	gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
			  GSWIP_SDMA_PCTRLp(port));
	gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_PSTATE_MASK, stp_state,
			  GSWIP_PCE_PCTRL_0p(port));
}
static int gswip_port_fdb(struct dsa_switch *ds, int port,
			  const unsigned char *addr, u16 vid, bool add)
{
	struct gswip_priv *priv = ds->priv;
	struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
	struct gswip_pce_table_entry mac_bridge = {0,};
	unsigned int cpu_port = priv->hw_info->cpu_port;
	int fid = -1;
	int i;
	int err;

	if (!bridge)
		return -EINVAL;

	for (i = cpu_port; i < ARRAY_SIZE(priv->vlans); i++) {
		if (priv->vlans[i].bridge == bridge) {
			fid = priv->vlans[i].fid;
			break;
		}
	}

	if (fid == -1) {
		dev_err(priv->dev, "Port not part of a bridge\n");
		return -EINVAL;
	}

	mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
	mac_bridge.key_mode = true;
	mac_bridge.key[0] = addr[5] | (addr[4] << 8);
	mac_bridge.key[1] = addr[3] | (addr[2] << 8);
	mac_bridge.key[2] = addr[1] | (addr[0] << 8);
	mac_bridge.key[3] = fid;
	mac_bridge.val[0] = add ? BIT(port) : 0; /* port map */
	mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_STATIC;
	mac_bridge.valid = add;

	err = gswip_pce_table_entry_write(priv, &mac_bridge);
	if (err)
		dev_err(priv->dev, "failed to write mac bridge: %d\n", err);

	return err;
}

static int gswip_port_fdb_add(struct dsa_switch *ds, int port,
			      const unsigned char *addr, u16 vid)
{
	return gswip_port_fdb(ds, port, addr, vid, true);
}

static int gswip_port_fdb_del(struct dsa_switch *ds, int port,
			      const unsigned char *addr, u16 vid)
{
	return gswip_port_fdb(ds, port, addr, vid, false);
}
static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
			       dsa_fdb_dump_cb_t *cb, void *data)
{
	struct gswip_priv *priv = ds->priv;
	struct gswip_pce_table_entry mac_bridge = {0,};
	unsigned char addr[6];
	int i;
	int err;

	for (i = 0; i < 2048; i++) {
		mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
		mac_bridge.index = i;

		err = gswip_pce_table_entry_read(priv, &mac_bridge);
		if (err) {
			dev_err(priv->dev, "failed to read mac bridge: %d\n",
				err);
			return err;
		}

		if (!mac_bridge.valid)
			continue;

		addr[5] = mac_bridge.key[0] & 0xff;
		addr[4] = (mac_bridge.key[0] >> 8) & 0xff;
		addr[3] = mac_bridge.key[1] & 0xff;
		addr[2] = (mac_bridge.key[1] >> 8) & 0xff;
		addr[1] = mac_bridge.key[2] & 0xff;
		addr[0] = (mac_bridge.key[2] >> 8) & 0xff;
		if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) {
			if (mac_bridge.val[0] & BIT(port))
				cb(addr, 0, true, data);
		} else {
			if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port)
				cb(addr, 0, false, data);
		}
	}

	return 0;
}
static void gswip_phylink_validate(struct dsa_switch *ds, int port,
				   unsigned long *supported,
				   struct phylink_link_state *state)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	switch (port) {
	case 0:
	case 1:
		if (!phy_interface_mode_is_rgmii(state->interface) &&
		    state->interface != PHY_INTERFACE_MODE_MII &&
		    state->interface != PHY_INTERFACE_MODE_REVMII &&
		    state->interface != PHY_INTERFACE_MODE_RMII)
			goto unsupported;
		break;
	case 2:
	case 3:
	case 4:
		if (state->interface != PHY_INTERFACE_MODE_INTERNAL)
			goto unsupported;
		break;
	case 5:
		if (!phy_interface_mode_is_rgmii(state->interface) &&
		    state->interface != PHY_INTERFACE_MODE_INTERNAL)
			goto unsupported;
		break;
	default:
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		dev_err(ds->dev, "Unsupported port: %i\n", port);
		return;
	}

	/* Allow all the expected bits */
	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);
	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	/* With the exclusion of MII and Reverse MII, we support Gigabit,
	 * including Half duplex
	 */
	if (state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_REVMII) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseT_Half);
	}

	phylink_set(mask, 10baseT_Half);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Half);
	phylink_set(mask, 100baseT_Full);

	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	return;

unsupported:
	bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
	dev_err(ds->dev, "Unsupported interface: %d\n", state->interface);
}
static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
				     unsigned int mode,
				     const struct phylink_link_state *state)
{
	struct gswip_priv *priv = ds->priv;
	u32 miicfg = 0;

	miicfg |= GSWIP_MII_CFG_LDCLKDIS;

	switch (state->interface) {
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_INTERNAL:
		miicfg |= GSWIP_MII_CFG_MODE_MIIM;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		miicfg |= GSWIP_MII_CFG_MODE_MIIP;
		break;
	case PHY_INTERFACE_MODE_RMII:
		miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		miicfg |= GSWIP_MII_CFG_MODE_RGMII;
		break;
	default:
		dev_err(ds->dev,
			"Unsupported interface: %d\n", state->interface);
		return;
	}
	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_MODE_MASK, miicfg, port);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_RGMII_ID:
		gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK |
					  GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
		break;
	case PHY_INTERFACE_MODE_RGMII_RXID:
		gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
		break;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK, 0, port);
		break;
	default:
		break;
	}
}
static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port,
					unsigned int mode,
					phy_interface_t interface)
{
	struct gswip_priv *priv = ds->priv;

	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);
}

static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
				      unsigned int mode,
				      phy_interface_t interface,
				      struct phy_device *phydev)
{
	struct gswip_priv *priv = ds->priv;

	/* Enable the xMII interface only for the external PHY */
	if (interface != PHY_INTERFACE_MODE_INTERNAL)
		gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
}
static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
			      uint8_t *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++)
		strncpy(data + i * ETH_GSTRING_LEN, gswip_rmon_cnt[i].name,
			ETH_GSTRING_LEN);
}
static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table,
				    u32 index)
{
	u32 result;
	int err;

	gswip_switch_w(priv, index, GSWIP_BM_RAM_ADDR);
	gswip_switch_mask(priv, GSWIP_BM_RAM_CTRL_ADDR_MASK |
				GSWIP_BM_RAM_CTRL_OPMOD,
			      table | GSWIP_BM_RAM_CTRL_BAS,
			      GSWIP_BM_RAM_CTRL);

	err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL,
				     GSWIP_BM_RAM_CTRL_BAS);
	if (err) {
		dev_err(priv->dev, "timeout while reading table: %u, index: %u",
			table, index);
		return 0;
	}

	result = gswip_switch_r(priv, GSWIP_BM_RAM_VAL(0));
	result |= gswip_switch_r(priv, GSWIP_BM_RAM_VAL(1)) << 16;

	return result;
}
static void gswip_get_ethtool_stats(struct dsa_switch *ds, int port,
				    uint64_t *data)
{
	struct gswip_priv *priv = ds->priv;
	const struct gswip_rmon_cnt_desc *rmon_cnt;
	int i;
	u64 high;

	for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) {
		rmon_cnt = &gswip_rmon_cnt[i];

		data[i] = gswip_bcm_ram_entry_read(priv, port,
						   rmon_cnt->offset);
		if (rmon_cnt->size == 2) {
			high = gswip_bcm_ram_entry_read(priv, port,
							rmon_cnt->offset + 1);
			data[i] |= high << 32;
		}
	}
}
static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	if (sset != ETH_SS_STATS)
		return 0;

	return ARRAY_SIZE(gswip_rmon_cnt);
}
static const struct dsa_switch_ops gswip_switch_ops = {
	.get_tag_protocol	= gswip_get_tag_protocol,
	.setup			= gswip_setup,
	.port_enable		= gswip_port_enable,
	.port_disable		= gswip_port_disable,
	.port_bridge_join	= gswip_port_bridge_join,
	.port_bridge_leave	= gswip_port_bridge_leave,
	.port_fast_age		= gswip_port_fast_age,
	.port_vlan_filtering	= gswip_port_vlan_filtering,
	.port_vlan_prepare	= gswip_port_vlan_prepare,
	.port_vlan_add		= gswip_port_vlan_add,
	.port_vlan_del		= gswip_port_vlan_del,
	.port_stp_state_set	= gswip_port_stp_state_set,
	.port_fdb_add		= gswip_port_fdb_add,
	.port_fdb_del		= gswip_port_fdb_del,
	.port_fdb_dump		= gswip_port_fdb_dump,
	.phylink_validate	= gswip_phylink_validate,
	.phylink_mac_config	= gswip_phylink_mac_config,
	.phylink_mac_link_down	= gswip_phylink_mac_link_down,
	.phylink_mac_link_up	= gswip_phylink_mac_link_up,
	.get_strings		= gswip_get_strings,
	.get_ethtool_stats	= gswip_get_ethtool_stats,
	.get_sset_count		= gswip_get_sset_count,
};
static const struct xway_gphy_match_data xrx200a1x_gphy_data = {
	.fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin",
	.ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin",
};

static const struct xway_gphy_match_data xrx200a2x_gphy_data = {
	.fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin",
	.ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin",
};

static const struct xway_gphy_match_data xrx300_gphy_data = {
	.fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin",
	.ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin",
};

static const struct of_device_id xway_gphy_match[] = {
	{ .compatible = "lantiq,xrx200-gphy-fw", .data = NULL },
	{ .compatible = "lantiq,xrx200a1x-gphy-fw", .data = &xrx200a1x_gphy_data },
	{ .compatible = "lantiq,xrx200a2x-gphy-fw", .data = &xrx200a2x_gphy_data },
	{ .compatible = "lantiq,xrx300-gphy-fw", .data = &xrx300_gphy_data },
	{ .compatible = "lantiq,xrx330-gphy-fw", .data = &xrx300_gphy_data },
	{},
};
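/* Loading a GPHY firmware image: enable the gate clock, hold the GPHY in
 * reset, copy the image into a 16 kB aligned DMA buffer, tell the RCU where
 * to find it and finally release the reset so the GPHY boots from it.
 */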
static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gphy_fw)
{
	struct device *dev = priv->dev;
	const struct firmware *fw;
	void *fw_addr;
	dma_addr_t dma_addr;
	dma_addr_t dev_addr;
	size_t size;
	int ret;

	ret = clk_prepare_enable(gphy_fw->clk_gate);
	if (ret)
		return ret;

	reset_control_assert(gphy_fw->reset);

	ret = request_firmware(&fw, gphy_fw->fw_name, dev);
	if (ret) {
		dev_err(dev, "failed to load firmware: %s, error: %i\n",
			gphy_fw->fw_name, ret);
		return ret;
	}

	/* GPHY cores need the firmware code in a persistent and contiguous
	 * memory area with a 16 kB boundary aligned start address.
	 */
	size = fw->size + XRX200_GPHY_FW_ALIGN;

	fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
	if (fw_addr) {
		fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN);
		dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN);
		memcpy(fw_addr, fw->data, fw->size);
	} else {
		dev_err(dev, "failed to alloc firmware memory\n");
		release_firmware(fw);
		return -ENOMEM;
	}

	release_firmware(fw);

	ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, dev_addr);
	if (ret)
		return ret;

	reset_control_deassert(gphy_fw->reset);

	return ret;
}
static int gswip_gphy_fw_probe(struct gswip_priv *priv,
			       struct gswip_gphy_fw *gphy_fw,
			       struct device_node *gphy_fw_np, int i)
{
	struct device *dev = priv->dev;
	u32 gphy_mode;
	int ret;
	char gphyname[10];

	snprintf(gphyname, sizeof(gphyname), "gphy%d", i);

	gphy_fw->clk_gate = devm_clk_get(dev, gphyname);
	if (IS_ERR(gphy_fw->clk_gate)) {
		dev_err(dev, "Failed to lookup gate clock\n");
		return PTR_ERR(gphy_fw->clk_gate);
	}

	ret = of_property_read_u32(gphy_fw_np, "reg", &gphy_fw->fw_addr_offset);
	if (ret)
		return ret;

	ret = of_property_read_u32(gphy_fw_np, "lantiq,gphy-mode", &gphy_mode);
	/* Default to GE mode */
	if (ret)
		gphy_mode = GPHY_MODE_GE;

	switch (gphy_mode) {
	case GPHY_MODE_FE:
		gphy_fw->fw_name = priv->gphy_fw_name_cfg->fe_firmware_name;
		break;
	case GPHY_MODE_GE:
		gphy_fw->fw_name = priv->gphy_fw_name_cfg->ge_firmware_name;
		break;
	default:
		dev_err(dev, "Unknown GPHY mode %d\n", gphy_mode);
		return -EINVAL;
	}

	gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np);
	if (IS_ERR(gphy_fw->reset)) {
		if (PTR_ERR(gphy_fw->reset) != -EPROBE_DEFER)
			dev_err(dev, "Failed to lookup gphy reset\n");
		return PTR_ERR(gphy_fw->reset);
	}

	return gswip_gphy_fw_load(priv, gphy_fw);
}
static void gswip_gphy_fw_remove(struct gswip_priv *priv,
				 struct gswip_gphy_fw *gphy_fw)
{
	int ret;

	/* check if the device was fully probed */
	if (!gphy_fw->fw_name)
		return;

	ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, 0);
	if (ret)
		dev_err(priv->dev, "can not reset GPHY FW pointer");

	clk_disable_unprepare(gphy_fw->clk_gate);

	reset_control_put(gphy_fw->reset);
}
static int gswip_gphy_fw_list(struct gswip_priv *priv,
			      struct device_node *gphy_fw_list_np, u32 version)
{
	struct device *dev = priv->dev;
	struct device_node *gphy_fw_np;
	const struct of_device_id *match;
	int err;
	int i = 0;

	/* The VRX200 rev 1.1 uses the GSWIP 2.0 and needs the older
	 * GPHY firmware. The VRX200 rev 1.2 uses the GSWIP 2.1 and also
	 * needs a different GPHY firmware.
	 */
	if (of_device_is_compatible(gphy_fw_list_np, "lantiq,xrx200-gphy-fw")) {
		switch (version) {
		case GSWIP_VERSION_2_0:
			priv->gphy_fw_name_cfg = &xrx200a1x_gphy_data;
			break;
		case GSWIP_VERSION_2_1:
			priv->gphy_fw_name_cfg = &xrx200a2x_gphy_data;
			break;
		default:
			dev_err(dev, "unknown GSWIP version: 0x%x", version);
			return -ENOENT;
		}
	}

	match = of_match_node(xway_gphy_match, gphy_fw_list_np);
	if (match && match->data)
		priv->gphy_fw_name_cfg = match->data;

	if (!priv->gphy_fw_name_cfg) {
		dev_err(dev, "GPHY compatible type not supported");
		return -ENOENT;
	}

	priv->num_gphy_fw = of_get_available_child_count(gphy_fw_list_np);
	if (!priv->num_gphy_fw)
		return -ENOENT;

	priv->rcu_regmap = syscon_regmap_lookup_by_phandle(gphy_fw_list_np,
							   "lantiq,rcu");
	if (IS_ERR(priv->rcu_regmap))
		return PTR_ERR(priv->rcu_regmap);

	priv->gphy_fw = devm_kmalloc_array(dev, priv->num_gphy_fw,
					   sizeof(*priv->gphy_fw),
					   GFP_KERNEL | __GFP_ZERO);
	if (!priv->gphy_fw)
		return -ENOMEM;

	for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
		err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
					  gphy_fw_np, i);
		if (err)
			goto remove_gphy;
		i++;
	}

	return 0;

remove_gphy:
	for (i = 0; i < priv->num_gphy_fw; i++)
		gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
	return err;
}
static int gswip_probe(struct platform_device *pdev)
{
	struct gswip_priv *priv;
	struct device_node *mdio_np, *gphy_fw_np;
	struct device *dev = &pdev->dev;
	int err;
	int i;
	u32 version;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->gswip = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->gswip))
		return PTR_ERR(priv->gswip);

	priv->mdio = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(priv->mdio))
		return PTR_ERR(priv->mdio);

	priv->mii = devm_platform_ioremap_resource(pdev, 2);
	if (IS_ERR(priv->mii))
		return PTR_ERR(priv->mii);

	priv->hw_info = of_device_get_match_data(dev);
	if (!priv->hw_info)
		return -EINVAL;

	priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
	if (!priv->ds)
		return -ENOMEM;

	priv->ds->dev = dev;
	priv->ds->num_ports = priv->hw_info->max_ports;
	priv->ds->priv = priv;
	priv->ds->ops = &gswip_switch_ops;
	priv->dev = dev;
	version = gswip_switch_r(priv, GSWIP_VERSION);

	/* load the GPHY firmware, if available */
	gphy_fw_np = of_get_compatible_child(dev->of_node, "lantiq,gphy-fw");
	if (gphy_fw_np) {
		err = gswip_gphy_fw_list(priv, gphy_fw_np, version);
		of_node_put(gphy_fw_np);
		if (err) {
			dev_err(dev, "gphy fw probe failed\n");
			return err;
		}
	}

	/* bring up the mdio bus */
	mdio_np = of_get_compatible_child(dev->of_node, "lantiq,xrx200-mdio");
	if (mdio_np) {
		err = gswip_mdio(priv, mdio_np);
		if (err) {
			dev_err(dev, "mdio probe failed\n");
			goto put_mdio_node;
		}
	}

	err = dsa_register_switch(priv->ds);
	if (err) {
		dev_err(dev, "dsa switch register failed: %i\n", err);
		goto mdio_bus;
	}
	if (!dsa_is_cpu_port(priv->ds, priv->hw_info->cpu_port)) {
		dev_err(dev, "wrong CPU port defined, HW only supports port: %i",
			priv->hw_info->cpu_port);
		err = -EINVAL;
		goto disable_switch;
	}

	platform_set_drvdata(pdev, priv);

	dev_info(dev, "probed GSWIP version %lx mod %lx\n",
		 (version & GSWIP_VERSION_REV_MASK) >> GSWIP_VERSION_REV_SHIFT,
		 (version & GSWIP_VERSION_MOD_MASK) >> GSWIP_VERSION_MOD_SHIFT);
	return 0;

disable_switch:
	gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
	dsa_unregister_switch(priv->ds);
mdio_bus:
	if (mdio_np)
		mdiobus_unregister(priv->ds->slave_mii_bus);
put_mdio_node:
	of_node_put(mdio_np);
	for (i = 0; i < priv->num_gphy_fw; i++)
		gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
	return err;
}
static int gswip_remove(struct platform_device *pdev)
{
	struct gswip_priv *priv = platform_get_drvdata(pdev);
	int i;

	/* disable the switch */
	gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);

	dsa_unregister_switch(priv->ds);

	if (priv->ds->slave_mii_bus) {
		mdiobus_unregister(priv->ds->slave_mii_bus);
		of_node_put(priv->ds->slave_mii_bus->dev.of_node);
	}

	for (i = 0; i < priv->num_gphy_fw; i++)
		gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);

	return 0;
}
static const struct gswip_hw_info gswip_xrx200 = {
	.max_ports = 7,
	.cpu_port = 6,
};

static const struct of_device_id gswip_of_match[] = {
	{ .compatible = "lantiq,xrx200-gswip", .data = &gswip_xrx200 },
	{},
};
MODULE_DEVICE_TABLE(of, gswip_of_match);

static struct platform_driver gswip_driver = {
	.probe = gswip_probe,
	.remove = gswip_remove,
	.driver = {
		.name = "gswip",
		.of_match_table = gswip_of_match,
	},
};

module_platform_driver(gswip_driver);
MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin");
MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin");
MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver");
MODULE_LICENSE("GPL v2");