// SPDX-License-Identifier: GPL-2.0
/*
 * Lantiq / Intel GSWIP switch driver for VRX200 SoCs
 *
 * Copyright (C) 2010 Lantiq Deutschland
 * Copyright (C) 2012 John Crispin <john@phrozen.org>
 * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
 *
 * The VLAN and bridge model the GSWIP hardware uses does not directly
 * match the model DSA uses.
 *
 * The hardware has 64 possible table entries for bridges with one VLAN
 * ID, one flow id and a list of ports for each bridge. All entries which
 * match the same flow ID are combined in the MAC learning table; they
 * act as one global bridge.
 * The hardware does not support VLAN filtering on the port, but on the
 * bridge; this driver converts the DSA model to the hardware model.
 *
 * The CPU gets all the exception frames which do not match any forwarding
 * rule and the CPU port is also added to all bridges. This makes it possible
 * to handle all the special cases easily in software.
 * At initialization the driver allocates one bridge table entry for
 * each switch port which is used when the port is used without an
 * explicit bridge. This prevents the frames from being forwarded
 * between all LAN ports by default.
 */

#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <net/dsa.h>
#include <dt-bindings/mips/lantiq_rcu_gphy.h>

#include "lantiq_pce.h"

/* GSWIP MDIO Registers */
#define GSWIP_MDIO_GLOB			0x00
#define GSWIP_MDIO_GLOB_ENABLE		BIT(15)
#define GSWIP_MDIO_CTRL			0x08
#define GSWIP_MDIO_CTRL_BUSY		BIT(12)
#define GSWIP_MDIO_CTRL_RD		BIT(11)
#define GSWIP_MDIO_CTRL_WR		BIT(10)
#define GSWIP_MDIO_CTRL_PHYAD_MASK	0x1f
#define GSWIP_MDIO_CTRL_PHYAD_SHIFT	5
#define GSWIP_MDIO_CTRL_REGAD_MASK	0x1f
#define GSWIP_MDIO_READ			0x09
#define GSWIP_MDIO_WRITE		0x0A
#define GSWIP_MDIO_MDC_CFG0		0x0B
#define GSWIP_MDIO_MDC_CFG1		0x0C
#define GSWIP_MDIO_PHYp(p)		(0x15 - (p))
#define GSWIP_MDIO_PHY_LINK_MASK	0x6000
#define GSWIP_MDIO_PHY_LINK_AUTO	0x0000
#define GSWIP_MDIO_PHY_LINK_DOWN	0x4000
#define GSWIP_MDIO_PHY_LINK_UP		0x2000
#define GSWIP_MDIO_PHY_SPEED_MASK	0x1800
#define GSWIP_MDIO_PHY_SPEED_AUTO	0x1800
#define GSWIP_MDIO_PHY_SPEED_M10	0x0000
#define GSWIP_MDIO_PHY_SPEED_M100	0x0800
#define GSWIP_MDIO_PHY_SPEED_G1		0x1000
#define GSWIP_MDIO_PHY_FDUP_MASK	0x0600
#define GSWIP_MDIO_PHY_FDUP_AUTO	0x0000
#define GSWIP_MDIO_PHY_FDUP_EN		0x0200
#define GSWIP_MDIO_PHY_FDUP_DIS		0x0600
#define GSWIP_MDIO_PHY_FCONTX_MASK	0x0180
#define GSWIP_MDIO_PHY_FCONTX_AUTO	0x0000
#define GSWIP_MDIO_PHY_FCONTX_EN	0x0100
#define GSWIP_MDIO_PHY_FCONTX_DIS	0x0180
#define GSWIP_MDIO_PHY_FCONRX_MASK	0x0060
#define GSWIP_MDIO_PHY_FCONRX_AUTO	0x0000
#define GSWIP_MDIO_PHY_FCONRX_EN	0x0020
#define GSWIP_MDIO_PHY_FCONRX_DIS	0x0060
#define GSWIP_MDIO_PHY_ADDR_MASK	0x001f
#define GSWIP_MDIO_PHY_MASK		(GSWIP_MDIO_PHY_ADDR_MASK | \
					 GSWIP_MDIO_PHY_FCONRX_MASK | \
					 GSWIP_MDIO_PHY_FCONTX_MASK | \
					 GSWIP_MDIO_PHY_LINK_MASK | \
					 GSWIP_MDIO_PHY_SPEED_MASK | \
					 GSWIP_MDIO_PHY_FDUP_MASK)

/* GSWIP MII Registers */
#define GSWIP_MII_CFG0			0x00
#define GSWIP_MII_CFG1			0x02
#define GSWIP_MII_CFG5			0x04
#define GSWIP_MII_CFG_EN		BIT(14)
#define GSWIP_MII_CFG_LDCLKDIS		BIT(12)
#define GSWIP_MII_CFG_MODE_MIIP		0x0
#define GSWIP_MII_CFG_MODE_MIIM		0x1
#define GSWIP_MII_CFG_MODE_RMIIP	0x2
#define GSWIP_MII_CFG_MODE_RMIIM	0x3
#define GSWIP_MII_CFG_MODE_RGMII	0x4
#define GSWIP_MII_CFG_MODE_MASK		0xf
#define GSWIP_MII_CFG_RATE_M2P5		0x00
#define GSWIP_MII_CFG_RATE_M25		0x10
#define GSWIP_MII_CFG_RATE_M125		0x20
#define GSWIP_MII_CFG_RATE_M50		0x30
#define GSWIP_MII_CFG_RATE_AUTO		0x40
#define GSWIP_MII_CFG_RATE_MASK		0x70
#define GSWIP_MII_PCDU0			0x01
#define GSWIP_MII_PCDU1			0x03
#define GSWIP_MII_PCDU5			0x05
#define GSWIP_MII_PCDU_TXDLY_MASK	GENMASK(2, 0)
#define GSWIP_MII_PCDU_RXDLY_MASK	GENMASK(9, 7)

/* GSWIP Core Registers */
#define GSWIP_SWRES			0x000
#define GSWIP_SWRES_R1			BIT(1)	/* GSWIP Software reset */
#define GSWIP_SWRES_R0			BIT(0)	/* GSWIP Hardware reset */
#define GSWIP_VERSION			0x013
#define GSWIP_VERSION_REV_SHIFT		0
#define GSWIP_VERSION_REV_MASK		GENMASK(7, 0)
#define GSWIP_VERSION_MOD_SHIFT		8
#define GSWIP_VERSION_MOD_MASK		GENMASK(15, 8)
#define GSWIP_VERSION_2_0		0x100
#define GSWIP_VERSION_2_1		0x021
#define GSWIP_VERSION_2_2		0x122
#define GSWIP_VERSION_2_2_ETC		0x022

#define GSWIP_BM_RAM_VAL(x)		(0x043 - (x))
#define GSWIP_BM_RAM_ADDR		0x044
#define GSWIP_BM_RAM_CTRL		0x045
#define GSWIP_BM_RAM_CTRL_BAS		BIT(15)
#define GSWIP_BM_RAM_CTRL_OPMOD		BIT(5)
#define GSWIP_BM_RAM_CTRL_ADDR_MASK	GENMASK(4, 0)
#define GSWIP_BM_QUEUE_GCTRL		0x04A
#define GSWIP_BM_QUEUE_GCTRL_GL_MOD	BIT(10)
/* buffer management Port Configuration Register */
#define GSWIP_BM_PCFGp(p)		(0x080 + ((p) * 2))
#define GSWIP_BM_PCFG_CNTEN		BIT(0)	/* RMON Counter Enable */
#define GSWIP_BM_PCFG_IGCNT		BIT(1)	/* Ingress Special Tag RMON count */
/* buffer management Port Control Register */
#define GSWIP_BM_RMON_CTRLp(p)		(0x81 + ((p) * 2))
#define GSWIP_BM_CTRL_RMON_RAM1_RES	BIT(0)	/* Software Reset for RMON RAM 1 */
#define GSWIP_BM_CTRL_RMON_RAM2_RES	BIT(1)	/* Software Reset for RMON RAM 2 */

/* PCE */
#define GSWIP_PCE_TBL_KEY(x)		(0x447 - (x))
#define GSWIP_PCE_TBL_MASK		0x448
#define GSWIP_PCE_TBL_VAL(x)		(0x44D - (x))
#define GSWIP_PCE_TBL_ADDR		0x44E
#define GSWIP_PCE_TBL_CTRL		0x44F
#define GSWIP_PCE_TBL_CTRL_BAS		BIT(15)
#define GSWIP_PCE_TBL_CTRL_TYPE		BIT(13)
#define GSWIP_PCE_TBL_CTRL_VLD		BIT(12)
#define GSWIP_PCE_TBL_CTRL_KEYFORM	BIT(11)
#define GSWIP_PCE_TBL_CTRL_GMAP_MASK	GENMASK(10, 7)
#define GSWIP_PCE_TBL_CTRL_OPMOD_MASK	GENMASK(6, 5)
#define GSWIP_PCE_TBL_CTRL_OPMOD_ADRD	0x00
#define GSWIP_PCE_TBL_CTRL_OPMOD_ADWR	0x20
#define GSWIP_PCE_TBL_CTRL_OPMOD_KSRD	0x40
#define GSWIP_PCE_TBL_CTRL_OPMOD_KSWR	0x60
#define GSWIP_PCE_TBL_CTRL_ADDR_MASK	GENMASK(4, 0)
#define GSWIP_PCE_PMAP1			0x453	/* Monitoring port map */
#define GSWIP_PCE_PMAP2			0x454	/* Default Multicast port map */
#define GSWIP_PCE_PMAP3			0x455	/* Default Unknown Unicast port map */
#define GSWIP_PCE_GCTRL_0		0x456
#define GSWIP_PCE_GCTRL_0_MTFL		BIT(0)	/* MAC Table Flushing */
#define GSWIP_PCE_GCTRL_0_MC_VALID	BIT(3)
#define GSWIP_PCE_GCTRL_0_VLAN		BIT(14)	/* VLAN aware Switching */
#define GSWIP_PCE_GCTRL_1		0x457
#define GSWIP_PCE_GCTRL_1_MAC_GLOCK	BIT(2)	/* MAC Address table lock */
#define GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD	BIT(3)	/* Mac address table lock forwarding mode */
#define GSWIP_PCE_PCTRL_0p(p)		(0x480 + ((p) * 0xA))
#define GSWIP_PCE_PCTRL_0_TVM		BIT(5)	/* Transparent VLAN mode */
#define GSWIP_PCE_PCTRL_0_VREP		BIT(6)	/* VLAN Replace Mode */
#define GSWIP_PCE_PCTRL_0_INGRESS	BIT(11)	/* Accept special tag in ingress */
#define GSWIP_PCE_PCTRL_0_PSTATE_LISTEN		0x0
#define GSWIP_PCE_PCTRL_0_PSTATE_RX		0x1
#define GSWIP_PCE_PCTRL_0_PSTATE_TX		0x2
#define GSWIP_PCE_PCTRL_0_PSTATE_LEARNING	0x3
#define GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING	0x7
#define GSWIP_PCE_PCTRL_0_PSTATE_MASK	GENMASK(2, 0)
#define GSWIP_PCE_VCTRL(p)		(0x485 + ((p) * 0xA))
#define GSWIP_PCE_VCTRL_UVR		BIT(0)	/* Unknown VLAN Rule */
#define GSWIP_PCE_VCTRL_VIMR		BIT(3)	/* VLAN Ingress Member violation rule */
#define GSWIP_PCE_VCTRL_VEMR		BIT(4)	/* VLAN Egress Member violation rule */
#define GSWIP_PCE_VCTRL_VSR		BIT(5)	/* VLAN Security */
#define GSWIP_PCE_VCTRL_VID0		BIT(6)	/* Priority Tagged Rule */
#define GSWIP_PCE_DEFPVID(p)		(0x486 + ((p) * 0xA))

#define GSWIP_MAC_FLEN			0x8C5
#define GSWIP_MAC_CTRL_2p(p)		(0x905 + ((p) * 0xC))
#define GSWIP_MAC_CTRL_2_MLEN		BIT(3)	/* Maximum Untagged Frame Length */

/* Ethernet Switch Fetch DMA Port Control Register */
#define GSWIP_FDMA_PCTRLp(p)		(0xA80 + ((p) * 0x6))
#define GSWIP_FDMA_PCTRL_EN		BIT(0)	/* FDMA Port Enable */
#define GSWIP_FDMA_PCTRL_STEN		BIT(1)	/* Special Tag Insertion Enable */
#define GSWIP_FDMA_PCTRL_VLANMOD_MASK	GENMASK(4, 3)	/* VLAN Modification Control */
#define GSWIP_FDMA_PCTRL_VLANMOD_SHIFT	3	/* VLAN Modification Control */
#define GSWIP_FDMA_PCTRL_VLANMOD_DIS	(0x0 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
#define GSWIP_FDMA_PCTRL_VLANMOD_PRIO	(0x1 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
#define GSWIP_FDMA_PCTRL_VLANMOD_ID	(0x2 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
#define GSWIP_FDMA_PCTRL_VLANMOD_BOTH	(0x3 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)

/* Ethernet Switch Store DMA Port Control Register */
#define GSWIP_SDMA_PCTRLp(p)		(0xBC0 + ((p) * 0x6))
#define GSWIP_SDMA_PCTRL_EN		BIT(0)	/* SDMA Port Enable */
#define GSWIP_SDMA_PCTRL_FCEN		BIT(1)	/* Flow Control Enable */
#define GSWIP_SDMA_PCTRL_PAUFWD		BIT(1)	/* Pause Frame Forwarding */

#define GSWIP_TABLE_ACTIVE_VLAN		0x01
#define GSWIP_TABLE_VLAN_MAPPING	0x02
#define GSWIP_TABLE_MAC_BRIDGE		0x0b
#define GSWIP_TABLE_MAC_BRIDGE_STATIC	0x01	/* Static entry, not subject to aging */

#define XRX200_GPHY_FW_ALIGN	(16 * 1024)

struct gswip_hw_info {
	int max_ports;
	int cpu_port;
};

struct xway_gphy_match_data {
	char *fe_firmware_name;
	char *ge_firmware_name;
};

struct gswip_gphy_fw {
	struct clk *clk_gate;
	struct reset_control *reset;
	u32 fw_addr_offset;
	char *fw_name;
};

struct gswip_vlan {
	struct net_device *bridge;
	u16 vid;
	u8 fid;
};

struct gswip_priv {
	__iomem void *gswip;
	__iomem void *mdio;
	__iomem void *mii;
	const struct gswip_hw_info *hw_info;
	const struct xway_gphy_match_data *gphy_fw_name_cfg;
	struct dsa_switch *ds;
	struct device *dev;
	struct regmap *rcu_regmap;
	struct gswip_vlan vlans[64];
	int num_gphy_fw;
	struct gswip_gphy_fw *gphy_fw;
	u32 port_vlan_filter;
};

struct gswip_pce_table_entry {
	u16 index;      // PCE_TBL_ADDR.ADDR = pData->table_index
	u16 table;      // PCE_TBL_CTRL.ADDR = pData->table
	u16 key[8];
	u16 val[5];
	u16 mask;
	u8 gmap;
	bool type;
	bool valid;
	bool key_mode;
};

struct gswip_rmon_cnt_desc {
	unsigned int size;
	unsigned int offset;
	const char *name;
};

#define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name}

static const struct gswip_rmon_cnt_desc gswip_rmon_cnt[] = {
	/** Receive Packet Count (only packets that are accepted and not discarded). */
	MIB_DESC(1, 0x1F, "RxGoodPkts"),
	MIB_DESC(1, 0x23, "RxUnicastPkts"),
	MIB_DESC(1, 0x22, "RxMulticastPkts"),
	MIB_DESC(1, 0x21, "RxFCSErrorPkts"),
	MIB_DESC(1, 0x1D, "RxUnderSizeGoodPkts"),
	MIB_DESC(1, 0x1E, "RxUnderSizeErrorPkts"),
	MIB_DESC(1, 0x1B, "RxOversizeGoodPkts"),
	MIB_DESC(1, 0x1C, "RxOversizeErrorPkts"),
	MIB_DESC(1, 0x20, "RxGoodPausePkts"),
	MIB_DESC(1, 0x1A, "RxAlignErrorPkts"),
	MIB_DESC(1, 0x12, "Rx64BytePkts"),
	MIB_DESC(1, 0x13, "Rx127BytePkts"),
	MIB_DESC(1, 0x14, "Rx255BytePkts"),
	MIB_DESC(1, 0x15, "Rx511BytePkts"),
	MIB_DESC(1, 0x16, "Rx1023BytePkts"),
	/** Receive Size 1024-1522 (or more, if configured) Packet Count. */
	MIB_DESC(1, 0x17, "RxMaxBytePkts"),
	MIB_DESC(1, 0x18, "RxDroppedPkts"),
	MIB_DESC(1, 0x19, "RxFilteredPkts"),
	MIB_DESC(2, 0x24, "RxGoodBytes"),
	MIB_DESC(2, 0x26, "RxBadBytes"),
	MIB_DESC(1, 0x11, "TxAcmDroppedPkts"),
	MIB_DESC(1, 0x0C, "TxGoodPkts"),
	MIB_DESC(1, 0x06, "TxUnicastPkts"),
	MIB_DESC(1, 0x07, "TxMulticastPkts"),
	MIB_DESC(1, 0x00, "Tx64BytePkts"),
	MIB_DESC(1, 0x01, "Tx127BytePkts"),
	MIB_DESC(1, 0x02, "Tx255BytePkts"),
	MIB_DESC(1, 0x03, "Tx511BytePkts"),
	MIB_DESC(1, 0x04, "Tx1023BytePkts"),
	/** Transmit Size 1024-1522 (or more, if configured) Packet Count. */
	MIB_DESC(1, 0x05, "TxMaxBytePkts"),
	MIB_DESC(1, 0x08, "TxSingleCollCount"),
	MIB_DESC(1, 0x09, "TxMultCollCount"),
	MIB_DESC(1, 0x0A, "TxLateCollCount"),
	MIB_DESC(1, 0x0B, "TxExcessCollCount"),
	MIB_DESC(1, 0x0D, "TxPauseCount"),
	MIB_DESC(1, 0x10, "TxDroppedPkts"),
	MIB_DESC(2, 0x0E, "TxGoodBytes"),
};
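
/* Register offsets throughout this driver are given in units of 32-bit
 * words, so the accessors below multiply them by 4 to form the byte offset
 * into the ioremapped switch core, MDIO and xMII register ranges.
 */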

static u32 gswip_switch_r(struct gswip_priv *priv, u32 offset)
{
	return __raw_readl(priv->gswip + (offset * 4));
}

static void gswip_switch_w(struct gswip_priv *priv, u32 val, u32 offset)
{
	__raw_writel(val, priv->gswip + (offset * 4));
}

static void gswip_switch_mask(struct gswip_priv *priv, u32 clear, u32 set,
			      u32 offset)
{
	u32 val = gswip_switch_r(priv, offset);

	val &= ~(clear);
	val |= set;
	gswip_switch_w(priv, val, offset);
}

static u32 gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset,
				  u32 cleared)
{
	u32 val;

	return readx_poll_timeout(__raw_readl, priv->gswip + (offset * 4), val,
				  (val & cleared) == 0, 20, 50000);
}

static u32 gswip_mdio_r(struct gswip_priv *priv, u32 offset)
{
	return __raw_readl(priv->mdio + (offset * 4));
}

static void gswip_mdio_w(struct gswip_priv *priv, u32 val, u32 offset)
{
	__raw_writel(val, priv->mdio + (offset * 4));
}

static void gswip_mdio_mask(struct gswip_priv *priv, u32 clear, u32 set,
			    u32 offset)
{
	u32 val = gswip_mdio_r(priv, offset);

	val &= ~(clear);
	val |= set;
	gswip_mdio_w(priv, val, offset);
}

static u32 gswip_mii_r(struct gswip_priv *priv, u32 offset)
{
	return __raw_readl(priv->mii + (offset * 4));
}

static void gswip_mii_w(struct gswip_priv *priv, u32 val, u32 offset)
{
	__raw_writel(val, priv->mii + (offset * 4));
}

static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
			   u32 offset)
{
	u32 val = gswip_mii_r(priv, offset);

	val &= ~(clear);
	val |= set;
	gswip_mii_w(priv, val, offset);
}

static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
			       int port)
{
	switch (port) {
	case 0:
		gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG0);
		break;
	case 1:
		gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG1);
		break;
	case 5:
		gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG5);
		break;
	}
}

static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
				int port)
{
	switch (port) {
	case 0:
		gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU0);
		break;
	case 1:
		gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU1);
		break;
	case 5:
		gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU5);
		break;
	}
}

static int gswip_mdio_poll(struct gswip_priv *priv)
{
	int cnt = 100;

	while (likely(cnt--)) {
		u32 ctrl = gswip_mdio_r(priv, GSWIP_MDIO_CTRL);

		if ((ctrl & GSWIP_MDIO_CTRL_BUSY) == 0)
			return 0;
		usleep_range(20, 40);
	}

	return -ETIMEDOUT;
}

static int gswip_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct gswip_priv *priv = bus->priv;
	int err;

	err = gswip_mdio_poll(priv);
	if (err) {
		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
		return err;
	}

	gswip_mdio_w(priv, val, GSWIP_MDIO_WRITE);
	gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_WR |
		((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
		(reg & GSWIP_MDIO_CTRL_REGAD_MASK),
		GSWIP_MDIO_CTRL);

	return 0;
}

static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
{
	struct gswip_priv *priv = bus->priv;
	int err;

	err = gswip_mdio_poll(priv);
	if (err) {
		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
		return err;
	}

	gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD |
		((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
		(reg & GSWIP_MDIO_CTRL_REGAD_MASK),
		GSWIP_MDIO_CTRL);

	err = gswip_mdio_poll(priv);
	if (err) {
		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
		return err;
	}

	return gswip_mdio_r(priv, GSWIP_MDIO_READ);
}

static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
{
	struct dsa_switch *ds = priv->ds;

	ds->slave_mii_bus = devm_mdiobus_alloc(priv->dev);
	if (!ds->slave_mii_bus)
		return -ENOMEM;

	ds->slave_mii_bus->priv = priv;
	ds->slave_mii_bus->read = gswip_mdio_rd;
	ds->slave_mii_bus->write = gswip_mdio_wr;
	ds->slave_mii_bus->name = "lantiq,xrx200-mdio";
	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "%s-mii",
		 dev_name(priv->dev));
	ds->slave_mii_bus->parent = priv->dev;
	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;

	return of_mdiobus_register(ds->slave_mii_bus, mdio_np);
}
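
/* The PCE tables (active VLAN, VLAN mapping, MAC bridging, ...) are accessed
 * indirectly: wait for a pending access to finish (BAS cleared), program the
 * entry index and table id together with the operation mode, kick off the
 * access by setting BAS again and transfer key/value/mask through the
 * PCE_TBL_* data registers.
 */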

static int gswip_pce_table_entry_read(struct gswip_priv *priv,
				      struct gswip_pce_table_entry *tbl)
{
	int i;
	int err;
	u16 crtl;
	u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD :
					GSWIP_PCE_TBL_CTRL_OPMOD_ADRD;

	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
				     GSWIP_PCE_TBL_CTRL_BAS);
	if (err)
		return err;

	gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
				GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
			  tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS,
			  GSWIP_PCE_TBL_CTRL);

	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
				     GSWIP_PCE_TBL_CTRL_BAS);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
		tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i));

	for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
		tbl->val[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_VAL(i));

	tbl->mask = gswip_switch_r(priv, GSWIP_PCE_TBL_MASK);

	crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);

	tbl->type = !!(crtl & GSWIP_PCE_TBL_CTRL_TYPE);
	tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD);
	tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7;

	return 0;
}

static int gswip_pce_table_entry_write(struct gswip_priv *priv,
				       struct gswip_pce_table_entry *tbl)
{
	int i;
	int err;
	u16 crtl;
	u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR :
					GSWIP_PCE_TBL_CTRL_OPMOD_ADWR;

	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
				     GSWIP_PCE_TBL_CTRL_BAS);
	if (err)
		return err;

	gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
				GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
			  tbl->table | addr_mode,
			  GSWIP_PCE_TBL_CTRL);

	for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
		gswip_switch_w(priv, tbl->key[i], GSWIP_PCE_TBL_KEY(i));

	for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
		gswip_switch_w(priv, tbl->val[i], GSWIP_PCE_TBL_VAL(i));

	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
				GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
			  tbl->table | addr_mode,
			  GSWIP_PCE_TBL_CTRL);

	gswip_switch_w(priv, tbl->mask, GSWIP_PCE_TBL_MASK);

	crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
	crtl &= ~(GSWIP_PCE_TBL_CTRL_TYPE | GSWIP_PCE_TBL_CTRL_VLD |
		  GSWIP_PCE_TBL_CTRL_GMAP_MASK);
	if (tbl->type)
		crtl |= GSWIP_PCE_TBL_CTRL_TYPE;
	if (tbl->valid)
		crtl |= GSWIP_PCE_TBL_CTRL_VLD;
	crtl |= (tbl->gmap << 7) & GSWIP_PCE_TBL_CTRL_GMAP_MASK;
	crtl |= GSWIP_PCE_TBL_CTRL_BAS;
	gswip_switch_w(priv, crtl, GSWIP_PCE_TBL_CTRL);

	return gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
				      GSWIP_PCE_TBL_CTRL_BAS);
}

/* Add the LAN port into a bridge with the CPU port by
 * default. This prevents automatic forwarding of
 * packets between the LAN ports when no explicit
 * bridge is configured.
 */
static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
{
	struct gswip_pce_table_entry vlan_active = {0,};
	struct gswip_pce_table_entry vlan_mapping = {0,};
	unsigned int cpu_port = priv->hw_info->cpu_port;
	unsigned int max_ports = priv->hw_info->max_ports;
	int err;

	if (port >= max_ports) {
		dev_err(priv->dev, "single port for %i supported\n", port);
		return -EIO;
	}

	vlan_active.index = port + 1;
	vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
	vlan_active.key[0] = 0; /* vid */
	vlan_active.val[0] = port + 1 /* fid */;
	vlan_active.valid = add;
	err = gswip_pce_table_entry_write(priv, &vlan_active);
	if (err) {
		dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
		return err;
	}

	if (!add)
		return 0;

	vlan_mapping.index = port + 1;
	vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
	vlan_mapping.val[0] = 0 /* vid */;
	vlan_mapping.val[1] = BIT(port) | BIT(cpu_port);
	vlan_mapping.val[2] = 0;
	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
		return err;
	}

	return 0;
}

static int gswip_port_enable(struct dsa_switch *ds, int port,
			     struct phy_device *phydev)
{
	struct gswip_priv *priv = ds->priv;
	int err;

	if (!dsa_is_cpu_port(ds, port)) {
		err = gswip_add_single_port_br(priv, port, true);
		if (err)
			return err;
	}

	/* RMON Counter Enable for port */
	gswip_switch_w(priv, GSWIP_BM_PCFG_CNTEN, GSWIP_BM_PCFGp(port));

	/* enable port fetch/store dma & VLAN Modification */
	gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_EN |
				   GSWIP_FDMA_PCTRL_VLANMOD_BOTH,
			  GSWIP_FDMA_PCTRLp(port));
	gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
			  GSWIP_SDMA_PCTRLp(port));

	if (!dsa_is_cpu_port(ds, port)) {
		u32 macconf = GSWIP_MDIO_PHY_LINK_AUTO |
			      GSWIP_MDIO_PHY_SPEED_AUTO |
			      GSWIP_MDIO_PHY_FDUP_AUTO |
			      GSWIP_MDIO_PHY_FCONTX_AUTO |
			      GSWIP_MDIO_PHY_FCONRX_AUTO |
			      (phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK);

		gswip_mdio_w(priv, macconf, GSWIP_MDIO_PHYp(port));
		/* Activate MDIO auto polling */
		gswip_mdio_mask(priv, 0, BIT(port), GSWIP_MDIO_MDC_CFG0);
	}

	return 0;
}

static void gswip_port_disable(struct dsa_switch *ds, int port)
{
	struct gswip_priv *priv = ds->priv;

	if (!dsa_is_cpu_port(ds, port)) {
		gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_DOWN,
				GSWIP_MDIO_PHY_LINK_MASK,
				GSWIP_MDIO_PHYp(port));
		/* Deactivate MDIO auto polling */
		gswip_mdio_mask(priv, BIT(port), 0, GSWIP_MDIO_MDC_CFG0);
	}

	gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0,
			  GSWIP_FDMA_PCTRLp(port));
	gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
			  GSWIP_SDMA_PCTRLp(port));
}
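
/* The PCE parser microcode from lantiq_pce.h is written entry by entry into
 * the parser table through the same indirect PCE table interface and then
 * marked as valid so the switch starts using it.
 */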

static int gswip_pce_load_microcode(struct gswip_priv *priv)
{
	int i;
	int err;

	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
				GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
			  GSWIP_PCE_TBL_CTRL_OPMOD_ADWR, GSWIP_PCE_TBL_CTRL);
	gswip_switch_w(priv, 0, GSWIP_PCE_TBL_MASK);

	for (i = 0; i < ARRAY_SIZE(gswip_pce_microcode); i++) {
		gswip_switch_w(priv, i, GSWIP_PCE_TBL_ADDR);
		gswip_switch_w(priv, gswip_pce_microcode[i].val_0,
			       GSWIP_PCE_TBL_VAL(0));
		gswip_switch_w(priv, gswip_pce_microcode[i].val_1,
			       GSWIP_PCE_TBL_VAL(1));
		gswip_switch_w(priv, gswip_pce_microcode[i].val_2,
			       GSWIP_PCE_TBL_VAL(2));
		gswip_switch_w(priv, gswip_pce_microcode[i].val_3,
			       GSWIP_PCE_TBL_VAL(3));

		/* start the table access: */
		gswip_switch_mask(priv, 0, GSWIP_PCE_TBL_CTRL_BAS,
				  GSWIP_PCE_TBL_CTRL);
		err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
					     GSWIP_PCE_TBL_CTRL_BAS);
		if (err)
			return err;
	}

	/* tell the switch that the microcode is loaded */
	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MC_VALID,
			  GSWIP_PCE_GCTRL_0);

	return 0;
}

static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
				     bool vlan_filtering)
{
	struct gswip_priv *priv = ds->priv;
	struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;

	/* Do not allow changing the VLAN filtering options while in bridge */
	if (!!(priv->port_vlan_filter & BIT(port)) != vlan_filtering && bridge)
		return -EIO;

	if (vlan_filtering) {
		/* Use port based VLAN tag */
		gswip_switch_mask(priv,
				  GSWIP_PCE_VCTRL_VSR,
				  GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
				  GSWIP_PCE_VCTRL_VEMR,
				  GSWIP_PCE_VCTRL(port));
		gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_TVM, 0,
				  GSWIP_PCE_PCTRL_0p(port));
	} else {
		/* Use port based VLAN tag */
		gswip_switch_mask(priv,
				  GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
				  GSWIP_PCE_VCTRL_VEMR,
				  GSWIP_PCE_VCTRL_VSR,
				  GSWIP_PCE_VCTRL(port));
		gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_TVM,
				  GSWIP_PCE_PCTRL_0p(port));
	}

	return 0;
}

static int gswip_setup(struct dsa_switch *ds)
{
	struct gswip_priv *priv = ds->priv;
	unsigned int cpu_port = priv->hw_info->cpu_port;
	int i;
	int err;

	gswip_switch_w(priv, GSWIP_SWRES_R0, GSWIP_SWRES);
	usleep_range(5000, 10000);
	gswip_switch_w(priv, 0, GSWIP_SWRES);

	/* disable port fetch/store dma on all ports */
	for (i = 0; i < priv->hw_info->max_ports; i++) {
		gswip_port_disable(ds, i);
		gswip_port_vlan_filtering(ds, i, false);
	}

	/* enable Switch */
	gswip_mdio_mask(priv, 0, GSWIP_MDIO_GLOB_ENABLE, GSWIP_MDIO_GLOB);

	err = gswip_pce_load_microcode(priv);
	if (err) {
		dev_err(priv->dev, "writing PCE microcode failed, %i", err);
		return err;
	}

	/* Default unknown Broadcast/Multicast/Unicast port maps */
	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP1);
	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2);
	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3);

	/* disable PHY auto polling */
	gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0);
	/* Configure the MDIO Clock 2.5 MHz */
	gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);

	/* Disable the xMII link */
	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 0);
	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 1);
	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 5);

	/* enable special tag insertion on cpu port */
	gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
			  GSWIP_FDMA_PCTRLp(cpu_port));

	/* accept special tag in ingress direction */
	gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
			  GSWIP_PCE_PCTRL_0p(cpu_port));

	gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
			  GSWIP_MAC_CTRL_2p(cpu_port));
	gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8, GSWIP_MAC_FLEN);
	gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
			  GSWIP_BM_QUEUE_GCTRL);

	/* VLAN aware Switching */
	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_VLAN, GSWIP_PCE_GCTRL_0);

	/* Flush MAC Table */
	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MTFL, GSWIP_PCE_GCTRL_0);

	err = gswip_switch_r_timeout(priv, GSWIP_PCE_GCTRL_0,
				     GSWIP_PCE_GCTRL_0_MTFL);
	if (err) {
		dev_err(priv->dev, "MAC flushing didn't finish\n");
		return err;
	}

	gswip_port_enable(ds, cpu_port, NULL);
	return 0;
}

static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
						    int port)
{
	return DSA_TAG_PROTO_GSWIP;
}
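
/* Hardware VLAN table indexes 1..max_ports are reserved for the per-port
 * single port bridges written in gswip_add_single_port_br() (index = port + 1),
 * so the bridge/VLAN helpers below only use priv->vlans[] slots starting at
 * max_ports.
 */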

static int gswip_vlan_active_create(struct gswip_priv *priv,
				    struct net_device *bridge,
				    int fid, u16 vid)
{
	struct gswip_pce_table_entry vlan_active = {0,};
	unsigned int max_ports = priv->hw_info->max_ports;
	int idx = -1;
	int err;
	int i;

	/* Look for a free slot */
	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
		if (!priv->vlans[i].bridge) {
			idx = i;
			break;
		}
	}

	if (idx == -1)
		return -ENOSPC;

	if (fid == -1)
		fid = idx;

	vlan_active.index = idx;
	vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
	vlan_active.key[0] = vid;
	vlan_active.val[0] = fid;
	vlan_active.valid = true;

	err = gswip_pce_table_entry_write(priv, &vlan_active);
	if (err) {
		dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
		return err;
	}

	priv->vlans[idx].bridge = bridge;
	priv->vlans[idx].vid = vid;
	priv->vlans[idx].fid = fid;

	return idx;
}

static int gswip_vlan_active_remove(struct gswip_priv *priv, int idx)
{
	struct gswip_pce_table_entry vlan_active = {0,};
	int err;

	vlan_active.index = idx;
	vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
	vlan_active.valid = false;
	err = gswip_pce_table_entry_write(priv, &vlan_active);
	if (err)
		dev_err(priv->dev, "failed to delete active VLAN: %d\n", err);
	priv->vlans[idx].bridge = NULL;

	return err;
}

static int gswip_vlan_add_unaware(struct gswip_priv *priv,
				  struct net_device *bridge, int port)
{
	struct gswip_pce_table_entry vlan_mapping = {0,};
	unsigned int max_ports = priv->hw_info->max_ports;
	unsigned int cpu_port = priv->hw_info->cpu_port;
	bool active_vlan_created = false;
	int idx = -1;
	int i;
	int err;

	/* Check if there is already a page for this bridge */
	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
		if (priv->vlans[i].bridge == bridge) {
			idx = i;
			break;
		}
	}

	/* If this bridge is not programmed yet, add an Active VLAN table
	 * entry in a free slot and prepare the VLAN mapping table entry.
	 */
	if (idx == -1) {
		idx = gswip_vlan_active_create(priv, bridge, -1, 0);
		if (idx < 0)
			return idx;
		active_vlan_created = true;

		vlan_mapping.index = idx;
		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
		/* VLAN ID byte, maps to the VLAN ID of vlan active table */
		vlan_mapping.val[0] = 0;
	} else {
		/* Read the existing VLAN mapping entry from the switch */
		vlan_mapping.index = idx;
		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
		err = gswip_pce_table_entry_read(priv, &vlan_mapping);
		if (err) {
			dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
				err);
			return err;
		}
	}

	/* Update the VLAN mapping entry and write it to the switch */
	vlan_mapping.val[1] |= BIT(cpu_port);
	vlan_mapping.val[1] |= BIT(port);
	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
		/* In case an Active VLAN was created delete it again */
		if (active_vlan_created)
			gswip_vlan_active_remove(priv, idx);
		return err;
	}

	gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));

	return 0;
}

static int gswip_vlan_add_aware(struct gswip_priv *priv,
				struct net_device *bridge, int port,
				u16 vid, bool untagged,
				bool pvid)
{
	struct gswip_pce_table_entry vlan_mapping = {0,};
	unsigned int max_ports = priv->hw_info->max_ports;
	unsigned int cpu_port = priv->hw_info->cpu_port;
	bool active_vlan_created = false;
	int idx = -1;
	int fid = -1;
	int i;
	int err;

	/* Check if there is already a page for this bridge */
	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
		if (priv->vlans[i].bridge == bridge) {
			if (fid != -1 && fid != priv->vlans[i].fid)
				dev_err(priv->dev, "one bridge with multiple flow ids\n");
			fid = priv->vlans[i].fid;
			if (priv->vlans[i].vid == vid) {
				idx = i;
				break;
			}
		}
	}

	/* If this bridge is not programmed yet, add an Active VLAN table
	 * entry in a free slot and prepare the VLAN mapping table entry.
	 */
	if (idx == -1) {
		idx = gswip_vlan_active_create(priv, bridge, fid, vid);
		if (idx < 0)
			return idx;
		active_vlan_created = true;

		vlan_mapping.index = idx;
		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
		/* VLAN ID byte, maps to the VLAN ID of vlan active table */
		vlan_mapping.val[0] = vid;
	} else {
		/* Read the existing VLAN mapping entry from the switch */
		vlan_mapping.index = idx;
		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
		err = gswip_pce_table_entry_read(priv, &vlan_mapping);
		if (err) {
			dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
				err);
			return err;
		}
	}

	vlan_mapping.val[0] = vid;
	/* Update the VLAN mapping entry and write it to the switch */
	vlan_mapping.val[1] |= BIT(cpu_port);
	vlan_mapping.val[2] |= BIT(cpu_port);
	vlan_mapping.val[1] |= BIT(port);
	if (untagged)
		vlan_mapping.val[2] &= ~BIT(port);
	else
		vlan_mapping.val[2] |= BIT(port);
	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
		/* In case an Active VLAN was created delete it again */
		if (active_vlan_created)
			gswip_vlan_active_remove(priv, idx);
		return err;
	}

	if (pvid)
		gswip_switch_w(priv, idx, GSWIP_PCE_DEFPVID(port));

	return 0;
}

static int gswip_vlan_remove(struct gswip_priv *priv,
			     struct net_device *bridge, int port,
			     u16 vid, bool pvid, bool vlan_aware)
{
	struct gswip_pce_table_entry vlan_mapping = {0,};
	unsigned int max_ports = priv->hw_info->max_ports;
	unsigned int cpu_port = priv->hw_info->cpu_port;
	int idx = -1;
	int i;
	int err;

	/* Check if there is already a page for this bridge */
	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
		if (priv->vlans[i].bridge == bridge &&
		    (!vlan_aware || priv->vlans[i].vid == vid)) {
			idx = i;
			break;
		}
	}

	if (idx == -1) {
		dev_err(priv->dev, "bridge to leave does not exist\n");
		return -ENOENT;
	}

	vlan_mapping.index = idx;
	vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
	err = gswip_pce_table_entry_read(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to read VLAN mapping: %d\n", err);
		return err;
	}

	vlan_mapping.val[1] &= ~BIT(port);
	vlan_mapping.val[2] &= ~BIT(port);
	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
		return err;
	}

	/* In case all ports are removed from the bridge, remove the VLAN */
	if ((vlan_mapping.val[1] & ~BIT(cpu_port)) == 0) {
		err = gswip_vlan_active_remove(priv, idx);
		if (err) {
			dev_err(priv->dev, "failed to write active VLAN: %d\n",
				err);
			return err;
		}
	}

	/* GSWIP 2.2 (GRX300) and later program here the VID directly. */
	if (pvid)
		gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));

	return 0;
}

static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
				  struct net_device *bridge)
{
	struct gswip_priv *priv = ds->priv;
	int err;

	/* When the bridge uses VLAN filtering we have to configure VLAN
	 * specific bridges. No bridge is configured here.
	 */
	if (!br_vlan_enabled(bridge)) {
		err = gswip_vlan_add_unaware(priv, bridge, port);
		if (err)
			return err;
		priv->port_vlan_filter &= ~BIT(port);
	} else {
		priv->port_vlan_filter |= BIT(port);
	}
	return gswip_add_single_port_br(priv, port, false);
}

static void gswip_port_bridge_leave(struct dsa_switch *ds, int port,
				    struct net_device *bridge)
{
	struct gswip_priv *priv = ds->priv;

	gswip_add_single_port_br(priv, port, true);

	/* When the bridge uses VLAN filtering we have to configure VLAN
	 * specific bridges. No bridge is configured here.
	 */
	if (!br_vlan_enabled(bridge))
		gswip_vlan_remove(priv, bridge, port, 0, true, false);
}

static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	struct gswip_priv *priv = ds->priv;
	struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
	unsigned int max_ports = priv->hw_info->max_ports;
	u16 vid;
	int i;
	int pos = max_ports;

	/* We only support VLAN filtering on bridges */
	if (!dsa_is_cpu_port(ds, port) && !bridge)
		return -EOPNOTSUPP;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
		int idx = -1;

		/* Check if there is already a page for this VLAN */
		for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
			if (priv->vlans[i].bridge == bridge &&
			    priv->vlans[i].vid == vid) {
				idx = i;
				break;
			}
		}

		/* If this VLAN is not programmed yet, we have to reserve
		 * one entry in the VLAN table. Make sure we start at the
		 * next position round.
		 */
		if (idx == -1) {
			/* Look for a free slot */
			for (; pos < ARRAY_SIZE(priv->vlans); pos++) {
				if (!priv->vlans[pos].bridge) {
					idx = pos;
					pos++;
					break;
				}
			}

			if (idx == -1)
				return -ENOSPC;
		}
	}

	return 0;
}

static void gswip_port_vlan_add(struct dsa_switch *ds, int port,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct gswip_priv *priv = ds->priv;
	struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	u16 vid;

	/* We have to receive all packets on the CPU port and should not
	 * do any VLAN filtering here. This is also called with bridge
	 * NULL and then we do not know for which bridge to configure
	 * this.
	 */
	if (dsa_is_cpu_port(ds, port))
		return;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
		gswip_vlan_add_aware(priv, bridge, port, vid, untagged, pvid);
}

static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
			       const struct switchdev_obj_port_vlan *vlan)
{
	struct gswip_priv *priv = ds->priv;
	struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	u16 vid;
	int err;

	/* We have to receive all packets on the CPU port and should not
	 * do any VLAN filtering here. This is also called with bridge
	 * NULL and then we do not know for which bridge to configure
	 * this.
	 */
	if (dsa_is_cpu_port(ds, port))
		return 0;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
		err = gswip_vlan_remove(priv, bridge, port, vid, pvid, true);
		if (err)
			return err;
	}

	return 0;
}
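
/* The MAC bridging table holds 2048 entries; fast ageing walks all of them
 * and invalidates the dynamically learned (non-static) entries that belong
 * to the given port.
 */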

static void gswip_port_fast_age(struct dsa_switch *ds, int port)
{
	struct gswip_priv *priv = ds->priv;
	struct gswip_pce_table_entry mac_bridge = {0,};
	int i;
	int err;

	for (i = 0; i < 2048; i++) {
		mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
		mac_bridge.index = i;

		err = gswip_pce_table_entry_read(priv, &mac_bridge);
		if (err) {
			dev_err(priv->dev, "failed to read mac bridge: %d\n",
				err);
			return;
		}

		if (!mac_bridge.valid)
			continue;

		if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC)
			continue;

		if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) != port)
			continue;

		mac_bridge.valid = false;
		err = gswip_pce_table_entry_write(priv, &mac_bridge);
		if (err) {
			dev_err(priv->dev, "failed to write mac bridge: %d\n",
				err);
			return;
		}
	}
}

static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
	struct gswip_priv *priv = ds->priv;
	u32 stp_state;

	switch (state) {
	case BR_STATE_DISABLED:
		gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
				  GSWIP_SDMA_PCTRLp(port));
		return;
	case BR_STATE_BLOCKING:
	case BR_STATE_LISTENING:
		stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LISTEN;
		break;
	case BR_STATE_LEARNING:
		stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LEARNING;
		break;
	case BR_STATE_FORWARDING:
		stp_state = GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING;
		break;
	default:
		dev_err(priv->dev, "invalid STP state: %d\n", state);
		return;
	}

	gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
			  GSWIP_SDMA_PCTRLp(port));
	gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_PSTATE_MASK, stp_state,
			  GSWIP_PCE_PCTRL_0p(port));
}
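
/* FDB entries are stored in the MAC bridging table: the MAC address is split
 * over key[0..2] (low word first), key[3] holds the flow id (FID) of the
 * bridge, val[0] is the port map and val[1] marks the entry as static.
 */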

static int gswip_port_fdb(struct dsa_switch *ds, int port,
			  const unsigned char *addr, u16 vid, bool add)
{
	struct gswip_priv *priv = ds->priv;
	struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
	struct gswip_pce_table_entry mac_bridge = {0,};
	unsigned int cpu_port = priv->hw_info->cpu_port;
	int fid = -1;
	int i;
	int err;

	if (!bridge)
		return -EINVAL;

	for (i = cpu_port; i < ARRAY_SIZE(priv->vlans); i++) {
		if (priv->vlans[i].bridge == bridge) {
			fid = priv->vlans[i].fid;
			break;
		}
	}

	if (fid == -1) {
		dev_err(priv->dev, "Port not part of a bridge\n");
		return -EINVAL;
	}

	mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
	mac_bridge.key_mode = true;
	mac_bridge.key[0] = addr[5] | (addr[4] << 8);
	mac_bridge.key[1] = addr[3] | (addr[2] << 8);
	mac_bridge.key[2] = addr[1] | (addr[0] << 8);
	mac_bridge.key[3] = fid;
	mac_bridge.val[0] = add ? BIT(port) : 0; /* port map */
	mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_STATIC;
	mac_bridge.valid = add;

	err = gswip_pce_table_entry_write(priv, &mac_bridge);
	if (err)
		dev_err(priv->dev, "failed to write mac bridge: %d\n", err);

	return err;
}

static int gswip_port_fdb_add(struct dsa_switch *ds, int port,
			      const unsigned char *addr, u16 vid)
{
	return gswip_port_fdb(ds, port, addr, vid, true);
}

static int gswip_port_fdb_del(struct dsa_switch *ds, int port,
			      const unsigned char *addr, u16 vid)
{
	return gswip_port_fdb(ds, port, addr, vid, false);
}

static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
			       dsa_fdb_dump_cb_t *cb, void *data)
{
	struct gswip_priv *priv = ds->priv;
	struct gswip_pce_table_entry mac_bridge = {0,};
	unsigned char addr[6];
	int i;
	int err;

	for (i = 0; i < 2048; i++) {
		mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
		mac_bridge.index = i;

		err = gswip_pce_table_entry_read(priv, &mac_bridge);
		if (err) {
			dev_err(priv->dev, "failed to read mac bridge: %d\n",
				err);
			return err;
		}

		if (!mac_bridge.valid)
			continue;

		addr[5] = mac_bridge.key[0] & 0xff;
		addr[4] = (mac_bridge.key[0] >> 8) & 0xff;
		addr[3] = mac_bridge.key[1] & 0xff;
		addr[2] = (mac_bridge.key[1] >> 8) & 0xff;
		addr[1] = mac_bridge.key[2] & 0xff;
		addr[0] = (mac_bridge.key[2] >> 8) & 0xff;
		if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) {
			if (mac_bridge.val[0] & BIT(port))
				cb(addr, 0, true, data);
		} else {
			if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port)
				cb(addr, 0, false, data);
		}
	}

	return 0;
}
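
/* Ports 0 and 1 are wired to external xMII/RGMII interfaces, ports 2-4 go to
 * the internal GPHYs and port 5 can use either RGMII or the internal PHY, so
 * the supported link modes are restricted per port accordingly.
 */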

static void gswip_phylink_validate(struct dsa_switch *ds, int port,
				   unsigned long *supported,
				   struct phylink_link_state *state)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	switch (port) {
	case 0:
	case 1:
		if (!phy_interface_mode_is_rgmii(state->interface) &&
		    state->interface != PHY_INTERFACE_MODE_MII &&
		    state->interface != PHY_INTERFACE_MODE_REVMII &&
		    state->interface != PHY_INTERFACE_MODE_RMII)
			goto unsupported;
		break;
	case 2:
	case 3:
	case 4:
		if (state->interface != PHY_INTERFACE_MODE_INTERNAL)
			goto unsupported;
		break;
	case 5:
		if (!phy_interface_mode_is_rgmii(state->interface) &&
		    state->interface != PHY_INTERFACE_MODE_INTERNAL)
			goto unsupported;
		break;
	default:
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		dev_err(ds->dev, "Unsupported port: %i\n", port);
		return;
	}

	/* Allow all the expected bits */
	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);
	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	/* With the exclusion of MII and Reverse MII, we support Gigabit,
	 * including Half duplex
	 */
	if (state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_REVMII) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseT_Half);
	}

	phylink_set(mask, 10baseT_Half);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Half);
	phylink_set(mask, 100baseT_Full);

	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	return;

unsupported:
	bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
	dev_err(ds->dev, "Unsupported interface: %d\n", state->interface);
}

static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
				     unsigned int mode,
				     const struct phylink_link_state *state)
{
	struct gswip_priv *priv = ds->priv;
	u32 miicfg = 0;

	miicfg |= GSWIP_MII_CFG_LDCLKDIS;

	switch (state->interface) {
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_INTERNAL:
		miicfg |= GSWIP_MII_CFG_MODE_MIIM;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		miicfg |= GSWIP_MII_CFG_MODE_MIIP;
		break;
	case PHY_INTERFACE_MODE_RMII:
		miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		miicfg |= GSWIP_MII_CFG_MODE_RGMII;
		break;
	default:
		dev_err(ds->dev,
			"Unsupported interface: %d\n", state->interface);
		return;
	}
	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_MODE_MASK, miicfg, port);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_RGMII_ID:
		gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK |
					  GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
		break;
	case PHY_INTERFACE_MODE_RGMII_RXID:
		gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
		break;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK, 0, port);
		break;
	default:
		break;
	}
}
*ds
, int port
,
1503 phy_interface_t interface
)
1505 struct gswip_priv
*priv
= ds
->priv
;
1507 gswip_mii_mask_cfg(priv
, GSWIP_MII_CFG_EN
, 0, port
);

static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
				      unsigned int mode,
				      phy_interface_t interface,
				      struct phy_device *phydev)
{
	struct gswip_priv *priv = ds->priv;

	/* Enable the xMII interface only for the external PHY */
	if (interface != PHY_INTERFACE_MODE_INTERNAL)
		gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
}

static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
			      uint8_t *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++)
		strncpy(data + i * ETH_GSTRING_LEN, gswip_rmon_cnt[i].name,
			ETH_GSTRING_LEN);
}
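
/* RMON counters live in the BM RAM and are read through an indirect access
 * similar to the PCE tables; each access returns a 32-bit value split over
 * two 16-bit data registers. 64-bit counters occupy two consecutive entries.
 */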

static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table,
				    u32 index)
{
	u32 result;
	int err;

	gswip_switch_w(priv, index, GSWIP_BM_RAM_ADDR);
	gswip_switch_mask(priv, GSWIP_BM_RAM_CTRL_ADDR_MASK |
				GSWIP_BM_RAM_CTRL_OPMOD,
			      table | GSWIP_BM_RAM_CTRL_BAS,
			      GSWIP_BM_RAM_CTRL);

	err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL,
				     GSWIP_BM_RAM_CTRL_BAS);
	if (err) {
		dev_err(priv->dev, "timeout while reading table: %u, index: %u",
			table, index);
		return 0;
	}

	result = gswip_switch_r(priv, GSWIP_BM_RAM_VAL(0));
	result |= gswip_switch_r(priv, GSWIP_BM_RAM_VAL(1)) << 16;

	return result;
}

static void gswip_get_ethtool_stats(struct dsa_switch *ds, int port,
				    uint64_t *data)
{
	struct gswip_priv *priv = ds->priv;
	const struct gswip_rmon_cnt_desc *rmon_cnt;
	int i;
	u64 high;

	for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) {
		rmon_cnt = &gswip_rmon_cnt[i];

		data[i] = gswip_bcm_ram_entry_read(priv, port,
						   rmon_cnt->offset);
		if (rmon_cnt->size == 2) {
			high = gswip_bcm_ram_entry_read(priv, port,
							rmon_cnt->offset + 1);
			data[i] |= high << 32;
		}
	}
}
*ds
, int port
, int sset
)
1584 if (sset
!= ETH_SS_STATS
)
1587 return ARRAY_SIZE(gswip_rmon_cnt
);

static const struct dsa_switch_ops gswip_switch_ops = {
	.get_tag_protocol	= gswip_get_tag_protocol,
	.setup			= gswip_setup,
	.port_enable		= gswip_port_enable,
	.port_disable		= gswip_port_disable,
	.port_bridge_join	= gswip_port_bridge_join,
	.port_bridge_leave	= gswip_port_bridge_leave,
	.port_fast_age		= gswip_port_fast_age,
	.port_vlan_filtering	= gswip_port_vlan_filtering,
	.port_vlan_prepare	= gswip_port_vlan_prepare,
	.port_vlan_add		= gswip_port_vlan_add,
	.port_vlan_del		= gswip_port_vlan_del,
	.port_stp_state_set	= gswip_port_stp_state_set,
	.port_fdb_add		= gswip_port_fdb_add,
	.port_fdb_del		= gswip_port_fdb_del,
	.port_fdb_dump		= gswip_port_fdb_dump,
	.phylink_validate	= gswip_phylink_validate,
	.phylink_mac_config	= gswip_phylink_mac_config,
	.phylink_mac_link_down	= gswip_phylink_mac_link_down,
	.phylink_mac_link_up	= gswip_phylink_mac_link_up,
	.get_strings		= gswip_get_strings,
	.get_ethtool_stats	= gswip_get_ethtool_stats,
	.get_sset_count		= gswip_get_sset_count,
};

static const struct xway_gphy_match_data xrx200a1x_gphy_data = {
	.fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin",
	.ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin",
};

static const struct xway_gphy_match_data xrx200a2x_gphy_data = {
	.fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin",
	.ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin",
};

static const struct xway_gphy_match_data xrx300_gphy_data = {
	.fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin",
	.ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin",
};

static const struct of_device_id xway_gphy_match[] = {
	{ .compatible = "lantiq,xrx200-gphy-fw", .data = NULL },
	{ .compatible = "lantiq,xrx200a1x-gphy-fw", .data = &xrx200a1x_gphy_data },
	{ .compatible = "lantiq,xrx200a2x-gphy-fw", .data = &xrx200a2x_gphy_data },
	{ .compatible = "lantiq,xrx300-gphy-fw", .data = &xrx300_gphy_data },
	{ .compatible = "lantiq,xrx330-gphy-fw", .data = &xrx300_gphy_data },
	{},
};
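
/* Loading a GPHY firmware image: gate the GPHY clock, hold the core in reset,
 * copy the image into DMA-coherent memory aligned to a 16 kB boundary, tell
 * the RCU where to find it and finally release the reset.
 */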

static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gphy_fw)
{
	struct device *dev = priv->dev;
	const struct firmware *fw;
	void *fw_addr;
	dma_addr_t dma_addr;
	dma_addr_t dev_addr;
	size_t size;
	int ret;

	ret = clk_prepare_enable(gphy_fw->clk_gate);
	if (ret)
		return ret;

	reset_control_assert(gphy_fw->reset);

	ret = request_firmware(&fw, gphy_fw->fw_name, dev);
	if (ret) {
		dev_err(dev, "failed to load firmware: %s, error: %i\n",
			gphy_fw->fw_name, ret);
		return ret;
	}

	/* GPHY cores need the firmware code in a persistent and contiguous
	 * memory area with a 16 kB boundary aligned start address.
	 */
	size = fw->size + XRX200_GPHY_FW_ALIGN;

	fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
	if (fw_addr) {
		fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN);
		dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN);
		memcpy(fw_addr, fw->data, fw->size);
	} else {
		dev_err(dev, "failed to alloc firmware memory\n");
		release_firmware(fw);
		return -ENOMEM;
	}

	release_firmware(fw);

	ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, dev_addr);
	if (ret)
		return ret;

	reset_control_deassert(gphy_fw->reset);

	return ret;
}

static int gswip_gphy_fw_probe(struct gswip_priv *priv,
			       struct gswip_gphy_fw *gphy_fw,
			       struct device_node *gphy_fw_np, int i)
{
	struct device *dev = priv->dev;
	u32 gphy_mode;
	int ret;
	char gphyname[10];

	snprintf(gphyname, sizeof(gphyname), "gphy%d", i);

	gphy_fw->clk_gate = devm_clk_get(dev, gphyname);
	if (IS_ERR(gphy_fw->clk_gate)) {
		dev_err(dev, "Failed to lookup gate clock\n");
		return PTR_ERR(gphy_fw->clk_gate);
	}

	ret = of_property_read_u32(gphy_fw_np, "reg", &gphy_fw->fw_addr_offset);
	if (ret)
		return ret;

	ret = of_property_read_u32(gphy_fw_np, "lantiq,gphy-mode", &gphy_mode);
	/* Default to GE mode */
	if (ret)
		gphy_mode = GPHY_MODE_GE;

	switch (gphy_mode) {
	case GPHY_MODE_FE:
		gphy_fw->fw_name = priv->gphy_fw_name_cfg->fe_firmware_name;
		break;
	case GPHY_MODE_GE:
		gphy_fw->fw_name = priv->gphy_fw_name_cfg->ge_firmware_name;
		break;
	default:
		dev_err(dev, "Unknown GPHY mode %d\n", gphy_mode);
		return -EINVAL;
	}

	gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np);
	if (IS_ERR(gphy_fw->reset)) {
		if (PTR_ERR(gphy_fw->reset) != -EPROBE_DEFER)
			dev_err(dev, "Failed to lookup gphy reset\n");
		return PTR_ERR(gphy_fw->reset);
	}

	return gswip_gphy_fw_load(priv, gphy_fw);
}

static void gswip_gphy_fw_remove(struct gswip_priv *priv,
				 struct gswip_gphy_fw *gphy_fw)
{
	int ret;

	/* check if the device was fully probed */
	if (!gphy_fw->fw_name)
		return;

	ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, 0);
	if (ret)
		dev_err(priv->dev, "can not reset GPHY FW pointer");

	clk_disable_unprepare(gphy_fw->clk_gate);

	reset_control_put(gphy_fw->reset);
}

static int gswip_gphy_fw_list(struct gswip_priv *priv,
			      struct device_node *gphy_fw_list_np, u32 version)
{
	struct device *dev = priv->dev;
	struct device_node *gphy_fw_np;
	const struct of_device_id *match;
	int err;
	int i = 0;

	/* The VRX200 rev 1.1 uses the GSWIP 2.0 and needs the older
	 * GPHY firmware. The VRX200 rev 1.2 uses the GSWIP 2.1 and also
	 * needs a different GPHY firmware.
	 */
	if (of_device_is_compatible(gphy_fw_list_np, "lantiq,xrx200-gphy-fw")) {
		switch (version) {
		case GSWIP_VERSION_2_0:
			priv->gphy_fw_name_cfg = &xrx200a1x_gphy_data;
			break;
		case GSWIP_VERSION_2_1:
			priv->gphy_fw_name_cfg = &xrx200a2x_gphy_data;
			break;
		default:
			dev_err(dev, "unknown GSWIP version: 0x%x", version);
			return -ENOENT;
		}
	}

	match = of_match_node(xway_gphy_match, gphy_fw_list_np);
	if (match && match->data)
		priv->gphy_fw_name_cfg = match->data;

	if (!priv->gphy_fw_name_cfg) {
		dev_err(dev, "GPHY compatible type not supported");
		return -ENOENT;
	}

	priv->num_gphy_fw = of_get_available_child_count(gphy_fw_list_np);
	if (!priv->num_gphy_fw)
		return -ENOENT;

	priv->rcu_regmap = syscon_regmap_lookup_by_phandle(gphy_fw_list_np,
							   "lantiq,rcu");
	if (IS_ERR(priv->rcu_regmap))
		return PTR_ERR(priv->rcu_regmap);

	priv->gphy_fw = devm_kmalloc_array(dev, priv->num_gphy_fw,
					   sizeof(*priv->gphy_fw),
					   GFP_KERNEL | __GFP_ZERO);
	if (!priv->gphy_fw)
		return -ENOMEM;

	for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
		err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
					  gphy_fw_np, i);
		if (err)
			goto remove_gphy;
		i++;
	}

	return 0;

remove_gphy:
	for (i = 0; i < priv->num_gphy_fw; i++)
		gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
	return err;
}

static int gswip_probe(struct platform_device *pdev)
{
	struct gswip_priv *priv;
	struct resource *gswip_res, *mdio_res, *mii_res;
	struct device_node *mdio_np, *gphy_fw_np;
	struct device *dev = &pdev->dev;
	int err;
	int i;
	u32 version;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	gswip_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->gswip = devm_ioremap_resource(dev, gswip_res);
	if (IS_ERR(priv->gswip))
		return PTR_ERR(priv->gswip);

	mdio_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	priv->mdio = devm_ioremap_resource(dev, mdio_res);
	if (IS_ERR(priv->mdio))
		return PTR_ERR(priv->mdio);

	mii_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	priv->mii = devm_ioremap_resource(dev, mii_res);
	if (IS_ERR(priv->mii))
		return PTR_ERR(priv->mii);

	priv->hw_info = of_device_get_match_data(dev);
	if (!priv->hw_info)
		return -EINVAL;

	priv->ds = dsa_switch_alloc(dev, priv->hw_info->max_ports);
	if (!priv->ds)
		return -ENOMEM;

	priv->ds->priv = priv;
	priv->ds->ops = &gswip_switch_ops;
	priv->dev = dev;
	version = gswip_switch_r(priv, GSWIP_VERSION);

	/* bring up the GPHY firmware */
	gphy_fw_np = of_get_compatible_child(dev->of_node, "lantiq,gphy-fw");
	if (gphy_fw_np) {
		err = gswip_gphy_fw_list(priv, gphy_fw_np, version);
		of_node_put(gphy_fw_np);
		if (err) {
			dev_err(dev, "gphy fw probe failed\n");
			return err;
		}
	}

	/* bring up the mdio bus */
	mdio_np = of_get_compatible_child(dev->of_node, "lantiq,xrx200-mdio");
	if (mdio_np) {
		err = gswip_mdio(priv, mdio_np);
		if (err) {
			dev_err(dev, "mdio probe failed\n");
			goto put_mdio_node;
		}
	}

	err = dsa_register_switch(priv->ds);
	if (err) {
		dev_err(dev, "dsa switch register failed: %i\n", err);
		goto mdio_bus;
	}
	if (!dsa_is_cpu_port(priv->ds, priv->hw_info->cpu_port)) {
		dev_err(dev, "wrong CPU port defined, HW only supports port: %i",
			priv->hw_info->cpu_port);
		err = -EINVAL;
		goto disable_switch;
	}

	platform_set_drvdata(pdev, priv);

	dev_info(dev, "probed GSWIP version %lx mod %lx\n",
		 (version & GSWIP_VERSION_REV_MASK) >> GSWIP_VERSION_REV_SHIFT,
		 (version & GSWIP_VERSION_MOD_MASK) >> GSWIP_VERSION_MOD_SHIFT);
	return 0;

disable_switch:
	gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
	dsa_unregister_switch(priv->ds);
mdio_bus:
	if (mdio_np)
		mdiobus_unregister(priv->ds->slave_mii_bus);
put_mdio_node:
	of_node_put(mdio_np);
	for (i = 0; i < priv->num_gphy_fw; i++)
		gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
	return err;
}

static int gswip_remove(struct platform_device *pdev)
{
	struct gswip_priv *priv = platform_get_drvdata(pdev);
	int i;

	/* disable the switch */
	gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);

	dsa_unregister_switch(priv->ds);

	if (priv->ds->slave_mii_bus) {
		mdiobus_unregister(priv->ds->slave_mii_bus);
		of_node_put(priv->ds->slave_mii_bus->dev.of_node);
	}

	for (i = 0; i < priv->num_gphy_fw; i++)
		gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);

	return 0;
}

static const struct gswip_hw_info gswip_xrx200 = {
	.max_ports = 7,
	.cpu_port = 6,
};

static const struct of_device_id gswip_of_match[] = {
	{ .compatible = "lantiq,xrx200-gswip", .data = &gswip_xrx200 },
	{},
};
MODULE_DEVICE_TABLE(of, gswip_of_match);

static struct platform_driver gswip_driver = {
	.probe = gswip_probe,
	.remove = gswip_remove,
	.driver = {
		.name = "gswip",
		.of_match_table = gswip_of_match,
	},
};

module_platform_driver(gswip_driver);

MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin");
MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin");
MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver");
MODULE_LICENSE("GPL v2");