// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
 * Copyright (c) 2016 John Crispin <john@phrozen.org>
 */

#include <linux/module.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <net/dsa.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/if_bridge.h>
#include <linux/mdio.h>
#include <linux/etherdevice.h>

#include "qca8k.h"
#define MIB_DESC(_s, _o, _n)	\
	{			\
		.size = (_s),	\
		.offset = (_o),	\
		.name = (_n),	\
	}
static const struct qca8k_mib_desc ar8327_mib[] = {
	MIB_DESC(1, 0x00, "RxBroad"),
	MIB_DESC(1, 0x04, "RxPause"),
	MIB_DESC(1, 0x08, "RxMulti"),
	MIB_DESC(1, 0x0c, "RxFcsErr"),
	MIB_DESC(1, 0x10, "RxAlignErr"),
	MIB_DESC(1, 0x14, "RxRunt"),
	MIB_DESC(1, 0x18, "RxFragment"),
	MIB_DESC(1, 0x1c, "Rx64Byte"),
	MIB_DESC(1, 0x20, "Rx128Byte"),
	MIB_DESC(1, 0x24, "Rx256Byte"),
	MIB_DESC(1, 0x28, "Rx512Byte"),
	MIB_DESC(1, 0x2c, "Rx1024Byte"),
	MIB_DESC(1, 0x30, "Rx1518Byte"),
	MIB_DESC(1, 0x34, "RxMaxByte"),
	MIB_DESC(1, 0x38, "RxTooLong"),
	MIB_DESC(2, 0x3c, "RxGoodByte"),
	MIB_DESC(2, 0x44, "RxBadByte"),
	MIB_DESC(1, 0x4c, "RxOverFlow"),
	MIB_DESC(1, 0x50, "Filtered"),
	MIB_DESC(1, 0x54, "TxBroad"),
	MIB_DESC(1, 0x58, "TxPause"),
	MIB_DESC(1, 0x5c, "TxMulti"),
	MIB_DESC(1, 0x60, "TxUnderRun"),
	MIB_DESC(1, 0x64, "Tx64Byte"),
	MIB_DESC(1, 0x68, "Tx128Byte"),
	MIB_DESC(1, 0x6c, "Tx256Byte"),
	MIB_DESC(1, 0x70, "Tx512Byte"),
	MIB_DESC(1, 0x74, "Tx1024Byte"),
	MIB_DESC(1, 0x78, "Tx1518Byte"),
	MIB_DESC(1, 0x7c, "TxMaxByte"),
	MIB_DESC(1, 0x80, "TxOverSize"),
	MIB_DESC(2, 0x84, "TxByte"),
	MIB_DESC(1, 0x8c, "TxCollision"),
	MIB_DESC(1, 0x90, "TxAbortCol"),
	MIB_DESC(1, 0x94, "TxMultiCol"),
	MIB_DESC(1, 0x98, "TxSingleCol"),
	MIB_DESC(1, 0x9c, "TxExcDefer"),
	MIB_DESC(1, 0xa0, "TxDefer"),
	MIB_DESC(1, 0xa4, "TxLateCol"),
};

/* The 32bit switch registers are accessed indirectly. To achieve this we need
 * to set the page of the register. Track the last page that was set to reduce
 * the traffic.
 */
static u16 qca8k_current_page = 0xffff;
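
/* Split a 32-bit switch register address into a page number plus the MDIO
 * device/register offsets used for the two 16-bit accesses below.
 */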
qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)

	*page = regaddr & 0x3ff;

qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum)

	ret = bus->read(bus, phy_id, regnum);

	ret = bus->read(bus, phy_id, regnum + 1);

	dev_err_ratelimited(&bus->dev,
			    "failed to read qca8k 32bit register\n");

qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)

	hi = (u16)(val >> 16);

	ret = bus->write(bus, phy_id, regnum, lo);

	ret = bus->write(bus, phy_id, regnum + 1, hi);

	dev_err_ratelimited(&bus->dev,
			    "failed to write qca8k 32bit register\n");
qca8k_set_page(struct mii_bus *bus, u16 page)

	if (page == qca8k_current_page)
		return;

	if (bus->write(bus, 0x18, 0, page) < 0)
		dev_err_ratelimited(&bus->dev,
				    "failed to set qca8k page\n");
	qca8k_current_page = page;

qca8k_read(struct qca8k_priv *priv, u32 reg)

	qca8k_split_addr(reg, &r1, &r2, &page);

	mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);

	qca8k_set_page(priv->bus, page);
	val = qca8k_mii_read32(priv->bus, 0x10 | r2, r1);

	mutex_unlock(&priv->bus->mdio_lock);

qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)

	qca8k_split_addr(reg, &r1, &r2, &page);

	mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);

	qca8k_set_page(priv->bus, page);
	qca8k_mii_write32(priv->bus, 0x10 | r2, r1, val);

	mutex_unlock(&priv->bus->mdio_lock);
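
/* Read-modify-write helper: bits in @mask are cleared and bits in @val are
 * set, all under a single page selection and MDIO bus lock.
 */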
qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 val)

	qca8k_split_addr(reg, &r1, &r2, &page);

	mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);

	qca8k_set_page(priv->bus, page);
	ret = qca8k_mii_read32(priv->bus, 0x10 | r2, r1);

	qca8k_mii_write32(priv->bus, 0x10 | r2, r1, ret);

	mutex_unlock(&priv->bus->mdio_lock);
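
/* Convenience wrappers around qca8k_rmw() that only set or only clear bits */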
qca8k_reg_set(struct qca8k_priv *priv, u32 reg, u32 val)

	qca8k_rmw(priv, reg, 0, val);

qca8k_reg_clear(struct qca8k_priv *priv, u32 reg, u32 val)

	qca8k_rmw(priv, reg, val, 0);

qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)

	struct qca8k_priv *priv = (struct qca8k_priv *)ctx;

	*val = qca8k_read(priv, reg);

qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)

	struct qca8k_priv *priv = (struct qca8k_priv *)ctx;

	qca8k_write(priv, reg, val);

static const struct regmap_range qca8k_readable_ranges[] = {
	regmap_reg_range(0x0000, 0x00e4), /* Global control */
	regmap_reg_range(0x0100, 0x0168), /* EEE control */
	regmap_reg_range(0x0200, 0x0270), /* Parser control */
	regmap_reg_range(0x0400, 0x0454), /* ACL */
	regmap_reg_range(0x0600, 0x0718), /* Lookup */
	regmap_reg_range(0x0800, 0x0b70), /* QM */
	regmap_reg_range(0x0c00, 0x0c80), /* PKT */
	regmap_reg_range(0x0e00, 0x0e98), /* L3 */
	regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
	regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
	regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
	regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
	regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
	regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
	regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
};

static const struct regmap_access_table qca8k_readable_table = {
	.yes_ranges = qca8k_readable_ranges,
	.n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
};

static struct regmap_config qca8k_regmap_config = {

	.max_register = 0x16ac, /* end MIB - Port6 range */
	.reg_read = qca8k_regmap_read,
	.reg_write = qca8k_regmap_write,
	.rd_table = &qca8k_readable_table,
};
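
/* Poll @reg until the bits in @mask clear, giving up after roughly 20ms;
 * the return value is non-zero if the wait timed out.
 */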
qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)

	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(20);

	/* loop until the busy flag has cleared */
	do {
		u32 val = qca8k_read(priv, reg);
		int busy = val & mask;

		if (!busy)
			break;
	} while (!time_after_eq(jiffies, timeout));

	return time_after_eq(jiffies, timeout);

qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)

	/* load the ARL table into an array */
	for (i = 0; i < 4; i++)
		reg[i] = qca8k_read(priv, QCA8K_REG_ATU_DATA0 + (i * 4));

	fdb->vid = (reg[2] >> QCA8K_ATU_VID_S) & QCA8K_ATU_VID_M;

	fdb->aging = reg[2] & QCA8K_ATU_STATUS_M;
	/* portmask - 54:48 */
	fdb->port_mask = (reg[1] >> QCA8K_ATU_PORT_S) & QCA8K_ATU_PORT_M;

	fdb->mac[0] = (reg[1] >> QCA8K_ATU_ADDR0_S) & 0xff;
	fdb->mac[1] = reg[1] & 0xff;
	fdb->mac[2] = (reg[0] >> QCA8K_ATU_ADDR2_S) & 0xff;
	fdb->mac[3] = (reg[0] >> QCA8K_ATU_ADDR3_S) & 0xff;
	fdb->mac[4] = (reg[0] >> QCA8K_ATU_ADDR4_S) & 0xff;
	fdb->mac[5] = reg[0] & 0xff;
qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
		u8 aging)

	reg[2] = (vid & QCA8K_ATU_VID_M) << QCA8K_ATU_VID_S;

	reg[2] |= aging & QCA8K_ATU_STATUS_M;
	/* portmask - 54:48 */
	reg[1] = (port_mask & QCA8K_ATU_PORT_M) << QCA8K_ATU_PORT_S;

	reg[1] |= mac[0] << QCA8K_ATU_ADDR0_S;

	reg[0] |= mac[2] << QCA8K_ATU_ADDR2_S;
	reg[0] |= mac[3] << QCA8K_ATU_ADDR3_S;
	reg[0] |= mac[4] << QCA8K_ATU_ADDR4_S;

	/* load the array into the ARL table */
	for (i = 0; i < 3; i++)
		qca8k_write(priv, QCA8K_REG_ATU_DATA0 + (i * 4), reg[i]);
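
/* Kick off an ATU operation: write the command (plus an optional port) to the
 * ATU_FUNC register with the BUSY bit set, wait for the hardware to clear it,
 * and report a table-full condition when loading a new entry.
 */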
qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port)

	/* Set the command and FDB index */
	reg = QCA8K_ATU_FUNC_BUSY;

	reg |= QCA8K_ATU_FUNC_PORT_EN;
	reg |= (port & QCA8K_ATU_FUNC_PORT_M) << QCA8K_ATU_FUNC_PORT_S;

	/* Write the function register triggering the table access */
	qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);

	/* wait for completion */
	if (qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY))

	/* Check for table full violation when adding an entry */
	if (cmd == QCA8K_FDB_LOAD) {
		reg = qca8k_read(priv, QCA8K_REG_ATU_FUNC);
		if (reg & QCA8K_ATU_FUNC_FULL)

qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port)

	qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
	ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);

	qca8k_fdb_read(priv, fdb);

qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, u16 port_mask,
	      u16 vid, u8 aging)

	mutex_lock(&priv->reg_mutex);
	qca8k_fdb_write(priv, vid, port_mask, mac, aging);
	ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
	mutex_unlock(&priv->reg_mutex);

qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, u16 vid)

	mutex_lock(&priv->reg_mutex);
	qca8k_fdb_write(priv, vid, port_mask, mac, 0);
	ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
	mutex_unlock(&priv->reg_mutex);

qca8k_fdb_flush(struct qca8k_priv *priv)

	mutex_lock(&priv->reg_mutex);
	qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
	mutex_unlock(&priv->reg_mutex);

qca8k_mib_init(struct qca8k_priv *priv)

	mutex_lock(&priv->reg_mutex);
	qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY);
	qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
	qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
	qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
	mutex_unlock(&priv->reg_mutex);

qca8k_set_pad_ctrl(struct qca8k_priv *priv, int port, int mode)

		reg = QCA8K_REG_PORT0_PAD_CTRL;

		reg = QCA8K_REG_PORT6_PAD_CTRL;

		pr_err("Can't set PAD_CTRL on port %d\n", port);

	/* Configure a port to be directly connected to an external
	 * PHY or MAC.
	 */
	case PHY_INTERFACE_MODE_RGMII:
		/* RGMII mode means no delay so don't enable the delay */
		val = QCA8K_PORT_PAD_RGMII_EN;
		qca8k_write(priv, reg, val);

	case PHY_INTERFACE_MODE_RGMII_ID:
		/* RGMII_ID needs internal delay. This is enabled through
		 * PORT5_PAD_CTRL for all ports, rather than individual port
		 * registers
		 */
		qca8k_write(priv, reg,
			    QCA8K_PORT_PAD_RGMII_EN |
			    QCA8K_PORT_PAD_RGMII_TX_DELAY(QCA8K_MAX_DELAY) |
			    QCA8K_PORT_PAD_RGMII_RX_DELAY(QCA8K_MAX_DELAY));
		qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
			    QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);

	case PHY_INTERFACE_MODE_SGMII:
		qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);

		pr_err("xMII mode %d not supported\n", mode);

qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)

	u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;

	/* Port 0 and 6 have no internal PHY */
	if (port > 0 && port < 6)
		mask |= QCA8K_PORT_STATUS_LINK_AUTO;

	if (enable)
		qca8k_reg_set(priv, QCA8K_REG_PORT_STATUS(port), mask);
	else
		qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask);

qca8k_port_to_phy(int port)

	/* Port 0 has no internal phy.
	 * Port 1 has an internal PHY at MDIO address 0.
	 * Port 2 has an internal PHY at MDIO address 1.
	 * ...
	 * Port 5 has an internal PHY at MDIO address 4.
	 * Port 6 has no internal PHY.
	 */
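
	/* The table above reduces to a fixed offset: the internal PHY address
	 * is the port number minus one.
	 */
	return port - 1;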

qca8k_mdio_write(struct qca8k_priv *priv, int port, u32 regnum, u16 data)

	if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)

	/* caller is responsible for not passing bad ports,
	 * but we still would like to make spills impossible.
	 */
	phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
	val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
	      QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
	      QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
	      QCA8K_MDIO_MASTER_DATA(data);

	qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val);

	return qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL,
			       QCA8K_MDIO_MASTER_BUSY);

qca8k_mdio_read(struct qca8k_priv *priv, int port, u32 regnum)

	if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)

	/* caller is responsible for not passing bad ports,
	 * but we still would like to make spills impossible.
	 */
	phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
	val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
	      QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
	      QCA8K_MDIO_MASTER_REG_ADDR(regnum);

	qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val);

	if (qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL,
			    QCA8K_MDIO_MASTER_BUSY))

	val = (qca8k_read(priv, QCA8K_MDIO_MASTER_CTRL) &
	       QCA8K_MDIO_MASTER_DATA_MASK);

qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)

	struct qca8k_priv *priv = ds->priv;

	return qca8k_mdio_write(priv, port, regnum, data);

qca8k_phy_read(struct dsa_switch *ds, int port, int regnum)

	struct qca8k_priv *priv = ds->priv;

	ret = qca8k_mdio_read(priv, port, regnum);

qca8k_setup_mdio_bus(struct qca8k_priv *priv)

	u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
	struct device_node *ports, *port;

	ports = of_get_child_by_name(priv->dev->of_node, "ports");

	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);

		if (!dsa_is_user_port(priv->ds, reg))
			continue;

		if (of_property_read_bool(port, "phy-handle"))
			external_mdio_mask |= BIT(reg);
		else
			internal_mdio_mask |= BIT(reg);
	}

	if (!external_mdio_mask && !internal_mdio_mask) {
		dev_err(priv->dev, "no PHYs are defined.\n");

	/* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through
	 * the MDIO_MASTER register also _disconnects_ the external MDC
	 * passthrough to the internal PHYs. It's not possible to use both
	 * configurations at the same time!
	 *
	 * Because this came up during the review process:
	 * If the external mdio-bus driver is capable of magically disabling
	 * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
	 * accessors for the time being, it would be possible to pull this
	 * off.
	 */
	if (!!external_mdio_mask && !!internal_mdio_mask) {
		dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");

	if (external_mdio_mask) {
		/* Make sure to disable the internal mdio bus in case
		 * a dt-overlay and driver reload changed the configuration
		 */
		qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL,
				QCA8K_MDIO_MASTER_EN);
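
	/* When only the internal PHYs are in use, route PHY accesses through
	 * the switch's own MDIO master via these accessors.
	 */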
	priv->ops.phy_read = qca8k_phy_read;
	priv->ops.phy_write = qca8k_phy_write;

qca8k_setup(struct dsa_switch *ds)

	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
	int ret, i, phy_mode = -1;

	/* Make sure that port 0 is the cpu port */
	if (!dsa_is_cpu_port(ds, 0)) {
		pr_err("port 0 is not the CPU port\n");

	mutex_init(&priv->reg_mutex);

	/* Start by setting up the register mapping */
	priv->regmap = devm_regmap_init(ds->dev, NULL, priv,
					&qca8k_regmap_config);
	if (IS_ERR(priv->regmap))
		pr_warn("regmap initialization failed");

	ret = qca8k_setup_mdio_bus(priv);

	/* Initialize CPU port pad mode (xMII type, delays...) */
	phy_mode = of_get_phy_mode(ds->ports[QCA8K_CPU_PORT].dn);

		pr_err("Can't find phy-mode for master device\n");

	ret = qca8k_set_pad_ctrl(priv, QCA8K_CPU_PORT, phy_mode);

	/* Enable CPU Port, force it to maximum bandwidth and full-duplex */
	mask = QCA8K_PORT_STATUS_SPEED_1000 | QCA8K_PORT_STATUS_TXFLOW |
	       QCA8K_PORT_STATUS_RXFLOW | QCA8K_PORT_STATUS_DUPLEX;
	qca8k_write(priv, QCA8K_REG_PORT_STATUS(QCA8K_CPU_PORT), mask);
	qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0,
		      QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
	qca8k_port_set_status(priv, QCA8K_CPU_PORT, 1);
	priv->port_sts[QCA8K_CPU_PORT].enabled = 1;

	/* Enable MIB counters */
	qca8k_mib_init(priv);

	/* Enable QCA header mode on the cpu port */
	qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(QCA8K_CPU_PORT),
		    QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S |
		    QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S);

	/* Disable forwarding by default on all ports */
	for (i = 0; i < QCA8K_NUM_PORTS; i++)
		qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
			  QCA8K_PORT_LOOKUP_MEMBER, 0);

	/* Disable MAC by default on all user ports */
	for (i = 1; i < QCA8K_NUM_PORTS; i++)
		if (dsa_is_user_port(ds, i))
			qca8k_port_set_status(priv, i, 0);

	/* Forward all unknown frames to CPU port for Linux processing */
	qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
		    BIT(0) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S |
		    BIT(0) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S |
		    BIT(0) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S |
		    BIT(0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);

	/* Setup connection between CPU port & user ports */
	for (i = 0; i < DSA_MAX_PORTS; i++) {
		/* CPU port gets connected to all user ports of the switch */
		if (dsa_is_cpu_port(ds, i)) {
			qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT),
				  QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));

		/* Individual user ports get connected to CPU port only */
		if (dsa_is_user_port(ds, i)) {
			int shift = 16 * (i % 2);

			qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
				  QCA8K_PORT_LOOKUP_MEMBER,
				  BIT(QCA8K_CPU_PORT));

			/* Enable ARP Auto-learning by default */
			qca8k_reg_set(priv, QCA8K_PORT_LOOKUP_CTRL(i),
				      QCA8K_PORT_LOOKUP_LEARN);

			/* For port based vlans to work we need to set the
			 * default egress vid
			 */
			qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
				  0xffff << shift, 1 << shift);
			qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
				    QCA8K_PORT_VLAN_CVID(1) |
				    QCA8K_PORT_VLAN_SVID(1));

	/* Flush the FDB table */
	qca8k_fdb_flush(priv);

qca8k_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phy)

	struct qca8k_priv *priv = ds->priv;

	/* Force fixed-link setting for CPU port, skip others. */
	if (!phy_is_pseudo_fixed_link(phy))
		return;

	switch (phy->speed) {
	case 10:
		reg = QCA8K_PORT_STATUS_SPEED_10;
		break;
	case 100:
		reg = QCA8K_PORT_STATUS_SPEED_100;
		break;
	case 1000:
		reg = QCA8K_PORT_STATUS_SPEED_1000;
		break;
	default:
		dev_dbg(priv->dev, "port%d link speed %dMbps not supported.\n",
			port, phy->speed);
		return;
	}

	/* Set duplex mode */
	if (phy->duplex == DUPLEX_FULL)
		reg |= QCA8K_PORT_STATUS_DUPLEX;

	/* Force flow control */
	if (dsa_is_cpu_port(ds, port))
		reg |= QCA8K_PORT_STATUS_RXFLOW | QCA8K_PORT_STATUS_TXFLOW;

	/* Force link down before changing MAC options */
	qca8k_port_set_status(priv, port, 0);
	qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
	qca8k_port_set_status(priv, port, 1);

qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++)
		strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
			ETH_GSTRING_LEN);
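
/* Read the per-port MIB block; 64-bit counters (size 2) are split across two
 * consecutive registers, with the high word at offset +4.
 */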
qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
			uint64_t *data)

	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
	const struct qca8k_mib_desc *mib;

	for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++) {
		mib = &ar8327_mib[i];
		reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;

		data[i] = qca8k_read(priv, reg);
		if (mib->size == 2) {
			hi = qca8k_read(priv, reg + 4);

qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)

	if (sset != ETH_SS_STATS)
		return 0;

	return ARRAY_SIZE(ar8327_mib);

qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee)

	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
	u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);

	mutex_lock(&priv->reg_mutex);
	reg = qca8k_read(priv, QCA8K_REG_EEE_CTRL);
	if (eee->eee_enabled)
		reg |= lpi_en;
	else
		reg &= ~lpi_en;
	qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
	mutex_unlock(&priv->reg_mutex);

qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)

	/* Nothing to do on the port's MAC */
	return 0;

qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)

	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;

	switch (state) {
	case BR_STATE_DISABLED:
		stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
		break;
	case BR_STATE_BLOCKING:
		stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
		break;
	case BR_STATE_LISTENING:
		stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
		break;
	case BR_STATE_LEARNING:
		stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
		break;
	case BR_STATE_FORWARDING:
	default:
		stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
		break;
	}

	qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
		  QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);

qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br)

	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
	int port_mask = BIT(QCA8K_CPU_PORT);

	for (i = 1; i < QCA8K_NUM_PORTS; i++) {
		if (dsa_to_port(ds, i)->bridge_dev != br)
			continue;
		/* Add this port to the portvlan mask of the other ports
		 * in the bridge
		 */
		qca8k_reg_set(priv,
			      QCA8K_PORT_LOOKUP_CTRL(i),
			      BIT(port));

	/* Add all other ports to this port's portvlan mask */
	qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
		  QCA8K_PORT_LOOKUP_MEMBER, port_mask);

qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)

	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;

	for (i = 1; i < QCA8K_NUM_PORTS; i++) {
		if (dsa_to_port(ds, i)->bridge_dev != br)
			continue;
		/* Remove this port from the portvlan mask of the other ports
		 * in the bridge
		 */
		qca8k_reg_clear(priv,
				QCA8K_PORT_LOOKUP_CTRL(i),
				BIT(port));

	/* Set the cpu port to be the only one in the portvlan mask of
	 * this port
	 */
	qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
		  QCA8K_PORT_LOOKUP_MEMBER, BIT(QCA8K_CPU_PORT));

qca8k_port_enable(struct dsa_switch *ds, int port,
		  struct phy_device *phy)

	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;

	qca8k_port_set_status(priv, port, 1);
	priv->port_sts[port].enabled = 1;

qca8k_port_disable(struct dsa_switch *ds, int port)

	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;

	qca8k_port_set_status(priv, port, 0);
	priv->port_sts[port].enabled = 0;

qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
		      u16 port_mask, u16 vid)

	/* Set the vid to the port vlan id if no vid is set */
	if (!vid)
		vid = 1;

	return qca8k_fdb_add(priv, addr, port_mask, vid,
			     QCA8K_ATU_STATUS_STATIC);

qca8k_port_fdb_add(struct dsa_switch *ds, int port,
		   const unsigned char *addr, u16 vid)

	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
	u16 port_mask = BIT(port);

	return qca8k_port_fdb_insert(priv, addr, port_mask, vid);

qca8k_port_fdb_del(struct dsa_switch *ds, int port,
		   const unsigned char *addr, u16 vid)

	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
	u16 port_mask = BIT(port);

	return qca8k_fdb_del(priv, addr, port_mask, vid);

qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
		    dsa_fdb_dump_cb_t *cb, void *data)

	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
	struct qca8k_fdb _fdb = { 0 };
	int cnt = QCA8K_NUM_FDB_RECORDS;

	mutex_lock(&priv->reg_mutex);
	while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {

		is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
		ret = cb(_fdb.mac, _fdb.vid, is_static, data);

	mutex_unlock(&priv->reg_mutex);

static enum dsa_tag_protocol
qca8k_get_tag_protocol(struct dsa_switch *ds, int port)

	return DSA_TAG_PROTO_QCA;

static const struct dsa_switch_ops qca8k_switch_ops = {
	.get_tag_protocol = qca8k_get_tag_protocol,
	.setup = qca8k_setup,
	.adjust_link = qca8k_adjust_link,
	.get_strings = qca8k_get_strings,
	.get_ethtool_stats = qca8k_get_ethtool_stats,
	.get_sset_count = qca8k_get_sset_count,
	.get_mac_eee = qca8k_get_mac_eee,
	.set_mac_eee = qca8k_set_mac_eee,
	.port_enable = qca8k_port_enable,
	.port_disable = qca8k_port_disable,
	.port_stp_state_set = qca8k_port_stp_state_set,
	.port_bridge_join = qca8k_port_bridge_join,
	.port_bridge_leave = qca8k_port_bridge_leave,
	.port_fdb_add = qca8k_port_fdb_add,
	.port_fdb_del = qca8k_port_fdb_del,
	.port_fdb_dump = qca8k_port_fdb_dump,
};

qca8k_sw_probe(struct mdio_device *mdiodev)

	struct qca8k_priv *priv;

	/* allocate the private data struct so that we can probe the switch's
	 * ID register
	 */
	priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);

	priv->bus = mdiodev->bus;
	priv->dev = &mdiodev->dev;

	/* read the switch's ID register */
	id = qca8k_read(priv, QCA8K_REG_MASK_CTRL);
	id >>= QCA8K_MASK_CTRL_ID_S;
	id &= QCA8K_MASK_CTRL_ID_M;
	if (id != QCA8K_ID_QCA8337)

	priv->ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);

	priv->ds->priv = priv;
	priv->ops = qca8k_switch_ops;
	priv->ds->ops = &priv->ops;
	mutex_init(&priv->reg_mutex);
	dev_set_drvdata(&mdiodev->dev, priv);

	return dsa_register_switch(priv->ds);

qca8k_sw_remove(struct mdio_device *mdiodev)

	struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);

	for (i = 0; i < QCA8K_NUM_PORTS; i++)
		qca8k_port_set_status(priv, i, 0);

	dsa_unregister_switch(priv->ds);

#ifdef CONFIG_PM_SLEEP
qca8k_set_pm(struct qca8k_priv *priv, int enable)

	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
		if (!priv->port_sts[i].enabled)
			continue;

		qca8k_port_set_status(priv, i, enable);

static int qca8k_suspend(struct device *dev)

	struct qca8k_priv *priv = dev_get_drvdata(dev);

	qca8k_set_pm(priv, 0);

	return dsa_switch_suspend(priv->ds);

static int qca8k_resume(struct device *dev)

	struct qca8k_priv *priv = dev_get_drvdata(dev);

	qca8k_set_pm(priv, 1);

	return dsa_switch_resume(priv->ds);
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
			 qca8k_suspend, qca8k_resume);

static const struct of_device_id qca8k_of_match[] = {
	{ .compatible = "qca,qca8334" },
	{ .compatible = "qca,qca8337" },
	{ /* sentinel */ },
};

static struct mdio_driver qca8kmdio_driver = {
	.probe = qca8k_sw_probe,
	.remove = qca8k_sw_remove,

	.of_match_table = qca8k_of_match,
	.pm = &qca8k_pm_ops,

mdio_module_driver(qca8kmdio_driver);

MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:qca8k");