/* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>

#include "mtk_eth_soc.h"

static int mtk_msg_level = -1;
module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");

#define MTK_ETHTOOL_STAT(x) { #x, \
			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }

/* strings used by ethtool */
static const struct mtk_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;
} mtk_ethtool_stats[] = {
	MTK_ETHTOOL_STAT(tx_bytes),
	MTK_ETHTOOL_STAT(tx_packets),
	MTK_ETHTOOL_STAT(tx_skip),
	MTK_ETHTOOL_STAT(tx_collisions),
	MTK_ETHTOOL_STAT(rx_bytes),
	MTK_ETHTOOL_STAT(rx_packets),
	MTK_ETHTOOL_STAT(rx_overflow),
	MTK_ETHTOOL_STAT(rx_fcs_errors),
	MTK_ETHTOOL_STAT(rx_short_errors),
	MTK_ETHTOOL_STAT(rx_long_errors),
	MTK_ETHTOOL_STAT(rx_checksum_errors),
	MTK_ETHTOOL_STAT(rx_flow_control_packets),
};

static const char * const mtk_clks_source_name[] = {
	"ethif", "esw", "gp1", "gp2", "trgpll"
};

void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);
}

u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
	return __raw_readl(eth->base + reg);
}

static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
			return 0;
		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
			break;
		usleep_range(10, 20);
	}

	dev_err(eth->dev, "mdio: MDIO timeout\n");
	return -1;
}

static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
			   u32 phy_register, u32 write_data)
{
	if (mtk_mdio_busy_wait(eth))
		return -1;

	write_data &= 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
		(phy_register << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return -1;

	return 0;
}

static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
{
	u32 d;

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
		(phy_reg << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT),
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;

	return d;
}

static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
			  int phy_reg, u16 val)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
}

static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_read(eth, phy_addr, phy_reg);
}

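/* Re-tune the GMAC0 RGMII/TRGMII interface when the PHY reports a new link
 * speed: select the interface mode, reprogram the TRGPLL clock rate and
 * update the RX/TX clock control words accordingly.
 */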
static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
{
	u32 val;
	int ret;

	val = (speed == SPEED_1000) ?
		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
	mtk_w32(eth, val, INTF_MODE);

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_CLK_SEL362_5,
			   ETHSYS_TRGMII_CLK_SEL362_5);

	val = (speed == SPEED_1000) ? 250000000 : 500000000;
	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
	if (ret)
		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);

	val = (speed == SPEED_1000) ?
		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_RCK_CTRL);

	val = (speed == SPEED_1000) ?
		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_TCK_CTRL);
}

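/* phylib link change callback: rebuild the MAC control register (MCR) from
 * the current PHY state (speed, duplex, link) and resolve RX/TX pause from
 * the local and link-partner advertisements.
 */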
static void mtk_phy_link_adjust(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	u16 lcl_adv = 0, rmt_adv = 0;
	u8 flowctrl;
	u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
		  MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
		  MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
		  MAC_MCR_BACKPR_EN;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return;

	switch (dev->phydev->speed) {
	case SPEED_1000:
		mcr |= MAC_MCR_SPEED_1000;
		break;
	case SPEED_100:
		mcr |= MAC_MCR_SPEED_100;
		break;
	}

	if (mac->id == 0 && !mac->trgmii)
		mtk_gmac0_rgmii_adjust(mac->hw, dev->phydev->speed);

	if (dev->phydev->link)
		mcr |= MAC_MCR_FORCE_LINK;

	if (dev->phydev->duplex) {
		mcr |= MAC_MCR_FORCE_DPX;

		if (dev->phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (dev->phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		if (dev->phydev->advertising & ADVERTISED_Pause)
			lcl_adv |= ADVERTISE_PAUSE_CAP;
		if (dev->phydev->advertising & ADVERTISED_Asym_Pause)
			lcl_adv |= ADVERTISE_PAUSE_ASYM;

		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);

		if (flowctrl & FLOW_CTRL_TX)
			mcr |= MAC_MCR_FORCE_TX_FC;
		if (flowctrl & FLOW_CTRL_RX)
			mcr |= MAC_MCR_FORCE_RX_FC;

		netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
			  flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
			  flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
	}

	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));

	if (dev->phydev->link)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
}

static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
				struct device_node *phy_node)
{
	struct phy_device *phydev;
	int phy_mode;

	phy_mode = of_get_phy_mode(phy_node);
	if (phy_mode < 0) {
		dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
		return -EINVAL;
	}

	phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
				mtk_phy_link_adjust, 0, phy_mode);
	if (!phydev) {
		dev_err(eth->dev, "could not connect to PHY\n");
		return -ENODEV;
	}

	dev_info(eth->dev,
		 "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n",
		 mac->id, phydev_name(phydev), phydev->phy_id,
		 phydev->drv->name);

	return 0;
}

static int mtk_phy_connect(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth;
	struct device_node *np;
	u32 val;

	eth = mac->hw;
	np = of_parse_phandle(mac->of_node, "phy-handle", 0);
	if (!np && of_phy_is_fixed_link(mac->of_node))
		if (!of_phy_register_fixed_link(mac->of_node))
			np = of_node_get(mac->of_node);
	if (!np)
		return -ENODEV;

	switch (of_get_phy_mode(np)) {
	case PHY_INTERFACE_MODE_TRGMII:
		mac->trgmii = true;
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII:
		mac->ge_mode = 0;
		break;
	case PHY_INTERFACE_MODE_MII:
		mac->ge_mode = 1;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		mac->ge_mode = 2;
		break;
	case PHY_INTERFACE_MODE_RMII:
		if (!mac->id)
			goto err_phy;
		mac->ge_mode = 3;
		break;
	default:
		goto err_phy;
	}

	/* put the gmac into the right mode */
	regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
	val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
	val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id);
	regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);

	/* couple phydev to net_device */
	if (mtk_phy_connect_node(eth, mac, np))
		goto err_phy;

	dev->phydev->autoneg = AUTONEG_ENABLE;
	dev->phydev->speed = 0;
	dev->phydev->duplex = 0;

	if (of_phy_is_fixed_link(mac->of_node))
		dev->phydev->supported |=
		SUPPORTED_Pause | SUPPORTED_Asym_Pause;

	dev->phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
				   SUPPORTED_Asym_Pause;
	dev->phydev->advertising = dev->phydev->supported |
				    ADVERTISED_Autoneg;
	phy_start_aneg(dev->phydev);

	of_node_put(np);

	return 0;

err_phy:
	if (of_phy_is_fixed_link(mac->of_node))
		of_phy_deregister_fixed_link(mac->of_node);
	of_node_put(np);
	dev_err(eth->dev, "%s: invalid phy\n", __func__);
	return -EINVAL;
}

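/* Register the MDIO bus described by the "mdio-bus" child node so that the
 * PHYs attached to the frame engine can be probed through the indirect
 * access helpers above.
 */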
static int mtk_mdio_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	int ret;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
	if (!eth->mii_bus) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	eth->mii_bus->name = "mdio";
	eth->mii_bus->read = mtk_mdio_read;
	eth->mii_bus->write = mtk_mdio_write;
	eth->mii_bus->priv = eth;
	eth->mii_bus->parent = eth->dev;

	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
	ret = of_mdiobus_register(eth->mii_bus, mii_np);

err_put_node:
	of_node_put(mii_np);
	return ret;
}

static void mtk_mdio_cleanup(struct mtk_eth *eth)
{
	if (!eth->mii_bus)
		return;

	mdiobus_unregister(eth->mii_bus);
}

static inline void mtk_irq_disable(struct mtk_eth *eth,
				   unsigned reg, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->irq_lock, flags);
	val = mtk_r32(eth, reg);
	mtk_w32(eth, val & ~mask, reg);
	spin_unlock_irqrestore(&eth->irq_lock, flags);
}

static inline void mtk_irq_enable(struct mtk_eth *eth,
				  unsigned reg, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->irq_lock, flags);
	val = mtk_r32(eth, reg);
	mtk_w32(eth, val | mask, reg);
	spin_unlock_irqrestore(&eth->irq_lock, flags);
}

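/* The GDMA block keeps the station address in two registers: the upper two
 * bytes go into MAC_ADRH and the lower four bytes into MAC_ADRL.
 */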
static int mtk_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);
	struct mtk_mac *mac = netdev_priv(dev);
	const char *macaddr = dev->dev_addr;

	if (ret)
		return ret;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	spin_lock_bh(&mac->hw->page_lock);
	mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
		MTK_GDMA_MAC_ADRH(mac->id));
	mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
		(macaddr[4] << 8) | macaddr[5],
		MTK_GDMA_MAC_ADRL(mac->id));
	spin_unlock_bh(&mac->hw->page_lock);

	return 0;
}

void mtk_stats_update_mac(struct mtk_mac *mac)
{
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int base = MTK_GDM1_TX_GBCNT;
	u64 stats;

	base += hw_stats->reg_offset;

	u64_stats_update_begin(&hw_stats->syncp);

	hw_stats->rx_bytes += mtk_r32(mac->hw, base);
	stats = mtk_r32(mac->hw, base + 0x04);
	if (stats)
		hw_stats->rx_bytes += (stats << 32);
	hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
	hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
	hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
	hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
	hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
	hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
	hw_stats->rx_flow_control_packets +=
					mtk_r32(mac->hw, base + 0x24);
	hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
	hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
	hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
	stats = mtk_r32(mac->hw, base + 0x34);
	if (stats)
		hw_stats->tx_bytes += (stats << 32);
	hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
	u64_stats_update_end(&hw_stats->syncp);
}

static void mtk_stats_update(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
			continue;
		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
			mtk_stats_update_mac(eth->mac[i]);
			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
		}
	}
}

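/* Fill the rtnl stats from the MIB snapshot; readers retry through the
 * u64_stats syncp so that a concurrent counter update cannot be observed
 * half written on 32-bit systems.
 */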
static void mtk_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int start;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock(&hw_stats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
					    hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));

	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;
}

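/* RX buffers are page fragments: the usable payload is the fragment size
 * minus the headroom (NET_SKB_PAD + NET_IP_ALIGN) and the trailing
 * skb_shared_info, which the two helpers below account for.
 */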
static inline int mtk_max_frag_size(int mtu)
{
	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
		mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int mtk_max_buf_size(int frag_size)
{
	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	WARN_ON(buf_size < MTK_MAX_RX_LENGTH);

	return buf_size;
}

static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
				   struct mtk_rx_dma *dma_rxd)
{
	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
}

/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
	dma_addr_t phy_ring_tail;
	int cnt = MTK_DMA_SIZE;
	dma_addr_t dma_addr;
	int i;

	eth->scratch_ring = dma_alloc_coherent(eth->dev,
					       cnt * sizeof(struct mtk_tx_dma),
					       &eth->phy_scratch_ring,
					       GFP_ATOMIC | __GFP_ZERO);
	if (unlikely(!eth->scratch_ring))
		return -ENOMEM;

	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
				    GFP_KERNEL);
	if (unlikely(!eth->scratch_head))
		return -ENOMEM;

	dma_addr = dma_map_single(eth->dev,
				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
		return -ENOMEM;

	memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
	phy_ring_tail = eth->phy_scratch_ring +
			(sizeof(struct mtk_tx_dma) * (cnt - 1));

	for (i = 0; i < cnt; i++) {
		eth->scratch_ring[i].txd1 =
					(dma_addr + (i * MTK_QDMA_PAGE_SIZE));
		if (i < cnt - 1)
			eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
				((i + 1) * sizeof(struct mtk_tx_dma)));
		eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
	}

	mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
	mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
	mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);

	return 0;
}

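/* The QDMA hardware hands back descriptor addresses as physical pointers;
 * these helpers translate them to the CPU-visible ring entry and to the
 * matching software tx_buf bookkeeping slot.
 */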
static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
	void *ret = ring->dma;

	return ret + (desc - ring->phys);
}

static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
						    struct mtk_tx_dma *txd)
{
	int idx = txd - ring->dma;

	return &ring->buf[idx];
}

static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
{
	if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
		dma_unmap_single(eth->dev,
				 dma_unmap_addr(tx_buf, dma_addr0),
				 dma_unmap_len(tx_buf, dma_len0),
				 DMA_TO_DEVICE);
	} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
		dma_unmap_page(eth->dev,
			       dma_unmap_addr(tx_buf, dma_addr0),
			       dma_unmap_len(tx_buf, dma_len0),
			       DMA_TO_DEVICE);
	}
	tx_buf->flags = 0;
	if (tx_buf->skb &&
	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
		dev_kfree_skb_any(tx_buf->skb);
	tx_buf->skb = NULL;
}

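/* Map an skb onto the TX ring: the linear head goes into the first
 * descriptor and each fragment is chained through txd2 in
 * MTK_TX_DMA_BUF_LEN sized chunks, with the last piece flagged via
 * TX_DMA_LS0.
 */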
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
		      int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma *itxd, *txd;
	struct mtk_tx_buf *itx_buf, *tx_buf;
	dma_addr_t mapped_addr;
	unsigned int nr_frags;
	int i, n_desc = 1;
	u32 txd4 = 0, fport;

	itxd = ring->next_free;
	if (itxd == ring->last_free)
		return -ENOMEM;

	/* set the forward port */
	fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
	txd4 |= fport;

	itx_buf = mtk_desc_to_tx_buf(ring, itxd);
	memset(itx_buf, 0, sizeof(*itx_buf));

	if (gso)
		txd4 |= TX_DMA_TSO;

	/* TX Checksum offload */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd4 |= TX_DMA_CHKSUM;

	/* VLAN header offload */
	if (skb_vlan_tag_present(skb))
		txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);

	mapped_addr = dma_map_single(eth->dev, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
		return -ENOMEM;

	WRITE_ONCE(itxd->txd1, mapped_addr);
	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
			  MTK_TX_FLAGS_FPORT1;
	dma_unmap_addr_set(itx_buf, dma_addr0, mapped_addr);
	dma_unmap_len_set(itx_buf, dma_len0, skb_headlen(skb));

	txd = itxd;
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		unsigned int offset = 0;
		int frag_size = skb_frag_size(frag);

		while (frag_size) {
			bool last_frag = false;
			unsigned int frag_map_size;

			txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
			if (txd == ring->last_free)
				goto err_dma;

			n_desc++;
			frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
			mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
						       frag_map_size,
						       DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
				goto err_dma;

			if (i == nr_frags - 1 &&
			    (frag_size - frag_map_size) == 0)
				last_frag = true;

			WRITE_ONCE(txd->txd1, mapped_addr);
			WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
					       TX_DMA_PLEN0(frag_map_size) |
					       last_frag * TX_DMA_LS0));
			WRITE_ONCE(txd->txd4, fport);

			tx_buf = mtk_desc_to_tx_buf(ring, txd);
			memset(tx_buf, 0, sizeof(*tx_buf));
			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
			tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
					 MTK_TX_FLAGS_FPORT1;

			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
			frag_size -= frag_map_size;
			offset += frag_map_size;
		}
	}

	/* store skb to cleanup */
	itx_buf->skb = skb;

	WRITE_ONCE(itxd->txd4, txd4);
	WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
				(!nr_frags * TX_DMA_LS0)));

	netdev_sent_queue(dev, skb->len);
	skb_tx_timestamp(skb);

	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
	atomic_sub(n_desc, &ring->free_count);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
		mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);

	return 0;

err_dma:
	do {
		tx_buf = mtk_desc_to_tx_buf(ring, itxd);

		/* unmap dma */
		mtk_tx_unmap(eth, tx_buf);

		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
	} while (itxd != txd);

	return -ENOMEM;
}

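/* Worst-case descriptor count for an skb: GSO frames may need one descriptor
 * per MTK_TX_DMA_BUF_LEN chunk of every fragment, non-GSO frames need one
 * per fragment plus the head.
 */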
static inline int mtk_cal_txd_req(struct sk_buff *skb)
{
	int i, nfrags;
	struct skb_frag_struct *frag;

	nfrags = 1;
	if (skb_is_gso(skb)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN);
		}
	} else {
		nfrags += skb_shinfo(skb)->nr_frags;
	}

	return nfrags;
}

static int mtk_queue_stopped(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		if (netif_queue_stopped(eth->netdev[i]))
			return 1;
	}

	return 0;
}

static void mtk_wake_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_wake_queue(eth->netdev[i]);
	}
}

static void mtk_stop_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_stop_queue(eth->netdev[i]);
	}
}

static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct net_device_stats *stats = &dev->stats;
	bool gso = false;
	int tx_num;

	/* normally we can rely on the stack not calling this more than once,
	 * however we have 2 queues running on the same ring so we need to lock
	 * the ring access
	 */
	spin_lock(&eth->page_lock);

	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
		goto drop;

	tx_num = mtk_cal_txd_req(skb);
	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
		mtk_stop_queue(eth);
		netif_err(eth, tx_queued, dev,
			  "Tx Ring full when queue awake!\n");
		spin_unlock(&eth->page_lock);
		return NETDEV_TX_BUSY;
	}

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0)) {
			netif_warn(eth, tx_err, dev,
				   "GSO expand head fail.\n");
			goto drop;
		}

		if (skb_shinfo(skb)->gso_type &
				(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			gso = true;
			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
		}
	}

	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
		goto drop;

	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
		mtk_stop_queue(eth);

	spin_unlock(&eth->page_lock);

	return NETDEV_TX_OK;

drop:
	spin_unlock(&eth->page_lock);
	stats->tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

854 static struct mtk_rx_ring
*mtk_get_rx_ring(struct mtk_eth
*eth
)
857 struct mtk_rx_ring
*ring
;
861 return ð
->rx_ring
[0];
863 for (i
= 0; i
< MTK_MAX_RX_RING_NUM
; i
++) {
864 ring
= ð
->rx_ring
[i
];
865 idx
= NEXT_RX_DESP_IDX(ring
->calc_idx
, ring
->dma_size
);
866 if (ring
->dma
[idx
].rxd2
& RX_DMA_DONE
) {
867 ring
->calc_idx_update
= true;
875 static void mtk_update_rx_cpu_idx(struct mtk_eth
*eth
)
877 struct mtk_rx_ring
*ring
;
881 ring
= ð
->rx_ring
[0];
882 mtk_w32(eth
, ring
->calc_idx
, ring
->crx_idx_reg
);
884 for (i
= 0; i
< MTK_MAX_RX_RING_NUM
; i
++) {
885 ring
= ð
->rx_ring
[i
];
886 if (ring
->calc_idx_update
) {
887 ring
->calc_idx_update
= false;
888 mtk_w32(eth
, ring
->calc_idx
, ring
->crx_idx_reg
);
894 static int mtk_poll_rx(struct napi_struct
*napi
, int budget
,
897 struct mtk_rx_ring
*ring
;
901 struct mtk_rx_dma
*rxd
, trxd
;
904 while (done
< budget
) {
905 struct net_device
*netdev
;
910 ring
= mtk_get_rx_ring(eth
);
914 idx
= NEXT_RX_DESP_IDX(ring
->calc_idx
, ring
->dma_size
);
915 rxd
= &ring
->dma
[idx
];
916 data
= ring
->data
[idx
];
918 mtk_rx_get_desc(&trxd
, rxd
);
919 if (!(trxd
.rxd2
& RX_DMA_DONE
))
922 /* find out which mac the packet come from. values start at 1 */
923 mac
= (trxd
.rxd4
>> RX_DMA_FPORT_SHIFT
) &
927 netdev
= eth
->netdev
[mac
];
929 if (unlikely(test_bit(MTK_RESETTING
, ð
->state
)))
932 /* alloc new buffer */
933 new_data
= napi_alloc_frag(ring
->frag_size
);
934 if (unlikely(!new_data
)) {
935 netdev
->stats
.rx_dropped
++;
938 dma_addr
= dma_map_single(eth
->dev
,
939 new_data
+ NET_SKB_PAD
,
942 if (unlikely(dma_mapping_error(eth
->dev
, dma_addr
))) {
943 skb_free_frag(new_data
);
944 netdev
->stats
.rx_dropped
++;
949 skb
= build_skb(data
, ring
->frag_size
);
950 if (unlikely(!skb
)) {
951 skb_free_frag(new_data
);
952 netdev
->stats
.rx_dropped
++;
955 skb_reserve(skb
, NET_SKB_PAD
+ NET_IP_ALIGN
);
957 dma_unmap_single(eth
->dev
, trxd
.rxd1
,
958 ring
->buf_size
, DMA_FROM_DEVICE
);
959 pktlen
= RX_DMA_GET_PLEN0(trxd
.rxd2
);
961 skb_put(skb
, pktlen
);
962 if (trxd
.rxd4
& RX_DMA_L4_VALID
)
963 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
965 skb_checksum_none_assert(skb
);
966 skb
->protocol
= eth_type_trans(skb
, netdev
);
968 if (netdev
->features
& NETIF_F_HW_VLAN_CTAG_RX
&&
969 RX_DMA_VID(trxd
.rxd3
))
970 __vlan_hwaccel_put_tag(skb
, htons(ETH_P_8021Q
),
971 RX_DMA_VID(trxd
.rxd3
));
972 napi_gro_receive(napi
, skb
);
974 ring
->data
[idx
] = new_data
;
975 rxd
->rxd1
= (unsigned int)dma_addr
;
978 rxd
->rxd2
= RX_DMA_PLEN0(ring
->buf_size
);
980 ring
->calc_idx
= idx
;
987 /* make sure that all changes to the dma ring are flushed before
991 mtk_update_rx_cpu_idx(eth
);
997 static int mtk_poll_tx(struct mtk_eth
*eth
, int budget
)
999 struct mtk_tx_ring
*ring
= ð
->tx_ring
;
1000 struct mtk_tx_dma
*desc
;
1001 struct sk_buff
*skb
;
1002 struct mtk_tx_buf
*tx_buf
;
1003 unsigned int done
[MTK_MAX_DEVS
];
1004 unsigned int bytes
[MTK_MAX_DEVS
];
1006 static int condition
;
1009 memset(done
, 0, sizeof(done
));
1010 memset(bytes
, 0, sizeof(bytes
));
1012 cpu
= mtk_r32(eth
, MTK_QTX_CRX_PTR
);
1013 dma
= mtk_r32(eth
, MTK_QTX_DRX_PTR
);
1015 desc
= mtk_qdma_phys_to_virt(ring
, cpu
);
1017 while ((cpu
!= dma
) && budget
) {
1018 u32 next_cpu
= desc
->txd2
;
1021 desc
= mtk_qdma_phys_to_virt(ring
, desc
->txd2
);
1022 if ((desc
->txd3
& TX_DMA_OWNER_CPU
) == 0)
1025 tx_buf
= mtk_desc_to_tx_buf(ring
, desc
);
1026 if (tx_buf
->flags
& MTK_TX_FLAGS_FPORT1
)
1035 if (skb
!= (struct sk_buff
*)MTK_DMA_DUMMY_DESC
) {
1036 bytes
[mac
] += skb
->len
;
1040 mtk_tx_unmap(eth
, tx_buf
);
1042 ring
->last_free
= desc
;
1043 atomic_inc(&ring
->free_count
);
1048 mtk_w32(eth
, cpu
, MTK_QTX_CRX_PTR
);
1050 for (i
= 0; i
< MTK_MAC_COUNT
; i
++) {
1051 if (!eth
->netdev
[i
] || !done
[i
])
1053 netdev_completed_queue(eth
->netdev
[i
], done
[i
], bytes
[i
]);
1057 if (mtk_queue_stopped(eth
) &&
1058 (atomic_read(&ring
->free_count
) > ring
->thresh
))
1059 mtk_wake_queue(eth
);
1064 static void mtk_handle_status_irq(struct mtk_eth
*eth
)
1066 u32 status2
= mtk_r32(eth
, MTK_INT_STATUS2
);
1068 if (unlikely(status2
& (MTK_GDM1_AF
| MTK_GDM2_AF
))) {
1069 mtk_stats_update(eth
);
1070 mtk_w32(eth
, (MTK_GDM1_AF
| MTK_GDM2_AF
),
1075 static int mtk_napi_tx(struct napi_struct
*napi
, int budget
)
1077 struct mtk_eth
*eth
= container_of(napi
, struct mtk_eth
, tx_napi
);
1081 mtk_handle_status_irq(eth
);
1082 mtk_w32(eth
, MTK_TX_DONE_INT
, MTK_QMTK_INT_STATUS
);
1083 tx_done
= mtk_poll_tx(eth
, budget
);
1085 if (unlikely(netif_msg_intr(eth
))) {
1086 status
= mtk_r32(eth
, MTK_QMTK_INT_STATUS
);
1087 mask
= mtk_r32(eth
, MTK_QDMA_INT_MASK
);
1089 "done tx %d, intr 0x%08x/0x%x\n",
1090 tx_done
, status
, mask
);
1093 if (tx_done
== budget
)
1096 status
= mtk_r32(eth
, MTK_QMTK_INT_STATUS
);
1097 if (status
& MTK_TX_DONE_INT
)
1100 napi_complete(napi
);
1101 mtk_irq_enable(eth
, MTK_QDMA_INT_MASK
, MTK_TX_DONE_INT
);
1106 static int mtk_napi_rx(struct napi_struct
*napi
, int budget
)
1108 struct mtk_eth
*eth
= container_of(napi
, struct mtk_eth
, rx_napi
);
1111 int remain_budget
= budget
;
1113 mtk_handle_status_irq(eth
);
1116 mtk_w32(eth
, MTK_RX_DONE_INT
, MTK_PDMA_INT_STATUS
);
1117 rx_done
= mtk_poll_rx(napi
, remain_budget
, eth
);
1119 if (unlikely(netif_msg_intr(eth
))) {
1120 status
= mtk_r32(eth
, MTK_PDMA_INT_STATUS
);
1121 mask
= mtk_r32(eth
, MTK_PDMA_INT_MASK
);
1123 "done rx %d, intr 0x%08x/0x%x\n",
1124 rx_done
, status
, mask
);
1126 if (rx_done
== remain_budget
)
1129 status
= mtk_r32(eth
, MTK_PDMA_INT_STATUS
);
1130 if (status
& MTK_RX_DONE_INT
) {
1131 remain_budget
-= rx_done
;
1134 napi_complete(napi
);
1135 mtk_irq_enable(eth
, MTK_PDMA_INT_MASK
, MTK_RX_DONE_INT
);
1137 return rx_done
+ budget
- remain_budget
;
1140 static int mtk_tx_alloc(struct mtk_eth
*eth
)
1142 struct mtk_tx_ring
*ring
= ð
->tx_ring
;
1143 int i
, sz
= sizeof(*ring
->dma
);
1145 ring
->buf
= kcalloc(MTK_DMA_SIZE
, sizeof(*ring
->buf
),
1150 ring
->dma
= dma_alloc_coherent(eth
->dev
,
1153 GFP_ATOMIC
| __GFP_ZERO
);
1157 memset(ring
->dma
, 0, MTK_DMA_SIZE
* sz
);
1158 for (i
= 0; i
< MTK_DMA_SIZE
; i
++) {
1159 int next
= (i
+ 1) % MTK_DMA_SIZE
;
1160 u32 next_ptr
= ring
->phys
+ next
* sz
;
1162 ring
->dma
[i
].txd2
= next_ptr
;
1163 ring
->dma
[i
].txd3
= TX_DMA_LS0
| TX_DMA_OWNER_CPU
;
1166 atomic_set(&ring
->free_count
, MTK_DMA_SIZE
- 2);
1167 ring
->next_free
= &ring
->dma
[0];
1168 ring
->last_free
= &ring
->dma
[MTK_DMA_SIZE
- 1];
1169 ring
->thresh
= MAX_SKB_FRAGS
;
1171 /* make sure that all changes to the dma ring are flushed before we
1176 mtk_w32(eth
, ring
->phys
, MTK_QTX_CTX_PTR
);
1177 mtk_w32(eth
, ring
->phys
, MTK_QTX_DTX_PTR
);
1179 ring
->phys
+ ((MTK_DMA_SIZE
- 1) * sz
),
1182 ring
->phys
+ ((MTK_DMA_SIZE
- 1) * sz
),
1184 mtk_w32(eth
, (QDMA_RES_THRES
<< 8) | QDMA_RES_THRES
, MTK_QTX_CFG(0));
1192 static void mtk_tx_clean(struct mtk_eth
*eth
)
1194 struct mtk_tx_ring
*ring
= ð
->tx_ring
;
1198 for (i
= 0; i
< MTK_DMA_SIZE
; i
++)
1199 mtk_tx_unmap(eth
, &ring
->buf
[i
]);
1205 dma_free_coherent(eth
->dev
,
1206 MTK_DMA_SIZE
* sizeof(*ring
->dma
),
1213 static int mtk_rx_alloc(struct mtk_eth
*eth
, int ring_no
, int rx_flag
)
1215 struct mtk_rx_ring
*ring
= ð
->rx_ring
[ring_no
];
1216 int rx_data_len
, rx_dma_size
;
1219 if (rx_flag
== MTK_RX_FLAGS_HWLRO
) {
1220 rx_data_len
= MTK_MAX_LRO_RX_LENGTH
;
1221 rx_dma_size
= MTK_HW_LRO_DMA_SIZE
;
1223 rx_data_len
= ETH_DATA_LEN
;
1224 rx_dma_size
= MTK_DMA_SIZE
;
1227 ring
->frag_size
= mtk_max_frag_size(rx_data_len
);
1228 ring
->buf_size
= mtk_max_buf_size(ring
->frag_size
);
1229 ring
->data
= kcalloc(rx_dma_size
, sizeof(*ring
->data
),
1234 for (i
= 0; i
< rx_dma_size
; i
++) {
1235 ring
->data
[i
] = netdev_alloc_frag(ring
->frag_size
);
1240 ring
->dma
= dma_alloc_coherent(eth
->dev
,
1241 rx_dma_size
* sizeof(*ring
->dma
),
1243 GFP_ATOMIC
| __GFP_ZERO
);
1247 for (i
= 0; i
< rx_dma_size
; i
++) {
1248 dma_addr_t dma_addr
= dma_map_single(eth
->dev
,
1249 ring
->data
[i
] + NET_SKB_PAD
,
1252 if (unlikely(dma_mapping_error(eth
->dev
, dma_addr
)))
1254 ring
->dma
[i
].rxd1
= (unsigned int)dma_addr
;
1256 ring
->dma
[i
].rxd2
= RX_DMA_PLEN0(ring
->buf_size
);
1258 ring
->dma_size
= rx_dma_size
;
1259 ring
->calc_idx_update
= false;
1260 ring
->calc_idx
= rx_dma_size
- 1;
1261 ring
->crx_idx_reg
= MTK_PRX_CRX_IDX_CFG(ring_no
);
1262 /* make sure that all changes to the dma ring are flushed before we
1267 mtk_w32(eth
, ring
->phys
, MTK_PRX_BASE_PTR_CFG(ring_no
));
1268 mtk_w32(eth
, rx_dma_size
, MTK_PRX_MAX_CNT_CFG(ring_no
));
1269 mtk_w32(eth
, ring
->calc_idx
, ring
->crx_idx_reg
);
1270 mtk_w32(eth
, MTK_PST_DRX_IDX_CFG(ring_no
), MTK_PDMA_RST_IDX
);
1275 static void mtk_rx_clean(struct mtk_eth
*eth
, int ring_no
)
1277 struct mtk_rx_ring
*ring
= ð
->rx_ring
[ring_no
];
1280 if (ring
->data
&& ring
->dma
) {
1281 for (i
= 0; i
< ring
->dma_size
; i
++) {
1284 if (!ring
->dma
[i
].rxd1
)
1286 dma_unmap_single(eth
->dev
,
1290 skb_free_frag(ring
->data
[i
]);
1297 dma_free_coherent(eth
->dev
,
1298 ring
->dma_size
* sizeof(*ring
->dma
),
1305 static int mtk_hwlro_rx_init(struct mtk_eth
*eth
)
1308 u32 ring_ctrl_dw1
= 0, ring_ctrl_dw2
= 0, ring_ctrl_dw3
= 0;
1309 u32 lro_ctrl_dw0
= 0, lro_ctrl_dw3
= 0;
1311 /* set LRO rings to auto-learn modes */
1312 ring_ctrl_dw2
|= MTK_RING_AUTO_LERAN_MODE
;
1314 /* validate LRO ring */
1315 ring_ctrl_dw2
|= MTK_RING_VLD
;
1317 /* set AGE timer (unit: 20us) */
1318 ring_ctrl_dw2
|= MTK_RING_AGE_TIME_H
;
1319 ring_ctrl_dw1
|= MTK_RING_AGE_TIME_L
;
1321 /* set max AGG timer (unit: 20us) */
1322 ring_ctrl_dw2
|= MTK_RING_MAX_AGG_TIME
;
1324 /* set max LRO AGG count */
1325 ring_ctrl_dw2
|= MTK_RING_MAX_AGG_CNT_L
;
1326 ring_ctrl_dw3
|= MTK_RING_MAX_AGG_CNT_H
;
1328 for (i
= 1; i
< MTK_MAX_RX_RING_NUM
; i
++) {
1329 mtk_w32(eth
, ring_ctrl_dw1
, MTK_LRO_CTRL_DW1_CFG(i
));
1330 mtk_w32(eth
, ring_ctrl_dw2
, MTK_LRO_CTRL_DW2_CFG(i
));
1331 mtk_w32(eth
, ring_ctrl_dw3
, MTK_LRO_CTRL_DW3_CFG(i
));
1334 /* IPv4 checksum update enable */
1335 lro_ctrl_dw0
|= MTK_L3_CKS_UPD_EN
;
1337 /* switch priority comparison to packet count mode */
1338 lro_ctrl_dw0
|= MTK_LRO_ALT_PKT_CNT_MODE
;
1340 /* bandwidth threshold setting */
1341 mtk_w32(eth
, MTK_HW_LRO_BW_THRE
, MTK_PDMA_LRO_CTRL_DW2
);
1343 /* auto-learn score delta setting */
1344 mtk_w32(eth
, MTK_HW_LRO_REPLACE_DELTA
, MTK_PDMA_LRO_ALT_SCORE_DELTA
);
1346 /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
1347 mtk_w32(eth
, (MTK_HW_LRO_TIMER_UNIT
<< 16) | MTK_HW_LRO_REFRESH_TIME
,
1348 MTK_PDMA_LRO_ALT_REFRESH_TIMER
);
1350 /* set HW LRO mode & the max aggregation count for rx packets */
1351 lro_ctrl_dw3
|= MTK_ADMA_MODE
| (MTK_HW_LRO_MAX_AGG_CNT
& 0xff);
1353 /* the minimal remaining room of SDL0 in RXD for lro aggregation */
1354 lro_ctrl_dw3
|= MTK_LRO_MIN_RXD_SDL
;
1357 lro_ctrl_dw0
|= MTK_LRO_EN
;
1359 mtk_w32(eth
, lro_ctrl_dw3
, MTK_PDMA_LRO_CTRL_DW3
);
1360 mtk_w32(eth
, lro_ctrl_dw0
, MTK_PDMA_LRO_CTRL_DW0
);
1365 static void mtk_hwlro_rx_uninit(struct mtk_eth
*eth
)
1370 /* relinquish lro rings, flush aggregated packets */
1371 mtk_w32(eth
, MTK_LRO_RING_RELINQUISH_REQ
, MTK_PDMA_LRO_CTRL_DW0
);
1373 /* wait for relinquishments done */
1374 for (i
= 0; i
< 10; i
++) {
1375 val
= mtk_r32(eth
, MTK_PDMA_LRO_CTRL_DW0
);
1376 if (val
& MTK_LRO_RING_RELINQUISH_DONE
) {
1383 /* invalidate lro rings */
1384 for (i
= 1; i
< MTK_MAX_RX_RING_NUM
; i
++)
1385 mtk_w32(eth
, 0, MTK_LRO_CTRL_DW2_CFG(i
));
1387 /* disable HW LRO */
1388 mtk_w32(eth
, 0, MTK_PDMA_LRO_CTRL_DW0
);
1391 static void mtk_hwlro_val_ipaddr(struct mtk_eth
*eth
, int idx
, __be32 ip
)
1395 reg_val
= mtk_r32(eth
, MTK_LRO_CTRL_DW2_CFG(idx
));
1397 /* invalidate the IP setting */
1398 mtk_w32(eth
, (reg_val
& ~MTK_RING_MYIP_VLD
), MTK_LRO_CTRL_DW2_CFG(idx
));
1400 mtk_w32(eth
, ip
, MTK_LRO_DIP_DW0_CFG(idx
));
1402 /* validate the IP setting */
1403 mtk_w32(eth
, (reg_val
| MTK_RING_MYIP_VLD
), MTK_LRO_CTRL_DW2_CFG(idx
));
1406 static void mtk_hwlro_inval_ipaddr(struct mtk_eth
*eth
, int idx
)
1410 reg_val
= mtk_r32(eth
, MTK_LRO_CTRL_DW2_CFG(idx
));
1412 /* invalidate the IP setting */
1413 mtk_w32(eth
, (reg_val
& ~MTK_RING_MYIP_VLD
), MTK_LRO_CTRL_DW2_CFG(idx
));
1415 mtk_w32(eth
, 0, MTK_LRO_DIP_DW0_CFG(idx
));
1418 static int mtk_hwlro_get_ip_cnt(struct mtk_mac
*mac
)
1423 for (i
= 0; i
< MTK_MAX_LRO_IP_CNT
; i
++) {
1424 if (mac
->hwlro_ip
[i
])
1431 static int mtk_hwlro_add_ipaddr(struct net_device
*dev
,
1432 struct ethtool_rxnfc
*cmd
)
1434 struct ethtool_rx_flow_spec
*fsp
=
1435 (struct ethtool_rx_flow_spec
*)&cmd
->fs
;
1436 struct mtk_mac
*mac
= netdev_priv(dev
);
1437 struct mtk_eth
*eth
= mac
->hw
;
1440 if ((fsp
->flow_type
!= TCP_V4_FLOW
) ||
1441 (!fsp
->h_u
.tcp_ip4_spec
.ip4dst
) ||
1442 (fsp
->location
> 1))
1445 mac
->hwlro_ip
[fsp
->location
] = htonl(fsp
->h_u
.tcp_ip4_spec
.ip4dst
);
1446 hwlro_idx
= (mac
->id
* MTK_MAX_LRO_IP_CNT
) + fsp
->location
;
1448 mac
->hwlro_ip_cnt
= mtk_hwlro_get_ip_cnt(mac
);
1450 mtk_hwlro_val_ipaddr(eth
, hwlro_idx
, mac
->hwlro_ip
[fsp
->location
]);
1455 static int mtk_hwlro_del_ipaddr(struct net_device
*dev
,
1456 struct ethtool_rxnfc
*cmd
)
1458 struct ethtool_rx_flow_spec
*fsp
=
1459 (struct ethtool_rx_flow_spec
*)&cmd
->fs
;
1460 struct mtk_mac
*mac
= netdev_priv(dev
);
1461 struct mtk_eth
*eth
= mac
->hw
;
1464 if (fsp
->location
> 1)
1467 mac
->hwlro_ip
[fsp
->location
] = 0;
1468 hwlro_idx
= (mac
->id
* MTK_MAX_LRO_IP_CNT
) + fsp
->location
;
1470 mac
->hwlro_ip_cnt
= mtk_hwlro_get_ip_cnt(mac
);
1472 mtk_hwlro_inval_ipaddr(eth
, hwlro_idx
);
1477 static void mtk_hwlro_netdev_disable(struct net_device
*dev
)
1479 struct mtk_mac
*mac
= netdev_priv(dev
);
1480 struct mtk_eth
*eth
= mac
->hw
;
1483 for (i
= 0; i
< MTK_MAX_LRO_IP_CNT
; i
++) {
1484 mac
->hwlro_ip
[i
] = 0;
1485 hwlro_idx
= (mac
->id
* MTK_MAX_LRO_IP_CNT
) + i
;
1487 mtk_hwlro_inval_ipaddr(eth
, hwlro_idx
);
1490 mac
->hwlro_ip_cnt
= 0;
1493 static int mtk_hwlro_get_fdir_entry(struct net_device
*dev
,
1494 struct ethtool_rxnfc
*cmd
)
1496 struct mtk_mac
*mac
= netdev_priv(dev
);
1497 struct ethtool_rx_flow_spec
*fsp
=
1498 (struct ethtool_rx_flow_spec
*)&cmd
->fs
;
1500 /* only tcp dst ipv4 is meaningful, others are meaningless */
1501 fsp
->flow_type
= TCP_V4_FLOW
;
1502 fsp
->h_u
.tcp_ip4_spec
.ip4dst
= ntohl(mac
->hwlro_ip
[fsp
->location
]);
1503 fsp
->m_u
.tcp_ip4_spec
.ip4dst
= 0;
1505 fsp
->h_u
.tcp_ip4_spec
.ip4src
= 0;
1506 fsp
->m_u
.tcp_ip4_spec
.ip4src
= 0xffffffff;
1507 fsp
->h_u
.tcp_ip4_spec
.psrc
= 0;
1508 fsp
->m_u
.tcp_ip4_spec
.psrc
= 0xffff;
1509 fsp
->h_u
.tcp_ip4_spec
.pdst
= 0;
1510 fsp
->m_u
.tcp_ip4_spec
.pdst
= 0xffff;
1511 fsp
->h_u
.tcp_ip4_spec
.tos
= 0;
1512 fsp
->m_u
.tcp_ip4_spec
.tos
= 0xff;
1517 static int mtk_hwlro_get_fdir_all(struct net_device
*dev
,
1518 struct ethtool_rxnfc
*cmd
,
1521 struct mtk_mac
*mac
= netdev_priv(dev
);
1525 for (i
= 0; i
< MTK_MAX_LRO_IP_CNT
; i
++) {
1526 if (mac
->hwlro_ip
[i
]) {
1532 cmd
->rule_cnt
= cnt
;
1537 static netdev_features_t
mtk_fix_features(struct net_device
*dev
,
1538 netdev_features_t features
)
1540 if (!(features
& NETIF_F_LRO
)) {
1541 struct mtk_mac
*mac
= netdev_priv(dev
);
1542 int ip_cnt
= mtk_hwlro_get_ip_cnt(mac
);
1545 netdev_info(dev
, "RX flow is programmed, LRO should keep on\n");
1547 features
|= NETIF_F_LRO
;
1554 static int mtk_set_features(struct net_device
*dev
, netdev_features_t features
)
1558 if (!((dev
->features
^ features
) & NETIF_F_LRO
))
1561 if (!(features
& NETIF_F_LRO
))
1562 mtk_hwlro_netdev_disable(dev
);
1567 /* wait for DMA to finish whatever it is doing before we start using it again */
1568 static int mtk_dma_busy_wait(struct mtk_eth
*eth
)
1570 unsigned long t_start
= jiffies
;
1573 if (!(mtk_r32(eth
, MTK_QDMA_GLO_CFG
) &
1574 (MTK_RX_DMA_BUSY
| MTK_TX_DMA_BUSY
)))
1576 if (time_after(jiffies
, t_start
+ MTK_DMA_BUSY_TIMEOUT
))
1580 dev_err(eth
->dev
, "DMA init timeout\n");
1584 static int mtk_dma_init(struct mtk_eth
*eth
)
1589 if (mtk_dma_busy_wait(eth
))
1592 /* QDMA needs scratch memory for internal reordering of the
1595 err
= mtk_init_fq_dma(eth
);
1599 err
= mtk_tx_alloc(eth
);
1603 err
= mtk_rx_alloc(eth
, 0, MTK_RX_FLAGS_NORMAL
);
1608 for (i
= 1; i
< MTK_MAX_RX_RING_NUM
; i
++) {
1609 err
= mtk_rx_alloc(eth
, i
, MTK_RX_FLAGS_HWLRO
);
1613 err
= mtk_hwlro_rx_init(eth
);
1618 /* Enable random early drop and set drop threshold automatically */
1619 mtk_w32(eth
, FC_THRES_DROP_MODE
| FC_THRES_DROP_EN
| FC_THRES_MIN
,
1621 mtk_w32(eth
, 0x0, MTK_QDMA_HRED2
);
1626 static void mtk_dma_free(struct mtk_eth
*eth
)
1630 for (i
= 0; i
< MTK_MAC_COUNT
; i
++)
1632 netdev_reset_queue(eth
->netdev
[i
]);
1633 if (eth
->scratch_ring
) {
1634 dma_free_coherent(eth
->dev
,
1635 MTK_DMA_SIZE
* sizeof(struct mtk_tx_dma
),
1637 eth
->phy_scratch_ring
);
1638 eth
->scratch_ring
= NULL
;
1639 eth
->phy_scratch_ring
= 0;
1642 mtk_rx_clean(eth
, 0);
1645 mtk_hwlro_rx_uninit(eth
);
1646 for (i
= 1; i
< MTK_MAX_RX_RING_NUM
; i
++)
1647 mtk_rx_clean(eth
, i
);
1650 kfree(eth
->scratch_head
);
1653 static void mtk_tx_timeout(struct net_device
*dev
)
1655 struct mtk_mac
*mac
= netdev_priv(dev
);
1656 struct mtk_eth
*eth
= mac
->hw
;
1658 eth
->netdev
[mac
->id
]->stats
.tx_errors
++;
1659 netif_err(eth
, tx_err
, dev
,
1660 "transmit timed out\n");
1661 schedule_work(ð
->pending_work
);
1664 static irqreturn_t
mtk_handle_irq_rx(int irq
, void *_eth
)
1666 struct mtk_eth
*eth
= _eth
;
1668 if (likely(napi_schedule_prep(ð
->rx_napi
))) {
1669 __napi_schedule(ð
->rx_napi
);
1670 mtk_irq_disable(eth
, MTK_PDMA_INT_MASK
, MTK_RX_DONE_INT
);
1676 static irqreturn_t
mtk_handle_irq_tx(int irq
, void *_eth
)
1678 struct mtk_eth
*eth
= _eth
;
1680 if (likely(napi_schedule_prep(ð
->tx_napi
))) {
1681 __napi_schedule(ð
->tx_napi
);
1682 mtk_irq_disable(eth
, MTK_QDMA_INT_MASK
, MTK_TX_DONE_INT
);
1688 #ifdef CONFIG_NET_POLL_CONTROLLER
1689 static void mtk_poll_controller(struct net_device
*dev
)
1691 struct mtk_mac
*mac
= netdev_priv(dev
);
1692 struct mtk_eth
*eth
= mac
->hw
;
1694 mtk_irq_disable(eth
, MTK_QDMA_INT_MASK
, MTK_TX_DONE_INT
);
1695 mtk_irq_disable(eth
, MTK_PDMA_INT_MASK
, MTK_RX_DONE_INT
);
1696 mtk_handle_irq_rx(eth
->irq
[2], dev
);
1697 mtk_irq_enable(eth
, MTK_QDMA_INT_MASK
, MTK_TX_DONE_INT
);
1698 mtk_irq_enable(eth
, MTK_PDMA_INT_MASK
, MTK_RX_DONE_INT
);
1702 static int mtk_start_dma(struct mtk_eth
*eth
)
1706 err
= mtk_dma_init(eth
);
1713 MTK_TX_WB_DDONE
| MTK_TX_DMA_EN
|
1714 MTK_DMA_SIZE_16DWORDS
| MTK_NDP_CO_PRO
,
1718 MTK_RX_DMA_EN
| MTK_RX_2B_OFFSET
|
1719 MTK_RX_BT_32DWORDS
| MTK_MULTI_EN
,
1725 static int mtk_open(struct net_device
*dev
)
1727 struct mtk_mac
*mac
= netdev_priv(dev
);
1728 struct mtk_eth
*eth
= mac
->hw
;
1730 /* we run 2 netdevs on the same dma ring so we only bring it up once */
1731 if (!atomic_read(ð
->dma_refcnt
)) {
1732 int err
= mtk_start_dma(eth
);
1737 napi_enable(ð
->tx_napi
);
1738 napi_enable(ð
->rx_napi
);
1739 mtk_irq_enable(eth
, MTK_QDMA_INT_MASK
, MTK_TX_DONE_INT
);
1740 mtk_irq_enable(eth
, MTK_PDMA_INT_MASK
, MTK_RX_DONE_INT
);
1742 atomic_inc(ð
->dma_refcnt
);
1744 phy_start(dev
->phydev
);
1745 netif_start_queue(dev
);
1750 static void mtk_stop_dma(struct mtk_eth
*eth
, u32 glo_cfg
)
1755 /* stop the dma engine */
1756 spin_lock_bh(ð
->page_lock
);
1757 val
= mtk_r32(eth
, glo_cfg
);
1758 mtk_w32(eth
, val
& ~(MTK_TX_WB_DDONE
| MTK_RX_DMA_EN
| MTK_TX_DMA_EN
),
1760 spin_unlock_bh(ð
->page_lock
);
1762 /* wait for dma stop */
1763 for (i
= 0; i
< 10; i
++) {
1764 val
= mtk_r32(eth
, glo_cfg
);
1765 if (val
& (MTK_TX_DMA_BUSY
| MTK_RX_DMA_BUSY
)) {
1773 static int mtk_stop(struct net_device
*dev
)
1775 struct mtk_mac
*mac
= netdev_priv(dev
);
1776 struct mtk_eth
*eth
= mac
->hw
;
1778 netif_tx_disable(dev
);
1779 phy_stop(dev
->phydev
);
1781 /* only shutdown DMA if this is the last user */
1782 if (!atomic_dec_and_test(ð
->dma_refcnt
))
1785 mtk_irq_disable(eth
, MTK_QDMA_INT_MASK
, MTK_TX_DONE_INT
);
1786 mtk_irq_disable(eth
, MTK_PDMA_INT_MASK
, MTK_RX_DONE_INT
);
1787 napi_disable(ð
->tx_napi
);
1788 napi_disable(ð
->rx_napi
);
1790 mtk_stop_dma(eth
, MTK_QDMA_GLO_CFG
);
1791 mtk_stop_dma(eth
, MTK_PDMA_GLO_CFG
);
1798 static void ethsys_reset(struct mtk_eth
*eth
, u32 reset_bits
)
1800 regmap_update_bits(eth
->ethsys
, ETHSYS_RSTCTRL
,
1804 usleep_range(1000, 1100);
1805 regmap_update_bits(eth
->ethsys
, ETHSYS_RSTCTRL
,
1811 static int mtk_hw_init(struct mtk_eth
*eth
)
1815 if (test_and_set_bit(MTK_HW_INIT
, ð
->state
))
1818 pm_runtime_enable(eth
->dev
);
1819 pm_runtime_get_sync(eth
->dev
);
1821 clk_prepare_enable(eth
->clks
[MTK_CLK_ETHIF
]);
1822 clk_prepare_enable(eth
->clks
[MTK_CLK_ESW
]);
1823 clk_prepare_enable(eth
->clks
[MTK_CLK_GP1
]);
1824 clk_prepare_enable(eth
->clks
[MTK_CLK_GP2
]);
1825 ethsys_reset(eth
, RSTCTRL_FE
);
1826 ethsys_reset(eth
, RSTCTRL_PPE
);
1828 regmap_read(eth
->ethsys
, ETHSYS_SYSCFG0
, &val
);
1829 for (i
= 0; i
< MTK_MAC_COUNT
; i
++) {
1832 val
&= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK
, eth
->mac
[i
]->id
);
1833 val
|= SYSCFG0_GE_MODE(eth
->mac
[i
]->ge_mode
, eth
->mac
[i
]->id
);
1835 regmap_write(eth
->ethsys
, ETHSYS_SYSCFG0
, val
);
1837 /* Set GE2 driving and slew rate */
1838 regmap_write(eth
->pctl
, GPIO_DRV_SEL10
, 0xa00);
1841 regmap_write(eth
->pctl
, GPIO_OD33_CTRL8
, 0x5);
1844 regmap_write(eth
->pctl
, GPIO_BIAS_CTRL
, 0x0);
1846 /* GE1, Force 1000M/FD, FC ON */
1847 mtk_w32(eth
, MAC_MCR_FIXED_LINK
, MTK_MAC_MCR(0));
1849 /* GE2, Force 1000M/FD, FC ON */
1850 mtk_w32(eth
, MAC_MCR_FIXED_LINK
, MTK_MAC_MCR(1));
1852 /* Enable RX VLan Offloading */
1853 mtk_w32(eth
, 1, MTK_CDMP_EG_CTRL
);
1855 /* disable delay and normal interrupt */
1856 mtk_w32(eth
, 0, MTK_QDMA_DELAY_INT
);
1857 mtk_w32(eth
, 0, MTK_PDMA_DELAY_INT
);
1858 mtk_irq_disable(eth
, MTK_QDMA_INT_MASK
, ~0);
1859 mtk_irq_disable(eth
, MTK_PDMA_INT_MASK
, ~0);
1860 mtk_w32(eth
, RST_GL_PSE
, MTK_RST_GL
);
1861 mtk_w32(eth
, 0, MTK_RST_GL
);
1863 /* FE int grouping */
1864 mtk_w32(eth
, MTK_TX_DONE_INT
, MTK_PDMA_INT_GRP1
);
1865 mtk_w32(eth
, MTK_RX_DONE_INT
, MTK_PDMA_INT_GRP2
);
1866 mtk_w32(eth
, MTK_TX_DONE_INT
, MTK_QDMA_INT_GRP1
);
1867 mtk_w32(eth
, MTK_RX_DONE_INT
, MTK_QDMA_INT_GRP2
);
1868 mtk_w32(eth
, 0x21021000, MTK_FE_INT_GRP
);
1870 for (i
= 0; i
< 2; i
++) {
1871 u32 val
= mtk_r32(eth
, MTK_GDMA_FWD_CFG(i
));
1873 /* setup the forward port to send frame to PDMA */
1876 /* Enable RX checksum */
1877 val
|= MTK_GDMA_ICS_EN
| MTK_GDMA_TCS_EN
| MTK_GDMA_UCS_EN
;
1879 /* setup the mac dma */
1880 mtk_w32(eth
, val
, MTK_GDMA_FWD_CFG(i
));
1886 static int mtk_hw_deinit(struct mtk_eth
*eth
)
1888 if (!test_and_clear_bit(MTK_HW_INIT
, ð
->state
))
1891 clk_disable_unprepare(eth
->clks
[MTK_CLK_GP2
]);
1892 clk_disable_unprepare(eth
->clks
[MTK_CLK_GP1
]);
1893 clk_disable_unprepare(eth
->clks
[MTK_CLK_ESW
]);
1894 clk_disable_unprepare(eth
->clks
[MTK_CLK_ETHIF
]);
1896 pm_runtime_put_sync(eth
->dev
);
1897 pm_runtime_disable(eth
->dev
);
1902 static int __init
mtk_init(struct net_device
*dev
)
1904 struct mtk_mac
*mac
= netdev_priv(dev
);
1905 struct mtk_eth
*eth
= mac
->hw
;
1906 const char *mac_addr
;
1908 mac_addr
= of_get_mac_address(mac
->of_node
);
1910 ether_addr_copy(dev
->dev_addr
, mac_addr
);
1912 /* If the mac address is invalid, use random mac address */
1913 if (!is_valid_ether_addr(dev
->dev_addr
)) {
1914 random_ether_addr(dev
->dev_addr
);
1915 dev_err(eth
->dev
, "generated random MAC address %pM\n",
1917 dev
->addr_assign_type
= NET_ADDR_RANDOM
;
1920 return mtk_phy_connect(dev
);
1923 static void mtk_uninit(struct net_device
*dev
)
1925 struct mtk_mac
*mac
= netdev_priv(dev
);
1926 struct mtk_eth
*eth
= mac
->hw
;
1928 phy_disconnect(dev
->phydev
);
1929 if (of_phy_is_fixed_link(mac
->of_node
))
1930 of_phy_deregister_fixed_link(mac
->of_node
);
1931 mtk_irq_disable(eth
, MTK_QDMA_INT_MASK
, ~0);
1932 mtk_irq_disable(eth
, MTK_PDMA_INT_MASK
, ~0);
1935 static int mtk_do_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
1941 return phy_mii_ioctl(dev
->phydev
, ifr
, cmd
);
1949 static void mtk_pending_work(struct work_struct
*work
)
1951 struct mtk_eth
*eth
= container_of(work
, struct mtk_eth
, pending_work
);
1953 unsigned long restart
= 0;
1957 dev_dbg(eth
->dev
, "[%s][%d] reset\n", __func__
, __LINE__
);
1959 while (test_and_set_bit_lock(MTK_RESETTING
, ð
->state
))
1962 dev_dbg(eth
->dev
, "[%s][%d] mtk_stop starts\n", __func__
, __LINE__
);
1963 /* stop all devices to make sure that dma is properly shut down */
1964 for (i
= 0; i
< MTK_MAC_COUNT
; i
++) {
1965 if (!eth
->netdev
[i
])
1967 mtk_stop(eth
->netdev
[i
]);
1968 __set_bit(i
, &restart
);
1970 dev_dbg(eth
->dev
, "[%s][%d] mtk_stop ends\n", __func__
, __LINE__
);
1972 /* restart underlying hardware such as power, clock, pin mux
1973 * and the connected phy
1978 pinctrl_select_state(eth
->dev
->pins
->p
,
1979 eth
->dev
->pins
->default_state
);
1982 for (i
= 0; i
< MTK_MAC_COUNT
; i
++) {
1984 of_phy_is_fixed_link(eth
->mac
[i
]->of_node
))
1986 err
= phy_init_hw(eth
->netdev
[i
]->phydev
);
1988 dev_err(eth
->dev
, "%s: PHY init failed.\n",
1989 eth
->netdev
[i
]->name
);
1992 /* restart DMA and enable IRQs */
1993 for (i
= 0; i
< MTK_MAC_COUNT
; i
++) {
1994 if (!test_bit(i
, &restart
))
1996 err
= mtk_open(eth
->netdev
[i
]);
1998 netif_alert(eth
, ifup
, eth
->netdev
[i
],
1999 "Driver up/down cycle failed, closing device.\n");
2000 dev_close(eth
->netdev
[i
]);
2004 dev_dbg(eth
->dev
, "[%s][%d] reset done\n", __func__
, __LINE__
);
2006 clear_bit_unlock(MTK_RESETTING
, ð
->state
);
2011 static int mtk_free_dev(struct mtk_eth
*eth
)
2015 for (i
= 0; i
< MTK_MAC_COUNT
; i
++) {
2016 if (!eth
->netdev
[i
])
2018 free_netdev(eth
->netdev
[i
]);
2024 static int mtk_unreg_dev(struct mtk_eth
*eth
)
2028 for (i
= 0; i
< MTK_MAC_COUNT
; i
++) {
2029 if (!eth
->netdev
[i
])
2031 unregister_netdev(eth
->netdev
[i
]);
2037 static int mtk_cleanup(struct mtk_eth
*eth
)
2041 cancel_work_sync(ð
->pending_work
);
2046 static int mtk_get_link_ksettings(struct net_device
*ndev
,
2047 struct ethtool_link_ksettings
*cmd
)
2049 struct mtk_mac
*mac
= netdev_priv(ndev
);
2051 if (unlikely(test_bit(MTK_RESETTING
, &mac
->hw
->state
)))
2054 return phy_ethtool_ksettings_get(ndev
->phydev
, cmd
);
2057 static int mtk_set_link_ksettings(struct net_device
*ndev
,
2058 const struct ethtool_link_ksettings
*cmd
)
2060 struct mtk_mac
*mac
= netdev_priv(ndev
);
2062 if (unlikely(test_bit(MTK_RESETTING
, &mac
->hw
->state
)))
2065 return phy_ethtool_ksettings_set(ndev
->phydev
, cmd
);
2068 static void mtk_get_drvinfo(struct net_device
*dev
,
2069 struct ethtool_drvinfo
*info
)
2071 struct mtk_mac
*mac
= netdev_priv(dev
);
2073 strlcpy(info
->driver
, mac
->hw
->dev
->driver
->name
, sizeof(info
->driver
));
2074 strlcpy(info
->bus_info
, dev_name(mac
->hw
->dev
), sizeof(info
->bus_info
));
2075 info
->n_stats
= ARRAY_SIZE(mtk_ethtool_stats
);
2078 static u32
mtk_get_msglevel(struct net_device
*dev
)
2080 struct mtk_mac
*mac
= netdev_priv(dev
);
2082 return mac
->hw
->msg_enable
;
2085 static void mtk_set_msglevel(struct net_device
*dev
, u32 value
)
2087 struct mtk_mac
*mac
= netdev_priv(dev
);
2089 mac
->hw
->msg_enable
= value
;
2092 static int mtk_nway_reset(struct net_device
*dev
)
2094 struct mtk_mac
*mac
= netdev_priv(dev
);
2096 if (unlikely(test_bit(MTK_RESETTING
, &mac
->hw
->state
)))
2099 return genphy_restart_aneg(dev
->phydev
);
2102 static u32
mtk_get_link(struct net_device
*dev
)
2104 struct mtk_mac
*mac
= netdev_priv(dev
);
2107 if (unlikely(test_bit(MTK_RESETTING
, &mac
->hw
->state
)))
2110 err
= genphy_update_link(dev
->phydev
);
2112 return ethtool_op_get_link(dev
);
2114 return dev
->phydev
->link
;
2117 static void mtk_get_strings(struct net_device
*dev
, u32 stringset
, u8
*data
)
2121 switch (stringset
) {
2123 for (i
= 0; i
< ARRAY_SIZE(mtk_ethtool_stats
); i
++) {
2124 memcpy(data
, mtk_ethtool_stats
[i
].str
, ETH_GSTRING_LEN
);
2125 data
+= ETH_GSTRING_LEN
;
2131 static int mtk_get_sset_count(struct net_device
*dev
, int sset
)
2135 return ARRAY_SIZE(mtk_ethtool_stats
);
2141 static void mtk_get_ethtool_stats(struct net_device
*dev
,
2142 struct ethtool_stats
*stats
, u64
*data
)
2144 struct mtk_mac
*mac
= netdev_priv(dev
);
2145 struct mtk_hw_stats
*hwstats
= mac
->hw_stats
;
2146 u64
*data_src
, *data_dst
;
2150 if (unlikely(test_bit(MTK_RESETTING
, &mac
->hw
->state
)))
2153 if (netif_running(dev
) && netif_device_present(dev
)) {
2154 if (spin_trylock(&hwstats
->stats_lock
)) {
2155 mtk_stats_update_mac(mac
);
2156 spin_unlock(&hwstats
->stats_lock
);
2160 data_src
= (u64
*)hwstats
;
2164 start
= u64_stats_fetch_begin_irq(&hwstats
->syncp
);
2166 for (i
= 0; i
< ARRAY_SIZE(mtk_ethtool_stats
); i
++)
2167 *data_dst
++ = *(data_src
+ mtk_ethtool_stats
[i
].offset
);
2168 } while (u64_stats_fetch_retry_irq(&hwstats
->syncp
, start
));
2171 static int mtk_get_rxnfc(struct net_device
*dev
, struct ethtool_rxnfc
*cmd
,
2174 int ret
= -EOPNOTSUPP
;
2177 case ETHTOOL_GRXRINGS
:
2178 if (dev
->features
& NETIF_F_LRO
) {
2179 cmd
->data
= MTK_MAX_RX_RING_NUM
;
2183 case ETHTOOL_GRXCLSRLCNT
:
2184 if (dev
->features
& NETIF_F_LRO
) {
2185 struct mtk_mac
*mac
= netdev_priv(dev
);
2187 cmd
->rule_cnt
= mac
->hwlro_ip_cnt
;
2191 case ETHTOOL_GRXCLSRULE
:
2192 if (dev
->features
& NETIF_F_LRO
)
2193 ret
= mtk_hwlro_get_fdir_entry(dev
, cmd
);
2195 case ETHTOOL_GRXCLSRLALL
:
2196 if (dev
->features
& NETIF_F_LRO
)
2197 ret
= mtk_hwlro_get_fdir_all(dev
, cmd
,
2207 static int mtk_set_rxnfc(struct net_device
*dev
, struct ethtool_rxnfc
*cmd
)
2209 int ret
= -EOPNOTSUPP
;
2212 case ETHTOOL_SRXCLSRLINS
:
2213 if (dev
->features
& NETIF_F_LRO
)
2214 ret
= mtk_hwlro_add_ipaddr(dev
, cmd
);
2216 case ETHTOOL_SRXCLSRLDEL
:
2217 if (dev
->features
& NETIF_F_LRO
)
2218 ret
= mtk_hwlro_del_ipaddr(dev
, cmd
);
2227 static const struct ethtool_ops mtk_ethtool_ops
= {
2228 .get_link_ksettings
= mtk_get_link_ksettings
,
2229 .set_link_ksettings
= mtk_set_link_ksettings
,
2230 .get_drvinfo
= mtk_get_drvinfo
,
2231 .get_msglevel
= mtk_get_msglevel
,
2232 .set_msglevel
= mtk_set_msglevel
,
2233 .nway_reset
= mtk_nway_reset
,
2234 .get_link
= mtk_get_link
,
2235 .get_strings
= mtk_get_strings
,
2236 .get_sset_count
= mtk_get_sset_count
,
2237 .get_ethtool_stats
= mtk_get_ethtool_stats
,
2238 .get_rxnfc
= mtk_get_rxnfc
,
2239 .set_rxnfc
= mtk_set_rxnfc
,
2242 static const struct net_device_ops mtk_netdev_ops
= {
2243 .ndo_init
= mtk_init
,
2244 .ndo_uninit
= mtk_uninit
,
2245 .ndo_open
= mtk_open
,
2246 .ndo_stop
= mtk_stop
,
2247 .ndo_start_xmit
= mtk_start_xmit
,
2248 .ndo_set_mac_address
= mtk_set_mac_address
,
2249 .ndo_validate_addr
= eth_validate_addr
,
2250 .ndo_do_ioctl
= mtk_do_ioctl
,
2251 .ndo_tx_timeout
= mtk_tx_timeout
,
2252 .ndo_get_stats64
= mtk_get_stats64
,
2253 .ndo_fix_features
= mtk_fix_features
,
2254 .ndo_set_features
= mtk_set_features
,
2255 #ifdef CONFIG_NET_POLL_CONTROLLER
2256 .ndo_poll_controller
= mtk_poll_controller
,
2260 static int mtk_add_mac(struct mtk_eth
*eth
, struct device_node
*np
)
2262 struct mtk_mac
*mac
;
2263 const __be32
*_id
= of_get_property(np
, "reg", NULL
);
2267 dev_err(eth
->dev
, "missing mac id\n");
2271 id
= be32_to_cpup(_id
);
2272 if (id
>= MTK_MAC_COUNT
) {
2273 dev_err(eth
->dev
, "%d is not a valid mac id\n", id
);
2277 if (eth
->netdev
[id
]) {
2278 dev_err(eth
->dev
, "duplicate mac id found: %d\n", id
);
2282 eth
->netdev
[id
] = alloc_etherdev(sizeof(*mac
));
2283 if (!eth
->netdev
[id
]) {
2284 dev_err(eth
->dev
, "alloc_etherdev failed\n");
2287 mac
= netdev_priv(eth
->netdev
[id
]);
2293 memset(mac
->hwlro_ip
, 0, sizeof(mac
->hwlro_ip
));
2294 mac
->hwlro_ip_cnt
= 0;
2296 mac
->hw_stats
= devm_kzalloc(eth
->dev
,
2297 sizeof(*mac
->hw_stats
),
2299 if (!mac
->hw_stats
) {
2300 dev_err(eth
->dev
, "failed to allocate counter memory\n");
2304 spin_lock_init(&mac
->hw_stats
->stats_lock
);
2305 u64_stats_init(&mac
->hw_stats
->syncp
);
2306 mac
->hw_stats
->reg_offset
= id
* MTK_STAT_OFFSET
;
2308 SET_NETDEV_DEV(eth
->netdev
[id
], eth
->dev
);
2309 eth
->netdev
[id
]->watchdog_timeo
= 5 * HZ
;
2310 eth
->netdev
[id
]->netdev_ops
= &mtk_netdev_ops
;
2311 eth
->netdev
[id
]->base_addr
= (unsigned long)eth
->base
;
2313 eth
->netdev
[id
]->hw_features
= MTK_HW_FEATURES
;
2315 eth
->netdev
[id
]->hw_features
|= NETIF_F_LRO
;
2317 eth
->netdev
[id
]->vlan_features
= MTK_HW_FEATURES
&
2318 ~(NETIF_F_HW_VLAN_CTAG_TX
| NETIF_F_HW_VLAN_CTAG_RX
);
2319 eth
->netdev
[id
]->features
|= MTK_HW_FEATURES
;
2320 eth
->netdev
[id
]->ethtool_ops
= &mtk_ethtool_ops
;
2322 eth
->netdev
[id
]->irq
= eth
->irq
[0];
2326 free_netdev(eth
->netdev
[id
]);
2330 static int mtk_get_chip_id(struct mtk_eth
*eth
, u32
*chip_id
)
2334 regmap_read(eth
->ethsys
, ETHSYS_CHIPID0_3
, &val
[0]);
2335 regmap_read(eth
->ethsys
, ETHSYS_CHIPID4_7
, &val
[1]);
2337 id
[3] = ((val
[0] >> 16) & 0xff) - '0';
2338 id
[2] = ((val
[0] >> 24) & 0xff) - '0';
2339 id
[1] = (val
[1] & 0xff) - '0';
2340 id
[0] = ((val
[1] >> 8) & 0xff) - '0';
2342 *chip_id
= (id
[3] * 1000) + (id
[2] * 100) +
2343 (id
[1] * 10) + id
[0];
2346 dev_err(eth
->dev
, "failed to get chip id\n");
2350 dev_info(eth
->dev
, "chip id = %d\n", *chip_id
);
2355 static bool mtk_is_hwlro_supported(struct mtk_eth
*eth
)
2357 switch (eth
->chip_id
) {
2365 static int mtk_probe(struct platform_device
*pdev
)
2367 struct resource
*res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
2368 struct device_node
*mac_np
;
2369 const struct of_device_id
*match
;
2370 struct mtk_soc_data
*soc
;
2371 struct mtk_eth
*eth
;
2375 match
= of_match_device(of_mtk_match
, &pdev
->dev
);
2376 soc
= (struct mtk_soc_data
*)match
->data
;
2378 eth
= devm_kzalloc(&pdev
->dev
, sizeof(*eth
), GFP_KERNEL
);
2382 eth
->dev
= &pdev
->dev
;
2383 eth
->base
= devm_ioremap_resource(&pdev
->dev
, res
);
2384 if (IS_ERR(eth
->base
))
2385 return PTR_ERR(eth
->base
);
2387 spin_lock_init(ð
->page_lock
);
2388 spin_lock_init(ð
->irq_lock
);
2390 eth
->ethsys
= syscon_regmap_lookup_by_phandle(pdev
->dev
.of_node
,
2392 if (IS_ERR(eth
->ethsys
)) {
2393 dev_err(&pdev
->dev
, "no ethsys regmap found\n");
2394 return PTR_ERR(eth
->ethsys
);
2397 eth
->pctl
= syscon_regmap_lookup_by_phandle(pdev
->dev
.of_node
,
2399 if (IS_ERR(eth
->pctl
)) {
2400 dev_err(&pdev
->dev
, "no pctl regmap found\n");
2401 return PTR_ERR(eth
->pctl
);
2404 for (i
= 0; i
< 3; i
++) {
2405 eth
->irq
[i
] = platform_get_irq(pdev
, i
);
2406 if (eth
->irq
[i
] < 0) {
2407 dev_err(&pdev
->dev
, "no IRQ%d resource found\n", i
);
2411 for (i
= 0; i
< ARRAY_SIZE(eth
->clks
); i
++) {
2412 eth
->clks
[i
] = devm_clk_get(eth
->dev
,
2413 mtk_clks_source_name
[i
]);
2414 if (IS_ERR(eth
->clks
[i
])) {
2415 if (PTR_ERR(eth
->clks
[i
]) == -EPROBE_DEFER
)
2416 return -EPROBE_DEFER
;
2421 eth
->msg_enable
= netif_msg_init(mtk_msg_level
, MTK_DEFAULT_MSG_ENABLE
);
2422 INIT_WORK(ð
->pending_work
, mtk_pending_work
);
2424 err
= mtk_hw_init(eth
);
2428 err
= mtk_get_chip_id(eth
, ð
->chip_id
);
2432 eth
->hwlro
= mtk_is_hwlro_supported(eth
);
2434 for_each_child_of_node(pdev
->dev
.of_node
, mac_np
) {
2435 if (!of_device_is_compatible(mac_np
,
2436 "mediatek,eth-mac"))
2439 if (!of_device_is_available(mac_np
))
2442 err
= mtk_add_mac(eth
, mac_np
);
2447 err
= devm_request_irq(eth
->dev
, eth
->irq
[1], mtk_handle_irq_tx
, 0,
2448 dev_name(eth
->dev
), eth
);
2452 err
= devm_request_irq(eth
->dev
, eth
->irq
[2], mtk_handle_irq_rx
, 0,
2453 dev_name(eth
->dev
), eth
);
2457 err
= mtk_mdio_init(eth
);
2461 for (i
= 0; i
< MTK_MAX_DEVS
; i
++) {
2462 if (!eth
->netdev
[i
])
2465 err
= register_netdev(eth
->netdev
[i
]);
2467 dev_err(eth
->dev
, "error bringing up device\n");
2468 goto err_deinit_mdio
;
2470 netif_info(eth
, probe
, eth
->netdev
[i
],
2471 "mediatek frame engine at 0x%08lx, irq %d\n",
2472 eth
->netdev
[i
]->base_addr
, eth
->irq
[0]);
2475 /* we run 2 devices on the same DMA ring so we need a dummy device
2478 init_dummy_netdev(ð
->dummy_dev
);
2479 netif_napi_add(ð
->dummy_dev
, ð
->tx_napi
, mtk_napi_tx
,
2481 netif_napi_add(ð
->dummy_dev
, ð
->rx_napi
, mtk_napi_rx
,
2484 platform_set_drvdata(pdev
, eth
);
2489 mtk_mdio_cleanup(eth
);
2498 static int mtk_remove(struct platform_device
*pdev
)
2500 struct mtk_eth
*eth
= platform_get_drvdata(pdev
);
2503 /* stop all devices to make sure that dma is properly shut down */
2504 for (i
= 0; i
< MTK_MAC_COUNT
; i
++) {
2505 if (!eth
->netdev
[i
])
2507 mtk_stop(eth
->netdev
[i
]);
2512 netif_napi_del(ð
->tx_napi
);
2513 netif_napi_del(ð
->rx_napi
);
2515 mtk_mdio_cleanup(eth
);
2520 const struct of_device_id of_mtk_match
[] = {
2521 { .compatible
= "mediatek,mt2701-eth" },
2524 MODULE_DEVICE_TABLE(of
, of_mtk_match
);
2526 static struct platform_driver mtk_driver
= {
2528 .remove
= mtk_remove
,
2530 .name
= "mtk_soc_eth",
2531 .of_match_table
= of_mtk_match
,
2535 module_platform_driver(mtk_driver
);
2537 MODULE_LICENSE("GPL");
2538 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
2539 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");