// SPDX-License-Identifier: GPL-2.0
/* Atheros AR71xx built-in ethernet mac driver
 *
 * Copyright (C) 2019 Oleksij Rempel <o.rempel@pengutronix.de>
 *
 * List of authors who contributed to this driver before mainlining:
 * Alexander Couzens <lynxis@fe80.eu>
 * Christian Lamparter <chunkeey@gmail.com>
 * Chuanhong Guo <gch981213@gmail.com>
 * Daniel F. Dickinson <cshored@thecshore.com>
 * David Bauer <mail@david-bauer.net>
 * Felix Fietkau <nbd@nbd.name>
 * Gabor Juhos <juhosg@freemail.hu>
 * Hauke Mehrtens <hauke@hauke-m.de>
 * Johann Neuhauser <johann@it-neuhauser.de>
 * John Crispin <john@phrozen.org>
 * Jo-Philipp Wich <jo@mein.io>
 * Koen Vandeputte <koen.vandeputte@ncentric.com>
 * Lucian Cristian <lucian.cristian@gmail.com>
 * Matt Merhar <mattmerhar@protonmail.com>
 * Milan Krstic <milan.krstic@gmail.com>
 * Petr Štetiar <ynezz@true.cz>
 * Rosen Penev <rosenp@gmail.com>
 * Stephen Walker <stephendwalker+github@gmail.com>
 * Vittorio Gambaletta <openwrt@vittgam.net>
 * Weijie Gao <hackpascal@gmail.com>
 * Imre Kaloz <kaloz@openwrt.org>
 */

#include <linux/if_vlan.h>
#include <linux/mfd/syscon.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/clk.h>

/* For our NAPI weight bigger does *NOT* mean better - it means more
 * D-cache misses and lots more wasted cycles than we'll ever
 * possibly gain from saving instructions.
 */
#define AG71XX_NAPI_WEIGHT	32
#define AG71XX_OOM_REFILL	(1 + HZ / 10)

#define AG71XX_INT_ERR	(AG71XX_INT_RX_BE | AG71XX_INT_TX_BE)
#define AG71XX_INT_TX	(AG71XX_INT_TX_PS)
#define AG71XX_INT_RX	(AG71XX_INT_RX_PR | AG71XX_INT_RX_OF)

#define AG71XX_INT_POLL	(AG71XX_INT_RX | AG71XX_INT_TX)
#define AG71XX_INT_INIT	(AG71XX_INT_ERR | AG71XX_INT_POLL)

#define AG71XX_TX_MTU_LEN	1540

#define AG71XX_TX_RING_SPLIT		512
#define AG71XX_TX_RING_DS_PER_PKT	DIV_ROUND_UP(AG71XX_TX_MTU_LEN, \
						     AG71XX_TX_RING_SPLIT)
#define AG71XX_TX_RING_SIZE_DEFAULT	128
#define AG71XX_RX_RING_SIZE_DEFAULT	256

#define AG71XX_MDIO_RETRY	1000
#define AG71XX_MDIO_DELAY	5
#define AG71XX_MDIO_MAX_CLK	5000000

/* Register offsets */
#define AG71XX_REG_MAC_CFG1	0x0000
#define MAC_CFG1_TXE		BIT(0)	/* Tx Enable */
#define MAC_CFG1_STX		BIT(1)	/* Synchronize Tx Enable */
#define MAC_CFG1_RXE		BIT(2)	/* Rx Enable */
#define MAC_CFG1_SRX		BIT(3)	/* Synchronize Rx Enable */
#define MAC_CFG1_TFC		BIT(4)	/* Tx Flow Control Enable */
#define MAC_CFG1_RFC		BIT(5)	/* Rx Flow Control Enable */
#define MAC_CFG1_SR		BIT(31)	/* Soft Reset */
#define MAC_CFG1_INIT	(MAC_CFG1_RXE | MAC_CFG1_TXE | \
			 MAC_CFG1_SRX | MAC_CFG1_STX)

#define AG71XX_REG_MAC_CFG2	0x0004
#define MAC_CFG2_FDX		BIT(0)
#define MAC_CFG2_PAD_CRC_EN	BIT(2)
#define MAC_CFG2_LEN_CHECK	BIT(4)
#define MAC_CFG2_IF_1000	BIT(9)
#define MAC_CFG2_IF_10_100	BIT(8)

#define AG71XX_REG_MAC_MFL	0x0010

#define AG71XX_REG_MII_CFG	0x0020
#define MII_CFG_CLK_DIV_4	0
#define MII_CFG_CLK_DIV_6	2
#define MII_CFG_CLK_DIV_8	3
#define MII_CFG_CLK_DIV_10	4
#define MII_CFG_CLK_DIV_14	5
#define MII_CFG_CLK_DIV_20	6
#define MII_CFG_CLK_DIV_28	7
#define MII_CFG_CLK_DIV_34	8
#define MII_CFG_CLK_DIV_42	9
#define MII_CFG_CLK_DIV_50	10
#define MII_CFG_CLK_DIV_58	11
#define MII_CFG_CLK_DIV_66	12
#define MII_CFG_CLK_DIV_74	13
#define MII_CFG_CLK_DIV_82	14
#define MII_CFG_CLK_DIV_98	15
#define MII_CFG_RESET		BIT(31)

#define AG71XX_REG_MII_CMD	0x0024
#define MII_CMD_READ		BIT(0)

#define AG71XX_REG_MII_ADDR	0x0028
#define MII_ADDR_SHIFT		8

#define AG71XX_REG_MII_CTRL	0x002c
#define AG71XX_REG_MII_STATUS	0x0030
#define AG71XX_REG_MII_IND	0x0034
#define MII_IND_BUSY		BIT(0)
#define MII_IND_INVALID		BIT(2)

#define AG71XX_REG_MAC_IFCTL	0x0038
#define MAC_IFCTL_SPEED		BIT(16)

#define AG71XX_REG_MAC_ADDR1	0x0040
#define AG71XX_REG_MAC_ADDR2	0x0044
#define AG71XX_REG_FIFO_CFG0	0x0048
#define FIFO_CFG0_WTM		BIT(0)	/* Watermark Module */
#define FIFO_CFG0_RXS		BIT(1)	/* Rx System Module */
#define FIFO_CFG0_RXF		BIT(2)	/* Rx Fabric Module */
#define FIFO_CFG0_TXS		BIT(3)	/* Tx System Module */
#define FIFO_CFG0_TXF		BIT(4)	/* Tx Fabric Module */
#define FIFO_CFG0_ALL	(FIFO_CFG0_WTM | FIFO_CFG0_RXS | FIFO_CFG0_RXF \
			 | FIFO_CFG0_TXS | FIFO_CFG0_TXF)
#define FIFO_CFG0_INIT	(FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)

#define FIFO_CFG0_ENABLE_SHIFT	8

#define AG71XX_REG_FIFO_CFG1	0x004c
#define AG71XX_REG_FIFO_CFG2	0x0050
#define AG71XX_REG_FIFO_CFG3	0x0054
#define AG71XX_REG_FIFO_CFG4	0x0058
#define FIFO_CFG4_DE		BIT(0)	/* Drop Event */
#define FIFO_CFG4_DV		BIT(1)	/* RX_DV Event */
#define FIFO_CFG4_FC		BIT(2)	/* False Carrier */
#define FIFO_CFG4_CE		BIT(3)	/* Code Error */
#define FIFO_CFG4_CR		BIT(4)	/* CRC error */
#define FIFO_CFG4_LM		BIT(5)	/* Length Mismatch */
#define FIFO_CFG4_LO		BIT(6)	/* Length out of range */
#define FIFO_CFG4_OK		BIT(7)	/* Packet is OK */
#define FIFO_CFG4_MC		BIT(8)	/* Multicast Packet */
#define FIFO_CFG4_BC		BIT(9)	/* Broadcast Packet */
#define FIFO_CFG4_DR		BIT(10)	/* Dribble */
#define FIFO_CFG4_LE		BIT(11)	/* Long Event */
#define FIFO_CFG4_CF		BIT(12)	/* Control Frame */
#define FIFO_CFG4_PF		BIT(13)	/* Pause Frame */
#define FIFO_CFG4_UO		BIT(14)	/* Unsupported Opcode */
#define FIFO_CFG4_VT		BIT(15)	/* VLAN tag detected */
#define FIFO_CFG4_FT		BIT(16)	/* Frame Truncated */
#define FIFO_CFG4_UC		BIT(17)	/* Unicast Packet */
#define FIFO_CFG4_INIT	(FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
			 FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
			 FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
			 FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
			 FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
			 FIFO_CFG4_VT | FIFO_CFG4_FT | FIFO_CFG4_UC)

#define AG71XX_REG_FIFO_CFG5	0x005c
#define FIFO_CFG5_DE		BIT(0)	/* Drop Event */
#define FIFO_CFG5_DV		BIT(1)	/* RX_DV Event */
#define FIFO_CFG5_FC		BIT(2)	/* False Carrier */
#define FIFO_CFG5_CE		BIT(3)	/* Code Error */
#define FIFO_CFG5_LM		BIT(4)	/* Length Mismatch */
#define FIFO_CFG5_LO		BIT(5)	/* Length Out of Range */
#define FIFO_CFG5_OK		BIT(6)	/* Packet is OK */
#define FIFO_CFG5_MC		BIT(7)	/* Multicast Packet */
#define FIFO_CFG5_BC		BIT(8)	/* Broadcast Packet */
#define FIFO_CFG5_DR		BIT(9)	/* Dribble */
#define FIFO_CFG5_CF		BIT(10)	/* Control Frame */
#define FIFO_CFG5_PF		BIT(11)	/* Pause Frame */
#define FIFO_CFG5_UO		BIT(12)	/* Unsupported Opcode */
#define FIFO_CFG5_VT		BIT(13)	/* VLAN tag detected */
#define FIFO_CFG5_LE		BIT(14)	/* Long Event */
#define FIFO_CFG5_FT		BIT(15)	/* Frame Truncated */
#define FIFO_CFG5_16		BIT(16)	/* unknown */
#define FIFO_CFG5_17		BIT(17)	/* unknown */
#define FIFO_CFG5_SF		BIT(18)	/* Short Frame */
#define FIFO_CFG5_BM		BIT(19)	/* Byte Mode */
#define FIFO_CFG5_INIT	(FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
			 FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
			 FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
			 FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
			 FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
			 FIFO_CFG5_17 | FIFO_CFG5_SF)

#define AG71XX_REG_TX_CTRL	0x0180
#define TX_CTRL_TXE		BIT(0)	/* Tx Enable */

#define AG71XX_REG_TX_DESC	0x0184
#define AG71XX_REG_TX_STATUS	0x0188
#define TX_STATUS_PS		BIT(0)	/* Packet Sent */
#define TX_STATUS_UR		BIT(1)	/* Tx Underrun */
#define TX_STATUS_BE		BIT(3)	/* Bus Error */

#define AG71XX_REG_RX_CTRL	0x018c
#define RX_CTRL_RXE		BIT(0)	/* Rx Enable */

#define AG71XX_DMA_RETRY	10
#define AG71XX_DMA_DELAY	1

#define AG71XX_REG_RX_DESC	0x0190
#define AG71XX_REG_RX_STATUS	0x0194
#define RX_STATUS_PR		BIT(0)	/* Packet Received */
#define RX_STATUS_OF		BIT(2)	/* Rx Overflow */
#define RX_STATUS_BE		BIT(3)	/* Bus Error */

#define AG71XX_REG_INT_ENABLE	0x0198
#define AG71XX_REG_INT_STATUS	0x019c
#define AG71XX_INT_TX_PS	BIT(0)
#define AG71XX_INT_TX_UR	BIT(1)
#define AG71XX_INT_TX_BE	BIT(3)
#define AG71XX_INT_RX_PR	BIT(4)
#define AG71XX_INT_RX_OF	BIT(6)
#define AG71XX_INT_RX_BE	BIT(7)

#define AG71XX_REG_FIFO_DEPTH	0x01a8
#define AG71XX_REG_RX_SM	0x01b0
#define AG71XX_REG_TX_SM	0x01b4

#define ETH_SWITCH_HEADER_LEN	2

#define AG71XX_DEFAULT_MSG_ENABLE	\
	(NETIF_MSG_DRV			\
	| NETIF_MSG_PROBE		\
	| NETIF_MSG_LINK		\
	| NETIF_MSG_TIMER		\
	| NETIF_MSG_IFDOWN		\
	| NETIF_MSG_IFUP		\
	| NETIF_MSG_RX_ERR		\
	| NETIF_MSG_TX_ERR)
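
/* The hardware DMA engine walks a linked list of descriptors: "data"
 * holds the physical buffer address, "ctrl" holds the buffer length
 * plus the EMPTY/MORE flags below, and "next" holds the physical
 * address of the following descriptor in the ring.
 */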
#define DESC_EMPTY	BIT(31)
#define DESC_MORE	BIT(24)
#define DESC_PKTLEN_M	0xfff

struct ag71xx_desc {
	u32 data;
	u32 ctrl;
	u32 next;
	u32 pad;
} __aligned(4);

#define AG71XX_DESC_SIZE	roundup(sizeof(struct ag71xx_desc), \
					L1_CACHE_BYTES)

struct ag71xx_buf {
	union {
		struct {
			struct sk_buff *skb;
			unsigned int len;
		} tx;
		struct {
			dma_addr_t dma_addr;
			void *rx_buf;
		} rx;
	};
};

struct ag71xx_ring {
	/* "Hot" fields in the data path. */
	unsigned int curr;
	unsigned int dirty;

	/* "Cold" fields - not used in the data path. */
	struct ag71xx_buf *buf;
	u16 order;
	u16 desc_split;
	dma_addr_t descs_dma;
	u8 *descs_cpu;
};

enum ag71xx_type {
	AR7100,
	AR7240,
	AR9130,
	AR9330,
	AR9340,
	QCA9530,
	QCA9550,
};

struct ag71xx_dcfg {
	u32 max_frame_len;
	const u32 *fifodata;
	u16 desc_pktlen_mask;
	bool tx_hang_workaround;
	enum ag71xx_type type;
};

struct ag71xx {
	/* Critical data related to the per-packet data path are clustered
	 * early in this structure to help improve the D-cache footprint.
	 */
	struct ag71xx_ring rx_ring ____cacheline_aligned;
	struct ag71xx_ring tx_ring ____cacheline_aligned;

	u16 rx_buf_size;
	u8 rx_buf_offset;

	struct net_device *ndev;
	struct platform_device *pdev;
	struct napi_struct napi;
	u32 msg_enable;
	const struct ag71xx_dcfg *dcfg;

	/* From this point onwards we're not looking at per-packet fields. */
	void __iomem *mac_base;

	struct ag71xx_desc *stop_desc;
	dma_addr_t stop_desc_dma;

	phy_interface_t phy_if_mode;

	struct delayed_work restart_work;
	struct timer_list oom_timer;

	struct reset_control *mac_reset;

	u32 fifodata[3];
	int mac_idx;

	struct reset_control *mdio_reset;
	struct mii_bus *mii_bus;
	struct clk *clk_mdio;
	struct clk *clk_eth;
};

static int ag71xx_desc_empty(struct ag71xx_desc *desc)
{
	return (desc->ctrl & DESC_EMPTY) != 0;
}

static struct ag71xx_desc *ag71xx_ring_desc(struct ag71xx_ring *ring, int idx)
{
	return (struct ag71xx_desc *)&ring->descs_cpu[idx * AG71XX_DESC_SIZE];
}
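
/* Ring sizes are powers of two; "order" stores log2 of the size, so
 * BIT(order) - 1 can serve as the index mask and curr/dirty can be
 * free-running counters that are only masked on array access.
 */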
static int ag71xx_ring_size_order(int size)
{
	return fls(size - 1);
}

static bool ag71xx_is(struct ag71xx *ag, enum ag71xx_type type)
{
	return ag->dcfg->type == type;
}
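
/* Register accessors. Writes are read back immediately so they are
 * posted to the MAC before the caller continues; the _sb/_cb helpers
 * set and clear individual bits with read-modify-write cycles.
 */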
static void ag71xx_wr(struct ag71xx *ag, unsigned int reg, u32 value)
{
	iowrite32(value, ag->mac_base + reg);
	/* flush write */
	(void)ioread32(ag->mac_base + reg);
}

static u32 ag71xx_rr(struct ag71xx *ag, unsigned int reg)
{
	return ioread32(ag->mac_base + reg);
}

static void ag71xx_sb(struct ag71xx *ag, unsigned int reg, u32 mask)
{
	void __iomem *r;

	r = ag->mac_base + reg;
	iowrite32(ioread32(r) | mask, r);
	/* flush write */
	(void)ioread32(r);
}

static void ag71xx_cb(struct ag71xx *ag, unsigned int reg, u32 mask)
{
	void __iomem *r;

	r = ag->mac_base + reg;
	iowrite32(ioread32(r) & ~mask, r);
	/* flush write */
	(void)ioread32(r);
}

static void ag71xx_int_enable(struct ag71xx *ag, u32 ints)
{
	ag71xx_sb(ag, AG71XX_REG_INT_ENABLE, ints);
}

static void ag71xx_int_disable(struct ag71xx *ag, u32 ints)
{
	ag71xx_cb(ag, AG71XX_REG_INT_ENABLE, ints);
}
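
/* Poll the busy flag in the MII indicator register until the current
 * MDIO transaction completes, bounded by AG71XX_MDIO_RETRY attempts.
 */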
static int ag71xx_mdio_wait_busy(struct ag71xx *ag)
{
	struct net_device *ndev = ag->ndev;
	int i;

	for (i = 0; i < AG71XX_MDIO_RETRY; i++) {
		u32 busy;

		udelay(AG71XX_MDIO_DELAY);

		busy = ag71xx_rr(ag, AG71XX_REG_MII_IND);
		if (!busy)
			return 0;

		udelay(AG71XX_MDIO_DELAY);
	}

	netif_err(ag, link, ndev, "MDIO operation timed out\n");

	return -ETIMEDOUT;
}

static int ag71xx_mdio_mii_read(struct mii_bus *bus, int addr, int reg)
{
	struct ag71xx *ag = bus->priv;
	int err, val;

	err = ag71xx_mdio_wait_busy(ag);
	if (err)
		return err;

	ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
		  ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
	/* enable read mode */
	ag71xx_wr(ag, AG71XX_REG_MII_CMD, MII_CMD_READ);

	err = ag71xx_mdio_wait_busy(ag);
	if (err)
		return err;

	val = ag71xx_rr(ag, AG71XX_REG_MII_STATUS);
	/* disable read mode */
	ag71xx_wr(ag, AG71XX_REG_MII_CMD, 0);

	netif_dbg(ag, link, ag->ndev, "mii_read: addr=%04x, reg=%04x, value=%04x\n",
		  addr, reg, val);

	return val;
}

static int ag71xx_mdio_mii_write(struct mii_bus *bus, int addr, int reg,
				 u16 val)
{
	struct ag71xx *ag = bus->priv;

	netif_dbg(ag, link, ag->ndev, "mii_write: addr=%04x, reg=%04x, value=%04x\n",
		  addr, reg, val);

	ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
		  ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
	ag71xx_wr(ag, AG71XX_REG_MII_CTRL, val);

	return ag71xx_mdio_wait_busy(ag);
}
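
/* Per-SoC MDC divider tables. The table index corresponds to the
 * MII_CFG_CLK_DIV_* field value programmed into AG71XX_REG_MII_CFG.
 */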
static const u32 ar71xx_mdio_div_table[] = {
	4, 4, 6, 8, 10, 14, 20, 28,
};

static const u32 ar7240_mdio_div_table[] = {
	2, 2, 4, 6, 8, 12, 18, 26, 32, 40, 48, 56, 62, 70, 78, 96,
};

static const u32 ar933x_mdio_div_table[] = {
	4, 4, 6, 8, 10, 14, 20, 28, 34, 42, 50, 58, 66, 74, 82, 98,
};
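
/* Pick the smallest divider that keeps the MDC frequency derived from
 * the reference clock at or below AG71XX_MDIO_MAX_CLK (5 MHz).
 */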
static int ag71xx_mdio_get_divider(struct ag71xx *ag, u32 *div)
{
	unsigned long ref_clock;
	const u32 *table;
	int ndivs, i;

	ref_clock = clk_get_rate(ag->clk_mdio);
	if (!ref_clock)
		return -EINVAL;

	if (ag71xx_is(ag, AR9330) || ag71xx_is(ag, AR9340)) {
		table = ar933x_mdio_div_table;
		ndivs = ARRAY_SIZE(ar933x_mdio_div_table);
	} else if (ag71xx_is(ag, AR7240)) {
		table = ar7240_mdio_div_table;
		ndivs = ARRAY_SIZE(ar7240_mdio_div_table);
	} else {
		table = ar71xx_mdio_div_table;
		ndivs = ARRAY_SIZE(ar71xx_mdio_div_table);
	}

	for (i = 0; i < ndivs; i++) {
		unsigned long t;

		t = ref_clock / table[i];
		if (t <= AG71XX_MDIO_MAX_CLK) {
			*div = i;
			return 0;
		}
	}

	return -ENOENT;
}

static int ag71xx_mdio_reset(struct mii_bus *bus)
{
	struct ag71xx *ag = bus->priv;
	int err;
	u32 t;

	err = ag71xx_mdio_get_divider(ag, &t);
	if (err)
		return err;

	ag71xx_wr(ag, AG71XX_REG_MII_CFG, t | MII_CFG_RESET);
	usleep_range(100, 200);

	ag71xx_wr(ag, AG71XX_REG_MII_CFG, t);
	usleep_range(100, 200);

	return 0;
}

static int ag71xx_mdio_probe(struct ag71xx *ag)
{
	struct device *dev = &ag->pdev->dev;
	struct net_device *ndev = ag->ndev;
	static struct mii_bus *mii_bus;
	struct device_node *np, *mnp;
	int err;

	np = dev->of_node;

	ag->clk_mdio = devm_clk_get(dev, "mdio");
	if (IS_ERR(ag->clk_mdio)) {
		netif_err(ag, probe, ndev, "Failed to get mdio clk.\n");
		return PTR_ERR(ag->clk_mdio);
	}

	err = clk_prepare_enable(ag->clk_mdio);
	if (err) {
		netif_err(ag, probe, ndev, "Failed to enable mdio clk.\n");
		return err;
	}

	mii_bus = devm_mdiobus_alloc(dev);
	if (!mii_bus) {
		err = -ENOMEM;
		goto mdio_err_put_clk;
	}

	ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio");
	if (IS_ERR(ag->mdio_reset)) {
		netif_err(ag, probe, ndev, "Failed to get reset mdio.\n");
		return PTR_ERR(ag->mdio_reset);
	}

	mii_bus->name = "ag71xx_mdio";
	mii_bus->read = ag71xx_mdio_mii_read;
	mii_bus->write = ag71xx_mdio_mii_write;
	mii_bus->reset = ag71xx_mdio_reset;
	mii_bus->priv = ag;
	mii_bus->parent = dev;
	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s.%d", np->name, ag->mac_idx);

	if (!IS_ERR(ag->mdio_reset)) {
		reset_control_assert(ag->mdio_reset);
		msleep(100);
		reset_control_deassert(ag->mdio_reset);
		msleep(200);
	}

	mnp = of_get_child_by_name(np, "mdio");
	err = of_mdiobus_register(mii_bus, mnp);
	of_node_put(mnp);
	if (err)
		goto mdio_err_put_clk;

	ag->mii_bus = mii_bus;

	return 0;

mdio_err_put_clk:
	clk_disable_unprepare(ag->clk_mdio);
	return err;
}

static void ag71xx_mdio_remove(struct ag71xx *ag)
{
	if (ag->mii_bus)
		mdiobus_unregister(ag->mii_bus);
	clk_disable_unprepare(ag->clk_mdio);
}

static void ag71xx_hw_stop(struct ag71xx *ag)
{
	/* disable all interrupts and stop the rx/tx engine */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
}
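
/* Heuristic behind the tx_hang_workaround: after 100ms without TX
 * progress and with carrier up, inspect the RX/TX state machine
 * registers and the FIFO depth to decide whether the DMA engine is
 * wedged and the MAC needs a restart.
 */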
static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
{
	unsigned long timestamp;
	u32 rx_sm, tx_sm, rx_fd;

	timestamp = netdev_get_tx_queue(ag->ndev, 0)->trans_start;
	if (likely(time_before(jiffies, timestamp + HZ / 10)))
		return false;

	if (!netif_carrier_ok(ag->ndev))
		return false;

	rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
	if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
		return true;

	tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
	rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
	if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
	    ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
		return true;

	return false;
}

static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int sent = 0, bytes_compl = 0, n = 0;
	struct net_device *ndev = ag->ndev;
	int ring_mask, ring_size;
	bool dma_stuck = false;

	ring_mask = BIT(ring->order) - 1;
	ring_size = BIT(ring->order);

	netif_dbg(ag, tx_queued, ndev, "processing TX ring\n");

	while (ring->dirty + n != ring->curr) {
		struct ag71xx_desc *desc;
		struct sk_buff *skb;
		unsigned int i;

		i = (ring->dirty + n) & ring_mask;
		desc = ag71xx_ring_desc(ring, i);
		skb = ring->buf[i].tx.skb;

		if (!flush && !ag71xx_desc_empty(desc)) {
			if (ag->dcfg->tx_hang_workaround &&
			    ag71xx_check_dma_stuck(ag)) {
				schedule_delayed_work(&ag->restart_work,
						      HZ / 2);
				dma_stuck = true;
			}
			break;
		}

		if (flush)
			desc->ctrl |= DESC_EMPTY;

		n++;
		if (!skb)
			continue;

		dev_kfree_skb_any(skb);
		ring->buf[i].tx.skb = NULL;

		bytes_compl += ring->buf[i].tx.len;

		sent++;
		ring->dirty += n;

		while (n > 0) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
			n--;
		}
	}

	netif_dbg(ag, tx_done, ndev, "%d packets sent out\n", sent);

	if (!sent)
		return 0;

	ag->ndev->stats.tx_bytes += bytes_compl;
	ag->ndev->stats.tx_packets += sent;

	netdev_completed_queue(ag->ndev, sent, bytes_compl);
	if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
		netif_wake_queue(ag->ndev);

	if (!dma_stuck)
		cancel_delayed_work(&ag->restart_work);

	return sent;
}

static void ag71xx_dma_wait_stop(struct ag71xx *ag)
{
	struct net_device *ndev = ag->ndev;
	int i;

	for (i = 0; i < AG71XX_DMA_RETRY; i++) {
		u32 rx, tx;

		mdelay(AG71XX_DMA_DELAY);

		rx = ag71xx_rr(ag, AG71XX_REG_RX_CTRL) & RX_CTRL_RXE;
		tx = ag71xx_rr(ag, AG71XX_REG_TX_CTRL) & TX_CTRL_TXE;
		if (!rx && !tx)
			return;
	}

	netif_err(ag, hw, ndev, "DMA stop operation timed out\n");
}

static void ag71xx_dma_reset(struct ag71xx *ag)
{
	struct net_device *ndev = ag->ndev;
	u32 val;
	int i;

	/* stop RX and TX */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);

	/* give the hardware some time to really stop all rx/tx activity
	 * clearing the descriptors too early causes random memory corruption
	 */
	ag71xx_dma_wait_stop(ag);

	/* clear descriptor addresses */
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);

	/* clear pending RX/TX interrupts */
	for (i = 0; i < 256; i++) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
	}

	/* clear pending errors */
	ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
	ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);

	val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (val)
		netif_err(ag, hw, ndev, "unable to clear DMA Rx status: %08x\n",
			  val);

	val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);

	/* mask out reserved bits */
	val &= ~0xff000000;

	if (val)
		netif_err(ag, hw, ndev, "unable to clear DMA Tx status: %08x\n",
			  val);
}

static void ag71xx_hw_setup(struct ag71xx *ag)
{
	u32 init = MAC_CFG1_INIT;

	/* setup MAC configuration registers */
	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
		  MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);

	/* setup max frame length to zero */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);

	/* setup FIFO configuration registers */
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, ag->fifodata[0]);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, ag->fifodata[1]);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
}
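
/* On-wire frame size for a given MTU: 2-byte switch header, 14-byte
 * Ethernet header, 4-byte VLAN tag, payload and 4-byte FCS. For the
 * default MTU of 1500 this works out to 2 + 14 + 4 + 1500 + 4 = 1524.
 */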
static unsigned int ag71xx_max_frame_len(unsigned int mtu)
{
	return ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}

static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
{
	u32 t;

	t = (((u32)mac[5]) << 24) | (((u32)mac[4]) << 16)
	  | (((u32)mac[3]) << 8) | ((u32)mac[2]);

	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);

	t = (((u32)mac[1]) << 24) | (((u32)mac[0]) << 16);
	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
}
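
/* Reset the MAC block while preserving the MII configuration and the
 * current RX descriptor pointer, so the RX ring survives the link
 * reconfiguration; the TX ring is flushed and restarted from zero.
 */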
static void ag71xx_fast_reset(struct ag71xx *ag)
{
	struct net_device *dev = ag->ndev;
	u32 rx_ds;
	u32 mii_reg;

	ag71xx_hw_stop(ag);

	mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
	rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);

	ag71xx_tx_packets(ag, true);

	reset_control_assert(ag->mac_reset);
	usleep_range(10, 20);
	reset_control_deassert(ag->mac_reset);
	usleep_range(10, 20);

	ag71xx_dma_reset(ag);
	ag71xx_hw_setup(ag);
	ag->tx_ring.curr = 0;
	ag->tx_ring.dirty = 0;
	netdev_reset_queue(ag->ndev);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ag->ndev->mtu));

	ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);

	ag71xx_hw_set_macaddr(ag, dev->dev_addr);
}

static void ag71xx_hw_start(struct ag71xx *ag)
{
	/* start RX engine */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);

	/* enable interrupts */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);

	netif_wake_queue(ag->ndev);
}

static void ag71xx_link_adjust(struct ag71xx *ag, bool update)
{
	struct phy_device *phydev = ag->ndev->phydev;
	u32 cfg2;
	u32 ifctl;
	u32 fifo5;

	if (!phydev->link && update) {
		ag71xx_hw_stop(ag);
		return;
	}

	if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
		ag71xx_fast_reset(ag);

	cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
	cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
	cfg2 |= (phydev->duplex) ? MAC_CFG2_FDX : 0;

	ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
	ifctl &= ~(MAC_IFCTL_SPEED);

	fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
	fifo5 &= ~FIFO_CFG5_BM;

	switch (phydev->speed) {
	case SPEED_1000:
		cfg2 |= MAC_CFG2_IF_1000;
		fifo5 |= FIFO_CFG5_BM;
		break;
	case SPEED_100:
		cfg2 |= MAC_CFG2_IF_10_100;
		ifctl |= MAC_IFCTL_SPEED;
		break;
	case SPEED_10:
		cfg2 |= MAC_CFG2_IF_10_100;
		break;
	default:
		WARN(1, "not supported speed %i\n", phydev->speed);
		return;
	}

	if (ag->tx_ring.desc_split) {
		ag->fifodata[2] &= 0xffff;
		ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
	}

	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
	ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);

	ag71xx_hw_start(ag);

	if (update)
		phy_print_status(phydev);
}

static void ag71xx_phy_link_adjust(struct net_device *ndev)
{
	struct ag71xx *ag = netdev_priv(ndev);

	ag71xx_link_adjust(ag, true);
}

static int ag71xx_phy_connect(struct ag71xx *ag)
{
	struct device_node *np = ag->pdev->dev.of_node;
	struct net_device *ndev = ag->ndev;
	struct device_node *phy_node;
	struct phy_device *phydev;
	int ret;

	if (of_phy_is_fixed_link(np)) {
		ret = of_phy_register_fixed_link(np);
		if (ret < 0) {
			netif_err(ag, probe, ndev, "Failed to register fixed PHY link: %d\n",
				  ret);
			return ret;
		}

		phy_node = of_node_get(np);
	} else {
		phy_node = of_parse_phandle(np, "phy-handle", 0);
	}

	if (!phy_node) {
		netif_err(ag, probe, ndev, "Could not find valid phy node\n");
		return -ENODEV;
	}

	phydev = of_phy_connect(ag->ndev, phy_node, ag71xx_phy_link_adjust,
				0, ag->phy_if_mode);

	of_node_put(phy_node);

	if (!phydev) {
		netif_err(ag, probe, ndev, "Could not connect to PHY device\n");
		return -ENODEV;
	}

	phy_attached_info(phydev);

	return 0;
}

static void ag71xx_ring_tx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_mask = BIT(ring->order) - 1;
	u32 bytes_compl = 0, pkts_compl = 0;
	struct net_device *ndev = ag->ndev;

	while (ring->curr != ring->dirty) {
		struct ag71xx_desc *desc;
		u32 i = ring->dirty & ring_mask;

		desc = ag71xx_ring_desc(ring, i);
		if (!ag71xx_desc_empty(desc)) {
			desc->ctrl = 0;
			ndev->stats.tx_errors++;
		}

		if (ring->buf[i].tx.skb) {
			bytes_compl += ring->buf[i].tx.len;
			pkts_compl++;
			dev_kfree_skb_any(ring->buf[i].tx.skb);
		}
		ring->buf[i].tx.skb = NULL;

		ring->dirty++;
	}

	/* flush descriptors */
	wmb();

	netdev_completed_queue(ndev, pkts_compl, bytes_compl);
}

static void ag71xx_ring_tx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = ring_size - 1;
	int i;

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32)(ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		desc->ctrl = DESC_EMPTY;
		ring->buf[i].tx.skb = NULL;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;
	netdev_reset_queue(ag->ndev);
}

static void ag71xx_ring_rx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int i;

	if (!ring->buf)
		return;

	for (i = 0; i < ring_size; i++)
		if (ring->buf[i].rx.rx_buf) {
			dma_unmap_single(&ag->pdev->dev,
					 ring->buf[i].rx.dma_addr,
					 ag->rx_buf_size, DMA_FROM_DEVICE);
			skb_free_frag(ring->buf[i].rx.rx_buf);
		}
}
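
/* Total size of one RX fragment: the DMA-mapped area plus room for the
 * skb_shared_info that build_skb() places at the end of the buffer.
 */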
static int ag71xx_buffer_size(struct ag71xx *ag)
{
	return ag->rx_buf_size +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
			       int offset,
			       void *(*alloc)(unsigned int size))
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct ag71xx_desc *desc;
	void *data;

	desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);

	data = alloc(ag71xx_buffer_size(ag));
	if (!data)
		return false;

	buf->rx.rx_buf = data;
	buf->rx.dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size,
					  DMA_FROM_DEVICE);
	desc->data = (u32)buf->rx.dma_addr + offset;
	return true;
}

static int ag71xx_ring_rx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct net_device *ndev = ag->ndev;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	unsigned int i;
	int ret;

	ret = 0;
	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32)(ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		netif_dbg(ag, rx_status, ndev, "RX desc at %p, next is %08x\n",
			  desc, desc->next);
	}

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], ag->rx_buf_offset,
					netdev_alloc_frag)) {
			ret = -ENOMEM;
			break;
		}

		desc->ctrl = DESC_EMPTY;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;

	return ret;
}

static int ag71xx_ring_rx_refill(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_mask = BIT(ring->order) - 1;
	int offset = ag->rx_buf_offset;
	unsigned int count;

	count = 0;
	for (; ring->curr - ring->dirty > 0; ring->dirty++) {
		struct ag71xx_desc *desc;
		unsigned int i;

		i = ring->dirty & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ring->buf[i].rx.rx_buf &&
		    !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
					napi_alloc_frag))
			break;

		desc->ctrl = DESC_EMPTY;
		count++;
	}

	/* flush descriptors */
	wmb();

	netif_dbg(ag, rx_status, ag->ndev, "%u rx descriptors refilled\n",
		  count);

	return count;
}
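
/* TX and RX share a single buf array and a single coherent descriptor
 * allocation; the RX views simply start tx_size entries/descriptors
 * into the shared block.
 */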
static int ag71xx_rings_init(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size, tx_size;

	ring_size = BIT(tx->order) + BIT(rx->order);
	tx_size = BIT(tx->order);

	tx->buf = kcalloc(ring_size, sizeof(*tx->buf), GFP_KERNEL);
	if (!tx->buf)
		return -ENOMEM;

	tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev,
					   ring_size * AG71XX_DESC_SIZE,
					   &tx->descs_dma, GFP_KERNEL);
	if (!tx->descs_cpu) {
		kfree(tx->buf);
		tx->buf = NULL;
		return -ENOMEM;
	}

	rx->buf = &tx->buf[tx_size];
	rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE;
	rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE;

	ag71xx_ring_tx_init(ag);
	return ag71xx_ring_rx_init(ag);
}

static void ag71xx_rings_free(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size;

	ring_size = BIT(tx->order) + BIT(rx->order);

	if (tx->descs_cpu)
		dma_free_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE,
				  tx->descs_cpu, tx->descs_dma);

	kfree(tx->buf);

	tx->descs_cpu = NULL;
	rx->descs_cpu = NULL;
	tx->buf = NULL;
	rx->buf = NULL;
}

static void ag71xx_rings_cleanup(struct ag71xx *ag)
{
	ag71xx_ring_rx_clean(ag);
	ag71xx_ring_tx_clean(ag);
	ag71xx_rings_free(ag);

	netdev_reset_queue(ag->ndev);
}

static void ag71xx_hw_init(struct ag71xx *ag)
{
	ag71xx_hw_stop(ag);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
	usleep_range(20, 30);

	reset_control_assert(ag->mac_reset);
	msleep(100);
	reset_control_deassert(ag->mac_reset);
	msleep(200);

	ag71xx_hw_setup(ag);

	ag71xx_dma_reset(ag);
}

static int ag71xx_hw_enable(struct ag71xx *ag)
{
	int ret;

	ret = ag71xx_rings_init(ag);
	if (ret)
		return ret;

	napi_enable(&ag->napi);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
	netif_start_queue(ag->ndev);

	return 0;
}

static void ag71xx_hw_disable(struct ag71xx *ag)
{
	netif_stop_queue(ag->ndev);

	ag71xx_hw_stop(ag);
	ag71xx_dma_reset(ag);

	napi_disable(&ag->napi);
	del_timer_sync(&ag->oom_timer);

	ag71xx_rings_cleanup(ag);
}

static int ag71xx_open(struct net_device *ndev)
{
	struct ag71xx *ag = netdev_priv(ndev);
	unsigned int max_frame_len;
	int ret;

	max_frame_len = ag71xx_max_frame_len(ndev->mtu);
	ag->rx_buf_size =
		SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
	ag71xx_hw_set_macaddr(ag, ndev->dev_addr);

	ret = ag71xx_hw_enable(ag);
	if (ret)
		goto err;

	ret = ag71xx_phy_connect(ag);
	if (ret)
		goto err;

	phy_start(ndev->phydev);

	return 0;

err:
	ag71xx_rings_cleanup(ag);
	return ret;
}

static int ag71xx_stop(struct net_device *ndev)
{
	struct ag71xx *ag = netdev_priv(ndev);

	phy_stop(ndev->phydev);
	phy_disconnect(ndev->phydev);
	ag71xx_hw_disable(ag);

	return 0;
}
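
/* Map a linear packet onto one or more descriptors. On SoCs with
 * desc_split set, segments are capped at "split" bytes and chained
 * with DESC_MORE; the first descriptor stays marked EMPTY until the
 * caller has filled in everything, to keep the DMA engine from
 * starting on a half-built chain.
 */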
static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
{
	int i, ring_mask, ndesc, split;
	struct ag71xx_desc *desc;

	ring_mask = BIT(ring->order) - 1;
	ndesc = 0;
	split = ring->desc_split;

	if (!split)
		split = len;

	while (len > 0) {
		unsigned int cur_len = len;

		i = (ring->curr + ndesc) & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_desc_empty(desc))
			return -1;

		if (cur_len > split) {
			cur_len = split;

			/* TX will hang if DMA transfers <= 4 bytes,
			 * make sure next segment is more than 4 bytes long.
			 */
			if (len <= split + 4)
				cur_len -= 4;
		}

		desc->data = addr;
		addr += cur_len;
		len -= cur_len;

		if (len > 0)
			cur_len |= DESC_MORE;

		/* prevent early tx attempt of this descriptor */
		if (!ndesc)
			cur_len |= DESC_EMPTY;

		desc->ctrl = cur_len;
		ndesc++;
	}

	return ndesc;
}

static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
					  struct net_device *ndev)
{
	int i, n, ring_min, ring_mask, ring_size;
	struct ag71xx *ag = netdev_priv(ndev);
	struct ag71xx_ring *ring;
	struct ag71xx_desc *desc;
	dma_addr_t dma_addr;

	ring = &ag->tx_ring;
	ring_mask = BIT(ring->order) - 1;
	ring_size = BIT(ring->order);

	if (skb->len <= 4) {
		netif_dbg(ag, tx_err, ndev, "packet len is too small\n");
		goto err_drop;
	}

	dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);

	i = ring->curr & ring_mask;
	desc = ag71xx_ring_desc(ring, i);

	/* setup descriptor fields */
	n = ag71xx_fill_dma_desc(ring, (u32)dma_addr,
				 skb->len & ag->dcfg->desc_pktlen_mask);
	if (n < 0)
		goto err_drop_unmap;

	i = (ring->curr + n - 1) & ring_mask;
	ring->buf[i].tx.len = skb->len;
	ring->buf[i].tx.skb = skb;

	netdev_sent_queue(ndev, skb->len);

	skb_tx_timestamp(skb);

	desc->ctrl &= ~DESC_EMPTY;
	ring->curr += n;

	/* flush descriptor */
	wmb();

	ring_min = 2;
	if (ring->desc_split)
		ring_min *= AG71XX_TX_RING_DS_PER_PKT;

	if (ring->curr - ring->dirty >= ring_size - ring_min) {
		netif_dbg(ag, tx_err, ndev, "tx queue full\n");
		netif_stop_queue(ndev);
	}

	netif_dbg(ag, tx_queued, ndev, "packet injected into TX queue\n");

	/* enable TX engine */
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);

	return NETDEV_TX_OK;

err_drop_unmap:
	dma_unmap_single(&ag->pdev->dev, dma_addr, skb->len, DMA_TO_DEVICE);

err_drop:
	ndev->stats.tx_dropped++;

	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static void ag71xx_oom_timer_handler(struct timer_list *t)
{
	struct ag71xx *ag = from_timer(ag, t, oom_timer);

	napi_schedule(&ag->napi);
}

static void ag71xx_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct ag71xx *ag = netdev_priv(ndev);

	netif_err(ag, tx_err, ndev, "tx timeout\n");

	schedule_delayed_work(&ag->restart_work, 1);
}

static void ag71xx_restart_work_func(struct work_struct *work)
{
	struct ag71xx *ag = container_of(work, struct ag71xx,
					 restart_work.work);
	struct net_device *ndev = ag->ndev;

	rtnl_lock();
	ag71xx_hw_disable(ag);
	ag71xx_hw_enable(ag);
	if (ndev->phydev->link)
		ag71xx_link_adjust(ag, false);
	rtnl_unlock();
}

static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
	struct net_device *ndev = ag->ndev;
	int ring_mask, ring_size, done = 0;
	unsigned int pktlen_mask, offset;
	struct sk_buff *next, *skb;
	struct ag71xx_ring *ring;
	struct list_head rx_list;

	ring = &ag->rx_ring;
	pktlen_mask = ag->dcfg->desc_pktlen_mask;
	offset = ag->rx_buf_offset;
	ring_mask = BIT(ring->order) - 1;
	ring_size = BIT(ring->order);

	netif_dbg(ag, rx_status, ndev, "rx packets, limit=%d, curr=%u, dirty=%u\n",
		  limit, ring->curr, ring->dirty);

	INIT_LIST_HEAD(&rx_list);

	while (done < limit) {
		unsigned int i = ring->curr & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		int pktlen;

		if (ag71xx_desc_empty(desc))
			break;

		if ((ring->dirty + ring_size) == ring->curr) {
			WARN_ONCE(1, "RX out of ring");
			break;
		}

		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);

		pktlen = desc->ctrl & pktlen_mask;
		pktlen -= ETH_FCS_LEN;

		dma_unmap_single(&ag->pdev->dev, ring->buf[i].rx.dma_addr,
				 ag->rx_buf_size, DMA_FROM_DEVICE);

		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += pktlen;

		skb = build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
		if (!skb) {
			skb_free_frag(ring->buf[i].rx.rx_buf);
			ndev->stats.rx_dropped++;
			goto next;
		}

		skb_reserve(skb, offset);
		skb_put(skb, pktlen);

		skb->dev = ndev;
		skb->ip_summed = CHECKSUM_NONE;
		list_add_tail(&skb->list, &rx_list);

next:
		ring->buf[i].rx.rx_buf = NULL;
		done++;

		ring->curr++;
	}

	ag71xx_ring_rx_refill(ag);

	list_for_each_entry_safe(skb, next, &rx_list, list)
		skb->protocol = eth_type_trans(skb, ndev);
	netif_receive_skb_list(&rx_list);

	netif_dbg(ag, rx_status, ndev, "rx finish, curr=%u, dirty=%u, done=%d\n",
		  ring->curr, ring->dirty, done);

	return done;
}

static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
	struct ag71xx_ring *rx_ring = &ag->rx_ring;
	int rx_ring_size = BIT(rx_ring->order);
	struct net_device *ndev = ag->ndev;
	int tx_done, rx_done;
	u32 status;

	tx_done = ag71xx_tx_packets(ag, false);

	netif_dbg(ag, rx_status, ndev, "processing RX ring\n");
	rx_done = ag71xx_rx_packets(ag, limit);

	if (!rx_ring->buf[rx_ring->dirty % rx_ring_size].rx.rx_buf)
		goto oom;

	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (unlikely(status & RX_STATUS_OF)) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		ndev->stats.rx_fifo_errors++;

		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	if (rx_done < limit) {
		if (status & RX_STATUS_PR)
			goto more;

		status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
		if (status & TX_STATUS_PS)
			goto more;

		netif_dbg(ag, rx_status, ndev, "disable polling mode, rx=%d, tx=%d,limit=%d\n",
			  rx_done, tx_done, limit);

		napi_complete(napi);

		/* enable interrupts */
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		return rx_done;
	}

more:
	netif_dbg(ag, rx_status, ndev, "stay in polling mode, rx=%d, tx=%d, limit=%d\n",
		  rx_done, tx_done, limit);
	return limit;

oom:
	netif_err(ag, rx_err, ndev, "out of memory\n");

	mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
	napi_complete(napi);
	return 0;
}

static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ag71xx *ag;
	u32 status;

	ag = netdev_priv(ndev);
	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);

	if (unlikely(!status))
		return IRQ_NONE;

	if (unlikely(status & AG71XX_INT_ERR)) {
		if (status & AG71XX_INT_TX_BE) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
			netif_err(ag, intr, ndev, "TX BUS error\n");
		}
		if (status & AG71XX_INT_RX_BE) {
			ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
			netif_err(ag, intr, ndev, "RX BUS error\n");
		}
	}

	if (likely(status & AG71XX_INT_POLL)) {
		ag71xx_int_disable(ag, AG71XX_INT_POLL);
		netif_dbg(ag, intr, ndev, "enable polling mode\n");
		napi_schedule(&ag->napi);
	}

	return IRQ_HANDLED;
}

static int ag71xx_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ag71xx *ag = netdev_priv(ndev);

	ndev->mtu = new_mtu;
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ndev->mtu));

	return 0;
}

static const struct net_device_ops ag71xx_netdev_ops = {
	.ndo_open		= ag71xx_open,
	.ndo_stop		= ag71xx_stop,
	.ndo_start_xmit		= ag71xx_hard_start_xmit,
	.ndo_do_ioctl		= phy_do_ioctl,
	.ndo_tx_timeout		= ag71xx_tx_timeout,
	.ndo_change_mtu		= ag71xx_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
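
/* Base addresses of the two built-in MACs; the index at which the
 * device's register base matches determines ag->mac_idx.
 */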
static const u32 ar71xx_addr_ar7100[] = {
	0x19000000, 0x1a000000,
};

static int ag71xx_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct ag71xx_dcfg *dcfg;
	struct net_device *ndev;
	struct resource *res;
	const void *mac_addr;
	int tx_size, err, i;
	struct ag71xx *ag;

	if (!np)
		return -ENODEV;

	ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*ag));
	if (!ndev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	dcfg = of_device_get_match_data(&pdev->dev);
	if (!dcfg)
		return -EINVAL;

	ag = netdev_priv(ndev);
	ag->mac_idx = -1;
	for (i = 0; i < ARRAY_SIZE(ar71xx_addr_ar7100); i++) {
		if (ar71xx_addr_ar7100[i] == res->start)
			ag->mac_idx = i;
	}

	if (ag->mac_idx < 0) {
		netif_err(ag, probe, ndev, "unknown mac idx\n");
		return -EINVAL;
	}

	ag->clk_eth = devm_clk_get(&pdev->dev, "eth");
	if (IS_ERR(ag->clk_eth)) {
		netif_err(ag, probe, ndev, "Failed to get eth clk.\n");
		return PTR_ERR(ag->clk_eth);
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	ag->pdev = pdev;
	ag->ndev = ndev;
	ag->dcfg = dcfg;
	ag->msg_enable = netif_msg_init(-1, AG71XX_DEFAULT_MSG_ENABLE);
	memcpy(ag->fifodata, dcfg->fifodata, sizeof(ag->fifodata));

	ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
	if (IS_ERR(ag->mac_reset)) {
		netif_err(ag, probe, ndev, "missing mac reset\n");
		err = PTR_ERR(ag->mac_reset);
		goto err_free;
	}

	ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!ag->mac_base) {
		err = -ENOMEM;
		goto err_free;
	}

	ndev->irq = platform_get_irq(pdev, 0);
	err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
			       0x0, dev_name(&pdev->dev), ndev);
	if (err) {
		netif_err(ag, probe, ndev, "unable to request IRQ %d\n",
			  ndev->irq);
		goto err_free;
	}

	ndev->netdev_ops = &ag71xx_netdev_ops;

	INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);
	timer_setup(&ag->oom_timer, ag71xx_oom_timer_handler, 0);

	tx_size = AG71XX_TX_RING_SIZE_DEFAULT;
	ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);

	ndev->max_mtu = dcfg->max_frame_len - ag71xx_max_frame_len(0);

	ag->rx_buf_offset = NET_SKB_PAD;
	if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
		ag->rx_buf_offset += NET_IP_ALIGN;

	if (ag71xx_is(ag, AR7100)) {
		ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
		tx_size *= AG71XX_TX_RING_DS_PER_PKT;
	}
	ag->tx_ring.order = ag71xx_ring_size_order(tx_size);

	ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
					    sizeof(struct ag71xx_desc),
					    &ag->stop_desc_dma, GFP_KERNEL);
	if (!ag->stop_desc) {
		err = -ENOMEM;
		goto err_free;
	}

	ag->stop_desc->data = 0;
	ag->stop_desc->ctrl = 0;
	ag->stop_desc->next = (u32)ag->stop_desc_dma;

	mac_addr = of_get_mac_address(np);
	if (!IS_ERR(mac_addr))
		memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
	if (IS_ERR(mac_addr) || !is_valid_ether_addr(ndev->dev_addr)) {
		netif_err(ag, probe, ndev, "invalid MAC address, using random address\n");
		eth_random_addr(ndev->dev_addr);
	}

	err = of_get_phy_mode(np, &ag->phy_if_mode);
	if (err) {
		netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
		goto err_free;
	}

	netif_napi_add(ndev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);

	err = clk_prepare_enable(ag->clk_eth);
	if (err) {
		netif_err(ag, probe, ndev, "Failed to enable eth clk.\n");
		goto err_free;
	}

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);

	ag71xx_hw_init(ag);

	err = ag71xx_mdio_probe(ag);
	if (err)
		goto err_put_clk;

	platform_set_drvdata(pdev, ndev);

	err = register_netdev(ndev);
	if (err) {
		netif_err(ag, probe, ndev, "unable to register net device\n");
		platform_set_drvdata(pdev, NULL);
		goto err_mdio_remove;
	}

	netif_info(ag, probe, ndev, "Atheros AG71xx at 0x%08lx, irq %d, mode:%s\n",
		   (unsigned long)ag->mac_base, ndev->irq,
		   phy_modes(ag->phy_if_mode));

	return 0;

err_mdio_remove:
	ag71xx_mdio_remove(ag);
err_put_clk:
	clk_disable_unprepare(ag->clk_eth);
err_free:
	return err;
}

static int ag71xx_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ag71xx *ag;

	if (!ndev)
		return 0;

	ag = netdev_priv(ndev);
	unregister_netdev(ndev);
	ag71xx_mdio_remove(ag);
	clk_disable_unprepare(ag->clk_eth);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static const u32 ar71xx_fifo_ar7100[] = {
	0x0fff0000, 0x00001fff, 0x00780fff,
};

static const u32 ar71xx_fifo_ar9130[] = {
	0x0fff0000, 0x00001fff, 0x008001ff,
};

static const u32 ar71xx_fifo_ar9330[] = {
	0x0010ffff, 0x015500aa, 0x01f00140,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar7100 = {
	.type = AR7100,
	.fifodata = ar71xx_fifo_ar7100,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = false,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar7240 = {
	.type = AR7240,
	.fifodata = ar71xx_fifo_ar7100,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar9130 = {
	.type = AR9130,
	.fifodata = ar71xx_fifo_ar9130,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = false,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar9330 = {
	.type = AR9330,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar9340 = {
	.type = AR9340,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = SZ_16K - 1,
	.desc_pktlen_mask = SZ_16K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_qca9530 = {
	.type = QCA9530,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = SZ_16K - 1,
	.desc_pktlen_mask = SZ_16K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_qca9550 = {
	.type = QCA9550,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_16K - 1,
	.tx_hang_workaround = true,
};

static const struct of_device_id ag71xx_match[] = {
	{ .compatible = "qca,ar7100-eth", .data = &ag71xx_dcfg_ar7100 },
	{ .compatible = "qca,ar7240-eth", .data = &ag71xx_dcfg_ar7240 },
	{ .compatible = "qca,ar7241-eth", .data = &ag71xx_dcfg_ar7240 },
	{ .compatible = "qca,ar7242-eth", .data = &ag71xx_dcfg_ar7240 },
	{ .compatible = "qca,ar9130-eth", .data = &ag71xx_dcfg_ar9130 },
	{ .compatible = "qca,ar9330-eth", .data = &ag71xx_dcfg_ar9330 },
	{ .compatible = "qca,ar9340-eth", .data = &ag71xx_dcfg_ar9340 },
	{ .compatible = "qca,qca9530-eth", .data = &ag71xx_dcfg_qca9530 },
	{ .compatible = "qca,qca9550-eth", .data = &ag71xx_dcfg_qca9550 },
	{ .compatible = "qca,qca9560-eth", .data = &ag71xx_dcfg_qca9550 },
	{}
};
MODULE_DEVICE_TABLE(of, ag71xx_match);

static struct platform_driver ag71xx_driver = {
	.probe		= ag71xx_probe,
	.remove		= ag71xx_remove,
	.driver = {
		.name	= "ag71xx",
		.of_match_table = ag71xx_match,
	}
};

module_platform_driver(ag71xx_driver);
MODULE_LICENSE("GPL v2");