// SPDX-License-Identifier: GPL-2.0
/* Atheros AR71xx built-in ethernet mac driver
 *
 * Copyright (C) 2019 Oleksij Rempel <o.rempel@pengutronix.de>
 *
 * List of authors who contributed to this driver before mainlining:
 *	Alexander Couzens <lynxis@fe80.eu>
 *	Christian Lamparter <chunkeey@gmail.com>
 *	Chuanhong Guo <gch981213@gmail.com>
 *	Daniel F. Dickinson <cshored@thecshore.com>
 *	David Bauer <mail@david-bauer.net>
 *	Felix Fietkau <nbd@nbd.name>
 *	Gabor Juhos <juhosg@freemail.hu>
 *	Hauke Mehrtens <hauke@hauke-m.de>
 *	Johann Neuhauser <johann@it-neuhauser.de>
 *	John Crispin <john@phrozen.org>
 *	Jo-Philipp Wich <jo@mein.io>
 *	Koen Vandeputte <koen.vandeputte@ncentric.com>
 *	Lucian Cristian <lucian.cristian@gmail.com>
 *	Matt Merhar <mattmerhar@protonmail.com>
 *	Milan Krstic <milan.krstic@gmail.com>
 *	Petr Štetiar <ynezz@true.cz>
 *	Rosen Penev <rosenp@gmail.com>
 *	Stephen Walker <stephendwalker+github@gmail.com>
 *	Vittorio Gambaletta <openwrt@vittgam.net>
 *	Weijie Gao <hackpascal@gmail.com>
 *	Imre Kaloz <kaloz@openwrt.org>
 */
#include <linux/if_vlan.h>
#include <linux/mfd/syscon.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/phylink.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/clk.h>
#include <linux/io.h>

/* For our NAPI weight bigger does *NOT* mean better - it means more
 * D-cache misses and lots more wasted cycles than we'll ever
 * possibly gain from saving instructions.
 */
#define AG71XX_NAPI_WEIGHT	32
#define AG71XX_OOM_REFILL	(1 + HZ / 10)

#define AG71XX_INT_ERR	(AG71XX_INT_RX_BE | AG71XX_INT_TX_BE)
#define AG71XX_INT_TX	(AG71XX_INT_TX_PS)
#define AG71XX_INT_RX	(AG71XX_INT_RX_PR | AG71XX_INT_RX_OF)

#define AG71XX_INT_POLL	(AG71XX_INT_RX | AG71XX_INT_TX)
#define AG71XX_INT_INIT	(AG71XX_INT_ERR | AG71XX_INT_POLL)

#define AG71XX_TX_MTU_LEN	1540

#define AG71XX_TX_RING_SPLIT		512
#define AG71XX_TX_RING_DS_PER_PKT	DIV_ROUND_UP(AG71XX_TX_MTU_LEN, \
						     AG71XX_TX_RING_SPLIT)
#define AG71XX_TX_RING_SIZE_DEFAULT	128
#define AG71XX_RX_RING_SIZE_DEFAULT	256

#define AG71XX_MDIO_RETRY	1000
#define AG71XX_MDIO_DELAY	5
#define AG71XX_MDIO_MAX_CLK	5000000
/* Register offsets */
#define AG71XX_REG_MAC_CFG1	0x0000
#define MAC_CFG1_TXE		BIT(0)	/* Tx Enable */
#define MAC_CFG1_STX		BIT(1)	/* Synchronize Tx Enable */
#define MAC_CFG1_RXE		BIT(2)	/* Rx Enable */
#define MAC_CFG1_SRX		BIT(3)	/* Synchronize Rx Enable */
#define MAC_CFG1_TFC		BIT(4)	/* Tx Flow Control Enable */
#define MAC_CFG1_RFC		BIT(5)	/* Rx Flow Control Enable */
#define MAC_CFG1_SR		BIT(31)	/* Soft Reset */
#define MAC_CFG1_INIT	(MAC_CFG1_RXE | MAC_CFG1_TXE | \
			 MAC_CFG1_SRX | MAC_CFG1_STX)

#define AG71XX_REG_MAC_CFG2	0x0004
#define MAC_CFG2_FDX		BIT(0)
#define MAC_CFG2_PAD_CRC_EN	BIT(2)
#define MAC_CFG2_LEN_CHECK	BIT(4)
#define MAC_CFG2_IF_1000	BIT(9)
#define MAC_CFG2_IF_10_100	BIT(8)

#define AG71XX_REG_MAC_MFL	0x0010

#define AG71XX_REG_MII_CFG	0x0020
#define MII_CFG_CLK_DIV_4	0
#define MII_CFG_CLK_DIV_6	2
#define MII_CFG_CLK_DIV_8	3
#define MII_CFG_CLK_DIV_10	4
#define MII_CFG_CLK_DIV_14	5
#define MII_CFG_CLK_DIV_20	6
#define MII_CFG_CLK_DIV_28	7
#define MII_CFG_CLK_DIV_34	8
#define MII_CFG_CLK_DIV_42	9
#define MII_CFG_CLK_DIV_50	10
#define MII_CFG_CLK_DIV_58	11
#define MII_CFG_CLK_DIV_66	12
#define MII_CFG_CLK_DIV_74	13
#define MII_CFG_CLK_DIV_82	14
#define MII_CFG_CLK_DIV_98	15
#define MII_CFG_RESET		BIT(31)

#define AG71XX_REG_MII_CMD	0x0024
#define MII_CMD_READ		BIT(0)

#define AG71XX_REG_MII_ADDR	0x0028
#define MII_ADDR_SHIFT		8

#define AG71XX_REG_MII_CTRL	0x002c
#define AG71XX_REG_MII_STATUS	0x0030
#define AG71XX_REG_MII_IND	0x0034
#define MII_IND_BUSY		BIT(0)
#define MII_IND_INVALID		BIT(2)

#define AG71XX_REG_MAC_IFCTL	0x0038
#define MAC_IFCTL_SPEED		BIT(16)

#define AG71XX_REG_MAC_ADDR1	0x0040
#define AG71XX_REG_MAC_ADDR2	0x0044
#define AG71XX_REG_FIFO_CFG0	0x0048
#define FIFO_CFG0_WTM		BIT(0)	/* Watermark Module */
#define FIFO_CFG0_RXS		BIT(1)	/* Rx System Module */
#define FIFO_CFG0_RXF		BIT(2)	/* Rx Fabric Module */
#define FIFO_CFG0_TXS		BIT(3)	/* Tx System Module */
#define FIFO_CFG0_TXF		BIT(4)	/* Tx Fabric Module */
#define FIFO_CFG0_ALL	(FIFO_CFG0_WTM | FIFO_CFG0_RXS | FIFO_CFG0_RXF \
			| FIFO_CFG0_TXS | FIFO_CFG0_TXF)
#define FIFO_CFG0_INIT	(FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)

#define FIFO_CFG0_ENABLE_SHIFT	8
#define AG71XX_REG_FIFO_CFG1	0x004c
#define AG71XX_REG_FIFO_CFG2	0x0050
#define AG71XX_REG_FIFO_CFG3	0x0054
#define AG71XX_REG_FIFO_CFG4	0x0058
#define FIFO_CFG4_DE		BIT(0)	/* Drop Event */
#define FIFO_CFG4_DV		BIT(1)	/* RX_DV Event */
#define FIFO_CFG4_FC		BIT(2)	/* False Carrier */
#define FIFO_CFG4_CE		BIT(3)	/* Code Error */
#define FIFO_CFG4_CR		BIT(4)	/* CRC error */
#define FIFO_CFG4_LM		BIT(5)	/* Length Mismatch */
#define FIFO_CFG4_LO		BIT(6)	/* Length out of range */
#define FIFO_CFG4_OK		BIT(7)	/* Packet is OK */
#define FIFO_CFG4_MC		BIT(8)	/* Multicast Packet */
#define FIFO_CFG4_BC		BIT(9)	/* Broadcast Packet */
#define FIFO_CFG4_DR		BIT(10)	/* Dribble */
#define FIFO_CFG4_LE		BIT(11)	/* Long Event */
#define FIFO_CFG4_CF		BIT(12)	/* Control Frame */
#define FIFO_CFG4_PF		BIT(13)	/* Pause Frame */
#define FIFO_CFG4_UO		BIT(14)	/* Unsupported Opcode */
#define FIFO_CFG4_VT		BIT(15)	/* VLAN tag detected */
#define FIFO_CFG4_FT		BIT(16)	/* Frame Truncated */
#define FIFO_CFG4_UC		BIT(17)	/* Unicast Packet */
#define FIFO_CFG4_INIT	(FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
			 FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
			 FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
			 FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
			 FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
			 FIFO_CFG4_VT | FIFO_CFG4_FT | FIFO_CFG4_UC)
#define AG71XX_REG_FIFO_CFG5	0x005c
#define FIFO_CFG5_DE		BIT(0)	/* Drop Event */
#define FIFO_CFG5_DV		BIT(1)	/* RX_DV Event */
#define FIFO_CFG5_FC		BIT(2)	/* False Carrier */
#define FIFO_CFG5_CE		BIT(3)	/* Code Error */
#define FIFO_CFG5_LM		BIT(4)	/* Length Mismatch */
#define FIFO_CFG5_LO		BIT(5)	/* Length Out of Range */
#define FIFO_CFG5_OK		BIT(6)	/* Packet is OK */
#define FIFO_CFG5_MC		BIT(7)	/* Multicast Packet */
#define FIFO_CFG5_BC		BIT(8)	/* Broadcast Packet */
#define FIFO_CFG5_DR		BIT(9)	/* Dribble */
#define FIFO_CFG5_CF		BIT(10)	/* Control Frame */
#define FIFO_CFG5_PF		BIT(11)	/* Pause Frame */
#define FIFO_CFG5_UO		BIT(12)	/* Unsupported Opcode */
#define FIFO_CFG5_VT		BIT(13)	/* VLAN tag detected */
#define FIFO_CFG5_LE		BIT(14)	/* Long Event */
#define FIFO_CFG5_FT		BIT(15)	/* Frame Truncated */
#define FIFO_CFG5_16		BIT(16)	/* unknown */
#define FIFO_CFG5_17		BIT(17)	/* unknown */
#define FIFO_CFG5_SF		BIT(18)	/* Short Frame */
#define FIFO_CFG5_BM		BIT(19)	/* Byte Mode */
#define FIFO_CFG5_INIT	(FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
			 FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
			 FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
			 FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
			 FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
			 FIFO_CFG5_17 | FIFO_CFG5_SF)
#define AG71XX_REG_TX_CTRL	0x0180
#define TX_CTRL_TXE		BIT(0)	/* Tx Enable */

#define AG71XX_REG_TX_DESC	0x0184
#define AG71XX_REG_TX_STATUS	0x0188
#define TX_STATUS_PS		BIT(0)	/* Packet Sent */
#define TX_STATUS_UR		BIT(1)	/* Tx Underrun */
#define TX_STATUS_BE		BIT(3)	/* Bus Error */

#define AG71XX_REG_RX_CTRL	0x018c
#define RX_CTRL_RXE		BIT(0)	/* Rx Enable */

#define AG71XX_DMA_RETRY	10
#define AG71XX_DMA_DELAY	1

#define AG71XX_REG_RX_DESC	0x0190
#define AG71XX_REG_RX_STATUS	0x0194
#define RX_STATUS_PR		BIT(0)	/* Packet Received */
#define RX_STATUS_OF		BIT(2)	/* Rx Overflow */
#define RX_STATUS_BE		BIT(3)	/* Bus Error */

#define AG71XX_REG_INT_ENABLE	0x0198
#define AG71XX_REG_INT_STATUS	0x019c
#define AG71XX_INT_TX_PS	BIT(0)
#define AG71XX_INT_TX_UR	BIT(1)
#define AG71XX_INT_TX_BE	BIT(3)
#define AG71XX_INT_RX_PR	BIT(4)
#define AG71XX_INT_RX_OF	BIT(6)
#define AG71XX_INT_RX_BE	BIT(7)

#define AG71XX_REG_FIFO_DEPTH	0x01a8
#define AG71XX_REG_RX_SM	0x01b0
#define AG71XX_REG_TX_SM	0x01b4

#define ETH_SWITCH_HEADER_LEN	2

#define AG71XX_DEFAULT_MSG_ENABLE		\
	(NETIF_MSG_DRV				\
	| NETIF_MSG_PROBE			\
	| NETIF_MSG_LINK			\
	| NETIF_MSG_TIMER			\
	| NETIF_MSG_IFDOWN			\
	| NETIF_MSG_IFUP			\
	| NETIF_MSG_RX_ERR			\
	| NETIF_MSG_TX_ERR)
#define DESC_EMPTY	BIT(31)
#define DESC_MORE	BIT(24)
#define DESC_PKTLEN_M	0xfff

struct ag71xx_desc {
	u32 data;
	u32 ctrl;
	u32 next;
	u32 pad;
} __aligned(4);

#define AG71XX_DESC_SIZE	roundup(sizeof(struct ag71xx_desc), \
					L1_CACHE_BYTES)
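/* Ownership of a descriptor is tracked via DESC_EMPTY in ->ctrl: on the RX
 * ring the driver sets DESC_EMPTY to hand a buffer to the MAC, which clears
 * it (and stores the packet length in the low bits) once a frame has landed;
 * on the TX ring the driver clears DESC_EMPTY to start transmission and the
 * MAC sets it again once the frame has been sent.
 */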
265 /* "Hot" fields in the data path. */
269 /* "Cold" fields - not used in the data path. */
270 struct ag71xx_buf
*buf
;
273 dma_addr_t descs_dma
;
290 u16 desc_pktlen_mask
;
291 bool tx_hang_workaround
;
292 enum ag71xx_type type
;
296 /* Critical data related to the per-packet data path are clustered
297 * early in this structure to help improve the D-cache footprint.
299 struct ag71xx_ring rx_ring ____cacheline_aligned
;
300 struct ag71xx_ring tx_ring ____cacheline_aligned
;
305 struct net_device
*ndev
;
306 struct platform_device
*pdev
;
307 struct napi_struct napi
;
309 const struct ag71xx_dcfg
*dcfg
;
311 /* From this point onwards we're not looking at per-packet fields. */
312 void __iomem
*mac_base
;
314 struct ag71xx_desc
*stop_desc
;
315 dma_addr_t stop_desc_dma
;
317 phy_interface_t phy_if_mode
;
318 struct phylink
*phylink
;
319 struct phylink_config phylink_config
;
321 struct delayed_work restart_work
;
322 struct timer_list oom_timer
;
324 struct reset_control
*mac_reset
;
329 struct reset_control
*mdio_reset
;
330 struct mii_bus
*mii_bus
;
331 struct clk
*clk_mdio
;
static int ag71xx_desc_empty(struct ag71xx_desc *desc)
{
	return (desc->ctrl & DESC_EMPTY) != 0;
}

static struct ag71xx_desc *ag71xx_ring_desc(struct ag71xx_ring *ring, int idx)
{
	return (struct ag71xx_desc *)&ring->descs_cpu[idx * AG71XX_DESC_SIZE];
}

static int ag71xx_ring_size_order(int size)
{
	return fls(size - 1);
}
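/* Ring sizes are always powers of two (the order is computed with fls()
 * above), so ->curr and ->dirty can be free-running counters and a simple
 * "& (BIT(order) - 1)" turns them into ring indices.  E.g. the default RX
 * ring of 256 entries has order 8.
 */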
static bool ag71xx_is(struct ag71xx *ag, enum ag71xx_type type)
{
	return ag->dcfg->type == type;
}
static void ag71xx_wr(struct ag71xx *ag, unsigned int reg, u32 value)
{
	iowrite32(value, ag->mac_base + reg);
	/* flush write */
	(void)ioread32(ag->mac_base + reg);
}

static u32 ag71xx_rr(struct ag71xx *ag, unsigned int reg)
{
	return ioread32(ag->mac_base + reg);
}

static void ag71xx_sb(struct ag71xx *ag, unsigned int reg, u32 mask)
{
	void __iomem *r;

	r = ag->mac_base + reg;
	iowrite32(ioread32(r) | mask, r);
	/* flush write */
	(void)ioread32(r);
}

static void ag71xx_cb(struct ag71xx *ag, unsigned int reg, u32 mask)
{
	void __iomem *r;

	r = ag->mac_base + reg;
	iowrite32(ioread32(r) & ~mask, r);
	/* flush write */
	(void)ioread32(r);
}

static void ag71xx_int_enable(struct ag71xx *ag, u32 ints)
{
	ag71xx_sb(ag, AG71XX_REG_INT_ENABLE, ints);
}

static void ag71xx_int_disable(struct ag71xx *ag, u32 ints)
{
	ag71xx_cb(ag, AG71XX_REG_INT_ENABLE, ints);
}
static int ag71xx_mdio_wait_busy(struct ag71xx *ag)
{
	struct net_device *ndev = ag->ndev;
	int i;

	for (i = 0; i < AG71XX_MDIO_RETRY; i++) {
		u32 busy;

		udelay(AG71XX_MDIO_DELAY);

		busy = ag71xx_rr(ag, AG71XX_REG_MII_IND);
		if (!busy)
			return 0;

		udelay(AG71XX_MDIO_DELAY);
	}

	netif_err(ag, link, ndev, "MDIO operation timed out\n");

	return -ETIMEDOUT;
}

static int ag71xx_mdio_mii_read(struct mii_bus *bus, int addr, int reg)
{
	struct ag71xx *ag = bus->priv;
	int err, val;

	err = ag71xx_mdio_wait_busy(ag);
	if (err)
		return err;

	ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
		  ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
	/* enable read mode */
	ag71xx_wr(ag, AG71XX_REG_MII_CMD, MII_CMD_READ);

	err = ag71xx_mdio_wait_busy(ag);
	if (err)
		return err;

	val = ag71xx_rr(ag, AG71XX_REG_MII_STATUS);
	/* disable read mode */
	ag71xx_wr(ag, AG71XX_REG_MII_CMD, 0);

	netif_dbg(ag, link, ag->ndev, "mii_read: addr=%04x, reg=%04x, value=%04x\n",
		  addr, reg, val);

	return val;
}

static int ag71xx_mdio_mii_write(struct mii_bus *bus, int addr, int reg,
				 u16 val)
{
	struct ag71xx *ag = bus->priv;

	netif_dbg(ag, link, ag->ndev, "mii_write: addr=%04x, reg=%04x, value=%04x\n",
		  addr, reg, val);

	ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
		  ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
	ag71xx_wr(ag, AG71XX_REG_MII_CTRL, val);

	return ag71xx_mdio_wait_busy(ag);
}
static const u32 ar71xx_mdio_div_table[] = {
	4, 4, 6, 8, 10, 14, 20, 28,
};

static const u32 ar7240_mdio_div_table[] = {
	2, 2, 4, 6, 8, 12, 18, 26, 32, 40, 48, 56, 62, 70, 78, 96,
};

static const u32 ar933x_mdio_div_table[] = {
	4, 4, 6, 8, 10, 14, 20, 28, 34, 42, 50, 58, 66, 74, 82, 98,
};
static int ag71xx_mdio_get_divider(struct ag71xx *ag, u32 *div)
{
	unsigned long ref_clock;
	const u32 *table;
	int ndivs, i;

	ref_clock = clk_get_rate(ag->clk_mdio);
	if (!ref_clock)
		return -EINVAL;

	if (ag71xx_is(ag, AR9330) || ag71xx_is(ag, AR9340)) {
		table = ar933x_mdio_div_table;
		ndivs = ARRAY_SIZE(ar933x_mdio_div_table);
	} else if (ag71xx_is(ag, AR7240)) {
		table = ar7240_mdio_div_table;
		ndivs = ARRAY_SIZE(ar7240_mdio_div_table);
	} else {
		table = ar71xx_mdio_div_table;
		ndivs = ARRAY_SIZE(ar71xx_mdio_div_table);
	}

	for (i = 0; i < ndivs; i++) {
		unsigned long t;

		t = ref_clock / table[i];
		if (t <= AG71XX_MDIO_MAX_CLK) {
			*div = i;
			return 0;
		}
	}

	return -ENOENT;
}
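/* Example: with a 100 MHz MDIO reference clock and the ar71xx table above,
 * the first divider that brings the bus at or below AG71XX_MDIO_MAX_CLK
 * (5 MHz) is 20 (100 MHz / 20 = 5 MHz), i.e. *div becomes 6, matching the
 * MII_CFG_CLK_DIV_20 encoding of the MII_CFG register.
 */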
static int ag71xx_mdio_reset(struct mii_bus *bus)
{
	struct ag71xx *ag = bus->priv;
	int err;
	u32 t;

	err = ag71xx_mdio_get_divider(ag, &t);
	if (err)
		return err;

	ag71xx_wr(ag, AG71XX_REG_MII_CFG, t | MII_CFG_RESET);
	usleep_range(100, 200);

	ag71xx_wr(ag, AG71XX_REG_MII_CFG, t);
	usleep_range(100, 200);

	return 0;
}

static int ag71xx_mdio_probe(struct ag71xx *ag)
{
	struct device *dev = &ag->pdev->dev;
	struct net_device *ndev = ag->ndev;
	struct mii_bus *mii_bus;
	struct device_node *np, *mnp;
	int err;

	np = dev->of_node;
	ag->mii_bus = NULL;

	ag->clk_mdio = devm_clk_get(dev, "mdio");
	if (IS_ERR(ag->clk_mdio)) {
		netif_err(ag, probe, ndev, "Failed to get mdio clk.\n");
		return PTR_ERR(ag->clk_mdio);
	}

	err = clk_prepare_enable(ag->clk_mdio);
	if (err) {
		netif_err(ag, probe, ndev, "Failed to enable mdio clk.\n");
		return err;
	}

	mii_bus = devm_mdiobus_alloc(dev);
	if (!mii_bus) {
		err = -ENOMEM;
		goto mdio_err_put_clk;
	}

	ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio");
	if (IS_ERR(ag->mdio_reset)) {
		netif_err(ag, probe, ndev, "Failed to get reset mdio.\n");
		return PTR_ERR(ag->mdio_reset);
	}

	mii_bus->name = "ag71xx_mdio";
	mii_bus->read = ag71xx_mdio_mii_read;
	mii_bus->write = ag71xx_mdio_mii_write;
	mii_bus->reset = ag71xx_mdio_reset;
	mii_bus->priv = ag;
	mii_bus->parent = dev;
	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s.%d", np->name, ag->mac_idx);

	if (!IS_ERR(ag->mdio_reset)) {
		reset_control_assert(ag->mdio_reset);
		msleep(100);
		reset_control_deassert(ag->mdio_reset);
		msleep(200);
	}

	mnp = of_get_child_by_name(np, "mdio");
	err = of_mdiobus_register(mii_bus, mnp);
	of_node_put(mnp);
	if (err)
		goto mdio_err_put_clk;

	ag->mii_bus = mii_bus;

	return 0;

mdio_err_put_clk:
	clk_disable_unprepare(ag->clk_mdio);
	return err;
}

static void ag71xx_mdio_remove(struct ag71xx *ag)
{
	if (ag->mii_bus)
		mdiobus_unregister(ag->mii_bus);
	clk_disable_unprepare(ag->clk_mdio);
}
static void ag71xx_hw_stop(struct ag71xx *ag)
{
	/* disable all interrupts and stop the rx/tx engine */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
}
static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
{
	unsigned long timestamp;
	u32 rx_sm, tx_sm, rx_fd;

	timestamp = netdev_get_tx_queue(ag->ndev, 0)->trans_start;
	if (likely(time_before(jiffies, timestamp + HZ / 10)))
		return false;

	if (!netif_carrier_ok(ag->ndev))
		return false;

	rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
	if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
		return true;

	tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
	rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
	if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
	    ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
		return true;

	return false;
}
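/* The RX/TX state-machine and FIFO-depth values tested above are magic
 * numbers carried over from the pre-mainline version of this driver (see
 * the author list at the top of the file); they identify the known wedged
 * states of the DMA engine.  The check only fires once the queue has been
 * stalled for more than ~100ms (HZ / 10) with the link up, and is consulted
 * only on chips that set dcfg->tx_hang_workaround.
 */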
static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int sent = 0, bytes_compl = 0, n = 0;
	struct net_device *ndev = ag->ndev;
	int ring_mask, ring_size;
	bool dma_stuck = false;

	ring_mask = BIT(ring->order) - 1;
	ring_size = BIT(ring->order);

	netif_dbg(ag, tx_queued, ndev, "processing TX ring\n");

	while (ring->dirty + n != ring->curr) {
		struct ag71xx_desc *desc;
		struct sk_buff *skb;
		unsigned int i;

		i = (ring->dirty + n) & ring_mask;
		desc = ag71xx_ring_desc(ring, i);
		skb = ring->buf[i].tx.skb;

		if (!flush && !ag71xx_desc_empty(desc)) {
			if (ag->dcfg->tx_hang_workaround &&
			    ag71xx_check_dma_stuck(ag)) {
				schedule_delayed_work(&ag->restart_work,
						      HZ / 2);
				dma_stuck = true;
			}
			break;
		}

		if (flush)
			desc->ctrl |= DESC_EMPTY;

		n++;
		if (!skb)
			continue;

		dev_kfree_skb_any(skb);
		ring->buf[i].tx.skb = NULL;

		bytes_compl += ring->buf[i].tx.len;

		sent++;
		ring->dirty += n;

		while (n > 0) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
			n--;
		}
	}

	netif_dbg(ag, tx_done, ndev, "%d packets sent out\n", sent);

	if (!sent)
		return 0;

	ag->ndev->stats.tx_bytes += bytes_compl;
	ag->ndev->stats.tx_packets += sent;

	netdev_completed_queue(ag->ndev, sent, bytes_compl);
	if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
		netif_wake_queue(ag->ndev);

	if (!dma_stuck)
		cancel_delayed_work(&ag->restart_work);

	return sent;
}
static void ag71xx_dma_wait_stop(struct ag71xx *ag)
{
	struct net_device *ndev = ag->ndev;
	int i;

	for (i = 0; i < AG71XX_DMA_RETRY; i++) {
		u32 rx, tx;

		mdelay(AG71XX_DMA_DELAY);

		rx = ag71xx_rr(ag, AG71XX_REG_RX_CTRL) & RX_CTRL_RXE;
		tx = ag71xx_rr(ag, AG71XX_REG_TX_CTRL) & TX_CTRL_TXE;
		if (!rx && !tx)
			return;
	}

	netif_err(ag, hw, ndev, "DMA stop operation timed out\n");
}
static void ag71xx_dma_reset(struct ag71xx *ag)
{
	struct net_device *ndev = ag->ndev;
	u32 val;
	int i;

	/* stop RX and TX */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);

	/* give the hardware some time to really stop all rx/tx activity
	 * clearing the descriptors too early causes random memory corruption
	 */
	ag71xx_dma_wait_stop(ag);

	/* clear descriptor addresses */
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);

	/* clear pending RX/TX interrupts */
	for (i = 0; i < 256; i++) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
	}

	/* clear pending errors */
	ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
	ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);

	val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (val)
		netif_err(ag, hw, ndev, "unable to clear DMA Rx status: %08x\n",
			  val);

	val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);

	/* mask out reserved bits */
	val &= ~0xff000000;

	if (val)
		netif_err(ag, hw, ndev, "unable to clear DMA Tx status: %08x\n",
			  val);
}
static void ag71xx_hw_setup(struct ag71xx *ag)
{
	u32 init = MAC_CFG1_INIT;

	/* setup MAC configuration registers */
	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
		  MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);

	/* setup max frame length to zero */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);

	/* setup FIFO configuration registers */
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, ag->fifodata[0]);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, ag->fifodata[1]);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
}

static unsigned int ag71xx_max_frame_len(unsigned int mtu)
{
	return ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}
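/* E.g. for the standard MTU of 1500 this yields 2 + 14 + 4 + 1500 + 4 =
 * 1524 bytes: switch header, Ethernet header, one VLAN tag, payload, FCS.
 */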
static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
{
	u32 t;

	t = (((u32)mac[5]) << 24) | (((u32)mac[4]) << 16)
	  | (((u32)mac[3]) << 8) | ((u32)mac[2]);

	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);

	t = (((u32)mac[1]) << 24) | (((u32)mac[0]) << 16);
	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
}
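/* The two registers hold the address in reverse byte order, e.g. for
 * 00:11:22:33:44:55 MAC_ADDR1 becomes 0x55443322 and MAC_ADDR2 becomes
 * 0x11000000.
 */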
static void ag71xx_fast_reset(struct ag71xx *ag)
{
	struct net_device *dev = ag->ndev;
	u32 rx_ds;
	u32 mii_reg;

	ag71xx_hw_stop(ag);

	mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
	rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);

	ag71xx_tx_packets(ag, true);

	reset_control_assert(ag->mac_reset);
	usleep_range(10, 20);
	reset_control_deassert(ag->mac_reset);
	usleep_range(10, 20);

	ag71xx_dma_reset(ag);
	ag71xx_hw_setup(ag);
	ag->tx_ring.curr = 0;
	ag->tx_ring.dirty = 0;
	netdev_reset_queue(ag->ndev);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ag->ndev->mtu));

	ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);

	ag71xx_hw_set_macaddr(ag, dev->dev_addr);
}
static void ag71xx_hw_start(struct ag71xx *ag)
{
	/* start RX engine */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);

	/* enable interrupts */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);

	netif_wake_queue(ag->ndev);
}
static void ag71xx_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));

	if (phylink_autoneg_inband(mode))
		return;

	if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
		ag71xx_fast_reset(ag);

	if (ag->tx_ring.desc_split) {
		ag->fifodata[2] &= 0xffff;
		ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
	}

	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);
}

static void ag71xx_mac_validate(struct phylink_config *config,
				unsigned long *supported,
				struct phylink_link_state *state)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != PHY_INTERFACE_MODE_GMII &&
	    state->interface != PHY_INTERFACE_MODE_MII) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		return;
	}

	phylink_set(mask, MII);

	phylink_set(mask, Autoneg);
	phylink_set(mask, 10baseT_Half);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Half);
	phylink_set(mask, 100baseT_Full);

	if (state->interface == PHY_INTERFACE_MODE_NA ||
	    state->interface == PHY_INTERFACE_MODE_GMII) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
	}

	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}
static void ag71xx_mac_pcs_get_state(struct phylink_config *config,
				     struct phylink_link_state *state)
{
	state->link = 0;
}

static void ag71xx_mac_an_restart(struct phylink_config *config)
{
	/* Not Supported */
}

static void ag71xx_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));

	ag71xx_hw_stop(ag);
}

static void ag71xx_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
	u32 cfg2;
	u32 ifctl;
	u32 fifo5;

	cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
	cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
	cfg2 |= duplex ? MAC_CFG2_FDX : 0;

	ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
	ifctl &= ~(MAC_IFCTL_SPEED);

	fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
	fifo5 &= ~FIFO_CFG5_BM;

	switch (speed) {
	case SPEED_1000:
		cfg2 |= MAC_CFG2_IF_1000;
		fifo5 |= FIFO_CFG5_BM;
		break;
	case SPEED_100:
		cfg2 |= MAC_CFG2_IF_10_100;
		ifctl |= MAC_IFCTL_SPEED;
		break;
	case SPEED_10:
		cfg2 |= MAC_CFG2_IF_10_100;
		break;
	default:
		return;
	}

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
	ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);

	ag71xx_hw_start(ag);
}
static const struct phylink_mac_ops ag71xx_phylink_mac_ops = {
	.validate = ag71xx_mac_validate,
	.mac_pcs_get_state = ag71xx_mac_pcs_get_state,
	.mac_an_restart = ag71xx_mac_an_restart,
	.mac_config = ag71xx_mac_config,
	.mac_link_down = ag71xx_mac_link_down,
	.mac_link_up = ag71xx_mac_link_up,
};
static int ag71xx_phylink_setup(struct ag71xx *ag)
{
	struct phylink *phylink;

	ag->phylink_config.dev = &ag->ndev->dev;
	ag->phylink_config.type = PHYLINK_NETDEV;

	phylink = phylink_create(&ag->phylink_config, ag->pdev->dev.fwnode,
				 ag->phy_if_mode, &ag71xx_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	ag->phylink = phylink;
	return 0;
}
static void ag71xx_ring_tx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_mask = BIT(ring->order) - 1;
	u32 bytes_compl = 0, pkts_compl = 0;
	struct net_device *ndev = ag->ndev;

	while (ring->curr != ring->dirty) {
		struct ag71xx_desc *desc;
		u32 i = ring->dirty & ring_mask;

		desc = ag71xx_ring_desc(ring, i);
		if (!ag71xx_desc_empty(desc)) {
			desc->ctrl = 0;
			ndev->stats.tx_errors++;
		}

		if (ring->buf[i].tx.skb) {
			bytes_compl += ring->buf[i].tx.len;
			pkts_compl++;
			dev_kfree_skb_any(ring->buf[i].tx.skb);
		}
		ring->buf[i].tx.skb = NULL;
		ring->dirty++;
	}

	/* flush descriptors */
	wmb();

	netdev_completed_queue(ndev, pkts_compl, bytes_compl);
}

static void ag71xx_ring_tx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = ring_size - 1;
	int i;

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32)(ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		desc->ctrl = DESC_EMPTY;
		ring->buf[i].tx.skb = NULL;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;
	netdev_reset_queue(ag->ndev);
}

static void ag71xx_ring_rx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int i;

	if (!ring->buf)
		return;

	for (i = 0; i < ring_size; i++)
		if (ring->buf[i].rx.rx_buf) {
			dma_unmap_single(&ag->pdev->dev,
					 ring->buf[i].rx.dma_addr,
					 ag->rx_buf_size, DMA_FROM_DEVICE);
			skb_free_frag(ring->buf[i].rx.rx_buf);
		}
}
static int ag71xx_buffer_size(struct ag71xx *ag)
{
	return ag->rx_buf_size +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
			       int offset,
			       void *(*alloc)(unsigned int size))
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct ag71xx_desc *desc;
	void *data;

	desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);

	data = alloc(ag71xx_buffer_size(ag));
	if (!data)
		return false;

	buf->rx.rx_buf = data;
	buf->rx.dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size,
					  DMA_FROM_DEVICE);
	desc->data = (u32)buf->rx.dma_addr + offset;
	return true;
}

static int ag71xx_ring_rx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct net_device *ndev = ag->ndev;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	unsigned int i;
	int ret;

	ret = 0;
	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32)(ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		netif_dbg(ag, rx_status, ndev, "RX desc at %p, next is %08x\n",
			  desc, desc->next);
	}

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], ag->rx_buf_offset,
					netdev_alloc_frag)) {
			ret = -ENOMEM;
			break;
		}

		desc->ctrl = DESC_EMPTY;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;

	return ret;
}

static int ag71xx_ring_rx_refill(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_mask = BIT(ring->order) - 1;
	int offset = ag->rx_buf_offset;
	unsigned int count;

	count = 0;
	for (; ring->curr - ring->dirty > 0; ring->dirty++) {
		struct ag71xx_desc *desc;
		unsigned int i;

		i = ring->dirty & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ring->buf[i].rx.rx_buf &&
		    !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
					napi_alloc_frag))
			break;

		desc->ctrl = DESC_EMPTY;
		count++;
	}

	/* flush descriptors */
	wmb();

	netif_dbg(ag, rx_status, ag->ndev, "%u rx descriptors refilled\n",
		  count);

	return count;
}
*ag
)
1169 struct ag71xx_ring
*tx
= &ag
->tx_ring
;
1170 struct ag71xx_ring
*rx
= &ag
->rx_ring
;
1171 int ring_size
, tx_size
;
1173 ring_size
= BIT(tx
->order
) + BIT(rx
->order
);
1174 tx_size
= BIT(tx
->order
);
1176 tx
->buf
= kcalloc(ring_size
, sizeof(*tx
->buf
), GFP_KERNEL
);
1180 tx
->descs_cpu
= dma_alloc_coherent(&ag
->pdev
->dev
,
1181 ring_size
* AG71XX_DESC_SIZE
,
1182 &tx
->descs_dma
, GFP_KERNEL
);
1183 if (!tx
->descs_cpu
) {
1189 rx
->buf
= &tx
->buf
[tx_size
];
1190 rx
->descs_cpu
= ((void *)tx
->descs_cpu
) + tx_size
* AG71XX_DESC_SIZE
;
1191 rx
->descs_dma
= tx
->descs_dma
+ tx_size
* AG71XX_DESC_SIZE
;
1193 ag71xx_ring_tx_init(ag
);
1194 return ag71xx_ring_rx_init(ag
);
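/* Note that TX and RX share a single ag71xx_buf array and a single coherent
 * descriptor allocation: rx->buf starts at tx->buf[tx_size] and the RX
 * descriptors follow the TX descriptors in the same DMA block, which is why
 * only the TX side is freed in ag71xx_rings_free() below.
 */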
static void ag71xx_rings_free(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size;

	ring_size = BIT(tx->order) + BIT(rx->order);

	if (tx->descs_cpu)
		dma_free_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE,
				  tx->descs_cpu, tx->descs_dma);

	kfree(tx->buf);

	tx->descs_cpu = NULL;
	rx->descs_cpu = NULL;
	tx->buf = NULL;
	rx->buf = NULL;
}

static void ag71xx_rings_cleanup(struct ag71xx *ag)
{
	ag71xx_ring_rx_clean(ag);
	ag71xx_ring_tx_clean(ag);
	ag71xx_rings_free(ag);

	netdev_reset_queue(ag->ndev);
}
static void ag71xx_hw_init(struct ag71xx *ag)
{
	ag71xx_hw_stop(ag);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
	usleep_range(20, 30);

	reset_control_assert(ag->mac_reset);
	usleep_range(100, 200);
	reset_control_deassert(ag->mac_reset);
	usleep_range(200, 300);

	ag71xx_hw_setup(ag);

	ag71xx_dma_reset(ag);
}
static int ag71xx_hw_enable(struct ag71xx *ag)
{
	int ret;

	ret = ag71xx_rings_init(ag);
	if (ret)
		return ret;

	napi_enable(&ag->napi);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
	netif_start_queue(ag->ndev);

	return 0;
}

static void ag71xx_hw_disable(struct ag71xx *ag)
{
	netif_stop_queue(ag->ndev);

	ag71xx_hw_stop(ag);
	ag71xx_dma_reset(ag);

	napi_disable(&ag->napi);
	del_timer_sync(&ag->oom_timer);

	ag71xx_rings_cleanup(ag);
}

static int ag71xx_open(struct net_device *ndev)
{
	struct ag71xx *ag = netdev_priv(ndev);
	unsigned int max_frame_len;
	int ret;

	ret = phylink_of_phy_connect(ag->phylink, ag->pdev->dev.of_node, 0);
	if (ret) {
		netif_err(ag, link, ndev, "phylink_of_phy_connect failed with err: %i\n",
			  ret);
		goto err;
	}

	max_frame_len = ag71xx_max_frame_len(ndev->mtu);
	ag->rx_buf_size =
		SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
	ag71xx_hw_set_macaddr(ag, ndev->dev_addr);

	ret = ag71xx_hw_enable(ag);
	if (ret)
		goto err;

	phylink_start(ag->phylink);

	return 0;

err:
	ag71xx_rings_cleanup(ag);
	return ret;
}

static int ag71xx_stop(struct net_device *ndev)
{
	struct ag71xx *ag = netdev_priv(ndev);

	phylink_stop(ag->phylink);
	phylink_disconnect_phy(ag->phylink);
	ag71xx_hw_disable(ag);

	return 0;
}
static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
{
	int i, ring_mask, ndesc, split;
	struct ag71xx_desc *desc;

	ring_mask = BIT(ring->order) - 1;
	ndesc = 0;
	split = ring->desc_split;

	if (!split)
		split = len;

	while (len > 0) {
		unsigned int cur_len = len;

		i = (ring->curr + ndesc) & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_desc_empty(desc))
			return -1;

		if (cur_len > split) {
			cur_len = split;

			/* TX will hang if DMA transfers <= 4 bytes,
			 * make sure next segment is more than 4 bytes long.
			 */
			if (len <= split + 4)
				cur_len -= 4;
		}

		desc->data = addr;
		addr += cur_len;
		len -= cur_len;

		if (len > 0)
			cur_len |= DESC_MORE;

		/* prevent early tx attempt of this descriptor */
		if (!ndesc)
			cur_len |= DESC_EMPTY;

		desc->ctrl = cur_len;
		ndesc++;
	}

	return ndesc;
}
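/* Example with the AR7100 split of 512 bytes: a 1500 byte frame is emitted
 * as three descriptors of 512, 512 and 476 bytes, all but the last flagged
 * DESC_MORE.  AG71XX_TX_RING_DS_PER_PKT (DIV_ROUND_UP(1540, 512) = 4) is
 * the matching worst-case descriptor count per packet used for ring sizing.
 */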
static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
					  struct net_device *ndev)
{
	int i, n, ring_min, ring_mask, ring_size;
	struct ag71xx *ag = netdev_priv(ndev);
	struct ag71xx_ring *ring;
	struct ag71xx_desc *desc;
	dma_addr_t dma_addr;

	ring = &ag->tx_ring;
	ring_mask = BIT(ring->order) - 1;
	ring_size = BIT(ring->order);

	if (skb->len <= 4) {
		netif_dbg(ag, tx_err, ndev, "packet len is too small\n");
		goto err_drop;
	}

	dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);

	i = ring->curr & ring_mask;
	desc = ag71xx_ring_desc(ring, i);

	/* setup descriptor fields */
	n = ag71xx_fill_dma_desc(ring, (u32)dma_addr,
				 skb->len & ag->dcfg->desc_pktlen_mask);
	if (n < 0)
		goto err_drop_unmap;

	i = (ring->curr + n - 1) & ring_mask;
	ring->buf[i].tx.len = skb->len;
	ring->buf[i].tx.skb = skb;

	netdev_sent_queue(ndev, skb->len);

	skb_tx_timestamp(skb);

	desc->ctrl &= ~DESC_EMPTY;
	ring->curr += n;

	/* flush descriptor */
	wmb();

	ring_min = 2;
	if (ring->desc_split)
		ring_min *= AG71XX_TX_RING_DS_PER_PKT;

	if (ring->curr - ring->dirty >= ring_size - ring_min) {
		netif_dbg(ag, tx_err, ndev, "tx queue full\n");
		netif_stop_queue(ndev);
	}

	netif_dbg(ag, tx_queued, ndev, "packet injected into TX queue\n");

	/* enable TX engine */
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);

	return NETDEV_TX_OK;

err_drop_unmap:
	dma_unmap_single(&ag->pdev->dev, dma_addr, skb->len, DMA_TO_DEVICE);

err_drop:
	ndev->stats.tx_dropped++;

	kfree_skb(skb);
	return NETDEV_TX_OK;
}
static void ag71xx_oom_timer_handler(struct timer_list *t)
{
	struct ag71xx *ag = from_timer(ag, t, oom_timer);

	napi_schedule(&ag->napi);
}

static void ag71xx_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct ag71xx *ag = netdev_priv(ndev);

	netif_err(ag, tx_err, ndev, "tx timeout\n");

	schedule_delayed_work(&ag->restart_work, 1);
}
static void ag71xx_restart_work_func(struct work_struct *work)
{
	struct ag71xx *ag = container_of(work, struct ag71xx,
					 restart_work.work);

	rtnl_lock();
	ag71xx_hw_disable(ag);
	ag71xx_hw_enable(ag);

	phylink_stop(ag->phylink);
	phylink_start(ag->phylink);

	rtnl_unlock();
}
static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
	struct net_device *ndev = ag->ndev;
	int ring_mask, ring_size, done = 0;
	unsigned int pktlen_mask, offset;
	struct sk_buff *next, *skb;
	struct ag71xx_ring *ring;
	struct list_head rx_list;

	ring = &ag->rx_ring;
	pktlen_mask = ag->dcfg->desc_pktlen_mask;
	offset = ag->rx_buf_offset;
	ring_mask = BIT(ring->order) - 1;
	ring_size = BIT(ring->order);

	netif_dbg(ag, rx_status, ndev, "rx packets, limit=%d, curr=%u, dirty=%u\n",
		  limit, ring->curr, ring->dirty);

	INIT_LIST_HEAD(&rx_list);

	while (done < limit) {
		unsigned int i = ring->curr & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		int pktlen;
		int err = 0;

		if (ag71xx_desc_empty(desc))
			break;

		if ((ring->dirty + ring_size) == ring->curr) {
			WARN_ONCE(1, "RX out of ring");
			break;
		}

		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);

		pktlen = desc->ctrl & pktlen_mask;
		pktlen -= ETH_FCS_LEN;

		dma_unmap_single(&ag->pdev->dev, ring->buf[i].rx.dma_addr,
				 ag->rx_buf_size, DMA_FROM_DEVICE);

		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += pktlen;

		skb = build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
		if (!skb) {
			skb_free_frag(ring->buf[i].rx.rx_buf);
			goto next;
		}

		skb_reserve(skb, offset);
		skb_put(skb, pktlen);

		if (err) {
			ndev->stats.rx_dropped++;
			kfree_skb(skb);
		} else {
			skb->dev = ndev;
			skb->ip_summed = CHECKSUM_NONE;
			list_add_tail(&skb->list, &rx_list);
		}

next:
		ring->buf[i].rx.rx_buf = NULL;
		done++;

		ring->curr++;
	}

	ag71xx_ring_rx_refill(ag);

	list_for_each_entry_safe(skb, next, &rx_list, list)
		skb->protocol = eth_type_trans(skb, ndev);
	netif_receive_skb_list(&rx_list);

	netif_dbg(ag, rx_status, ndev, "rx finish, curr=%u, dirty=%u, done=%d\n",
		  ring->curr, ring->dirty, done);

	return done;
}
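/* RX buffers come from the page-fragment allocators (netdev_alloc_frag and
 * napi_alloc_frag), sized via ag71xx_buffer_size() to leave room for the
 * skb_shared_info, so build_skb() above can wrap the DMA buffer into an skb
 * without copying the payload.
 */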
static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
	struct ag71xx_ring *rx_ring = &ag->rx_ring;
	int rx_ring_size = BIT(rx_ring->order);
	struct net_device *ndev = ag->ndev;
	int tx_done, rx_done;
	u32 status;

	tx_done = ag71xx_tx_packets(ag, false);

	netif_dbg(ag, rx_status, ndev, "processing RX ring\n");
	rx_done = ag71xx_rx_packets(ag, limit);

	if (!rx_ring->buf[rx_ring->dirty % rx_ring_size].rx.rx_buf)
		goto oom;

	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (unlikely(status & RX_STATUS_OF)) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		ndev->stats.rx_fifo_errors++;

		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	if (rx_done < limit) {
		if (status & RX_STATUS_PR)
			goto more;

		status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
		if (status & TX_STATUS_PS)
			goto more;

		netif_dbg(ag, rx_status, ndev, "disable polling mode, rx=%d, tx=%d, limit=%d\n",
			  rx_done, tx_done, limit);

		napi_complete(napi);

		/* enable interrupts */
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		return rx_done;
	}

more:
	netif_dbg(ag, rx_status, ndev, "stay in polling mode, rx=%d, tx=%d, limit=%d\n",
		  rx_done, tx_done, limit);
	return limit;

oom:
	netif_err(ag, rx_err, ndev, "out of memory\n");

	mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
	napi_complete(napi);
	return 0;
}
ag71xx_interrupt(int irq
, void *dev_id
)
1608 struct net_device
*ndev
= dev_id
;
1612 ag
= netdev_priv(ndev
);
1613 status
= ag71xx_rr(ag
, AG71XX_REG_INT_STATUS
);
1615 if (unlikely(!status
))
1618 if (unlikely(status
& AG71XX_INT_ERR
)) {
1619 if (status
& AG71XX_INT_TX_BE
) {
1620 ag71xx_wr(ag
, AG71XX_REG_TX_STATUS
, TX_STATUS_BE
);
1621 netif_err(ag
, intr
, ndev
, "TX BUS error\n");
1623 if (status
& AG71XX_INT_RX_BE
) {
1624 ag71xx_wr(ag
, AG71XX_REG_RX_STATUS
, RX_STATUS_BE
);
1625 netif_err(ag
, intr
, ndev
, "RX BUS error\n");
1629 if (likely(status
& AG71XX_INT_POLL
)) {
1630 ag71xx_int_disable(ag
, AG71XX_INT_POLL
);
1631 netif_dbg(ag
, intr
, ndev
, "enable polling mode\n");
1632 napi_schedule(&ag
->napi
);
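/* Interrupt/NAPI handshake: the hard IRQ handler masks AG71XX_INT_POLL and
 * schedules NAPI; ag71xx_poll() then runs with those interrupts masked and
 * only re-enables them once both rings are idle, falling back to the OOM
 * timer instead when RX buffers could not be refilled.
 */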
static int ag71xx_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ag71xx *ag = netdev_priv(ndev);

	ndev->mtu = new_mtu;
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ndev->mtu));

	return 0;
}
static const struct net_device_ops ag71xx_netdev_ops = {
	.ndo_open		= ag71xx_open,
	.ndo_stop		= ag71xx_stop,
	.ndo_start_xmit		= ag71xx_hard_start_xmit,
	.ndo_do_ioctl		= phy_do_ioctl,
	.ndo_tx_timeout		= ag71xx_tx_timeout,
	.ndo_change_mtu		= ag71xx_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
static const u32 ar71xx_addr_ar7100[] = {
	0x19000000, 0x1a000000,
};
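/* The MAC index is derived from the MMIO base address: 0x19000000 is the
 * first built-in GMAC, 0x1a000000 the second.  Within this driver the index
 * is only used to give the MDIO bus a unique id.
 */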
static int ag71xx_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct ag71xx_dcfg *dcfg;
	struct net_device *ndev;
	struct resource *res;
	const void *mac_addr;
	int tx_size, err, i;
	struct ag71xx *ag;

	ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*ag));
	if (!ndev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	dcfg = of_device_get_match_data(&pdev->dev);
	if (!dcfg)
		return -EINVAL;

	ag = netdev_priv(ndev);
	ag->mac_idx = -1;
	for (i = 0; i < ARRAY_SIZE(ar71xx_addr_ar7100); i++) {
		if (ar71xx_addr_ar7100[i] == res->start)
			ag->mac_idx = i;
	}

	if (ag->mac_idx < 0) {
		netif_err(ag, probe, ndev, "unknown mac idx\n");
		return -EINVAL;
	}

	ag->clk_eth = devm_clk_get(&pdev->dev, "eth");
	if (IS_ERR(ag->clk_eth)) {
		netif_err(ag, probe, ndev, "Failed to get eth clk.\n");
		return PTR_ERR(ag->clk_eth);
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	ag->pdev = pdev;
	ag->ndev = ndev;
	ag->dcfg = dcfg;
	ag->msg_enable = netif_msg_init(-1, AG71XX_DEFAULT_MSG_ENABLE);
	memcpy(ag->fifodata, dcfg->fifodata, sizeof(ag->fifodata));

	ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
	if (IS_ERR(ag->mac_reset)) {
		netif_err(ag, probe, ndev, "missing mac reset\n");
		err = PTR_ERR(ag->mac_reset);
		goto err_free;
	}

	ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!ag->mac_base) {
		err = -ENOMEM;
		goto err_free;
	}

	ndev->irq = platform_get_irq(pdev, 0);
	err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
			       0x0, dev_name(&pdev->dev), ndev);
	if (err) {
		netif_err(ag, probe, ndev, "unable to request IRQ %d\n",
			  ndev->irq);
		goto err_free;
	}

	ndev->netdev_ops = &ag71xx_netdev_ops;

	INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);
	timer_setup(&ag->oom_timer, ag71xx_oom_timer_handler, 0);

	tx_size = AG71XX_TX_RING_SIZE_DEFAULT;
	ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);

	ndev->max_mtu = dcfg->max_frame_len - ag71xx_max_frame_len(0);

	ag->rx_buf_offset = NET_SKB_PAD;
	if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
		ag->rx_buf_offset += NET_IP_ALIGN;

	if (ag71xx_is(ag, AR7100)) {
		ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
		tx_size *= AG71XX_TX_RING_DS_PER_PKT;
	}
	ag->tx_ring.order = ag71xx_ring_size_order(tx_size);

	ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
					    sizeof(struct ag71xx_desc),
					    &ag->stop_desc_dma, GFP_KERNEL);
	if (!ag->stop_desc) {
		err = -ENOMEM;
		goto err_free;
	}

	ag->stop_desc->data = 0;
	ag->stop_desc->ctrl = 0;
	ag->stop_desc->next = (u32)ag->stop_desc_dma;

	mac_addr = of_get_mac_address(np);
	if (!IS_ERR(mac_addr))
		memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
	if (IS_ERR(mac_addr) || !is_valid_ether_addr(ndev->dev_addr)) {
		netif_err(ag, probe, ndev, "invalid MAC address, using random address\n");
		eth_random_addr(ndev->dev_addr);
	}

	err = of_get_phy_mode(np, &ag->phy_if_mode);
	if (err) {
		netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
		goto err_free;
	}

	netif_napi_add(ndev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);

	err = clk_prepare_enable(ag->clk_eth);
	if (err) {
		netif_err(ag, probe, ndev, "Failed to enable eth clk.\n");
		goto err_free;
	}

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);

	ag71xx_hw_init(ag);

	err = ag71xx_mdio_probe(ag);
	if (err)
		goto err_put_clk;

	platform_set_drvdata(pdev, ndev);

	err = ag71xx_phylink_setup(ag);
	if (err) {
		netif_err(ag, probe, ndev, "failed to setup phylink (%d)\n", err);
		goto err_mdio_remove;
	}

	err = register_netdev(ndev);
	if (err) {
		netif_err(ag, probe, ndev, "unable to register net device\n");
		platform_set_drvdata(pdev, NULL);
		goto err_mdio_remove;
	}

	netif_info(ag, probe, ndev, "Atheros AG71xx at 0x%08lx, irq %d, mode:%s\n",
		   (unsigned long)ag->mac_base, ndev->irq,
		   phy_modes(ag->phy_if_mode));

	return 0;

err_mdio_remove:
	ag71xx_mdio_remove(ag);
err_put_clk:
	clk_disable_unprepare(ag->clk_eth);
err_free:
	/* ndev was allocated with devm_alloc_etherdev(), nothing to free here */
	return err;
}
static int ag71xx_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ag71xx *ag;

	if (!ndev)
		return 0;

	ag = netdev_priv(ndev);
	unregister_netdev(ndev);
	ag71xx_mdio_remove(ag);
	clk_disable_unprepare(ag->clk_eth);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
static const u32 ar71xx_fifo_ar7100[] = {
	0x0fff0000, 0x00001fff, 0x00780fff,
};

static const u32 ar71xx_fifo_ar9130[] = {
	0x0fff0000, 0x00001fff, 0x008001ff,
};

static const u32 ar71xx_fifo_ar9330[] = {
	0x0010ffff, 0x015500aa, 0x01f00140,
};
static const struct ag71xx_dcfg ag71xx_dcfg_ar7100 = {
	.type = AR7100,
	.fifodata = ar71xx_fifo_ar7100,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = false,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar7240 = {
	.type = AR7240,
	.fifodata = ar71xx_fifo_ar7100,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar9130 = {
	.type = AR9130,
	.fifodata = ar71xx_fifo_ar9130,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = false,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar9330 = {
	.type = AR9330,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar9340 = {
	.type = AR9340,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = SZ_16K - 1,
	.desc_pktlen_mask = SZ_16K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_qca9530 = {
	.type = QCA9530,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = SZ_16K - 1,
	.desc_pktlen_mask = SZ_16K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_qca9550 = {
	.type = QCA9550,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_16K - 1,
	.tx_hang_workaround = true,
};
static const struct of_device_id ag71xx_match[] = {
	{ .compatible = "qca,ar7100-eth", .data = &ag71xx_dcfg_ar7100 },
	{ .compatible = "qca,ar7240-eth", .data = &ag71xx_dcfg_ar7240 },
	{ .compatible = "qca,ar7241-eth", .data = &ag71xx_dcfg_ar7240 },
	{ .compatible = "qca,ar7242-eth", .data = &ag71xx_dcfg_ar7240 },
	{ .compatible = "qca,ar9130-eth", .data = &ag71xx_dcfg_ar9130 },
	{ .compatible = "qca,ar9330-eth", .data = &ag71xx_dcfg_ar9330 },
	{ .compatible = "qca,ar9340-eth", .data = &ag71xx_dcfg_ar9340 },
	{ .compatible = "qca,qca9530-eth", .data = &ag71xx_dcfg_qca9530 },
	{ .compatible = "qca,qca9550-eth", .data = &ag71xx_dcfg_qca9550 },
	{ .compatible = "qca,qca9560-eth", .data = &ag71xx_dcfg_qca9550 },
	{}
};
static struct platform_driver ag71xx_driver = {
	.probe		= ag71xx_probe,
	.remove		= ag71xx_remove,
	.driver = {
		.name	= "ag71xx",
		.of_match_table = ag71xx_match,
	},
};

module_platform_driver(ag71xx_driver);
MODULE_LICENSE("GPL v2");