2 * Copyright (C) 2015 Mans Rullgard <mans@mansr.com>
4 * Mostly rewritten, based on driver from Sigma Designs. Original
5 * copyright notice below.
8 * Driver for tangox SMP864x/SMP865x/SMP867x/SMP868x builtin Ethernet Mac.
10 * Copyright (C) 2005 Maxime Bizon <mbizon@freebox.fr>
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
23 #include <linux/module.h>
24 #include <linux/etherdevice.h>
25 #include <linux/delay.h>
26 #include <linux/ethtool.h>
27 #include <linux/interrupt.h>
28 #include <linux/platform_device.h>
29 #include <linux/of_device.h>
30 #include <linux/of_mdio.h>
31 #include <linux/of_net.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/phy.h>
34 #include <linux/cache.h>
35 #include <linux/jiffies.h>
37 #include <linux/iopoll.h>
38 #include <asm/barrier.h>
/* Forward declarations: these are referenced before their definitions. */
static void nb8800_tx_done(struct net_device *dev);
static int nb8800_dma_stop(struct net_device *dev);
45 static inline u8
nb8800_readb(struct nb8800_priv
*priv
, int reg
)
47 return readb_relaxed(priv
->base
+ reg
);
50 static inline u32
nb8800_readl(struct nb8800_priv
*priv
, int reg
)
52 return readl_relaxed(priv
->base
+ reg
);
55 static inline void nb8800_writeb(struct nb8800_priv
*priv
, int reg
, u8 val
)
57 writeb_relaxed(val
, priv
->base
+ reg
);
60 static inline void nb8800_writew(struct nb8800_priv
*priv
, int reg
, u16 val
)
62 writew_relaxed(val
, priv
->base
+ reg
);
65 static inline void nb8800_writel(struct nb8800_priv
*priv
, int reg
, u32 val
)
67 writel_relaxed(val
, priv
->base
+ reg
);
70 static inline void nb8800_maskb(struct nb8800_priv
*priv
, int reg
,
73 u32 old
= nb8800_readb(priv
, reg
);
74 u32
new = (old
& ~mask
) | (val
& mask
);
77 nb8800_writeb(priv
, reg
, new);
80 static inline void nb8800_maskl(struct nb8800_priv
*priv
, int reg
,
83 u32 old
= nb8800_readl(priv
, reg
);
84 u32
new = (old
& ~mask
) | (val
& mask
);
87 nb8800_writel(priv
, reg
, new);
90 static inline void nb8800_modb(struct nb8800_priv
*priv
, int reg
, u8 bits
,
93 nb8800_maskb(priv
, reg
, bits
, set
? bits
: 0);
96 static inline void nb8800_setb(struct nb8800_priv
*priv
, int reg
, u8 bits
)
98 nb8800_maskb(priv
, reg
, bits
, bits
);
101 static inline void nb8800_clearb(struct nb8800_priv
*priv
, int reg
, u8 bits
)
103 nb8800_maskb(priv
, reg
, bits
, 0);
106 static inline void nb8800_modl(struct nb8800_priv
*priv
, int reg
, u32 bits
,
109 nb8800_maskl(priv
, reg
, bits
, set
? bits
: 0);
112 static inline void nb8800_setl(struct nb8800_priv
*priv
, int reg
, u32 bits
)
114 nb8800_maskl(priv
, reg
, bits
, bits
);
117 static inline void nb8800_clearl(struct nb8800_priv
*priv
, int reg
, u32 bits
)
119 nb8800_maskl(priv
, reg
, bits
, 0);
122 static int nb8800_mdio_wait(struct mii_bus
*bus
)
124 struct nb8800_priv
*priv
= bus
->priv
;
127 return readl_poll_timeout_atomic(priv
->base
+ NB8800_MDIO_CMD
,
128 val
, !(val
& MDIO_CMD_GO
), 1, 1000);
131 static int nb8800_mdio_cmd(struct mii_bus
*bus
, u32 cmd
)
133 struct nb8800_priv
*priv
= bus
->priv
;
136 err
= nb8800_mdio_wait(bus
);
140 nb8800_writel(priv
, NB8800_MDIO_CMD
, cmd
);
142 nb8800_writel(priv
, NB8800_MDIO_CMD
, cmd
| MDIO_CMD_GO
);
144 return nb8800_mdio_wait(bus
);
147 static int nb8800_mdio_read(struct mii_bus
*bus
, int phy_id
, int reg
)
149 struct nb8800_priv
*priv
= bus
->priv
;
153 err
= nb8800_mdio_cmd(bus
, MDIO_CMD_ADDR(phy_id
) | MDIO_CMD_REG(reg
));
157 val
= nb8800_readl(priv
, NB8800_MDIO_STS
);
158 if (val
& MDIO_STS_ERR
)
164 static int nb8800_mdio_write(struct mii_bus
*bus
, int phy_id
, int reg
, u16 val
)
166 u32 cmd
= MDIO_CMD_ADDR(phy_id
) | MDIO_CMD_REG(reg
) |
167 MDIO_CMD_DATA(val
) | MDIO_CMD_WR
;
169 return nb8800_mdio_cmd(bus
, cmd
);
172 static void nb8800_mac_tx(struct net_device
*dev
, bool enable
)
174 struct nb8800_priv
*priv
= netdev_priv(dev
);
176 while (nb8800_readl(priv
, NB8800_TXC_CR
) & TCR_EN
)
179 nb8800_modb(priv
, NB8800_TX_CTL1
, TX_EN
, enable
);
182 static void nb8800_mac_rx(struct net_device
*dev
, bool enable
)
184 nb8800_modb(netdev_priv(dev
), NB8800_RX_CTL
, RX_EN
, enable
);
187 static void nb8800_mac_af(struct net_device
*dev
, bool enable
)
189 nb8800_modb(netdev_priv(dev
), NB8800_RX_CTL
, RX_AF_EN
, enable
);
192 static void nb8800_start_rx(struct net_device
*dev
)
194 nb8800_setl(netdev_priv(dev
), NB8800_RXC_CR
, RCR_EN
);
197 static int nb8800_alloc_rx(struct net_device
*dev
, unsigned int i
, bool napi
)
199 struct nb8800_priv
*priv
= netdev_priv(dev
);
200 struct nb8800_rx_desc
*rxd
= &priv
->rx_descs
[i
];
201 struct nb8800_rx_buf
*rxb
= &priv
->rx_bufs
[i
];
202 int size
= L1_CACHE_ALIGN(RX_BUF_SIZE
);
205 unsigned long offset
;
208 data
= napi
? napi_alloc_frag(size
) : netdev_alloc_frag(size
);
212 page
= virt_to_head_page(data
);
213 offset
= data
- page_address(page
);
215 dma_addr
= dma_map_page(&dev
->dev
, page
, offset
, RX_BUF_SIZE
,
218 if (dma_mapping_error(&dev
->dev
, dma_addr
)) {
224 rxb
->offset
= offset
;
225 rxd
->desc
.s_addr
= dma_addr
;
230 static void nb8800_receive(struct net_device
*dev
, unsigned int i
,
233 struct nb8800_priv
*priv
= netdev_priv(dev
);
234 struct nb8800_rx_desc
*rxd
= &priv
->rx_descs
[i
];
235 struct page
*page
= priv
->rx_bufs
[i
].page
;
236 int offset
= priv
->rx_bufs
[i
].offset
;
237 void *data
= page_address(page
) + offset
;
238 dma_addr_t dma
= rxd
->desc
.s_addr
;
243 size
= len
<= RX_COPYBREAK
? len
: RX_COPYHDR
;
245 skb
= napi_alloc_skb(&priv
->napi
, size
);
247 netdev_err(dev
, "rx skb allocation failed\n");
248 dev
->stats
.rx_dropped
++;
252 if (len
<= RX_COPYBREAK
) {
253 dma_sync_single_for_cpu(&dev
->dev
, dma
, len
, DMA_FROM_DEVICE
);
254 skb_put_data(skb
, data
, len
);
255 dma_sync_single_for_device(&dev
->dev
, dma
, len
,
258 err
= nb8800_alloc_rx(dev
, i
, true);
260 netdev_err(dev
, "rx buffer allocation failed\n");
261 dev
->stats
.rx_dropped
++;
266 dma_unmap_page(&dev
->dev
, dma
, RX_BUF_SIZE
, DMA_FROM_DEVICE
);
267 skb_put_data(skb
, data
, RX_COPYHDR
);
268 skb_add_rx_frag(skb
, skb_shinfo(skb
)->nr_frags
, page
,
269 offset
+ RX_COPYHDR
, len
- RX_COPYHDR
,
273 skb
->protocol
= eth_type_trans(skb
, dev
);
274 napi_gro_receive(&priv
->napi
, skb
);
277 static void nb8800_rx_error(struct net_device
*dev
, u32 report
)
279 if (report
& RX_LENGTH_ERR
)
280 dev
->stats
.rx_length_errors
++;
282 if (report
& RX_FCS_ERR
)
283 dev
->stats
.rx_crc_errors
++;
285 if (report
& RX_FIFO_OVERRUN
)
286 dev
->stats
.rx_fifo_errors
++;
288 if (report
& RX_ALIGNMENT_ERROR
)
289 dev
->stats
.rx_frame_errors
++;
291 dev
->stats
.rx_errors
++;
294 static int nb8800_poll(struct napi_struct
*napi
, int budget
)
296 struct net_device
*dev
= napi
->dev
;
297 struct nb8800_priv
*priv
= netdev_priv(dev
);
298 struct nb8800_rx_desc
*rxd
;
299 unsigned int last
= priv
->rx_eoc
;
307 struct nb8800_rx_buf
*rxb
;
310 next
= (last
+ 1) % RX_DESC_COUNT
;
312 rxb
= &priv
->rx_bufs
[next
];
313 rxd
= &priv
->rx_descs
[next
];
318 len
= RX_BYTES_TRANSFERRED(rxd
->report
);
320 if (IS_RX_ERROR(rxd
->report
))
321 nb8800_rx_error(dev
, rxd
->report
);
323 nb8800_receive(dev
, next
, len
);
325 dev
->stats
.rx_packets
++;
326 dev
->stats
.rx_bytes
+= len
;
328 if (rxd
->report
& RX_MULTICAST_PKT
)
329 dev
->stats
.multicast
++;
334 } while (work
< budget
);
337 priv
->rx_descs
[last
].desc
.config
|= DESC_EOC
;
338 wmb(); /* ensure new EOC is written before clearing old */
339 priv
->rx_descs
[priv
->rx_eoc
].desc
.config
&= ~DESC_EOC
;
341 nb8800_start_rx(dev
);
345 nb8800_writel(priv
, NB8800_RX_ITR
, priv
->rx_itr_irq
);
347 /* If a packet arrived after we last checked but
348 * before writing RX_ITR, the interrupt will be
349 * delayed, so we retrieve it now.
351 if (priv
->rx_descs
[next
].report
)
354 napi_complete_done(napi
, work
);
360 static void __nb8800_tx_dma_start(struct net_device
*dev
)
362 struct nb8800_priv
*priv
= netdev_priv(dev
);
363 struct nb8800_tx_buf
*txb
;
366 txb
= &priv
->tx_bufs
[priv
->tx_queue
];
370 txc_cr
= nb8800_readl(priv
, NB8800_TXC_CR
);
374 nb8800_writel(priv
, NB8800_TX_DESC_ADDR
, txb
->dma_desc
);
375 wmb(); /* ensure desc addr is written before starting DMA */
376 nb8800_writel(priv
, NB8800_TXC_CR
, txc_cr
| TCR_EN
);
378 priv
->tx_queue
= (priv
->tx_queue
+ txb
->chain_len
) % TX_DESC_COUNT
;
381 static void nb8800_tx_dma_start(struct net_device
*dev
)
383 struct nb8800_priv
*priv
= netdev_priv(dev
);
385 spin_lock_irq(&priv
->tx_lock
);
386 __nb8800_tx_dma_start(dev
);
387 spin_unlock_irq(&priv
->tx_lock
);
390 static void nb8800_tx_dma_start_irq(struct net_device
*dev
)
392 struct nb8800_priv
*priv
= netdev_priv(dev
);
394 spin_lock(&priv
->tx_lock
);
395 __nb8800_tx_dma_start(dev
);
396 spin_unlock(&priv
->tx_lock
);
399 static int nb8800_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
401 struct nb8800_priv
*priv
= netdev_priv(dev
);
402 struct nb8800_tx_desc
*txd
;
403 struct nb8800_tx_buf
*txb
;
404 struct nb8800_dma_desc
*desc
;
406 unsigned int dma_len
;
410 if (atomic_read(&priv
->tx_free
) <= NB8800_DESC_LOW
) {
411 netif_stop_queue(dev
);
412 return NETDEV_TX_BUSY
;
415 align
= (8 - (uintptr_t)skb
->data
) & 7;
417 dma_len
= skb
->len
- align
;
418 dma_addr
= dma_map_single(&dev
->dev
, skb
->data
+ align
,
419 dma_len
, DMA_TO_DEVICE
);
421 if (dma_mapping_error(&dev
->dev
, dma_addr
)) {
422 netdev_err(dev
, "tx dma mapping error\n");
424 dev
->stats
.tx_dropped
++;
428 if (atomic_dec_return(&priv
->tx_free
) <= NB8800_DESC_LOW
) {
429 netif_stop_queue(dev
);
433 next
= priv
->tx_next
;
434 txb
= &priv
->tx_bufs
[next
];
435 txd
= &priv
->tx_descs
[next
];
436 desc
= &txd
->desc
[0];
438 next
= (next
+ 1) % TX_DESC_COUNT
;
441 memcpy(txd
->buf
, skb
->data
, align
);
444 txb
->dma_desc
+ offsetof(struct nb8800_tx_desc
, buf
);
445 desc
->n_addr
= txb
->dma_desc
+ sizeof(txd
->desc
[0]);
446 desc
->config
= DESC_BTS(2) | DESC_DS
| align
;
451 desc
->s_addr
= dma_addr
;
452 desc
->n_addr
= priv
->tx_bufs
[next
].dma_desc
;
453 desc
->config
= DESC_BTS(2) | DESC_DS
| DESC_EOF
| dma_len
;
456 desc
->config
|= DESC_EOC
;
459 txb
->dma_addr
= dma_addr
;
460 txb
->dma_len
= dma_len
;
462 if (!priv
->tx_chain
) {
464 priv
->tx_chain
= txb
;
466 priv
->tx_chain
->chain_len
++;
469 netdev_sent_queue(dev
, skb
->len
);
471 priv
->tx_next
= next
;
473 if (!skb
->xmit_more
) {
475 priv
->tx_chain
->ready
= true;
476 priv
->tx_chain
= NULL
;
477 nb8800_tx_dma_start(dev
);
483 static void nb8800_tx_error(struct net_device
*dev
, u32 report
)
485 if (report
& TX_LATE_COLLISION
)
486 dev
->stats
.collisions
++;
488 if (report
& TX_PACKET_DROPPED
)
489 dev
->stats
.tx_dropped
++;
491 if (report
& TX_FIFO_UNDERRUN
)
492 dev
->stats
.tx_fifo_errors
++;
494 dev
->stats
.tx_errors
++;
497 static void nb8800_tx_done(struct net_device
*dev
)
499 struct nb8800_priv
*priv
= netdev_priv(dev
);
500 unsigned int limit
= priv
->tx_next
;
501 unsigned int done
= priv
->tx_done
;
502 unsigned int packets
= 0;
503 unsigned int len
= 0;
505 while (done
!= limit
) {
506 struct nb8800_tx_desc
*txd
= &priv
->tx_descs
[done
];
507 struct nb8800_tx_buf
*txb
= &priv
->tx_bufs
[done
];
516 dma_unmap_single(&dev
->dev
, txb
->dma_addr
, txb
->dma_len
,
519 if (IS_TX_ERROR(txd
->report
)) {
520 nb8800_tx_error(dev
, txd
->report
);
526 dev
->stats
.tx_packets
++;
527 dev
->stats
.tx_bytes
+= TX_BYTES_TRANSFERRED(txd
->report
);
528 dev
->stats
.collisions
+= TX_EARLY_COLLISIONS(txd
->report
);
534 done
= (done
+ 1) % TX_DESC_COUNT
;
539 smp_mb__before_atomic();
540 atomic_add(packets
, &priv
->tx_free
);
541 netdev_completed_queue(dev
, packets
, len
);
542 netif_wake_queue(dev
);
543 priv
->tx_done
= done
;
547 static irqreturn_t
nb8800_irq(int irq
, void *dev_id
)
549 struct net_device
*dev
= dev_id
;
550 struct nb8800_priv
*priv
= netdev_priv(dev
);
551 irqreturn_t ret
= IRQ_NONE
;
555 val
= nb8800_readl(priv
, NB8800_TXC_SR
);
557 nb8800_writel(priv
, NB8800_TXC_SR
, val
);
560 nb8800_tx_dma_start_irq(dev
);
563 napi_schedule_irqoff(&priv
->napi
);
565 if (unlikely(val
& TSR_DE
))
566 netdev_err(dev
, "TX DMA error\n");
568 /* should never happen with automatic status retrieval */
569 if (unlikely(val
& TSR_TO
))
570 netdev_err(dev
, "TX Status FIFO overflow\n");
576 val
= nb8800_readl(priv
, NB8800_RXC_SR
);
578 nb8800_writel(priv
, NB8800_RXC_SR
, val
);
580 if (likely(val
& (RSR_RI
| RSR_DI
))) {
581 nb8800_writel(priv
, NB8800_RX_ITR
, priv
->rx_itr_poll
);
582 napi_schedule_irqoff(&priv
->napi
);
585 if (unlikely(val
& RSR_DE
))
586 netdev_err(dev
, "RX DMA error\n");
588 /* should never happen with automatic status retrieval */
589 if (unlikely(val
& RSR_RO
))
590 netdev_err(dev
, "RX Status FIFO overflow\n");
598 static void nb8800_mac_config(struct net_device
*dev
)
600 struct nb8800_priv
*priv
= netdev_priv(dev
);
601 bool gigabit
= priv
->speed
== SPEED_1000
;
602 u32 mac_mode_mask
= RGMII_MODE
| HALF_DUPLEX
| GMAC_MODE
;
609 mac_mode
|= HALF_DUPLEX
;
612 if (phy_interface_is_rgmii(dev
->phydev
))
613 mac_mode
|= RGMII_MODE
;
615 mac_mode
|= GMAC_MODE
;
618 /* Should be 512 but register is only 8 bits */
625 ict
= DIV_ROUND_UP(phy_clk
, clk_get_rate(priv
->clk
));
627 nb8800_writeb(priv
, NB8800_IC_THRESHOLD
, ict
);
628 nb8800_writeb(priv
, NB8800_SLOT_TIME
, slot_time
);
629 nb8800_maskb(priv
, NB8800_MAC_MODE
, mac_mode_mask
, mac_mode
);
632 static void nb8800_pause_config(struct net_device
*dev
)
634 struct nb8800_priv
*priv
= netdev_priv(dev
);
635 struct phy_device
*phydev
= dev
->phydev
;
638 if (priv
->pause_aneg
) {
639 if (!phydev
|| !phydev
->link
)
642 priv
->pause_rx
= phydev
->pause
;
643 priv
->pause_tx
= phydev
->pause
^ phydev
->asym_pause
;
646 nb8800_modb(priv
, NB8800_RX_CTL
, RX_PAUSE_EN
, priv
->pause_rx
);
648 rxcr
= nb8800_readl(priv
, NB8800_RXC_CR
);
649 if (!!(rxcr
& RCR_FL
) == priv
->pause_tx
)
652 if (netif_running(dev
)) {
653 napi_disable(&priv
->napi
);
654 netif_tx_lock_bh(dev
);
655 nb8800_dma_stop(dev
);
656 nb8800_modl(priv
, NB8800_RXC_CR
, RCR_FL
, priv
->pause_tx
);
657 nb8800_start_rx(dev
);
658 netif_tx_unlock_bh(dev
);
659 napi_enable(&priv
->napi
);
661 nb8800_modl(priv
, NB8800_RXC_CR
, RCR_FL
, priv
->pause_tx
);
665 static void nb8800_link_reconfigure(struct net_device
*dev
)
667 struct nb8800_priv
*priv
= netdev_priv(dev
);
668 struct phy_device
*phydev
= dev
->phydev
;
672 if (phydev
->speed
!= priv
->speed
) {
673 priv
->speed
= phydev
->speed
;
677 if (phydev
->duplex
!= priv
->duplex
) {
678 priv
->duplex
= phydev
->duplex
;
683 nb8800_mac_config(dev
);
685 nb8800_pause_config(dev
);
688 if (phydev
->link
!= priv
->link
) {
689 priv
->link
= phydev
->link
;
694 phy_print_status(phydev
);
697 static void nb8800_update_mac_addr(struct net_device
*dev
)
699 struct nb8800_priv
*priv
= netdev_priv(dev
);
702 for (i
= 0; i
< ETH_ALEN
; i
++)
703 nb8800_writeb(priv
, NB8800_SRC_ADDR(i
), dev
->dev_addr
[i
]);
705 for (i
= 0; i
< ETH_ALEN
; i
++)
706 nb8800_writeb(priv
, NB8800_UC_ADDR(i
), dev
->dev_addr
[i
]);
709 static int nb8800_set_mac_address(struct net_device
*dev
, void *addr
)
711 struct sockaddr
*sock
= addr
;
713 if (netif_running(dev
))
716 ether_addr_copy(dev
->dev_addr
, sock
->sa_data
);
717 nb8800_update_mac_addr(dev
);
722 static void nb8800_mc_init(struct net_device
*dev
, int val
)
724 struct nb8800_priv
*priv
= netdev_priv(dev
);
726 nb8800_writeb(priv
, NB8800_MC_INIT
, val
);
727 readb_poll_timeout_atomic(priv
->base
+ NB8800_MC_INIT
, val
, !val
,
731 static void nb8800_set_rx_mode(struct net_device
*dev
)
733 struct nb8800_priv
*priv
= netdev_priv(dev
);
734 struct netdev_hw_addr
*ha
;
737 if (dev
->flags
& (IFF_PROMISC
| IFF_ALLMULTI
)) {
738 nb8800_mac_af(dev
, false);
742 nb8800_mac_af(dev
, true);
743 nb8800_mc_init(dev
, 0);
745 netdev_for_each_mc_addr(ha
, dev
) {
746 for (i
= 0; i
< ETH_ALEN
; i
++)
747 nb8800_writeb(priv
, NB8800_MC_ADDR(i
), ha
->addr
[i
]);
749 nb8800_mc_init(dev
, 0xff);
753 #define RX_DESC_SIZE (RX_DESC_COUNT * sizeof(struct nb8800_rx_desc))
754 #define TX_DESC_SIZE (TX_DESC_COUNT * sizeof(struct nb8800_tx_desc))
756 static void nb8800_dma_free(struct net_device
*dev
)
758 struct nb8800_priv
*priv
= netdev_priv(dev
);
762 for (i
= 0; i
< RX_DESC_COUNT
; i
++)
763 if (priv
->rx_bufs
[i
].page
)
764 put_page(priv
->rx_bufs
[i
].page
);
766 kfree(priv
->rx_bufs
);
767 priv
->rx_bufs
= NULL
;
771 for (i
= 0; i
< TX_DESC_COUNT
; i
++)
772 kfree_skb(priv
->tx_bufs
[i
].skb
);
774 kfree(priv
->tx_bufs
);
775 priv
->tx_bufs
= NULL
;
778 if (priv
->rx_descs
) {
779 dma_free_coherent(dev
->dev
.parent
, RX_DESC_SIZE
, priv
->rx_descs
,
781 priv
->rx_descs
= NULL
;
784 if (priv
->tx_descs
) {
785 dma_free_coherent(dev
->dev
.parent
, TX_DESC_SIZE
, priv
->tx_descs
,
787 priv
->tx_descs
= NULL
;
791 static void nb8800_dma_reset(struct net_device
*dev
)
793 struct nb8800_priv
*priv
= netdev_priv(dev
);
794 struct nb8800_rx_desc
*rxd
;
795 struct nb8800_tx_desc
*txd
;
798 for (i
= 0; i
< RX_DESC_COUNT
; i
++) {
799 dma_addr_t rx_dma
= priv
->rx_desc_dma
+ i
* sizeof(*rxd
);
801 rxd
= &priv
->rx_descs
[i
];
802 rxd
->desc
.n_addr
= rx_dma
+ sizeof(*rxd
);
804 rx_dma
+ offsetof(struct nb8800_rx_desc
, report
);
805 rxd
->desc
.config
= priv
->rx_dma_config
;
809 rxd
->desc
.n_addr
= priv
->rx_desc_dma
;
810 rxd
->desc
.config
|= DESC_EOC
;
812 priv
->rx_eoc
= RX_DESC_COUNT
- 1;
814 for (i
= 0; i
< TX_DESC_COUNT
; i
++) {
815 struct nb8800_tx_buf
*txb
= &priv
->tx_bufs
[i
];
816 dma_addr_t r_dma
= txb
->dma_desc
+
817 offsetof(struct nb8800_tx_desc
, report
);
819 txd
= &priv
->tx_descs
[i
];
820 txd
->desc
[0].r_addr
= r_dma
;
821 txd
->desc
[1].r_addr
= r_dma
;
828 atomic_set(&priv
->tx_free
, TX_DESC_COUNT
);
830 nb8800_writel(priv
, NB8800_RX_DESC_ADDR
, priv
->rx_desc_dma
);
832 wmb(); /* ensure all setup is written before starting */
835 static int nb8800_dma_init(struct net_device
*dev
)
837 struct nb8800_priv
*priv
= netdev_priv(dev
);
838 unsigned int n_rx
= RX_DESC_COUNT
;
839 unsigned int n_tx
= TX_DESC_COUNT
;
843 priv
->rx_descs
= dma_alloc_coherent(dev
->dev
.parent
, RX_DESC_SIZE
,
844 &priv
->rx_desc_dma
, GFP_KERNEL
);
848 priv
->rx_bufs
= kcalloc(n_rx
, sizeof(*priv
->rx_bufs
), GFP_KERNEL
);
852 for (i
= 0; i
< n_rx
; i
++) {
853 err
= nb8800_alloc_rx(dev
, i
, false);
858 priv
->tx_descs
= dma_alloc_coherent(dev
->dev
.parent
, TX_DESC_SIZE
,
859 &priv
->tx_desc_dma
, GFP_KERNEL
);
863 priv
->tx_bufs
= kcalloc(n_tx
, sizeof(*priv
->tx_bufs
), GFP_KERNEL
);
867 for (i
= 0; i
< n_tx
; i
++)
868 priv
->tx_bufs
[i
].dma_desc
=
869 priv
->tx_desc_dma
+ i
* sizeof(struct nb8800_tx_desc
);
871 nb8800_dma_reset(dev
);
876 nb8800_dma_free(dev
);
881 static int nb8800_dma_stop(struct net_device
*dev
)
883 struct nb8800_priv
*priv
= netdev_priv(dev
);
884 struct nb8800_tx_buf
*txb
= &priv
->tx_bufs
[0];
885 struct nb8800_tx_desc
*txd
= &priv
->tx_descs
[0];
892 /* wait for tx to finish */
893 err
= readl_poll_timeout_atomic(priv
->base
+ NB8800_TXC_CR
, txcr
,
895 priv
->tx_done
== priv
->tx_next
,
900 /* The rx DMA only stops if it reaches the end of chain.
901 * To make this happen, we set the EOC flag on all rx
902 * descriptors, put the device in loopback mode, and send
903 * a few dummy frames. The interrupt handler will ignore
904 * these since NAPI is disabled and no real frames are in
908 for (i
= 0; i
< RX_DESC_COUNT
; i
++)
909 priv
->rx_descs
[i
].desc
.config
|= DESC_EOC
;
911 txd
->desc
[0].s_addr
=
912 txb
->dma_desc
+ offsetof(struct nb8800_tx_desc
, buf
);
913 txd
->desc
[0].config
= DESC_BTS(2) | DESC_DS
| DESC_EOF
| DESC_EOC
| 8;
914 memset(txd
->buf
, 0, sizeof(txd
->buf
));
916 nb8800_mac_af(dev
, false);
917 nb8800_setb(priv
, NB8800_MAC_MODE
, LOOPBACK_EN
);
920 nb8800_writel(priv
, NB8800_TX_DESC_ADDR
, txb
->dma_desc
);
922 nb8800_writel(priv
, NB8800_TXC_CR
, txcr
| TCR_EN
);
924 err
= readl_poll_timeout_atomic(priv
->base
+ NB8800_RXC_CR
,
925 rxcr
, !(rxcr
& RCR_EN
),
927 } while (err
&& --retry
);
929 nb8800_mac_af(dev
, true);
930 nb8800_clearb(priv
, NB8800_MAC_MODE
, LOOPBACK_EN
);
931 nb8800_dma_reset(dev
);
933 return retry
? 0 : -ETIMEDOUT
;
936 static void nb8800_pause_adv(struct net_device
*dev
)
938 struct nb8800_priv
*priv
= netdev_priv(dev
);
939 struct phy_device
*phydev
= dev
->phydev
;
946 adv
|= ADVERTISED_Pause
| ADVERTISED_Asym_Pause
;
948 adv
^= ADVERTISED_Asym_Pause
;
950 phydev
->supported
|= adv
;
951 phydev
->advertising
|= adv
;
954 static int nb8800_open(struct net_device
*dev
)
956 struct nb8800_priv
*priv
= netdev_priv(dev
);
957 struct phy_device
*phydev
;
960 /* clear any pending interrupts */
961 nb8800_writel(priv
, NB8800_RXC_SR
, 0xf);
962 nb8800_writel(priv
, NB8800_TXC_SR
, 0xf);
964 err
= nb8800_dma_init(dev
);
968 err
= request_irq(dev
->irq
, nb8800_irq
, 0, dev_name(&dev
->dev
), dev
);
972 nb8800_mac_rx(dev
, true);
973 nb8800_mac_tx(dev
, true);
975 phydev
= of_phy_connect(dev
, priv
->phy_node
,
976 nb8800_link_reconfigure
, 0,
983 nb8800_pause_adv(dev
);
985 netdev_reset_queue(dev
);
986 napi_enable(&priv
->napi
);
987 netif_start_queue(dev
);
989 nb8800_start_rx(dev
);
995 free_irq(dev
->irq
, dev
);
997 nb8800_dma_free(dev
);
1002 static int nb8800_stop(struct net_device
*dev
)
1004 struct nb8800_priv
*priv
= netdev_priv(dev
);
1005 struct phy_device
*phydev
= dev
->phydev
;
1009 netif_stop_queue(dev
);
1010 napi_disable(&priv
->napi
);
1012 nb8800_dma_stop(dev
);
1013 nb8800_mac_rx(dev
, false);
1014 nb8800_mac_tx(dev
, false);
1016 phy_disconnect(phydev
);
1018 free_irq(dev
->irq
, dev
);
1020 nb8800_dma_free(dev
);
1025 static int nb8800_ioctl(struct net_device
*dev
, struct ifreq
*rq
, int cmd
)
1027 return phy_mii_ioctl(dev
->phydev
, rq
, cmd
);
1030 static const struct net_device_ops nb8800_netdev_ops
= {
1031 .ndo_open
= nb8800_open
,
1032 .ndo_stop
= nb8800_stop
,
1033 .ndo_start_xmit
= nb8800_xmit
,
1034 .ndo_set_mac_address
= nb8800_set_mac_address
,
1035 .ndo_set_rx_mode
= nb8800_set_rx_mode
,
1036 .ndo_do_ioctl
= nb8800_ioctl
,
1037 .ndo_validate_addr
= eth_validate_addr
,
1040 static void nb8800_get_pauseparam(struct net_device
*dev
,
1041 struct ethtool_pauseparam
*pp
)
1043 struct nb8800_priv
*priv
= netdev_priv(dev
);
1045 pp
->autoneg
= priv
->pause_aneg
;
1046 pp
->rx_pause
= priv
->pause_rx
;
1047 pp
->tx_pause
= priv
->pause_tx
;
1050 static int nb8800_set_pauseparam(struct net_device
*dev
,
1051 struct ethtool_pauseparam
*pp
)
1053 struct nb8800_priv
*priv
= netdev_priv(dev
);
1054 struct phy_device
*phydev
= dev
->phydev
;
1056 priv
->pause_aneg
= pp
->autoneg
;
1057 priv
->pause_rx
= pp
->rx_pause
;
1058 priv
->pause_tx
= pp
->tx_pause
;
1060 nb8800_pause_adv(dev
);
1062 if (!priv
->pause_aneg
)
1063 nb8800_pause_config(dev
);
1065 phy_start_aneg(phydev
);
1070 static const char nb8800_stats_names
[][ETH_GSTRING_LEN
] = {
1073 "rx_undersize_frames",
1074 "rx_fragment_frames",
1075 "rx_64_byte_frames",
1076 "rx_127_byte_frames",
1077 "rx_255_byte_frames",
1078 "rx_511_byte_frames",
1079 "rx_1023_byte_frames",
1080 "rx_max_size_frames",
1081 "rx_oversize_frames",
1082 "rx_bad_fcs_frames",
1083 "rx_broadcast_frames",
1084 "rx_multicast_frames",
1085 "rx_control_frames",
1087 "rx_unsup_control_frames",
1088 "rx_align_error_frames",
1089 "rx_overrun_frames",
1096 "tx_64_byte_frames",
1097 "tx_127_byte_frames",
1098 "tx_255_byte_frames",
1099 "tx_511_byte_frames",
1100 "tx_1023_byte_frames",
1101 "tx_max_size_frames",
1102 "tx_oversize_frames",
1103 "tx_broadcast_frames",
1104 "tx_multicast_frames",
1105 "tx_control_frames",
1107 "tx_underrun_frames",
1108 "tx_single_collision_frames",
1109 "tx_multi_collision_frames",
1110 "tx_deferred_collision_frames",
1111 "tx_late_collision_frames",
1112 "tx_excessive_collision_frames",
1118 #define NB8800_NUM_STATS ARRAY_SIZE(nb8800_stats_names)
1120 static int nb8800_get_sset_count(struct net_device
*dev
, int sset
)
1122 if (sset
== ETH_SS_STATS
)
1123 return NB8800_NUM_STATS
;
1128 static void nb8800_get_strings(struct net_device
*dev
, u32 sset
, u8
*buf
)
1130 if (sset
== ETH_SS_STATS
)
1131 memcpy(buf
, &nb8800_stats_names
, sizeof(nb8800_stats_names
));
1134 static u32
nb8800_read_stat(struct net_device
*dev
, int index
)
1136 struct nb8800_priv
*priv
= netdev_priv(dev
);
1138 nb8800_writeb(priv
, NB8800_STAT_INDEX
, index
);
1140 return nb8800_readl(priv
, NB8800_STAT_DATA
);
1143 static void nb8800_get_ethtool_stats(struct net_device
*dev
,
1144 struct ethtool_stats
*estats
, u64
*st
)
1149 for (i
= 0; i
< NB8800_NUM_STATS
/ 2; i
++) {
1150 rx
= nb8800_read_stat(dev
, i
);
1151 tx
= nb8800_read_stat(dev
, i
| 0x80);
1153 st
[i
+ NB8800_NUM_STATS
/ 2] = tx
;
1157 static const struct ethtool_ops nb8800_ethtool_ops
= {
1158 .nway_reset
= phy_ethtool_nway_reset
,
1159 .get_link
= ethtool_op_get_link
,
1160 .get_pauseparam
= nb8800_get_pauseparam
,
1161 .set_pauseparam
= nb8800_set_pauseparam
,
1162 .get_sset_count
= nb8800_get_sset_count
,
1163 .get_strings
= nb8800_get_strings
,
1164 .get_ethtool_stats
= nb8800_get_ethtool_stats
,
1165 .get_link_ksettings
= phy_ethtool_get_link_ksettings
,
1166 .set_link_ksettings
= phy_ethtool_set_link_ksettings
,
1169 static int nb8800_hw_init(struct net_device
*dev
)
1171 struct nb8800_priv
*priv
= netdev_priv(dev
);
1174 val
= TX_RETRY_EN
| TX_PAD_EN
| TX_APPEND_FCS
;
1175 nb8800_writeb(priv
, NB8800_TX_CTL1
, val
);
1177 /* Collision retry count */
1178 nb8800_writeb(priv
, NB8800_TX_CTL2
, 5);
1180 val
= RX_PAD_STRIP
| RX_AF_EN
;
1181 nb8800_writeb(priv
, NB8800_RX_CTL
, val
);
1183 /* Chosen by fair dice roll */
1184 nb8800_writeb(priv
, NB8800_RANDOM_SEED
, 4);
1186 /* TX cycles per deferral period */
1187 nb8800_writeb(priv
, NB8800_TX_SDP
, 12);
1189 /* The following three threshold values have been
1190 * experimentally determined for good results.
1193 /* RX/TX FIFO threshold for partial empty (64-bit entries) */
1194 nb8800_writeb(priv
, NB8800_PE_THRESHOLD
, 0);
1196 /* RX/TX FIFO threshold for partial full (64-bit entries) */
1197 nb8800_writeb(priv
, NB8800_PF_THRESHOLD
, 255);
1199 /* Buffer size for transmit (64-bit entries) */
1200 nb8800_writeb(priv
, NB8800_TX_BUFSIZE
, 64);
1202 /* Configure tx DMA */
1204 val
= nb8800_readl(priv
, NB8800_TXC_CR
);
1205 val
&= TCR_LE
; /* keep endian setting */
1206 val
|= TCR_DM
; /* DMA descriptor mode */
1207 val
|= TCR_RS
; /* automatically store tx status */
1208 val
|= TCR_DIE
; /* interrupt on DMA chain completion */
1209 val
|= TCR_TFI(7); /* interrupt after 7 frames transmitted */
1210 val
|= TCR_BTS(2); /* 32-byte bus transaction size */
1211 nb8800_writel(priv
, NB8800_TXC_CR
, val
);
1213 /* TX complete interrupt after 10 ms or 7 frames (see above) */
1214 val
= clk_get_rate(priv
->clk
) / 100;
1215 nb8800_writel(priv
, NB8800_TX_ITR
, val
);
1217 /* Configure rx DMA */
1219 val
= nb8800_readl(priv
, NB8800_RXC_CR
);
1220 val
&= RCR_LE
; /* keep endian setting */
1221 val
|= RCR_DM
; /* DMA descriptor mode */
1222 val
|= RCR_RS
; /* automatically store rx status */
1223 val
|= RCR_DIE
; /* interrupt at end of DMA chain */
1224 val
|= RCR_RFI(7); /* interrupt after 7 frames received */
1225 val
|= RCR_BTS(2); /* 32-byte bus transaction size */
1226 nb8800_writel(priv
, NB8800_RXC_CR
, val
);
1228 /* The rx interrupt can fire before the DMA has completed
1229 * unless a small delay is added. 50 us is hopefully enough.
1231 priv
->rx_itr_irq
= clk_get_rate(priv
->clk
) / 20000;
1233 /* In NAPI poll mode we want to disable interrupts, but the
1234 * hardware does not permit this. Delay 10 ms instead.
1236 priv
->rx_itr_poll
= clk_get_rate(priv
->clk
) / 100;
1238 nb8800_writel(priv
, NB8800_RX_ITR
, priv
->rx_itr_irq
);
1240 priv
->rx_dma_config
= RX_BUF_SIZE
| DESC_BTS(2) | DESC_DS
| DESC_EOF
;
1242 /* Flow control settings */
1244 /* Pause time of 0.1 ms */
1246 nb8800_writeb(priv
, NB8800_PQ1
, val
>> 8);
1247 nb8800_writeb(priv
, NB8800_PQ2
, val
& 0xff);
1249 /* Auto-negotiate by default */
1250 priv
->pause_aneg
= true;
1251 priv
->pause_rx
= true;
1252 priv
->pause_tx
= true;
1254 nb8800_mc_init(dev
, 0);
1259 static int nb8800_tangox_init(struct net_device
*dev
)
1261 struct nb8800_priv
*priv
= netdev_priv(dev
);
1262 u32 pad_mode
= PAD_MODE_MII
;
1264 switch (priv
->phy_mode
) {
1265 case PHY_INTERFACE_MODE_MII
:
1266 case PHY_INTERFACE_MODE_GMII
:
1267 pad_mode
= PAD_MODE_MII
;
1270 case PHY_INTERFACE_MODE_RGMII
:
1271 case PHY_INTERFACE_MODE_RGMII_ID
:
1272 case PHY_INTERFACE_MODE_RGMII_RXID
:
1273 case PHY_INTERFACE_MODE_RGMII_TXID
:
1274 pad_mode
= PAD_MODE_RGMII
;
1278 dev_err(dev
->dev
.parent
, "unsupported phy mode %s\n",
1279 phy_modes(priv
->phy_mode
));
1283 nb8800_writeb(priv
, NB8800_TANGOX_PAD_MODE
, pad_mode
);
1288 static int nb8800_tangox_reset(struct net_device
*dev
)
1290 struct nb8800_priv
*priv
= netdev_priv(dev
);
1293 nb8800_writeb(priv
, NB8800_TANGOX_RESET
, 0);
1294 usleep_range(1000, 10000);
1295 nb8800_writeb(priv
, NB8800_TANGOX_RESET
, 1);
1297 wmb(); /* ensure reset is cleared before proceeding */
1299 clk_div
= DIV_ROUND_UP(clk_get_rate(priv
->clk
), 2 * MAX_MDC_CLOCK
);
1300 nb8800_writew(priv
, NB8800_TANGOX_MDIO_CLKDIV
, clk_div
);
1305 static const struct nb8800_ops nb8800_tangox_ops
= {
1306 .init
= nb8800_tangox_init
,
1307 .reset
= nb8800_tangox_reset
,
1310 static int nb8800_tango4_init(struct net_device
*dev
)
1312 struct nb8800_priv
*priv
= netdev_priv(dev
);
1315 err
= nb8800_tangox_init(dev
);
1319 /* On tango4 interrupt on DMA completion per frame works and gives
1320 * better performance despite generating more rx interrupts.
1323 /* Disable unnecessary interrupt on rx completion */
1324 nb8800_clearl(priv
, NB8800_RXC_CR
, RCR_RFI(7));
1326 /* Request interrupt on descriptor DMA completion */
1327 priv
->rx_dma_config
|= DESC_ID
;
1332 static const struct nb8800_ops nb8800_tango4_ops
= {
1333 .init
= nb8800_tango4_init
,
1334 .reset
= nb8800_tangox_reset
,
1337 static const struct of_device_id nb8800_dt_ids
[] = {
1339 .compatible
= "aurora,nb8800",
1342 .compatible
= "sigma,smp8642-ethernet",
1343 .data
= &nb8800_tangox_ops
,
1346 .compatible
= "sigma,smp8734-ethernet",
1347 .data
= &nb8800_tango4_ops
,
1351 MODULE_DEVICE_TABLE(of
, nb8800_dt_ids
);
1353 static int nb8800_probe(struct platform_device
*pdev
)
1355 const struct of_device_id
*match
;
1356 const struct nb8800_ops
*ops
= NULL
;
1357 struct nb8800_priv
*priv
;
1358 struct resource
*res
;
1359 struct net_device
*dev
;
1360 struct mii_bus
*bus
;
1361 const unsigned char *mac
;
1366 match
= of_match_device(nb8800_dt_ids
, &pdev
->dev
);
1370 irq
= platform_get_irq(pdev
, 0);
1372 dev_err(&pdev
->dev
, "No IRQ\n");
1376 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1377 base
= devm_ioremap_resource(&pdev
->dev
, res
);
1379 return PTR_ERR(base
);
1381 dev_dbg(&pdev
->dev
, "AU-NB8800 Ethernet at %pa\n", &res
->start
);
1383 dev
= alloc_etherdev(sizeof(*priv
));
1387 platform_set_drvdata(pdev
, dev
);
1388 SET_NETDEV_DEV(dev
, &pdev
->dev
);
1390 priv
= netdev_priv(dev
);
1393 priv
->phy_mode
= of_get_phy_mode(pdev
->dev
.of_node
);
1394 if (priv
->phy_mode
< 0)
1395 priv
->phy_mode
= PHY_INTERFACE_MODE_RGMII
;
1397 priv
->clk
= devm_clk_get(&pdev
->dev
, NULL
);
1398 if (IS_ERR(priv
->clk
)) {
1399 dev_err(&pdev
->dev
, "failed to get clock\n");
1400 ret
= PTR_ERR(priv
->clk
);
1404 ret
= clk_prepare_enable(priv
->clk
);
1408 spin_lock_init(&priv
->tx_lock
);
1410 if (ops
&& ops
->reset
) {
1411 ret
= ops
->reset(dev
);
1413 goto err_disable_clk
;
1416 bus
= devm_mdiobus_alloc(&pdev
->dev
);
1419 goto err_disable_clk
;
1422 bus
->name
= "nb8800-mii";
1423 bus
->read
= nb8800_mdio_read
;
1424 bus
->write
= nb8800_mdio_write
;
1425 bus
->parent
= &pdev
->dev
;
1426 snprintf(bus
->id
, MII_BUS_ID_SIZE
, "%lx.nb8800-mii",
1427 (unsigned long)res
->start
);
1430 ret
= of_mdiobus_register(bus
, pdev
->dev
.of_node
);
1432 dev_err(&pdev
->dev
, "failed to register MII bus\n");
1433 goto err_disable_clk
;
1436 if (of_phy_is_fixed_link(pdev
->dev
.of_node
)) {
1437 ret
= of_phy_register_fixed_link(pdev
->dev
.of_node
);
1439 dev_err(&pdev
->dev
, "bad fixed-link spec\n");
1442 priv
->phy_node
= of_node_get(pdev
->dev
.of_node
);
1445 if (!priv
->phy_node
)
1446 priv
->phy_node
= of_parse_phandle(pdev
->dev
.of_node
,
1449 if (!priv
->phy_node
) {
1450 dev_err(&pdev
->dev
, "no PHY specified\n");
1455 priv
->mii_bus
= bus
;
1457 ret
= nb8800_hw_init(dev
);
1459 goto err_deregister_fixed_link
;
1461 if (ops
&& ops
->init
) {
1462 ret
= ops
->init(dev
);
1464 goto err_deregister_fixed_link
;
1467 dev
->netdev_ops
= &nb8800_netdev_ops
;
1468 dev
->ethtool_ops
= &nb8800_ethtool_ops
;
1469 dev
->flags
|= IFF_MULTICAST
;
1472 mac
= of_get_mac_address(pdev
->dev
.of_node
);
1474 ether_addr_copy(dev
->dev_addr
, mac
);
1476 if (!is_valid_ether_addr(dev
->dev_addr
))
1477 eth_hw_addr_random(dev
);
1479 nb8800_update_mac_addr(dev
);
1481 netif_carrier_off(dev
);
1483 ret
= register_netdev(dev
);
1485 netdev_err(dev
, "failed to register netdev\n");
1489 netif_napi_add(dev
, &priv
->napi
, nb8800_poll
, NAPI_POLL_WEIGHT
);
1491 netdev_info(dev
, "MAC address %pM\n", dev
->dev_addr
);
1496 nb8800_dma_free(dev
);
1497 err_deregister_fixed_link
:
1498 if (of_phy_is_fixed_link(pdev
->dev
.of_node
))
1499 of_phy_deregister_fixed_link(pdev
->dev
.of_node
);
1501 of_node_put(priv
->phy_node
);
1502 mdiobus_unregister(bus
);
1504 clk_disable_unprepare(priv
->clk
);
1511 static int nb8800_remove(struct platform_device
*pdev
)
1513 struct net_device
*ndev
= platform_get_drvdata(pdev
);
1514 struct nb8800_priv
*priv
= netdev_priv(ndev
);
1516 unregister_netdev(ndev
);
1517 if (of_phy_is_fixed_link(pdev
->dev
.of_node
))
1518 of_phy_deregister_fixed_link(pdev
->dev
.of_node
);
1519 of_node_put(priv
->phy_node
);
1521 mdiobus_unregister(priv
->mii_bus
);
1523 clk_disable_unprepare(priv
->clk
);
1525 nb8800_dma_free(ndev
);
1531 static struct platform_driver nb8800_driver
= {
1534 .of_match_table
= nb8800_dt_ids
,
1536 .probe
= nb8800_probe
,
1537 .remove
= nb8800_remove
,
1540 module_platform_driver(nb8800_driver
);
1542 MODULE_DESCRIPTION("Aurora AU-NB8800 Ethernet driver");
1543 MODULE_AUTHOR("Mans Rullgard <mans@mansr.com>");
1544 MODULE_LICENSE("GPL");