/* drivers/net/ethernet/dnet.c */
/*
 * Dave DNET Ethernet Controller driver
 *
 * Copyright (C) 2008 Dave S.r.l. <www.dave.eu>
 * Copyright (C) 2009 Ilya Yanok, Emcraft Systems Ltd, <yanok@emcraft.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/io.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/phy.h>

#include "dnet.h"

#undef DEBUG

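/*
 * Register access note (derived from the two helpers below): the internal
 * MAC core is programmed indirectly. The register offset is written to
 * MACREG_ADDR (with DNET_INTERNAL_WRITE set for write cycles), the 16-bit
 * payload travels through MACREG_DATA, and a ~500 ns delay separates the
 * address and data phases because internal MAC accesses are slow.
 */
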
/* function for reading internal MAC register */
static u16 dnet_readw_mac(struct dnet *bp, u16 reg)
{
	u16 data_read;

	/* issue a read */
	dnet_writel(bp, reg, MACREG_ADDR);

	/* since a read/write op to the MAC is very slow,
	 * we must wait before reading the data */
	ndelay(500);

	/* read data read from the MAC register */
	data_read = dnet_readl(bp, MACREG_DATA);

	/* all done */
	return data_read;
}

/* function for writing internal MAC register */
static void dnet_writew_mac(struct dnet *bp, u16 reg, u16 val)
{
	/* load data to write */
	dnet_writel(bp, val, MACREG_DATA);

	/* issue a write */
	dnet_writel(bp, reg | DNET_INTERNAL_WRITE, MACREG_ADDR);

	/* since a read/write op to the MAC is very slow,
	 * we must wait before exiting */
	ndelay(500);
}

static void __dnet_set_hwaddr(struct dnet *bp)
{
	u16 tmp;

	tmp = be16_to_cpup((__be16 *)bp->dev->dev_addr);
	dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp);
	tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 2));
	dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp);
	tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 4));
	dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp);
}

static void dnet_get_hwaddr(struct dnet *bp)
{
	u16 tmp;
	u8 addr[6];

	/*
	 * from MAC docs:
	 * "Note that the MAC address is stored in the registers in Hexadecimal
	 * form. For example, to set the MAC Address to: AC-DE-48-00-00-80
	 * would require writing 0xAC (octet 0) to address 0x0B (high byte of
	 * Mac_addr[15:0]), 0xDE (octet 1) to address 0x0A (Low byte of
	 * Mac_addr[15:0]), 0x48 (octet 2) to address 0x0D (high byte of
	 * Mac_addr[15:0]), 0x00 (octet 3) to address 0x0C (Low byte of
	 * Mac_addr[15:0]), 0x00 (octet 4) to address 0x0F (high byte of
	 * Mac_addr[15:0]), and 0x80 (octet 5) to address 0x0E (Low byte of
	 * Mac_addr[15:0])."
	 */
	tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG);
	*((__be16 *)addr) = cpu_to_be16(tmp);
	tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG);
	*((__be16 *)(addr + 2)) = cpu_to_be16(tmp);
	tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG);
	*((__be16 *)(addr + 4)) = cpu_to_be16(tmp);

	if (is_valid_ether_addr(addr))
		memcpy(bp->dev->dev_addr, addr, sizeof(addr));
}

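/*
 * MDIO access note (as implemented below): the GMII management control word
 * packs the register offset into bits [4:0] and the PHY address into bits
 * [12:8]; bit 13 selects a write cycle. Completion of either direction is
 * detected by polling DNET_INTERNAL_GMII_MNG_CMD_FIN in the same register,
 * and read results are fetched from DNET_INTERNAL_GMII_MNG_DAT_REG.
 */
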
static int dnet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct dnet *bp = bus->priv;
	u16 value;

	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
				& DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	/* only 5 bits allowed for phy-addr and reg_offset */
	mii_id &= 0x1f;
	regnum &= 0x1f;

	/* prepare reg_value for a read */
	value = (mii_id << 8);
	value |= regnum;

	/* write control word */
	dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, value);

	/* wait for end of transfer */
	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
				& DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	value = dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG);

	pr_debug("mdio_read %02x:%02x <- %04x\n", mii_id, regnum, value);

	return value;
}

static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct dnet *bp = bus->priv;
	u16 tmp;

	pr_debug("mdio_write %02x:%02x <- %04x\n", mii_id, regnum, value);

	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
				& DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	/* prepare for a write operation */
	tmp = (1 << 13);

	/* only 5 bits allowed for phy-addr and reg_offset */
	mii_id &= 0x1f;
	regnum &= 0x1f;

	/* only 16 bits on data */
	value &= 0xffff;

	/* prepare reg_value for a write */
	tmp |= (mii_id << 8);
	tmp |= regnum;

	/* write data to write first */
	dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG, value);

	/* write control word */
	dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, tmp);

	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
				& DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	return 0;
}

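/*
 * Link management note: dnet_handle_link_change() is the phylib adjust_link
 * callback. Under bp->lock it mirrors the PHY state into the MAC by toggling
 * the half-duplex and gigabit mode bits and by gating RX/TX enable on link
 * presence; the link up/down message is only printed when the cached state
 * actually changed.
 */
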
static void dnet_handle_link_change(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	u32 mode_reg, ctl_reg;

	int status_change = 0;

	spin_lock_irqsave(&bp->lock, flags);

	mode_reg = dnet_readw_mac(bp, DNET_INTERNAL_MODE_REG);
	ctl_reg = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);

	if (phydev->link) {
		if (bp->duplex != phydev->duplex) {
			if (phydev->duplex)
				ctl_reg &=
				    ~(DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP);
			else
				ctl_reg |=
				    DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP;

			bp->duplex = phydev->duplex;
			status_change = 1;
		}

		if (bp->speed != phydev->speed) {
			status_change = 1;
			switch (phydev->speed) {
			case 1000:
				mode_reg |= DNET_INTERNAL_MODE_GBITEN;
				break;
			case 100:
			case 10:
				mode_reg &= ~DNET_INTERNAL_MODE_GBITEN;
				break;
			default:
				printk(KERN_WARNING
				       "%s: Ack! Speed (%d) is not "
				       "10/100/1000!\n", dev->name,
				       phydev->speed);
				break;
			}
			bp->speed = phydev->speed;
		}
	}

	if (phydev->link != bp->link) {
		if (phydev->link) {
			mode_reg |=
			    (DNET_INTERNAL_MODE_RXEN | DNET_INTERNAL_MODE_TXEN);
		} else {
			mode_reg &=
			    ~(DNET_INTERNAL_MODE_RXEN |
			      DNET_INTERNAL_MODE_TXEN);
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	if (status_change) {
		dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, ctl_reg);
		dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, mode_reg);
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (status_change) {
		if (phydev->link)
			printk(KERN_INFO "%s: link up (%d/%s)\n",
			       dev->name, phydev->speed,
			       DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
		else
			printk(KERN_INFO "%s: link down\n", dev->name);
	}
}

static int dnet_mii_probe(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	struct phy_device *phydev = NULL;

	/* find the first phy */
	phydev = phy_find_first(bp->mii_bus);

	if (!phydev) {
		printk(KERN_ERR "%s: no PHY found\n", dev->name);
		return -ENODEV;
	}

	/* TODO : add pin_irq */

	/* attach the mac to the phy */
	if (bp->capabilities & DNET_HAS_RMII) {
		phydev = phy_connect(dev, phydev_name(phydev),
				     &dnet_handle_link_change,
				     PHY_INTERFACE_MODE_RMII);
	} else {
		phydev = phy_connect(dev, phydev_name(phydev),
				     &dnet_handle_link_change,
				     PHY_INTERFACE_MODE_MII);
	}

	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	/* mask with MAC supported features */
	if (bp->capabilities & DNET_HAS_GIGABIT)
		phy_set_max_speed(phydev, SPEED_1000);
	else
		phy_set_max_speed(phydev, SPEED_100);

	phy_support_asym_pause(phydev);

	bp->link = 0;
	bp->speed = 0;
	bp->duplex = -1;

	return 0;
}

static int dnet_mii_init(struct dnet *bp)
{
	int err;

	bp->mii_bus = mdiobus_alloc();
	if (bp->mii_bus == NULL)
		return -ENOMEM;

	bp->mii_bus->name = "dnet_mii_bus";
	bp->mii_bus->read = &dnet_mdio_read;
	bp->mii_bus->write = &dnet_mdio_write;

	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);

	bp->mii_bus->priv = bp;

	if (mdiobus_register(bp->mii_bus)) {
		err = -ENXIO;
		goto err_out;
	}

	if (dnet_mii_probe(bp->dev) != 0) {
		err = -ENXIO;
		goto err_out_unregister_bus;
	}

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out:
	mdiobus_free(bp->mii_bus);
	return err;
}

/* For Neptune board: LINK1000 as Link LED and TX as activity LED */
static int dnet_phy_marvell_fixup(struct phy_device *phydev)
{
	return phy_write(phydev, 0x18, 0x4148);
}

static void dnet_update_stats(struct dnet *bp)
{
	u32 __iomem *reg = bp->regs + DNET_RX_PKT_IGNR_CNT;
	u32 *p = &bp->hw_stats.rx_pkt_ignr;
	u32 *end = &bp->hw_stats.rx_byte + 1;

	WARN_ON((unsigned long)(end - p - 1) !=
		(DNET_RX_BYTE_CNT - DNET_RX_PKT_IGNR_CNT) / 4);

	for (; p < end; p++, reg++)
		*p += readl(reg);

	reg = bp->regs + DNET_TX_UNICAST_CNT;
	p = &bp->hw_stats.tx_unicast;
	end = &bp->hw_stats.tx_byte + 1;

	WARN_ON((unsigned long)(end - p - 1) !=
		(DNET_TX_BYTE_CNT - DNET_TX_UNICAST_CNT) / 4);

	for (; p < end; p++, reg++)
		*p += readl(reg);
}

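/*
 * RX path note: this driver uses no descriptor rings. The NAPI poll below
 * pops one command word per frame from RX_LEN_FIFO (length in the low 16
 * bits, error flags above it) and then copies the payload out of
 * RX_DATA_FIFO one 32-bit word at a time into a freshly allocated skb.
 */
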
static int dnet_poll(struct napi_struct *napi, int budget)
{
	struct dnet *bp = container_of(napi, struct dnet, napi);
	struct net_device *dev = bp->dev;
	int npackets = 0;
	unsigned int pkt_len;
	struct sk_buff *skb;
	unsigned int *data_ptr;
	u32 int_enable;
	u32 cmd_word;
	int i;

	while (npackets < budget) {
		/*
		 * break out of while loop if there are no more
		 * packets waiting
		 */
		if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16))
			break;

		cmd_word = dnet_readl(bp, RX_LEN_FIFO);
		pkt_len = cmd_word & 0xFFFF;

		if (cmd_word & 0xDF180000)
			printk(KERN_ERR "%s packet receive error %x\n",
			       __func__, cmd_word);

		skb = netdev_alloc_skb(dev, pkt_len + 5);
		if (skb != NULL) {
			/* Align IP on 16 byte boundaries */
			skb_reserve(skb, 2);
			/*
			 * 'skb_put()' points to the start of sk_buff
			 * data area.
			 */
			data_ptr = skb_put(skb, pkt_len);
			for (i = 0; i < (pkt_len + 3) >> 2; i++)
				*data_ptr++ = dnet_readl(bp, RX_DATA_FIFO);
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			npackets++;
		} else
			printk(KERN_NOTICE
			       "%s: No memory to allocate a sk_buff of "
			       "size %u.\n", dev->name, pkt_len);
	}

	if (npackets < budget) {
		/* We processed all packets available. Tell NAPI it can
		 * stop polling then re-enable rx interrupts.
		 */
		napi_complete_done(napi, npackets);
		int_enable = dnet_readl(bp, INTR_ENB);
		int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
		dnet_writel(bp, int_enable, INTR_ENB);
	}

	return npackets;
}

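/*
 * Interrupt note: INTR_SRC is clear-on-read, so the handler latches the
 * pending sources once and works from that snapshot. An RX "command FIFO
 * almost full" event schedules NAPI with the RX source masked off; FIFO
 * overflow conditions can only be recovered by flushing the affected FIFO
 * through SYS_CTL.
 */
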
static irqreturn_t dnet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct dnet *bp = netdev_priv(dev);
	u32 int_src, int_enable, int_current;
	unsigned long flags;
	unsigned int handled = 0;

	spin_lock_irqsave(&bp->lock, flags);

	/* read and clear the DNET irq (clear on read) */
	int_src = dnet_readl(bp, INTR_SRC);
	int_enable = dnet_readl(bp, INTR_ENB);
	int_current = int_src & int_enable;

	/* restart the queue if we had stopped it for TX fifo almost full */
	if (int_current & DNET_INTR_SRC_TX_FIFOAE) {
		int_enable = dnet_readl(bp, INTR_ENB);
		int_enable &= ~DNET_INTR_ENB_TX_FIFOAE;
		dnet_writel(bp, int_enable, INTR_ENB);
		netif_wake_queue(dev);
		handled = 1;
	}

	/* RX FIFO error checking */
	if (int_current &
	    (DNET_INTR_SRC_RX_CMDFIFOFF | DNET_INTR_SRC_RX_DATAFIFOFF)) {
		printk(KERN_ERR "%s: RX fifo error %x, irq %x\n", __func__,
		       dnet_readl(bp, RX_STATUS), int_current);
		/* we can only flush the RX FIFOs */
		dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH, SYS_CTL);
		ndelay(500);
		dnet_writel(bp, 0, SYS_CTL);
		handled = 1;
	}

	/* TX FIFO error checking */
	if (int_current &
	    (DNET_INTR_SRC_TX_FIFOFULL | DNET_INTR_SRC_TX_DISCFRM)) {
		printk(KERN_ERR "%s: TX fifo error %x, irq %x\n", __func__,
		       dnet_readl(bp, TX_STATUS), int_current);
		/* we can only flush the TX FIFOs */
		dnet_writel(bp, DNET_SYS_CTL_TXFIFOFLUSH, SYS_CTL);
		ndelay(500);
		dnet_writel(bp, 0, SYS_CTL);
		handled = 1;
	}

	if (int_current & DNET_INTR_SRC_RX_CMDFIFOAF) {
		if (napi_schedule_prep(&bp->napi)) {
			/*
			 * There's no point taking any more interrupts
			 * until we have processed the buffers
			 */
			/* Disable Rx interrupts and schedule NAPI poll */
			int_enable = dnet_readl(bp, INTR_ENB);
			int_enable &= ~DNET_INTR_SRC_RX_CMDFIFOAF;
			dnet_writel(bp, int_enable, INTR_ENB);
			__napi_schedule(&bp->napi);
		}
		handled = 1;
	}

	if (!handled)
		pr_debug("%s: irq %x remains\n", __func__, int_current);

	spin_unlock_irqrestore(&bp->lock, flags);

	return IRQ_RETVAL(handled);
}

#ifdef DEBUG
static inline void dnet_print_skb(struct sk_buff *skb)
{
	int k;
	printk(KERN_DEBUG PFX "data:");
	for (k = 0; k < skb->len; k++)
		printk(" %02x", (unsigned int)skb->data[k]);
	printk("\n");
}
#else
#define dnet_print_skb(skb)	do {} while (0)
#endif

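/*
 * TX path note: transmission is PIO as well. The buffer pointer is rounded
 * down to a 32-bit boundary before the copy loop, and the original byte
 * offset (skb->data & 3) is encoded into bits [17:16] of the TX command
 * word next to the frame length, presumably so the MAC can skip the leading
 * pad bytes. Frames that do not fit the TX FIFO are silently dropped.
 */
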
static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	u32 tx_status, irq_enable;
	unsigned int len, i, tx_cmd, wrsz;
	unsigned long flags;
	unsigned int *bufp;

	tx_status = dnet_readl(bp, TX_STATUS);

	pr_debug("start_xmit: len %u head %p data %p\n",
		 skb->len, skb->head, skb->data);
	dnet_print_skb(skb);

	/* frame size (words) */
	len = (skb->len + 3) >> 2;

	spin_lock_irqsave(&bp->lock, flags);

	tx_status = dnet_readl(bp, TX_STATUS);

	bufp = (unsigned int *)(((unsigned long) skb->data) & ~0x3UL);
	wrsz = (u32) skb->len + 3;
	wrsz += ((unsigned long) skb->data) & 0x3;
	wrsz >>= 2;
	tx_cmd = ((((unsigned long)(skb->data)) & 0x03) << 16) | (u32) skb->len;

	/* check if there is enough room for the current frame */
	if (wrsz < (DNET_FIFO_SIZE - dnet_readl(bp, TX_FIFO_WCNT))) {
		for (i = 0; i < wrsz; i++)
			dnet_writel(bp, *bufp++, TX_DATA_FIFO);

		/*
		 * inform MAC that a packet's written and ready to be
		 * shipped out
		 */
		dnet_writel(bp, tx_cmd, TX_LEN_FIFO);
	}

	if (dnet_readl(bp, TX_FIFO_WCNT) > DNET_FIFO_TX_DATA_AF_TH) {
		netif_stop_queue(dev);
		tx_status = dnet_readl(bp, INTR_SRC);
		irq_enable = dnet_readl(bp, INTR_ENB);
		irq_enable |= DNET_INTR_ENB_TX_FIFOAE;
		dnet_writel(bp, irq_enable, INTR_ENB);
	}

	skb_tx_timestamp(skb);

	/* free the buffer */
	dev_kfree_skb(skb);

	spin_unlock_irqrestore(&bp->lock, flags);

	return NETDEV_TX_OK;
}

static void dnet_reset_hw(struct dnet *bp)
{
	/* put ts_mac in IDLE state i.e. disable rx/tx */
	dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, DNET_INTERNAL_MODE_FCEN);

	/*
	 * RX FIFO almost full threshold: only cmd FIFO almost full is
	 * implemented for RX side
	 */
	dnet_writel(bp, DNET_FIFO_RX_CMD_AF_TH, RX_FIFO_TH);
	/*
	 * TX FIFO almost empty threshold: only data FIFO almost empty
	 * is implemented for TX side
	 */
	dnet_writel(bp, DNET_FIFO_TX_DATA_AE_TH, TX_FIFO_TH);

	/* flush rx/tx fifos */
	dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH | DNET_SYS_CTL_TXFIFOFLUSH,
		    SYS_CTL);
	msleep(1);
	dnet_writel(bp, 0, SYS_CTL);
}

static void dnet_init_hw(struct dnet *bp)
{
	u32 config;

	dnet_reset_hw(bp);
	__dnet_set_hwaddr(bp);

	config = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);

	if (bp->dev->flags & IFF_PROMISC)
		/* Copy All Frames */
		config |= DNET_INTERNAL_RXTX_CONTROL_ENPROMISC;
	if (!(bp->dev->flags & IFF_BROADCAST))
		/* No BroadCast */
		config |= DNET_INTERNAL_RXTX_CONTROL_RXMULTICAST;

	config |= DNET_INTERNAL_RXTX_CONTROL_RXPAUSE |
	    DNET_INTERNAL_RXTX_CONTROL_RXBROADCAST |
	    DNET_INTERNAL_RXTX_CONTROL_DROPCONTROL |
	    DNET_INTERNAL_RXTX_CONTROL_DISCFXFCS;

	dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, config);

	/* clear irq before enabling them */
	config = dnet_readl(bp, INTR_SRC);

	/* enable RX/TX interrupt, recv packet ready interrupt */
	dnet_writel(bp, DNET_INTR_ENB_GLOBAL_ENABLE | DNET_INTR_ENB_RX_SUMMARY |
		    DNET_INTR_ENB_TX_SUMMARY | DNET_INTR_ENB_RX_FIFOERR |
		    DNET_INTR_ENB_RX_ERROR | DNET_INTR_ENB_RX_FIFOFULL |
		    DNET_INTR_ENB_TX_FIFOFULL | DNET_INTR_ENB_TX_DISCFRM |
		    DNET_INTR_ENB_RX_PKTRDY, INTR_ENB);
}

static int dnet_open(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);

	/* if the phy is not yet registered, retry later */
	if (!dev->phydev)
		return -EAGAIN;

	napi_enable(&bp->napi);
	dnet_init_hw(bp);

	phy_start_aneg(dev->phydev);

	/* schedule a link state check */
	phy_start(dev->phydev);

	netif_start_queue(dev);

	return 0;
}

static int dnet_close(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&bp->napi);

	if (dev->phydev)
		phy_stop(dev->phydev);

	dnet_reset_hw(bp);
	netif_carrier_off(dev);

	return 0;
}

static inline void dnet_print_pretty_hwstats(struct dnet_stats *hwstat)
{
	pr_debug("%s\n", __func__);
	pr_debug("----------------------------- RX statistics "
		 "-------------------------------\n");
	pr_debug("RX_PKT_IGNR_CNT %-8x\n", hwstat->rx_pkt_ignr);
	pr_debug("RX_LEN_CHK_ERR_CNT %-8x\n", hwstat->rx_len_chk_err);
	pr_debug("RX_LNG_FRM_CNT %-8x\n", hwstat->rx_lng_frm);
	pr_debug("RX_SHRT_FRM_CNT %-8x\n", hwstat->rx_shrt_frm);
	pr_debug("RX_IPG_VIOL_CNT %-8x\n", hwstat->rx_ipg_viol);
	pr_debug("RX_CRC_ERR_CNT %-8x\n", hwstat->rx_crc_err);
	pr_debug("RX_OK_PKT_CNT %-8x\n", hwstat->rx_ok_pkt);
	pr_debug("RX_CTL_FRM_CNT %-8x\n", hwstat->rx_ctl_frm);
	pr_debug("RX_PAUSE_FRM_CNT %-8x\n", hwstat->rx_pause_frm);
	pr_debug("RX_MULTICAST_CNT %-8x\n", hwstat->rx_multicast);
	pr_debug("RX_BROADCAST_CNT %-8x\n", hwstat->rx_broadcast);
	pr_debug("RX_VLAN_TAG_CNT %-8x\n", hwstat->rx_vlan_tag);
	pr_debug("RX_PRE_SHRINK_CNT %-8x\n", hwstat->rx_pre_shrink);
	pr_debug("RX_DRIB_NIB_CNT %-8x\n", hwstat->rx_drib_nib);
	pr_debug("RX_UNSUP_OPCD_CNT %-8x\n", hwstat->rx_unsup_opcd);
	pr_debug("RX_BYTE_CNT %-8x\n", hwstat->rx_byte);
	pr_debug("----------------------------- TX statistics "
		 "-------------------------------\n");
	pr_debug("TX_UNICAST_CNT %-8x\n", hwstat->tx_unicast);
	pr_debug("TX_PAUSE_FRM_CNT %-8x\n", hwstat->tx_pause_frm);
	pr_debug("TX_MULTICAST_CNT %-8x\n", hwstat->tx_multicast);
	pr_debug("TX_BRDCAST_CNT %-8x\n", hwstat->tx_brdcast);
	pr_debug("TX_VLAN_TAG_CNT %-8x\n", hwstat->tx_vlan_tag);
	pr_debug("TX_BAD_FCS_CNT %-8x\n", hwstat->tx_bad_fcs);
	pr_debug("TX_JUMBO_CNT %-8x\n", hwstat->tx_jumbo);
	pr_debug("TX_BYTE_CNT %-8x\n", hwstat->tx_byte);
}

static struct net_device_stats *dnet_get_stats(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &dev->stats;
	struct dnet_stats *hwstat = &bp->hw_stats;

	/* read stats from hardware */
	dnet_update_stats(bp);

	/* Convert HW stats into netdevice stats */
	nstat->rx_errors = (hwstat->rx_len_chk_err +
			    hwstat->rx_lng_frm + hwstat->rx_shrt_frm +
			    /* ignore IPG violation error
			    hwstat->rx_ipg_viol + */
			    hwstat->rx_crc_err +
			    hwstat->rx_pre_shrink +
			    hwstat->rx_drib_nib + hwstat->rx_unsup_opcd);
	nstat->tx_errors = hwstat->tx_bad_fcs;
	nstat->rx_length_errors = (hwstat->rx_len_chk_err +
				   hwstat->rx_lng_frm +
				   hwstat->rx_shrt_frm + hwstat->rx_pre_shrink);
	nstat->rx_crc_errors = hwstat->rx_crc_err;
	nstat->rx_frame_errors = hwstat->rx_pre_shrink + hwstat->rx_drib_nib;
	nstat->rx_packets = hwstat->rx_ok_pkt;
	nstat->tx_packets = (hwstat->tx_unicast +
			     hwstat->tx_multicast + hwstat->tx_brdcast);
	nstat->rx_bytes = hwstat->rx_byte;
	nstat->tx_bytes = hwstat->tx_byte;
	nstat->multicast = hwstat->rx_multicast;
	nstat->rx_missed_errors = hwstat->rx_pkt_ignr;

	dnet_print_pretty_hwstats(hwstat);

	return nstat;
}

static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct phy_device *phydev = dev->phydev;

	if (!netif_running(dev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}

static void dnet_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, "0", sizeof(info->bus_info));
}

static const struct ethtool_ops dnet_ethtool_ops = {
	.get_drvinfo		= dnet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

static const struct net_device_ops dnet_netdev_ops = {
	.ndo_open		= dnet_open,
	.ndo_stop		= dnet_close,
	.ndo_get_stats		= dnet_get_stats,
	.ndo_start_xmit		= dnet_start_xmit,
	.ndo_do_ioctl		= dnet_ioctl,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

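/*
 * Probe note: resources come straight from the platform device. The MMIO
 * window is mapped with devm_ioremap_resource(), the capability bits are
 * read from the VERCAPS register, and if the MAC address registers do not
 * hold a valid address a random one is generated and written back before
 * the netdev is registered.
 */
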
static int dnet_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct net_device *dev;
	struct dnet *bp;
	struct phy_device *phydev;
	int err;
	unsigned int irq;

	irq = platform_get_irq(pdev, 0);

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev)
		return -ENOMEM;

	/* TODO: Actually, we have some interesting features... */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->dev = dev;

	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	spin_lock_init(&bp->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	bp->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(bp->regs)) {
		err = PTR_ERR(bp->regs);
		goto err_out_free_dev;
	}

	dev->irq = irq;
	err = request_irq(dev->irq, dnet_interrupt, 0, DRV_NAME, dev);
	if (err) {
		dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
			irq, err);
		goto err_out_free_dev;
	}

	dev->netdev_ops = &dnet_netdev_ops;
	netif_napi_add(dev, &bp->napi, dnet_poll, 64);
	dev->ethtool_ops = &dnet_ethtool_ops;

	dev->base_addr = (unsigned long)bp->regs;

	bp->capabilities = dnet_readl(bp, VERCAPS) & DNET_CAPS_MASK;

	dnet_get_hwaddr(bp);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		/* choose a random ethernet address */
		eth_hw_addr_random(dev);
		__dnet_set_hwaddr(bp);
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_free_irq;
	}

	/* register the PHY board fixup (for Marvell 88E1111) */
	err = phy_register_fixup_for_uid(0x01410cc0, 0xfffffff0,
					 dnet_phy_marvell_fixup);
	/* we can live without it, so just issue a warning */
	if (err)
		dev_warn(&pdev->dev, "Cannot register PHY board fixup.\n");

	err = dnet_mii_init(bp);
	if (err)
		goto err_out_unregister_netdev;

	dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n",
		 bp->regs, (unsigned int)res->start, dev->irq, dev->dev_addr);
	dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma\n",
		 (bp->capabilities & DNET_HAS_MDIO) ? "" : "no ",
		 (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ",
		 (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ",
		 (bp->capabilities & DNET_HAS_DMA) ? "" : "no ");
	phydev = dev->phydev;
	phy_attached_info(phydev);

	return 0;

err_out_unregister_netdev:
	unregister_netdev(dev);
err_out_free_irq:
	free_irq(dev->irq, dev);
err_out_free_dev:
	free_netdev(dev);
	return err;
}

static int dnet_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct dnet *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		if (dev->phydev)
			phy_disconnect(dev->phydev);
		mdiobus_unregister(bp->mii_bus);
		mdiobus_free(bp->mii_bus);
		unregister_netdev(dev);
		free_irq(dev->irq, dev);
		free_netdev(dev);
	}

	return 0;
}

static struct platform_driver dnet_driver = {
	.probe		= dnet_probe,
	.remove		= dnet_remove,
	.driver		= {
		.name		= "dnet",
	},
};

module_platform_driver(dnet_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Dave DNET Ethernet driver");
MODULE_AUTHOR("Ilya Yanok <yanok@emcraft.com>, "
	      "Matteo Vit <matteo.vit@dave.eu>");