// SPDX-License-Identifier: GPL-2.0-only
/*
 * Network device driver for the BMAC ethernet controller on
 * Apple Powermacs.  Assumes it's under a DBDMA controller.
 *
 * Copyright (C) 1998 Randy Gobbel.
 *
 * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to
 *		      dynamic procfs inode.
 */
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/crc32poly.h>
#include <linux/bitrev.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/pgtable.h>
#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/macio.h>
#include <asm/irq.h>

#include "bmac.h"
#define trunc_page(x)	((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
#define round_page(x)	trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))
/* switch to use multicast code lifted from sunhme driver */
#define SUNHME_MULTICAST

#define N_RX_RING	64
#define N_TX_RING	32
#define MAX_TX_ACTIVE	1
#define ETHERCRC	4
#define ETHERMINPACKET	64
#define ETHERMTU	1500
#define RX_BUFLEN	(ETHERMTU + 14 + ETHERCRC + 2)
#define TX_TIMEOUT	HZ	/* 1 second */

/* Bits in transmit DMA status */
#define TX_DMA_ERR	0x80

#define XXDEBUG(args)
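
/*
 * Software state for one BMAC.  A note on the ring bookkeeping below
 * (inferred from the rx/tx interrupt handlers): rx_fill/tx_fill index
 * the next slot the driver will fill, rx_empty/tx_empty the next slot
 * the hardware will drain; a ring is considered full when advancing
 * the fill index would make it equal to the empty index, so one slot
 * is always left unused.
 */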
struct bmac_data {
	/* volatile struct bmac *bmac; */
	struct sk_buff_head *queue;
	volatile struct dbdma_regs __iomem *tx_dma;
	int tx_dma_intr;
	volatile struct dbdma_regs __iomem *rx_dma;
	int rx_dma_intr;
	volatile struct dbdma_cmd *tx_cmds;	/* xmit dma command list */
	volatile struct dbdma_cmd *rx_cmds;	/* recv dma command list */
	struct macio_dev *mdev;
	int is_bmac_plus;
	struct sk_buff *rx_bufs[N_RX_RING];
	int rx_fill;
	int rx_empty;
	struct sk_buff *tx_bufs[N_TX_RING];
	int tx_fill;
	int tx_empty;
	unsigned char tx_fullup;
	struct timer_list tx_timeout;
	int timeout_active;
	int sleeping;
	int opened;
	unsigned short hash_use_count[64];
	unsigned short hash_table_mask[4];
	spinlock_t lock;
};
#if 0 /* Move that to ethtool */

typedef struct bmac_reg_entry {
	char *name;
	unsigned short reg_offset;
} bmac_reg_entry_t;

#define N_REG_ENTRIES 31

static bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = {
	{"MEMADD", MEMADD},
	{"MEMDATAHI", MEMDATAHI},
	{"MEMDATALO", MEMDATALO},
	{"TXPNTR", TXPNTR},
	{"RXPNTR", RXPNTR},
	{"IPG1", IPG1},
	{"IPG2", IPG2},
	{"ALIMIT", ALIMIT},
	{"SLOT", SLOT},
	{"PALEN", PALEN},
	{"PAPAT", PAPAT},
	{"TXSFD", TXSFD},
	{"JAM", JAM},
	{"TXCFG", TXCFG},
	{"TXMAX", TXMAX},
	{"TXMIN", TXMIN},
	{"PAREG", PAREG},
	{"DCNT", DCNT},
	{"NCCNT", NCCNT},
	{"NTCNT", NTCNT},
	{"EXCNT", EXCNT},
	{"LTCNT", LTCNT},
	{"TXSM", TXSM},
	{"RXCFG", RXCFG},
	{"RXMAX", RXMAX},
	{"RXMIN", RXMIN},
	{"FRCNT", FRCNT},
	{"AECNT", AECNT},
	{"FECNT", FECNT},
	{"RXSM", RXSM},
	{"RXCV", RXCV}
};

#endif
static unsigned char *bmac_emergency_rxbuf;

/*
 * Number of bytes of private data per BMAC: allow enough for
 * the rx and tx dma commands plus a branch dma command each,
 * and another 16 bytes to allow us to align the dma command
 * buffers on a 16 byte boundary.
 */
#define PRIV_BYTES	(sizeof(struct bmac_data) \
	+ (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
	+ sizeof(struct sk_buff_head))
static int bmac_open(struct net_device *dev);
static int bmac_close(struct net_device *dev);
static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
static void bmac_set_multicast(struct net_device *dev);
static void bmac_reset_and_enable(struct net_device *dev);
static void bmac_start_chip(struct net_device *dev);
static void bmac_init_chip(struct net_device *dev);
static void bmac_init_registers(struct net_device *dev);
static void bmac_enable_and_reset_chip(struct net_device *dev);
static int bmac_set_address(struct net_device *dev, void *addr);
static irqreturn_t bmac_misc_intr(int irq, void *dev_id);
static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
static void bmac_set_timeout(struct net_device *dev);
static void bmac_tx_timeout(struct timer_list *t);
static netdev_tx_t bmac_output(struct sk_buff *skb, struct net_device *dev);
static void bmac_start(struct net_device *dev);
#define	DBDMA_SET(x)	( ((x) | (x) << 16) )
#define	DBDMA_CLEAR(x)	( (x) << 16)
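
/*
 * The high half of a DBDMA control-register write is a mask: only the
 * status/control bits whose mask bit is set are actually modified.
 * DBDMA_SET(x) writes x together with its mask bits (bits become 1);
 * DBDMA_CLEAR(x) writes just the mask (bits become 0).
 */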
static inline void
dbdma_st32(volatile __u32 __iomem *a, unsigned long x)
{
	__asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
}

static inline unsigned long
dbdma_ld32(volatile __u32 __iomem *a)
{
	__u32 swap;
	__asm__ volatile ("lwbrx %0,0,%1" :  "=r" (swap) : "r" (a));
	return swap;
}

static void
dbdma_continue(volatile struct dbdma_regs __iomem *dmap)
{
	dbdma_st32(&dmap->control,
		   DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD));
	eieio();
}

static void
dbdma_reset(volatile struct dbdma_regs __iomem *dmap)
{
	dbdma_st32(&dmap->control,
		   DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN));
	eieio();
	while (dbdma_ld32(&dmap->status) & RUN)
		eieio();
}

static void
dbdma_setcmd(volatile struct dbdma_cmd *cp,
	     unsigned short cmd, unsigned count, unsigned long addr,
	     unsigned long cmd_dep)
{
	out_le16(&cp->command, cmd);
	out_le16(&cp->req_count, count);
	out_le32(&cp->phy_addr, addr);
	out_le32(&cp->cmd_dep, cmd_dep);
	out_le16(&cp->xfer_status, 0);
	out_le16(&cp->res_count, 0);
}
static inline
void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data )
{
	out_le16((void __iomem *)dev->base_addr + reg_offset, data);
}

static inline
unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
{
	return in_le16((void __iomem *)dev->base_addr + reg_offset);
}

static void
bmac_enable_and_reset_chip(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;

	if (rd)
		dbdma_reset(rd);
	if (td)
		dbdma_reset(td);

	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1);
}
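
/*
 * Bit-banged MII management interface.  The MIFCSR bit assignments are
 * inferred from the routines below: bit 0 looks like the management
 * clock, bit 1 the outgoing data bit, bit 2 an output enable, and
 * bit 3 the incoming data bit sampled by bmac_mif_readbits().
 */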
#define MIFDELAY	udelay(10)

static unsigned int
bmac_mif_readbits(struct net_device *dev, int nb)
{
	unsigned int val = 0;

	while (--nb >= 0) {
		bmwrite(dev, MIFCSR, 0);
		MIFDELAY;
		if (bmread(dev, MIFCSR) & 8)
			val |= 1 << nb;
		bmwrite(dev, MIFCSR, 1);
		MIFDELAY;
	}
	bmwrite(dev, MIFCSR, 0);
	MIFDELAY;
	bmwrite(dev, MIFCSR, 1);
	MIFDELAY;
	return val;
}

static void
bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb)
{
	int b;

	while (--nb >= 0) {
		b = (val & (1 << nb))? 6: 4;
		bmwrite(dev, MIFCSR, b);
		MIFDELAY;
		bmwrite(dev, MIFCSR, b|1);
		MIFDELAY;
	}
}

static unsigned int
bmac_mif_read(struct net_device *dev, unsigned int addr)
{
	unsigned int val;

	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	bmac_mif_writebits(dev, ~0U, 32);
	bmac_mif_writebits(dev, 6, 4);
	bmac_mif_writebits(dev, addr, 10);
	bmwrite(dev, MIFCSR, 2);
	MIFDELAY;
	bmwrite(dev, MIFCSR, 1);
	MIFDELAY;
	val = bmac_mif_readbits(dev, 17);
	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	return val;
}

static void
bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val)
{
	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	bmac_mif_writebits(dev, ~0U, 32);
	bmac_mif_writebits(dev, 5, 4);
	bmac_mif_writebits(dev, addr, 10);
	bmac_mif_writebits(dev, 2, 2);
	bmac_mif_writebits(dev, val, 16);
	bmac_mif_writebits(dev, 3, 2);
}
static void
bmac_init_registers(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile unsigned short regValue;
	unsigned short *pWord16;
	int i;

	/* XXDEBUG(("bmac: enter init_registers\n")); */

	bmwrite(dev, RXRST, RxResetValue);
	bmwrite(dev, TXRST, TxResetBit);

	i = 100;
	do {
		--i;
		udelay(10000);
		regValue = bmread(dev, TXRST); /* wait for reset to clear..acknowledge */
	} while ((regValue & TxResetBit) && i > 0);

	if (!bp->is_bmac_plus) {
		regValue = bmread(dev, XCVRIF);
		regValue |= ClkBit | SerialMode | COLActiveLow;
		bmwrite(dev, XCVRIF, regValue);
		udelay(10000);
	}

	bmwrite(dev, RSEED, (unsigned short)0x1968);

	regValue = bmread(dev, XIFC);
	regValue |= TxOutputEnable;
	bmwrite(dev, XIFC, regValue);

	bmread(dev, PAREG);

	/* set collision counters to 0 */
	bmwrite(dev, NCCNT, 0);
	bmwrite(dev, NTCNT, 0);
	bmwrite(dev, EXCNT, 0);
	bmwrite(dev, LTCNT, 0);

	/* set rx counters to 0 */
	bmwrite(dev, FRCNT, 0);
	bmwrite(dev, LECNT, 0);
	bmwrite(dev, AECNT, 0);
	bmwrite(dev, FECNT, 0);
	bmwrite(dev, RXCV, 0);

	/* set tx fifo information */
	bmwrite(dev, TXTH, 4);	/* 4 octets before tx starts */

	bmwrite(dev, TXFIFOCSR, 0);	/* first disable txFIFO */
	bmwrite(dev, TXFIFOCSR, TxFIFOEnable );

	/* set rx fifo information */
	bmwrite(dev, RXFIFOCSR, 0);	/* first disable rxFIFO */
	bmwrite(dev, RXFIFOCSR, RxFIFOEnable );

	//bmwrite(dev, TXCFG, TxMACEnable);	/* TxNeverGiveUp maybe later */
	bmread(dev, STATUS);		/* read it just to clear it */

	/* zero out the chip Hash Filter registers */
	for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
	bmwrite(dev, BHASH3, bp->hash_table_mask[0]);	/* bits 15 - 0 */
	bmwrite(dev, BHASH2, bp->hash_table_mask[1]);	/* bits 31 - 16 */
	bmwrite(dev, BHASH1, bp->hash_table_mask[2]);	/* bits 47 - 32 */
	bmwrite(dev, BHASH0, bp->hash_table_mask[3]);	/* bits 63 - 48 */

	pWord16 = (unsigned short *)dev->dev_addr;
	bmwrite(dev, MADD0, *pWord16++);
	bmwrite(dev, MADD1, *pWord16++);
	bmwrite(dev, MADD2, *pWord16);

	bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);

	bmwrite(dev, INTDISABLE, EnableNormal);
}
#if 0
static void
bmac_disable_interrupts(struct net_device *dev)
{
	bmwrite(dev, INTDISABLE, DisableAll);
}

static void
bmac_enable_interrupts(struct net_device *dev)
{
	bmwrite(dev, INTDISABLE, EnableNormal);
}
#endif
static void
bmac_start_chip(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	unsigned short oldConfig;

	/* enable rx dma channel */
	dbdma_continue(rd);

	oldConfig = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

	/* turn on rx plus any other bits already on (promiscuous possibly) */
	oldConfig = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
	udelay(20000);
}
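
/*
 * PHY bring-up for the BMAC+.  MII registers 0, 1 and 4 are the
 * standard BMCR, BMSR and ANAR; the code below advertises whatever
 * the PHY reports as capable and, if the advertisement or autoneg
 * enable is stale, writes 0x1200 (autoneg enable + restart) to kick
 * off renegotiation.
 */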
static void
bmac_init_phy(struct net_device *dev)
{
	unsigned int addr;
	struct bmac_data *bp = netdev_priv(dev);

	printk(KERN_DEBUG "phy registers:");
	for (addr = 0; addr < 32; ++addr) {
		if ((addr & 7) == 0)
			printk(KERN_DEBUG);
		printk(KERN_CONT " %.4x", bmac_mif_read(dev, addr));
	}
	printk(KERN_CONT "\n");

	if (bp->is_bmac_plus) {
		unsigned int capable, ctrl;

		ctrl = bmac_mif_read(dev, 0);
		capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1;
		if (bmac_mif_read(dev, 4) != capable ||
		    (ctrl & 0x1000) == 0) {
			bmac_mif_write(dev, 4, capable);
			bmac_mif_write(dev, 0, 0x1200);
		} else
			bmac_mif_write(dev, 0, 0x1000);
	}
}
static void bmac_init_chip(struct net_device *dev)
{
	bmac_init_phy(dev);
	bmac_init_registers(dev);
}
#ifdef CONFIG_PM
static int bmac_suspend(struct macio_dev *mdev, pm_message_t state)
{
	struct net_device* dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;
	unsigned short config;
	int i;

	netif_device_detach(dev);
	/* prolly should wait for dma to finish & turn off the chip */
	spin_lock_irqsave(&bp->lock, flags);
	if (bp->timeout_active) {
		del_timer(&bp->tx_timeout);
		bp->timeout_active = 0;
	}
	disable_irq(dev->irq);
	disable_irq(bp->tx_dma_intr);
	disable_irq(bp->rx_dma_intr);
	bp->sleeping = 1;
	spin_unlock_irqrestore(&bp->lock, flags);
	if (bp->opened) {
		volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
		volatile struct dbdma_regs __iomem *td = bp->tx_dma;

		config = bmread(dev, RXCFG);
		bmwrite(dev, RXCFG, (config & ~RxMACEnable));
		config = bmread(dev, TXCFG);
		bmwrite(dev, TXCFG, (config & ~TxMACEnable));
		bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
		/* disable rx and tx dma */
		rd->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
		td->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
		/* free some skb's */
		for (i=0; i<N_RX_RING; i++) {
			if (bp->rx_bufs[i] != NULL) {
				dev_kfree_skb(bp->rx_bufs[i]);
				bp->rx_bufs[i] = NULL;
			}
		}
		for (i = 0; i<N_TX_RING; i++) {
			if (bp->tx_bufs[i] != NULL) {
				dev_kfree_skb(bp->tx_bufs[i]);
				bp->tx_bufs[i] = NULL;
			}
		}
	}
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
	return 0;
}

static int bmac_resume(struct macio_dev *mdev)
{
	struct net_device* dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);

	/* see if this is enough */
	if (bp->opened)
		bmac_reset_and_enable(dev);

	enable_irq(dev->irq);
	enable_irq(bp->tx_dma_intr);
	enable_irq(bp->rx_dma_intr);
	netif_device_attach(dev);

	return 0;
}
#endif /* CONFIG_PM */
static int bmac_set_address(struct net_device *dev, void *addr)
{
	struct bmac_data *bp = netdev_priv(dev);
	unsigned char *p = addr;
	unsigned short *pWord16;
	unsigned long flags;
	int i;

	XXDEBUG(("bmac: enter set_address\n"));
	spin_lock_irqsave(&bp->lock, flags);

	for (i = 0; i < 6; ++i) {
		dev->dev_addr[i] = p[i];
	}
	/* load up the hardware address */
	pWord16 = (unsigned short *)dev->dev_addr;
	bmwrite(dev, MADD0, *pWord16++);
	bmwrite(dev, MADD1, *pWord16++);
	bmwrite(dev, MADD2, *pWord16);

	spin_unlock_irqrestore(&bp->lock, flags);
	XXDEBUG(("bmac: exit set_address\n"));
	return 0;
}
static inline void bmac_set_timeout(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);
	if (bp->timeout_active)
		del_timer(&bp->tx_timeout);
	bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
	add_timer(&bp->tx_timeout);
	bp->timeout_active = 1;
	spin_unlock_irqrestore(&bp->lock, flags);
}
static void
bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
{
	void *vaddr;
	unsigned long baddr;
	unsigned long len;

	len = skb->len;
	vaddr = skb->data;
	baddr = virt_to_bus(vaddr);

	dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0);
}
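
/*
 * If no skb could be allocated for a ring slot, point the DBDMA
 * command at the shared bmac_emergency_rxbuf instead so the channel
 * always has a valid target; frames landing there are counted as
 * rx_dropped by the receive interrupt handler.
 */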
static void
bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
{
	unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf;

	dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN,
		     virt_to_bus(addr), 0);
}
static void
bmac_init_tx_ring(struct bmac_data *bp)
{
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;

	memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd));

	bp->tx_empty = 0;
	bp->tx_fill = 0;
	bp->tx_fullup = 0;

	/* put a branch at the end of the tx command list */
	dbdma_setcmd(&bp->tx_cmds[N_TX_RING],
		     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds));

	/* reset tx dma */
	dbdma_reset(td);
	out_le32(&td->wait_sel, 0x00200020);
	out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds));
}
static int
bmac_init_rx_ring(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	int i;
	struct sk_buff *skb;

	/* initialize list of sk_buffs for receiving and set up recv dma */
	memset((char *)bp->rx_cmds, 0,
	       (N_RX_RING + 1) * sizeof(struct dbdma_cmd));
	for (i = 0; i < N_RX_RING; i++) {
		if ((skb = bp->rx_bufs[i]) == NULL) {
			bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
			if (skb != NULL)
				skb_reserve(skb, 2);
		}
		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
	}

	bp->rx_empty = 0;
	bp->rx_fill = i;

	/* Put a branch back to the beginning of the receive command list */
	dbdma_setcmd(&bp->rx_cmds[N_RX_RING],
		     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds));

	/* start rx dma */
	dbdma_reset(rd);
	out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds));

	return 1;
}
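
/*
 * Queue one skb on the tx ring.  The slot at tx_fill gets the OUTPUT
 * command and the slot after it is turned into a DBDMA_STOP, so the
 * channel parks there until the next packet replaces it; the ring is
 * declared full when that next slot would collide with tx_empty.
 */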
static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	int i;

	/* see if there's a free slot in the tx ring */
	/* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */
	/* 	     bp->tx_empty, bp->tx_fill)); */
	i = bp->tx_fill + 1;
	if (i >= N_TX_RING)
		i = 0;
	if (i == bp->tx_empty) {
		netif_stop_queue(dev);
		bp->tx_fullup = 1;
		XXDEBUG(("bmac_transmit_packet: tx ring full\n"));
		return -1;		/* can't take it at the moment */
	}

	dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0);

	bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]);

	bp->tx_bufs[bp->tx_fill] = skb;
	bp->tx_fill = i;

	dev->stats.tx_bytes += skb->len;

	dbdma_continue(td);

	return 0;
}
static int rxintcount;

static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_cmd *cp;
	int i, nb, stat;
	struct sk_buff *skb;
	unsigned int residual;
	int last;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (++rxintcount < 10) {
		XXDEBUG(("bmac_rxdma_intr\n"));
	}

	last = -1;
	i = bp->rx_empty;

	while (1) {
		cp = &bp->rx_cmds[i];
		stat = le16_to_cpu(cp->xfer_status);
		residual = le16_to_cpu(cp->res_count);
		if ((stat & ACTIVE) == 0)
			break;
		nb = RX_BUFLEN - residual - 2;
		if (nb < (ETHERMINPACKET - ETHERCRC)) {
			skb = NULL;
			dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
		} else {
			skb = bp->rx_bufs[i];
			bp->rx_bufs[i] = NULL;
		}
		if (skb != NULL) {
			nb -= ETHERCRC;
			skb_put(skb, nb);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			++dev->stats.rx_packets;
			dev->stats.rx_bytes += nb;
		} else {
			++dev->stats.rx_dropped;
		}
		if ((skb = bp->rx_bufs[i]) == NULL) {
			bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
			if (skb != NULL)
				skb_reserve(bp->rx_bufs[i], 2);
		}
		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
		cp->res_count = cpu_to_le16(0);
		cp->xfer_status = cpu_to_le16(0);
		last = i;
		if (++i >= N_RX_RING) i = 0;
	}

	if (last != -1) {
		bp->rx_fill = last;
		bp->rx_empty = i;
	}

	dbdma_continue(rd);
	spin_unlock_irqrestore(&bp->lock, flags);

	if (rxintcount < 10) {
		XXDEBUG(("bmac_rxdma_intr done\n"));
	}
	return IRQ_HANDLED;
}
static int txintcount;

static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_cmd *cp;
	int stat;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (txintcount++ < 10) {
		XXDEBUG(("bmac_txdma_intr\n"));
	}

	/*     del_timer(&bp->tx_timeout); */
	/*     bp->timeout_active = 0; */

	while (1) {
		cp = &bp->tx_cmds[bp->tx_empty];
		stat = le16_to_cpu(cp->xfer_status);
		if (txintcount < 10) {
			XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat));
		}
		if (!(stat & ACTIVE)) {
			/*
			 * status field might not have been filled by DBDMA
			 */
			if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr)))
				break;
		}

		if (bp->tx_bufs[bp->tx_empty]) {
			++dev->stats.tx_packets;
			dev_consume_skb_irq(bp->tx_bufs[bp->tx_empty]);
		}
		bp->tx_bufs[bp->tx_empty] = NULL;
		bp->tx_fullup = 0;
		netif_wake_queue(dev);
		if (++bp->tx_empty >= N_TX_RING)
			bp->tx_empty = 0;
		if (bp->tx_empty == bp->tx_fill)
			break;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (txintcount < 10) {
		XXDEBUG(("bmac_txdma_intr done->bmac_start\n"));
	}

	bmac_start(dev);
	return IRQ_HANDLED;
}
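
/*
 * Note: SUNHME_MULTICAST is defined above, so this #ifndef branch
 * (the original hand-rolled CRC/hash code) is compiled out and kept
 * for reference only.
 */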
#ifndef SUNHME_MULTICAST
/* Real fast bit-reversal algorithm, 6-bit values */
static int reverse6[64] = {
	0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
	0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
	0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
	0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
	0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
	0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
	0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
	0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
};

static unsigned int
crc416(unsigned int curval, unsigned short nxtval)
{
	unsigned int counter, cur = curval, next = nxtval;
	int high_crc_set, low_data_set;

	/* Swap bytes */
	next = ((next & 0x00FF) << 8) | (next >> 8);

	/* Compute bit-by-bit */
	for (counter = 0; counter < 16; ++counter) {
		/* is high CRC bit set? */
		if ((cur & 0x80000000) == 0) high_crc_set = 0;
		else high_crc_set = 1;

		cur = cur << 1;

		if ((next & 0x0001) == 0) low_data_set = 0;
		else low_data_set = 1;

		next = next >> 1;

		/* do the XOR */
		if (high_crc_set ^ low_data_set) cur = cur ^ CRC32_POLY_BE;
	}
	return cur;
}

static unsigned int
bmac_crc(unsigned short *address)
{
	unsigned int newcrc;

	XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
	newcrc = crc416(0xffffffff, *address);	/* address bits 47 - 32 */
	newcrc = crc416(newcrc, address[1]);	/* address bits 31 - 16 */
	newcrc = crc416(newcrc, address[2]);	/* address bits 15 - 0  */

	return(newcrc);
}
/*
 * Add requested mcast addr to BMac's hash table filter.
 */

static void
bmac_addhash(struct bmac_data *bp, unsigned char *addr)
{
	unsigned int crc;
	unsigned short mask;

	if (!(*addr)) return;
	crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
	crc = reverse6[crc];	/* Hyperfast bit-reversing algorithm */
	if (bp->hash_use_count[crc]++) return;	/* This bit is already set */
	mask = crc % 16;
	mask = (unsigned short)1 << mask;
	bp->hash_table_mask[crc/16] |= mask;
}

static void
bmac_removehash(struct bmac_data *bp, unsigned char *addr)
{
	unsigned int crc;
	unsigned short mask;

	/* Now, delete the address from the filter copy, as indicated */
	crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
	crc = reverse6[crc];	/* Hyperfast bit-reversing algorithm */
	if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
	if (--bp->hash_use_count[crc]) return;	/* That bit is still in use */
	mask = crc % 16;
	mask = ((unsigned short)1 << mask) ^ 0xffff; /* To turn off bit */
	bp->hash_table_mask[crc/16] &= mask;
}
/*
 * Sync the adapter with the software copy of the multicast mask
 * (logical address filter).
 */

static void
bmac_rx_off(struct net_device *dev)
{
	unsigned short rx_cfg;

	rx_cfg = bmread(dev, RXCFG);
	rx_cfg &= ~RxMACEnable;
	bmwrite(dev, RXCFG, rx_cfg);
	do {
		rx_cfg = bmread(dev, RXCFG);
	} while (rx_cfg & RxMACEnable);
}

unsigned short
bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable)
{
	unsigned short rx_cfg;

	rx_cfg = bmread(dev, RXCFG);
	rx_cfg |= RxMACEnable;
	if (hash_enable) rx_cfg |= RxHashFilterEnable;
	else rx_cfg &= ~RxHashFilterEnable;
	if (promisc_enable) rx_cfg |= RxPromiscEnable;
	else rx_cfg &= ~RxPromiscEnable;
	bmwrite(dev, RXRST, RxResetValue);
	bmwrite(dev, RXFIFOCSR, 0);	/* first disable rxFIFO */
	bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
	bmwrite(dev, RXCFG, rx_cfg );
	return rx_cfg;
}

static void
bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp)
{
	bmwrite(dev, BHASH3, bp->hash_table_mask[0]);	/* bits 15 - 0 */
	bmwrite(dev, BHASH2, bp->hash_table_mask[1]);	/* bits 31 - 16 */
	bmwrite(dev, BHASH1, bp->hash_table_mask[2]);	/* bits 47 - 32 */
	bmwrite(dev, BHASH0, bp->hash_table_mask[3]);	/* bits 63 - 48 */
}
#if 0
static void
bmac_add_multi(struct net_device *dev,
	       struct bmac_data *bp, unsigned char *addr)
{
	/* XXDEBUG(("bmac: enter bmac_add_multi\n")); */
	bmac_addhash(bp, addr);
	bmac_rx_off(dev);
	bmac_update_hash_table_mask(dev, bp);
	bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
	/* XXDEBUG(("bmac: exit bmac_add_multi\n")); */
}

static void
bmac_remove_multi(struct net_device *dev,
		  struct bmac_data *bp, unsigned char *addr)
{
	bmac_removehash(bp, addr);
	bmac_rx_off(dev);
	bmac_update_hash_table_mask(dev, bp);
	bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
}
#endif
/* Set or clear the multicast filter for this adaptor.
    num_addrs == -1	Promiscuous mode, receive all packets
    num_addrs == 0	Normal mode, clear multicast list
    num_addrs > 0	Multicast mode, receive normal and MC packets, and do
			best-effort filtering.
 */
static void bmac_set_multicast(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct bmac_data *bp = netdev_priv(dev);
	int num_addrs = netdev_mc_count(dev);
	unsigned short rx_cfg;
	int i;

	if (bp->sleeping)
		return;

	XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));

	if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
		for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
		bmac_update_hash_table_mask(dev, bp);
		rx_cfg = bmac_rx_on(dev, 1, 0);
		XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n", rx_cfg));
	} else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) {
		rx_cfg = bmread(dev, RXCFG);
		rx_cfg |= RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);
		rx_cfg = bmac_rx_on(dev, 0, 1);
		XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg));
	} else {
		for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
		for (i=0; i<64; i++) bp->hash_use_count[i] = 0;
		if (num_addrs == 0) {
			rx_cfg = bmac_rx_on(dev, 0, 0);
			XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
		} else {
			netdev_for_each_mc_addr(ha, dev)
				bmac_addhash(bp, ha->addr);
			bmac_update_hash_table_mask(dev, bp);
			rx_cfg = bmac_rx_on(dev, 1, 0);
			XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
		}
	}
	/* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */
}
#else /* ifdef SUNHME_MULTICAST */

/* The version of set_multicast below was lifted from sunhme.c */
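
/*
 * Hash scheme: the top 6 bits of the little-endian CRC-32 of the
 * address select one of 64 filter bits, spread across the four 16-bit
 * BHASH registers (bit crc&0xf of register crc>>4 in the table below).
 */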
static void bmac_set_multicast(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	unsigned short rx_cfg;
	u32 crc;

	if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
		bmwrite(dev, BHASH0, 0xffff);
		bmwrite(dev, BHASH1, 0xffff);
		bmwrite(dev, BHASH2, 0xffff);
		bmwrite(dev, BHASH3, 0xffff);
	} else if(dev->flags & IFF_PROMISC) {
		rx_cfg = bmread(dev, RXCFG);
		rx_cfg |= RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);
	} else {
		u16 hash_table[4] = { 0 };

		rx_cfg = bmread(dev, RXCFG);
		rx_cfg &= ~RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);

		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(6, ha->addr);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
		bmwrite(dev, BHASH0, hash_table[0]);
		bmwrite(dev, BHASH1, hash_table[1]);
		bmwrite(dev, BHASH2, hash_table[2]);
		bmwrite(dev, BHASH3, hash_table[3]);
	}
}
#endif /* SUNHME_MULTICAST */
static int miscintcount;

static irqreturn_t bmac_misc_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	unsigned int status = bmread(dev, STATUS);
	if (miscintcount++ < 10) {
		XXDEBUG(("bmac_misc_intr\n"));
	}
	/* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */
	/*     bmac_txdma_intr_inner(irq, dev_id); */
	/*   if (status & FrameReceived) dev->stats.rx_dropped++; */
	if (status & RxErrorMask) dev->stats.rx_errors++;
	if (status & RxCRCCntExp) dev->stats.rx_crc_errors++;
	if (status & RxLenCntExp) dev->stats.rx_length_errors++;
	if (status & RxOverFlow) dev->stats.rx_over_errors++;
	if (status & RxAlignCntExp) dev->stats.rx_frame_errors++;
	/*   if (status & FrameSent) dev->stats.tx_dropped++; */
	if (status & TxErrorMask) dev->stats.tx_errors++;
	if (status & TxUnderrun) dev->stats.tx_fifo_errors++;
	if (status & TxNormalCollExp) dev->stats.collisions++;
	return IRQ_HANDLED;
}
/*
 * Procedure for reading EEPROM
 */
#define SROMAddressLength	5
#define DataInOn		0x0008
#define DataInOff		0x0000
#define Clk			0x0002
#define ChipSelect		0x0001
#define SDIShiftCount		3
#define SD0ShiftCount		2
#define DelayValue		1000	/* number of microseconds */
#define SROMStartOffset		10	/* this is in words */
#define SROMReadCount		3	/* number of words to read from SROM */
#define SROMAddressBits		6
#define EnetAddressOffset	20
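
/*
 * The station address lives in a small serial EEPROM wired to SROMCSR.
 * The helpers below bit-bang a Microwire-style protocol: ChipSelect is
 * bit 0 and Clk bit 1, data is shifted out on bit SDIShiftCount and
 * read back on bit SD0ShiftCount; reset_and_select_srom() clocks in
 * the 1-1-0 read opcode before each address.
 */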
static unsigned char
bmac_clock_out_bit(struct net_device *dev)
{
	unsigned short data;
	unsigned short val;

	bmwrite(dev, SROMCSR, ChipSelect | Clk);
	udelay(DelayValue);

	data = bmread(dev, SROMCSR);
	udelay(DelayValue);
	val = (data >> SD0ShiftCount) & 1;

	bmwrite(dev, SROMCSR, ChipSelect);
	udelay(DelayValue);

	return val;
}

static void
bmac_clock_in_bit(struct net_device *dev, unsigned int val)
{
	unsigned short data;

	if (val != 0 && val != 1) return;

	data = (val << SDIShiftCount);
	bmwrite(dev, SROMCSR, data | ChipSelect  );
	udelay(DelayValue);

	bmwrite(dev, SROMCSR, data | ChipSelect | Clk );
	udelay(DelayValue);

	bmwrite(dev, SROMCSR, data | ChipSelect);
	udelay(DelayValue);
}

static void
reset_and_select_srom(struct net_device *dev)
{
	/* first reset */
	bmwrite(dev, SROMCSR, 0);
	udelay(DelayValue);

	/* send it the read command (110) */
	bmac_clock_in_bit(dev, 1);
	bmac_clock_in_bit(dev, 1);
	bmac_clock_in_bit(dev, 0);
}
static unsigned short
read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len)
{
	unsigned short data, val;
	int i;

	/* send out the address we want to read from */
	for (i = 0; i < addr_len; i++) {
		val = addr >> (addr_len-i-1);
		bmac_clock_in_bit(dev, val & 1);
	}

	/* Now read in the 16-bit data */
	data = 0;
	for (i = 0; i < 16; i++) {
		val = bmac_clock_out_bit(dev);
		data <<= 1;
		data |= val;
	}
	bmwrite(dev, SROMCSR, 0);

	return data;
}
/*
 * It looks like Cogent and SMC use different methods for calculating
 * checksums. What a pain..
 */

static int
bmac_verify_checksum(struct net_device *dev)
{
	unsigned short data, storedCS;

	reset_and_select_srom(dev);
	data = read_srom(dev, 3, SROMAddressBits);
	storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00);

	return 0;
}
static void
bmac_get_station_address(struct net_device *dev, unsigned char *ea)
{
	int i;
	unsigned short data;

	for (i = 0; i < 3; i++) {
		reset_and_select_srom(dev);
		data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
		ea[2*i]   = bitrev8(data & 0x0ff);
		ea[2*i+1] = bitrev8((data >> 8) & 0x0ff);
	}
}
static void bmac_reset_and_enable(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;
	struct sk_buff *skb;
	unsigned char *data;

	spin_lock_irqsave(&bp->lock, flags);
	bmac_enable_and_reset_chip(dev);
	bmac_init_tx_ring(bp);
	bmac_init_rx_ring(dev);
	bmac_init_chip(dev);
	bmac_start_chip(dev);
	bmwrite(dev, INTDISABLE, EnableNormal);
	bp->sleeping = 0;

	/*
	 * It seems that the bmac can't receive until it's transmitted
	 * a packet.  So we give it a dummy packet to transmit.
	 */
	skb = netdev_alloc_skb(dev, ETHERMINPACKET);
	if (skb != NULL) {
		data = skb_put_zero(skb, ETHERMINPACKET);
		memcpy(data, dev->dev_addr, ETH_ALEN);
		memcpy(data + ETH_ALEN, dev->dev_addr, ETH_ALEN);
		bmac_transmit_packet(skb, dev);
	}
	spin_unlock_irqrestore(&bp->lock, flags);
}
static const struct ethtool_ops bmac_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
};

static const struct net_device_ops bmac_netdev_ops = {
	.ndo_open		= bmac_open,
	.ndo_stop		= bmac_close,
	.ndo_start_xmit		= bmac_output,
	.ndo_set_rx_mode	= bmac_set_multicast,
	.ndo_set_mac_address	= bmac_set_address,
	.ndo_validate_addr	= eth_validate_addr,
};
static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
	int j, rev, ret;
	struct bmac_data *bp;
	const unsigned char *prop_addr;
	unsigned char addr[6];
	struct net_device *dev;
	int is_bmac_plus = ((int)match->data) != 0;

	if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
		printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n");
		return -ENODEV;
	}
	prop_addr = of_get_property(macio_get_of_node(mdev),
			"mac-address", NULL);
	if (prop_addr == NULL) {
		prop_addr = of_get_property(macio_get_of_node(mdev),
				"local-mac-address", NULL);
		if (prop_addr == NULL) {
			printk(KERN_ERR "BMAC: Can't get mac-address\n");
			return -ENODEV;
		}
	}
	memcpy(addr, prop_addr, sizeof(addr));

	dev = alloc_etherdev(PRIV_BYTES);
	if (!dev)
		return -ENOMEM;

	bp = netdev_priv(dev);
	SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
	macio_set_drvdata(mdev, dev);

	bp->mdev = mdev;
	spin_lock_init(&bp->lock);

	if (macio_request_resources(mdev, "bmac")) {
		printk(KERN_ERR "BMAC: can't request IO resource !\n");
		goto out_free;
	}

	dev->base_addr = (unsigned long)
		ioremap(macio_resource_start(mdev, 0), macio_resource_len(mdev, 0));
	if (dev->base_addr == 0)
		goto out_release;

	dev->irq = macio_irq(mdev, 0);

	bmac_enable_and_reset_chip(dev);
	bmwrite(dev, INTDISABLE, DisableAll);

	rev = addr[0] == 0 && addr[1] == 0xA0;
	for (j = 0; j < 6; ++j)
		dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];

	/* Enable chip without interrupts for now */
	bmac_enable_and_reset_chip(dev);
	bmwrite(dev, INTDISABLE, DisableAll);

	dev->netdev_ops = &bmac_netdev_ops;
	dev->ethtool_ops = &bmac_ethtool_ops;

	bmac_get_station_address(dev, addr);
	if (bmac_verify_checksum(dev) != 0)
		goto err_out_iounmap;

	bp->is_bmac_plus = is_bmac_plus;
	bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1));
	if (!bp->tx_dma)
		goto err_out_iounmap;
	bp->tx_dma_intr = macio_irq(mdev, 1);
	bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2));
	if (!bp->rx_dma)
		goto err_out_iounmap_tx;
	bp->rx_dma_intr = macio_irq(mdev, 2);

	bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1);
	bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1;

	bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
	skb_queue_head_init(bp->queue);

	timer_setup(&bp->tx_timeout, bmac_tx_timeout, 0);

	ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
		goto err_out_iounmap_rx;
	}
	ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr);
		goto err_out_irq0;
	}
	ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr);
		goto err_out_irq1;
	}

	/* Mask chip interrupts and disable chip, will be
	 * re-enabled on open()
	 */
	disable_irq(dev->irq);
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);

	if (register_netdev(dev) != 0) {
		printk(KERN_ERR "BMAC: Ethernet registration failed\n");
		goto err_out_irq2;
	}

	printk(KERN_INFO "%s: BMAC%s at %pM",
	       dev->name, (is_bmac_plus ? "+" : ""), dev->dev_addr);
	XXDEBUG((", base_addr=%#0lx", dev->base_addr));
	printk("\n");

	return 0;

err_out_irq2:
	free_irq(bp->rx_dma_intr, dev);
err_out_irq1:
	free_irq(bp->tx_dma_intr, dev);
err_out_irq0:
	free_irq(dev->irq, dev);
err_out_iounmap_rx:
	iounmap(bp->rx_dma);
err_out_iounmap_tx:
	iounmap(bp->tx_dma);
err_out_iounmap:
	iounmap((void __iomem *)dev->base_addr);
out_release:
	macio_release_resources(mdev);
out_free:
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
	free_netdev(dev);

	return -ENODEV;
}
static int bmac_open(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	/* XXDEBUG(("bmac: enter open\n")); */
	/* reset the chip */
	bp->opened = 1;
	bmac_reset_and_enable(dev);
	enable_irq(dev->irq);
	return 0;
}
static int bmac_close(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	unsigned short config;
	int i;

	bp->sleeping = 1;

	/* disable rx and tx */
	config = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, (config & ~RxMACEnable));

	config = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, (config & ~TxMACEnable));

	bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */

	/* disable rx and tx dma */
	rd->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
	td->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */

	/* free some skb's */
	XXDEBUG(("bmac: free rx bufs\n"));
	for (i=0; i<N_RX_RING; i++) {
		if (bp->rx_bufs[i] != NULL) {
			dev_kfree_skb(bp->rx_bufs[i]);
			bp->rx_bufs[i] = NULL;
		}
	}
	XXDEBUG(("bmac: free tx bufs\n"));
	for (i = 0; i<N_TX_RING; i++) {
		if (bp->tx_bufs[i] != NULL) {
			dev_kfree_skb(bp->tx_bufs[i]);
			bp->tx_bufs[i] = NULL;
		}
	}
	XXDEBUG(("bmac: all bufs freed\n"));

	bp->opened = 0;
	disable_irq(dev->irq);
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);

	return 0;
}
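
/*
 * Transmit path: bmac_output() only queues the skb; bmac_start() then
 * drains the queue into the DBDMA ring under bp->lock, and is also
 * called from the tx-done interrupt, so queueing and ring refill stay
 * serialized.
 */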
static void
bmac_start(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	int i;
	struct sk_buff *skb;
	unsigned long flags;

	if (bp->sleeping)
		return;

	spin_lock_irqsave(&bp->lock, flags);
	while (1) {
		i = bp->tx_fill + 1;
		if (i >= N_TX_RING)
			i = 0;
		if (i == bp->tx_empty)
			break;
		skb = skb_dequeue(bp->queue);
		if (skb == NULL)
			break;
		bmac_transmit_packet(skb, dev);
	}
	spin_unlock_irqrestore(&bp->lock, flags);
}
static netdev_tx_t
bmac_output(struct sk_buff *skb, struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	skb_queue_tail(bp->queue, skb);
	bmac_start(dev);
	return NETDEV_TX_OK;
}
static void bmac_tx_timeout(struct timer_list *t)
{
	struct bmac_data *bp = from_timer(bp, t, tx_timeout);
	struct net_device *dev = macio_get_drvdata(bp->mdev);
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_cmd *cp;
	unsigned long flags;
	unsigned short config, oldConfig;
	int i;

	XXDEBUG(("bmac: tx_timeout called\n"));
	spin_lock_irqsave(&bp->lock, flags);
	bp->timeout_active = 0;

	/* update various counters */
	/*     bmac_handle_misc_intrs(bp, 0); */

	cp = &bp->tx_cmds[bp->tx_empty];
	/* XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */
	/* 	   le32_to_cpu(td->status), le16_to_cpu(cp->xfer_status), bp->tx_bad_runt, */
	/* 	   mb->pr, mb->xmtfs, mb->fifofc)); */

	/* turn off both tx and rx and reset the chip */
	config = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, (config & ~RxMACEnable));
	config = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, (config & ~TxMACEnable));
	out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
	printk(KERN_ERR "bmac: transmit timeout - resetting\n");
	bmac_enable_and_reset_chip(dev);

	/* restart rx dma */
	cp = bus_to_virt(le32_to_cpu(rd->cmdptr));
	out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
	out_le16(&cp->xfer_status, 0);
	out_le32(&rd->cmdptr, virt_to_bus(cp));
	out_le32(&rd->control, DBDMA_SET(RUN|WAKE));

	/* fix up the transmit side */
	XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n",
		 bp->tx_empty, bp->tx_fill, bp->tx_fullup));
	i = bp->tx_empty;
	++dev->stats.tx_errors;
	if (i != bp->tx_fill) {
		dev_kfree_skb(bp->tx_bufs[i]);
		bp->tx_bufs[i] = NULL;
		if (++i >= N_TX_RING) i = 0;
		bp->tx_empty = i;
	}
	bp->tx_fullup = 0;
	netif_wake_queue(dev);
	if (i != bp->tx_fill) {
		cp = &bp->tx_cmds[i];
		out_le16(&cp->xfer_status, 0);
		out_le16(&cp->command, OUTPUT_LAST);
		out_le32(&td->cmdptr, virt_to_bus(cp));
		out_le32(&td->control, DBDMA_SET(RUN));
		/*	bmac_set_timeout(dev); */
		XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i));
	}

	/* turn it back on */
	oldConfig = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
	oldConfig = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

	spin_unlock_irqrestore(&bp->lock, flags);
}
#if 0
static void dump_dbdma(volatile struct dbdma_cmd *cp,int count)
{
	int i,*ip;

	for (i=0;i< count;i++) {
		ip = (int*)(cp+i);

		printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n",
		       le32_to_cpup(ip+0),
		       le32_to_cpup(ip+1),
		       le32_to_cpup(ip+2),
		       le32_to_cpup(ip+3));
	}
}
#endif
#if 0
static int
bmac_proc_info(char *buffer, char **start, off_t offset, int length)
{
	int len = 0;
	off_t pos   = 0;
	off_t begin = 0;
	int i;

	if (bmac_devs == NULL)
		return -ENOSYS;

	len += sprintf(buffer, "BMAC counters & registers\n");

	for (i = 0; i<N_REG_ENTRIES; i++) {
		len += sprintf(buffer + len, "%s: %#08x\n",
			       reg_entries[i].name,
			       bmread(bmac_devs, reg_entries[i].reg_offset));
		pos = begin + len;

		if (pos < offset) {
			len = 0;
			begin = pos;
		}

		if (pos > offset+length) break;
	}

	*start = buffer + (offset - begin);
	len -= (offset - begin);

	if (len > length) len = length;

	return len;
}
#endif
static int bmac_remove(struct macio_dev *mdev)
{
	struct net_device *dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);

	unregister_netdev(dev);

	free_irq(dev->irq, dev);
	free_irq(bp->tx_dma_intr, dev);
	free_irq(bp->rx_dma_intr, dev);

	iounmap((void __iomem *)dev->base_addr);
	iounmap(bp->tx_dma);
	iounmap(bp->rx_dma);

	macio_release_resources(mdev);

	free_netdev(dev);

	return 0;
}
static const struct of_device_id bmac_match[] =
{
	{
	.name		= "bmac",
	.data		= (void *)0,
	},
	{
	.type		= "network",
	.compatible	= "bmac+",
	.data		= (void *)1,
	},
	{}
};
MODULE_DEVICE_TABLE (of, bmac_match);
static struct macio_driver bmac_driver =
{
	.driver = {
		.name		= "bmac",
		.owner		= THIS_MODULE,
		.of_match_table	= bmac_match,
	},
	.probe		= bmac_probe,
	.remove		= bmac_remove,
#ifdef CONFIG_PM
	.suspend	= bmac_suspend,
	.resume		= bmac_resume,
#endif
};
static int __init bmac_init(void)
{
	if (bmac_emergency_rxbuf == NULL) {
		bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL);
		if (bmac_emergency_rxbuf == NULL)
			return -ENOMEM;
	}

	return macio_register_driver(&bmac_driver);
}

static void __exit bmac_exit(void)
{
	macio_unregister_driver(&bmac_driver);

	kfree(bmac_emergency_rxbuf);
	bmac_emergency_rxbuf = NULL;
}
MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
MODULE_DESCRIPTION("PowerMac BMAC ethernet driver.");
MODULE_LICENSE("GPL");

module_init(bmac_init);
module_exit(bmac_exit);