/*
 * Network device driver for the BMAC ethernet controller on
 * Apple Powermacs.  Assumes it's under a DBDMA controller.
 *
 * Copyright (C) 1998 Randy Gobbel.
 *
 * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to
 * dynamic procfs inode.
 */
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/crc32poly.h>
#include <linux/bitrev.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/macio.h>
#include <asm/irq.h>

#include "bmac.h"

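/*
 * trunc_page()/round_page() align an address down/up to a PAGE_SIZE
 * boundary; they look like historical helpers, and nothing below
 * appears to use them directly.
 */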
#define trunc_page(x)	((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
#define round_page(x)	trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))

/* switch to use multicast code lifted from sunhme driver */
#define SUNHME_MULTICAST

#define N_RX_RING	64
#define N_TX_RING	32
#define MAX_TX_ACTIVE	1
#define ETHERCRC	4
#define ETHERMINPACKET	64
#define ETHERMTU	1500

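/*
 * Receive buffers hold a full frame: MTU + 14-byte Ethernet header +
 * 4-byte FCS, plus 2 bytes of slack so that skb_reserve(skb, 2) below
 * can align the IP header on a 4-byte boundary.
 */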
#define RX_BUFLEN	(ETHERMTU + 14 + ETHERCRC + 2)
#define TX_TIMEOUT	HZ	/* 1 second */

/* Bits in transmit DMA status */
#define TX_DMA_ERR	0x80

#define XXDEBUG(args)

struct bmac_data {
	/* volatile struct bmac *bmac; */
	struct sk_buff_head *queue;
	volatile struct dbdma_regs __iomem *tx_dma;
	int tx_dma_intr;
	volatile struct dbdma_regs __iomem *rx_dma;
	int rx_dma_intr;
	volatile struct dbdma_cmd *tx_cmds;	/* xmit dma command list */
	volatile struct dbdma_cmd *rx_cmds;	/* recv dma command list */
	struct macio_dev *mdev;
	int is_bmac_plus;
	struct sk_buff *rx_bufs[N_RX_RING];
	int rx_fill;
	int rx_empty;
	struct sk_buff *tx_bufs[N_TX_RING];
	int tx_fill;
	int tx_empty;
	unsigned char tx_fullup;
	struct timer_list tx_timeout;
	int timeout_active;
	int sleeping;
	int opened;
	unsigned short hash_use_count[64];
	unsigned short hash_table_mask[4];
	spinlock_t lock;
};

#if 0 /* Move that to ethtool */

typedef struct bmac_reg_entry {
	char *name;
	unsigned short reg_offset;
} bmac_reg_entry_t;

#define N_REG_ENTRIES 31

static bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = {
	{"MEMADD", MEMADD},
	{"MEMDATAHI", MEMDATAHI},
	{"MEMDATALO", MEMDATALO},
	{"TXPNTR", TXPNTR},
	{"RXPNTR", RXPNTR},
	{"IPG1", IPG1},
	{"IPG2", IPG2},
	{"ALIMIT", ALIMIT},
	{"SLOT", SLOT},
	{"PALEN", PALEN},
	{"PAPAT", PAPAT},
	{"TXSFD", TXSFD},
	{"JAM", JAM},
	{"TXCFG", TXCFG},
	{"TXMAX", TXMAX},
	{"TXMIN", TXMIN},
	{"PAREG", PAREG},
	{"DCNT", DCNT},
	{"NCCNT", NCCNT},
	{"NTCNT", NTCNT},
	{"EXCNT", EXCNT},
	{"LTCNT", LTCNT},
	{"TXSM", TXSM},
	{"RXCFG", RXCFG},
	{"RXMAX", RXMAX},
	{"RXMIN", RXMIN},
	{"FRCNT", FRCNT},
	{"AECNT", AECNT},
	{"FECNT", FECNT},
	{"RXSM", RXSM},
	{"RXCV", RXCV}
};

#endif

static unsigned char *bmac_emergency_rxbuf;

/*
 * Number of bytes of private data per BMAC: allow enough for
 * the rx and tx dma commands plus a branch dma command each,
 * and another 16 bytes to allow us to align the dma command
 * buffers on a 16 byte boundary.
 */
#define PRIV_BYTES	(sizeof(struct bmac_data) \
	+ (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
	+ sizeof(struct sk_buff_head))

static int bmac_open(struct net_device *dev);
static int bmac_close(struct net_device *dev);
static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
static void bmac_set_multicast(struct net_device *dev);
static void bmac_reset_and_enable(struct net_device *dev);
static void bmac_start_chip(struct net_device *dev);
static void bmac_init_chip(struct net_device *dev);
static void bmac_init_registers(struct net_device *dev);
static void bmac_enable_and_reset_chip(struct net_device *dev);
static int bmac_set_address(struct net_device *dev, void *addr);
static irqreturn_t bmac_misc_intr(int irq, void *dev_id);
static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
static void bmac_set_timeout(struct net_device *dev);
static void bmac_tx_timeout(struct timer_list *t);
static int bmac_output(struct sk_buff *skb, struct net_device *dev);
static void bmac_start(struct net_device *dev);

#define	DBDMA_SET(x)	( ((x) | (x) << 16) )
#define	DBDMA_CLEAR(x)	( (x) << 16)

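/*
 * DBDMA control-register writes use the top 16 bits as a mask selecting
 * which of the low 16 status/control bits to change: DBDMA_SET() sets
 * the named bits, DBDMA_CLEAR() clears them.  The registers themselves
 * are little-endian, hence the byte-reversed stwbrx/lwbrx accessors
 * below on this big-endian CPU.
 */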
static inline void
dbdma_st32(volatile __u32 __iomem *a, unsigned long x)
{
	__asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
}

static inline unsigned long
dbdma_ld32(volatile __u32 __iomem *a)
{
	__u32 swap;
	__asm__ volatile ("lwbrx %0,0,%1" :  "=r" (swap) : "r" (a));
	return swap;
}

static void
dbdma_continue(volatile struct dbdma_regs __iomem *dmap)
{
	dbdma_st32(&dmap->control,
		   DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD));
	eieio();
}

static void
dbdma_reset(volatile struct dbdma_regs __iomem *dmap)
{
	dbdma_st32(&dmap->control,
		   DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN));
	eieio();
	while (dbdma_ld32(&dmap->status) & RUN)
		eieio();
}

static void
dbdma_setcmd(volatile struct dbdma_cmd *cp,
	     unsigned short cmd, unsigned count, unsigned long addr,
	     unsigned long cmd_dep)
{
	out_le16(&cp->command, cmd);
	out_le16(&cp->req_count, count);
	out_le32(&cp->phy_addr, addr);
	out_le32(&cp->cmd_dep, cmd_dep);
	out_le16(&cp->xfer_status, 0);
	out_le16(&cp->res_count, 0);
}

static inline
void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data )
{
	out_le16((void __iomem *)dev->base_addr + reg_offset, data);
}

static inline
unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
{
	return in_le16((void __iomem *)dev->base_addr + reg_offset);
}

static void
bmac_enable_and_reset_chip(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;

	if (rd)
		dbdma_reset(rd);
	if (td)
		dbdma_reset(td);

	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1);
}

#define MIFDELAY	udelay(10)

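/*
 * The PHY management (MII) interface is bit-banged through MIFCSR.
 * Judging from the accessors below: bit 0 clocks the interface, bit 1
 * carries the outgoing data bit, bit 2 enables the output driver, and
 * bit 3 (tested with "& 8") carries the incoming data bit.
 */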
static unsigned int
bmac_mif_readbits(struct net_device *dev, int nb)
{
	unsigned int val = 0;

	while (--nb >= 0) {
		bmwrite(dev, MIFCSR, 0);
		MIFDELAY;
		if (bmread(dev, MIFCSR) & 8)
			val |= 1 << nb;
		bmwrite(dev, MIFCSR, 1);
		MIFDELAY;
	}
	bmwrite(dev, MIFCSR, 0);
	MIFDELAY;
	bmwrite(dev, MIFCSR, 1);
	MIFDELAY;
	return val;
}

static void
bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb)
{
	int b;

	while (--nb >= 0) {
		b = (val & (1 << nb))? 6: 4;
		bmwrite(dev, MIFCSR, b);
		MIFDELAY;
		bmwrite(dev, MIFCSR, b|1);
		MIFDELAY;
	}
}

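/*
 * bmac_mif_read()/bmac_mif_write() clock out a standard MII management
 * frame by hand: a preamble of 32 one-bits, a start/opcode nibble
 * (6 = read, 5 = write), the 10-bit phy/register address, a
 * turnaround, then 16 data bits (a read clocks in 17 bits so the
 * turnaround bit is consumed too).
 */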
static unsigned int
bmac_mif_read(struct net_device *dev, unsigned int addr)
{
	unsigned int val;

	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	bmac_mif_writebits(dev, ~0U, 32);
	bmac_mif_writebits(dev, 6, 4);
	bmac_mif_writebits(dev, addr, 10);
	bmwrite(dev, MIFCSR, 2);
	MIFDELAY;
	bmwrite(dev, MIFCSR, 1);
	MIFDELAY;
	val = bmac_mif_readbits(dev, 17);
	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	return val;
}

static void
bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val)
{
	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	bmac_mif_writebits(dev, ~0U, 32);
	bmac_mif_writebits(dev, 5, 4);
	bmac_mif_writebits(dev, addr, 10);
	bmac_mif_writebits(dev, 2, 2);
	bmac_mif_writebits(dev, val, 16);
	bmac_mif_writebits(dev, 3, 2);
}

static void
bmac_init_registers(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile unsigned short regValue;
	unsigned short *pWord16;
	int i;

	/* XXDEBUG(("bmac: enter init_registers\n")); */

	bmwrite(dev, RXRST, RxResetValue);
	bmwrite(dev, TXRST, TxResetBit);

	i = 100;
	do {
		--i;
		udelay(10000);
		regValue = bmread(dev, TXRST); /* wait for reset to clear..acknowledge */
	} while ((regValue & TxResetBit) && i > 0);

	if (!bp->is_bmac_plus) {
		regValue = bmread(dev, XCVRIF);
		regValue |= ClkBit | SerialMode | COLActiveLow;
		bmwrite(dev, XCVRIF, regValue);
		udelay(10000);
	}

	bmwrite(dev, RSEED, (unsigned short)0x1968);

	regValue = bmread(dev, XIFC);
	regValue |= TxOutputEnable;
	bmwrite(dev, XIFC, regValue);

	bmread(dev, PAREG);

	/* set collision counters to 0 */
	bmwrite(dev, NCCNT, 0);
	bmwrite(dev, NTCNT, 0);
	bmwrite(dev, EXCNT, 0);
	bmwrite(dev, LTCNT, 0);

	/* set rx counters to 0 */
	bmwrite(dev, FRCNT, 0);
	bmwrite(dev, LECNT, 0);
	bmwrite(dev, AECNT, 0);
	bmwrite(dev, FECNT, 0);
	bmwrite(dev, RXCV, 0);

	/* set tx fifo information */
	bmwrite(dev, TXTH, 4);	/* 4 octets before tx starts */

	bmwrite(dev, TXFIFOCSR, 0);	/* first disable txFIFO */
	bmwrite(dev, TXFIFOCSR, TxFIFOEnable );

	/* set rx fifo information */
	bmwrite(dev, RXFIFOCSR, 0);	/* first disable rxFIFO */
	bmwrite(dev, RXFIFOCSR, RxFIFOEnable );

	//bmwrite(dev, TXCFG, TxMACEnable);	/* TxNeverGiveUp maybe later */
	bmread(dev, STATUS);		/* read it just to clear it */

	/* zero out the chip Hash Filter registers */
	for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
	bmwrite(dev, BHASH3, bp->hash_table_mask[0]);	/* bits 15 - 0 */
	bmwrite(dev, BHASH2, bp->hash_table_mask[1]);	/* bits 31 - 16 */
	bmwrite(dev, BHASH1, bp->hash_table_mask[2]);	/* bits 47 - 32 */
	bmwrite(dev, BHASH0, bp->hash_table_mask[3]);	/* bits 63 - 48 */

	pWord16 = (unsigned short *)dev->dev_addr;
	bmwrite(dev, MADD0, *pWord16++);
	bmwrite(dev, MADD1, *pWord16++);
	bmwrite(dev, MADD2, *pWord16);

	bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);

	bmwrite(dev, INTDISABLE, EnableNormal);
}

#if 0
static void
bmac_disable_interrupts(struct net_device *dev)
{
	bmwrite(dev, INTDISABLE, DisableAll);
}

static void
bmac_enable_interrupts(struct net_device *dev)
{
	bmwrite(dev, INTDISABLE, EnableNormal);
}
#endif

static void
bmac_start_chip(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	unsigned short	oldConfig;

	/* enable rx dma channel */
	dbdma_continue(rd);

	oldConfig = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

	/* turn on rx plus any other bits already on (promiscuous possibly) */
	oldConfig = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
	udelay(20000);
}

static void
bmac_init_phy(struct net_device *dev)
{
	unsigned int addr;
	struct bmac_data *bp = netdev_priv(dev);

	printk(KERN_DEBUG "phy registers:");
	for (addr = 0; addr < 32; ++addr) {
		if ((addr & 7) == 0)
			printk(KERN_DEBUG);
		printk(KERN_CONT " %.4x", bmac_mif_read(dev, addr));
	}
	printk(KERN_CONT "\n");

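	/*
	 * On BMAC+ the PHY autonegotiates: advertise whatever the status
	 * register (MII reg 1) says we can do, and if the advertisement
	 * (reg 4) or the autoneg-enable bit (0x1000 in the control
	 * register) is stale, write 0x1200 to enable and restart
	 * autonegotiation.
	 */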
	if (bp->is_bmac_plus) {
		unsigned int capable, ctrl;

		ctrl = bmac_mif_read(dev, 0);
		capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1;
		if (bmac_mif_read(dev, 4) != capable ||
		    (ctrl & 0x1000) == 0) {
			bmac_mif_write(dev, 4, capable);
			bmac_mif_write(dev, 0, 0x1200);
		} else
			bmac_mif_write(dev, 0, 0x1000);
	}
}

static void bmac_init_chip(struct net_device *dev)
{
	bmac_init_phy(dev);
	bmac_init_registers(dev);
}

#ifdef CONFIG_PM
static int bmac_suspend(struct macio_dev *mdev, pm_message_t state)
{
	struct net_device* dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;
	unsigned short config;
	int i;

	netif_device_detach(dev);
	/* prolly should wait for dma to finish & turn off the chip */
	spin_lock_irqsave(&bp->lock, flags);
	if (bp->timeout_active) {
		del_timer(&bp->tx_timeout);
		bp->timeout_active = 0;
	}
	disable_irq(dev->irq);
	disable_irq(bp->tx_dma_intr);
	disable_irq(bp->rx_dma_intr);
	bp->sleeping = 1;
	spin_unlock_irqrestore(&bp->lock, flags);
	if (bp->opened) {
		volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
		volatile struct dbdma_regs __iomem *td = bp->tx_dma;

		config = bmread(dev, RXCFG);
		bmwrite(dev, RXCFG, (config & ~RxMACEnable));
		config = bmread(dev, TXCFG);
		bmwrite(dev, TXCFG, (config & ~TxMACEnable));
		bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
		/* disable rx and tx dma */
		rd->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
		td->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
		/* free some skb's */
		for (i=0; i<N_RX_RING; i++) {
			if (bp->rx_bufs[i] != NULL) {
				dev_kfree_skb(bp->rx_bufs[i]);
				bp->rx_bufs[i] = NULL;
			}
		}
		for (i = 0; i<N_TX_RING; i++) {
			if (bp->tx_bufs[i] != NULL) {
				dev_kfree_skb(bp->tx_bufs[i]);
				bp->tx_bufs[i] = NULL;
			}
		}
	}
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
	return 0;
}

static int bmac_resume(struct macio_dev *mdev)
{
	struct net_device* dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);

	/* see if this is enough */
	if (bp->opened)
		bmac_reset_and_enable(dev);

	enable_irq(dev->irq);
	enable_irq(bp->tx_dma_intr);
	enable_irq(bp->rx_dma_intr);
	netif_device_attach(dev);

	return 0;
}
#endif /* CONFIG_PM */

static int bmac_set_address(struct net_device *dev, void *addr)
{
	struct bmac_data *bp = netdev_priv(dev);
	unsigned char *p = addr;
	unsigned short *pWord16;
	unsigned long flags;
	int i;

	XXDEBUG(("bmac: enter set_address\n"));
	spin_lock_irqsave(&bp->lock, flags);

	for (i = 0; i < 6; ++i) {
		dev->dev_addr[i] = p[i];
	}
	/* load up the hardware address */
	pWord16  = (unsigned short *)dev->dev_addr;
	bmwrite(dev, MADD0, *pWord16++);
	bmwrite(dev, MADD1, *pWord16++);
	bmwrite(dev, MADD2, *pWord16);

	spin_unlock_irqrestore(&bp->lock, flags);
	XXDEBUG(("bmac: exit set_address\n"));
	return 0;
}

static inline void bmac_set_timeout(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);
	if (bp->timeout_active)
		del_timer(&bp->tx_timeout);
	bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
	add_timer(&bp->tx_timeout);
	bp->timeout_active = 1;
	spin_unlock_irqrestore(&bp->lock, flags);
}

static void
bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
{
	void *vaddr;
	unsigned long baddr;
	unsigned long len;

	len = skb->len;
	vaddr = skb->data;
	baddr = virt_to_bus(vaddr);

	dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0);
}

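/*
 * If no skb is available for a receive slot, the slot is pointed at the
 * shared bmac_emergency_rxbuf instead, so the DMA ring keeps running
 * and the frame is simply dropped rather than wedging the receiver.
 */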
static void
bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
{
	unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf;

	dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN,
		     virt_to_bus(addr), 0);
}

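/*
 * Both rings are one slot longer than advertised: the extra slot holds
 * a DBDMA NOP with a branch-always back to slot 0, turning the linear
 * command array into a ring the channel can loop around.
 */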
static void
bmac_init_tx_ring(struct bmac_data *bp)
{
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;

	memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd));

	bp->tx_empty = 0;
	bp->tx_fill = 0;
	bp->tx_fullup = 0;

	/* put a branch at the end of the tx command list */
	dbdma_setcmd(&bp->tx_cmds[N_TX_RING],
		     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds));

	/* reset tx dma */
	dbdma_reset(td);
	out_le32(&td->wait_sel, 0x00200020);
	out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds));
}

static int
bmac_init_rx_ring(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	int i;
	struct sk_buff *skb;

	/* initialize list of sk_buffs for receiving and set up recv dma */
	memset((char *)bp->rx_cmds, 0,
	       (N_RX_RING + 1) * sizeof(struct dbdma_cmd));
	for (i = 0; i < N_RX_RING; i++) {
		if ((skb = bp->rx_bufs[i]) == NULL) {
			bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
			if (skb != NULL)
				skb_reserve(skb, 2);
		}
		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
	}

	bp->rx_empty = 0;
	bp->rx_fill = i;

	/* Put a branch back to the beginning of the receive command list */
	dbdma_setcmd(&bp->rx_cmds[N_RX_RING],
		     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds));

	/* start rx dma */
	dbdma_reset(rd);
	out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds));

	return 1;
}

static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	int i;

	/* see if there's a free slot in the tx ring */
	/* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */
	/* 	     bp->tx_empty, bp->tx_fill)); */
	i = bp->tx_fill + 1;
	if (i >= N_TX_RING)
		i = 0;
	if (i == bp->tx_empty) {
		netif_stop_queue(dev);
		bp->tx_fullup = 1;
		XXDEBUG(("bmac_transmit_packet: tx ring full\n"));
		return -1;		/* can't take it at the moment */
	}

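	/*
	 * Write a STOP into the next slot before building the real
	 * command in this one, so the channel halts there instead of
	 * running off the end of the valid commands.
	 */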
	dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0);

	bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]);

	bp->tx_bufs[bp->tx_fill] = skb;
	bp->tx_fill = i;

	dev->stats.tx_bytes += skb->len;

	dbdma_continue(td);

	return 0;
}

static int rxintcount;

static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_cmd *cp;
	int i, nb, stat;
	struct sk_buff *skb;
	unsigned int residual;
	int last;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (++rxintcount < 10) {
		XXDEBUG(("bmac_rxdma_intr\n"));
	}

	last = -1;
	i = bp->rx_empty;

	while (1) {
		cp = &bp->rx_cmds[i];
		stat = le16_to_cpu(cp->xfer_status);
		residual = le16_to_cpu(cp->res_count);
		if ((stat & ACTIVE) == 0)
			break;

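		/*
		 * res_count holds what the channel did NOT transfer;
		 * subtracting it (and the 2 bytes reserved at alloc
		 * time) gives the received length, FCS still included.
		 */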
		nb = RX_BUFLEN - residual - 2;
		if (nb < (ETHERMINPACKET - ETHERCRC)) {
			skb = NULL;
			dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
		} else {
			skb = bp->rx_bufs[i];
			bp->rx_bufs[i] = NULL;
		}
		if (skb != NULL) {
			nb -= ETHERCRC;
			skb_put(skb, nb);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			++dev->stats.rx_packets;
			dev->stats.rx_bytes += nb;
		} else {
			++dev->stats.rx_dropped;
		}
		if ((skb = bp->rx_bufs[i]) == NULL) {
			bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
			if (skb != NULL)
				skb_reserve(bp->rx_bufs[i], 2);
		}
		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
		cp->res_count = cpu_to_le16(0);
		cp->xfer_status = cpu_to_le16(0);
		last = i;
		if (++i >= N_RX_RING) i = 0;
	}

	if (last != -1) {
		bp->rx_fill = last;
		bp->rx_empty = i;
	}

	dbdma_continue(rd);
	spin_unlock_irqrestore(&bp->lock, flags);

	if (rxintcount < 10) {
		XXDEBUG(("bmac_rxdma_intr done\n"));
	}
	return IRQ_HANDLED;
}

static int txintcount;

static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_cmd *cp;
	int stat;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (txintcount++ < 10) {
		XXDEBUG(("bmac_txdma_intr\n"));
	}

	/*     del_timer(&bp->tx_timeout); */
	/*     bp->timeout_active = 0; */

	while (1) {
		cp = &bp->tx_cmds[bp->tx_empty];
		stat = le16_to_cpu(cp->xfer_status);
		if (txintcount < 10) {
			XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat));
		}
		if (!(stat & ACTIVE)) {
			/*
			 * status field might not have been filled by DBDMA
			 */
			if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr)))
				break;
		}

		if (bp->tx_bufs[bp->tx_empty]) {
			++dev->stats.tx_packets;
			dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]);
		}
		bp->tx_bufs[bp->tx_empty] = NULL;
		bp->tx_fullup = 0;
		netif_wake_queue(dev);
		if (++bp->tx_empty >= N_TX_RING)
			bp->tx_empty = 0;
		if (bp->tx_empty == bp->tx_fill)
			break;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (txintcount < 10) {
		XXDEBUG(("bmac_txdma_intr done->bmac_start\n"));
	}

	bmac_start(dev);
	return IRQ_HANDLED;
}

#ifndef SUNHME_MULTICAST
/* Real fast bit-reversal algorithm, 6-bit values */
static int reverse6[64] = {
	0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
	0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
	0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
	0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
	0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
	0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
	0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
	0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
};

static unsigned int
crc416(unsigned int curval, unsigned short nxtval)
{
	register unsigned int counter, cur = curval, next = nxtval;
	register int high_crc_set, low_data_set;

	/* Swap bytes */
	next = ((next & 0x00FF) << 8) | (next >> 8);

	/* Compute bit-by-bit */
	for (counter = 0; counter < 16; ++counter) {
		/* is high CRC bit set? */
		if ((cur & 0x80000000) == 0) high_crc_set = 0;
		else high_crc_set = 1;

		cur = cur << 1;

		if ((next & 0x0001) == 0) low_data_set = 0;
		else low_data_set = 1;

		next = next >> 1;

		/* do the XOR */
		if (high_crc_set ^ low_data_set) cur = cur ^ CRC32_POLY_BE;
	}
	return cur;
}

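/*
 * bmac_crc() folds the 48-bit station address into the MSB-first
 * Ethernet CRC-32 (CRC32_POLY_BE) 16 bits at a time; only the low
 * 6 bits of the result are used, selecting one of the 64 hash-filter
 * bits.
 */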
static unsigned int
bmac_crc(unsigned short *address)
{
	unsigned int newcrc;

	XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
	newcrc = crc416(0xffffffff, *address);	/* address bits 47 - 32 */
	newcrc = crc416(newcrc, address[1]);	/* address bits 31 - 16 */
	newcrc = crc416(newcrc, address[2]);	/* address bits 15 - 0 */

	return(newcrc);
}

/*
 * Add requested mcast addr to BMac's hash table filter.
 */

static void
bmac_addhash(struct bmac_data *bp, unsigned char *addr)
{
	unsigned int crc;
	unsigned short mask;

	if (!(*addr)) return;
	crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
	crc = reverse6[crc];	/* Hyperfast bit-reversing algorithm */
	if (bp->hash_use_count[crc]++) return;	    /* This bit is already set */
	mask = crc % 16;
	mask = (unsigned short)1 << mask;
	bp->hash_table_mask[crc/16] |= mask;
}

static void
bmac_removehash(struct bmac_data *bp, unsigned char *addr)
{
	unsigned int crc;
	unsigned short mask;

	/* Now, delete the address from the filter copy, as indicated */
	crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
	crc = reverse6[crc];	/* Hyperfast bit-reversing algorithm */
	if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
	if (--bp->hash_use_count[crc]) return;	/* That bit is still in use */
	mask = crc % 16;
	mask = ((unsigned short)1 << mask) ^ 0xffff; /* To turn off bit */
	bp->hash_table_mask[crc/16] &= mask;
}

/*
 * Sync the adapter with the software copy of the multicast mask
 * (logical address filter).
 */

static void
bmac_rx_off(struct net_device *dev)
{
	unsigned short rx_cfg;

	rx_cfg = bmread(dev, RXCFG);
	rx_cfg &= ~RxMACEnable;
	bmwrite(dev, RXCFG, rx_cfg);
	do {
		rx_cfg = bmread(dev, RXCFG);
	} while (rx_cfg & RxMACEnable);
}

unsigned short
bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable)
{
	unsigned short rx_cfg;

	rx_cfg = bmread(dev, RXCFG);
	rx_cfg |= RxMACEnable;
	if (hash_enable) rx_cfg |= RxHashFilterEnable;
	else rx_cfg &= ~RxHashFilterEnable;
	if (promisc_enable) rx_cfg |= RxPromiscEnable;
	else rx_cfg &= ~RxPromiscEnable;
	bmwrite(dev, RXRST, RxResetValue);
	bmwrite(dev, RXFIFOCSR, 0);	/* first disable rxFIFO */
	bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
	bmwrite(dev, RXCFG, rx_cfg );
	return rx_cfg;
}

static void
bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp)
{
	bmwrite(dev, BHASH3, bp->hash_table_mask[0]);	/* bits 15 - 0 */
	bmwrite(dev, BHASH2, bp->hash_table_mask[1]);	/* bits 31 - 16 */
	bmwrite(dev, BHASH1, bp->hash_table_mask[2]);	/* bits 47 - 32 */
	bmwrite(dev, BHASH0, bp->hash_table_mask[3]);	/* bits 63 - 48 */
}

#if 0
static void
bmac_add_multi(struct net_device *dev,
	       struct bmac_data *bp, unsigned char *addr)
{
	/* XXDEBUG(("bmac: enter bmac_add_multi\n")); */
	bmac_addhash(bp, addr);
	bmac_rx_off(dev);
	bmac_update_hash_table_mask(dev, bp);
	bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
	/* XXDEBUG(("bmac: exit bmac_add_multi\n")); */
}

static void
bmac_remove_multi(struct net_device *dev,
		  struct bmac_data *bp, unsigned char *addr)
{
	bmac_removehash(bp, addr);
	bmac_rx_off(dev);
	bmac_update_hash_table_mask(dev, bp);
	bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
}
#endif

/* Set or clear the multicast filter for this adaptor.
    num_addrs == -1	Promiscuous mode, receive all packets
    num_addrs == 0	Normal mode, clear multicast list
    num_addrs > 0	Multicast mode, receive normal and MC packets, and do
			best-effort filtering.
 */
static void bmac_set_multicast(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct bmac_data *bp = netdev_priv(dev);
	int num_addrs = netdev_mc_count(dev);
	unsigned short rx_cfg;
	int i;

	if (bp->sleeping)
		return;

	XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));

	if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
		for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
		bmac_update_hash_table_mask(dev, bp);
		rx_cfg = bmac_rx_on(dev, 1, 0);
		XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n", rx_cfg));
	} else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) {
		rx_cfg = bmread(dev, RXCFG);
		rx_cfg |= RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);
		rx_cfg = bmac_rx_on(dev, 0, 1);
		XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg));
	} else {
		for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
		for (i=0; i<64; i++) bp->hash_use_count[i] = 0;
		if (num_addrs == 0) {
			rx_cfg = bmac_rx_on(dev, 0, 0);
			XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
		} else {
			netdev_for_each_mc_addr(ha, dev)
				bmac_addhash(bp, ha->addr);
			bmac_update_hash_table_mask(dev, bp);
			rx_cfg = bmac_rx_on(dev, 1, 0);
			XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
		}
	}
	/* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */
}

#else /* ifdef SUNHME_MULTICAST */

/* The version of set_multicast below was lifted from sunhme.c */

static void bmac_set_multicast(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	unsigned short rx_cfg;
	u32 crc;

	if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
		bmwrite(dev, BHASH0, 0xffff);
		bmwrite(dev, BHASH1, 0xffff);
		bmwrite(dev, BHASH2, 0xffff);
		bmwrite(dev, BHASH3, 0xffff);
	} else if(dev->flags & IFF_PROMISC) {
		rx_cfg = bmread(dev, RXCFG);
		rx_cfg |= RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);
	} else {
		u16 hash_table[4] = { 0 };

		rx_cfg = bmread(dev, RXCFG);
		rx_cfg &= ~RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);

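		/*
		 * Each address selects one of 64 filter bits: the top
		 * 6 bits of the little-endian CRC-32 (what remains
		 * after the >> 26) pick a BHASH register (crc >> 4)
		 * and a bit within it (crc & 0xf).
		 */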
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(6, ha->addr);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
		bmwrite(dev, BHASH0, hash_table[0]);
		bmwrite(dev, BHASH1, hash_table[1]);
		bmwrite(dev, BHASH2, hash_table[2]);
		bmwrite(dev, BHASH3, hash_table[3]);
	}
}
#endif /* SUNHME_MULTICAST */

static int miscintcount;

static irqreturn_t bmac_misc_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	unsigned int status = bmread(dev, STATUS);

	if (miscintcount++ < 10) {
		XXDEBUG(("bmac_misc_intr\n"));
	}
	/* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */
	/*     bmac_txdma_intr_inner(irq, dev_id); */
	/*   if (status & FrameReceived) dev->stats.rx_dropped++; */
	if (status & RxErrorMask) dev->stats.rx_errors++;
	if (status & RxCRCCntExp) dev->stats.rx_crc_errors++;
	if (status & RxLenCntExp) dev->stats.rx_length_errors++;
	if (status & RxOverFlow) dev->stats.rx_over_errors++;
	if (status & RxAlignCntExp) dev->stats.rx_frame_errors++;
	/*   if (status & FrameSent) dev->stats.tx_dropped++; */
	if (status & TxErrorMask) dev->stats.tx_errors++;
	if (status & TxUnderrun) dev->stats.tx_fifo_errors++;
	if (status & TxNormalCollExp) dev->stats.collisions++;
	return IRQ_HANDLED;
}

/*
 * Procedure for reading EEPROM
 */
#define SROMAddressLength	5
#define DataInOn		0x0008
#define DataInOff		0x0000
#define Clk			0x0002
#define ChipSelect		0x0001
#define SDIShiftCount		3
#define SD0ShiftCount		2
#define DelayValue		1000	/* number of microseconds */
#define SROMStartOffset		10	/* this is in words */
#define SROMReadCount		3	/* number of words to read from SROM */
#define SROMAddressBits		6
#define EnetAddressOffset	20

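/*
 * The station address lives in what appears to be a 93C46-style serial
 * EEPROM, bit-banged through SROMCSR: bit 0 (ChipSelect) asserts chip
 * select, bit 1 (Clk) is the serial clock, and per the shift-count
 * defines above, bit 3 shifts data into the part while bit 2 carries
 * data back out.
 */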
static unsigned char
bmac_clock_out_bit(struct net_device *dev)
{
	unsigned short data;
	unsigned short val;

	bmwrite(dev, SROMCSR, ChipSelect | Clk);
	udelay(DelayValue);

	data = bmread(dev, SROMCSR);
	udelay(DelayValue);
	val = (data >> SD0ShiftCount) & 1;

	bmwrite(dev, SROMCSR, ChipSelect);
	udelay(DelayValue);

	return val;
}

static void
bmac_clock_in_bit(struct net_device *dev, unsigned int val)
{
	unsigned short data;

	if (val != 0 && val != 1) return;

	data = (val << SDIShiftCount);
	bmwrite(dev, SROMCSR, data | ChipSelect );
	udelay(DelayValue);

	bmwrite(dev, SROMCSR, data | ChipSelect | Clk );
	udelay(DelayValue);

	bmwrite(dev, SROMCSR, data | ChipSelect);
	udelay(DelayValue);
}

static void
reset_and_select_srom(struct net_device *dev)
{
	/* first reset */
	bmwrite(dev, SROMCSR, 0);
	udelay(DelayValue);

	/* send it the read command (110) */
	bmac_clock_in_bit(dev, 1);
	bmac_clock_in_bit(dev, 1);
	bmac_clock_in_bit(dev, 0);
}

static unsigned short
read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len)
{
	unsigned short data, val;
	int i;

	/* send out the address we want to read from */
	for (i = 0; i < addr_len; i++) {
		val = addr >> (addr_len-i-1);
		bmac_clock_in_bit(dev, val & 1);
	}

	/* Now read in the 16-bit data */
	data = 0;
	for (i = 0; i < 16; i++) {
		val = bmac_clock_out_bit(dev);
		data <<= 1;
		data |= val;
	}
	bmwrite(dev, SROMCSR, 0);

	return data;
}

/*
 * It looks like Cogent and SMC use different methods for calculating
 * checksums. What a pain..
 */

static int
bmac_verify_checksum(struct net_device *dev)
{
	unsigned short data, storedCS;

	reset_and_select_srom(dev);
	data = read_srom(dev, 3, SROMAddressBits);
	storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00);
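
	/*
	 * Note: the stored checksum is byte-swapped into storedCS but
	 * never compared against anything, so verification is
	 * effectively a no-op and always reports success.
	 */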
	return 0;
}

static void
bmac_get_station_address(struct net_device *dev, unsigned char *ea)
{
	int i;
	unsigned short data;

	/* 3 SROM words of 2 bytes each cover the 6-byte address */
	for (i = 0; i < 3; i++) {
		reset_and_select_srom(dev);
		data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
		ea[2*i]   = bitrev8(data & 0x0ff);
		ea[2*i+1] = bitrev8((data >> 8) & 0x0ff);
	}
}

static void bmac_reset_and_enable(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;
	struct sk_buff *skb;
	unsigned char *data;

	spin_lock_irqsave(&bp->lock, flags);
	bmac_enable_and_reset_chip(dev);
	bmac_init_tx_ring(bp);
	bmac_init_rx_ring(dev);
	bmac_init_chip(dev);
	bmac_start_chip(dev);
	bmwrite(dev, INTDISABLE, EnableNormal);
	bp->sleeping = 0;

	/*
	 * It seems that the bmac can't receive until it's transmitted
	 * a packet.  So we give it a dummy packet to transmit.
	 */
	skb = netdev_alloc_skb(dev, ETHERMINPACKET);
	if (skb != NULL) {
		data = skb_put_zero(skb, ETHERMINPACKET);
		memcpy(data, dev->dev_addr, ETH_ALEN);
		memcpy(data + ETH_ALEN, dev->dev_addr, ETH_ALEN);
		bmac_transmit_packet(skb, dev);
	}
	spin_unlock_irqrestore(&bp->lock, flags);
}

static const struct ethtool_ops bmac_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
};

static const struct net_device_ops bmac_netdev_ops = {
	.ndo_open		= bmac_open,
	.ndo_stop		= bmac_close,
	.ndo_start_xmit		= bmac_output,
	.ndo_set_rx_mode	= bmac_set_multicast,
	.ndo_set_mac_address	= bmac_set_address,
	.ndo_validate_addr	= eth_validate_addr,
};

static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
	int j, rev, ret;
	struct bmac_data *bp;
	const unsigned char *prop_addr;
	unsigned char addr[6];
	struct net_device *dev;
	int is_bmac_plus = ((int)match->data) != 0;

	if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
		printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n");
		return -ENODEV;
	}
	prop_addr = of_get_property(macio_get_of_node(mdev),
				    "mac-address", NULL);
	if (prop_addr == NULL) {
		prop_addr = of_get_property(macio_get_of_node(mdev),
					    "local-mac-address", NULL);
		if (prop_addr == NULL) {
			printk(KERN_ERR "BMAC: Can't get mac-address\n");
			return -ENODEV;
		}
	}
	memcpy(addr, prop_addr, sizeof(addr));

	dev = alloc_etherdev(PRIV_BYTES);
	if (!dev)
		return -ENOMEM;

	bp = netdev_priv(dev);
	SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
	macio_set_drvdata(mdev, dev);

	bp->mdev = mdev;
	spin_lock_init(&bp->lock);

	if (macio_request_resources(mdev, "bmac")) {
		printk(KERN_ERR "BMAC: can't request IO resource !\n");
		goto out_free;
	}

	dev->base_addr = (unsigned long)
		ioremap(macio_resource_start(mdev, 0), macio_resource_len(mdev, 0));
	if (dev->base_addr == 0)
		goto out_release;

	dev->irq = macio_irq(mdev, 0);

	bmac_enable_and_reset_chip(dev);
	bmwrite(dev, INTDISABLE, DisableAll);

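	/*
	 * Some firmware hands over the MAC address bit-reversed; the
	 * heuristic below treats addresses beginning 00:A0 as stored
	 * that way and flips every byte back before use.
	 */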
	rev = addr[0] == 0 && addr[1] == 0xA0;
	for (j = 0; j < 6; ++j)
		dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];

	/* Enable chip without interrupts for now */
	bmac_enable_and_reset_chip(dev);
	bmwrite(dev, INTDISABLE, DisableAll);

	dev->netdev_ops = &bmac_netdev_ops;
	dev->ethtool_ops = &bmac_ethtool_ops;

	bmac_get_station_address(dev, addr);
	if (bmac_verify_checksum(dev) != 0)
		goto err_out_iounmap;

	bp->is_bmac_plus = is_bmac_plus;
	bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1));
	if (!bp->tx_dma)
		goto err_out_iounmap;
	bp->tx_dma_intr = macio_irq(mdev, 1);
	bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2));
	if (!bp->rx_dma)
		goto err_out_iounmap_tx;
	bp->rx_dma_intr = macio_irq(mdev, 2);

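	/*
	 * Carve the DMA command lists and the tx queue head out of the
	 * PRIV_BYTES area that alloc_etherdev(PRIV_BYTES) reserved
	 * after struct bmac_data; DBDMA_ALIGN gives the command lists
	 * the 16-byte alignment the DBDMA engine requires.
	 */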
	bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1);
	bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1;

	bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
	skb_queue_head_init(bp->queue);

	timer_setup(&bp->tx_timeout, bmac_tx_timeout, 0);

	ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
		goto err_out_iounmap_rx;
	}
	ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr);
		goto err_out_irq0;
	}
	ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr);
		goto err_out_irq1;
	}

	/* Mask chip interrupts and disable chip, will be
	 * re-enabled on open()
	 */
	disable_irq(dev->irq);
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);

	if (register_netdev(dev) != 0) {
		printk(KERN_ERR "BMAC: Ethernet registration failed\n");
		goto err_out_irq2;
	}

	printk(KERN_INFO "%s: BMAC%s at %pM",
	       dev->name, (is_bmac_plus ? "+" : ""), dev->dev_addr);
	XXDEBUG((", base_addr=%#0lx", dev->base_addr));
	printk("\n");

	return 0;

err_out_irq2:
	free_irq(bp->rx_dma_intr, dev);
err_out_irq1:
	free_irq(bp->tx_dma_intr, dev);
err_out_irq0:
	free_irq(dev->irq, dev);
err_out_iounmap_rx:
	iounmap(bp->rx_dma);
err_out_iounmap_tx:
	iounmap(bp->tx_dma);
err_out_iounmap:
	iounmap((void __iomem *)dev->base_addr);
out_release:
	macio_release_resources(mdev);
out_free:
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
	free_netdev(dev);

	return -ENODEV;
}

static int bmac_open(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);

	/* XXDEBUG(("bmac: enter open\n")); */
	/* reset the chip */
	bp->opened = 1;
	bmac_reset_and_enable(dev);
	enable_irq(dev->irq);
	return 0;
}

static int bmac_close(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	unsigned short config;
	int i;

	bp->sleeping = 1;

	/* disable rx and tx */
	config = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, (config & ~RxMACEnable));

	config = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, (config & ~TxMACEnable));

	bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */

	/* disable rx and tx dma */
	rd->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
	td->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */

	/* free some skb's */
	XXDEBUG(("bmac: free rx bufs\n"));
	for (i=0; i<N_RX_RING; i++) {
		if (bp->rx_bufs[i] != NULL) {
			dev_kfree_skb(bp->rx_bufs[i]);
			bp->rx_bufs[i] = NULL;
		}
	}
	XXDEBUG(("bmac: free tx bufs\n"));
	for (i = 0; i<N_TX_RING; i++) {
		if (bp->tx_bufs[i] != NULL) {
			dev_kfree_skb(bp->tx_bufs[i]);
			bp->tx_bufs[i] = NULL;
		}
	}
	XXDEBUG(("bmac: all bufs freed\n"));

	bp->opened = 0;
	disable_irq(dev->irq);
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);

	return 0;
}

static void
bmac_start(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	int i;
	struct sk_buff *skb;
	unsigned long flags;

	if (bp->sleeping)
		return;

	spin_lock_irqsave(&bp->lock, flags);
	while (1) {
		i = bp->tx_fill + 1;
		if (i >= N_TX_RING)
			i = 0;
		if (i == bp->tx_empty)
			break;
		skb = skb_dequeue(bp->queue);
		if (skb == NULL)
			break;
		bmac_transmit_packet(skb, dev);
	}
	spin_unlock_irqrestore(&bp->lock, flags);
}

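/*
 * Transmit path: bmac_output() only queues the skb; bmac_start() (also
 * called from the tx-done interrupt) drains the queue into the DMA
 * ring, so a full ring never blocks the caller.
 */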
static int
bmac_output(struct sk_buff *skb, struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);

	skb_queue_tail(bp->queue, skb);
	bmac_start(dev);
	return NETDEV_TX_OK;
}

static void bmac_tx_timeout(struct timer_list *t)
{
	struct bmac_data *bp = from_timer(bp, t, tx_timeout);
	struct net_device *dev = macio_get_drvdata(bp->mdev);
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_cmd *cp;
	unsigned long flags;
	unsigned short config, oldConfig;
	int i;

	XXDEBUG(("bmac: tx_timeout called\n"));
	spin_lock_irqsave(&bp->lock, flags);
	bp->timeout_active = 0;

	/* update various counters */
	/*     bmac_handle_misc_intrs(bp, 0); */

	cp = &bp->tx_cmds[bp->tx_empty];
	/*	XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */
	/* 	   le32_to_cpu(td->status), le16_to_cpu(cp->xfer_status), bp->tx_bad_runt, */
	/* 	   mb->pr, mb->xmtfs, mb->fifofc)); */

	/* turn off both tx and rx and reset the chip */
	config = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, (config & ~RxMACEnable));
	config = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, (config & ~TxMACEnable));
	out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
	printk(KERN_ERR "bmac: transmit timeout - resetting\n");
	bmac_enable_and_reset_chip(dev);

	/* restart rx dma */
	cp = bus_to_virt(le32_to_cpu(rd->cmdptr));
	out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
	out_le16(&cp->xfer_status, 0);
	out_le32(&rd->cmdptr, virt_to_bus(cp));
	out_le32(&rd->control, DBDMA_SET(RUN|WAKE));

	/* fix up the transmit side */
	XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n",
		 bp->tx_empty, bp->tx_fill, bp->tx_fullup));
	i = bp->tx_empty;
	++dev->stats.tx_errors;
	if (i != bp->tx_fill) {
		dev_kfree_skb(bp->tx_bufs[i]);
		bp->tx_bufs[i] = NULL;
		if (++i >= N_TX_RING) i = 0;
		bp->tx_empty = i;
	}
	bp->tx_fullup = 0;
	netif_wake_queue(dev);
	if (i != bp->tx_fill) {
		cp = &bp->tx_cmds[i];
		out_le16(&cp->xfer_status, 0);
		out_le16(&cp->command, OUTPUT_LAST);
		out_le32(&td->cmdptr, virt_to_bus(cp));
		out_le32(&td->control, DBDMA_SET(RUN));
		/* 	bmac_set_timeout(dev); */
		XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i));
	}

	/* turn it back on */
	oldConfig = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
	oldConfig = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

	spin_unlock_irqrestore(&bp->lock, flags);
}

#if 0
static void dump_dbdma(volatile struct dbdma_cmd *cp, int count)
{
	int i, *ip;

	for (i = 0; i < count; i++) {
		ip = (int *)(cp+i);

		printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n",
		       le32_to_cpup(ip+0),
		       le32_to_cpup(ip+1),
		       le32_to_cpup(ip+2),
		       le32_to_cpup(ip+3));
	}
}
#endif

#if 0
static int
bmac_proc_info(char *buffer, char **start, off_t offset, int length)
{
	int len = 0;
	off_t pos = 0;
	off_t begin = 0;
	int i;

	if (bmac_devs == NULL)
		return -ENOSYS;

	len += sprintf(buffer, "BMAC counters & registers\n");

	for (i = 0; i < N_REG_ENTRIES; i++) {
		len += sprintf(buffer + len, "%s: %#08x\n",
			       reg_entries[i].name,
			       bmread(bmac_devs, reg_entries[i].reg_offset));
		pos = begin + len;

		if (pos < offset) {
			len = 0;
			begin = pos;
		}
		if (pos > offset+length) break;
	}

	*start = buffer + (offset - begin);
	len -= (offset - begin);

	if (len > length) len = length;

	return len;
}
#endif

static int bmac_remove(struct macio_dev *mdev)
{
	struct net_device *dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);

	unregister_netdev(dev);

	free_irq(dev->irq, dev);
	free_irq(bp->tx_dma_intr, dev);
	free_irq(bp->rx_dma_intr, dev);

	iounmap((void __iomem *)dev->base_addr);
	iounmap(bp->tx_dma);
	iounmap(bp->rx_dma);

	macio_release_resources(mdev);

	free_netdev(dev);

	return 0;
}

static const struct of_device_id bmac_match[] =
{
	{
	.name 		= "bmac",
	.data		= (void *)0,
	},
	{
	.type		= "network",
	.compatible	= "bmac+",
	.data		= (void *)1,
	},
	{}
};
MODULE_DEVICE_TABLE (of, bmac_match);

static struct macio_driver bmac_driver =
{
	.driver = {
		.name 		= "bmac",
		.owner		= THIS_MODULE,
		.of_match_table	= bmac_match,
	},
	.probe		= bmac_probe,
	.remove		= bmac_remove,
#ifdef CONFIG_PM
	.suspend	= bmac_suspend,
	.resume		= bmac_resume,
#endif
};

static int __init bmac_init(void)
{
	if (bmac_emergency_rxbuf == NULL) {
		bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL);
		if (bmac_emergency_rxbuf == NULL)
			return -ENOMEM;
	}

	return macio_register_driver(&bmac_driver);
}

static void __exit bmac_exit(void)
{
	macio_unregister_driver(&bmac_driver);

	kfree(bmac_emergency_rxbuf);
	bmac_emergency_rxbuf = NULL;
}

MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
MODULE_DESCRIPTION("PowerMac BMAC ethernet driver.");
MODULE_LICENSE("GPL");

module_init(bmac_init);
module_exit(bmac_exit);