/*
 * drivers/net/ethernet/apple/bmac.c
 *
 * Network device driver for the BMAC ethernet controller on
 * Apple Powermacs. Assumes it's under a DBDMA controller.
 *
 * Copyright (C) 1998 Randy Gobbel.
 *
 * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to
 * dynamic procfs inode.
 */
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/bitrev.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/macio.h>
#include <asm/irq.h>

#include "bmac.h"

#define trunc_page(x)   ((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
#define round_page(x)   trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))

/*
 * CRC polynomial - used in working out multicast filter bits.
 */
#define ENET_CRCPOLY 0x04c11db7

/* switch to use multicast code lifted from sunhme driver */
#define SUNHME_MULTICAST

#define N_RX_RING       64
#define N_TX_RING       32
#define MAX_TX_ACTIVE   1
#define ETHERCRC        4
#define ETHERMINPACKET  64
#define ETHERMTU        1500
#define RX_BUFLEN       (ETHERMTU + 14 + ETHERCRC + 2)
#define TX_TIMEOUT      HZ      /* 1 second */

/* Bits in transmit DMA status */
#define TX_DMA_ERR      0x80

#define XXDEBUG(args)

struct bmac_data {
        /* volatile struct bmac *bmac; */
        struct sk_buff_head *queue;
        volatile struct dbdma_regs __iomem *tx_dma;
        int tx_dma_intr;
        volatile struct dbdma_regs __iomem *rx_dma;
        int rx_dma_intr;
        volatile struct dbdma_cmd *tx_cmds;     /* xmit dma command list */
        volatile struct dbdma_cmd *rx_cmds;     /* recv dma command list */
        struct macio_dev *mdev;
        int is_bmac_plus;
        struct sk_buff *rx_bufs[N_RX_RING];
        int rx_fill;
        int rx_empty;
        struct sk_buff *tx_bufs[N_TX_RING];
        int tx_fill;
        int tx_empty;
        unsigned char tx_fullup;
        struct timer_list tx_timeout;
        int timeout_active;
        int sleeping;
        int opened;
        unsigned short hash_use_count[64];
        unsigned short hash_table_mask[4];
        spinlock_t lock;
};

#if 0 /* Move that to ethtool */

typedef struct bmac_reg_entry {
        char *name;
        unsigned short reg_offset;
} bmac_reg_entry_t;

#define N_REG_ENTRIES 31

static bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = {
        {"MEMADD", MEMADD},
        {"MEMDATAHI", MEMDATAHI},
        {"MEMDATALO", MEMDATALO},
        {"TXPNTR", TXPNTR},
        {"RXPNTR", RXPNTR},
        {"IPG1", IPG1},
        {"IPG2", IPG2},
        {"ALIMIT", ALIMIT},
        {"SLOT", SLOT},
        {"PALEN", PALEN},
        {"PAPAT", PAPAT},
        {"TXSFD", TXSFD},
        {"JAM", JAM},
        {"TXCFG", TXCFG},
        {"TXMAX", TXMAX},
        {"TXMIN", TXMIN},
        {"PAREG", PAREG},
        {"DCNT", DCNT},
        {"NCCNT", NCCNT},
        {"NTCNT", NTCNT},
        {"EXCNT", EXCNT},
        {"LTCNT", LTCNT},
        {"TXSM", TXSM},
        {"RXCFG", RXCFG},
        {"RXMAX", RXMAX},
        {"RXMIN", RXMIN},
        {"FRCNT", FRCNT},
        {"AECNT", AECNT},
        {"FECNT", FECNT},
        {"RXSM", RXSM},
        {"RXCV", RXCV}
};

#endif

static unsigned char *bmac_emergency_rxbuf;

/*
 * Number of bytes of private data per BMAC: allow enough for
 * the rx and tx dma commands plus a branch dma command each,
 * and another 16 bytes to allow us to align the dma command
 * buffers on a 16 byte boundary.
 */
#define PRIV_BYTES      (sizeof(struct bmac_data) \
        + (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
        + sizeof(struct sk_buff_head))

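/*
 * All of that lives in the one netdev_priv() allocation: bmac_probe()
 * carves the two DBDMA command lists (each ring plus its branch command)
 * and the sk_buff_head for the transmit queue out of the space following
 * struct bmac_data, aligning the command lists on a 16-byte boundary as
 * DBDMA requires.
 */
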
static int bmac_open(struct net_device *dev);
static int bmac_close(struct net_device *dev);
static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
static void bmac_set_multicast(struct net_device *dev);
static void bmac_reset_and_enable(struct net_device *dev);
static void bmac_start_chip(struct net_device *dev);
static void bmac_init_chip(struct net_device *dev);
static void bmac_init_registers(struct net_device *dev);
static void bmac_enable_and_reset_chip(struct net_device *dev);
static int bmac_set_address(struct net_device *dev, void *addr);
static irqreturn_t bmac_misc_intr(int irq, void *dev_id);
static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
static void bmac_set_timeout(struct net_device *dev);
static void bmac_tx_timeout(struct timer_list *t);
static int bmac_output(struct sk_buff *skb, struct net_device *dev);
static void bmac_start(struct net_device *dev);

#define DBDMA_SET(x)    ( ((x) | (x) << 16) )
#define DBDMA_CLEAR(x)  ( (x) << 16)

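/*
 * A write to a DBDMA channel's control register carries a mask in the top
 * 16 bits and values in the bottom 16; only bits whose mask bit is set are
 * changed.  DBDMA_SET(x) therefore puts x in both halves (set those bits),
 * while DBDMA_CLEAR(x) puts x in the mask half only (clear them).
 */
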
static inline void
dbdma_st32(volatile __u32 __iomem *a, unsigned long x)
{
        __asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
}

static inline unsigned long
dbdma_ld32(volatile __u32 __iomem *a)
{
        __u32 swap;
        __asm__ volatile ("lwbrx %0,0,%1" :  "=r" (swap) : "r" (a));
        return swap;
}

static void
dbdma_continue(volatile struct dbdma_regs __iomem *dmap)
{
        dbdma_st32(&dmap->control,
                   DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD));
        eieio();
}

static void
dbdma_reset(volatile struct dbdma_regs __iomem *dmap)
{
        dbdma_st32(&dmap->control,
                   DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN));
        eieio();
        while (dbdma_ld32(&dmap->status) & RUN)
                eieio();
}

static void
dbdma_setcmd(volatile struct dbdma_cmd *cp,
             unsigned short cmd, unsigned count, unsigned long addr,
             unsigned long cmd_dep)
{
        out_le16(&cp->command, cmd);
        out_le16(&cp->req_count, count);
        out_le32(&cp->phy_addr, addr);
        out_le32(&cp->cmd_dep, cmd_dep);
        out_le16(&cp->xfer_status, 0);
        out_le16(&cp->res_count, 0);
}

static inline
void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data )
{
        out_le16((void __iomem *)dev->base_addr + reg_offset, data);
}


static inline
unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
{
        return in_le16((void __iomem *)dev->base_addr + reg_offset);
}

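/*
 * All chip accesses go through the two helpers above: the BMAC's registers
 * are 16-bit little-endian values at fixed offsets from the MMIO block that
 * bmac_probe() ioremap()s and stashes in dev->base_addr.
 */
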
static void
bmac_enable_and_reset_chip(struct net_device *dev)
{
        struct bmac_data *bp = netdev_priv(dev);
        volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
        volatile struct dbdma_regs __iomem *td = bp->tx_dma;

        if (rd)
                dbdma_reset(rd);
        if (td)
                dbdma_reset(td);

        pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1);
}

#define MIFDELAY        udelay(10)

static unsigned int
bmac_mif_readbits(struct net_device *dev, int nb)
{
        unsigned int val = 0;

        while (--nb >= 0) {
                bmwrite(dev, MIFCSR, 0);
                MIFDELAY;
                if (bmread(dev, MIFCSR) & 8)
                        val |= 1 << nb;
                bmwrite(dev, MIFCSR, 1);
                MIFDELAY;
        }
        bmwrite(dev, MIFCSR, 0);
        MIFDELAY;
        bmwrite(dev, MIFCSR, 1);
        MIFDELAY;
        return val;
}

static void
bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb)
{
        int b;

        while (--nb >= 0) {
                b = (val & (1 << nb))? 6: 4;
                bmwrite(dev, MIFCSR, b);
                MIFDELAY;
                bmwrite(dev, MIFCSR, b|1);
                MIFDELAY;
        }
}

static unsigned int
bmac_mif_read(struct net_device *dev, unsigned int addr)
{
        unsigned int val;

        bmwrite(dev, MIFCSR, 4);
        MIFDELAY;
        bmac_mif_writebits(dev, ~0U, 32);
        bmac_mif_writebits(dev, 6, 4);
        bmac_mif_writebits(dev, addr, 10);
        bmwrite(dev, MIFCSR, 2);
        MIFDELAY;
        bmwrite(dev, MIFCSR, 1);
        MIFDELAY;
        val = bmac_mif_readbits(dev, 17);
        bmwrite(dev, MIFCSR, 4);
        MIFDELAY;
        return val;
}

static void
bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val)
{
        bmwrite(dev, MIFCSR, 4);
        MIFDELAY;
        bmac_mif_writebits(dev, ~0U, 32);
        bmac_mif_writebits(dev, 5, 4);
        bmac_mif_writebits(dev, addr, 10);
        bmac_mif_writebits(dev, 2, 2);
        bmac_mif_writebits(dev, val, 16);
        bmac_mif_writebits(dev, 3, 2);
}

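/*
 * The two routines above bit-bang IEEE 802.3 clause 22 MII management
 * frames through MIFCSR: a preamble of 32 ones, a 4-bit start+opcode
 * (0110 = read, 0101 = write), ten address bits (5-bit PHY address plus
 * 5-bit register number), a turnaround, then 16 data bits.  The read
 * clocks in 17 bits, so the turnaround bit lands above the 16 data bits.
 */
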
static void
bmac_init_registers(struct net_device *dev)
{
        struct bmac_data *bp = netdev_priv(dev);
        volatile unsigned short regValue;
        unsigned short *pWord16;
        int i;

        /* XXDEBUG(("bmac: enter init_registers\n")); */

        bmwrite(dev, RXRST, RxResetValue);
        bmwrite(dev, TXRST, TxResetBit);

        i = 100;
        do {
                --i;
                udelay(10000);
                regValue = bmread(dev, TXRST); /* wait for reset to clear..acknowledge */
        } while ((regValue & TxResetBit) && i > 0);

        if (!bp->is_bmac_plus) {
                regValue = bmread(dev, XCVRIF);
                regValue |= ClkBit | SerialMode | COLActiveLow;
                bmwrite(dev, XCVRIF, regValue);
                udelay(10000);
        }

        bmwrite(dev, RSEED, (unsigned short)0x1968);

        regValue = bmread(dev, XIFC);
        regValue |= TxOutputEnable;
        bmwrite(dev, XIFC, regValue);

        bmread(dev, PAREG);

        /* set collision counters to 0 */
        bmwrite(dev, NCCNT, 0);
        bmwrite(dev, NTCNT, 0);
        bmwrite(dev, EXCNT, 0);
        bmwrite(dev, LTCNT, 0);

        /* set rx counters to 0 */
        bmwrite(dev, FRCNT, 0);
        bmwrite(dev, LECNT, 0);
        bmwrite(dev, AECNT, 0);
        bmwrite(dev, FECNT, 0);
        bmwrite(dev, RXCV, 0);

        /* set tx fifo information */
        bmwrite(dev, TXTH, 4);  /* 4 octets before tx starts */

        bmwrite(dev, TXFIFOCSR, 0);     /* first disable txFIFO */
        bmwrite(dev, TXFIFOCSR, TxFIFOEnable );

        /* set rx fifo information */
        bmwrite(dev, RXFIFOCSR, 0);     /* first disable rxFIFO */
        bmwrite(dev, RXFIFOCSR, RxFIFOEnable );

        //bmwrite(dev, TXCFG, TxMACEnable);     /* TxNeverGiveUp maybe later */
        bmread(dev, STATUS);            /* read it just to clear it */

        /* zero out the chip Hash Filter registers */
        for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
        bmwrite(dev, BHASH3, bp->hash_table_mask[0]);   /* bits 15 - 0 */
        bmwrite(dev, BHASH2, bp->hash_table_mask[1]);   /* bits 31 - 16 */
        bmwrite(dev, BHASH1, bp->hash_table_mask[2]);   /* bits 47 - 32 */
        bmwrite(dev, BHASH0, bp->hash_table_mask[3]);   /* bits 63 - 48 */

        pWord16 = (unsigned short *)dev->dev_addr;
        bmwrite(dev, MADD0, *pWord16++);
        bmwrite(dev, MADD1, *pWord16++);
        bmwrite(dev, MADD2, *pWord16);

        bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);

        bmwrite(dev, INTDISABLE, EnableNormal);
}

#if 0
static void
bmac_disable_interrupts(struct net_device *dev)
{
        bmwrite(dev, INTDISABLE, DisableAll);
}

static void
bmac_enable_interrupts(struct net_device *dev)
{
        bmwrite(dev, INTDISABLE, EnableNormal);
}
#endif

static void
bmac_start_chip(struct net_device *dev)
{
        struct bmac_data *bp = netdev_priv(dev);
        volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
        unsigned short oldConfig;

        /* enable rx dma channel */
        dbdma_continue(rd);

        oldConfig = bmread(dev, TXCFG);
        bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

        /* turn on rx plus any other bits already on (promiscuous possibly) */
        oldConfig = bmread(dev, RXCFG);
        bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
        udelay(20000);
}

static void
bmac_init_phy(struct net_device *dev)
{
        unsigned int addr;
        struct bmac_data *bp = netdev_priv(dev);

        printk(KERN_DEBUG "phy registers:");
        for (addr = 0; addr < 32; ++addr) {
                if ((addr & 7) == 0)
                        printk(KERN_DEBUG);
                printk(KERN_CONT " %.4x", bmac_mif_read(dev, addr));
        }
        printk(KERN_CONT "\n");

        if (bp->is_bmac_plus) {
                unsigned int capable, ctrl;

                ctrl = bmac_mif_read(dev, 0);
                capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1;
                if (bmac_mif_read(dev, 4) != capable ||
                    (ctrl & 0x1000) == 0) {
                        bmac_mif_write(dev, 4, capable);
                        bmac_mif_write(dev, 0, 0x1200);
                } else
                        bmac_mif_write(dev, 0, 0x1000);
        }
}

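/*
 * On a BMAC+ the PHY is brought up by autonegotiation: the ability bits in
 * MII register 1 (BMSR, bits 15:11) are shifted down into the register 4
 * (ANAR) advertisement fields, with bit 0 set for the 802.3 selector.
 * Writing 0x1200 to register 0 (BMCR) enables and restarts autonegotiation;
 * 0x1000 merely leaves it enabled when the advertisement is already right.
 */
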
static void bmac_init_chip(struct net_device *dev)
{
        bmac_init_phy(dev);
        bmac_init_registers(dev);
}

#ifdef CONFIG_PM
static int bmac_suspend(struct macio_dev *mdev, pm_message_t state)
{
        struct net_device* dev = macio_get_drvdata(mdev);
        struct bmac_data *bp = netdev_priv(dev);
        unsigned long flags;
        unsigned short config;
        int i;

        netif_device_detach(dev);
        /* prolly should wait for dma to finish & turn off the chip */
        spin_lock_irqsave(&bp->lock, flags);
        if (bp->timeout_active) {
                del_timer(&bp->tx_timeout);
                bp->timeout_active = 0;
        }
        disable_irq(dev->irq);
        disable_irq(bp->tx_dma_intr);
        disable_irq(bp->rx_dma_intr);
        bp->sleeping = 1;
        spin_unlock_irqrestore(&bp->lock, flags);
        if (bp->opened) {
                volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
                volatile struct dbdma_regs __iomem *td = bp->tx_dma;

                config = bmread(dev, RXCFG);
                bmwrite(dev, RXCFG, (config & ~RxMACEnable));
                config = bmread(dev, TXCFG);
                bmwrite(dev, TXCFG, (config & ~TxMACEnable));
                bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
                /* disable rx and tx dma */
                rd->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));   /* clear run bit */
                td->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));   /* clear run bit */
                /* free some skb's */
                for (i=0; i<N_RX_RING; i++) {
                        if (bp->rx_bufs[i] != NULL) {
                                dev_kfree_skb(bp->rx_bufs[i]);
                                bp->rx_bufs[i] = NULL;
                        }
                }
                for (i = 0; i<N_TX_RING; i++) {
                        if (bp->tx_bufs[i] != NULL) {
                                dev_kfree_skb(bp->tx_bufs[i]);
                                bp->tx_bufs[i] = NULL;
                        }
                }
        }
        pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
        return 0;
}

static int bmac_resume(struct macio_dev *mdev)
{
        struct net_device* dev = macio_get_drvdata(mdev);
        struct bmac_data *bp = netdev_priv(dev);

        /* see if this is enough */
        if (bp->opened)
                bmac_reset_and_enable(dev);

        enable_irq(dev->irq);
        enable_irq(bp->tx_dma_intr);
        enable_irq(bp->rx_dma_intr);
        netif_device_attach(dev);

        return 0;
}
#endif /* CONFIG_PM */

static int bmac_set_address(struct net_device *dev, void *addr)
{
        struct bmac_data *bp = netdev_priv(dev);
        unsigned char *p = addr;
        unsigned short *pWord16;
        unsigned long flags;
        int i;

        XXDEBUG(("bmac: enter set_address\n"));
        spin_lock_irqsave(&bp->lock, flags);

        for (i = 0; i < 6; ++i) {
                dev->dev_addr[i] = p[i];
        }
        /* load up the hardware address */
        pWord16 = (unsigned short *)dev->dev_addr;
        bmwrite(dev, MADD0, *pWord16++);
        bmwrite(dev, MADD1, *pWord16++);
        bmwrite(dev, MADD2, *pWord16);

        spin_unlock_irqrestore(&bp->lock, flags);
        XXDEBUG(("bmac: exit set_address\n"));
        return 0;
}

static inline void bmac_set_timeout(struct net_device *dev)
{
        struct bmac_data *bp = netdev_priv(dev);
        unsigned long flags;

        spin_lock_irqsave(&bp->lock, flags);
        if (bp->timeout_active)
                del_timer(&bp->tx_timeout);
        bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
        add_timer(&bp->tx_timeout);
        bp->timeout_active = 1;
        spin_unlock_irqrestore(&bp->lock, flags);
}

static void
bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
{
        void *vaddr;
        unsigned long baddr;
        unsigned long len;

        len = skb->len;
        vaddr = skb->data;
        baddr = virt_to_bus(vaddr);

        dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0);
}

static void
bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
{
        unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf;

        dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN,
                     virt_to_bus(addr), 0);
}

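/*
 * If no skb could be allocated for a receive slot, the descriptor is aimed
 * at the shared bmac_emergency_rxbuf instead, so the DBDMA channel always
 * has somewhere valid to write.  Whatever lands there is dropped, and the
 * allocation is retried on the next pass through the rx interrupt handler.
 */
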
static void
bmac_init_tx_ring(struct bmac_data *bp)
{
        volatile struct dbdma_regs __iomem *td = bp->tx_dma;

        memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd));

        bp->tx_empty = 0;
        bp->tx_fill = 0;
        bp->tx_fullup = 0;

        /* put a branch at the end of the tx command list */
        dbdma_setcmd(&bp->tx_cmds[N_TX_RING],
                     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds));

        /* reset tx dma */
        dbdma_reset(td);
        out_le32(&td->wait_sel, 0x00200020);
        out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds));
}

static int
bmac_init_rx_ring(struct net_device *dev)
{
        struct bmac_data *bp = netdev_priv(dev);
        volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
        int i;
        struct sk_buff *skb;

        /* initialize list of sk_buffs for receiving and set up recv dma */
        memset((char *)bp->rx_cmds, 0,
               (N_RX_RING + 1) * sizeof(struct dbdma_cmd));
        for (i = 0; i < N_RX_RING; i++) {
                if ((skb = bp->rx_bufs[i]) == NULL) {
                        bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
                        if (skb != NULL)
                                skb_reserve(skb, 2);
                }
                bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
        }

        bp->rx_empty = 0;
        bp->rx_fill = i;

        /* Put a branch back to the beginning of the receive command list */
        dbdma_setcmd(&bp->rx_cmds[N_RX_RING],
                     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds));

        /* start rx dma */
        dbdma_reset(rd);
        out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds));

        return 1;
}

static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev)
{
        struct bmac_data *bp = netdev_priv(dev);
        volatile struct dbdma_regs __iomem *td = bp->tx_dma;
        int i;

        /* see if there's a free slot in the tx ring */
        /* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */
        /*          bp->tx_empty, bp->tx_fill)); */
        i = bp->tx_fill + 1;
        if (i >= N_TX_RING)
                i = 0;
        if (i == bp->tx_empty) {
                netif_stop_queue(dev);
                bp->tx_fullup = 1;
                XXDEBUG(("bmac_transmit_packet: tx ring full\n"));
                return -1;              /* can't take it at the moment */
        }

        dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0);

        bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]);

        bp->tx_bufs[bp->tx_fill] = skb;
        bp->tx_fill = i;

        dev->stats.tx_bytes += skb->len;

        dbdma_continue(td);

        return 0;
}

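/*
 * tx_fill is the producer index and tx_empty the consumer: the ring counts
 * as full when advancing tx_fill would run into tx_empty, so one slot
 * always stays unused to tell "full" apart from "empty".  The STOP command
 * written at the new fill position keeps the channel from running past the
 * last valid descriptor.
 */
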
static int rxintcount;

static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct bmac_data *bp = netdev_priv(dev);
        volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
        volatile struct dbdma_cmd *cp;
        int i, nb, stat;
        struct sk_buff *skb;
        unsigned int residual;
        int last;
        unsigned long flags;

        spin_lock_irqsave(&bp->lock, flags);

        if (++rxintcount < 10) {
                XXDEBUG(("bmac_rxdma_intr\n"));
        }

        last = -1;
        i = bp->rx_empty;

        while (1) {
                cp = &bp->rx_cmds[i];
                stat = le16_to_cpu(cp->xfer_status);
                residual = le16_to_cpu(cp->res_count);
                if ((stat & ACTIVE) == 0)
                        break;
                nb = RX_BUFLEN - residual - 2;
                if (nb < (ETHERMINPACKET - ETHERCRC)) {
                        skb = NULL;
                        dev->stats.rx_length_errors++;
                        dev->stats.rx_errors++;
                } else {
                        skb = bp->rx_bufs[i];
                        bp->rx_bufs[i] = NULL;
                }
                if (skb != NULL) {
                        nb -= ETHERCRC;
                        skb_put(skb, nb);
                        skb->protocol = eth_type_trans(skb, dev);
                        netif_rx(skb);
                        ++dev->stats.rx_packets;
                        dev->stats.rx_bytes += nb;
                } else {
                        ++dev->stats.rx_dropped;
                }
                if ((skb = bp->rx_bufs[i]) == NULL) {
                        bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
                        if (skb != NULL)
                                skb_reserve(bp->rx_bufs[i], 2);
                }
                bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
                cp->res_count = cpu_to_le16(0);
                cp->xfer_status = cpu_to_le16(0);
                last = i;
                if (++i >= N_RX_RING) i = 0;
        }

        if (last != -1) {
                bp->rx_fill = last;
                bp->rx_empty = i;
        }

        dbdma_continue(rd);
        spin_unlock_irqrestore(&bp->lock, flags);

        if (rxintcount < 10) {
                XXDEBUG(("bmac_rxdma_intr done\n"));
        }
        return IRQ_HANDLED;
}

static int txintcount;

static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct bmac_data *bp = netdev_priv(dev);
        volatile struct dbdma_cmd *cp;
        int stat;
        unsigned long flags;

        spin_lock_irqsave(&bp->lock, flags);

        if (txintcount++ < 10) {
                XXDEBUG(("bmac_txdma_intr\n"));
        }

        /*     del_timer(&bp->tx_timeout); */
        /*     bp->timeout_active = 0; */

        while (1) {
                cp = &bp->tx_cmds[bp->tx_empty];
                stat = le16_to_cpu(cp->xfer_status);
                if (txintcount < 10) {
                        XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat));
                }
                if (!(stat & ACTIVE)) {
                        /*
                         * status field might not have been filled by DBDMA
                         */
                        if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr)))
                                break;
                }

                if (bp->tx_bufs[bp->tx_empty]) {
                        ++dev->stats.tx_packets;
                        dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]);
                }
                bp->tx_bufs[bp->tx_empty] = NULL;
                bp->tx_fullup = 0;
                netif_wake_queue(dev);
                if (++bp->tx_empty >= N_TX_RING)
                        bp->tx_empty = 0;
                if (bp->tx_empty == bp->tx_fill)
                        break;
        }

        spin_unlock_irqrestore(&bp->lock, flags);

        if (txintcount < 10) {
                XXDEBUG(("bmac_txdma_intr done->bmac_start\n"));
        }

        bmac_start(dev);
        return IRQ_HANDLED;
}

#ifndef SUNHME_MULTICAST
/* Real fast bit-reversal algorithm, 6-bit values */
static int reverse6[64] = {
        0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
        0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
        0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
        0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
        0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
        0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
        0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
        0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
};

static unsigned int
crc416(unsigned int curval, unsigned short nxtval)
{
        register unsigned int counter, cur = curval, next = nxtval;
        register int high_crc_set, low_data_set;

        /* Swap bytes */
        next = ((next & 0x00FF) << 8) | (next >> 8);

        /* Compute bit-by-bit */
        for (counter = 0; counter < 16; ++counter) {
                /* is high CRC bit set? */
                if ((cur & 0x80000000) == 0) high_crc_set = 0;
                else high_crc_set = 1;

                cur = cur << 1;

                if ((next & 0x0001) == 0) low_data_set = 0;
                else low_data_set = 1;

                next = next >> 1;

                /* do the XOR */
                if (high_crc_set ^ low_data_set) cur = cur ^ ENET_CRCPOLY;
        }
        return cur;
}

static unsigned int
bmac_crc(unsigned short *address)
{
        unsigned int newcrc;

        XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
        newcrc = crc416(0xffffffff, *address);  /* address bits 47 - 32 */
        newcrc = crc416(newcrc, address[1]);    /* address bits 31 - 16 */
        newcrc = crc416(newcrc, address[2]);    /* address bits 15 - 0  */

        return(newcrc);
}

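/*
 * crc416() runs the CRC-32 polynomial bit-serially over one 16-bit word
 * (after swapping it into transmission byte order); bmac_crc() chains
 * three of those across the 48-bit station address from the usual
 * 0xffffffff seed.  Only the low six bits of the result are kept, and
 * reverse6[] bit-reverses them to pick one of the 64 hash-filter bits.
 */
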
/*
 * Add requested mcast addr to BMac's hash table filter.
 *
 */

static void
bmac_addhash(struct bmac_data *bp, unsigned char *addr)
{
        unsigned int crc;
        unsigned short mask;

        if (!(*addr)) return;
        crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
        crc = reverse6[crc];    /* Hyperfast bit-reversing algorithm */
        if (bp->hash_use_count[crc]++) return;  /* This bit is already set */
        mask = crc % 16;
        mask = (unsigned char)1 << mask;
        bp->hash_table_mask[crc/16] |= mask;
}

static void
bmac_removehash(struct bmac_data *bp, unsigned char *addr)
{
        unsigned int crc;
        unsigned char mask;

        /* Now, delete the address from the filter copy, as indicated */
        crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
        crc = reverse6[crc];    /* Hyperfast bit-reversing algorithm */
        if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
        if (--bp->hash_use_count[crc]) return;  /* That bit is still in use */
        mask = crc % 16;
        mask = ((unsigned char)1 << mask) ^ 0xffff; /* To turn off bit */
        bp->hash_table_mask[crc/16] &= mask;
}

/*
 * Sync the adapter with the software copy of the multicast mask
 * (logical address filter).
 */

static void
bmac_rx_off(struct net_device *dev)
{
        unsigned short rx_cfg;

        rx_cfg = bmread(dev, RXCFG);
        rx_cfg &= ~RxMACEnable;
        bmwrite(dev, RXCFG, rx_cfg);
        do {
                rx_cfg = bmread(dev, RXCFG);
        }  while (rx_cfg & RxMACEnable);
}

unsigned short
bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable)
{
        unsigned short rx_cfg;

        rx_cfg = bmread(dev, RXCFG);
        rx_cfg |= RxMACEnable;
        if (hash_enable) rx_cfg |= RxHashFilterEnable;
        else rx_cfg &= ~RxHashFilterEnable;
        if (promisc_enable) rx_cfg |= RxPromiscEnable;
        else rx_cfg &= ~RxPromiscEnable;
        bmwrite(dev, RXRST, RxResetValue);
        bmwrite(dev, RXFIFOCSR, 0);     /* first disable rxFIFO */
        bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
        bmwrite(dev, RXCFG, rx_cfg );
        return rx_cfg;
}

static void
bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp)
{
        bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
        bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
        bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
        bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
}

#if 0
static void
bmac_add_multi(struct net_device *dev,
               struct bmac_data *bp, unsigned char *addr)
{
        /* XXDEBUG(("bmac: enter bmac_add_multi\n")); */
        bmac_addhash(bp, addr);
        bmac_rx_off(dev);
        bmac_update_hash_table_mask(dev, bp);
        bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
        /* XXDEBUG(("bmac: exit bmac_add_multi\n")); */
}

static void
bmac_remove_multi(struct net_device *dev,
                  struct bmac_data *bp, unsigned char *addr)
{
        bmac_removehash(bp, addr);
        bmac_rx_off(dev);
        bmac_update_hash_table_mask(dev, bp);
        bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
}
#endif

/* Set or clear the multicast filter for this adaptor.
    num_addrs == -1     Promiscuous mode, receive all packets
    num_addrs == 0      Normal mode, clear multicast list
    num_addrs > 0       Multicast mode, receive normal and MC packets, and do
                        best-effort filtering.
 */
static void bmac_set_multicast(struct net_device *dev)
{
        struct netdev_hw_addr *ha;
        struct bmac_data *bp = netdev_priv(dev);
        int num_addrs = netdev_mc_count(dev);
        unsigned short rx_cfg;
        int i;

        if (bp->sleeping)
                return;

        XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));

        if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
                for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
                bmac_update_hash_table_mask(dev, bp);
                rx_cfg = bmac_rx_on(dev, 1, 0);
                XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n", rx_cfg));
        } else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) {
                rx_cfg = bmread(dev, RXCFG);
                rx_cfg |= RxPromiscEnable;
                bmwrite(dev, RXCFG, rx_cfg);
                rx_cfg = bmac_rx_on(dev, 0, 1);
                XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg));
        } else {
                for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
                for (i=0; i<64; i++) bp->hash_use_count[i] = 0;
                if (num_addrs == 0) {
                        rx_cfg = bmac_rx_on(dev, 0, 0);
                        XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
                } else {
                        netdev_for_each_mc_addr(ha, dev)
                                bmac_addhash(bp, ha->addr);
                        bmac_update_hash_table_mask(dev, bp);
                        rx_cfg = bmac_rx_on(dev, 1, 0);
                        XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
                }
        }
        /* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */
}

#else /* ifdef SUNHME_MULTICAST */

/* The version of set_multicast below was lifted from sunhme.c */

static void bmac_set_multicast(struct net_device *dev)
{
        struct netdev_hw_addr *ha;
        unsigned short rx_cfg;
        u32 crc;

        if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
                bmwrite(dev, BHASH0, 0xffff);
                bmwrite(dev, BHASH1, 0xffff);
                bmwrite(dev, BHASH2, 0xffff);
                bmwrite(dev, BHASH3, 0xffff);
        } else if(dev->flags & IFF_PROMISC) {
                rx_cfg = bmread(dev, RXCFG);
                rx_cfg |= RxPromiscEnable;
                bmwrite(dev, RXCFG, rx_cfg);
        } else {
                u16 hash_table[4] = { 0 };

                rx_cfg = bmread(dev, RXCFG);
                rx_cfg &= ~RxPromiscEnable;
                bmwrite(dev, RXCFG, rx_cfg);

                netdev_for_each_mc_addr(ha, dev) {
                        crc = ether_crc_le(6, ha->addr);
                        crc >>= 26;
                        hash_table[crc >> 4] |= 1 << (crc & 0xf);
                }
                bmwrite(dev, BHASH0, hash_table[0]);
                bmwrite(dev, BHASH1, hash_table[1]);
                bmwrite(dev, BHASH2, hash_table[2]);
                bmwrite(dev, BHASH3, hash_table[3]);
        }
}

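/*
 * ether_crc_le() gives the little-endian CRC-32 of the address; its top
 * six bits (crc >> 26) index the 64-bit hash filter, the upper two of
 * those selecting one of the four 16-bit BHASH registers and the low four
 * the bit within it.
 */
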
#endif /* SUNHME_MULTICAST */

static int miscintcount;

static irqreturn_t bmac_misc_intr(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *) dev_id;
        unsigned int status = bmread(dev, STATUS);
        if (miscintcount++ < 10) {
                XXDEBUG(("bmac_misc_intr\n"));
        }
        /* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */
        /*     bmac_txdma_intr_inner(irq, dev_id); */
        /*   if (status & FrameReceived) dev->stats.rx_dropped++; */
        if (status & RxErrorMask) dev->stats.rx_errors++;
        if (status & RxCRCCntExp) dev->stats.rx_crc_errors++;
        if (status & RxLenCntExp) dev->stats.rx_length_errors++;
        if (status & RxOverFlow) dev->stats.rx_over_errors++;
        if (status & RxAlignCntExp) dev->stats.rx_frame_errors++;
        /*   if (status & FrameSent) dev->stats.tx_dropped++; */
        if (status & TxErrorMask) dev->stats.tx_errors++;
        if (status & TxUnderrun) dev->stats.tx_fifo_errors++;
        if (status & TxNormalCollExp) dev->stats.collisions++;
        return IRQ_HANDLED;
}

/*
 * Procedure for reading EEPROM
 */
#define SROMAddressLength       5
#define DataInOn                0x0008
#define DataInOff               0x0000
#define Clk                     0x0002
#define ChipSelect              0x0001
#define SDIShiftCount           3
#define SD0ShiftCount           2
#define DelayValue              1000    /* number of microseconds */
#define SROMStartOffset         10      /* this is in words */
#define SROMReadCount           3       /* number of words to read from SROM */
#define SROMAddressBits         6
#define EnetAddressOffset       20

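/*
 * The station address lives in a serial EEPROM bit-banged through SROMCSR:
 * ChipSelect and Clk are toggled by hand, data goes out on the bit at
 * SDIShiftCount and comes back on the bit at SD0ShiftCount.  A read is:
 * select the part, clock in the 3-bit read opcode (110), clock in the word
 * address, then clock out the 16 data bits.
 */
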
static unsigned char
bmac_clock_out_bit(struct net_device *dev)
{
        unsigned short         data;
        unsigned short         val;

        bmwrite(dev, SROMCSR, ChipSelect | Clk);
        udelay(DelayValue);

        data = bmread(dev, SROMCSR);
        udelay(DelayValue);
        val = (data >> SD0ShiftCount) & 1;

        bmwrite(dev, SROMCSR, ChipSelect);
        udelay(DelayValue);

        return val;
}

static void
bmac_clock_in_bit(struct net_device *dev, unsigned int val)
{
        unsigned short data;

        if (val != 0 && val != 1) return;

        data = (val << SDIShiftCount);
        bmwrite(dev, SROMCSR, data | ChipSelect  );
        udelay(DelayValue);

        bmwrite(dev, SROMCSR, data | ChipSelect | Clk );
        udelay(DelayValue);

        bmwrite(dev, SROMCSR, data | ChipSelect);
        udelay(DelayValue);
}

static void
reset_and_select_srom(struct net_device *dev)
{
        /* first reset */
        bmwrite(dev, SROMCSR, 0);
        udelay(DelayValue);

        /* send it the read command (110) */
        bmac_clock_in_bit(dev, 1);
        bmac_clock_in_bit(dev, 1);
        bmac_clock_in_bit(dev, 0);
}

static unsigned short
read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len)
{
        unsigned short data, val;
        int i;

        /* send out the address we want to read from */
        for (i = 0; i < addr_len; i++)  {
                val = addr >> (addr_len-i-1);
                bmac_clock_in_bit(dev, val & 1);
        }

        /* Now read in the 16-bit data */
        data = 0;
        for (i = 0; i < 16; i++)        {
                val = bmac_clock_out_bit(dev);
                data <<= 1;
                data |= val;
        }
        bmwrite(dev, SROMCSR, 0);

        return data;
}

/*
 * It looks like Cogent and SMC use different methods for calculating
 * checksums. What a pain..
 */

static int
bmac_verify_checksum(struct net_device *dev)
{
        unsigned short data, storedCS;

        reset_and_select_srom(dev);
        data = read_srom(dev, 3, SROMAddressBits);
        storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00);

        return 0;
}

static void
bmac_get_station_address(struct net_device *dev, unsigned char *ea)
{
        int i;
        unsigned short data;

        for (i = 0; i < 3; i++)
                {
                reset_and_select_srom(dev);
                data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
                ea[2*i]   = bitrev8(data & 0x0ff);
                ea[2*i+1] = bitrev8((data >> 8) & 0x0ff);
                }
}

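/*
 * Each 16-bit SROM word yields two bytes of the six-byte station address,
 * apparently stored bit-reversed; bitrev8() puts them back in wire order.
 */
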
static void bmac_reset_and_enable(struct net_device *dev)
{
        struct bmac_data *bp = netdev_priv(dev);
        unsigned long flags;
        struct sk_buff *skb;
        unsigned char *data;

        spin_lock_irqsave(&bp->lock, flags);
        bmac_enable_and_reset_chip(dev);
        bmac_init_tx_ring(bp);
        bmac_init_rx_ring(dev);
        bmac_init_chip(dev);
        bmac_start_chip(dev);
        bmwrite(dev, INTDISABLE, EnableNormal);
        bp->sleeping = 0;

        /*
         * It seems that the bmac can't receive until it's transmitted
         * a packet.  So we give it a dummy packet to transmit.
         */
        skb = netdev_alloc_skb(dev, ETHERMINPACKET);
        if (skb != NULL) {
                data = skb_put_zero(skb, ETHERMINPACKET);
                memcpy(data, dev->dev_addr, ETH_ALEN);
                memcpy(data + ETH_ALEN, dev->dev_addr, ETH_ALEN);
                bmac_transmit_packet(skb, dev);
        }
        spin_unlock_irqrestore(&bp->lock, flags);
}

static const struct ethtool_ops bmac_ethtool_ops = {
        .get_link               = ethtool_op_get_link,
};

static const struct net_device_ops bmac_netdev_ops = {
        .ndo_open               = bmac_open,
        .ndo_stop               = bmac_close,
        .ndo_start_xmit         = bmac_output,
        .ndo_set_rx_mode        = bmac_set_multicast,
        .ndo_set_mac_address    = bmac_set_address,
        .ndo_validate_addr      = eth_validate_addr,
};

static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
        int j, rev, ret;
        struct bmac_data *bp;
        const unsigned char *prop_addr;
        unsigned char addr[6];
        struct net_device *dev;
        int is_bmac_plus = ((int)match->data) != 0;

        if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
                printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n");
                return -ENODEV;
        }
        prop_addr = of_get_property(macio_get_of_node(mdev),
                                    "mac-address", NULL);
        if (prop_addr == NULL) {
                prop_addr = of_get_property(macio_get_of_node(mdev),
                                            "local-mac-address", NULL);
                if (prop_addr == NULL) {
                        printk(KERN_ERR "BMAC: Can't get mac-address\n");
                        return -ENODEV;
                }
        }
        memcpy(addr, prop_addr, sizeof(addr));

        dev = alloc_etherdev(PRIV_BYTES);
        if (!dev)
                return -ENOMEM;

        bp = netdev_priv(dev);
        SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
        macio_set_drvdata(mdev, dev);

        bp->mdev = mdev;
        spin_lock_init(&bp->lock);

        if (macio_request_resources(mdev, "bmac")) {
                printk(KERN_ERR "BMAC: can't request IO resource !\n");
                goto out_free;
        }

        dev->base_addr = (unsigned long)
                ioremap(macio_resource_start(mdev, 0), macio_resource_len(mdev, 0));
        if (dev->base_addr == 0)
                goto out_release;

        dev->irq = macio_irq(mdev, 0);

        bmac_enable_and_reset_chip(dev);
        bmwrite(dev, INTDISABLE, DisableAll);

        rev = addr[0] == 0 && addr[1] == 0xA0;
        for (j = 0; j < 6; ++j)
                dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];

        /* Enable chip without interrupts for now */
        bmac_enable_and_reset_chip(dev);
        bmwrite(dev, INTDISABLE, DisableAll);

        dev->netdev_ops = &bmac_netdev_ops;
        dev->ethtool_ops = &bmac_ethtool_ops;

        bmac_get_station_address(dev, addr);
        if (bmac_verify_checksum(dev) != 0)
                goto err_out_iounmap;

        bp->is_bmac_plus = is_bmac_plus;
        bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1));
        if (!bp->tx_dma)
                goto err_out_iounmap;
        bp->tx_dma_intr = macio_irq(mdev, 1);
        bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2));
        if (!bp->rx_dma)
                goto err_out_iounmap_tx;
        bp->rx_dma_intr = macio_irq(mdev, 2);

        bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1);
        bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1;

        bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
        skb_queue_head_init(bp->queue);

        timer_setup(&bp->tx_timeout, bmac_tx_timeout, 0);

        ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
        if (ret) {
                printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
                goto err_out_iounmap_rx;
        }
        ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev);
        if (ret) {
                printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr);
                goto err_out_irq0;
        }
        ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev);
        if (ret) {
                printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr);
                goto err_out_irq1;
        }

        /* Mask chip interrupts and disable chip, will be
         * re-enabled on open()
         */
        disable_irq(dev->irq);
        pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);

        if (register_netdev(dev) != 0) {
                printk(KERN_ERR "BMAC: Ethernet registration failed\n");
                goto err_out_irq2;
        }

        printk(KERN_INFO "%s: BMAC%s at %pM",
               dev->name, (is_bmac_plus ? "+" : ""), dev->dev_addr);
        XXDEBUG((", base_addr=%#0lx", dev->base_addr));
        printk("\n");

        return 0;

err_out_irq2:
        free_irq(bp->rx_dma_intr, dev);
err_out_irq1:
        free_irq(bp->tx_dma_intr, dev);
err_out_irq0:
        free_irq(dev->irq, dev);
err_out_iounmap_rx:
        iounmap(bp->rx_dma);
err_out_iounmap_tx:
        iounmap(bp->tx_dma);
err_out_iounmap:
        iounmap((void __iomem *)dev->base_addr);
out_release:
        macio_release_resources(mdev);
out_free:
        pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
        free_netdev(dev);

        return -ENODEV;
}

static int bmac_open(struct net_device *dev)
{
        struct bmac_data *bp = netdev_priv(dev);
        /* XXDEBUG(("bmac: enter open\n")); */
        /* reset the chip */
        bp->opened = 1;
        bmac_reset_and_enable(dev);
        enable_irq(dev->irq);
        return 0;
}

static int bmac_close(struct net_device *dev)
{
        struct bmac_data *bp = netdev_priv(dev);
        volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
        volatile struct dbdma_regs __iomem *td = bp->tx_dma;
        unsigned short config;
        int i;

        bp->sleeping = 1;

        /* disable rx and tx */
        config = bmread(dev, RXCFG);
        bmwrite(dev, RXCFG, (config & ~RxMACEnable));

        config = bmread(dev, TXCFG);
        bmwrite(dev, TXCFG, (config & ~TxMACEnable));

        bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */

        /* disable rx and tx dma */
        rd->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));   /* clear run bit */
        td->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));   /* clear run bit */

        /* free some skb's */
        XXDEBUG(("bmac: free rx bufs\n"));
        for (i=0; i<N_RX_RING; i++) {
                if (bp->rx_bufs[i] != NULL) {
                        dev_kfree_skb(bp->rx_bufs[i]);
                        bp->rx_bufs[i] = NULL;
                }
        }
        XXDEBUG(("bmac: free tx bufs\n"));
        for (i = 0; i<N_TX_RING; i++) {
                if (bp->tx_bufs[i] != NULL) {
                        dev_kfree_skb(bp->tx_bufs[i]);
                        bp->tx_bufs[i] = NULL;
                }
        }
        XXDEBUG(("bmac: all bufs freed\n"));

        bp->opened = 0;
        disable_irq(dev->irq);
        pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);

        return 0;
}

static void
bmac_start(struct net_device *dev)
{
        struct bmac_data *bp = netdev_priv(dev);
        int i;
        struct sk_buff *skb;
        unsigned long flags;

        if (bp->sleeping)
                return;

        spin_lock_irqsave(&bp->lock, flags);
        while (1) {
                i = bp->tx_fill + 1;
                if (i >= N_TX_RING)
                        i = 0;
                if (i == bp->tx_empty)
                        break;
                skb = skb_dequeue(bp->queue);
                if (skb == NULL)
                        break;
                bmac_transmit_packet(skb, dev);
        }
        spin_unlock_irqrestore(&bp->lock, flags);
}

static int
bmac_output(struct sk_buff *skb, struct net_device *dev)
{
        struct bmac_data *bp = netdev_priv(dev);
        skb_queue_tail(bp->queue, skb);
        bmac_start(dev);
        return NETDEV_TX_OK;
}

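/*
 * Transmit is funnelled through a software queue: ndo_start_xmit just
 * appends the skb and bmac_start() pushes as many queued packets into the
 * DMA ring as will fit.  The same pump runs again from the tx-completion
 * interrupt, so anything left queued while the ring was full goes out then.
 */
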
static void bmac_tx_timeout(struct timer_list *t)
{
        struct bmac_data *bp = from_timer(bp, t, tx_timeout);
        struct net_device *dev = macio_get_drvdata(bp->mdev);
        volatile struct dbdma_regs __iomem *td = bp->tx_dma;
        volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
        volatile struct dbdma_cmd *cp;
        unsigned long flags;
        unsigned short config, oldConfig;
        int i;

        XXDEBUG(("bmac: tx_timeout called\n"));
        spin_lock_irqsave(&bp->lock, flags);
        bp->timeout_active = 0;

        /* update various counters */
        /*     bmac_handle_misc_intrs(bp, 0); */

        cp = &bp->tx_cmds[bp->tx_empty];
        /*      XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */
        /*         le32_to_cpu(td->status), le16_to_cpu(cp->xfer_status), bp->tx_bad_runt, */
        /*         mb->pr, mb->xmtfs, mb->fifofc)); */

        /* turn off both tx and rx and reset the chip */
        config = bmread(dev, RXCFG);
        bmwrite(dev, RXCFG, (config & ~RxMACEnable));
        config = bmread(dev, TXCFG);
        bmwrite(dev, TXCFG, (config & ~TxMACEnable));
        out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
        printk(KERN_ERR "bmac: transmit timeout - resetting\n");
        bmac_enable_and_reset_chip(dev);

        /* restart rx dma */
        cp = bus_to_virt(le32_to_cpu(rd->cmdptr));
        out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
        out_le16(&cp->xfer_status, 0);
        out_le32(&rd->cmdptr, virt_to_bus(cp));
        out_le32(&rd->control, DBDMA_SET(RUN|WAKE));

        /* fix up the transmit side */
        XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n",
                 bp->tx_empty, bp->tx_fill, bp->tx_fullup));
        i = bp->tx_empty;
        ++dev->stats.tx_errors;
        if (i != bp->tx_fill) {
                dev_kfree_skb(bp->tx_bufs[i]);
                bp->tx_bufs[i] = NULL;
                if (++i >= N_TX_RING) i = 0;
                bp->tx_empty = i;
        }
        bp->tx_fullup = 0;
        netif_wake_queue(dev);
        if (i != bp->tx_fill) {
                cp = &bp->tx_cmds[i];
                out_le16(&cp->xfer_status, 0);
                out_le16(&cp->command, OUTPUT_LAST);
                out_le32(&td->cmdptr, virt_to_bus(cp));
                out_le32(&td->control, DBDMA_SET(RUN));
                /*      bmac_set_timeout(dev); */
                XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i));
        }

        /* turn it back on */
        oldConfig = bmread(dev, RXCFG);
        bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
        oldConfig = bmread(dev, TXCFG);
        bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

        spin_unlock_irqrestore(&bp->lock, flags);
}

#if 0
static void dump_dbdma(volatile struct dbdma_cmd *cp,int count)
{
        int i,*ip;

        for (i=0;i< count;i++) {
                ip = (int*)(cp+i);

                printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n",
                       le32_to_cpup(ip+0),
                       le32_to_cpup(ip+1),
                       le32_to_cpup(ip+2),
                       le32_to_cpup(ip+3));
        }
}
#endif

#if 0
static int
bmac_proc_info(char *buffer, char **start, off_t offset, int length)
{
        int len = 0;
        off_t pos   = 0;
        off_t begin = 0;
        int i;

        if (bmac_devs == NULL)
                return -ENOSYS;

        len += sprintf(buffer, "BMAC counters & registers\n");

        for (i = 0; i<N_REG_ENTRIES; i++) {
                len += sprintf(buffer + len, "%s: %#08x\n",
                               reg_entries[i].name,
                               bmread(bmac_devs, reg_entries[i].reg_offset));
                pos = begin + len;

                if (pos < offset) {
                        len = 0;
                        begin = pos;
                }

                if (pos > offset+length) break;
        }

        *start = buffer + (offset - begin);
        len -= (offset - begin);

        if (len > length) len = length;

        return len;
}
#endif

static int bmac_remove(struct macio_dev *mdev)
{
        struct net_device *dev = macio_get_drvdata(mdev);
        struct bmac_data *bp = netdev_priv(dev);

        unregister_netdev(dev);

        free_irq(dev->irq, dev);
        free_irq(bp->tx_dma_intr, dev);
        free_irq(bp->rx_dma_intr, dev);

        iounmap((void __iomem *)dev->base_addr);
        iounmap(bp->tx_dma);
        iounmap(bp->rx_dma);

        macio_release_resources(mdev);

        free_netdev(dev);

        return 0;
}

static const struct of_device_id bmac_match[] =
{
        {
        .name           = "bmac",
        .data           = (void *)0,
        },
        {
        .type           = "network",
        .compatible     = "bmac+",
        .data           = (void *)1,
        },
        {},
};
MODULE_DEVICE_TABLE (of, bmac_match);

static struct macio_driver bmac_driver =
{
        .driver = {
                .name           = "bmac",
                .owner          = THIS_MODULE,
                .of_match_table = bmac_match,
        },
        .probe          = bmac_probe,
        .remove         = bmac_remove,
#ifdef CONFIG_PM
        .suspend        = bmac_suspend,
        .resume         = bmac_resume,
#endif
};

static int __init bmac_init(void)
{
        if (bmac_emergency_rxbuf == NULL) {
                bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL);
                if (bmac_emergency_rxbuf == NULL)
                        return -ENOMEM;
        }

        return macio_register_driver(&bmac_driver);
}

static void __exit bmac_exit(void)
{
        macio_unregister_driver(&bmac_driver);

        kfree(bmac_emergency_rxbuf);
        bmac_emergency_rxbuf = NULL;
}

MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
MODULE_DESCRIPTION("PowerMac BMAC ethernet driver.");
MODULE_LICENSE("GPL");

module_init(bmac_init);
module_exit(bmac_exit);