/* drivers/net/bmac.c — Linux 2.3.21 (mirrored source; gitweb header removed) */
1 /*
2 * Network device driver for the BMAC ethernet controller on
3 * Apple Powermacs. Assumes it's under a DBDMA controller.
5 * Copyright (C) 1998 Randy Gobbel.
6 */
7 #include <linux/config.h>
8 #include <linux/module.h>
9 #include <linux/kernel.h>
10 #include <linux/netdevice.h>
11 #include <linux/etherdevice.h>
12 #include <linux/delay.h>
13 #include <linux/string.h>
14 #include <linux/timer.h>
15 #include <linux/proc_fs.h>
16 #include <asm/prom.h>
17 #include <asm/dbdma.h>
18 #include <asm/io.h>
19 #include <asm/page.h>
20 #include <asm/pgtable.h>
21 #include <asm/feature.h>
22 #ifdef CONFIG_PMAC_PBOOK
23 #include <linux/adb.h>
24 #include <linux/pmu.h>
25 #include <asm/irq.h>
26 #endif
27 #include "bmac.h"
29 #define trunc_page(x) ((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
30 #define round_page(x) trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))
33 * CRC polynomial - used in working out multicast filter bits.
35 #define ENET_CRCPOLY 0x04c11db7
37 /* switch to use multicast code lifted from sunhme driver */
38 #define SUNHME_MULTICAST
40 #define N_RX_RING 64
41 #define N_TX_RING 32
42 #define MAX_TX_ACTIVE 1
43 #define ETHERCRC 4
44 #define ETHERMINPACKET 64
45 #define ETHERMTU 1500
46 #define RX_BUFLEN (ETHERMTU + 14 + ETHERCRC + 2)
47 #define TX_TIMEOUT HZ /* 1 second */
49 /* Bits in transmit DMA status */
50 #define TX_DMA_ERR 0x80
52 #define XXDEBUG(args)
54 struct bmac_data {
55 /* volatile struct bmac *bmac; */
56 struct sk_buff_head *queue;
57 volatile struct dbdma_regs *tx_dma;
58 int tx_dma_intr;
59 volatile struct dbdma_regs *rx_dma;
60 int rx_dma_intr;
61 volatile struct dbdma_cmd *tx_cmds; /* xmit dma command list */
62 volatile struct dbdma_cmd *rx_cmds; /* recv dma command list */
63 struct device_node *node;
64 struct sk_buff *rx_bufs[N_RX_RING];
65 int rx_fill;
66 int rx_empty;
67 struct sk_buff *tx_bufs[N_TX_RING];
68 int tx_fill;
69 int tx_empty;
70 unsigned char tx_fullup;
71 struct net_device_stats stats;
72 struct timer_list tx_timeout;
73 int timeout_active;
74 int reset_and_enabled;
75 int rx_allocated;
76 int tx_allocated;
77 unsigned short hash_use_count[64];
78 unsigned short hash_table_mask[4];
81 typedef struct bmac_reg_entry {
82 char *name;
83 unsigned short reg_offset;
84 } bmac_reg_entry_t;
86 #define N_REG_ENTRIES 31
88 bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = {
89 {"MEMADD", MEMADD},
90 {"MEMDATAHI", MEMDATAHI},
91 {"MEMDATALO", MEMDATALO},
92 {"TXPNTR", TXPNTR},
93 {"RXPNTR", RXPNTR},
94 {"IPG1", IPG1},
95 {"IPG2", IPG2},
96 {"ALIMIT", ALIMIT},
97 {"SLOT", SLOT},
98 {"PALEN", PALEN},
99 {"PAPAT", PAPAT},
100 {"TXSFD", TXSFD},
101 {"JAM", JAM},
102 {"TXCFG", TXCFG},
103 {"TXMAX", TXMAX},
104 {"TXMIN", TXMIN},
105 {"PAREG", PAREG},
106 {"DCNT", DCNT},
107 {"NCCNT", NCCNT},
108 {"NTCNT", NTCNT},
109 {"EXCNT", EXCNT},
110 {"LTCNT", LTCNT},
111 {"TXSM", TXSM},
112 {"RXCFG", RXCFG},
113 {"RXMAX", RXMAX},
114 {"RXMIN", RXMIN},
115 {"FRCNT", FRCNT},
116 {"AECNT", AECNT},
117 {"FECNT", FECNT},
118 {"RXSM", RXSM},
119 {"RXCV", RXCV}
122 struct net_device *bmac_devs = NULL;
123 static int is_bmac_plus;
125 #ifdef CONFIG_PMAC_PBOOK
126 int bmac_sleep_notify(struct pmu_sleep_notifier *self, int when);
127 static struct pmu_sleep_notifier bmac_sleep_notifier = {
128 bmac_sleep_notify, SLEEP_LEVEL_NET,
130 #endif
132 #if 0
134 * If we can't get a skbuff when we need it, we use this area for DMA.
136 static unsigned char dummy_buf[RX_BUFLEN];
137 #endif
140 * Number of bytes of private data per BMAC: allow enough for
141 * the rx and tx dma commands plus a branch dma command each,
142 * and another 16 bytes to allow us to align the dma command
143 * buffers on a 16 byte boundary.
145 #define PRIV_BYTES (sizeof(struct bmac_data) \
146 + (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
147 + sizeof(struct sk_buff_head))
149 static unsigned char bitrev(unsigned char b);
150 static int bmac_open(struct net_device *dev);
151 static int bmac_close(struct net_device *dev);
152 static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
153 static struct net_device_stats *bmac_stats(struct net_device *dev);
154 static void bmac_set_multicast(struct net_device *dev);
155 static int bmac_reset_and_enable(struct net_device *dev, int enable);
156 static void bmac_start_chip(struct net_device *dev);
157 static int bmac_init_chip(struct net_device *dev);
158 static void bmac_init_registers(struct net_device *dev);
159 static void bmac_reset_chip(struct net_device *dev);
160 static int bmac_set_address(struct net_device *dev, void *addr);
161 static void bmac_misc_intr(int irq, void *dev_id, struct pt_regs *regs);
162 static void bmac_txdma_intr(int irq, void *dev_id, struct pt_regs *regs);
163 static void bmac_rxdma_intr(int irq, void *dev_id, struct pt_regs *regs);
164 static void bmac_set_timeout(struct net_device *dev);
165 static void bmac_tx_timeout(unsigned long data);
166 static int bmac_proc_info ( char *buffer, char **start, off_t offset, int length, int dummy);
167 static int bmac_output(struct sk_buff *skb, struct net_device *dev);
168 static void bmac_start(struct net_device *dev);
170 #define DBDMA_SET(x) ( ((x) | (x) << 16) )
171 #define DBDMA_CLEAR(x) ( (x) << 16)
/* Store a 32-bit value to a little-endian DBDMA register (byte-reversed store). */
static __inline__ void
dbdma_st32(volatile unsigned long *a, unsigned long x)
{
	__asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
}
/* Load a 32-bit value from a little-endian DBDMA register (byte-reversed load). */
static __inline__ unsigned long
dbdma_ld32(volatile unsigned long *a)
{
	unsigned long val;

	__asm__ volatile ("lwbrx %0,0,%1" : "=r" (val) : "r" (a));
	return val;
}
188 void
189 dbdma_stop(volatile struct dbdma_regs *dmap)
191 dbdma_st32((volatile unsigned long *)&dmap->control, DBDMA_CLEAR(RUN) | DBDMA_SET(FLUSH));
192 eieio();
194 while (dbdma_ld32((volatile unsigned long *)&dmap->status) & (ACTIVE|FLUSH))
195 eieio();
198 static void
199 dbdma_continue(volatile struct dbdma_regs *dmap)
201 dbdma_st32((volatile unsigned long *)&dmap->control,
202 DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD));
203 eieio();
206 static void
207 dbdma_reset(volatile struct dbdma_regs *dmap)
209 dbdma_st32((volatile unsigned long *)&dmap->control,
210 DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN));
211 eieio();
212 while (dbdma_ld32((volatile unsigned long *)&dmap->status) & RUN) eieio();
215 static void
216 dbdma_setcmd(volatile struct dbdma_cmd *cp,
217 unsigned short cmd, unsigned count, unsigned long addr,
218 unsigned long cmd_dep)
220 out_le16(&cp->command, cmd);
221 out_le16(&cp->req_count, count);
222 out_le32(&cp->phy_addr, addr);
223 out_le32(&cp->cmd_dep, cmd_dep);
224 out_le16(&cp->xfer_status, 0);
225 out_le16(&cp->res_count, 0);
228 static __inline__
229 void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data )
231 out_le16((void *)dev->base_addr + reg_offset, data);
235 static __inline__
236 volatile unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
238 return in_le16((void *)dev->base_addr + reg_offset);
241 static void
242 bmac_reset_chip(struct net_device *dev)
244 struct bmac_data *bp = (struct bmac_data *) dev->priv;
245 volatile struct dbdma_regs *rd = bp->rx_dma;
246 volatile struct dbdma_regs *td = bp->tx_dma;
248 dbdma_reset(rd);
249 dbdma_reset(td);
251 feature_set(bp->node, FEATURE_BMac_IO_enable);
252 udelay(10000);
253 feature_set(bp->node, FEATURE_BMac_reset);
254 udelay(10000);
255 feature_clear(bp->node, FEATURE_BMac_reset);
256 udelay(10000);
259 #define MIFDELAY udelay(10)
261 static unsigned int
262 bmac_mif_readbits(struct net_device *dev, int nb)
264 unsigned int val = 0;
266 while (--nb >= 0) {
267 bmwrite(dev, MIFCSR, 0);
268 MIFDELAY;
269 if (bmread(dev, MIFCSR) & 8)
270 val |= 1 << nb;
271 bmwrite(dev, MIFCSR, 1);
272 MIFDELAY;
274 bmwrite(dev, MIFCSR, 0);
275 MIFDELAY;
276 bmwrite(dev, MIFCSR, 1);
277 MIFDELAY;
278 return val;
281 static void
282 bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb)
284 int b;
286 while (--nb >= 0) {
287 b = (val & (1 << nb))? 6: 4;
288 bmwrite(dev, MIFCSR, b);
289 MIFDELAY;
290 bmwrite(dev, MIFCSR, b|1);
291 MIFDELAY;
295 static unsigned int
296 bmac_mif_read(struct net_device *dev, unsigned int addr)
298 unsigned int val;
300 bmwrite(dev, MIFCSR, 4);
301 MIFDELAY;
302 bmac_mif_writebits(dev, ~0U, 32);
303 bmac_mif_writebits(dev, 6, 4);
304 bmac_mif_writebits(dev, addr, 10);
305 bmwrite(dev, MIFCSR, 2);
306 MIFDELAY;
307 bmwrite(dev, MIFCSR, 1);
308 MIFDELAY;
309 val = bmac_mif_readbits(dev, 17);
310 bmwrite(dev, MIFCSR, 4);
311 MIFDELAY;
312 return val;
315 static void
316 bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val)
318 bmwrite(dev, MIFCSR, 4);
319 MIFDELAY;
320 bmac_mif_writebits(dev, ~0U, 32);
321 bmac_mif_writebits(dev, 5, 4);
322 bmac_mif_writebits(dev, addr, 10);
323 bmac_mif_writebits(dev, 2, 2);
324 bmac_mif_writebits(dev, val, 16);
325 bmac_mif_writebits(dev, 3, 2);
328 static void
329 bmac_init_registers(struct net_device *dev)
331 struct bmac_data *bp = (struct bmac_data *) dev->priv;
332 volatile unsigned short regValue;
333 unsigned short *pWord16;
334 int i;
336 /* XXDEBUG(("bmac: enter init_registers\n")); */
338 bmwrite(dev, RXRST, RxResetValue);
339 bmwrite(dev, TXRST, TxResetBit);
341 i = 100;
342 do {
343 --i;
344 udelay(10000);
345 regValue = bmread(dev, TXRST); /* wait for reset to clear..acknowledge */
346 } while ((regValue & TxResetBit) && i > 0);
348 if (!is_bmac_plus) {
349 regValue = bmread(dev, XCVRIF);
350 regValue |= ClkBit | SerialMode | COLActiveLow;
351 bmwrite(dev, XCVRIF, regValue);
352 udelay(10000);
355 bmwrite(dev, RSEED, (unsigned short)0x1968);
357 regValue = bmread(dev, XIFC);
358 regValue |= TxOutputEnable;
359 bmwrite(dev, XIFC, regValue);
361 bmread(dev, PAREG);
363 /* set collision counters to 0 */
364 bmwrite(dev, NCCNT, 0);
365 bmwrite(dev, NTCNT, 0);
366 bmwrite(dev, EXCNT, 0);
367 bmwrite(dev, LTCNT, 0);
369 /* set rx counters to 0 */
370 bmwrite(dev, FRCNT, 0);
371 bmwrite(dev, LECNT, 0);
372 bmwrite(dev, AECNT, 0);
373 bmwrite(dev, FECNT, 0);
374 bmwrite(dev, RXCV, 0);
376 /* set tx fifo information */
377 bmwrite(dev, TXTH, 4); /* 4 octets before tx starts */
379 bmwrite(dev, TXFIFOCSR, 0); /* first disable txFIFO */
380 bmwrite(dev, TXFIFOCSR, TxFIFOEnable );
382 /* set rx fifo information */
383 bmwrite(dev, RXFIFOCSR, 0); /* first disable rxFIFO */
384 bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
386 //bmwrite(dev, TXCFG, TxMACEnable); /* TxNeverGiveUp maybe later */
387 bmread(dev, STATUS); /* read it just to clear it */
389 /* zero out the chip Hash Filter registers */
390 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
391 bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
392 bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
393 bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
394 bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
396 pWord16 = (unsigned short *)dev->dev_addr;
397 bmwrite(dev, MADD0, *pWord16++);
398 bmwrite(dev, MADD1, *pWord16++);
399 bmwrite(dev, MADD2, *pWord16);
401 bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);
403 bmwrite(dev, INTDISABLE, EnableNormal);
405 return;
408 #if 0
409 static void
410 bmac_disable_interrupts(struct net_device *dev)
412 bmwrite(dev, INTDISABLE, DisableAll);
415 static void
416 bmac_enable_interrupts(struct net_device *dev)
418 bmwrite(dev, INTDISABLE, EnableNormal);
420 #endif
423 static void
424 bmac_start_chip(struct net_device *dev)
426 struct bmac_data *bp = (struct bmac_data *) dev->priv;
427 volatile struct dbdma_regs *rd = bp->rx_dma;
428 unsigned short oldConfig;
430 /* enable rx dma channel */
431 dbdma_continue(rd);
433 oldConfig = bmread(dev, TXCFG);
434 bmwrite(dev, TXCFG, oldConfig | TxMACEnable );
436 /* turn on rx plus any other bits already on (promiscuous possibly) */
437 oldConfig = bmread(dev, RXCFG);
438 bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
439 udelay(20000);
442 static void
443 bmac_init_phy(struct net_device *dev)
445 unsigned int addr;
447 printk(KERN_DEBUG "phy registers:");
448 for (addr = 0; addr < 32; ++addr) {
449 if ((addr & 7) == 0)
450 printk("\n" KERN_DEBUG);
451 printk(" %.4x", bmac_mif_read(dev, addr));
453 printk("\n");
454 if (is_bmac_plus) {
455 unsigned int capable, ctrl;
457 ctrl = bmac_mif_read(dev, 0);
458 capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1;
459 if (bmac_mif_read(dev, 4) != capable
460 || (ctrl & 0x1000) == 0) {
461 bmac_mif_write(dev, 4, capable);
462 bmac_mif_write(dev, 0, 0x1200);
463 } else
464 bmac_mif_write(dev, 0, 0x1000);
/* Bring up the PHY, then program the MAC registers.  Always returns 1. */
static int
bmac_init_chip(struct net_device *dev)
{
	bmac_init_phy(dev);
	bmac_init_registers(dev);
	return 1;
}
476 #ifdef CONFIG_PMAC_PBOOK
478 bmac_sleep_notify(struct pmu_sleep_notifier *self, int when)
480 struct bmac_data *bp;
482 if (bmac_devs == 0)
483 return PBOOK_SLEEP_OK;
485 bp = (struct bmac_data *) bmac_devs->priv;
487 switch (when) {
488 case PBOOK_SLEEP_REQUEST:
489 break;
490 case PBOOK_SLEEP_REJECT:
491 break;
492 case PBOOK_SLEEP_NOW:
493 /* prolly should wait for dma to finish & turn off the chip */
494 disable_irq(bmac_devs->irq);
495 disable_irq(bp->tx_dma_intr);
496 disable_irq(bp->rx_dma_intr);
497 feature_set(bp->node, FEATURE_BMac_reset);
498 udelay(10000);
499 feature_clear(bp->node, FEATURE_BMac_IO_enable);
500 udelay(10000);
501 break;
502 case PBOOK_WAKE:
503 /* see if this is enough */
504 bmac_reset_and_enable(bmac_devs, 1);
505 enable_irq(bmac_devs->irq);
506 enable_irq(bp->tx_dma_intr);
507 enable_irq(bp->rx_dma_intr);
508 break;
510 return PBOOK_SLEEP_OK;
512 #endif
514 static int bmac_set_address(struct net_device *dev, void *addr)
516 unsigned char *p = addr;
517 unsigned short *pWord16;
518 unsigned long flags;
519 int i;
521 XXDEBUG(("bmac: enter set_address\n"));
522 save_flags(flags); cli();
524 for (i = 0; i < 6; ++i) {
525 dev->dev_addr[i] = p[i];
527 /* load up the hardware address */
528 pWord16 = (unsigned short *)dev->dev_addr;
529 bmwrite(dev, MADD0, *pWord16++);
530 bmwrite(dev, MADD1, *pWord16++);
531 bmwrite(dev, MADD2, *pWord16);
533 restore_flags(flags);
534 XXDEBUG(("bmac: exit set_address\n"));
535 return 0;
538 static inline void bmac_set_timeout(struct net_device *dev)
540 struct bmac_data *bp = (struct bmac_data *) dev->priv;
541 unsigned long flags;
543 save_flags(flags);
544 cli();
545 if (bp->timeout_active)
546 del_timer(&bp->tx_timeout);
547 bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
548 bp->tx_timeout.function = bmac_tx_timeout;
549 bp->tx_timeout.data = (unsigned long) dev;
550 add_timer(&bp->tx_timeout);
551 bp->timeout_active = 1;
552 restore_flags(flags);
555 static void
556 bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
558 void *vaddr;
559 unsigned long baddr;
560 unsigned long len;
562 len = skb->len;
563 vaddr = skb->data;
564 baddr = virt_to_bus(vaddr);
566 dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0);
569 static void
570 bmac_construct_rxbuff(unsigned char *addr, volatile struct dbdma_cmd *cp)
572 dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN, virt_to_bus(addr), 0);
/* Bit-reverse one byte of an ethernet hardware address. */
static unsigned char
bitrev(unsigned char b)
{
	int result = 0, bit;

	for (bit = 0; bit < 8; bit++) {
		result = (result << 1) | (b & 1);
		b >>= 1;
	}
	return result;
}
587 static int
588 bmac_init_tx_ring(struct bmac_data *bp)
590 volatile struct dbdma_regs *td = bp->tx_dma;
592 memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd));
594 bp->tx_empty = 0;
595 bp->tx_fill = 0;
596 bp->tx_fullup = 0;
598 /* put a branch at the end of the tx command list */
599 dbdma_setcmd(&bp->tx_cmds[N_TX_RING],
600 (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds));
602 /* reset tx dma */
603 dbdma_reset(td);
604 out_le32(&td->wait_sel, 0x00200020);
605 out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds));
607 return 1;
611 static int
612 bmac_init_rx_ring(struct bmac_data *bp)
614 volatile struct dbdma_regs *rd = bp->rx_dma;
615 int i;
617 /* initialize list of sk_buffs for receiving and set up recv dma */
618 if (!bp->rx_allocated) {
619 for (i = 0; i < N_RX_RING; i++) {
620 bp->rx_bufs[i] = dev_alloc_skb(RX_BUFLEN+2);
621 if (bp->rx_bufs[i] == NULL) return 0;
622 skb_reserve(bp->rx_bufs[i], 2);
624 bp->rx_allocated = 1;
627 memset((char *)bp->rx_cmds, 0, (N_RX_RING+1) * sizeof(struct dbdma_cmd));
628 for (i = 0; i < N_RX_RING; i++)
629 bmac_construct_rxbuff(bp->rx_bufs[i]->data, &bp->rx_cmds[i]);
631 bp->rx_empty = 0;
632 bp->rx_fill = i;
634 /* Put a branch back to the beginning of the receive command list */
635 dbdma_setcmd(&bp->rx_cmds[N_RX_RING],
636 (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds));
638 /* start rx dma */
639 dbdma_reset(rd);
640 out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds));
642 return 1;
646 static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev)
648 struct bmac_data *bp = (struct bmac_data *) dev->priv;
649 volatile struct dbdma_regs *td = bp->tx_dma;
650 int i;
652 /* see if there's a free slot in the tx ring */
653 /* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */
654 /* bp->tx_empty, bp->tx_fill)); */
655 i = bp->tx_fill + 1;
656 if (i >= N_TX_RING) i = 0;
657 if (i == bp->tx_empty) {
658 dev->tbusy = 1;
659 bp->tx_fullup = 1;
660 XXDEBUG(("bmac_transmit_packet: tx ring full\n"));
661 return -1; /* can't take it at the moment */
664 dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0);
666 bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]);
668 bp->tx_bufs[bp->tx_fill] = skb;
669 bp->tx_fill = i;
671 bp->stats.tx_bytes += skb->len;
673 dbdma_continue(td);
675 return 0;
678 static int rxintcount = 0;
680 static void bmac_rxdma_intr(int irq, void *dev_id, struct pt_regs *regs)
682 struct net_device *dev = (struct net_device *) dev_id;
683 struct bmac_data *bp = (struct bmac_data *) dev->priv;
684 volatile struct dbdma_regs *rd = bp->rx_dma;
685 volatile struct dbdma_cmd *cp;
686 int i, nb, stat;
687 struct sk_buff *skb;
688 unsigned int residual;
689 int last;
690 unsigned long flags;
692 save_flags(flags); cli();
694 if (++rxintcount < 10) {
695 XXDEBUG(("bmac_rxdma_intr\n"));
698 last = -1;
699 i = bp->rx_empty;
701 while (1) {
702 cp = &bp->rx_cmds[i];
703 stat = ld_le16(&cp->xfer_status);
704 residual = ld_le16(&cp->res_count);
705 if ((stat & ACTIVE) == 0) break;
706 nb = RX_BUFLEN - residual - 2;
707 if (nb < (ETHERMINPACKET - ETHERCRC)) {
708 skb = NULL;
709 bp->stats.rx_length_errors++;
710 bp->stats.rx_errors++;
711 } else skb = bp->rx_bufs[i];
712 if (skb != NULL) {
713 nb -= ETHERCRC;
714 skb_put(skb, nb);
715 skb->dev = dev;
716 skb->protocol = eth_type_trans(skb, dev);
717 netif_rx(skb);
718 bp->rx_bufs[i] = dev_alloc_skb(RX_BUFLEN+2);
719 skb_reserve(bp->rx_bufs[i], 2);
720 bmac_construct_rxbuff(bp->rx_bufs[i]->data, &bp->rx_cmds[i]);
721 ++bp->stats.rx_packets;
722 bp->stats.rx_bytes += nb;
723 } else {
724 ++bp->stats.rx_dropped;
726 st_le16(&cp->res_count, 0);
727 st_le16(&cp->xfer_status, 0);
728 last = i;
729 if (++i >= N_RX_RING) i = 0;
732 if (last != -1) {
733 bp->rx_fill = last;
734 bp->rx_empty = i;
737 restore_flags(flags);
739 dbdma_continue(rd);
741 if (rxintcount < 10) {
742 XXDEBUG(("bmac_rxdma_intr done\n"));
746 static int txintcount = 0;
748 static void bmac_txdma_intr(int irq, void *dev_id, struct pt_regs *regs)
750 struct net_device *dev = (struct net_device *) dev_id;
751 struct bmac_data *bp = (struct bmac_data *) dev->priv;
752 volatile struct dbdma_cmd *cp;
753 int stat;
754 unsigned long flags;
756 save_flags(flags); cli();
758 if (txintcount++ < 10) {
759 XXDEBUG(("bmac_txdma_intr\n"));
762 /* del_timer(&bp->tx_timeout); */
763 /* bp->timeout_active = 0; */
765 while (1) {
766 cp = &bp->tx_cmds[bp->tx_empty];
767 stat = ld_le16(&cp->xfer_status);
768 if (txintcount < 10) {
769 XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat));
771 if (!(stat & ACTIVE)) break;
773 if (bp->tx_bufs[bp->tx_empty]) {
774 ++bp->stats.tx_packets;
775 dev_kfree_skb(bp->tx_bufs[bp->tx_empty]);
777 bp->tx_bufs[bp->tx_empty] = NULL;
778 bp->tx_fullup = 0;
779 dev->tbusy = 0;
780 /* XXDEBUG(("bmac_intr: cleared tbusy, empty=%d fill=%d\n", */
781 /* i, bp->tx_fill)); */
782 mark_bh(NET_BH);
783 if (++bp->tx_empty >= N_TX_RING) bp->tx_empty = 0;
784 if (bp->tx_empty == bp->tx_fill) break;
787 restore_flags(flags);
789 if (txintcount < 10) {
790 XXDEBUG(("bmac_txdma_intr done->bmac_start\n"));
793 bmac_start(dev);
796 static struct net_device_stats *bmac_stats(struct net_device *dev)
798 struct bmac_data *p = (struct bmac_data *) dev->priv;
800 return &p->stats;
803 #ifndef SUNHME_MULTICAST
804 /* Real fast bit-reversal algorithm, 6-bit values */
805 static int reverse6[64] = {
806 0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
807 0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
808 0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
809 0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
810 0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
811 0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
812 0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
813 0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
816 static unsigned int
817 crc416(unsigned int curval, unsigned short nxtval)
819 register unsigned int counter, cur = curval, next = nxtval;
820 register int high_crc_set, low_data_set;
822 /* Swap bytes */
823 next = ((next & 0x00FF) << 8) | (next >> 8);
825 /* Compute bit-by-bit */
826 for (counter = 0; counter < 16; ++counter) {
827 /* is high CRC bit set? */
828 if ((cur & 0x80000000) == 0) high_crc_set = 0;
829 else high_crc_set = 1;
831 cur = cur << 1;
833 if ((next & 0x0001) == 0) low_data_set = 0;
834 else low_data_set = 1;
836 next = next >> 1;
838 /* do the XOR */
839 if (high_crc_set ^ low_data_set) cur = cur ^ ENET_CRCPOLY;
841 return cur;
/* 32-bit CRC over the 6-byte station address, fed 16 bits at a time. */
static unsigned int
bmac_crc(unsigned short *address)
{
	unsigned int crc;

	XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
	crc = crc416(0xffffffff, address[0]);	/* address bits 47 - 32 */
	crc = crc416(crc, address[1]);		/* address bits 31 - 16 */
	crc = crc416(crc, address[2]);		/* address bits 15 - 0 */
	return crc;
}
858 * Add requested mcast addr to BMac's hash table filter.
862 static void
863 bmac_addhash(struct bmac_data *bp, unsigned char *addr)
865 unsigned int crc;
866 unsigned short mask;
868 if (!(*addr)) return;
869 crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
870 crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
871 if (bp->hash_use_count[crc]++) return; /* This bit is already set */
872 mask = crc % 16;
873 mask = (unsigned char)1 << mask;
874 bp->hash_use_count[crc/16] |= mask;
877 static void
878 bmac_removehash(struct bmac_data *bp, unsigned char *addr)
880 unsigned int crc;
881 unsigned char mask;
883 /* Now, delete the address from the filter copy, as indicated */
884 crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
885 crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
886 if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
887 if (--bp->hash_use_count[crc]) return; /* That bit is still in use */
888 mask = crc % 16;
889 mask = ((unsigned char)1 << mask) ^ 0xffff; /* To turn off bit */
890 bp->hash_table_mask[crc/16] &= mask;
894 * Sync the adapter with the software copy of the multicast mask
895 * (logical address filter).
898 static void
899 bmac_rx_off(struct net_device *dev)
901 unsigned short rx_cfg;
903 rx_cfg = bmread(dev, RXCFG);
904 rx_cfg &= ~RxMACEnable;
905 bmwrite(dev, RXCFG, rx_cfg);
906 do {
907 rx_cfg = bmread(dev, RXCFG);
908 } while (rx_cfg & RxMACEnable);
911 unsigned short
912 bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable)
914 unsigned short rx_cfg;
916 rx_cfg = bmread(dev, RXCFG);
917 rx_cfg |= RxMACEnable;
918 if (hash_enable) rx_cfg |= RxHashFilterEnable;
919 else rx_cfg &= ~RxHashFilterEnable;
920 if (promisc_enable) rx_cfg |= RxPromiscEnable;
921 else rx_cfg &= ~RxPromiscEnable;
922 bmwrite(dev, RXRST, RxResetValue);
923 bmwrite(dev, RXFIFOCSR, 0); /* first disable rxFIFO */
924 bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
925 bmwrite(dev, RXCFG, rx_cfg );
926 return rx_cfg;
929 static void
930 bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp)
932 bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
933 bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
934 bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
935 bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
938 #if 0
939 static void
940 bmac_add_multi(struct net_device *dev,
941 struct bmac_data *bp, unsigned char *addr)
943 /* XXDEBUG(("bmac: enter bmac_add_multi\n")); */
944 bmac_addhash(bp, addr);
945 bmac_rx_off(dev);
946 bmac_update_hash_table_mask(dev, bp);
947 bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
948 /* XXDEBUG(("bmac: exit bmac_add_multi\n")); */
951 static void
952 bmac_remove_multi(struct net_device *dev,
953 struct bmac_data *bp, unsigned char *addr)
955 bmac_removehash(bp, addr);
956 bmac_rx_off(dev);
957 bmac_update_hash_table_mask(dev, bp);
958 bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
960 #endif
962 /* Set or clear the multicast filter for this adaptor.
963 num_addrs == -1 Promiscuous mode, receive all packets
964 num_addrs == 0 Normal mode, clear multicast list
965 num_addrs > 0 Multicast mode, receive normal and MC packets, and do
966 best-effort filtering.
968 static void bmac_set_multicast(struct net_device *dev)
970 struct dev_mc_list *dmi;
971 struct bmac_data *bp = (struct bmac_data *) dev->priv;
972 int num_addrs = dev->mc_count;
973 unsigned short rx_cfg;
974 int i;
976 XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));
978 if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
979 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
980 bmac_update_hash_table_mask(dev, bp);
981 rx_cfg = bmac_rx_on(dev, 1, 0);
982 XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n"));
983 } else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) {
984 rx_cfg = bmread(dev, RXCFG);
985 rx_cfg |= RxPromiscEnable;
986 bmwrite(dev, RXCFG, rx_cfg);
987 rx_cfg = bmac_rx_on(dev, 0, 1);
988 XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg));
989 } else {
990 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
991 for (i=0; i<64; i++) bp->hash_use_count[i] = 0;
992 if (num_addrs == 0) {
993 rx_cfg = bmac_rx_on(dev, 0, 0);
994 XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
995 } else {
996 for (dmi=dev->mc_list; dmi!=NULL; dmi=dmi->next)
997 bmac_addhash(bp, dmi->dmi_addr);
998 bmac_update_hash_table_mask(dev, bp);
999 rx_cfg = bmac_rx_on(dev, 1, 0);
1000 XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
1003 /* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */
1005 #else /* ifdef SUNHME_MULTICAST */
1007 /* The version of set_multicast below was lifted from sunhme.c */
1009 #define CRC_POLYNOMIAL_BE 0x04c11db7UL /* Ethernet CRC, big endian */
1010 #define CRC_POLYNOMIAL_LE 0xedb88320UL /* Ethernet CRC, little endian */
1012 static void bmac_set_multicast(struct net_device *dev)
1014 struct dev_mc_list *dmi = dev->mc_list;
1015 char *addrs;
1016 int i, j, bit, byte;
1017 unsigned short rx_cfg;
1018 u32 crc, poly = CRC_POLYNOMIAL_LE;
1020 /* Let the transmits drain. */
1021 /* while(dev->tbusy) schedule(); */
1023 /* Lock out others. */
1024 /* set_bit(0, (void *) &dev->tbusy); */
1026 if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
1027 bmwrite(dev, BHASH0, 0xffff);
1028 bmwrite(dev, BHASH1, 0xffff);
1029 bmwrite(dev, BHASH2, 0xffff);
1030 bmwrite(dev, BHASH3, 0xffff);
1031 } else if(dev->flags & IFF_PROMISC) {
1032 rx_cfg = bmread(dev, RXCFG);
1033 rx_cfg |= RxPromiscEnable;
1034 bmwrite(dev, RXCFG, rx_cfg);
1035 } else {
1036 u16 hash_table[4];
1038 rx_cfg = bmread(dev, RXCFG);
1039 rx_cfg &= ~RxPromiscEnable;
1040 bmwrite(dev, RXCFG, rx_cfg);
1042 for(i = 0; i < 4; i++) hash_table[i] = 0;
1044 for(i = 0; i < dev->mc_count; i++) {
1045 addrs = dmi->dmi_addr;
1046 dmi = dmi->next;
1048 if(!(*addrs & 1))
1049 continue;
1051 crc = 0xffffffffU;
1052 for(byte = 0; byte < 6; byte++) {
1053 for(bit = *addrs++, j = 0; j < 8; j++, bit >>= 1) {
1054 int test;
1056 test = ((bit ^ crc) & 0x01);
1057 crc >>= 1;
1058 if(test)
1059 crc = crc ^ poly;
1062 crc >>= 26;
1063 hash_table[crc >> 4] |= 1 << (crc & 0xf);
1065 bmwrite(dev, BHASH0, hash_table[0]);
1066 bmwrite(dev, BHASH1, hash_table[1]);
1067 bmwrite(dev, BHASH2, hash_table[2]);
1068 bmwrite(dev, BHASH3, hash_table[3]);
1071 /* Let us get going again. */
1072 /* dev->tbusy = 0; */
1074 #endif /* SUNHME_MULTICAST */
1076 static int miscintcount = 0;
1078 static void bmac_misc_intr(int irq, void *dev_id, struct pt_regs *regs)
1080 struct net_device *dev = (struct net_device *) dev_id;
1081 struct bmac_data *bp = (struct bmac_data *)dev->priv;
1082 unsigned int status = bmread(dev, STATUS);
1083 if (miscintcount++ < 10) {
1084 XXDEBUG(("bmac_misc_intr\n"));
1086 /* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */
1087 /* bmac_txdma_intr_inner(irq, dev_id, regs); */
1088 /* if (status & FrameReceived) bp->stats.rx_dropped++; */
1089 if (status & RxErrorMask) bp->stats.rx_errors++;
1090 if (status & RxCRCCntExp) bp->stats.rx_crc_errors++;
1091 if (status & RxLenCntExp) bp->stats.rx_length_errors++;
1092 if (status & RxOverFlow) bp->stats.rx_over_errors++;
1093 if (status & RxAlignCntExp) bp->stats.rx_frame_errors++;
1095 /* if (status & FrameSent) bp->stats.tx_dropped++; */
1096 if (status & TxErrorMask) bp->stats.tx_errors++;
1097 if (status & TxUnderrun) bp->stats.tx_fifo_errors++;
1098 if (status & TxNormalCollExp) bp->stats.collisions++;
1102 * Procedure for reading EEPROM
1104 #define SROMAddressLength 5
1105 #define DataInOn 0x0008
1106 #define DataInOff 0x0000
1107 #define Clk 0x0002
1108 #define ChipSelect 0x0001
1109 #define SDIShiftCount 3
1110 #define SD0ShiftCount 2
1111 #define DelayValue 1000 /* number of microseconds */
1112 #define SROMStartOffset 10 /* this is in words */
1113 #define SROMReadCount 3 /* number of words to read from SROM */
1114 #define SROMAddressBits 6
1115 #define EnetAddressOffset 20
1117 static unsigned char
1118 bmac_clock_out_bit(struct net_device *dev)
1120 unsigned short data;
1121 unsigned short val;
1123 bmwrite(dev, SROMCSR, ChipSelect | Clk);
1124 udelay(DelayValue);
1126 data = bmread(dev, SROMCSR);
1127 udelay(DelayValue);
1128 val = (data >> SD0ShiftCount) & 1;
1130 bmwrite(dev, SROMCSR, ChipSelect);
1131 udelay(DelayValue);
1133 return val;
1136 static void
1137 bmac_clock_in_bit(struct net_device *dev, unsigned int val)
1139 unsigned short data;
1141 if (val != 0 && val != 1) return;
1143 data = (val << SDIShiftCount);
1144 bmwrite(dev, SROMCSR, data | ChipSelect );
1145 udelay(DelayValue);
1147 bmwrite(dev, SROMCSR, data | ChipSelect | Clk );
1148 udelay(DelayValue);
1150 bmwrite(dev, SROMCSR, data | ChipSelect);
1151 udelay(DelayValue);
1154 static void
1155 reset_and_select_srom(struct net_device *dev)
1157 /* first reset */
1158 bmwrite(dev, SROMCSR, 0);
1159 udelay(DelayValue);
1161 /* send it the read command (110) */
1162 bmac_clock_in_bit(dev, 1);
1163 bmac_clock_in_bit(dev, 1);
1164 bmac_clock_in_bit(dev, 0);
1167 static unsigned short
1168 read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len)
1170 unsigned short data, val;
1171 int i;
1173 /* send out the address we want to read from */
1174 for (i = 0; i < addr_len; i++) {
1175 val = addr >> (addr_len-i-1);
1176 bmac_clock_in_bit(dev, val & 1);
1179 /* Now read in the 16-bit data */
1180 data = 0;
1181 for (i = 0; i < 16; i++) {
1182 val = bmac_clock_out_bit(dev);
1183 data <<= 1;
1184 data |= val;
1186 bmwrite(dev, SROMCSR, 0);
1188 return data;
1192 * It looks like Cogent and SMC use different methods for calculating
1193 * checksums. What a pain..
1196 static int
1197 bmac_verify_checksum(struct net_device *dev)
1199 unsigned short data, storedCS;
1201 reset_and_select_srom(dev);
1202 data = read_srom(dev, 3, SROMAddressBits);
1203 storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00);
1205 return 0;
1209 static void
1210 bmac_get_station_address(struct net_device *dev, unsigned char *ea)
1212 int i;
1213 unsigned short data;
1215 for (i = 0; i < 6; i++)
1217 reset_and_select_srom(dev);
1218 data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
1219 ea[2*i] = bitrev(data & 0x0ff);
1220 ea[2*i+1] = bitrev((data >> 8) & 0x0ff);
1224 static int bmac_reset_and_enable(struct net_device *dev, int enable)
1226 struct bmac_data *bp = dev->priv;
1227 unsigned long flags;
1228 struct sk_buff *skb;
1229 unsigned char *data;
1231 save_flags(flags); cli();
1232 bp->reset_and_enabled = 0;
1233 bmac_reset_chip(dev);
1234 if (enable) {
1235 if (!bmac_init_tx_ring(bp) || !bmac_init_rx_ring(bp)) return 0;
1236 if (!bmac_init_chip(dev)) return 0;
1237 bmac_start_chip(dev);
1238 bmwrite(dev, INTDISABLE, EnableNormal);
1239 bp->reset_and_enabled = 1;
1242 * It seems that the bmac can't receive until it's transmitted
1243 * a packet. So we give it a dummy packet to transmit.
1245 skb = dev_alloc_skb(ETHERMINPACKET);
1246 data = skb_put(skb, ETHERMINPACKET);
1247 memset(data, 0, ETHERMINPACKET);
1248 memcpy(data, dev->dev_addr, 6);
1249 memcpy(data+6, dev->dev_addr, 6);
1250 bmac_transmit_packet(skb, dev);
1252 restore_flags(flags);
1253 return 1;
bmac_probe(struct net_device *dev)
{
	/*
	 * Probe for BMAC (or BMAC+) ethernet cells in the Open Firmware
	 * device tree, one device per call: map the MAC and two DBDMA
	 * register blocks, read the station address, request the three
	 * interrupts, and fill in the net_device methods.
	 * Returns 0 on success or a negative errno.
	 */
	int j, rev;
	struct bmac_data *bp;
	struct device_node *bmacs;
	unsigned char *addr;
	/* scan the OF tree once; next_bmac remembers where the next probe resumes */
	static struct device_node *all_bmacs = NULL, *next_bmac;

	if (all_bmacs == NULL) {
		/* first call: look for plain "bmac" nodes, else "bmac+" ones */
		all_bmacs = find_devices("bmac");
		is_bmac_plus = 0;
		if (all_bmacs == NULL) {
			all_bmacs = find_compatible_devices("network", "bmac+");
			if (all_bmacs)
				is_bmac_plus = 1;
		}
		next_bmac = all_bmacs;
	}
	bmacs = next_bmac;
	if (bmacs == NULL) return -ENODEV;	/* no (more) devices */
	next_bmac = bmacs->next;

	if (bmac_devs == 0) {
		bmac_devs = dev; /* KLUDGE!! */
#ifdef CONFIG_PMAC_PBOOK
		/* get sleep/wake callbacks on PowerBooks */
		pmu_register_sleep_notifier(&bmac_sleep_notifier);
#endif
	}

	/* expect register block + tx DMA + rx DMA, and one interrupt for each */
	if (bmacs->n_addrs != 3 || bmacs->n_intrs != 3) {
		printk(KERN_ERR "can't use BMAC %s: expect 3 addrs and 3 intrs\n",
		       bmacs->full_name);
		return -EINVAL;
	}

	if (dev == NULL) {
		/* allocate dev plus PRIV_BYTES of private data in one lump */
		dev = init_etherdev(NULL, PRIV_BYTES);
		bmac_devs = dev; /*KLUDGE!!*/
	} else {
		/* XXX this doesn't look right (but it's never used :-) */
		dev->priv = kmalloc(PRIV_BYTES, GFP_KERNEL);
		if (dev->priv == 0) return -ENOMEM;
	}

#ifdef MODULE
	bmac_devs = dev;
#endif

	/* addrs[0] is the MAC cell register block */
	dev->base_addr = (unsigned long)
		ioremap(bmacs->addrs[0].address, bmacs->addrs[0].size);
	dev->irq = bmacs->intrs[0].line;

	/* keep the chip quiet until we're ready */
	bmwrite(dev, INTDISABLE, DisableAll);

	addr = get_property(bmacs, "mac-address", NULL);
	if (addr == NULL) {
		addr = get_property(bmacs, "local-mac-address", NULL);
		if (addr == NULL) {
			printk(KERN_ERR "Can't get mac-address for BMAC at %lx\n",
			       dev->base_addr);
			return -EAGAIN;
		}
	}

	printk(KERN_INFO "%s: BMAC%s at", dev->name, (is_bmac_plus? "+": ""));
	/* presumably addresses beginning 00:A0 are stored bit-reversed by
	   some firmware -- TODO confirm; rev selects the bitrev() path */
	rev = addr[0] == 0 && addr[1] == 0xA0;
	for (j = 0; j < 6; ++j) {
		dev->dev_addr[j] = rev? bitrev(addr[j]): addr[j];
		printk("%c%.2x", (j? ':': ' '), dev->dev_addr[j]);
	}
	XXDEBUG((", base_addr=%#0lx", dev->base_addr));
	printk("\n");

	dev->open = bmac_open;
	dev->stop = bmac_close;
	dev->hard_start_xmit = bmac_output;
	dev->get_stats = bmac_stats;
	dev->set_multicast_list = bmac_set_multicast;
	dev->set_mac_address = bmac_set_address;

	/* NOTE(review): this writes into the OF property data that `addr`
	   still points at -- confirm that clobbering it is intended */
	bmac_get_station_address(dev, addr);
	if (bmac_verify_checksum(dev) != 0) return -EINVAL;

	ether_setup(dev);

	bp = (struct bmac_data *) dev->priv;
	memset(bp, 0, sizeof(struct bmac_data));
	/* map the two DBDMA channels: addrs[1] = transmit, addrs[2] = receive */
	bp->tx_dma = (volatile struct dbdma_regs *)
		ioremap(bmacs->addrs[1].address, bmacs->addrs[1].size);
	bp->tx_dma_intr = bmacs->intrs[1].line;
	bp->rx_dma = (volatile struct dbdma_regs *)
		ioremap(bmacs->addrs[2].address, bmacs->addrs[2].size);
	bp->rx_dma_intr = bmacs->intrs[2].line;

	/* DMA command lists live in the tail of the private area, aligned
	   for the DBDMA engine; one extra slot each (the +1s) beyond the
	   ring sizes */
	bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1);
	bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1;

	/* the skb backlog queue sits just after the rx command list */
	bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
	skb_queue_head_init(bp->queue);

	bp->node = bmacs;
	memset(&bp->stats, 0, sizeof(bp->stats));
	memset((char *) bp->tx_cmds, 0,
	       (N_TX_RING + N_RX_RING + 2) * sizeof(struct dbdma_cmd));
	/*     	init_timer(&bp->tx_timeout); */
	/*     	bp->timeout_active = 0; */

	/* NOTE(review): the failure paths below leak the ioremap()s and any
	   interrupts already requested */
	if (request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev)) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
		return -EAGAIN;
	}
	if (request_irq(bmacs->intrs[1].line, bmac_txdma_intr, 0, "BMAC-txdma",
			dev)) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", bmacs->intrs[1].line);
		return -EAGAIN;
	}
	if (request_irq(bmacs->intrs[2].line, bmac_rxdma_intr, 0, "BMAC-rxdma",
			dev)) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", bmacs->intrs[2].line);
		return -EAGAIN;
	}

	/* reset but leave the chip disabled until bmac_open() */
	if (!bmac_reset_and_enable(dev, 0)) return -ENOMEM;

#ifdef CONFIG_PROC_FS
	proc_net_register(&(struct proc_dir_entry) {
		PROC_NET_BMAC, 4, "bmac",
		S_IFREG | S_IRUGO, 1, 0, 0,
		0, &proc_net_inode_operations,
		bmac_proc_info
	});
#endif

	return 0;
}
/*
 * net_device open hook: reset and enable the chip, mark the interface
 * up and running, and pin the module while the device is open.
 * NOTE(review): the return value of bmac_reset_and_enable() is not
 * checked, so open reports success even if ring setup failed.
 */
static int bmac_open(struct net_device *dev)
{
	/* XXDEBUG(("bmac: enter open\n")); */
	/* reset the chip */
	bmac_reset_and_enable(dev, 1);

	dev->flags |= IFF_UP | IFF_RUNNING;

	MOD_INC_USE_COUNT;
	return 0;
}
/*
 * net_device stop hook: quiesce the hardware, then release buffers.
 * Order matters: MAC rx/tx are disabled first, then all chip
 * interrupts, then both DBDMA channels are stopped, and only then are
 * the ring skbs freed.
 */
static int bmac_close(struct net_device *dev)
{
	struct bmac_data *bp = (struct bmac_data *) dev->priv;
	volatile struct dbdma_regs *rd = bp->rx_dma;
	volatile struct dbdma_regs *td = bp->tx_dma;
	unsigned short config;
	int i;

	dev->flags &= ~(IFF_UP | IFF_RUNNING);

	/* disable rx and tx */
	config = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, (config & ~RxMACEnable));

	config = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, (config & ~TxMACEnable));

	bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */

	/* disable rx and tx dma */
	st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
	st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */

	/* free some skb's */
	XXDEBUG(("bmac: free rx bufs\n"));
	for (i=0; i<N_RX_RING; i++) {
		if (bp->rx_bufs[i] != NULL) {
			dev_kfree_skb(bp->rx_bufs[i]);
			bp->rx_bufs[i] = NULL;
		}
	}
	bp->rx_allocated = 0;
	XXDEBUG(("bmac: free tx bufs\n"));
	for (i = 0; i<N_TX_RING; i++) {
		if (bp->tx_bufs[i] != NULL) {
			dev_kfree_skb(bp->tx_bufs[i]);
			bp->tx_bufs[i] = NULL;
		}
	}
	/* chip must go through bmac_reset_and_enable() before next use */
	bp->reset_and_enabled = 0;
	XXDEBUG(("bmac: all bufs freed\n"));

	MOD_DEC_USE_COUNT;

	return 0;
}
1452 static void
1453 bmac_start(struct net_device *dev)
1455 struct bmac_data *bp = dev->priv;
1456 int i;
1457 struct sk_buff *skb;
1458 unsigned long flags;
1460 save_flags(flags); cli();
1461 while (1) {
1462 i = bp->tx_fill + 1;
1463 if (i >= N_TX_RING) i = 0;
1464 if (i == bp->tx_empty) break;
1465 skb = skb_dequeue(bp->queue);
1466 if (skb == NULL) break;
1467 bmac_transmit_packet(skb, dev);
1469 restore_flags(flags);
1472 static int
1473 bmac_output(struct sk_buff *skb, struct net_device *dev)
1475 struct bmac_data *bp = dev->priv;
1476 skb_queue_tail(bp->queue, skb);
1477 bmac_start(dev);
1478 return 0;
/*
 * Transmit watchdog (timer callback; `data` is the net_device).
 * Recovery sequence: disable MAC rx/tx, stop the tx DBDMA channel,
 * reset the chip, restart the rx DMA from its current command,
 * discard the stuck tx buffer and restart the tx DMA from the next
 * descriptor, then re-enable the MAC.  Runs with interrupts blocked.
 */
static void bmac_tx_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct bmac_data *bp = (struct bmac_data *) dev->priv;
	volatile struct dbdma_regs *td = bp->tx_dma;
	volatile struct dbdma_regs *rd = bp->rx_dma;
	volatile struct dbdma_cmd *cp;
	unsigned long flags;
	unsigned short config, oldConfig;
	int i;

	XXDEBUG(("bmac: tx_timeout called\n"));
	save_flags(flags); cli();
	bp->timeout_active = 0;

	/* update various counters */
	/* bmac_handle_misc_intrs(bp, 0); */

	cp = &bp->tx_cmds[bp->tx_empty];
	/* XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */
	/*	   ld_le32(&td->status), ld_le16(&cp->xfer_status), bp->tx_bad_runt, */
	/*	   mb->pr, mb->xmtfs, mb->fifofc)); */

	/* turn off both tx and rx and reset the chip */
	config = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, (config & ~RxMACEnable));
	config = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, (config & ~TxMACEnable));
	out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
	printk(KERN_ERR "bmac: transmit timeout - resetting\n");
	bmac_reset_chip(dev);

	/* restart rx dma */
	/* resume from wherever the channel's command pointer was left */
	cp = bus_to_virt(ld_le32(&rd->cmdptr));
	out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
	out_le16(&cp->xfer_status, 0);
	out_le32(&rd->cmdptr, virt_to_bus(cp));
	out_le32(&rd->control, DBDMA_SET(RUN|WAKE));

	/* fix up the transmit side */
	XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n",
		 bp->tx_empty, bp->tx_fill, bp->tx_fullup));
	i = bp->tx_empty;
	++bp->stats.tx_errors;
	if (i != bp->tx_fill) {
		/* drop the packet that timed out and advance past it */
		dev_kfree_skb(bp->tx_bufs[i]);
		bp->tx_bufs[i] = NULL;
		if (++i >= N_TX_RING) i = 0;
		bp->tx_empty = i;
	}
	bp->tx_fullup = 0;
	dev->tbusy = 0;
	mark_bh(NET_BH);		/* let the stack resubmit queued work */
	XXDEBUG((KERN_DEBUG "bmac: clearing tbusy\n"));
	if (i != bp->tx_fill) {
		/* more packets pending: restart tx DMA at the next descriptor */
		cp = &bp->tx_cmds[i];
		out_le16(&cp->xfer_status, 0);
		out_le16(&cp->command, OUTPUT_LAST);
		out_le32(&td->cmdptr, virt_to_bus(cp));
		out_le32(&td->control, DBDMA_SET(RUN));
		/* bmac_set_timeout(dev); */
		XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i));
	}

	/* turn it back on */
	oldConfig = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
	oldConfig = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

	restore_flags(flags);
}
#if 0
/* Debug helper (compiled out): hex-dump `count` DBDMA command
   descriptors starting at `cp`, one line of four 32-bit words each. */
static void dump_dbdma(volatile struct dbdma_cmd *cp,int count)
{
	int i,*ip;

	for (i=0;i< count;i++) {
		ip = (int*)(cp+i);
		printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n",
		       ld_le32(ip+0),
		       ld_le32(ip+1),
		       ld_le32(ip+2),
		       ld_le32(ip+3));
	}
}
#endif
/*
 * /proc/net/bmac read handler (old-style get_info interface): dump the
 * chip registers named in reg_entries[] for the first probed device.
 * The pos/begin/offset/length juggling implements the classic procfs
 * windowing protocol so reads at a non-zero file offset return the
 * right slice of the generated text.
 */
static int
bmac_proc_info(char *buffer, char **start, off_t offset, int length, int dummy)
{
	int len = 0;
	off_t pos = 0;
	off_t begin = 0;
	int i;

	if (bmac_devs == NULL) return (-ENOSYS);	/* no device probed */

	len += sprintf(buffer, "BMAC counters & registers\n");

	for (i = 0; i<N_REG_ENTRIES; i++) {
		len += sprintf(buffer + len, "%s: %#08x\n",
			       reg_entries[i].name,
			       bmread(bmac_devs, reg_entries[i].reg_offset));
		pos = begin + len;

		/* everything so far is before the window: discard it */
		if (pos < offset) {
			len = 0;
			begin = pos;
		}

		/* past the end of the window: stop generating */
		if (pos > offset+length) break;
	}

	*start = buffer + (offset - begin);
	len -= (offset - begin);

	if (len > length) len = length;

	return len;
}
1606 #ifdef MODULE
1608 MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
1609 MODULE_DESCRIPTION("PowerMac BMAC ethernet driver.");
1611 int init_module(void)
1613 int res;
1615 if(bmac_devs != NULL)
1616 return -EBUSY;
1617 res = bmac_probe(NULL);
1618 return res;
1621 void cleanup_module(void)
1623 struct bmac_data *bp;
1625 if (bmac_devs == 0)
1626 return;
1628 bp = (struct bmac_data *) bmac_devs->priv;
1629 unregister_netdev(bmac_devs);
1631 free_irq(bmac_devs->irq, bmac_misc_intr);
1632 free_irq(bp->tx_dma_intr, bmac_txdma_intr);
1633 free_irq(bp->rx_dma_intr, bmac_rxdma_intr);
1635 #ifdef CONFIG_PMAC_PBOOK
1636 pmu_unregister_sleep_notifier(&bmac_sleep_notifier);
1637 #endif
1638 kfree(bmac_devs);
1639 bmac_devs = NULL;
1642 #endif