/*
 * Network device driver for the MACE ethernet controller on
 * Apple Powermacs.  Assumes it's under a DBDMA controller.
 *
 * Copyright (C) 1996 Paul Mackerras.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/crc32.h>
#include <linux/spinlock.h>
#include <linux/bitrev.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/macio.h>

#include "mace.h"
static int port_aaui = -1;

#define N_RX_RING	8
#define N_TX_RING	6
#define MAX_TX_ACTIVE	1
#define NCMDS_TX	1	/* dma commands per element in tx ring */
#define RX_BUFLEN	(ETH_FRAME_LEN + 8)
#define TX_TIMEOUT	HZ	/* 1 second */
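/*
 * The 8 bytes of slack in RX_BUFLEN appear to cover the trailer the
 * chip DMAs in after each frame: 4 receive-status octets, plus the
 * 4-byte FCS when the chip has not auto-stripped it (see the
 * accounting in mace_rxdma_intr() below).
 */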
/* Chip rev needs workaround on HW & multicast addr change */
#define BROKEN_ADDRCHG_REV	0x0941

/* Bits in transmit DMA status */
#define TX_DMA_ERR	0x80
struct mace_data {
    volatile struct mace __iomem *mace;
    volatile struct dbdma_regs __iomem *tx_dma;
    int tx_dma_intr;
    volatile struct dbdma_regs __iomem *rx_dma;
    int rx_dma_intr;
    volatile struct dbdma_cmd *tx_cmds;	/* xmit dma command list */
    volatile struct dbdma_cmd *rx_cmds;	/* recv dma command list */
    struct sk_buff *rx_bufs[N_RX_RING];
    int rx_fill;
    int rx_empty;
    struct sk_buff *tx_bufs[N_TX_RING];
    int tx_fill;
    int tx_empty;
    unsigned char maccc;
    unsigned char tx_fullup;
    unsigned char tx_active;
    unsigned char tx_bad_runt;
    struct timer_list tx_timeout;
    int timeout_active;
    int port_aaui;
    int chipid;
    struct macio_dev *mdev;
    spinlock_t lock;
};
/*
 * Number of bytes of private data per MACE: allow enough for
 * the rx and tx dma commands plus a branch dma command each,
 * and another 16 bytes to allow us to align the dma command
 * buffers on a 16 byte boundary.
 */
#define PRIV_BYTES	(sizeof(struct mace_data) \
	+ (N_RX_RING + NCMDS_TX * N_TX_RING + 3) * sizeof(struct dbdma_cmd))
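/*
 * Worked example, assuming the ring sizes above and the 16-byte
 * struct dbdma_cmd: N_RX_RING + NCMDS_TX * N_TX_RING + 3 =
 * 8 + 6 + 3 = 17 commands, i.e. 272 bytes of command space.
 * One command's worth of slack (16 bytes) is what lets DBDMA_ALIGN()
 * in mace_probe() round the command buffer up to a 16-byte boundary;
 * the other two extra commands are the branches that close the rx
 * and tx command lists into loops.
 */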
static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static void mace_reset(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_txdma_intr(int irq, void *dev_id);
static irqreturn_t mace_rxdma_intr(int irq, void *dev_id);
static void mace_set_timeout(struct net_device *dev);
static void mace_tx_timeout(unsigned long data);
static inline void dbdma_reset(volatile struct dbdma_regs __iomem *dma);
static inline void mace_clean_rings(struct mace_data *mp);
static void __mace_set_address(struct net_device *dev, void *addr);
/*
 * If we can't get a skbuff when we need it, we use this area for DMA.
 */
static unsigned char *dummy_buf;
static const struct net_device_ops mace_netdev_ops = {
    .ndo_open		 = mace_open,
    .ndo_stop		 = mace_close,
    .ndo_start_xmit	 = mace_xmit_start,
    .ndo_set_rx_mode	 = mace_set_multicast,
    .ndo_set_mac_address = mace_set_address,
    .ndo_change_mtu	 = eth_change_mtu,
    .ndo_validate_addr	 = eth_validate_addr,
};
static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
    struct device_node *mace = macio_get_of_node(mdev);
    struct net_device *dev;
    struct mace_data *mp;
    const unsigned char *addr;
    int j, rev, rc = -EBUSY;

    if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
	printk(KERN_ERR "can't use MACE %s: need 3 addrs and 3 irqs\n",
	       mace->full_name);
	return -ENODEV;
    }

    addr = of_get_property(mace, "mac-address", NULL);
    if (addr == NULL) {
	addr = of_get_property(mace, "local-mac-address", NULL);
	if (addr == NULL) {
	    printk(KERN_ERR "Can't get mac-address for MACE %s\n",
		   mace->full_name);
	    return -ENODEV;
	}
    }

    /*
     * lazy allocate the driver-wide dummy buffer. (Note that we
     * never have more than one MACE in the system anyway)
     */
    if (dummy_buf == NULL) {
	dummy_buf = kmalloc(RX_BUFLEN+2, GFP_KERNEL);
	if (dummy_buf == NULL)
	    return -ENOMEM;
    }

    if (macio_request_resources(mdev, "mace")) {
	printk(KERN_ERR "MACE: can't request IO resources !\n");
	return -EBUSY;
    }

    dev = alloc_etherdev(PRIV_BYTES);
    if (!dev) {
	rc = -ENOMEM;
	goto err_release;
    }
    SET_NETDEV_DEV(dev, &mdev->ofdev.dev);

    mp = netdev_priv(dev);
    mp->mdev = mdev;
    macio_set_drvdata(mdev, dev);

    dev->base_addr = macio_resource_start(mdev, 0);
    mp->mace = ioremap(dev->base_addr, 0x1000);
    if (mp->mace == NULL) {
	printk(KERN_ERR "MACE: can't map IO resources !\n");
	rc = -ENOMEM;
	goto err_free;
    }
    dev->irq = macio_irq(mdev, 0);

    rev = addr[0] == 0 && addr[1] == 0xA0;
    for (j = 0; j < 6; ++j) {
	dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
    }
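    /*
     * The test above treats an address beginning 00:A0 as stored
     * bit-reversed (token-ring bit order): 00:05:... is an Apple OUI
     * and bitrev8(0x05) == 0xa0, so that prefix is used to detect the
     * reversal and undo it.  Presumably some firmware revisions store
     * the address that way.
     */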
    mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) |
		 in_8(&mp->mace->chipid_lo);

    mp = netdev_priv(dev);
    mp->maccc = ENXMT | ENRCV;

    mp->tx_dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
    if (mp->tx_dma == NULL) {
	printk(KERN_ERR "MACE: can't map TX DMA resources !\n");
	rc = -ENOMEM;
	goto err_unmap_io;
    }
    mp->tx_dma_intr = macio_irq(mdev, 1);

    mp->rx_dma = ioremap(macio_resource_start(mdev, 2), 0x1000);
    if (mp->rx_dma == NULL) {
	printk(KERN_ERR "MACE: can't map RX DMA resources !\n");
	rc = -ENOMEM;
	goto err_unmap_tx_dma;
    }
    mp->rx_dma_intr = macio_irq(mdev, 2);

    mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1);
    mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1;

    memset((char *) mp->tx_cmds, 0,
	   (NCMDS_TX*N_TX_RING + N_RX_RING + 2) * sizeof(struct dbdma_cmd));
    init_timer(&mp->tx_timeout);
    spin_lock_init(&mp->lock);
    mp->timeout_active = 0;

    if (port_aaui >= 0)
	mp->port_aaui = port_aaui;
    else {
	/* Apple Network Server uses the AAUI port */
	if (of_machine_is_compatible("AAPL,ShinerESB"))
	    mp->port_aaui = 1;
	else {
#ifdef CONFIG_MACE_AAUI_PORT
	    mp->port_aaui = 1;
#else
	    mp->port_aaui = 0;
#endif
	}
    }

    dev->netdev_ops = &mace_netdev_ops;

    /*
     * Most of what is below could be moved to mace_open()
     */
    mace_reset(dev);

    rc = request_irq(dev->irq, mace_interrupt, 0, "MACE", dev);
    if (rc) {
	printk(KERN_ERR "MACE: can't get irq %d\n", dev->irq);
	goto err_unmap_rx_dma;
    }
    rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0, "MACE-txdma", dev);
    if (rc) {
	printk(KERN_ERR "MACE: can't get irq %d\n", mp->tx_dma_intr);
	goto err_free_irq;
    }
    rc = request_irq(mp->rx_dma_intr, mace_rxdma_intr, 0, "MACE-rxdma", dev);
    if (rc) {
	printk(KERN_ERR "MACE: can't get irq %d\n", mp->rx_dma_intr);
	goto err_free_tx_irq;
    }

    rc = register_netdev(dev);
    if (rc) {
	printk(KERN_ERR "MACE: Cannot register net device, aborting.\n");
	goto err_free_rx_irq;
    }

    printk(KERN_INFO "%s: MACE at %pM, chip revision %d.%d\n",
	   dev->name, dev->dev_addr,
	   mp->chipid >> 8, mp->chipid & 0xff);

    return 0;

 err_free_rx_irq:
    free_irq(macio_irq(mdev, 2), dev);
 err_free_tx_irq:
    free_irq(macio_irq(mdev, 1), dev);
 err_free_irq:
    free_irq(macio_irq(mdev, 0), dev);
 err_unmap_rx_dma:
    iounmap(mp->rx_dma);
 err_unmap_tx_dma:
    iounmap(mp->tx_dma);
 err_unmap_io:
    iounmap(mp->mace);
 err_free:
    free_netdev(dev);
 err_release:
    macio_release_resources(mdev);

    return rc;
}
static int mace_remove(struct macio_dev *mdev)
{
    struct net_device *dev = macio_get_drvdata(mdev);
    struct mace_data *mp;

    macio_set_drvdata(mdev, NULL);

    mp = netdev_priv(dev);

    unregister_netdev(dev);

    free_irq(dev->irq, dev);
    free_irq(mp->tx_dma_intr, dev);
    free_irq(mp->rx_dma_intr, dev);

    iounmap(mp->rx_dma);
    iounmap(mp->tx_dma);
    iounmap(mp->mace);

    free_netdev(dev);

    macio_release_resources(mdev);

    return 0;
}
static void dbdma_reset(volatile struct dbdma_regs __iomem *dma)
{
    int i;

    out_le32(&dma->control, (WAKE|FLUSH|PAUSE|RUN) << 16);

    /*
     * Yes this looks peculiar, but apparently it needs to be this
     * way on some machines.
     */
    for (i = 200; i > 0; --i)
	if (le32_to_cpu(dma->control) & RUN)
	    udelay(1);
}
static void mace_reset(struct net_device *dev)
{
    struct mace_data *mp = netdev_priv(dev);
    volatile struct mace __iomem *mb = mp->mace;
    int i;

    /* soft-reset the chip */
    i = 200;
    while (--i) {
	out_8(&mb->biucc, SWRST);
	if (in_8(&mb->biucc) & SWRST) {
	    udelay(10);
	    continue;
	}
	break;
    }
    if (!i) {
	printk(KERN_ERR "mace: cannot reset chip!\n");
	return;
    }

    out_8(&mb->imr, 0xff);	/* disable all intrs for now */
    i = in_8(&mb->ir);
    out_8(&mb->maccc, 0);	/* turn off tx, rx */

    out_8(&mb->biucc, XMTSP_64);
    out_8(&mb->utr, RTRD);
    out_8(&mb->fifocc, RCVFW_32 | XMTFW_16 | XMTFWU | RCVFWU | XMTBRST);
    out_8(&mb->xmtfc, AUTO_PAD_XMIT); /* auto-pad short frames */
    out_8(&mb->rcvfc, 0);

    /* load up the hardware address */
    __mace_set_address(dev, dev->dev_addr);

    /* clear the multicast filter */
    if (mp->chipid == BROKEN_ADDRCHG_REV)
	out_8(&mb->iac, LOGADDR);
    else {
	out_8(&mb->iac, ADDRCHG | LOGADDR);
	while ((in_8(&mb->iac) & ADDRCHG) != 0)
	    ;
    }
    for (i = 0; i < 8; ++i)
	out_8(&mb->ladrf, 0);

    /* done changing address */
    if (mp->chipid != BROKEN_ADDRCHG_REV)
	out_8(&mb->iac, 0);

    if (mp->port_aaui)
	out_8(&mb->plscc, PORTSEL_AUI + ENPLSIO);
    else
	out_8(&mb->plscc, PORTSEL_GPSI + ENPLSIO);
}
static void __mace_set_address(struct net_device *dev, void *addr)
{
    struct mace_data *mp = netdev_priv(dev);
    volatile struct mace __iomem *mb = mp->mace;
    unsigned char *p = addr;
    int i;

    /* load up the hardware address */
    if (mp->chipid == BROKEN_ADDRCHG_REV)
	out_8(&mb->iac, PHYADDR);
    else {
	out_8(&mb->iac, ADDRCHG | PHYADDR);
	while ((in_8(&mb->iac) & ADDRCHG) != 0)
	    ;
    }
    for (i = 0; i < 6; ++i)
	out_8(&mb->padr, dev->dev_addr[i] = p[i]);
    if (mp->chipid != BROKEN_ADDRCHG_REV)
	out_8(&mb->iac, 0);
}
static int mace_set_address(struct net_device *dev, void *addr)
{
    struct mace_data *mp = netdev_priv(dev);
    volatile struct mace __iomem *mb = mp->mace;
    unsigned long flags;

    spin_lock_irqsave(&mp->lock, flags);

    __mace_set_address(dev, addr);

    /* note: setting ADDRCHG clears ENRCV */
    out_8(&mb->maccc, mp->maccc);

    spin_unlock_irqrestore(&mp->lock, flags);
    return 0;
}
static inline void mace_clean_rings(struct mace_data *mp)
{
    int i;

    /* free some skb's */
    for (i = 0; i < N_RX_RING; ++i) {
	if (mp->rx_bufs[i] != NULL) {
	    dev_kfree_skb(mp->rx_bufs[i]);
	    mp->rx_bufs[i] = NULL;
	}
    }
    for (i = mp->tx_empty; i != mp->tx_fill; ) {
	dev_kfree_skb(mp->tx_bufs[i]);
	if (++i >= N_TX_RING)
	    i = 0;
    }
}
static int mace_open(struct net_device *dev)
{
    struct mace_data *mp = netdev_priv(dev);
    volatile struct mace __iomem *mb = mp->mace;
    volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_cmd *cp;
    int i;
    struct sk_buff *skb;
    unsigned char *data;

    /* reset the chip */
    mace_reset(dev);

    /* initialize list of sk_buffs for receiving and set up recv dma */
    mace_clean_rings(mp);
    memset((char *)mp->rx_cmds, 0, N_RX_RING * sizeof(struct dbdma_cmd));
    cp = mp->rx_cmds;
    for (i = 0; i < N_RX_RING - 1; ++i) {
	skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
	if (!skb) {
	    data = dummy_buf;
	} else {
	    skb_reserve(skb, 2);	/* so IP header lands on 4-byte bdry */
	    data = skb->data;
	}
	mp->rx_bufs[i] = skb;
	cp->req_count = cpu_to_le16(RX_BUFLEN);
	cp->command = cpu_to_le16(INPUT_LAST + INTR_ALWAYS);
	cp->phy_addr = cpu_to_le32(virt_to_bus(data));
	cp->xfer_status = 0;
	++cp;
    }
    mp->rx_bufs[i] = NULL;
    cp->command = cpu_to_le16(DBDMA_STOP);
    mp->rx_fill = i;
    mp->rx_empty = 0;

    /* Put a branch back to the beginning of the receive command list */
    ++cp;
    cp->command = cpu_to_le16(DBDMA_NOP + BR_ALWAYS);
    cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->rx_cmds));
    /* start rx dma */
    out_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
    out_le32(&rd->cmdptr, virt_to_bus(mp->rx_cmds));
    out_le32(&rd->control, (RUN << 16) | RUN);

    /* put a branch at the end of the tx command list */
    cp = mp->tx_cmds + NCMDS_TX * N_TX_RING;
    cp->command = cpu_to_le16(DBDMA_NOP + BR_ALWAYS);
    cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->tx_cmds));

    /* reset tx dma */
    out_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16);
    out_le32(&td->cmdptr, virt_to_bus(mp->tx_cmds));
    mp->tx_fill = 0;
    mp->tx_empty = 0;
    mp->tx_fullup = 0;
    mp->tx_active = 0;
    mp->tx_bad_runt = 0;

    /* turn it on! */
    out_8(&mb->maccc, mp->maccc);
    /* enable all interrupts except receive interrupts */
    out_8(&mb->imr, RCVINT);

    return 0;
}
static int mace_close(struct net_device *dev)
{
    struct mace_data *mp = netdev_priv(dev);
    volatile struct mace __iomem *mb = mp->mace;
    volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;

    /* disable rx and tx */
    out_8(&mb->maccc, 0);
    out_8(&mb->imr, 0xff);	/* disable all intrs */

    /* disable rx and tx dma */
    rd->control = cpu_to_le32((RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
    td->control = cpu_to_le32((RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */

    mace_clean_rings(mp);

    return 0;
}
static inline void mace_set_timeout(struct net_device *dev)
{
    struct mace_data *mp = netdev_priv(dev);

    if (mp->timeout_active)
	del_timer(&mp->tx_timeout);
    mp->tx_timeout.expires = jiffies + TX_TIMEOUT;
    mp->tx_timeout.function = mace_tx_timeout;
    mp->tx_timeout.data = (unsigned long) dev;
    add_timer(&mp->tx_timeout);
    mp->timeout_active = 1;
}
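/*
 * Note: this is the pre-4.15 timer interface (init_timer() plus an
 * expires/function/data triple); later kernels replace it with
 * timer_setup() and a struct timer_list * callback argument.
 */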
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
    struct mace_data *mp = netdev_priv(dev);
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_cmd *cp, *np;
    unsigned long flags;
    int fill, next, len;

    /* see if there's a free slot in the tx ring */
    spin_lock_irqsave(&mp->lock, flags);
    fill = mp->tx_fill;
    next = fill + 1;
    if (next >= N_TX_RING)
	next = 0;
    if (next == mp->tx_empty) {
	netif_stop_queue(dev);
	mp->tx_fullup = 1;
	spin_unlock_irqrestore(&mp->lock, flags);
	return NETDEV_TX_BUSY;	/* can't take it at the moment */
    }
    spin_unlock_irqrestore(&mp->lock, flags);
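    /*
     * Ring convention: tx_fill == tx_empty means "empty", so one of
     * the N_TX_RING slots always stays unused to distinguish a full
     * ring from an empty one; at most N_TX_RING - 1 frames can be
     * queued at once.
     */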
    /* partially fill in the dma command block */
    len = skb->len;
    if (len > ETH_FRAME_LEN) {
	printk(KERN_DEBUG "mace: xmit frame too long (%d)\n", len);
	len = ETH_FRAME_LEN;
    }
    mp->tx_bufs[fill] = skb;
    cp = mp->tx_cmds + NCMDS_TX * fill;
    cp->req_count = cpu_to_le16(len);
    cp->phy_addr = cpu_to_le32(virt_to_bus(skb->data));

    np = mp->tx_cmds + NCMDS_TX * next;
    out_le16(&np->command, DBDMA_STOP);

    /* poke the tx dma channel */
    spin_lock_irqsave(&mp->lock, flags);
    mp->tx_fill = next;
    if (!mp->tx_bad_runt && mp->tx_active < MAX_TX_ACTIVE) {
	out_le16(&cp->xfer_status, 0);
	out_le16(&cp->command, OUTPUT_LAST);
	out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
	++mp->tx_active;
	mace_set_timeout(dev);
    }
    if (++next >= N_TX_RING)
	next = 0;
    if (next == mp->tx_empty)
	netif_stop_queue(dev);
    spin_unlock_irqrestore(&mp->lock, flags);

    return NETDEV_TX_OK;
}
static void mace_set_multicast(struct net_device *dev)
{
    struct mace_data *mp = netdev_priv(dev);
    volatile struct mace __iomem *mb = mp->mace;
    int i;
    u32 crc;
    unsigned long flags;

    spin_lock_irqsave(&mp->lock, flags);
    mp->maccc &= ~PROM;
    if (dev->flags & IFF_PROMISC) {
	mp->maccc |= PROM;
    } else {
	unsigned char multicast_filter[8];
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_ALLMULTI) {
	    for (i = 0; i < 8; i++)
		multicast_filter[i] = 0xff;
	} else {
	    for (i = 0; i < 8; i++)
		multicast_filter[i] = 0;
	    netdev_for_each_mc_addr(ha, dev) {
		crc = ether_crc_le(6, ha->addr);
		i = crc >> 26;	/* bit number in multicast_filter */
		multicast_filter[i >> 3] |= 1 << (i & 7);
	    }
	}
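	/*
	 * The MACE filters multicast by hashing each address to one of
	 * the 64 bits in its 8-byte logical address filter (ladrf):
	 * the top 6 bits of the little-endian CRC-32 of the address
	 * select the bit.  A minimal sketch of the mapping for a single
	 * address (the all-hosts IPv4 multicast MAC, used here purely
	 * as a hypothetical example), not compiled in:
	 */
#if 0
	{
	    static const u8 maddr[ETH_ALEN] =	/* hypothetical example */
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	    u32 bit = ether_crc_le(ETH_ALEN, maddr) >> 26;	/* 0..63 */

	    multicast_filter[bit >> 3] |= 1 << (bit & 7);
	}
#endif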
615 printk("Multicast filter :");
616 for (i
= 0; i
< 8; i
++)
617 printk("%02x ", multicast_filter
[i
]);
621 if (mp
->chipid
== BROKEN_ADDRCHG_REV
)
622 out_8(&mb
->iac
, LOGADDR
);
624 out_8(&mb
->iac
, ADDRCHG
| LOGADDR
);
625 while ((in_8(&mb
->iac
) & ADDRCHG
) != 0)
628 for (i
= 0; i
< 8; ++i
)
629 out_8(&mb
->ladrf
, multicast_filter
[i
]);
630 if (mp
->chipid
!= BROKEN_ADDRCHG_REV
)
634 out_8(&mb
->maccc
, mp
->maccc
);
635 spin_unlock_irqrestore(&mp
->lock
, flags
);
static void mace_handle_misc_intrs(struct mace_data *mp, int intr,
				   struct net_device *dev)
{
    volatile struct mace __iomem *mb = mp->mace;
    static int mace_babbles, mace_jabbers;

    if (intr & MPCO)
	dev->stats.rx_missed_errors += 256;
    dev->stats.rx_missed_errors += in_8(&mb->mpc);	/* reading clears it */
    if (intr & RNTPCO)
	dev->stats.rx_length_errors += 256;
    dev->stats.rx_length_errors += in_8(&mb->rntpc);	/* reading clears it */
    if (intr & CERR)
	++dev->stats.tx_heartbeat_errors;
    if (intr & BABBLE)
	if (mace_babbles++ < 4)
	    printk(KERN_DEBUG "mace: babbling transmitter\n");
    if (intr & JABBER)
	if (mace_jabbers++ < 4)
	    printk(KERN_DEBUG "mace: jabbering transceiver\n");
}
static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
    struct net_device *dev = (struct net_device *) dev_id;
    struct mace_data *mp = netdev_priv(dev);
    volatile struct mace __iomem *mb = mp->mace;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_cmd *cp;
    int intr, fs, i, stat, x;
    int xcount, dstat;
    unsigned long flags;
    /* static int mace_last_fs, mace_last_xcount; */

    spin_lock_irqsave(&mp->lock, flags);
    intr = in_8(&mb->ir);		/* read interrupt register */
    in_8(&mb->xmtrc);			/* get retries */
    mace_handle_misc_intrs(mp, intr, dev);
    i = mp->tx_empty;
    while (in_8(&mb->pr) & XMTSV) {
	del_timer(&mp->tx_timeout);
	mp->timeout_active = 0;
	/*
	 * Clear any interrupt indication associated with this status
	 * word.  This appears to unlatch any error indication from
	 * the DMA controller.
	 */
	intr = in_8(&mb->ir);
	if (intr != 0)
	    mace_handle_misc_intrs(mp, intr, dev);
	if (mp->tx_bad_runt) {
	    fs = in_8(&mb->xmtfs);
	    mp->tx_bad_runt = 0;
	    out_8(&mb->xmtfc, AUTO_PAD_XMIT);
	    continue;
	}
	dstat = le32_to_cpu(td->status);
	/* stop DMA controller */
	out_le32(&td->control, RUN << 16);
	/*
	 * xcount is the number of complete frames which have been
	 * written to the fifo but for which status has not been read.
	 */
	xcount = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
	if (xcount == 0 || (dstat & DEAD)) {
	    /*
	     * If a packet was aborted before the DMA controller has
	     * finished transferring it, it seems that there are 2 bytes
	     * which are stuck in some buffer somewhere.  These will get
	     * transmitted as soon as we read the frame status (which
	     * reenables the transmit data transfer request).  Turning
	     * off the DMA controller and/or resetting the MACE doesn't
	     * help.  So we disable auto-padding and FCS transmission
	     * so the two bytes will only be a runt packet which should
	     * be ignored by other stations.
	     */
	    out_8(&mb->xmtfc, DXMTFCS);
	}
	fs = in_8(&mb->xmtfs);
	if ((fs & XMTSV) == 0) {
	    printk(KERN_ERR "mace: xmtfs not valid! (fs=%x xc=%d ds=%x)\n",
		   fs, xcount, dstat);
	    mace_reset(dev);
	    /*
	     * XXX mace likes to hang the machine after a xmtfs error.
	     * This is hard to reproduce, resetting *may* help
	     */
	}
	cp = mp->tx_cmds + NCMDS_TX * i;
	stat = le16_to_cpu(cp->xfer_status);
	if ((fs & (UFLO|LCOL|LCAR|RTRY)) || (dstat & DEAD) || xcount == 0) {
	    /*
	     * Check whether there were in fact 2 bytes written to
	     * the transmit FIFO.
	     */
	    udelay(1);
	    x = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
	    if (x != 0) {
		/* there were two bytes with an end-of-packet indication */
		mp->tx_bad_runt = 1;
		mace_set_timeout(dev);
	    } else {
		/*
		 * Either there weren't the two bytes buffered up, or they
		 * didn't have an end-of-packet indication.
		 * We flush the transmit FIFO just in case (by setting the
		 * XMTFWU bit with the transmitter disabled).
		 */
		out_8(&mb->maccc, in_8(&mb->maccc) & ~ENXMT);
		out_8(&mb->fifocc, in_8(&mb->fifocc) | XMTFWU);
		udelay(1);
		out_8(&mb->maccc, in_8(&mb->maccc) | ENXMT);
		out_8(&mb->xmtfc, AUTO_PAD_XMIT);
	    }
	}
	/* dma should have finished */
	if (i == mp->tx_fill) {
	    printk(KERN_DEBUG "mace: tx ring ran out? (fs=%x xc=%d ds=%x)\n",
		   fs, xcount, dstat);
	    continue;
	}
	/* Update stats */
	if (fs & (UFLO|LCOL|LCAR|RTRY)) {
	    ++dev->stats.tx_errors;
	    if (fs & LCAR)
		++dev->stats.tx_carrier_errors;
	    if (fs & (UFLO|LCOL|RTRY))
		++dev->stats.tx_aborted_errors;
	} else {
	    dev->stats.tx_bytes += mp->tx_bufs[i]->len;
	    ++dev->stats.tx_packets;
	}
	dev_kfree_skb_irq(mp->tx_bufs[i]);
	--mp->tx_active;
	if (++i >= N_TX_RING)
	    i = 0;
#if 0
	mace_last_fs = fs;
	mace_last_xcount = xcount;
#endif
    }

    if (i != mp->tx_empty) {
	mp->tx_fullup = 0;
	netif_wake_queue(dev);
    }
    mp->tx_empty = i;
    i += mp->tx_active;
    if (i >= N_TX_RING)
	i -= N_TX_RING;
    if (!mp->tx_bad_runt && i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE) {
	do {
	    /* set up the next one */
	    cp = mp->tx_cmds + NCMDS_TX * i;
	    out_le16(&cp->xfer_status, 0);
	    out_le16(&cp->command, OUTPUT_LAST);
	    ++mp->tx_active;
	    if (++i >= N_TX_RING)
		i = 0;
	} while (i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE);
	out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
	mace_set_timeout(dev);
    }
    spin_unlock_irqrestore(&mp->lock, flags);
    return IRQ_HANDLED;
}
static void mace_tx_timeout(unsigned long data)
{
    struct net_device *dev = (struct net_device *) data;
    struct mace_data *mp = netdev_priv(dev);
    volatile struct mace __iomem *mb = mp->mace;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
    volatile struct dbdma_cmd *cp;
    unsigned long flags;
    int i;

    spin_lock_irqsave(&mp->lock, flags);
    mp->timeout_active = 0;
    if (mp->tx_active == 0 && !mp->tx_bad_runt)
	goto out;

    /* update various counters */
    mace_handle_misc_intrs(mp, in_8(&mb->ir), dev);

    cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty;

    /* turn off both tx and rx and reset the chip */
    out_8(&mb->maccc, 0);
    printk(KERN_ERR "mace: transmit timeout - resetting\n");
    dbdma_reset(td);
    mace_reset(dev);

    /* restart rx dma */
    cp = bus_to_virt(le32_to_cpu(rd->cmdptr));
    dbdma_reset(rd);
    out_le16(&cp->xfer_status, 0);
    out_le32(&rd->cmdptr, virt_to_bus(cp));
    out_le32(&rd->control, (RUN << 16) | RUN);
    /* fix up the transmit side */
    i = mp->tx_empty;
    mp->tx_active = 0;
    ++dev->stats.tx_errors;
    if (mp->tx_bad_runt) {
	mp->tx_bad_runt = 0;
    } else if (i != mp->tx_fill) {
	dev_kfree_skb(mp->tx_bufs[i]);
	if (++i >= N_TX_RING)
	    i = 0;
	mp->tx_empty = i;
    }
    mp->tx_fullup = 0;
    netif_wake_queue(dev);
    if (i != mp->tx_fill) {
	cp = mp->tx_cmds + NCMDS_TX * i;
	out_le16(&cp->xfer_status, 0);
	out_le16(&cp->command, OUTPUT_LAST);
	out_le32(&td->cmdptr, virt_to_bus(cp));
	out_le32(&td->control, (RUN << 16) | RUN);
	++mp->tx_active;
	mace_set_timeout(dev);
    }

    /* turn it back on */
    out_8(&mb->imr, RCVINT);
    out_8(&mb->maccc, mp->maccc);

 out:
    spin_unlock_irqrestore(&mp->lock, flags);
}
static irqreturn_t mace_txdma_intr(int irq, void *dev_id)
{
    return IRQ_HANDLED;
}
static irqreturn_t mace_rxdma_intr(int irq, void *dev_id)
{
    struct net_device *dev = (struct net_device *) dev_id;
    struct mace_data *mp = netdev_priv(dev);
    volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
    volatile struct dbdma_cmd *cp, *np;
    int i, nb, stat, next;
    struct sk_buff *skb;
    unsigned frame_status;
    static int mace_lost_status;
    unsigned char *data;
    unsigned long flags;

    spin_lock_irqsave(&mp->lock, flags);
    for (i = mp->rx_empty; i != mp->rx_fill; ) {
	cp = mp->rx_cmds + i;
	stat = le16_to_cpu(cp->xfer_status);
	if ((stat & ACTIVE) == 0) {
	    next = i + 1;
	    if (next >= N_RX_RING)
		next = 0;
	    np = mp->rx_cmds + next;
	    if (next != mp->rx_fill &&
		(le16_to_cpu(np->xfer_status) & ACTIVE) != 0) {
		printk(KERN_DEBUG "mace: lost a status word\n");
		++mace_lost_status;
	    } else
		break;
	}
	nb = le16_to_cpu(cp->req_count) - le16_to_cpu(cp->res_count);
	out_le16(&cp->command, DBDMA_STOP);
	/* got a packet, have a look at it */
	skb = mp->rx_bufs[i];
	if (!skb) {
	    ++dev->stats.rx_dropped;
	} else if (nb > 8) {
	    data = skb->data;
	    frame_status = (data[nb-3] << 8) + data[nb-4];
	    if (frame_status & (RS_OFLO|RS_CLSN|RS_FRAMERR|RS_FCSERR)) {
		++dev->stats.rx_errors;
		if (frame_status & RS_OFLO)
		    ++dev->stats.rx_over_errors;
		if (frame_status & RS_FRAMERR)
		    ++dev->stats.rx_frame_errors;
		if (frame_status & RS_FCSERR)
		    ++dev->stats.rx_crc_errors;
	    } else {
		/* Mace feature AUTO_STRIP_RCV is on by default, dropping the
		 * FCS on frames with 802.3 headers. This means that Ethernet
		 * frames have 8 extra octets at the end, while 802.3 frames
		 * have only 4. We need to correctly account for this. */
		if (*(unsigned short *)(data+12) < 1536) /* 802.3 header */
		    nb -= 4;
		else	/* Ethernet header; mace includes FCS */
		    nb -= 8;
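		/*
		 * At this point nb still includes the trailer: the DMA
		 * count covers the 4 receive-status octets read above,
		 * plus the FCS whenever the chip has not auto-stripped
		 * it, hence the 4- versus 8-byte adjustment.
		 */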
		skb_put(skb, nb);
		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_bytes += skb->len;
		netif_rx(skb);
		mp->rx_bufs[i] = NULL;
		++dev->stats.rx_packets;
	    }
	} else {
	    ++dev->stats.rx_errors;
	    ++dev->stats.rx_length_errors;
	}

	/* advance to next */
	if (++i >= N_RX_RING)
	    i = 0;
    }
    mp->rx_empty = i;

    i = mp->rx_fill;
    for (;;) {
	next = i + 1;
	if (next >= N_RX_RING)
	    next = 0;
	if (next == mp->rx_empty)
	    break;
	cp = mp->rx_cmds + i;
	skb = mp->rx_bufs[i];
	if (!skb) {
	    skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
	    if (skb) {
		skb_reserve(skb, 2);
		mp->rx_bufs[i] = skb;
	    }
	}
	cp->req_count = cpu_to_le16(RX_BUFLEN);
	data = skb? skb->data: dummy_buf;
	cp->phy_addr = cpu_to_le32(virt_to_bus(data));
	out_le16(&cp->xfer_status, 0);
	out_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
#if 0
	if ((le32_to_cpu(rd->status) & ACTIVE) != 0) {
	    out_le32(&rd->control, (PAUSE << 16) | PAUSE);
	    while ((in_le32(&rd->status) & ACTIVE) != 0)
		;
	}
#endif
	i = next;
    }
    if (i != mp->rx_fill) {
	out_le32(&rd->control, ((RUN|WAKE) << 16) | (RUN|WAKE));
	mp->rx_fill = i;
    }
    spin_unlock_irqrestore(&mp->lock, flags);
    return IRQ_HANDLED;
}
static const struct of_device_id mace_match[] =
{
    {
	.name		= "mace",
    },
    {},
};
MODULE_DEVICE_TABLE (of, mace_match);

static struct macio_driver mace_driver =
{
    .driver = {
	.name		= "mace",
	.owner		= THIS_MODULE,
	.of_match_table	= mace_match,
    },
    .probe	= mace_probe,
    .remove	= mace_remove,
};
static int __init mace_init(void)
{
    return macio_register_driver(&mace_driver);
}

static void __exit mace_cleanup(void)
{
    macio_unregister_driver(&mace_driver);

    kfree(dummy_buf);
    dummy_buf = NULL;
}
MODULE_AUTHOR("Paul Mackerras");
MODULE_DESCRIPTION("PowerMac MACE driver.");
module_param(port_aaui, int, 0);
MODULE_PARM_DESC(port_aaui, "MACE uses AAUI port (0-1)");
MODULE_LICENSE("GPL");
module_init(mace_init);
module_exit(mace_cleanup);