Linux 3.4.102
[linux/fpc-iii.git] / drivers / net / ethernet / i825xx / 3c527.c
blob278e791afe0097ab834a4ebe5edeefd157778203
1 /* 3c527.c: 3Com Etherlink/MC32 driver for Linux 2.4 and 2.6.
3 * (c) Copyright 1998 Red Hat Software Inc
4 * Written by Alan Cox.
5 * Further debugging by Carl Drougge.
6 * Initial SMP support by Felipe W Damasio <felipewd@terra.com.br>
7 * Heavily modified by Richard Procter <rnp@paradise.net.nz>
9 * Based on skeleton.c written 1993-94 by Donald Becker and ne2.c
10 * (for the MCA stuff) written by Wim Dumon.
12 * Thanks to 3Com for making this possible by providing me with the
13 * documentation.
15 * This software may be used and distributed according to the terms
16 * of the GNU General Public License, incorporated herein by reference.
20 #define DRV_NAME "3c527"
21 #define DRV_VERSION "0.7-SMP"
22 #define DRV_RELDATE "2003/09/21"
24 static const char *version =
25 DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Richard Procter <rnp@paradise.net.nz>\n";
27 /**
28 * DOC: Traps for the unwary
30 * The diagram (Figure 1-1) and the POS summary disagree with the
31 * "Interrupt Level" section in the manual.
33 * The manual contradicts itself when describing the minimum number
34 * buffers in the 'configure lists' command.
35 * My card accepts a buffer config of 4/4.
37 * Setting the SAV BP bit does not save bad packets, but
38 * only enables RX on-card stats collection.
40 * The documentation in places seems to miss things. In actual fact
41 * I've always eventually found everything is documented, it just
42 * requires careful study.
44 * DOC: Theory Of Operation
46 * The 3com 3c527 is a 32bit MCA bus mastering adapter with a large
47 * amount of on board intelligence that housekeeps a somewhat dumber
48 * Intel NIC. For performance we want to keep the transmit queue deep
49 * as the card can transmit packets while fetching others from main
50 * memory by bus master DMA. Transmission and reception are driven by
51 * circular buffer queues.
53 * The mailboxes can be used for controlling how the card traverses
54 * its buffer rings, but are used only for initial setup in this
55 * implementation. The exec mailbox allows a variety of commands to
56 * be executed. Each command must complete before the next is
57 * executed. Primarily we use the exec mailbox for controlling the
58 * multicast lists. We have to do a certain amount of interesting
59 * hoop jumping as the multicast list changes can occur in interrupt
60 * state when the card has an exec command pending. We defer such
61 * events until the command completion interrupt.
63 * A copy break scheme (taken from 3c59x.c) is employed whereby
64 * received frames exceeding a configurable length are passed
 * directly to the higher networking layers without incurring a copy,
66 * in what amounts to a time/space trade-off.
68 * The card also keeps a large amount of statistical information
69 * on-board. In a perfect world, these could be used safely at no
70 * cost. However, lacking information to the contrary, processing
71 * them without races would involve so much extra complexity as to
72 * make it unworthwhile to do so. In the end, a hybrid SW/HW
73 * implementation was made necessary --- see mc32_update_stats().
75 * DOC: Notes
77 * It should be possible to use two or more cards, but at this stage
78 * only by loading two copies of the same module.
80 * The on-board 82586 NIC has trouble receiving multiple
81 * back-to-back frames and so is likely to drop packets from fast
82 * senders.
83 **/
85 #include <linux/module.h>
87 #include <linux/errno.h>
88 #include <linux/netdevice.h>
89 #include <linux/etherdevice.h>
90 #include <linux/if_ether.h>
91 #include <linux/init.h>
92 #include <linux/kernel.h>
93 #include <linux/types.h>
94 #include <linux/fcntl.h>
95 #include <linux/interrupt.h>
96 #include <linux/mca-legacy.h>
97 #include <linux/ioport.h>
98 #include <linux/in.h>
99 #include <linux/skbuff.h>
100 #include <linux/slab.h>
101 #include <linux/string.h>
102 #include <linux/wait.h>
103 #include <linux/ethtool.h>
104 #include <linux/completion.h>
105 #include <linux/bitops.h>
106 #include <linux/semaphore.h>
108 #include <asm/uaccess.h>
109 #include <asm/io.h>
110 #include <asm/dma.h>
112 #include "3c527.h"
114 MODULE_LICENSE("GPL");
117 * The name of the card. Is used for messages and in the requests for
118 * io regions, irqs and dma channels
120 static const char* cardname = DRV_NAME;
122 /* use 0 for production, 1 for verification, >2 for debug */
123 #ifndef NET_DEBUG
124 #define NET_DEBUG 2
125 #endif
127 static unsigned int mc32_debug = NET_DEBUG;
129 /* The number of low I/O ports used by the ethercard. */
130 #define MC32_IO_EXTENT 8
132 /* As implemented, values must be a power-of-2 -- 4/8/16/32 */
133 #define TX_RING_LEN 32 /* Typically the card supports 37 */
134 #define RX_RING_LEN 8 /* " " " */
136 /* Copy break point, see above for details.
137 * Setting to > 1512 effectively disables this feature. */
138 #define RX_COPYBREAK 200 /* Value from 3c59x.c */
140 /* Issue the 82586 workaround command - this is for "busy lans", but
 * basically means for all LANs nowadays - has a performance (latency)
142 * cost, but best set. */
143 static const int WORKAROUND_82586=1;
/* Pointers to buffers and their on-card records */
struct mc32_ring_desc
{
	volatile struct skb_header *p;	/* descriptor in card shared memory */
	struct sk_buff *skb;		/* host buffer backing this slot */
};
/* Information that needs to be kept for each board. */
struct mc32_local
{
	int slot;			/* MCA slot this card occupies */

	u32 base;			/* Card-relative base of shared memory */
	volatile struct mc32_mailbox *rx_box;
	volatile struct mc32_mailbox *tx_box;
	volatile struct mc32_mailbox *exec_box;
	volatile struct mc32_stats *stats;    /* Start of on-card statistics */
	u16 tx_chain;			/* Transmit list start offset */
	u16 rx_chain;			/* Receive list start offset */
	u16 tx_len;			/* Transmit list count */
	u16 rx_len;			/* Receive list count */

	u16 xceiver_desired_state;	/* HALTED or RUNNING */
	u16 cmd_nonblocking;		/* Thread is uninterested in command result */
	u16 mc_reload_wait;		/* A multicast load request is pending */
	u32 mc_list_valid;		/* True when the mclist is set */

	struct mc32_ring_desc tx_ring[TX_RING_LEN];	/* Host Transmit ring */
	struct mc32_ring_desc rx_ring[RX_RING_LEN];	/* Host Receive ring */

	atomic_t tx_count;		/* buffers left */
	atomic_t tx_ring_head;		/* index to tx en-queue end */
	u16 tx_ring_tail;		/* index to tx de-queue end */

	u16 rx_ring_tail;		/* index to rx de-queue end */

	struct semaphore cmd_mutex;	/* Serialises issuing of execute commands */
	struct completion execution_cmd; /* Card has completed an execute command */
	struct completion xceiver_cmd;	/* Card has completed a tx or rx command */
};
186 /* The station (ethernet) address prefix, used for a sanity check. */
187 #define SA_ADDR0 0x02
188 #define SA_ADDR1 0x60
189 #define SA_ADDR2 0xAC
struct mca_adapters_t {
	unsigned int	id;	/* MCA POS adapter ID */
	char		*name;	/* Human-readable adapter name */
};

/* Adapter IDs this driver claims; the table is terminated by a NULL name. */
static const struct mca_adapters_t mc32_adapters[] = {
	{ 0x0041, "3COM EtherLink MC/32" },
	{ 0x8EF5, "IBM High Performance Lan Adapter" },
	{ 0x0000, NULL }
};
203 /* Macros for ring index manipulations */
204 static inline u16 next_rx(u16 rx) { return (rx+1)&(RX_RING_LEN-1); };
205 static inline u16 prev_rx(u16 rx) { return (rx-1)&(RX_RING_LEN-1); };
207 static inline u16 next_tx(u16 tx) { return (tx+1)&(TX_RING_LEN-1); };
210 /* Index to functions, as function prototypes. */
211 static int mc32_probe1(struct net_device *dev, int ioaddr);
212 static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len);
213 static int mc32_open(struct net_device *dev);
214 static void mc32_timeout(struct net_device *dev);
215 static netdev_tx_t mc32_send_packet(struct sk_buff *skb,
216 struct net_device *dev);
217 static irqreturn_t mc32_interrupt(int irq, void *dev_id);
218 static int mc32_close(struct net_device *dev);
219 static struct net_device_stats *mc32_get_stats(struct net_device *dev);
220 static void mc32_set_multicast_list(struct net_device *dev);
221 static void mc32_reset_multicast_list(struct net_device *dev);
222 static const struct ethtool_ops netdev_ethtool_ops;
224 static void cleanup_card(struct net_device *dev)
226 struct mc32_local *lp = netdev_priv(dev);
227 unsigned slot = lp->slot;
228 mca_mark_as_unused(slot);
229 mca_set_adapter_name(slot, NULL);
230 free_irq(dev->irq, dev);
231 release_region(dev->base_addr, MC32_IO_EXTENT);
235 * mc32_probe - Search for supported boards
236 * @unit: interface number to use
238 * Because MCA bus is a real bus and we can scan for cards we could do a
239 * single scan for all boards here. Right now we use the passed in device
240 * structure and scan for only one board. This needs fixing for modules
241 * in particular.
244 struct net_device *__init mc32_probe(int unit)
246 struct net_device *dev = alloc_etherdev(sizeof(struct mc32_local));
247 static int current_mca_slot = -1;
248 int i;
249 int err;
251 if (!dev)
252 return ERR_PTR(-ENOMEM);
254 if (unit >= 0)
255 sprintf(dev->name, "eth%d", unit);
257 /* Do not check any supplied i/o locations.
258 POS registers usually don't fail :) */
260 /* MCA cards have POS registers.
261 Autodetecting MCA cards is extremely simple.
262 Just search for the card. */
264 for(i = 0; (mc32_adapters[i].name != NULL); i++) {
265 current_mca_slot =
266 mca_find_unused_adapter(mc32_adapters[i].id, 0);
268 if(current_mca_slot != MCA_NOTFOUND) {
269 if(!mc32_probe1(dev, current_mca_slot))
271 mca_set_adapter_name(current_mca_slot,
272 mc32_adapters[i].name);
273 mca_mark_as_used(current_mca_slot);
274 err = register_netdev(dev);
275 if (err) {
276 cleanup_card(dev);
277 free_netdev(dev);
278 dev = ERR_PTR(err);
280 return dev;
285 free_netdev(dev);
286 return ERR_PTR(-ENODEV);
/* net_device callbacks; the generic eth_* helpers are used where the
   hardware needs nothing special. */
static const struct net_device_ops netdev_ops = {
	.ndo_open = mc32_open,
	.ndo_stop = mc32_close,
	.ndo_start_xmit = mc32_send_packet,
	.ndo_get_stats = mc32_get_stats,
	.ndo_set_rx_mode = mc32_set_multicast_list,
	.ndo_tx_timeout = mc32_timeout,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};
302 * mc32_probe1 - Check a given slot for a board and test the card
303 * @dev: Device structure to fill in
304 * @slot: The MCA bus slot being used by this card
306 * Decode the slot data and configure the card structures. Having done this we
307 * can reset the card and configure it. The card does a full self test cycle
308 * in firmware so we have to wait for it to return and post us either a
309 * failure case or some addresses we use to find the board internals.
312 static int __init mc32_probe1(struct net_device *dev, int slot)
314 static unsigned version_printed;
315 int i, err;
316 u8 POS;
317 u32 base;
318 struct mc32_local *lp = netdev_priv(dev);
319 static const u16 mca_io_bases[] = {
320 0x7280,0x7290,
321 0x7680,0x7690,
322 0x7A80,0x7A90,
323 0x7E80,0x7E90
325 static const u32 mca_mem_bases[] = {
326 0x00C0000,
327 0x00C4000,
328 0x00C8000,
329 0x00CC000,
330 0x00D0000,
331 0x00D4000,
332 0x00D8000,
333 0x00DC000
335 static const char * const failures[] = {
336 "Processor instruction",
337 "Processor data bus",
338 "Processor data bus",
339 "Processor data bus",
340 "Adapter bus",
341 "ROM checksum",
342 "Base RAM",
343 "Extended RAM",
344 "82586 internal loopback",
345 "82586 initialisation failure",
346 "Adapter list configuration error"
349 /* Time to play MCA games */
351 if (mc32_debug && version_printed++ == 0)
352 pr_debug("%s", version);
354 pr_info("%s: %s found in slot %d: ", dev->name, cardname, slot);
356 POS = mca_read_stored_pos(slot, 2);
358 if(!(POS&1))
360 pr_cont("disabled.\n");
361 return -ENODEV;
364 /* Fill in the 'dev' fields. */
365 dev->base_addr = mca_io_bases[(POS>>1)&7];
366 dev->mem_start = mca_mem_bases[(POS>>4)&7];
368 POS = mca_read_stored_pos(slot, 4);
369 if(!(POS&1))
371 pr_cont("memory window disabled.\n");
372 return -ENODEV;
375 POS = mca_read_stored_pos(slot, 5);
377 i=(POS>>4)&3;
378 if(i==3)
380 pr_cont("invalid memory window.\n");
381 return -ENODEV;
384 i*=16384;
385 i+=16384;
387 dev->mem_end=dev->mem_start + i;
389 dev->irq = ((POS>>2)&3)+9;
391 if(!request_region(dev->base_addr, MC32_IO_EXTENT, cardname))
393 pr_cont("io 0x%3lX, which is busy.\n", dev->base_addr);
394 return -EBUSY;
397 pr_cont("io 0x%3lX irq %d mem 0x%lX (%dK)\n",
398 dev->base_addr, dev->irq, dev->mem_start, i/1024);
401 /* We ought to set the cache line size here.. */
405 * Go PROM browsing
408 /* Retrieve and print the ethernet address. */
409 for (i = 0; i < 6; i++)
411 mca_write_pos(slot, 6, i+12);
412 mca_write_pos(slot, 7, 0);
414 dev->dev_addr[i] = mca_read_pos(slot,3);
417 pr_info("%s: Address %pM ", dev->name, dev->dev_addr);
419 mca_write_pos(slot, 6, 0);
420 mca_write_pos(slot, 7, 0);
422 POS = mca_read_stored_pos(slot, 4);
424 if(POS&2)
425 pr_cont(": BNC port selected.\n");
426 else
427 pr_cont(": AUI port selected.\n");
429 POS=inb(dev->base_addr+HOST_CTRL);
430 POS|=HOST_CTRL_ATTN|HOST_CTRL_RESET;
431 POS&=~HOST_CTRL_INTE;
432 outb(POS, dev->base_addr+HOST_CTRL);
433 /* Reset adapter */
434 udelay(100);
435 /* Reset off */
436 POS&=~(HOST_CTRL_ATTN|HOST_CTRL_RESET);
437 outb(POS, dev->base_addr+HOST_CTRL);
439 udelay(300);
442 * Grab the IRQ
445 err = request_irq(dev->irq, mc32_interrupt, IRQF_SHARED, DRV_NAME, dev);
446 if (err) {
447 release_region(dev->base_addr, MC32_IO_EXTENT);
448 pr_err("%s: unable to get IRQ %d.\n", DRV_NAME, dev->irq);
449 goto err_exit_ports;
452 memset(lp, 0, sizeof(struct mc32_local));
453 lp->slot = slot;
455 i=0;
457 base = inb(dev->base_addr);
459 while(base == 0xFF)
461 i++;
462 if(i == 1000)
464 pr_err("%s: failed to boot adapter.\n", dev->name);
465 err = -ENODEV;
466 goto err_exit_irq;
468 udelay(1000);
469 if(inb(dev->base_addr+2)&(1<<5))
470 base = inb(dev->base_addr);
473 if(base>0)
475 if(base < 0x0C)
476 pr_err("%s: %s%s.\n", dev->name, failures[base-1],
477 base<0x0A?" test failure":"");
478 else
479 pr_err("%s: unknown failure %d.\n", dev->name, base);
480 err = -ENODEV;
481 goto err_exit_irq;
484 base=0;
485 for(i=0;i<4;i++)
487 int n=0;
489 while(!(inb(dev->base_addr+2)&(1<<5)))
491 n++;
492 udelay(50);
493 if(n>100)
495 pr_err("%s: mailbox read fail (%d).\n", dev->name, i);
496 err = -ENODEV;
497 goto err_exit_irq;
501 base|=(inb(dev->base_addr)<<(8*i));
504 lp->exec_box=isa_bus_to_virt(dev->mem_start+base);
506 base=lp->exec_box->data[1]<<16|lp->exec_box->data[0];
508 lp->base = dev->mem_start+base;
510 lp->rx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[2]);
511 lp->tx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[3]);
513 lp->stats = isa_bus_to_virt(lp->base + lp->exec_box->data[5]);
516 * Descriptor chains (card relative)
519 lp->tx_chain = lp->exec_box->data[8]; /* Transmit list start offset */
520 lp->rx_chain = lp->exec_box->data[10]; /* Receive list start offset */
521 lp->tx_len = lp->exec_box->data[9]; /* Transmit list count */
522 lp->rx_len = lp->exec_box->data[11]; /* Receive list count */
524 sema_init(&lp->cmd_mutex, 0);
525 init_completion(&lp->execution_cmd);
526 init_completion(&lp->xceiver_cmd);
528 pr_info("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n",
529 dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base);
531 dev->netdev_ops = &netdev_ops;
532 dev->watchdog_timeo = HZ*5; /* Board does all the work */
533 dev->ethtool_ops = &netdev_ethtool_ops;
535 return 0;
537 err_exit_irq:
538 free_irq(dev->irq, dev);
539 err_exit_ports:
540 release_region(dev->base_addr, MC32_IO_EXTENT);
541 return err;
546 * mc32_ready_poll - wait until we can feed it a command
547 * @dev: The device to wait for
549 * Wait until the card becomes ready to accept a command via the
550 * command register. This tells us nothing about the completion
551 * status of any pending commands and takes very little time at all.
554 static inline void mc32_ready_poll(struct net_device *dev)
556 int ioaddr = dev->base_addr;
557 while(!(inb(ioaddr+HOST_STATUS)&HOST_STATUS_CRR));
562 * mc32_command_nowait - send a command non blocking
563 * @dev: The 3c527 to issue the command to
564 * @cmd: The command word to write to the mailbox
565 * @data: A data block if the command expects one
566 * @len: Length of the data block
568 * Send a command from interrupt state. If there is a command
569 * currently being executed then we return an error of -1. It
570 * simply isn't viable to wait around as commands may be
571 * slow. This can theoretically be starved on SMP, but it's hard
572 * to see a realistic situation. We do not wait for the command
573 * to complete --- we rely on the interrupt handler to tidy up
574 * after us.
577 static int mc32_command_nowait(struct net_device *dev, u16 cmd, void *data, int len)
579 struct mc32_local *lp = netdev_priv(dev);
580 int ioaddr = dev->base_addr;
581 int ret = -1;
583 if (down_trylock(&lp->cmd_mutex) == 0)
585 lp->cmd_nonblocking=1;
586 lp->exec_box->mbox=0;
587 lp->exec_box->mbox=cmd;
588 memcpy((void *)lp->exec_box->data, data, len);
589 barrier(); /* the memcpy forgot the volatile so be sure */
591 /* Send the command */
592 mc32_ready_poll(dev);
593 outb(1<<6, ioaddr+HOST_CMD);
595 ret = 0;
597 /* Interrupt handler will signal mutex on completion */
600 return ret;
605 * mc32_command - send a command and sleep until completion
606 * @dev: The 3c527 card to issue the command to
607 * @cmd: The command word to write to the mailbox
608 * @data: A data block if the command expects one
609 * @len: Length of the data block
611 * Sends exec commands in a user context. This permits us to wait around
612 * for the replies and also to wait for the command buffer to complete
613 * from a previous command before we execute our command. After our
614 * command completes we will attempt any pending multicast reload
615 * we blocked off by hogging the exec buffer.
617 * You feed the card a command, you wait, it interrupts you get a
618 * reply. All well and good. The complication arises because you use
619 * commands for filter list changes which come in at bh level from things
620 * like IPV6 group stuff.
623 static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len)
625 struct mc32_local *lp = netdev_priv(dev);
626 int ioaddr = dev->base_addr;
627 int ret = 0;
629 down(&lp->cmd_mutex);
632 * My Turn
635 lp->cmd_nonblocking=0;
636 lp->exec_box->mbox=0;
637 lp->exec_box->mbox=cmd;
638 memcpy((void *)lp->exec_box->data, data, len);
639 barrier(); /* the memcpy forgot the volatile so be sure */
641 mc32_ready_poll(dev);
642 outb(1<<6, ioaddr+HOST_CMD);
644 wait_for_completion(&lp->execution_cmd);
646 if(lp->exec_box->mbox&(1<<13))
647 ret = -1;
649 up(&lp->cmd_mutex);
652 * A multicast set got blocked - try it now
655 if(lp->mc_reload_wait)
657 mc32_reset_multicast_list(dev);
660 return ret;
665 * mc32_start_transceiver - tell board to restart tx/rx
666 * @dev: The 3c527 card to issue the command to
668 * This may be called from the interrupt state, where it is used
669 * to restart the rx ring if the card runs out of rx buffers.
671 * We must first check if it's ok to (re)start the transceiver. See
672 * mc32_close for details.
675 static void mc32_start_transceiver(struct net_device *dev) {
677 struct mc32_local *lp = netdev_priv(dev);
678 int ioaddr = dev->base_addr;
680 /* Ignore RX overflow on device closure */
681 if (lp->xceiver_desired_state==HALTED)
682 return;
684 /* Give the card the offset to the post-EOL-bit RX descriptor */
685 mc32_ready_poll(dev);
686 lp->rx_box->mbox=0;
687 lp->rx_box->data[0]=lp->rx_ring[prev_rx(lp->rx_ring_tail)].p->next;
688 outb(HOST_CMD_START_RX, ioaddr+HOST_CMD);
690 mc32_ready_poll(dev);
691 lp->tx_box->mbox=0;
692 outb(HOST_CMD_RESTRT_TX, ioaddr+HOST_CMD); /* card ignores this on RX restart */
694 /* We are not interrupted on start completion */
699 * mc32_halt_transceiver - tell board to stop tx/rx
700 * @dev: The 3c527 card to issue the command to
702 * We issue the commands to halt the card's transceiver. In fact,
703 * after some experimenting we now simply tell the card to
704 * suspend. When issuing aborts occasionally odd things happened.
706 * We then sleep until the card has notified us that both rx and
707 * tx have been suspended.
710 static void mc32_halt_transceiver(struct net_device *dev)
712 struct mc32_local *lp = netdev_priv(dev);
713 int ioaddr = dev->base_addr;
715 mc32_ready_poll(dev);
716 lp->rx_box->mbox=0;
717 outb(HOST_CMD_SUSPND_RX, ioaddr+HOST_CMD);
718 wait_for_completion(&lp->xceiver_cmd);
720 mc32_ready_poll(dev);
721 lp->tx_box->mbox=0;
722 outb(HOST_CMD_SUSPND_TX, ioaddr+HOST_CMD);
723 wait_for_completion(&lp->xceiver_cmd);
728 * mc32_load_rx_ring - load the ring of receive buffers
729 * @dev: 3c527 to build the ring for
731 * This initialises the on-card and driver datastructures to
732 * the point where mc32_start_transceiver() can be called.
734 * The card sets up the receive ring for us. We are required to use the
735 * ring it provides, although the size of the ring is configurable.
737 * We allocate an sk_buff for each ring entry in turn and
738 * initialise its house-keeping info. At the same time, we read
739 * each 'next' pointer in our rx_ring array. This reduces slow
740 * shared-memory reads and makes it easy to access predecessor
741 * descriptors.
743 * We then set the end-of-list bit for the last entry so that the
744 * card will know when it has run out of buffers.
747 static int mc32_load_rx_ring(struct net_device *dev)
749 struct mc32_local *lp = netdev_priv(dev);
750 int i;
751 u16 rx_base;
752 volatile struct skb_header *p;
754 rx_base=lp->rx_chain;
756 for(i=0; i<RX_RING_LEN; i++) {
757 lp->rx_ring[i].skb=alloc_skb(1532, GFP_KERNEL);
758 if (lp->rx_ring[i].skb==NULL) {
759 for (;i>=0;i--)
760 kfree_skb(lp->rx_ring[i].skb);
761 return -ENOBUFS;
763 skb_reserve(lp->rx_ring[i].skb, 18);
765 p=isa_bus_to_virt(lp->base+rx_base);
767 p->control=0;
768 p->data=isa_virt_to_bus(lp->rx_ring[i].skb->data);
769 p->status=0;
770 p->length=1532;
772 lp->rx_ring[i].p=p;
773 rx_base=p->next;
776 lp->rx_ring[i-1].p->control |= CONTROL_EOL;
778 lp->rx_ring_tail=0;
780 return 0;
785 * mc32_flush_rx_ring - free the ring of receive buffers
786 * @lp: Local data of 3c527 to flush the rx ring of
788 * Free the buffer for each ring slot. This may be called
789 * before mc32_load_rx_ring(), eg. on error in mc32_open().
790 * Requires rx skb pointers to point to a valid skb, or NULL.
793 static void mc32_flush_rx_ring(struct net_device *dev)
795 struct mc32_local *lp = netdev_priv(dev);
796 int i;
798 for(i=0; i < RX_RING_LEN; i++)
800 if (lp->rx_ring[i].skb) {
801 dev_kfree_skb(lp->rx_ring[i].skb);
802 lp->rx_ring[i].skb = NULL;
804 lp->rx_ring[i].p=NULL;
810 * mc32_load_tx_ring - load transmit ring
811 * @dev: The 3c527 card to issue the command to
813 * This sets up the host transmit data-structures.
815 * First, we obtain from the card it's current position in the tx
816 * ring, so that we will know where to begin transmitting
817 * packets.
819 * Then, we read the 'next' pointers from the on-card tx ring into
820 * our tx_ring array to reduce slow shared-mem reads. Finally, we
821 * intitalise the tx house keeping variables.
825 static void mc32_load_tx_ring(struct net_device *dev)
827 struct mc32_local *lp = netdev_priv(dev);
828 volatile struct skb_header *p;
829 int i;
830 u16 tx_base;
832 tx_base=lp->tx_box->data[0];
834 for(i=0 ; i<TX_RING_LEN ; i++)
836 p=isa_bus_to_virt(lp->base+tx_base);
837 lp->tx_ring[i].p=p;
838 lp->tx_ring[i].skb=NULL;
840 tx_base=p->next;
843 /* -1 so that tx_ring_head cannot "lap" tx_ring_tail */
844 /* see mc32_tx_ring */
846 atomic_set(&lp->tx_count, TX_RING_LEN-1);
847 atomic_set(&lp->tx_ring_head, 0);
848 lp->tx_ring_tail=0;
853 * mc32_flush_tx_ring - free transmit ring
854 * @lp: Local data of 3c527 to flush the tx ring of
856 * If the ring is non-empty, zip over the it, freeing any
857 * allocated skb_buffs. The tx ring house-keeping variables are
858 * then reset. Requires rx skb pointers to point to a valid skb,
859 * or NULL.
862 static void mc32_flush_tx_ring(struct net_device *dev)
864 struct mc32_local *lp = netdev_priv(dev);
865 int i;
867 for (i=0; i < TX_RING_LEN; i++)
869 if (lp->tx_ring[i].skb)
871 dev_kfree_skb(lp->tx_ring[i].skb);
872 lp->tx_ring[i].skb = NULL;
876 atomic_set(&lp->tx_count, 0);
877 atomic_set(&lp->tx_ring_head, 0);
878 lp->tx_ring_tail=0;
883 * mc32_open - handle 'up' of card
884 * @dev: device to open
886 * The user is trying to bring the card into ready state. This requires
887 * a brief dialogue with the card. Firstly we enable interrupts and then
888 * 'indications'. Without these enabled the card doesn't bother telling
889 * us what it has done. This had me puzzled for a week.
891 * We configure the number of card descriptors, then load the network
892 * address and multicast filters. Turn on the workaround mode. This
893 * works around a bug in the 82586 - it asks the firmware to do
894 * so. It has a performance (latency) hit but is needed on busy
895 * [read most] lans. We load the ring with buffers then we kick it
896 * all off.
899 static int mc32_open(struct net_device *dev)
901 int ioaddr = dev->base_addr;
902 struct mc32_local *lp = netdev_priv(dev);
903 u8 one=1;
904 u8 regs;
905 u16 descnumbuffs[2] = {TX_RING_LEN, RX_RING_LEN};
908 * Interrupts enabled
911 regs=inb(ioaddr+HOST_CTRL);
912 regs|=HOST_CTRL_INTE;
913 outb(regs, ioaddr+HOST_CTRL);
916 * Allow ourselves to issue commands
919 up(&lp->cmd_mutex);
923 * Send the indications on command
926 mc32_command(dev, 4, &one, 2);
929 * Poke it to make sure it's really dead.
932 mc32_halt_transceiver(dev);
933 mc32_flush_tx_ring(dev);
936 * Ask card to set up on-card descriptors to our spec
939 if(mc32_command(dev, 8, descnumbuffs, 4)) {
940 pr_info("%s: %s rejected our buffer configuration!\n",
941 dev->name, cardname);
942 mc32_close(dev);
943 return -ENOBUFS;
946 /* Report new configuration */
947 mc32_command(dev, 6, NULL, 0);
949 lp->tx_chain = lp->exec_box->data[8]; /* Transmit list start offset */
950 lp->rx_chain = lp->exec_box->data[10]; /* Receive list start offset */
951 lp->tx_len = lp->exec_box->data[9]; /* Transmit list count */
952 lp->rx_len = lp->exec_box->data[11]; /* Receive list count */
954 /* Set Network Address */
955 mc32_command(dev, 1, dev->dev_addr, 6);
957 /* Set the filters */
958 mc32_set_multicast_list(dev);
960 if (WORKAROUND_82586) {
961 u16 zero_word=0;
962 mc32_command(dev, 0x0D, &zero_word, 2); /* 82586 bug workaround on */
965 mc32_load_tx_ring(dev);
967 if(mc32_load_rx_ring(dev))
969 mc32_close(dev);
970 return -ENOBUFS;
973 lp->xceiver_desired_state = RUNNING;
975 /* And finally, set the ball rolling... */
976 mc32_start_transceiver(dev);
978 netif_start_queue(dev);
980 return 0;
/**
 * mc32_timeout - handle a timeout from the network layer
 * @dev: 3c527 that timed out
 *
 * Handle a timeout on transmit from the 3c527. This normally means
 * bad things as the hardware handles cable timeouts and mess for
 * us.
 */

static void mc32_timeout(struct net_device *dev)
{
	pr_warning("%s: transmit timed out?\n", dev->name);
	/* Try to restart the adaptor. */
	netif_wake_queue(dev);
}
/**
 * mc32_send_packet - queue a frame for transmit
 * @skb: buffer to transmit
 * @dev: 3c527 to send it out of
 *
 * Transmit a buffer. This normally means throwing the buffer onto
 * the transmit queue as the queue is quite large. If the queue is
 * full then we set tx_busy and return. Once the interrupt handler
 * gets messages telling it to reclaim transmit queue entries, we will
 * clear tx_busy and the kernel will start calling this again.
 *
 * We do not disable interrupts or acquire any locks; this can
 * run concurrently with mc32_tx_ring(), and the function itself
 * is serialised at a higher layer. However, similarly for the
 * card itself, we must ensure that we update tx_ring_head only
 * after we've established a valid packet on the tx ring (and
 * before we let the card "see" it, to prevent it racing with the
 * irq handler).
 */

static netdev_tx_t mc32_send_packet(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	u32 head = atomic_read(&lp->tx_ring_head);

	volatile struct skb_header *p, *np;

	/* Pessimistically stop the queue; re-woken below once we know
	   there is still ring space */
	netif_stop_queue(dev);

	if(atomic_read(&lp->tx_count)==0) {
		return NETDEV_TX_BUSY;
	}

	/* Pad short frames to the 60-byte ethernet minimum */
	if (skb_padto(skb, ETH_ZLEN)) {
		netif_wake_queue(dev);
		return NETDEV_TX_OK;
	}

	atomic_dec(&lp->tx_count);

	/* P is the last sending/sent buffer as a pointer */
	p=lp->tx_ring[head].p;

	head = next_tx(head);

	/* NP is the buffer we will be loading */
	np=lp->tx_ring[head].p;

	/* We will need this to flush the buffer out */
	lp->tx_ring[head].skb=skb;

	np->length	= unlikely(skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
	np->data	= isa_virt_to_bus(skb->data);
	np->status	= 0;
	np->control     = CONTROL_EOP | CONTROL_EOL;
	/* Make sure the descriptor above is visible before publishing it */
	wmb();

	/*
	 * The new frame has been setup; we can now
	 * let the interrupt handler and card "see" it
	 */

	atomic_set(&lp->tx_ring_head, head);
	/* Clearing EOL on the predecessor is what hands the new frame
	   over to the card; it must come last */
	p->control     &= ~CONTROL_EOL;

	netif_wake_queue(dev);
	return NETDEV_TX_OK;
}
/**
 * mc32_update_stats - pull off the on board statistics
 * @dev: 3c527 to service
 *
 * Query and reset the on-card stats. There's the small possibility
 * of a race here, which would result in an underestimation of
 * actual errors. As such, we'd prefer to keep all our stats
 * collection in software. As a rule, we do. However it can't be
 * used for rx errors and collisions as, by default, the card discards
 * bad rx packets.
 *
 * Setting the SAV BP in the rx filter command supposedly
 * stops this behaviour. However, testing shows that it only seems to
 * enable the collation of on-card rx statistics --- the driver
 * never sees an RX descriptor with an error status set.
 */

static void mc32_update_stats(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct mc32_stats *st = lp->stats;

	u32 rx_errors=0;

	/* Fold each on-card counter into dev->stats and clear it
	   (read-and-clear; see the race note above). */
	rx_errors+=dev->stats.rx_crc_errors +=st->rx_crc_errors;
	st->rx_crc_errors=0;
	rx_errors+=dev->stats.rx_fifo_errors +=st->rx_overrun_errors;
	st->rx_overrun_errors=0;
	rx_errors+=dev->stats.rx_frame_errors +=st->rx_alignment_errors;
	st->rx_alignment_errors=0;
	rx_errors+=dev->stats.rx_length_errors+=st->rx_tooshort_errors;
	st->rx_tooshort_errors=0;
	rx_errors+=dev->stats.rx_missed_errors+=st->rx_outofresource_errors;
	st->rx_outofresource_errors=0;
	dev->stats.rx_errors=rx_errors;

	/* Number of packets which saw one collision */
	dev->stats.collisions+=st->dataC[10];
	st->dataC[10]=0;

	/* Number of packets which saw 2--15 collisions */
	dev->stats.collisions+=st->dataC[11];
	st->dataC[11]=0;
}
/**
 * mc32_rx_ring - process the receive ring
 * @dev: 3c527 that needs its receive ring processing
 *
 * We have received one or more indications from the card that a
 * receive has completed. The buffer ring thus contains dirty
 * entries. We walk the ring by iterating over the circular rx_ring
 * array, starting at the next dirty buffer (which happens to be the
 * one we finished up at last time around).
 *
 * For each completed packet, we will either copy it and pass it up
 * the stack or, if the packet is near MTU sized, we allocate
 * another buffer and flip the old one up the stack.
 *
 * We must succeed in keeping a buffer on the ring. If necessary we
 * will toss a received packet rather than lose a ring entry. Once
 * the first uncompleted descriptor is found, we move the
 * End-Of-List bit to include the buffers just processed.
 */

static void mc32_rx_ring(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct skb_header *p;
	u16 rx_ring_tail;
	u16 rx_old_tail;
	int x=0;	/* bounds the work done per call (48 descriptors) */

	rx_old_tail = rx_ring_tail = lp->rx_ring_tail;

	do
	{
		p=lp->rx_ring[rx_ring_tail].p;

		if(!(p->status & (1<<7))) { /* Not COMPLETED */
			break;
		}
		if(p->status & (1<<6)) /* COMPLETED_OK */
		{
			u16 length=p->length;
			struct sk_buff *skb;
			struct sk_buff *newskb;

			/* Try to save time by avoiding a copy on big frames */

			if ((length > RX_COPYBREAK) &&
			    ((newskb = netdev_alloc_skb(dev, 1532)) != NULL))
			{
				/* Flip the filled skb up the stack and put a
				   fresh one on the ring in its place */
				skb=lp->rx_ring[rx_ring_tail].skb;
				skb_put(skb, length);

				skb_reserve(newskb,18);
				lp->rx_ring[rx_ring_tail].skb=newskb;
				p->data=isa_virt_to_bus(newskb->data);
			}
			else
			{
				/* Small frame (or no new skb): copy out and
				   recycle the ring buffer */
				skb = netdev_alloc_skb(dev, length + 2);

				if(skb==NULL) {
					dev->stats.rx_dropped++;
					goto dropped;
				}

				skb_reserve(skb,2);
				memcpy(skb_put(skb, length),
				       lp->rx_ring[rx_ring_tail].skb->data, length);
			}

			skb->protocol=eth_type_trans(skb,dev);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += length;
			netif_rx(skb);
		}

	dropped:
		/* Re-arm the descriptor for the card */
		p->length = 1532;
		p->status = 0;

		rx_ring_tail=next_rx(rx_ring_tail);
	}
	while(x++<48);

	/* If there was actually a frame to be processed, place the EOL bit */
	/* at the descriptor prior to the one to be filled next */

	if (rx_ring_tail != rx_old_tail)
	{
		lp->rx_ring[prev_rx(rx_ring_tail)].p->control |=  CONTROL_EOL;
		lp->rx_ring[prev_rx(rx_old_tail)].p->control  &= ~CONTROL_EOL;

		lp->rx_ring_tail=rx_ring_tail;
	}
}
/**
 *	mc32_tx_ring	-	process completed transmits
 *	@dev: 3c527 that needs its transmit ring processing
 *
 *	This operates in a similar fashion to mc32_rx_ring. We iterate
 *	over the transmit ring. For each descriptor which has been
 *	processed by the card, we free its associated buffer and note
 *	any errors. This continues until the transmit ring is emptied
 *	or we reach a descriptor that hasn't yet been processed by the
 *	card.
 */

static void mc32_tx_ring(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct skb_header *np;	/* descriptor shared with the card */

	/*
	 * We rely on head==tail to mean 'queue empty'.
	 * This is why lp->tx_count=TX_RING_LEN-1: in order to prevent
	 * tx_ring_head wrapping to tail and confusing a 'queue empty'
	 * condition with 'queue full'
	 */

	while (lp->tx_ring_tail != atomic_read(&lp->tx_ring_head))
	{
		u16 t;

		t=next_tx(lp->tx_ring_tail);
		np=lp->tx_ring[t].p;

		if(!(np->status & (1<<7)))
		{
			/* Not COMPLETED */
			break;
		}
		dev->stats.tx_packets++;
		if(!(np->status & (1<<6))) /* Not COMPLETED_OK */
		{
			dev->stats.tx_errors++;

			/* Low nibble of status encodes the failure cause */
			switch(np->status&0x0F)
			{
			case 1:
				dev->stats.tx_aborted_errors++;
				break; /* Max collisions */
			case 2:
				dev->stats.tx_fifo_errors++;
				break;
			case 3:
				dev->stats.tx_carrier_errors++;
				break;
			case 4:
				dev->stats.tx_window_errors++;
				break; /* CTS Lost */
			case 5:
				dev->stats.tx_aborted_errors++;
				break; /* Transmit timeout */
			}
		}
		/* Packets are sent in order - this is
		   basically a FIFO queue of buffers matching
		   the card ring */
		dev->stats.tx_bytes+=lp->tx_ring[t].skb->len;
		dev_kfree_skb_irq(lp->tx_ring[t].skb);
		lp->tx_ring[t].skb=NULL;
		atomic_inc(&lp->tx_count);
		netif_wake_queue(dev);

		lp->tx_ring_tail=t;
	}
}
/**
 *	mc32_interrupt	-	handle an interrupt from a 3c527
 *	@irq: Interrupt number
 *	@dev_id: 3c527 that requires servicing
 *
 *	An interrupt is raised whenever the 3c527 writes to the command
 *	register. This register contains the message it wishes to send us
 *	packed into a single byte field. We keep reading status entries
 *	until we have processed all the control items, but simply count
 *	transmit and receive reports. When all reports are in we empty the
 *	transceiver rings as appropriate. This saves the overhead of
 *	multiple command requests.
 *
 *	Because MCA is level-triggered, we shouldn't miss indications.
 *	Therefore, we needn't ask the card to suspend interrupts within
 *	this handler. The card receives an implicit acknowledgment of the
 *	current interrupt when we read the command register.
 */

static irqreturn_t mc32_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct mc32_local *lp;
	int ioaddr, status, boguscount = 0;
	int rx_event = 0;	/* RX reports seen this pass */
	int tx_event = 0;	/* TX reports seen this pass */

	ioaddr = dev->base_addr;
	lp = netdev_priv(dev);

	/* See whats cooking */

	/* boguscount bounds the loop in case the card wedges with CWR set */
	while((inb(ioaddr+HOST_STATUS)&HOST_STATUS_CWR) && boguscount++<2000)
	{
		status=inb(ioaddr+HOST_CMD);

		pr_debug("Status TX%d RX%d EX%d OV%d BC%d\n",
			(status&7), (status>>3)&7, (status>>6)&1,
			(status>>7)&1, boguscount);

		/* Bits 0-2: transmit indication */
		switch(status&7)
		{
		case 0:
			break;
		case 6: /* TX fail */
		case 2: /* TX ok */
			tx_event = 1;
			break;
		case 3: /* Halt */
		case 4: /* Abort */
			complete(&lp->xceiver_cmd);
			break;
		default:
			pr_notice("%s: strange tx ack %d\n", dev->name, status&7);
		}
		status>>=3;
		/* Bits 3-5 (now 0-2): receive indication */
		switch(status&7)
		{
		case 0:
			break;
		case 2: /* RX */
			rx_event=1;
			break;
		case 3: /* Halt */
		case 4: /* Abort */
			complete(&lp->xceiver_cmd);
			break;
		case 6:
			/* Out of RX buffers stat */
			/* Must restart rx */
			dev->stats.rx_dropped++;
			mc32_rx_ring(dev);
			mc32_start_transceiver(dev);
			break;
		default:
			pr_notice("%s: strange rx ack %d\n",
				dev->name, status&7);
		}
		status>>=3;
		/* Bit 6: command execution completed */
		if(status&1)
		{
			/*
			 * No thread is waiting: we need to tidy
			 * up ourself.
			 */

			if (lp->cmd_nonblocking) {
				up(&lp->cmd_mutex);
				if (lp->mc_reload_wait)
					mc32_reset_multicast_list(dev);
			}
			else complete(&lp->execution_cmd);
		}
		/* Bit 7: a stats counter is about to overflow */
		if(status&2)
		{
			/*
			 * We get interrupted once per
			 * counter that is about to overflow.
			 */

			mc32_update_stats(dev);
		}
	}

	/*
	 * Process the transmit and receive rings
	 */

	if(tx_event)
		mc32_tx_ring(dev);

	if(rx_event)
		mc32_rx_ring(dev);

	return IRQ_HANDLED;
}
/**
 *	mc32_close	-	user configuring the 3c527 down
 *	@dev: 3c527 card to shut down
 *
 *	The 3c527 is a bus mastering device. We must be careful how we
 *	shut it down. It may also be running shared interrupt so we have
 *	to be sure to silence it properly
 *
 *	We indicate that the card is closing to the rest of the
 *	driver. Otherwise, it is possible that the card may run out
 *	of receive buffers and restart the transceiver while we're
 *	trying to close it.
 *
 *	We abort any receive and transmits going on and then wait until
 *	any pending exec commands have completed in other code threads.
 *	In theory we can't get here while that is true, in practice I am
 *	paranoid
 *
 *	We turn off the interrupt enable for the board to be sure it can't
 *	intefere with other devices.
 */

static int mc32_close(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	u8 regs;
	u16 one=1;

	/* Tell the rest of the driver we are closing: stops the RX path
	   restarting the transceiver behind our back */
	lp->xceiver_desired_state = HALTED;
	netif_stop_queue(dev);

	/*
	 * Send the indications on command (handy debug check)
	 */

	mc32_command(dev, 4, &one, 2);

	/* Shut down the transceiver */

	mc32_halt_transceiver(dev);

	/* Ensure we issue no more commands beyond this point */

	down(&lp->cmd_mutex);

	/* Ok the card is now stopping */

	regs=inb(ioaddr+HOST_CTRL);
	regs&=~HOST_CTRL_INTE;	/* mask board interrupts */
	outb(regs, ioaddr+HOST_CTRL);

	mc32_flush_rx_ring(dev);
	mc32_flush_tx_ring(dev);

	/* Final stats snapshot before the card goes quiet */
	mc32_update_stats(dev);

	return 0;
}
1483 * mc32_get_stats - hand back stats to network layer
1484 * @dev: The 3c527 card to handle
1486 * We've collected all the stats we can in software already. Now
1487 * it's time to update those kept on-card and return the lot.
1491 static struct net_device_stats *mc32_get_stats(struct net_device *dev)
1493 mc32_update_stats(dev);
1494 return &dev->stats;
/**
 *	do_mc32_set_multicast_list	-	attempt to update multicasts
 *	@dev: 3c527 device to load the list on
 *	@retry: indicates this is not the first call.
 *
 *	Actually set or clear the multicast filter for this adaptor. The
 *	locking issues are handled by this routine. We have to track
 *	state as it may take multiple calls to get the command sequence
 *	completed. We just keep trying to schedule the loads until we
 *	manage to process them all.
 *
 *	num_addrs == -1	Promiscuous mode, receive all packets
 *
 *	num_addrs == 0	Normal mode, clear multicast list
 *
 *	num_addrs > 0	Multicast mode, receive normal and MC packets,
 *			and do best-effort filtering.
 *
 *	See mc32_update_stats() regards setting the SAV BP bit.
 */

static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
{
	struct mc32_local *lp = netdev_priv(dev);
	u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */

	/* Too many addresses (>10) or explicit request: go promiscuous */
	if ((dev->flags&IFF_PROMISC) ||
	    (dev->flags&IFF_ALLMULTI) ||
	    netdev_mc_count(dev) > 10)
		/* Enable promiscuous mode */
		filt |= 1;
	else if (!netdev_mc_empty(dev))
	{
		/* 2-byte header + up to 10 six-byte addresses */
		unsigned char block[62];
		unsigned char *bp;
		struct netdev_hw_addr *ha;

		/* A fresh request invalidates any list loaded so far */
		if(retry==0)
			lp->mc_list_valid = 0;
		if(!lp->mc_list_valid)
		{
			block[1]=0;
			block[0]=netdev_mc_count(dev);
			bp=block+2;

			netdev_for_each_mc_addr(ha, dev) {
				memcpy(bp, ha->addr, 6);
				bp+=6;
			}
			/* Card busy: remember to reload from the interrupt
			   handler when the current command completes */
			if(mc32_command_nowait(dev, 2, block,
					       2+6*netdev_mc_count(dev))==-1)
			{
				lp->mc_reload_wait = 1;
				return;
			}
			lp->mc_list_valid=1;
		}
	}

	/* Load the filter mode; on failure schedule a retry as above */
	if(mc32_command_nowait(dev, 0, &filt, 2)==-1)
	{
		lp->mc_reload_wait = 1;
	}
	else {
		lp->mc_reload_wait = 0;
	}
}
/**
 *	mc32_set_multicast_list	-	queue multicast list update
 *	@dev: The 3c527 to use
 *
 *	Entry point from the network core when the multicast or flag
 *	configuration changes.  Starts a fresh (retry == 0) load, which
 *	overrides any list load already in progress.
 */

static void mc32_set_multicast_list(struct net_device *dev)
{
	do_mc32_set_multicast_list(dev, 0);
}
/**
 *	mc32_reset_multicast_list	-	reset multicast list
 *	@dev: The 3c527 to use
 *
 *	Continue (retry == 1) a multicast load that could not complete
 *	earlier.  Called from the interrupt handler once the card is
 *	ready for another command; may be rescheduled again if the card
 *	is still busy.
 */

static void mc32_reset_multicast_list(struct net_device *dev)
{
	do_mc32_set_multicast_list(dev, 1);
}
1598 static void netdev_get_drvinfo(struct net_device *dev,
1599 struct ethtool_drvinfo *info)
1601 strcpy(info->driver, DRV_NAME);
1602 strcpy(info->version, DRV_VERSION);
1603 sprintf(info->bus_info, "MCA 0x%lx", dev->base_addr);
1606 static u32 netdev_get_msglevel(struct net_device *dev)
1608 return mc32_debug;
1611 static void netdev_set_msglevel(struct net_device *dev, u32 level)
1613 mc32_debug = level;
/* ethtool operations exported by this driver: identification and
 * message-level get/set only. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo = netdev_get_drvinfo,
	.get_msglevel = netdev_get_msglevel,
	.set_msglevel = netdev_set_msglevel,
};
1622 #ifdef MODULE
1624 static struct net_device *this_device;
1627 * init_module - entry point
1629 * Probe and locate a 3c527 card. This really should probe and locate
1630 * all the 3c527 cards in the machine not just one of them. Yes you can
1631 * insmod multiple modules for now but it's a hack.
1634 int __init init_module(void)
1636 this_device = mc32_probe(-1);
1637 if (IS_ERR(this_device))
1638 return PTR_ERR(this_device);
1639 return 0;
/**
 *	cleanup_module	-	free resources for an unload
 *
 *	Unloading time. We release the MCA bus resources and the interrupt
 *	at which point everything is ready to unload. The card must be stopped
 *	at this point or we would not have been called. When we unload we
 *	leave the card stopped but not totally shut down. When the card is
 *	initialized it must be rebooted or the rings reloaded before any
 *	transmit operations are allowed to start scribbling into memory.
 */

void __exit cleanup_module(void)
{
	/* Order matters: detach from the network core first, then release
	   the card's bus/IRQ resources, then free the net_device itself */
	unregister_netdev(this_device);
	cleanup_card(this_device);
	free_netdev(this_device);
}

#endif /* MODULE */