1 /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
3 Written 1998-1999 by Donald Becker.
5 This software may be used and distributed according to the terms
6 of the GNU Public License (GPL), incorporated herein by reference.
7 Drivers derived from this code also fall under the GPL and must retain
8 this authorship and copyright notice.
10 This driver is designed for the VIA VT86c100A Rhine-II PCI Fast Ethernet
11 controller. It also works with the older 3043 Rhine-I chip.
13 The author may be reached as becker@cesdis.edu, or
18 Support and updates available at
19 http://cesdis.gsfc.nasa.gov/linux/drivers/via-rhine.html
22 static const char *versionA
=
23 "via-rhine.c:v1.01 2/27/99 Written by Donald Becker\n";
24 static const char *versionB
=
25 " http://cesdis.gsfc.nasa.gov/linux/drivers/via-rhine.html\n";
27 /* A few user-configurable values. These may be modified when a driver
30 static int debug
= 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
31 static int max_interrupt_work
= 20;
32 static int min_pci_latency
= 64;
34 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
35 Setting to > 1518 effectively disables this feature. */
36 static int rx_copybreak
= 0;
38 /* Used to pass the media type, etc.
39 Both 'options[]' and 'full_duplex[]' should exist for driver
41 The media type is usually passed in 'options[]'.
43 #define MAX_UNITS 8 /* More are supported, limit only on options */
44 static int options
[MAX_UNITS
] = {-1, -1, -1, -1, -1, -1, -1, -1};
45 static int full_duplex
[MAX_UNITS
] = {-1, -1, -1, -1, -1, -1, -1, -1};
47 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
48 The Rhine has a 64 element 8390-like hash table. */
49 static const int multicast_filter_limit
= 32;
51 /* Operational parameters that are set at compile time. */
53 /* Keep the ring sizes a power of two for compile efficiency.
54 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
55 Making the Tx ring too large decreases the effectiveness of channel
56 bonding and packet priority.
57 There are no ill effects from too-large receive rings. */
58 #define TX_RING_SIZE 8
59 #define RX_RING_SIZE 16
61 /* Operational parameters that usually are not changed. */
62 /* Time in jiffies before concluding the transmitter is hung. */
63 #define TX_TIMEOUT (2*HZ)
65 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
67 #include <linux/module.h>
68 #include <linux/kernel.h>
69 #include <linux/version.h>
70 #include <linux/string.h>
71 #include <linux/timer.h>
72 #include <linux/errno.h>
73 #include <linux/ioport.h>
74 #include <linux/malloc.h>
75 #include <linux/interrupt.h>
76 #include <linux/pci.h>
77 #include <linux/netdevice.h>
78 #include <linux/etherdevice.h>
79 #include <linux/skbuff.h>
80 #include <asm/processor.h> /* Processor type for cache alignment. */
81 #include <asm/bitops.h>
84 /* This driver was written to use PCI memory space, however some x86
85 motherboards only configure I/O space accesses correctly. */
86 #if defined(__i386__) && !defined(VIA_USE_MEMORY)
89 #if defined(__alpha__)
107 /* Kernel compatibility defines, some common to David Hind's PCMCIA package.
108 This is only in the support-all-kernels source code. */
110 #define RUN_AT(x) (jiffies + (x))
113 char kernel_version
[] = UTS_RELEASE
;
116 #define ioremap vremap
117 #define iounmap vfree
120 #if defined(MODULE) && LINUX_VERSION_CODE > 0x20115
121 MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>");
122 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
123 MODULE_PARM(max_interrupt_work
, "i");
124 MODULE_PARM(min_pci_latency
, "i");
125 MODULE_PARM(debug
, "i");
126 MODULE_PARM(rx_copybreak
, "i");
127 MODULE_PARM(options
, "1-" __MODULE_STRING(MAX_UNITS
) "i");
128 MODULE_PARM(full_duplex
, "1-" __MODULE_STRING(MAX_UNITS
) "i");
130 #if LINUX_VERSION_CODE < 0x20123
131 #define test_and_set_bit(val, addr) set_bit(val, addr)
133 #if LINUX_VERSION_CODE <= 0x20139
134 #define net_device_stats enet_statistics
136 #define NETSTATS_VER2
138 #if LINUX_VERSION_CODE < 0x20155 || defined(CARDBUS)
139 /* Grrrr, the PCI code changed, but did not consider CardBus... */
140 #include <linux/bios32.h>
141 #define PCI_SUPPORT_VER1
143 #define PCI_SUPPORT_VER2
145 #if LINUX_VERSION_CODE < 0x20159
146 #define dev_free_skb(skb) dev_kfree_skb(skb, FREE_WRITE);
148 #define dev_free_skb(skb) dev_kfree_skb(skb);
155 I. Board Compatibility
157 This driver is designed for the VIA 86c100A Rhine-II PCI Fast Ethernet
160 II. Board-specific settings
162 Boards with this chip are functional only in a bus-master PCI slot.
164 Many operational settings are loaded from the EEPROM to the Config word at
165 offset 0x78. This driver assumes that they are correct.
166 If this driver is compiled to use PCI memory space operations the EEPROM
167 must be configured to enable memory ops.
169 III. Driver operation
173 This driver uses two statically allocated fixed-size descriptor lists
174 formed into rings by a branch from the final descriptor to the beginning of
175 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
177 IIIb/c. Transmit/Receive Structure
179 This driver attempts to use a zero-copy receive and transmit scheme.
181 Alas, all data buffers are required to start on a 32 bit boundary, so
182 the driver must often copy transmit packets into bounce buffers.
184 The driver allocates full frame size skbuffs for the Rx ring buffers at
185 open() time and passes the skb->data field to the chip as receive data
186 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
187 a fresh skbuff is allocated and the frame is copied to the new skbuff.
188 When the incoming frame is larger, the skbuff is passed directly up the
189 protocol stack. Buffers consumed this way are replaced by newly allocated
190 skbuffs in the last phase of netdev_rx().
192 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
193 using a full-sized skbuff for small frames vs. the copying costs of larger
194 frames. New boards are typically used in generously configured machines
195 and the underfilled buffers have negligible impact compared to the benefit of
196 a single allocation size, so the default value of zero results in never
197 copying packets. When copying is done, the cost is usually mitigated by using
198 a combined copy/checksum routine. Copying also preloads the cache, which is
199 most useful with small frames.
201 Since the VIA chips are only able to transfer data to buffers on 32 bit
202 boundaries, the IP header at offset 14 in an ethernet frame isn't
203 longword aligned for further processing. Copying these unaligned buffers
204 has the beneficial effect of 16-byte aligning the IP header.
206 IIId. Synchronization
208 The driver runs as two independent, single-threaded flows of control. One
209 is the send-packet routine, which enforces single-threaded use by the
210 dev->tbusy flag. The other thread is the interrupt handler, which is single
211 threaded by the hardware and interrupt handling software.
213 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
214 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
215 queue slot is empty, it clears the tbusy flag when finished, otherwise it sets
216 the 'lp->tx_full' flag.
218 The interrupt handler has exclusive control over the Rx ring and records stats
219 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
220 empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
221 clears both the tx_full and tbusy flags.
227 Preliminary VT86C100A manual from http://www.via.com.tw/
228 http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
229 http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
233 The VT86C100A manual is not reliable information.
234 The chip does not handle unaligned transmit or receive buffers, resulting
235 in significant performance degradation for bounce buffer copies on transmit
236 and unaligned IP headers on receive.
237 The chip does not pad to minimum transmit length.
243 /* This table drives the PCI probe routines. It's mostly boilerplate in all
244 of the drivers, and will likely be provided by some future kernel.
245 Note the matching code -- the first table entry matches all 56** cards but
246 second only the 1234 card.
249 PCI_USES_IO
=1, PCI_USES_MEM
=2, PCI_USES_MASTER
=4,
250 PCI_ADDR0
=0x10<<0, PCI_ADDR1
=0x10<<1, PCI_ADDR2
=0x10<<2, PCI_ADDR3
=0x10<<3,
254 u16 vendor_id
, device_id
, device_id_mask
, flags
;
256 struct net_device
*(*probe1
)(int pci_bus
, int pci_devfn
, struct net_device
*dev
,
257 long ioaddr
, int irq
, int chip_idx
, int fnd_cnt
);
260 static struct net_device
*via_probe1(int pci_bus
, int pci_devfn
,
261 struct net_device
*dev
, long ioaddr
, int irq
,
262 int chp_idx
, int fnd_cnt
);
264 static struct pci_id_info pci_tbl
[] = {
265 { "VIA VT86C100A Rhine-II", 0x1106, 0x6100, 0xffff,
266 PCI_USES_MEM
|PCI_USES_IO
|PCI_USES_MEM
|PCI_USES_MASTER
, 128, via_probe1
},
267 { "VIA VT3043 Rhine", 0x1106, 0x3043, 0xffff,
268 PCI_USES_IO
|PCI_USES_MEM
|PCI_USES_MASTER
, 128, via_probe1
},
269 {0,}, /* 0 terminated list. */
273 /* A chip capabilities table, matching the entries in pci_tbl[] above. */
274 enum chip_capability_flags
{CanHaveMII
=1, };
278 } static cap_tbl
[] = {
284 /* Offsets to the device registers.
286 enum register_offsets
{
287 StationAddr
=0x00, RxConfig
=0x06, TxConfig
=0x07, ChipCmd
=0x08,
288 IntrStatus
=0x0C, IntrEnable
=0x0E,
289 MulticastFilter0
=0x10, MulticastFilter1
=0x14,
290 RxRingPtr
=0x18, TxRingPtr
=0x1C,
291 MIIPhyAddr
=0x6C, MIIStatus
=0x6D, PCIConfig
=0x6E,
292 MIICmd
=0x70, MIIRegAddr
=0x71, MIIData
=0x72,
293 Config
=0x78, RxMissed
=0x7C, RxCRCErrs
=0x7E,
296 /* Bits in the interrupt status/mask registers. */
297 enum intr_status_bits
{
298 IntrRxDone
=0x0001, IntrRxErr
=0x0004, IntrRxEmpty
=0x0020,
299 IntrTxDone
=0x0002, IntrTxAbort
=0x0008, IntrTxUnderrun
=0x0010,
301 IntrStatsMax
=0x0080, IntrRxEarly
=0x0100, IntrMIIChange
=0x0200,
302 IntrRxOverflow
=0x0400, IntrRxDropped
=0x0800, IntrRxNoBuf
=0x1000,
303 IntrTxAborted
=0x2000, IntrLinkChange
=0x4000,
305 IntrNormalSummary
=0x0003, IntrAbnormalSummary
=0x8260,
309 /* The Rx and Tx buffer descriptors. */
325 /* Bits in *_desc.status */
326 enum rx_status_bits
{
327 RxDescOwn
=0x80000000, RxOK
=0x8000, RxWholePkt
=0x0300, RxErr
=0x008F};
328 enum desc_status_bits
{
329 DescOwn
=0x8000, DescEndPacket
=0x4000, DescIntr
=0x1000,
332 /* Bits in ChipCmd. */
334 CmdInit
=0x0001, CmdStart
=0x0002, CmdStop
=0x0004, CmdRxOn
=0x0008,
335 CmdTxOn
=0x0010, CmdTxDemand
=0x0020, CmdRxDemand
=0x0040,
336 CmdEarlyRx
=0x0100, CmdEarlyTx
=0x0200, CmdFDuplex
=0x0400,
337 CmdNoTxPoll
=0x0800, CmdReset
=0x8000,
340 struct netdev_private
{
341 /* Descriptor rings first for alignment. */
342 struct rx_desc rx_ring
[RX_RING_SIZE
];
343 struct tx_desc tx_ring
[TX_RING_SIZE
];
344 /* The addresses of receive-in-place skbuffs. */
345 struct sk_buff
* rx_skbuff
[RX_RING_SIZE
];
346 /* The saved address of a sent-in-place packet/buffer, for later free(). */
347 struct sk_buff
* tx_skbuff
[TX_RING_SIZE
];
348 unsigned char *tx_buf
[TX_RING_SIZE
]; /* Tx bounce buffers */
349 unsigned char *tx_bufs
; /* Tx bounce buffer region. */
350 struct net_device
*next_module
; /* Link for devices of this type. */
351 struct net_device_stats stats
;
352 struct timer_list timer
; /* Media monitoring timer. */
353 unsigned char pci_bus
, pci_devfn
;
354 /* Frequently used values: keep some adjacent for cache effect. */
356 long in_interrupt
; /* Word-long for SMP locks. */
357 struct rx_desc
*rx_head_desc
;
358 unsigned int cur_rx
, dirty_rx
; /* Producer/consumer ring indices */
359 unsigned int cur_tx
, dirty_tx
;
360 unsigned int rx_buf_sz
; /* Based on MTU+slack. */
361 u16 chip_cmd
; /* Current setting for ChipCmd */
362 unsigned int tx_full
:1; /* The Tx queue is full. */
363 /* These values keep track of the transceiver/media in use. */
364 unsigned int full_duplex
:1; /* Full-duplex operation requested. */
365 unsigned int duplex_lock
:1;
366 unsigned int medialock
:1; /* Do not sense media. */
367 unsigned int default_port
:4; /* Last dev->if_port value. */
368 u8 tx_thresh
, rx_thresh
;
369 /* MII transceiver section. */
370 int mii_cnt
; /* MII device addresses. */
371 u16 advertising
; /* NWay media advertisement */
372 unsigned char phys
[2]; /* MII device addresses. */
375 static int mdio_read(struct net_device
*dev
, int phy_id
, int location
);
376 static void mdio_write(struct net_device
*dev
, int phy_id
, int location
, int value
);
377 static int netdev_open(struct net_device
*dev
);
378 static void check_duplex(struct net_device
*dev
);
379 static void netdev_timer(unsigned long data
);
380 static void tx_timeout(struct net_device
*dev
);
381 static void init_ring(struct net_device
*dev
);
382 static int start_tx(struct sk_buff
*skb
, struct net_device
*dev
);
383 static void intr_handler(int irq
, void *dev_instance
, struct pt_regs
*regs
);
384 static int netdev_rx(struct net_device
*dev
);
385 static void netdev_error(struct net_device
*dev
, int intr_status
);
386 static void set_rx_mode(struct net_device
*dev
);
387 static struct net_device_stats
*get_stats(struct net_device
*dev
);
388 static int mii_ioctl(struct net_device
*dev
, struct ifreq
*rq
, int cmd
);
389 static int netdev_close(struct net_device
*dev
);
393 /* A list of our installed devices, for removing the driver module. */
394 static struct net_device
*root_net_dev
= NULL
;
396 /* Ideally we would detect all network cards in slot order. That would
397 be best done a central PCI probe dispatch, which wouldn't work
398 well when dynamically adding drivers. So instead we detect just the
399 cards we know about in slot order. */
401 static int pci_etherdev_probe(struct net_device
*dev
, struct pci_id_info pci_tbl
[])
405 unsigned char pci_bus
, pci_device_fn
;
407 if ( ! pcibios_present())
410 for (;pci_index
< 0xff; pci_index
++) {
411 u16 vendor
, device
, pci_command
, new_command
;
416 if (pcibios_find_class (PCI_CLASS_NETWORK_ETHERNET
<< 8, pci_index
,
417 &pci_bus
, &pci_device_fn
)
418 != PCIBIOS_SUCCESSFUL
)
420 pcibios_read_config_word(pci_bus
, pci_device_fn
,
421 PCI_VENDOR_ID
, &vendor
);
422 pcibios_read_config_word(pci_bus
, pci_device_fn
,
423 PCI_DEVICE_ID
, &device
);
425 for (chip_idx
= 0; pci_tbl
[chip_idx
].vendor_id
; chip_idx
++)
426 if (vendor
== pci_tbl
[chip_idx
].vendor_id
427 && (device
& pci_tbl
[chip_idx
].device_id_mask
) ==
428 pci_tbl
[chip_idx
].device_id
)
430 if (pci_tbl
[chip_idx
].vendor_id
== 0) /* Compiled out! */
434 #if defined(PCI_SUPPORT_VER2)
435 struct pci_dev
*pdev
= pci_find_slot(pci_bus
, pci_device_fn
);
437 pciaddr
= pdev
->resource
[0].start
;
439 pciaddr
= pdev
->resource
[1].start
;
445 pcibios_read_config_byte(pci_bus
, pci_device_fn
,
446 PCI_INTERRUPT_LINE
, &pci_irq_line
);
448 pcibios_read_config_dword(pci_bus
, pci_device_fn
,
449 PCI_BASE_ADDRESS_0
, &pci_memaddr
);
450 pciaddr
= pci_memaddr
;
452 pcibios_read_config_dword(pci_bus
, pci_device_fn
,
453 PCI_BASE_ADDRESS_1
, &pci_memaddr
);
454 pciaddr
= pci_memaddr
;
461 printk(KERN_INFO
"Found %s at PCI address %#lx, IRQ %d.\n",
462 pci_tbl
[chip_idx
].name
, pciaddr
, irq
);
464 if (pci_tbl
[chip_idx
].flags
& PCI_USES_IO
) {
465 ioaddr
= pciaddr
& ~3;
466 if (check_region(ioaddr
, pci_tbl
[chip_idx
].io_size
))
468 } else if ((ioaddr
= (long)ioremap(pciaddr
& ~0xf,
469 pci_tbl
[chip_idx
].io_size
)) == 0) {
470 printk(KERN_INFO
"Failed to map PCI address %#lx.\n",
475 pcibios_read_config_word(pci_bus
, pci_device_fn
,
476 PCI_COMMAND
, &pci_command
);
477 new_command
= pci_command
| (pci_tbl
[chip_idx
].flags
& 7);
478 if (pci_command
!= new_command
) {
479 printk(KERN_INFO
" The PCI BIOS has not enabled the"
480 " device at %d/%d! Updating PCI command %4.4x->%4.4x.\n",
481 pci_bus
, pci_device_fn
, pci_command
, new_command
);
482 pcibios_write_config_word(pci_bus
, pci_device_fn
,
483 PCI_COMMAND
, new_command
);
486 dev
= pci_tbl
[chip_idx
].probe1(pci_bus
, pci_device_fn
, dev
, ioaddr
,
487 irq
, chip_idx
, cards_found
);
489 if (dev
&& (pci_tbl
[chip_idx
].flags
& PCI_COMMAND_MASTER
)) {
491 pcibios_read_config_byte(pci_bus
, pci_device_fn
,
492 PCI_LATENCY_TIMER
, &pci_latency
);
493 if (pci_latency
< min_pci_latency
) {
494 printk(KERN_INFO
" PCI latency timer (CFLT) is "
495 "unreasonably low at %d. Setting to %d clocks.\n",
496 pci_latency
, min_pci_latency
);
497 pcibios_write_config_byte(pci_bus
, pci_device_fn
,
498 PCI_LATENCY_TIMER
, min_pci_latency
);
505 return cards_found
? 0 : -ENODEV
;
/*
 * Driver probe entry point: print the version banner, then hand off to
 * the generic PCI ethernet probe with this driver's pci_tbl[].
 * Returns pci_etherdev_probe()'s result (0 if a card was found,
 * -ENODEV otherwise, per that function's visible return statement).
 * NOTE(review): the function's braces are not visible in this
 * extraction; only its statements are shown.
 */
509 int via_rhine_probe(struct net_device
*dev
)
511 printk(KERN_INFO
"%s" KERN_INFO
"%s", versionA
, versionB
);
512 return pci_etherdev_probe(dev
, pci_tbl
);
516 static struct net_device
*via_probe1(int pci_bus
, int pci_devfn
,
517 struct net_device
*dev
, long ioaddr
, int irq
,
518 int chip_id
, int card_idx
)
520 struct netdev_private
*np
;
521 int i
, option
= card_idx
< MAX_UNITS
? options
[card_idx
] : 0;
523 dev
= init_etherdev(dev
, 0);
525 printk(KERN_INFO
"%s: %s at 0x%lx, ",
526 dev
->name
, pci_tbl
[chip_id
].name
, ioaddr
);
528 /* Ideally we would read the EEPROM but access may be locked. */
529 for (i
= 0; i
<6; i
++)
530 dev
->dev_addr
[i
] = readb(ioaddr
+ StationAddr
+ i
);
531 for (i
= 0; i
< 5; i
++)
532 printk("%2.2x:", dev
->dev_addr
[i
]);
533 printk("%2.2x, IRQ %d.\n", dev
->dev_addr
[i
], irq
);
536 request_region(ioaddr
, pci_tbl
[chip_id
].io_size
, dev
->name
);
539 /* Reset the chip to erase previous misconfiguration. */
540 writew(CmdReset
, ioaddr
+ ChipCmd
);
542 dev
->base_addr
= ioaddr
;
545 /* Make certain the descriptor lists are cache-aligned. */
546 np
= (void *)(((long)kmalloc(sizeof(*np
), GFP_KERNEL
) + 31) & ~31);
547 memset(np
, 0, sizeof(*np
));
550 np
->next_module
= root_net_dev
;
553 np
->pci_bus
= pci_bus
;
554 np
->pci_devfn
= pci_devfn
;
555 np
->chip_id
= chip_id
;
558 option
= dev
->mem_start
;
560 /* The lower four bits are the media type. */
564 np
->default_port
= option
& 15;
565 if (np
->default_port
)
568 if (card_idx
< MAX_UNITS
&& full_duplex
[card_idx
] > 0)
574 /* The chip-specific entries in the device structure. */
575 dev
->open
= &netdev_open
;
576 dev
->hard_start_xmit
= &start_tx
;
577 dev
->stop
= &netdev_close
;
578 dev
->get_stats
= &get_stats
;
579 dev
->set_multicast_list
= &set_rx_mode
;
580 dev
->do_ioctl
= &mii_ioctl
;
582 if (cap_tbl
[np
->chip_id
].flags
& CanHaveMII
) {
583 int phy
, phy_idx
= 0;
584 np
->phys
[0] = 1; /* Standard for this chip. */
585 for (phy
= 1; phy
< 32 && phy_idx
< 4; phy
++) {
586 int mii_status
= mdio_read(dev
, phy
, 1);
587 if (mii_status
!= 0xffff && mii_status
!= 0x0000) {
588 np
->phys
[phy_idx
++] = phy
;
589 np
->advertising
= mdio_read(dev
, phy
, 4);
590 printk(KERN_INFO
"%s: MII PHY found at address %d, status "
591 "0x%4.4x advertising %4.4x Link %4.4x.\n",
592 dev
->name
, phy
, mii_status
, np
->advertising
,
593 mdio_read(dev
, phy
, 5));
596 np
->mii_cnt
= phy_idx
;
603 /* Read and write over the MII Management Data I/O (MDIO) interface. */
/*
 * Read MII register 'regnum' of PHY 'phy_id' through the chip's MDIO
 * interface (MIIPhyAddr/MIIRegAddr/MIICmd/MIIData registers).
 * Waits (bounded by 'boguscnt' -- its declaration is on a line not
 * visible in this extraction) for any previous MII command to finish,
 * triggers a read with command bit 0x40, polls for completion, and
 * returns the 16-bit value latched in MIIData.
 */
605 static int mdio_read(struct net_device
*dev
, int phy_id
, int regnum
)
607 long ioaddr
= dev
->base_addr
;
610 /* Wait for a previous command to complete. */
611 while ((readb(ioaddr
+ MIICmd
) & 0x60) && --boguscnt
> 0)
/* Clear any stale command, then latch the PHY address and register. */
613 writeb(0x00, ioaddr
+ MIICmd
);
614 writeb(phy_id
, ioaddr
+ MIIPhyAddr
);
615 writeb(regnum
, ioaddr
+ MIIRegAddr
);
616 writeb(0x40, ioaddr
+ MIICmd
); /* Trigger read */
/* Poll until the read-command bit self-clears or the bound expires. */
618 while ((readb(ioaddr
+ MIICmd
) & 0x40) && --boguscnt
> 0)
620 return readw(ioaddr
+ MIIData
);
/*
 * Write 16-bit 'value' to MII register 'regnum' of PHY 'phy_id'.
 * Mirrors mdio_read(): waits (bounded by 'boguscnt', declared on a
 * line not visible in this extraction) for any in-flight MII command,
 * latches PHY address, register number and data, then triggers the
 * write with command bit 0x20. Completion is not polled here.
 */
623 static void mdio_write(struct net_device
*dev
, int phy_id
, int regnum
, int value
)
625 long ioaddr
= dev
->base_addr
;
628 /* Wait for a previous command to complete. */
629 while ((readb(ioaddr
+ MIICmd
) & 0x60) && --boguscnt
> 0)
/* Clear any stale command, then latch address, register and data. */
631 writeb(0x00, ioaddr
+ MIICmd
);
632 writeb(phy_id
, ioaddr
+ MIIPhyAddr
);
633 writeb(regnum
, ioaddr
+ MIIRegAddr
);
634 writew(value
, ioaddr
+ MIIData
);
635 writeb(0x20, ioaddr
+ MIICmd
); /* Trigger write. */
640 static int netdev_open(struct net_device
*dev
)
642 struct netdev_private
*np
= (struct netdev_private
*)dev
->priv
;
643 long ioaddr
= dev
->base_addr
;
646 /* Reset the chip. */
647 writew(CmdReset
, ioaddr
+ ChipCmd
);
649 if (request_irq(dev
->irq
, &intr_handler
, SA_SHIRQ
, dev
->name
, dev
))
653 printk(KERN_DEBUG
"%s: netdev_open() irq %d.\n",
654 dev
->name
, dev
->irq
);
660 writel(virt_to_bus(np
->rx_ring
), ioaddr
+ RxRingPtr
);
661 writel(virt_to_bus(np
->tx_ring
), ioaddr
+ TxRingPtr
);
663 for (i
= 0; i
< 6; i
++)
664 writeb(dev
->dev_addr
[i
], ioaddr
+ StationAddr
+ i
);
666 /* Initialize other registers. */
667 writew(0x0006, ioaddr
+ PCIConfig
); /* Tune configuration??? */
668 /* Configure the FIFO thresholds. */
669 writeb(0x20, ioaddr
+ TxConfig
); /* Initial threshold 32 bytes */
670 np
->tx_thresh
= 0x20;
671 np
->rx_thresh
= 0x60; /* Written in set_rx_mode(). */
673 if (dev
->if_port
== 0)
674 dev
->if_port
= np
->default_port
;
678 np
->in_interrupt
= 0;
684 /* Enable interrupts by setting the interrupt mask. */
685 writew(IntrRxDone
| IntrRxErr
| IntrRxEmpty
| IntrRxOverflow
| IntrRxDropped
|
686 IntrTxDone
| IntrTxAbort
| IntrTxUnderrun
|
687 IntrPCIErr
| IntrStatsMax
| IntrLinkChange
| IntrMIIChange
,
688 ioaddr
+ IntrEnable
);
690 np
->chip_cmd
= CmdStart
|CmdTxOn
|CmdRxOn
|CmdNoTxPoll
;
692 np
->chip_cmd
|= CmdFDuplex
;
693 writew(np
->chip_cmd
, ioaddr
+ ChipCmd
);
698 printk(KERN_DEBUG
"%s: Done netdev_open(), status %4.4x "
699 "MII status: %4.4x.\n",
700 dev
->name
, readw(ioaddr
+ ChipCmd
),
701 mdio_read(dev
, np
->phys
[0], 1));
703 /* Set the timer to check for link beat. */
704 init_timer(&np
->timer
);
705 np
->timer
.expires
= RUN_AT(1);
706 np
->timer
.data
= (unsigned long)dev
;
707 np
->timer
.function
= &netdev_timer
; /* timer handler */
708 add_timer(&np
->timer
);
/*
 * Re-evaluate the link's duplex mode from MII register 5 (the link
 * partner ability register) and update the ChipCmd full-duplex bit if
 * it changed. Skipped when the duplex is locked by configuration or
 * the PHY read returns 0xffff (no/failed PHY).
 * NOTE(review): the declaration of 'duplex' is on a line not visible
 * in this extraction.
 */
713 static void check_duplex(struct net_device
*dev
)
715 struct netdev_private
*np
= (struct netdev_private
*)dev
->priv
;
716 long ioaddr
= dev
->base_addr
;
717 int mii_reg5
= mdio_read(dev
, np
->phys
[0], 5);
720 if (np
->duplex_lock
|| mii_reg5
== 0xffff)
/* Full duplex iff the partner advertises 100baseTx-FD (0x0100), or
   the only shared 10/100 mode is 10baseT-FD (0x01C0 masked == 0x0040). */
722 duplex
= (mii_reg5
& 0x0100) || (mii_reg5
& 0x01C0) == 0x0040;
723 if (np
->full_duplex
!= duplex
) {
724 np
->full_duplex
= duplex
;
726 printk(KERN_INFO
"%s: Setting %s-duplex based on MII #%d link"
727 " partner capability of %4.4x.\n", dev
->name
,
728 duplex
? "full" : "half", np
->phys
[0], mii_reg5
);
/* Propagate the new duplex setting into the cached command word and
   write it to the chip. */
730 np
->chip_cmd
|= CmdFDuplex
;
732 np
->chip_cmd
&= ~CmdFDuplex
;
733 writew(np
->chip_cmd
, ioaddr
+ ChipCmd
);
/*
 * Periodic media-monitoring timer (armed in netdev_open()). Logs the
 * interrupt status when debugging and re-arms itself every 10 seconds.
 * NOTE(review): lines between the debug printk and the re-arm (original
 * lines 747-749) are missing from this extraction; presumably they call
 * check_duplex() -- confirm against the full source.
 */
737 static void netdev_timer(unsigned long data
)
739 struct net_device
*dev
= (struct net_device
*)data
;
740 struct netdev_private
*np
= (struct netdev_private
*)dev
->priv
;
741 long ioaddr
= dev
->base_addr
;
742 int next_tick
= 10*HZ
;
745 printk(KERN_DEBUG
"%s: VIA Rhine monitor tick, status %4.4x.\n",
746 dev
->name
, readw(ioaddr
+ IntrStatus
));
/* Re-arm the timer for the next monitoring tick. */
750 np
->timer
.expires
= RUN_AT(next_tick
);
751 add_timer(&np
->timer
);
/*
 * Transmit-watchdog handler: called when a Tx has been pending longer
 * than TX_TIMEOUT. Logs chip and PHY status, restamps trans_start and
 * counts a tx_error. The actual reset/restart steps are only sketched
 * as comments in this version (and some lines are missing from this
 * extraction).
 */
754 static void tx_timeout(struct net_device
*dev
)
756 struct netdev_private
*np
= (struct netdev_private
*)dev
->priv
;
757 long ioaddr
= dev
->base_addr
;
759 printk(KERN_WARNING
"%s: Transmit timed out, status %4.4x, PHY status "
760 "%4.4x, resetting...\n",
761 dev
->name
, readw(ioaddr
+ IntrStatus
),
762 mdio_read(dev
, np
->phys
[0], 1));
764 /* Perhaps we should reinitialize the hardware here. */
766 /* Stop and restart the chip's Tx processes . */
768 /* Trigger an immediate transmit demand. */
770 dev
->trans_start
= jiffies
;
771 np
->stats
.tx_errors
++;
776 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
/*
 * Initialize the Rx and Tx descriptor rings before the chip is
 * started: zero the producer/consumer indices, link each descriptor to
 * the next (wrapping the last back to entry 0), allocate one full-size
 * skbuff per Rx slot and hand its data area to the chip (DescOwn), and
 * allocate the per-slot Tx bounce buffers.
 * NOTE(review): the declaration of loop index 'i' and the error checks
 * after the allocations are on lines missing from this extraction.
 */
777 static void init_ring(struct net_device
*dev
)
779 struct netdev_private
*np
= (struct netdev_private
*)dev
->priv
;
783 np
->cur_rx
= np
->cur_tx
= 0;
784 np
->dirty_rx
= np
->dirty_tx
= 0;
/* Rx buffer size: default for standard MTU, else MTU plus slack. */
786 np
->rx_buf_sz
= (dev
->mtu
<= 1500 ? PKT_BUF_SZ
: dev
->mtu
+ 32);
787 np
->rx_head_desc
= &np
->rx_ring
[0];
/* First pass: chain the Rx descriptors; no buffers attached yet. */
789 for (i
= 0; i
< RX_RING_SIZE
; i
++) {
790 np
->rx_ring
[i
].rx_status
= 0;
791 np
->rx_ring
[i
].rx_length
= 0;
792 np
->rx_ring
[i
].desc_length
= np
->rx_buf_sz
;
793 np
->rx_ring
[i
].next_desc
= virt_to_bus(&np
->rx_ring
[i
+1]);
794 np
->rx_skbuff
[i
] = 0;
796 /* Mark the last entry as wrapping the ring. */
797 np
->rx_ring
[i
-1].next_desc
= virt_to_bus(&np
->rx_ring
[0]);
799 /* Fill in the Rx buffers. */
800 for (i
= 0; i
< RX_RING_SIZE
; i
++) {
801 struct sk_buff
*skb
= dev_alloc_skb(np
->rx_buf_sz
);
802 np
->rx_skbuff
[i
] = skb
;
805 skb
->dev
= dev
; /* Mark as being used by this device. */
806 np
->rx_ring
[i
].addr
= virt_to_bus(skb
->tail
);
807 np
->rx_ring
[i
].rx_status
= 0;
/* Hand the descriptor to the chip. */
808 np
->rx_ring
[i
].rx_length
= DescOwn
;
810 np
->dirty_rx
= (unsigned int)(i
- RX_RING_SIZE
);
/* Tx ring: chip does not own any entry yet; allocate bounce buffers
   for packets whose data is not 32-bit aligned. */
812 for (i
= 0; i
< TX_RING_SIZE
; i
++) {
813 np
->tx_skbuff
[i
] = 0;
814 np
->tx_ring
[i
].tx_own
= 0;
815 np
->tx_ring
[i
].desc_length
= 0x00e08000;
816 np
->tx_ring
[i
].next_desc
= virt_to_bus(&np
->tx_ring
[i
+1]);
817 np
->tx_buf
[i
] = kmalloc(PKT_BUF_SZ
, GFP_KERNEL
);
819 np
->tx_ring
[i
-1].next_desc
= virt_to_bus(&np
->tx_ring
[0]);
824 static int start_tx(struct sk_buff
*skb
, struct net_device
*dev
)
826 struct netdev_private
*np
= (struct netdev_private
*)dev
->priv
;
829 /* Block a timer-based transmit from overlapping. This could better be
830 done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
831 if (test_and_set_bit(0, (void*)&dev
->tbusy
) != 0) {
832 if (jiffies
- dev
->trans_start
< TX_TIMEOUT
)
838 /* Caution: the write order is important here, set the field
839 with the "ownership" bits last. */
841 /* Calculate the next Tx descriptor entry. */
842 entry
= np
->cur_tx
% TX_RING_SIZE
;
844 np
->tx_skbuff
[entry
] = skb
;
846 if ((long)skb
->data
& 3) { /* Must use alignment buffer. */
847 if (np
->tx_buf
[entry
] == NULL
&&
848 (np
->tx_buf
[entry
] = kmalloc(PKT_BUF_SZ
, GFP_KERNEL
)) == NULL
)
850 memcpy(np
->tx_buf
[entry
], skb
->data
, skb
->len
);
851 np
->tx_ring
[entry
].addr
= virt_to_bus(np
->tx_buf
[entry
]);
853 np
->tx_ring
[entry
].addr
= virt_to_bus(skb
->data
);
855 np
->tx_ring
[entry
].desc_length
= 0x00E08000 |
856 (skb
->len
>= ETH_ZLEN
? skb
->len
: ETH_ZLEN
);
857 np
->tx_ring
[entry
].tx_own
= DescOwn
;
861 /* Non-x86 Todo: explicitly flush cache lines here. */
863 /* Wake the potentially-idle transmit channel. */
864 writew(CmdTxDemand
| np
->chip_cmd
, dev
->base_addr
+ ChipCmd
);
866 if (np
->cur_tx
- np
->dirty_tx
< TX_RING_SIZE
- 1)
867 clear_bit(0, (void*)&dev
->tbusy
); /* Typical path */
870 dev
->trans_start
= jiffies
;
873 printk(KERN_DEBUG
"%s: Transmit frame #%d queued in slot %d.\n",
874 dev
->name
, np
->cur_tx
, entry
);
879 /* The interrupt handler does all of the Rx thread work and cleans up
880 after the Tx thread. */
881 static void intr_handler(int irq
, void *dev_instance
, struct pt_regs
*rgs
)
883 struct net_device
*dev
= (struct net_device
*)dev_instance
;
884 struct netdev_private
*np
;
885 long ioaddr
, boguscnt
= max_interrupt_work
;
887 ioaddr
= dev
->base_addr
;
888 np
= (struct netdev_private
*)dev
->priv
;
889 #if defined(__i386__)
890 /* A lock to prevent simultaneous entry bug on Intel SMP machines. */
891 if (test_and_set_bit(0, (void*)&dev
->interrupt
)) {
892 printk(KERN_ERR
"%s: SMP simultaneous entry of an interrupt handler.\n",
894 dev
->interrupt
= 0; /* Avoid halting machine. */
898 if (dev
->interrupt
) {
899 printk(KERN_ERR
"%s: Re-entering the interrupt handler.\n", dev
->name
);
906 u32 intr_status
= readw(ioaddr
+ IntrStatus
);
908 /* Acknowledge all of the current interrupt sources ASAP. */
909 writew(intr_status
& 0xffff, ioaddr
+ IntrStatus
);
912 printk(KERN_DEBUG
"%s: Interrupt, status %4.4x.\n",
913 dev
->name
, intr_status
);
915 if (intr_status
== 0)
918 if (intr_status
& (IntrRxDone
| IntrRxErr
| IntrRxDropped
|
919 IntrRxWakeUp
| IntrRxEmpty
| IntrRxNoBuf
))
922 for (; np
->cur_tx
- np
->dirty_tx
> 0; np
->dirty_tx
++) {
923 int entry
= np
->dirty_tx
% TX_RING_SIZE
;
925 if (np
->tx_ring
[entry
].tx_own
)
927 txstatus
= np
->tx_ring
[entry
].tx_status
;
929 printk(KERN_DEBUG
" Tx scavenge %d status %4.4x.\n",
931 if (txstatus
& 0x8000) {
933 printk(KERN_DEBUG
"%s: Transmit error, Tx status %4.4x.\n",
934 dev
->name
, txstatus
);
935 np
->stats
.tx_errors
++;
936 if (txstatus
& 0x0400) np
->stats
.tx_carrier_errors
++;
937 if (txstatus
& 0x0200) np
->stats
.tx_window_errors
++;
938 if (txstatus
& 0x0100) np
->stats
.tx_aborted_errors
++;
939 if (txstatus
& 0x0080) np
->stats
.tx_heartbeat_errors
++;
940 if (txstatus
& 0x0002) np
->stats
.tx_fifo_errors
++;
942 if (txstatus
& 0x0100) np
->stats
.collisions16
++;
944 /* Transmitter restarted in 'abnormal' handler. */
947 if (txstatus
& 0x0001) np
->stats
.tx_deferred
++;
949 np
->stats
.collisions
+= (txstatus
>> 3) & 15;
950 #if defined(NETSTATS_VER2)
951 np
->stats
.tx_bytes
+= np
->tx_ring
[entry
].desc_length
& 0x7ff;
953 np
->stats
.tx_packets
++;
955 /* Free the original skb. */
956 dev_free_skb(np
->tx_skbuff
[entry
]);
957 np
->tx_skbuff
[entry
] = 0;
959 if (np
->tx_full
&& dev
->tbusy
960 && np
->cur_tx
- np
->dirty_tx
< TX_RING_SIZE
- 4) {
961 /* The ring is no longer full, clear tbusy. */
963 clear_bit(0, (void*)&dev
->tbusy
);
967 /* Abnormal error summary/uncommon events handlers. */
968 if (intr_status
& (IntrPCIErr
| IntrLinkChange
| IntrMIIChange
|
969 IntrStatsMax
| IntrTxAbort
| IntrTxUnderrun
))
970 netdev_error(dev
, intr_status
);
972 if (--boguscnt
< 0) {
973 printk(KERN_WARNING
"%s: Too much work at interrupt, "
975 dev
->name
, intr_status
);
981 printk(KERN_DEBUG
"%s: exiting interrupt, status=%#4.4x.\n",
982 dev
->name
, readw(ioaddr
+ IntrStatus
));
984 #if defined(__i386__)
985 clear_bit(0, (void*)&dev
->interrupt
);
992 /* This routine is logically part of the interrupt handler, but isolated
993 for clarity and better register allocation. */
994 static int netdev_rx(struct net_device
*dev
)
996 struct netdev_private
*np
= (struct netdev_private
*)dev
->priv
;
997 int entry
= np
->cur_rx
% RX_RING_SIZE
;
998 int boguscnt
= np
->dirty_rx
+ RX_RING_SIZE
- np
->cur_rx
;
1001 printk(KERN_DEBUG
" In netdev_rx(), entry %d status %4.4x.\n",
1002 entry
, np
->rx_head_desc
->rx_length
);
1005 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1006 while ( ! (np
->rx_head_desc
->rx_length
& DescOwn
)) {
1007 struct rx_desc
*desc
= np
->rx_head_desc
;
1008 int data_size
= desc
->rx_length
;
1009 u16 desc_status
= desc
->rx_status
;
1012 printk(KERN_DEBUG
" netdev_rx() status is %4.4x.\n",
1016 if ( (desc_status
& (RxWholePkt
| RxErr
)) != RxWholePkt
) {
1017 if ((desc_status
& RxWholePkt
) != RxWholePkt
) {
1018 printk(KERN_WARNING
"%s: Oversized Ethernet frame spanned "
1019 "multiple buffers, entry %#x length %d status %4.4x!\n",
1020 dev
->name
, np
->cur_rx
, data_size
, desc_status
);
1021 printk(KERN_WARNING
"%s: Oversized Ethernet frame %p vs %p.\n",
1022 dev
->name
, np
->rx_head_desc
,
1023 &np
->rx_ring
[np
->cur_rx
% RX_RING_SIZE
]);
1024 np
->stats
.rx_length_errors
++;
1025 } else if (desc_status
& RxErr
) {
1026 /* There was an error. */
1028 printk(KERN_DEBUG
" netdev_rx() Rx error was %8.8x.\n",
1030 np
->stats
.rx_errors
++;
1031 if (desc_status
& 0x0030) np
->stats
.rx_length_errors
++;
1032 if (desc_status
& 0x0048) np
->stats
.rx_fifo_errors
++;
1033 if (desc_status
& 0x0004) np
->stats
.rx_frame_errors
++;
1034 if (desc_status
& 0x0002) np
->stats
.rx_crc_errors
++;
1037 struct sk_buff
*skb
;
1038 /* Length should omit the CRC */
1039 u16 pkt_len
= data_size
- 4;
1041 /* Check if the packet is long enough to accept without copying
1042 to a minimally-sized skbuff. */
1043 if (pkt_len
< rx_copybreak
1044 && (skb
= dev_alloc_skb(pkt_len
+ 2)) != NULL
) {
1046 skb_reserve(skb
, 2); /* 16 byte align the IP header */
1047 #if ! defined(__alpha__) || USE_IP_COPYSUM /* Avoid misaligned on Alpha */
1048 eth_copy_and_sum(skb
, bus_to_virt(desc
->addr
),
1050 skb_put(skb
, pkt_len
);
1052 memcpy(skb_put(skb
,pkt_len
), bus_to_virt(desc
->addr
), pkt_len
);
1055 skb_put(skb
= np
->rx_skbuff
[entry
], pkt_len
);
1056 np
->rx_skbuff
[entry
] = NULL
;
1058 skb
->protocol
= eth_type_trans(skb
, dev
);
1059 np
->stats
.rx_bytes
+=skb
->len
;
1061 dev
->last_rx
= jiffies
;
1062 np
->stats
.rx_packets
++;
1064 entry
= (++np
->cur_rx
) % RX_RING_SIZE
;
1065 np
->rx_head_desc
= &np
->rx_ring
[entry
];
1068 /* Refill the Rx ring buffers. */
1069 for (; np
->cur_rx
- np
->dirty_rx
> 0; np
->dirty_rx
++) {
1070 struct sk_buff
*skb
;
1071 entry
= np
->dirty_rx
% RX_RING_SIZE
;
1072 if (np
->rx_skbuff
[entry
] == NULL
) {
1073 skb
= dev_alloc_skb(np
->rx_buf_sz
);
1074 np
->rx_skbuff
[entry
] = skb
;
1076 break; /* Better luck next round. */
1077 skb
->dev
= dev
; /* Mark as being used by this device. */
1078 np
->rx_ring
[entry
].addr
= virt_to_bus(skb
->tail
);
1080 np
->rx_ring
[entry
].rx_status
= 0;
1081 np
->rx_ring
[entry
].rx_length
= DescOwn
;
1084 /* Pre-emptively restart Rx engine. */
1085 writew(CmdRxDemand
| np
->chip_cmd
, dev
->base_addr
+ ChipCmd
);
/* Handle "abnormal" interrupt causes: MII/link changes, statistics
   counter overflow, Tx aborts and Tx FIFO underruns, plus a catch-all
   restart for anything unexpected.
   NOTE(review): several original lines (braces, debug guards, and
   probably an else-branch after the mdio_write) are elided from this
   excerpt. */
1089 static void netdev_error(struct net_device
*dev
, int intr_status
)
1091 struct netdev_private
*np
= (struct netdev_private
*)dev
->priv
;
1092 long ioaddr
= dev
->base_addr
;
/* Link state changed: if the MII reports link down, kick
   autonegotiation on the first PHY. */
1094 if (intr_status
& (IntrMIIChange
| IntrLinkChange
)) {
1095 if (readb(ioaddr
+ MIIStatus
) & 0x02)
1096 /* Link failed, restart autonegotiation. */
1097 mdio_write(dev
, np
->phys
[0], 0, 0x3300);
1101 printk(KERN_ERR
"%s: MII status changed: Autonegotiation "
1102 "advertising %4.4x partner %4.4x.\n", dev
->name
,
1103 mdio_read(dev
, np
->phys
[0], 4),
1104 mdio_read(dev
, np
->phys
[0], 5));
/* Hardware tally counters near overflow: fold them into software
   stats and reset the missed-frame counter. */
1106 if (intr_status
& IntrStatsMax
) {
1107 np
->stats
.rx_crc_errors
+= readw(ioaddr
+ RxCRCErrs
);
1108 np
->stats
.rx_missed_errors
+= readw(ioaddr
+ RxMissed
);
/* NOTE(review): likely bug -- this writes to the bare register
   offset instead of ioaddr + RxMissed (compare the readw above and
   the identical sequence in get_stats()); confirm and fix. */
1109 writel(0, RxMissed
);
1111 if (intr_status
& IntrTxAbort
) {
1112 /* Stats counted in Tx-done handler, just restart Tx. */
1113 writew(CmdTxDemand
| np
->chip_cmd
, dev
->base_addr
+ ChipCmd
);
/* Tx FIFO underrun: raise the Tx threshold (capped at 0xE0) so the
   transmitter buffers more of the frame before starting. */
1115 if (intr_status
& IntrTxUnderrun
) {
1116 if (np
->tx_thresh
< 0xE0)
1117 writeb(np
->tx_thresh
+= 0x20, ioaddr
+ TxConfig
);
1119 printk(KERN_INFO
"%s: Transmitter underrun, increasing Tx "
1120 "threshold setting to %2.2x.\n", dev
->name
, np
->tx_thresh
);
/* Anything else unexpected: log it and nudge the Tx engine as a
   blanket recovery attempt. */
1122 if ((intr_status
& ~(IntrLinkChange
|IntrStatsMax
|IntrTxAbort
)) && debug
) {
1123 printk(KERN_ERR
"%s: Something Wicked happened! %4.4x.\n",
1124 dev
->name
, intr_status
);
1125 /* Recovery for other fault sources not known. */
1126 writew(CmdTxDemand
| np
->chip_cmd
, dev
->base_addr
+ ChipCmd
);
1130 static struct enet_statistics
*get_stats(struct net_device
*dev
)
1132 struct netdev_private
*np
= (struct netdev_private
*)dev
->priv
;
1133 long ioaddr
= dev
->base_addr
;
1135 /* Nominally we should lock this segment of code for SMP, although
1136 the vulnerability window is very small and statistics are
1138 np
->stats
.rx_crc_errors
+= readw(ioaddr
+ RxCRCErrs
);
1139 np
->stats
.rx_missed_errors
+= readw(ioaddr
+ RxMissed
);
1140 writel(0, RxMissed
);
1145 /* The big-endian AUTODIN II ethernet CRC calculation.
1146 N.B. Do not use for bulk data, use a table-based routine instead.
1147 This is common code and should be moved to net/core/crc.c */
1148 static unsigned const ethernet_polynomial
= 0x04c11db7U
;
1149 static inline u32
ether_crc(int length
, unsigned char *data
)
1153 while(--length
>= 0) {
1154 unsigned char current_octet
= *data
++;
1156 for (bit
= 0; bit
< 8; bit
++, current_octet
>>= 1) {
1158 ((crc
< 0) ^ (current_octet
& 1) ? ethernet_polynomial
: 0);
/* Program the chip's receive filter from dev->flags and the multicast
   list: promiscuous, accept-all-multicast, or a 64-bit hash filter
   built from the big-endian CRC of each multicast address.
   NOTE(review): the rx_mode value assignments for each branch, the
   closing braces, and part of the set_bit/writel sequence are in lines
   elided from this excerpt. */
1164 static void set_rx_mode(struct net_device
*dev
)
1166 struct netdev_private
*np
= (struct netdev_private
*)dev
->priv
;
1167 long ioaddr
= dev
->base_addr
;
1168 u32 mc_filter
[2]; /* Multicast hash filter */
1169 u8 rx_mode
; /* Note: 0x02=accept runt, 0x01=accept errs */
1171 if (dev
->flags
& IFF_PROMISC
) { /* Set promiscuous. */
1172 /* Unconditionally log net taps. */
1173 printk(KERN_NOTICE
"%s: Promiscuous mode enabled.\n", dev
->name
);
/* Too many multicast addresses to hash, or ALLMULTI requested:
   accept every multicast frame instead of filtering. */
1175 } else if ((dev
->mc_count
> multicast_filter_limit
)
1176 || (dev
->flags
& IFF_ALLMULTI
)) {
1177 /* Too many to match, or accept all multicasts. */
1180 struct dev_mc_list
*mclist
;
/* Build the 64-bit hash filter: one bit per address, indexed by
   the top 6 bits of the address's CRC. */
1182 memset(mc_filter
, 0, sizeof(mc_filter
));
1183 for (i
= 0, mclist
= dev
->mc_list
; mclist
&& i
< dev
->mc_count
;
1184 i
++, mclist
= mclist
->next
) {
1185 set_bit(ether_crc(ETH_ALEN
, mclist
->dmi_addr
) >> 26,
1188 writel(mc_filter
[0], ioaddr
+ MulticastFilter0
);
1189 writel(mc_filter
[1], ioaddr
+ MulticastFilter1
);
/* Combine the receive threshold with the chosen filter mode bits. */
1192 writeb(np
->rx_thresh
| rx_mode
, ioaddr
+ RxConfig
);
/* Private ioctl interface to the PHY's MII management registers.
   NOTE(review): the switch(cmd) statement, any capability check for
   the write case, and the return statements are in elided lines. */
1195 static int mii_ioctl(struct net_device
*dev
, struct ifreq
*rq
, int cmd
)
/* The u16 slots overlaid on ifr_data carry: [0]=PHY id, [1]=register
   number, [2]=value to write, [3]=value read back. */
1197 u16
*data
= (u16
*)&rq
->ifr_data
;
1200 case SIOCDEVPRIVATE
: /* Get the address of the PHY in use. */
1201 data
[0] = ((struct netdev_private
*)dev
->priv
)->phys
[0] & 0x1f;
1203 case SIOCDEVPRIVATE
+1: /* Read the specified MII register. */
1204 data
[3] = mdio_read(dev
, data
[0] & 0x1f, data
[1] & 0x1f);
1206 case SIOCDEVPRIVATE
+2: /* Write the specified MII register */
1209 mdio_write(dev
, data
[0] & 0x1f, data
[1] & 0x1f, data
[2]);
/* Shut the interface down: mask interrupts, stop the chip's Tx/Rx
   engines, kill the timer, release the IRQ, and free every buffer
   still held in the Rx and Tx rings.
   NOTE(review): braces, the 'int i;' declaration, the pre/post-2.1
   #else/#endif around dev_free_skb, and the return are in elided
   lines of this excerpt. */
1216 static int netdev_close(struct net_device
*dev
)
1218 long ioaddr
= dev
->base_addr
;
1219 struct netdev_private
*np
= (struct netdev_private
*)dev
->priv
;
1226 printk(KERN_DEBUG
"%s: Shutting down ethercard, status was %4.4x.\n",
1227 dev
->name
, readw(ioaddr
+ ChipCmd
));
1229 /* Disable interrupts by clearing the interrupt mask. */
1230 writew(0x0000, ioaddr
+ IntrEnable
);
1232 /* Stop the chip's Tx and Rx processes. */
1233 writew(CmdStop
, ioaddr
+ ChipCmd
);
1235 del_timer(&np
->timer
);
1237 free_irq(dev
->irq
, dev
);
1239 /* Free all the skbuffs in the Rx queue. */
1240 for (i
= 0; i
< RX_RING_SIZE
; i
++) {
/* Poison the descriptor so a late DMA cannot scribble on a freed
   buffer. */
1241 np
->rx_ring
[i
].rx_length
= 0;
1242 np
->rx_ring
[i
].addr
= 0xBADF00D0; /* An invalid address. */
1243 if (np
->rx_skbuff
[i
]) {
1244 #if LINUX_VERSION_CODE < 0x20100
/* Pre-2.1 kernels: mark the skb freeable before releasing it. */
1245 np
->rx_skbuff
[i
]->free
= 1;
1247 dev_free_skb(np
->rx_skbuff
[i
]);
1249 np
->rx_skbuff
[i
] = 0;
/* Release any Tx buffers still queued at shutdown. */
1251 for (i
= 0; i
< TX_RING_SIZE
; i
++) {
1252 if (np
->tx_skbuff
[i
])
1253 dev_free_skb(np
->tx_skbuff
[i
]);
1254 np
->tx_skbuff
[i
] = 0;
1264 int init_module(void)
1266 if (debug
) /* Emit version even if no cards detected. */
1267 printk(KERN_INFO
"%s" KERN_INFO
"%s", versionA
, versionB
);
1269 register_driver(ðerdev_ops
);
1272 return pci_etherdev_probe(NULL
, pci_tbl
);
1276 void cleanup_module(void)
1280 unregister_driver(ðerdev_ops
);
1283 /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
1284 while (root_net_dev
) {
1285 struct netdev_private
*np
=
1286 (struct netdev_private
*)(root_net_dev
->priv
);
1287 unregister_netdev(root_net_dev
);
1289 release_region(root_net_dev
->base_addr
, pci_tbl
[np
->chip_id
].io_size
);
1291 iounmap((char *)(root_net_dev
->base_addr
));
1293 kfree(root_net_dev
);
1294 root_net_dev
= np
->next_module
;
1296 kfree(np
); /* Assumption: no struct realignment. */
1305 * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c via-rhine.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
1306 * SMP-compile-command: "gcc -D__SMP__ -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c via-rhine.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"