/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
/*
	Written 1999-2000 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210

	Support and updates available at
	http://www.scyld.com/network/sundance.html
	[link no longer provides useful info -jgarzik]
	Archives of the mailing list are still available at
	http://www.beowulf.org/pipermail/netdrivers/

*/
#define DRV_NAME	"sundance"
#define DRV_VERSION	"1.2"
#define DRV_RELDATE	"11-Sep-2006"
/* The user-configurable values.
   These may be modified when a driver module is loaded.*/
static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   Typical is a 64 element hash table based on the Ethernet CRC.  */
static const int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature.
   This chip can receive into offset buffers, so the Alpha does not
   need a copy-align. */
static int rx_copybreak;
static int flowctrl = 1;
/* media[] specifies the media type the NIC operates at.
		 autosense	Autosensing active media.
		 10mbps_hd	10Mbps half duplex.
		 10mbps_fd	10Mbps full duplex.
		 100mbps_hd	100Mbps half duplex.
		 100mbps_fd	100Mbps full duplex.
		 0		Autosensing active media.
		 1		10Mbps half duplex.
		 2		10Mbps full duplex.
		 3		100Mbps half duplex.
		 4		100Mbps full duplex.
*/
#define MAX_UNITS 8
static char *media[MAX_UNITS];
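/* Illustrative usage (editor's example, not from the original sources):
 *
 *	modprobe sundance media=100mbps_fd,autosense flowctrl=0 debug=1
 *
 * forces the first card to 100Mbps full duplex, lets the second card
 * autosense, and disables flow control advertisement.
 */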
/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority, and more than 128 requires modifying the
   Tx error recovery.
   Large receive rings merely waste memory. */
#define TX_RING_SIZE	32
#define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used.  */
#define RX_RING_SIZE	64
#define RX_BUDGET	32
#define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct netdev_desc)
#define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct netdev_desc)
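/* Editor's sketch (illustrative): with power-of-two ring sizes the ring
 * arithmetic used throughout the driver, e.g.
 *
 *	entry = np->cur_tx % TX_RING_SIZE;
 *
 * compiles down to a simple mask (cur_tx & 31) instead of a division.
 */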
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (4*HZ)
#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/processor.h>		/* Processor type for cache alignment. */
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
/* These identify the driver base version and may not be removed. */
static const char version[] __devinitconst =
	KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
	" Written by Donald Becker\n";
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(media, charp, NULL, 0);
module_param(flowctrl, int, 0);
MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
/*
				Theory of Operation

I. Board Compatibility

This driver is designed for the Sundance Technologies "Alta" ST201 chip.

II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
Some chips explicitly use only 2^N sized rings, while others use a
'next descriptor' pointer that the driver forms into rings.

IIIb/c. Transmit/Receive Structure

This driver uses a zero-copy receive and transmit scheme.
The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack.  Buffers consumed this way are replaced by newly allocated
skbuffs in a later phase of receives.

The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets.  When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine.  Copying also preloads the cache, which is
most useful with small frames.

A subtle aspect of the operation is that the IP header at offset 14 in an
ethernet frame isn't longword aligned for further processing.
Unaligned buffers are permitted by the Sundance hardware, so
frames are received into the skbuff at an offset of "+2", 16-byte aligning
the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and the
'dev->tbusy' flag.  It sets the tbusy flag whenever it is queuing a Tx
packet.  If the next queue slot is empty, it clears the tbusy flag when
finished; otherwise it sets the 'lp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark.  Iff the 'lp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.

IV. References

The Sundance ST201 datasheet, preliminary version.
The Kendin KS8723 datasheet, preliminary version.
The ICplus IP100 datasheet, preliminary version.
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html

*/
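/* Editor's sketch (illustrative): the copybreak scheme described above
 * reduces to roughly
 *
 *	if (pkt_len < rx_copybreak &&
 *	    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
 *		skb_reserve(skb, 2);		// align the IP header
 *		// copy the frame; the ring buffer is reused
 *	} else {
 *		// pass the full-sized ring skbuff up the stack
 *	}
 *
 * The real receive path in rx_poll() below has this shape.
 */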
/* Work-around for Kendin chip bugs. */
#ifndef CONFIG_SUNDANCE_MMIO
#define USE_IO_OPS 1
#endif
static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = {
	{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
	{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
	{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
	{ 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
	{ 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
	{ 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
	{ 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
	{ }
};
MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
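/* Editor's note: each entry above is
 *	{ vendor, device, subvendor, subdevice, class, class_mask, driver_data }
 * and driver_data is used as an index into pci_id_tbl[] below.
 */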
static const struct pci_id_info pci_id_tbl[] __devinitdata = {
	{"D-Link DFE-550TX FAST Ethernet Adapter"},
	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
	{"D-Link DFE-580TX 4 port Server Adapter"},
	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
	{"D-Link DL10050-based FAST Ethernet Adapter"},
	{"Sundance Technology Alta"},
	{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
	{ }	/* terminate list. */
};
/* This driver was written to use PCI memory space, however x86-oriented
   hardware often uses I/O space accesses. */

/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.  Such names would only partially document the semantics and would
   make the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum alta_offsets {
	TxDMABurstThresh	= 0x08,
	TxDMAUrgentThresh	= 0x09,
	TxDMAPollPeriod		= 0x0a,
	RxDMABurstThresh	= 0x14,
	RxDMAUrgentThresh	= 0x15,
	RxDMAPollPeriod		= 0x16,
	MulticastFilter0	= 0x60,
	MulticastFilter1	= 0x64,
	StatsCarrierError	= 0x74,
	StatsLateColl		= 0x75,
	StatsMultiColl		= 0x76,
	StatsTxXSDefer		= 0x7a,
	/* Aliased and bogus values! */
	RxStatus		= 0x0c,
};

#define ASIC_HI_WORD(x)	((x) + 2)
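/* Editor's note: the 32-bit ASICCtrl register is also accessed as two
 * 16-bit halves; ASIC_HI_WORD() yields the offset of the high half, e.g.
 *
 *	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
 *		  ioaddr + ASIC_HI_WORD(ASICCtrl));
 *
 * as done in netdev_close() below.
 */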
enum ASICCtrl_HiWord_bit {
	GlobalReset = 0x0001,
	RxReset = 0x0002,
	TxReset = 0x0004,
	DMAReset = 0x0008,
	FIFOReset = 0x0010,
	NetworkReset = 0x0020,
	HostReset = 0x0040,
	ResetBusy = 0x0400,
};
/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
	IntrDrvRqst=0x0040,
	StatsMax=0x0080, LinkChange=0x0100,
	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
};
/* Bits in the RxMode register. */
enum rx_mode_bits {
	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
};
/* Bits in MACCtrl. */
enum mac_ctrl0_bits {
	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
};
enum mac_ctrl1_bits {
	StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
};
/* The Rx and Tx buffer descriptors. */
/* Note that using only 32 bit fields simplifies conversion to big-endian
   architectures. */
struct netdev_desc {
	__le32 next_desc;
	__le32 status;
	struct desc_frag { __le32 addr, length; } frag[1];
};

/* Bits in netdev_desc.status */
enum desc_status_bits {
	DescOwn=0x8000,
	DescEndPacket=0x4000,
	DescEndRing=0x2000,
	LastFrag=0x80000000,
	DescIntrOnTx=0x8000,
	DescIntrOnDMADone=0x80000000,
	DisableAlign = 0x00000001,
};
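/* Editor's sketch (mirrors start_tx() below): a single-fragment Tx
 * descriptor for one skb is filled in as
 *
 *	desc->next_desc = 0;
 *	desc->status = cpu_to_le32((entry << 2) | DisableAlign);
 *	desc->frag[0].addr = cpu_to_le32(mapped_dma_addr);
 *	desc->frag[0].length = cpu_to_le32(skb->len | LastFrag);
 *
 * where mapped_dma_addr is a hypothetical name for the dma_map_single()
 * result.
 */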
#define PRIV_ALIGN	15 	/* Required alignment mask */
/* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
   within the structure. */
#define MII_CNT		4
struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct netdev_desc *rx_ring;
	struct netdev_desc *tx_ring;
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_ring_dma;
	dma_addr_t rx_ring_dma;
	struct timer_list timer;		/* Media monitoring timer. */
	/* ethtool extra stats */
	struct {
		u64 tx_multiple_collisions;
		u64 tx_single_collisions;
		u64 tx_late_collisions;
		u64 tx_deferred;
		u64 tx_deferred_excessive;
		u64 tx_aborted;
		u64 tx_bcasts;
		u64 rx_bcasts;
		u64 tx_mcasts;
		u64 rx_mcasts;
	} xstats;
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	int msg_enable;
	int chip_id;
	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
	struct netdev_desc *last_tx;		/* Last Tx descriptor used. */
	unsigned int cur_tx, dirty_tx;
	/* These values keep track of the transceiver/media in use. */
	unsigned int flowctrl:1;
	unsigned int default_port:4;		/* Last dev->if_port value. */
	unsigned int an_enable:1;
	unsigned int speed;
	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;
	int budget;
	int cur_task;
	/* Multicast and receive mode. */
	spinlock_t mcastlock;			/* SMP lock multicast updates. */
	u16 mcast_filter[4];
	/* MII transceiver section. */
	struct mii_if_info mii_if;
	int mii_preamble_required;
	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used. */
	struct pci_dev *pci_dev;
	void __iomem *base;
	spinlock_t statlock;
};
/* The station address location in the EEPROM. */
#define EEPROM_SA_OFFSET	0x10
#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
			IntrDrvRqst | IntrTxDone | StatsMax | \
			LinkChange)
static int  change_mtu(struct net_device *dev, int new_mtu);
static int  eeprom_read(void __iomem *ioaddr, int location);
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  mdio_wait_link(struct net_device *dev, int wait);
static int  netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static int reset_tx (struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void rx_poll(unsigned long data);
static void tx_poll(unsigned long data);
static void refill_rx (struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
static void set_rx_mode(struct net_device *dev);
static int __set_mac_addr(struct net_device *dev);
static int sundance_set_mac_addr(struct net_device *dev, void *data);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int  netdev_close(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;
static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base + ASICCtrl;
	int countdown;

	/* ST201 documentation states ASICCtrl is a 32 bit register */
	iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
	/* ST201 documentation states reset can take up to 1 ms */
	countdown = 10 + 1;
	while (ioread32 (ioaddr) & (ResetBusy << 16)) {
		if (--countdown == 0) {
			printk(KERN_WARNING "%s: reset not completed!\n", dev->name);
			break;
		}
		udelay(100);
	}
}
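/* Usage note (editor's): reset_cmd selects which blocks to reset via the
 * ASICCtrl_HiWord_bit values shifted into the high word, e.g.
 *
 *	sundance_reset(dev, (NetworkReset | FIFOReset | TxReset) << 16);
 *
 * as in the Tx-underrun recovery below, while the probe routine uses
 * 0x00ff << 16 to reset everything.
 */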
static const struct net_device_ops netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_get_stats 		= get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_do_ioctl 		= netdev_ioctl,
	.ndo_tx_timeout		= tx_timeout,
	.ndo_change_mtu		= change_mtu,
	.ndo_set_mac_address 	= sundance_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
static int __devinit sundance_probe1 (struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int card_idx;
	int chip_idx = ent->driver_data;
	int i;
	void __iomem *ioaddr;
	u16 mii_ctl;
	void *ring_space;
	dma_addr_t ring_dma;
#ifdef USE_IO_OPS
	int bar = 0;
#else
	int bar = 1;
#endif
	int phy, phy_end, phy_idx = 0;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	if (pci_enable_device(pdev))
		return -EIO;
	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_netdev;

	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
	if (!ioaddr)
		goto err_out_res;

	for (i = 0; i < 3; i++)
		((__le16 *)dev->dev_addr)[i] =
			cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	dev->base_addr = (unsigned long)ioaddr;
	dev->irq = pdev->irq;

	np = netdev_priv(dev);
	np->base = ioaddr;
	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->msg_enable = (1 << debug) - 1;
	spin_lock_init(&np->lock);
	spin_lock_init(&np->statlock);
	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
			&ring_dma, GFP_KERNEL);
	if (!ring_space)
		goto err_out_cleardev;
	np->tx_ring = (struct netdev_desc *)ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
			&ring_dma, GFP_KERNEL);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = (struct netdev_desc *)ring_space;
	np->rx_ring_dma = ring_dma;

	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->mii_if.phy_id_mask = 0x1f;
	np->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &netdev_ops;
	SET_ETHTOOL_OPS(dev, &ethtool_ops);
	dev->watchdog_timeo = TX_TIMEOUT;

	pci_set_drvdata(pdev, dev);

	i = register_netdev(dev);
	if (i)
		goto err_out_unmap_rx;

	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
	       dev->name, pci_id_tbl[chip_idx].name, ioaddr,
	       dev->dev_addr, dev->irq);

	np->phys[0] = 1;		/* Default setting */
	np->mii_preamble_required++;
	/*
	 * It seems some PHYs don't deal well with address 0 being accessed
	 * first.
	 */
	if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
		phy = 0;
		phy_end = 31;
	} else {
		phy = 1;
		phy_end = 32;	/* wraps to zero, due to 'phy & 0x1f' */
	}
	for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
		int phyx = phy & 0x1f;
		int mii_status = mdio_read(dev, phyx, MII_BMSR);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			np->phys[phy_idx++] = phyx;
			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
			if ((mii_status & 0x0040) == 0)
				np->mii_preamble_required++;
			printk(KERN_INFO "%s: MII PHY found at address %d, status "
				   "0x%4.4x advertising %4.4x.\n",
				   dev->name, phyx, mii_status, np->mii_if.advertising);
		}
	}
	np->mii_preamble_required--;

	if (phy_idx == 0) {
		printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
			   dev->name, ioread32(ioaddr + ASICCtrl));
		goto err_out_unregister;
	}

	np->mii_if.phy_id = np->phys[0];
	/* Parse override configuration */
	np->an_enable = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
			    strcmp (media[card_idx], "4") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
				   strcmp (media[card_idx], "3") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 0;
			} else {
				np->an_enable = 1;
			}
		}
		if (flowctrl == 1)
			np->flowctrl = 1;
	}
	/* Fibre PHY? */
	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
		/* Default 100Mbps Full */
		if (np->an_enable) {
			np->speed = 100;
			np->mii_if.full_duplex = 1;
			np->an_enable = 0;
		}
	}
	/* Reset PHY */
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
	mdelay (300);
	/* If flow control enabled, we need to advertise it.*/
	if (np->flowctrl)
		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
	/* Force media type */
	if (!np->an_enable) {
		mii_ctl = 0;
		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
		printk (KERN_INFO "Override speed=%d, %s duplex\n",
			np->speed, np->mii_if.full_duplex ? "Full" : "Half");
	}

	/* Perhaps move the reset here? */
	/* Reset the chip to erase previous misconfiguration. */
	if (netif_msg_hw(np))
		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
	sundance_reset(dev, 0x00ff << 16);
	if (netif_msg_hw(np))
		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));

	card_idx++;
	return 0;
err_out_unregister:
	unregister_netdev(dev);
err_out_unmap_rx:
	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
		np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
		np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
	pci_set_drvdata(pdev, NULL);
	pci_iounmap(pdev, ioaddr);
err_out_res:
	pci_release_regions(pdev);
err_out_netdev:
	free_netdev(dev);
	return -ENODEV;
}
static int change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
		return -EINVAL;
	if (netif_running(dev))
		return -EBUSY;
	dev->mtu = new_mtu;
	return 0;
}
#define eeprom_delay(ee_addr)	ioread32(ee_addr)

/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
static int __devinit eeprom_read(void __iomem *ioaddr, int location)
{
	int boguscnt = 10000;		/* Typical 1900 ticks. */
	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
	do {
		eeprom_delay(ioaddr + EECtrl);
		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
			return ioread16(ioaddr + EEData);
		}
	} while (--boguscnt > 0);
	return 0;
}
/* MII transceiver control section.
   Read and write the MII registers using software-generated serial
   MDIO protocol.  See the MII specifications or DP83840A data sheet
   for details.

   The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
   met by back-to-back 33 MHz PCI cycles. */
#define mdio_delay() ioread8(mdio_addr)

enum mii_reg_bits {
	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
};
#define MDIO_EnbIn  (0)
#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
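/* Usage sketch (editor's, illustrative): a link-status poll through this
 * bit-banged interface is simply
 *
 *	int bmsr = mdio_read(dev, np->phys[0], MII_BMSR);
 *	if (bmsr & BMSR_LSTATUS)
 *		// link is up
 *
 * which is what mdio_wait_link() below does in a loop.
 */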
/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(void __iomem *mdio_addr)
{
	int bits = 32;

	/* Establish sync by sending at least 32 logic ones. */
	while (--bits >= 0) {
		iowrite8(MDIO_WRITE1, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
}
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i, retval = 0;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 19; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	return (retval>>1) & 0xffff;
}
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
	int i;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
}
static int mdio_wait_link(struct net_device *dev, int wait)
{
	unsigned int bmsr;
	int phy_id;
	struct netdev_private *np;

	np = netdev_priv(dev);
	phy_id = np->phys[0];

	do {
		bmsr = mdio_read(dev, phy_id, MII_BMSR);
		if (bmsr & 0x0004)
			return 0;
		mdelay(1);
	} while (--wait > 0);
	return -1;
}
static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	unsigned long flags;
	int i;

	/* Do we need to reset the chip??? */

	i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
	if (i)
		return i;

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
			   dev->name, dev->irq);

	init_ring(dev);

	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
	/* The Tx list pointer is written as packets are queued. */

	/* Initialize other registers. */
	__set_mac_addr(dev);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
#else
	iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
#endif
	if (dev->mtu > 2047)
		iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);

	/* Configure the PCI bus bursts and FIFO thresholds. */

	if (dev->if_port == 0)
		dev->if_port = np->default_port;

	spin_lock_init(&np->mcastlock);

	set_rx_mode(dev);
	iowrite16(0, ioaddr + IntrEnable);
	iowrite16(0, ioaddr + DownCounter);
	/* Set the chip to poll every N*320nsec. */
	iowrite8(100, ioaddr + RxDMAPollPeriod);
	iowrite8(127, ioaddr + TxDMAPollPeriod);
	/* Fix DFE-580TX packet drop issue */
	if (np->pci_dev->revision >= 0x14)
		iowrite8(0x01, ioaddr + DebugCtrl1);
	netif_start_queue(dev);

	spin_lock_irqsave(&np->lock, flags);
	reset_tx(dev);
	spin_unlock_irqrestore(&np->lock, flags);

	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
			   "MAC Control %x, %4.4x %4.4x.\n",
			   dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
			   ioread32(ioaddr + MACCtrl0),
			   ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 3*HZ;
	np->timer.data = (unsigned long)dev;
	np->timer.function = netdev_timer;	/* timer handler */
	add_timer(&np->timer);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);

	return 0;
}
static void check_duplex(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
	int negotiated = mii_lpa & np->mii_if.advertising;
	int duplex;

	/* Force media */
	if (!np->an_enable || mii_lpa == 0xffff) {
		if (np->mii_if.full_duplex)
			iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
				ioaddr + MACCtrl0);
		return;
	}

	/* Autonegotiation */
	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
	if (np->mii_if.full_duplex != duplex) {
		np->mii_if.full_duplex = duplex;
		if (netif_msg_link(np))
			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
				   "negotiated capability %4.4x.\n", dev->name,
				   duplex ? "full" : "half", np->phys[0], negotiated);
		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
	}
}
static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int next_tick = 10*HZ;

	if (netif_msg_timer(np)) {
		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
			   "Tx %x Rx %x.\n",
			   dev->name, ioread16(ioaddr + IntrEnable),
			   ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
	}
	check_duplex(dev);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}
static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	unsigned long flag;

	netif_stop_queue(dev);
	tasklet_disable(&np->tx_tasklet);
	iowrite16(0, ioaddr + IntrEnable);
	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
		   "TxFrameId %2.2x,"
		   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
		   ioread8(ioaddr + TxFrameId));

	{
		int i;
		for (i=0; i<TX_RING_SIZE; i++) {
			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
				le32_to_cpu(np->tx_ring[i].next_desc),
				le32_to_cpu(np->tx_ring[i].status),
				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				le32_to_cpu(np->tx_ring[i].frag[0].length));
		}
		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
			ioread32(np->base + TxListPtr),
			netif_queue_stopped(dev));
		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
			np->cur_tx, np->cur_tx % TX_RING_SIZE,
			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
	}
	spin_lock_irqsave(&np->lock, flag);

	/* Stop and restart the chip's Tx processes. */
	reset_tx(dev);
	spin_unlock_irqrestore(&np->lock, flag);

	dev->if_port = 0;

	dev->trans_start = jiffies; /* prevent tx timeout */
	dev->stats.tx_errors++;
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		netif_wake_queue(dev);
	}
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	tasklet_enable(&np->tx_tasklet);
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->cur_tx = 0;
	np->dirty_rx = np->dirty_tx = 0;
	np->cur_task = 0;

	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
		np->rx_ring[i].status = 0;
		np->rx_ring[i].frag[0].length = 0;
		np->rx_skbuff[i] = NULL;
	}

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb =
			netdev_alloc_skb(dev, np->rx_buf_sz + 2);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		np->rx_ring[i].frag[0].addr = cpu_to_le32(
			dma_map_single(&np->pci_dev->dev, skb->data,
				np->rx_buf_sz, DMA_FROM_DEVICE));
		if (dma_mapping_error(&np->pci_dev->dev,
					np->rx_ring[i].frag[0].addr)) {
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = NULL;
			break;
		}
		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
	}
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
}
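/* Editor's note (assumption): setting dirty_rx to i - RX_RING_SIZE makes it
 * "negative" in the unsigned producer/consumer arithmetic; if any allocation
 * above failed, refill_rx() will keep retrying the missing buffers until
 * cur_rx - dirty_rx reaches zero modulo RX_RING_SIZE.
 */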
static void tx_poll (unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned head = np->cur_task % TX_RING_SIZE;
	struct netdev_desc *txdesc =
		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];

	/* Chain the next pointer */
	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
		int entry = np->cur_task % TX_RING_SIZE;
		txdesc = &np->tx_ring[entry];
		if (np->last_tx) {
			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
				entry*sizeof(struct netdev_desc));
		}
		np->last_tx = txdesc;
	}
	/* Indicate the latest descriptor of tx ring */
	txdesc->status |= cpu_to_le32(DescIntrOnTx);

	if (ioread32 (np->base + TxListPtr) == 0)
		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
			np->base + TxListPtr);
}
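/* Editor's note: TxListPtr is only written when it reads back as zero,
 * i.e. when the Tx DMA engine is idle; otherwise the new descriptors are
 * picked up through the next_desc chain built above.
 */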
static netdev_tx_t
start_tx (struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct netdev_desc *txdesc;
	unsigned entry;

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];

	txdesc->next_desc = 0;
	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
	txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
				skb->data, skb->len, DMA_TO_DEVICE));
	if (dma_mapping_error(&np->pci_dev->dev,
				txdesc->frag[0].addr))
		goto drop_frame;
	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);

	/* Increment cur_tx before tasklet_schedule() */
	np->cur_tx++;
	mb();

	/* Schedule a tx_poll() task */
	tasklet_schedule(&np->tx_tasklet);

	/* On some architectures: explicitly flush cache lines here. */
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
	    !netif_queue_stopped(dev)) {
		/* do nothing */
	} else {
		netif_stop_queue (dev);
	}
	if (netif_msg_tx_queued(np)) {
		printk (KERN_DEBUG
			"%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
	}
	return NETDEV_TX_OK;

drop_frame:
	dev_kfree_skb(skb);
	np->tx_skbuff[entry] = NULL;
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
/* Reset hardware tx and free all of tx buffers */
static int
reset_tx (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;

	/* Reset tx logic, TxListPtr will be cleaned */
	iowrite16 (TxDisable, ioaddr + MACCtrl1);
	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);

	/* free all tx skbuff */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].next_desc = 0;

		skb = np->tx_skbuff[i];
		if (skb) {
			dma_unmap_single(&np->pci_dev->dev,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				skb->len, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			np->tx_skbuff[i] = NULL;
			dev->stats.tx_dropped++;
		}
	}
	np->cur_tx = np->dirty_tx = 0;
	np->cur_task = 0;

	np->last_tx = NULL;
	iowrite8(127, ioaddr + TxDMAPollPeriod);

	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
	return 0;
}
/* The interrupt handler cleans up after the Tx thread,
   and schedules Rx work. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int hw_frame_id;
	int tx_cnt;
	int tx_status;
	int handled = 0;
	int i;

	do {
		int intr_status = ioread16(ioaddr + IntrStatus);
		iowrite16(intr_status, ioaddr + IntrStatus);

		if (netif_msg_intr(np))
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
				   dev->name, intr_status);

		if (!(intr_status & DEFAULT_INTR))
			break;

		handled = 1;

		if (intr_status & (IntrRxDMADone)) {
			iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
					ioaddr + IntrEnable);
			if (np->budget < 0)
				np->budget = RX_BUDGET;
			tasklet_schedule(&np->rx_tasklet);
		}
		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
			tx_status = ioread16 (ioaddr + TxStatus);
			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
				if (netif_msg_tx_done(np))
					printk
					    ("%s: Transmit status is %2.2x.\n",
					     dev->name, tx_status);
				if (tx_status & 0x1e) {
					if (netif_msg_tx_err(np))
						printk("%s: Transmit error status %4.4x.\n",
							   dev->name, tx_status);
					dev->stats.tx_errors++;
					if (tx_status & 0x10)
						dev->stats.tx_fifo_errors++;
					if (tx_status & 0x08)
						dev->stats.collisions++;
					if (tx_status & 0x04)
						dev->stats.tx_fifo_errors++;
					if (tx_status & 0x02)
						dev->stats.tx_window_errors++;

					/*
					** This reset has been verified on
					** DFE-580TX boards ! phdm@macqel.be.
					*/
					if (tx_status & 0x10) {	/* TxUnderrun */
						/* Restart Tx FIFO and transmitter */
						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
						/* No need to reset the Tx pointer here */
					}
					/* Restart the Tx. Need to make sure tx enabled */
					i = 10;
					do {
						iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
						if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
							break;
						mdelay(1);
					} while (--i);
				}
				/* Yup, this is a documentation bug.  It cost me *hours*. */
				iowrite16 (0, ioaddr + TxStatus);
				if (tx_cnt < 0) {
					iowrite32(5000, ioaddr + DownCounter);
					break;
				}
				tx_status = ioread16 (ioaddr + TxStatus);
			}
			hw_frame_id = (tx_status >> 8) & 0xff;
		} else 	{
			hw_frame_id = ioread8(ioaddr + TxFrameId);
		}

		if (np->pci_dev->revision >= 0x14) {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				int sw_frame_id;
				sw_frame_id = (le32_to_cpu(
					np->tx_ring[entry].status) >> 2) & 0xff;
				if (sw_frame_id == hw_frame_id &&
					!(le32_to_cpu(np->tx_ring[entry].status)
					& 0x00010000))
						break;
				if (sw_frame_id == (hw_frame_id + 1) %
					TX_RING_SIZE)
						break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				dma_unmap_single(&np->pci_dev->dev,
					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
					skb->len, DMA_TO_DEVICE);
				dev_kfree_skb_irq (np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		} else {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				if (!(le32_to_cpu(np->tx_ring[entry].status)
					& 0x00010000))
					break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				dma_unmap_single(&np->pci_dev->dev,
					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
					skb->len, DMA_TO_DEVICE);
				dev_kfree_skb_irq (np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		}

		if (netif_queue_stopped(dev) &&
			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
			/* The ring is no longer full, clear busy flag. */
			netif_wake_queue (dev);
		}
		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
			netdev_error(dev, intr_status);
	} while (0);
	if (netif_msg_intr(np))
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, ioread16(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}
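/* Editor's note (assumption): on revision >= 0x14 boards the hardware
 * TxFrameId is matched against the frame id that start_tx() stored in
 * bits 2-9 of each descriptor's status word ((entry << 2)), so completed
 * frames can be reaped even before the per-descriptor done bit
 * (0x00010000) becomes visible.
 */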
static void rx_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->budget;
	void __iomem *ioaddr = np->base;
	int received = 0;

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (1) {
		struct netdev_desc *desc = &(np->rx_ring[entry]);
		u32 frame_status = le32_to_cpu(desc->status);
		int pkt_len;

		if (--boguscnt < 0) {
			goto not_done;
		}
		if (!(frame_status & DescOwn))
			break;
		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
		if (netif_msg_rx_status(np))
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
				   frame_status);
		if (frame_status & 0x001f4000) {
			/* There was an error. */
			if (netif_msg_rx_err(np))
				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
					   frame_status);
			dev->stats.rx_errors++;
			if (frame_status & 0x00100000)
				dev->stats.rx_length_errors++;
			if (frame_status & 0x00010000)
				dev->stats.rx_fifo_errors++;
			if (frame_status & 0x00060000)
				dev->stats.rx_frame_errors++;
			if (frame_status & 0x00080000)
				dev->stats.rx_crc_errors++;
			if (frame_status & 0x00100000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
					   " status %8.8x.\n",
					   dev->name, frame_status);
			}
		} else {
			struct sk_buff *skb;
#ifndef final_version
			if (netif_msg_rx_status(np))
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
					   ", bogus_cnt %d.\n",
					   pkt_len, boguscnt);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				dma_sync_single_for_cpu(&np->pci_dev->dev,
						le32_to_cpu(desc->frag[0].addr),
						np->rx_buf_sz, DMA_FROM_DEVICE);
				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
				dma_sync_single_for_device(&np->pci_dev->dev,
						le32_to_cpu(desc->frag[0].addr),
						np->rx_buf_sz, DMA_FROM_DEVICE);
				skb_put(skb, pkt_len);
			} else {
				dma_unmap_single(&np->pci_dev->dev,
					le32_to_cpu(desc->frag[0].addr),
					np->rx_buf_sz, DMA_FROM_DEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
			netif_rx(skb);
		}
		entry = (entry + 1) % RX_RING_SIZE;
		received++;
	}
	np->cur_rx = entry;
	refill_rx (dev);
	np->budget -= received;
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	return;

not_done:
	np->cur_rx = entry;
	refill_rx (dev);
	if (!received)
		received = 1;
	np->budget -= received;
	if (np->budget <= 0)
		np->budget = RX_BUDGET;
	tasklet_schedule(&np->rx_tasklet);
}
static void refill_rx (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry;
	int cnt = 0;

	/* Refill the Rx ring buffers. */
	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;		/* Better luck next round. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
				dma_map_single(&np->pci_dev->dev, skb->data,
					np->rx_buf_sz, DMA_FROM_DEVICE));
			if (dma_mapping_error(&np->pci_dev->dev,
				    np->rx_ring[entry].frag[0].addr)) {
				dev_kfree_skb_irq(skb);
				np->rx_skbuff[entry] = NULL;
				break;
			}
		}
		/* Perhaps we need not reset this field. */
		np->rx_ring[entry].frag[0].length =
			cpu_to_le32(np->rx_buf_sz | LastFrag);
		np->rx_ring[entry].status = 0;
		cnt++;
	}
}
static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mii_ctl, mii_advertise, mii_lpa;
	int speed;

	if (intr_status & LinkChange) {
		if (mdio_wait_link(dev, 10) == 0) {
			printk(KERN_INFO "%s: Link up\n", dev->name);
			if (np->an_enable) {
				mii_advertise = mdio_read(dev, np->phys[0],
							   MII_ADVERTISE);
				mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
				mii_advertise &= mii_lpa;
				printk(KERN_INFO "%s: Link changed: ",
					dev->name);
				if (mii_advertise & ADVERTISE_100FULL) {
					np->speed = 100;
					printk("100Mbps, full duplex\n");
				} else if (mii_advertise & ADVERTISE_100HALF) {
					np->speed = 100;
					printk("100Mbps, half duplex\n");
				} else if (mii_advertise & ADVERTISE_10FULL) {
					np->speed = 10;
					printk("10Mbps, full duplex\n");
				} else if (mii_advertise & ADVERTISE_10HALF) {
					np->speed = 10;
					printk("10Mbps, half duplex\n");
				} else
					printk("\n");

			} else {
				mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
				speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
				np->speed = speed;
				printk(KERN_INFO "%s: Link changed: %dMbps, ",
					dev->name, speed);
				printk("%s duplex.\n",
					(mii_ctl & BMCR_FULLDPLX) ?
						"full" : "half");
			}
			check_duplex(dev);
			if (np->flowctrl && np->mii_if.full_duplex) {
				iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
					ioaddr + MulticastFilter1+2);
				iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
					ioaddr + MACCtrl0);
			}
			netif_carrier_on(dev);
		} else {
			printk(KERN_INFO "%s: Link down\n", dev->name);
			netif_carrier_off(dev);
		}
	}
	if (intr_status & StatsMax) {
		get_stats(dev);
	}
	if (intr_status & IntrPCIErr) {
		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
			   dev->name, intr_status);
		/* We must do a global reset of DMA to continue. */
	}
}
static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	unsigned long flags;
	u8 late_coll, single_coll, mult_coll;

	spin_lock_irqsave(&np->statlock, flags);
	/* The chip only needs to report frames it silently dropped. */
	dev->stats.rx_missed_errors	+= ioread8(ioaddr + RxMissed);
	dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
	dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
	dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);

	mult_coll = ioread8(ioaddr + StatsMultiColl);
	np->xstats.tx_multiple_collisions += mult_coll;
	single_coll = ioread8(ioaddr + StatsOneColl);
	np->xstats.tx_single_collisions += single_coll;
	late_coll = ioread8(ioaddr + StatsLateColl);
	np->xstats.tx_late_collisions += late_coll;
	dev->stats.collisions += mult_coll
		+ single_coll
		+ late_coll;

	np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
	np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
	np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
	np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
	np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
	np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
	np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);

	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;

	spin_unlock_irqrestore(&np->statlock, flags);

	return &dev->stats;
}
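/* Editor's note (assumption): the hardware statistics registers are narrow
 * (8- and 16-bit) counters, which is why StatsMax is part of DEFAULT_INTR:
 * netdev_error() calls get_stats() to fold them into dev->stats before
 * they can wrap.
 */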
static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mc_filter[4];			/* Multicast hash filter */
	u32 rx_mode;
	int i;

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;
		int bit;
		int index;
		int crc;
		memset (mc_filter, 0, sizeof (mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
				if (crc & 0x80000000) index |= 1 << bit;
			mc_filter[index/16] |= (1 << (index % 16));
		}
		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
	} else {
		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
		return;
	}
	if (np->mii_if.full_duplex && np->flowctrl)
		mc_filter[3] |= 0x0200;

	for (i = 0; i < 4; i++)
		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
	iowrite8(rx_mode, ioaddr + RxMode);
}
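/* Editor's sketch (illustrative): the filter index computed above takes
 * the top six bits of the little-endian CRC of each address, MSB first:
 *
 *	crc = ether_crc_le(ETH_ALEN, ha->addr);
 *	for (index = 0, bit = 0; bit < 6; bit++, crc <<= 1)
 *		if (crc & 0x80000000)
 *			index |= 1 << bit;
 *	// bit (index % 16) of mc_filter[index / 16] is then set
 */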
static int __set_mac_addr(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u16 addr16;

	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
	iowrite16(addr16, np->base + StationAddr);
	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
	iowrite16(addr16, np->base + StationAddr+2);
	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
	iowrite16(addr16, np->base + StationAddr+4);
	return 0;
}
/* Invoked with rtnl_lock held */
static int sundance_set_mac_addr(struct net_device *dev, void *data)
{
	const struct sockaddr *addr = data;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	__set_mac_addr(dev);

	return 0;
}
static const struct {
	const char name[ETH_GSTRING_LEN];
} sundance_stats[] = {
	{ "tx_multiple_collisions" },
	{ "tx_single_collisions" },
	{ "tx_late_collisions" },
	{ "tx_deferred" },
	{ "tx_deferred_excessive" },
	{ "tx_aborted" },
	{ "tx_bcasts" },
	{ "rx_bcasts" },
	{ "tx_mcasts" },
	{ "rx_mcasts" },
};
static int check_if_running(struct net_device *dev)
{
	if (!netif_running(dev))
		return -EINVAL;
	return 0;
}
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	mii_ethtool_gset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return 0;
}
static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	res = mii_ethtool_sset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return res;
}
static int nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}

static u32 get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}

static u32 get_msglevel(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct netdev_private *np = netdev_priv(dev);
	np->msg_enable = val;
}
static void get_strings(struct net_device *dev, u32 stringset,
		u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, sundance_stats, sizeof(sundance_stats));
}

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(sundance_stats);
	default:
		return -EOPNOTSUPP;
	}
}
static void get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *data)
{
	struct netdev_private *np = netdev_priv(dev);
	int i = 0;

	get_stats(dev);
	data[i++] = np->xstats.tx_multiple_collisions;
	data[i++] = np->xstats.tx_single_collisions;
	data[i++] = np->xstats.tx_late_collisions;
	data[i++] = np->xstats.tx_deferred;
	data[i++] = np->xstats.tx_deferred_excessive;
	data[i++] = np->xstats.tx_aborted;
	data[i++] = np->xstats.tx_bcasts;
	data[i++] = np->xstats.rx_bcasts;
	data[i++] = np->xstats.tx_mcasts;
	data[i++] = np->xstats.rx_mcasts;
}
static const struct ethtool_ops ethtool_ops = {
	.begin = check_if_running,
	.get_drvinfo = get_drvinfo,
	.get_settings = get_settings,
	.set_settings = set_settings,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_strings = get_strings,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_ethtool_stats,
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&np->lock);

	return rc;
}
static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;

	/* Wait and kill tasklet */
	tasklet_kill(&np->rx_tasklet);
	tasklet_kill(&np->tx_tasklet);

	netif_stop_queue(dev);

	if (netif_msg_ifdown(np)) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
			   "Rx %4.4x Int %2.2x.\n",
			   dev->name, ioread8(ioaddr + TxStatus),
			   ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Disable Rx and Tx DMA so resources can be released safely. */
	iowrite32(0x500, ioaddr + DMACtrl);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);

	for (i = 2000; i > 0; i--) {
		if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
			break;
		mdelay(1);
	}

	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
			ioaddr + ASIC_HI_WORD(ASICCtrl));

	for (i = 2000; i > 0; i--) {
		if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
			break;
		mdelay(1);
	}

#ifdef __i386__
	if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "  Tx ring at %8.8x:\n",
			   (int)(np->tx_ring_dma));
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
				   np->tx_ring[i].frag[0].length);
		printk(KERN_DEBUG "  Rx ring %8.8x:\n",
			   (int)(np->rx_ring_dma));
		for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
				   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
				   np->rx_ring[i].frag[0].length);
		}
	}
#endif /* __i386__ debugging only */

	free_irq(dev->irq, dev);

	del_timer_sync(&np->timer);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		skb = np->rx_skbuff[i];
		if (skb) {
			dma_unmap_single(&np->pci_dev->dev,
				le32_to_cpu(np->rx_ring[i].frag[0].addr),
				np->rx_buf_sz, DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = NULL;
		}
		np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].next_desc = 0;
		skb = np->tx_skbuff[i];
		if (skb) {
			dma_unmap_single(&np->pci_dev->dev,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				skb->len, DMA_TO_DEVICE);
			dev_kfree_skb(skb);
			np->tx_skbuff[i] = NULL;
		}
	}

	return 0;
}
static void __devexit sundance_remove1 (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);
		unregister_netdev(dev);
		dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
			np->rx_ring, np->rx_ring_dma);
		dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
			np->tx_ring, np->tx_ring_dma);
		pci_iounmap(pdev, np->base);
		pci_release_regions(pdev);
		free_netdev(dev);
		pci_set_drvdata(pdev, NULL);
	}
}
#ifdef CONFIG_PM

static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);

	if (!netif_running(dev))
		return 0;

	netdev_close(dev);
	netif_device_detach(dev);

	pci_save_state(pci_dev);
	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));

	return 0;
}

static int sundance_resume(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	int err = 0;

	if (!netif_running(dev))
		return 0;

	pci_set_power_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);

	err = netdev_open(dev);
	if (err) {
		printk(KERN_ERR "%s: Can't resume interface!\n",
			dev->name);
		goto out;
	}

	netif_device_attach(dev);

out:
	return err;
}

#endif /* CONFIG_PM */
static struct pci_driver sundance_driver = {
	.name		= DRV_NAME,
	.id_table	= sundance_pci_tbl,
	.probe		= sundance_probe1,
	.remove		= __devexit_p(sundance_remove1),
#ifdef CONFIG_PM
	.suspend	= sundance_suspend,
	.resume		= sundance_resume,
#endif /* CONFIG_PM */
};
static int __init sundance_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_register_driver(&sundance_driver);
}

static void __exit sundance_exit(void)
{
	pci_unregister_driver(&sundance_driver);
}

module_init(sundance_init);
module_exit(sundance_exit);