/* de2104x.c: A Linux PCI Ethernet driver for Intel/Digital 21040/1 chips. */
/*
	Copyright 2001,2003 Jeff Garzik <jgarzik@pobox.com>

	Copyright 1994, 1995 Digital Equipment Corporation.	    [de4x5.c]
	Written/copyright 1994-2001 by Donald Becker.		    [tulip.c]

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	See the file COPYING in this distribution for more information.

	TODO, in rough priority order:
	* Support forcing media type with a module parameter,
	  like dl2k.c/sundance.c
	* Constants (module parms?) for Rx work limit
	* Complete reset on PciErr
	* Jumbo frames / dev->change_mtu
	* Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
	* Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
	* Implement Tx software interrupt mitigation via
 */
#define DRV_NAME		"de2104x"
#define DRV_VERSION		"0.7"
#define DRV_RELDATE		"Mar 17, 2004"

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/compiler.h>
#include <linux/rtnetlink.h>
#include <linux/crc32.h>

#include <asm/uaccess.h>
#include <asm/unaligned.h>
/* These identify the driver base version and may not be removed. */
static char version[] =
KERN_INFO DRV_NAME " PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int debug = -1;
module_param (debug, int, 0);
MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");
/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
	|| defined(__sparc__) || defined(__ia64__) \
	|| defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak = 100;
#endif
module_param (rx_copybreak, int, 0);
MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied");
#define PFX			DRV_NAME ": "

#define DE_DEF_MSG_ENABLE	(NETIF_MSG_DRV		| \

#define DE_RX_RING_SIZE		64
#define DE_TX_RING_SIZE		64
#define DE_RING_BYTES		\
		((sizeof(struct de_desc) * DE_RX_RING_SIZE) +	\
		(sizeof(struct de_desc) * DE_TX_RING_SIZE))
#define NEXT_TX(N)		(((N) + 1) & (DE_TX_RING_SIZE - 1))
#define NEXT_RX(N)		(((N) + 1) & (DE_RX_RING_SIZE - 1))
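/*
 * Number of free Tx descriptors.  One slot is always kept unused so that
 * tx_head == tx_tail unambiguously means "ring empty"; with an empty ring
 * the macro below evaluates to DE_TX_RING_SIZE - 1.
 */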
#define TX_BUFFS_AVAIL(CP)					\
	(((CP)->tx_tail <= (CP)->tx_head) ?			\
	  (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head :	\
	  (CP)->tx_tail - (CP)->tx_head - 1)

#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
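/*
 * Sentinel pointers stored in ring_info.skb for Tx slots that carry no real
 * sk_buff: DE_SETUP_SKB marks the slot holding the setup frame and
 * DE_DUMMY_SKB marks the do-nothing descriptor queued ahead of it (see
 * __de_set_rx_mode); de_tx() checks for them before unmapping and freeing.
 */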
#define DE_SETUP_SKB		((struct sk_buff *) 1)
#define DE_DUMMY_SKB		((struct sk_buff *) 2)
#define DE_SETUP_FRAME_WORDS	96
#define DE_EEPROM_WORDS		256
#define DE_EEPROM_SIZE		(DE_EEPROM_WORDS * sizeof(u16))
#define DE_MAX_MEDIA		5

#define DE_MEDIA_TP_AUTO	0
#define DE_MEDIA_BNC		1
#define DE_MEDIA_AUI		2
#define DE_MEDIA_TP		3
#define DE_MEDIA_TP_FD		4
#define DE_MEDIA_INVALID	DE_MAX_MEDIA
#define DE_MEDIA_FIRST		0
#define DE_MEDIA_LAST		(DE_MAX_MEDIA - 1)
#define DE_AUI_BNC		(SUPPORTED_AUI | SUPPORTED_BNC)

#define DE_TIMER_LINK		(60 * HZ)
#define DE_TIMER_NO_LINK	(5 * HZ)

#define DE_NUM_REGS		16
#define DE_REGS_SIZE		(DE_NUM_REGS * sizeof(u32))
#define DE_REGS_VER		1

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT		(6*HZ)

#define DE_UNALIGNED_16(a)	(u16)(get_unaligned((u16 *)(a)))

/* This is a mysterious value that can be written to CSR11 in the 21040 (only)
   to support a pre-NWay full-duplex signaling mechanism using short frames.
   No one knows what it should be, but if left at its default value some
   10base2(!) packets trigger a full-duplex-request interrupt. */
#define FULL_DUPLEX_MAGIC	0x6969
	CacheAlign16		= 0x00008000,
	BurstLen4		= 0x00000400,

	NormalTxPoll		= (1 << 0),
	NormalRxPoll		= (1 << 0),

	/* Tx/Rx descriptor status bits */
	RxErrLong		= (1 << 7),
	RxErrFIFO		= (1 << 0),
	RxErrRunt		= (1 << 11),
	RxErrFrame		= (1 << 14),
	FirstFrag		= (1 << 29),
	LastFrag		= (1 << 30),
	TxFIFOUnder		= (1 << 1),
	TxLinkFail		= (1 << 2) | (1 << 10) | (1 << 11),
	TxJabber		= (1 << 14),
	SetupFrame		= (1 << 27),

	TxState			= (1 << 22) | (1 << 21) | (1 << 20),
	RxState			= (1 << 19) | (1 << 18) | (1 << 17),
	LinkFail		= (1 << 12),
	RxStopped		= (1 << 8),
	TxStopped		= (1 << 1),

	TxEnable		= (1 << 13),
	RxTx			= TxEnable | RxEnable,
	FullDuplex		= (1 << 9),
	AcceptAllMulticast	= (1 << 7),
	AcceptAllPhys		= (1 << 6),

	MacModeClear		= (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) |
				  RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast,

	EE_SHIFT_CLK		= 0x02,	/* EEPROM shift clock. */
	EE_CS			= 0x01,	/* EEPROM chip select. */
	EE_DATA_WRITE		= 0x04,	/* Data from the Tulip to EEPROM. */
	EE_DATA_READ		= 0x08,	/* Data from the EEPROM chip. */
	EE_ENB			= (0x4800 | EE_CS),
	/* The EEPROM commands include the always-set leading bit. */

	RxMissedOver		= (1 << 16),
	RxMissedMask		= 0xffff,

	/* SROM-related bits */
	MediaBlockMask		= 0x3f,
	MediaCustomCSRs		= (1 << 6),

	PM_Sleep		= (1 << 31),
	PM_Snooze		= (1 << 30),
	PM_Mask			= PM_Sleep | PM_Snooze,

	NWayState		= (1 << 14) | (1 << 13) | (1 << 12),
	NWayRestart		= (1 << 12),
	NonselPortActive	= (1 << 9),
	LinkFailStatus		= (1 << 2),
	NetCxnErr		= (1 << 1),

static const u32 de_intr_mask =
	IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty |
	LinkPass | LinkFail | PciErr;

/*
 * Set the programmable burst length to 4 longwords for all:
 * DMA errors result without these values.  Cache align 16 long.
 */
static const u32 de_bus_mode = CacheAlign16 | BurstLen4;
struct de_srom_media_block {
} __attribute__((packed));

struct de_srom_info_leaf {
} __attribute__((packed));

	u16			type;	/* DE_MEDIA_xxx */

	struct net_device	*dev;

	struct de_desc		*rx_ring;
	struct de_desc		*tx_ring;
	struct ring_info	tx_skb[DE_TX_RING_SIZE];
	struct ring_info	rx_skb[DE_RX_RING_SIZE];

	struct net_device_stats net_stats;

	struct pci_dev		*pdev;

	u16			setup_frame[DE_SETUP_FRAME_WORDS];

	struct media_info	media[DE_MAX_MEDIA];
	struct timer_list	media_timer;

	unsigned		de21040 : 1;
	unsigned		media_lock : 1;
static void de_set_rx_mode (struct net_device *dev);
static void de_tx (struct de_private *de);
static void de_clean_rings (struct de_private *de);
static void de_media_interrupt (struct de_private *de, u32 status);
static void de21040_media_timer (unsigned long data);
static void de21041_media_timer (unsigned long data);
static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
static struct pci_device_id de_pci_tbl[] = {
	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
MODULE_DEVICE_TABLE(pci, de_pci_tbl);

static const char * const media_name[DE_MAX_MEDIA] = {
/* 21040 transceiver register settings:
 * TP AUTO(unused), BNC(unused), AUI, TP, TP FD */
static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, };
static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, };
static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };

/* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD */
static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };

#define dr32(reg)	readl(de->regs + (reg))
#define dw32(reg,val)	writel((val), de->regs + (reg))
static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
	if (netif_msg_rx_err (de))
		       "%s: rx err, slot %d status 0x%x len %d\n",
		       de->dev->name, rx_tail, status, len);

	if ((status & 0x38000300) != 0x0300) {
		/* Ignore earlier buffers. */
		if ((status & 0xffff) != 0x7fff) {
			if (netif_msg_rx_err(de))
				printk(KERN_WARNING "%s: Oversized Ethernet frame "
				       "spanned multiple buffers, status %8.8x!\n",
				       de->dev->name, status);
			de->net_stats.rx_length_errors++;
	} else if (status & RxError) {
		/* There was a fatal error. */
		de->net_stats.rx_errors++; /* end of a packet.*/
		if (status & 0x0890) de->net_stats.rx_length_errors++;
		if (status & RxErrCRC) de->net_stats.rx_crc_errors++;
		if (status & RxErrFIFO) de->net_stats.rx_fifo_errors++;
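/*
 * Rx strategy: frames no longer than rx_copybreak are copied into a freshly
 * allocated skb and the original ring buffer is handed back to the chip;
 * larger frames are passed up the stack in the full-size ring buffer itself,
 * and the new allocation becomes the replacement buffer for that slot.
 */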
static void de_rx (struct de_private *de)
	unsigned rx_tail = de->rx_tail;
	unsigned rx_work = DE_RX_RING_SIZE;

		struct sk_buff *skb, *copy_skb;
		unsigned copying_skb, buflen;

		skb = de->rx_skb[rx_tail].skb;

		status = le32_to_cpu(de->rx_ring[rx_tail].opts1);
		if (status & DescOwn)

		len = ((status >> 16) & 0x7ff) - 4;
		mapping = de->rx_skb[rx_tail].mapping;

		if (unlikely(drop)) {
			de->net_stats.rx_dropped++;

		if (unlikely((status & 0x38008300) != 0x0300)) {
			de_rx_err_acct(de, rx_tail, status, len);

		copying_skb = (len <= rx_copybreak);

		if (unlikely(netif_msg_rx_status(de)))
			printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d copying? %d\n",
			       de->dev->name, rx_tail, status, len,

		buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
		copy_skb = dev_alloc_skb (buflen);
		if (unlikely(!copy_skb)) {
			de->net_stats.rx_dropped++;

		copy_skb->dev = de->dev;

			pci_unmap_single(de->pdev, mapping,
					 buflen, PCI_DMA_FROMDEVICE);

			de->rx_skb[rx_tail].mapping =
				pci_map_single(de->pdev, copy_skb->data,
					       buflen, PCI_DMA_FROMDEVICE);
			de->rx_skb[rx_tail].skb = copy_skb;

			pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
			skb_reserve(copy_skb, RX_OFFSET);
			memcpy(skb_put(copy_skb, len), skb->data, len);

			pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */

		skb->protocol = eth_type_trans (skb, de->dev);

		de->net_stats.rx_packets++;
		de->net_stats.rx_bytes += skb->len;
		de->dev->last_rx = jiffies;

		if (rc == NET_RX_DROP)

		de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
		if (rx_tail == (DE_RX_RING_SIZE - 1))
			de->rx_ring[rx_tail].opts2 =
				cpu_to_le32(RingEnd | de->rx_buf_sz);
		else
			de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
		de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
		rx_tail = NEXT_RX(rx_tail);

		printk(KERN_WARNING "%s: rx work limit reached\n", de->dev->name);

	de->rx_tail = rx_tail;
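/*
 * Interrupt handler.  The status register (MacStatus, CSR5) is
 * write-one-to-clear: the value read at entry is written straight back so
 * that exactly the events about to be serviced are acknowledged.
 */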
static irqreturn_t de_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
	struct net_device *dev = dev_instance;
	struct de_private *de = dev->priv;

	status = dr32(MacStatus);
	if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))

	if (netif_msg_intr(de))
		printk(KERN_DEBUG "%s: intr, status %08x mode %08x desc %u/%u/%u\n",
		       dev->name, status, dr32(MacMode), de->rx_tail, de->tx_head, de->tx_tail);

	dw32(MacStatus, status);

	if (status & (RxIntr | RxEmpty)) {
		if (status & RxEmpty)
			dw32(RxPoll, NormalRxPoll);

	spin_lock(&de->lock);

	if (status & (TxIntr | TxEmpty))

	if (status & (LinkPass | LinkFail))
		de_media_interrupt(de, status);

	spin_unlock(&de->lock);

	if (status & PciErr) {
		pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
		pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
		printk(KERN_ERR "%s: PCI bus error, status=%08x, PCI status=%04x\n",
		       dev->name, status, pci_status);
static void de_tx (struct de_private *de)
	unsigned tx_head = de->tx_head;
	unsigned tx_tail = de->tx_tail;

	while (tx_tail != tx_head) {
		status = le32_to_cpu(de->tx_ring[tx_tail].opts1);
		if (status & DescOwn)

		skb = de->tx_skb[tx_tail].skb;

		if (unlikely(skb == DE_DUMMY_SKB))

		if (unlikely(skb == DE_SETUP_SKB)) {
			pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
					 sizeof(de->setup_frame), PCI_DMA_TODEVICE);

		pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
				 skb->len, PCI_DMA_TODEVICE);

		if (status & LastFrag) {
			if (status & TxError) {
				if (netif_msg_tx_err(de))
					printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
					       de->dev->name, status);
				de->net_stats.tx_errors++;
					de->net_stats.tx_window_errors++;
				if (status & TxMaxCol)
					de->net_stats.tx_aborted_errors++;
				if (status & TxLinkFail)
					de->net_stats.tx_carrier_errors++;
				if (status & TxFIFOUnder)
					de->net_stats.tx_fifo_errors++;
				de->net_stats.tx_packets++;
				de->net_stats.tx_bytes += skb->len;
				if (netif_msg_tx_done(de))
					printk(KERN_DEBUG "%s: tx done, slot %d\n", de->dev->name, tx_tail);
			dev_kfree_skb_irq(skb);

		de->tx_skb[tx_tail].skb = NULL;

		tx_tail = NEXT_TX(tx_tail);

	de->tx_tail = tx_tail;

	if (netif_queue_stopped(de->dev) && (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4)))
		netif_wake_queue(de->dev);
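/*
 * Queue one packet.  The descriptor is filled in completely before opts1 is
 * written with DescOwn, since setting DescOwn is what hands the descriptor
 * to the chip; the TxPoll write afterwards only asks the chip to rescan the
 * Tx ring immediately.
 */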
static int de_start_xmit (struct sk_buff *skb, struct net_device *dev)
	struct de_private *de = dev->priv;
	unsigned int entry, tx_free;
	u32 mapping, len, flags = FirstFrag | LastFrag;

	spin_lock_irq(&de->lock);

	tx_free = TX_BUFFS_AVAIL(de);
		netif_stop_queue(dev);
		spin_unlock_irq(&de->lock);

	txd = &de->tx_ring[entry];

	mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (entry == (DE_TX_RING_SIZE - 1))
	if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))

	txd->opts2 = cpu_to_le32(flags);
	txd->addr1 = cpu_to_le32(mapping);

	de->tx_skb[entry].skb = skb;
	de->tx_skb[entry].mapping = mapping;

	txd->opts1 = cpu_to_le32(DescOwn);

	de->tx_head = NEXT_TX(entry);
	if (netif_msg_tx_queued(de))
		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
		       dev->name, entry, skb->len);

		netif_stop_queue(dev);

	spin_unlock_irq(&de->lock);

	/* Trigger an immediate transmit demand. */
	dw32(TxPoll, NormalTxPoll);
	dev->trans_start = jiffies;
/* Set or clear the multicast filter for this adaptor.
   Note that we only use exclusion around actually queueing the
   new frame, not around filling de->setup_frame.  This is non-deterministic
   when re-entered but still correct. */

#define set_bit_le(i,p)		do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
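/*
 * Hash-mode setup frame: the first 32 words carry a 512-bit hash table of
 * multicast addresses (indexed by the low 9 bits of the little-endian CRC of
 * each address) and the last perfect-filter slot carries our own unicast
 * address.  Only the low 16 bits of each 32-bit setup-frame entry are used,
 * so every value is written twice into consecutive u16 slots below.
 */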
static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
	struct de_private *de = dev->priv;
	struct dev_mc_list *mclist;

	memset(hash_table, 0, sizeof(hash_table));
	set_bit_le(255, hash_table);			/* Broadcast entry */
	/* This should work on big-endian machines as well. */
	for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
	     i++, mclist = mclist->next) {
		int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;

		set_bit_le(index, hash_table);

	for (i = 0; i < 32; i++) {
		*setup_frm++ = hash_table[i];
		*setup_frm++ = hash_table[i];

	setup_frm = &de->setup_frame[13*6];

	/* Fill the final entry with our physical address. */
	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
	struct de_private *de = dev->priv;
	struct dev_mc_list *mclist;

	/* We have <= 14 addresses so we can use the wonderful
	   16 address perfect filtering of the Tulip. */
	for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
	     i++, mclist = mclist->next) {
		eaddrs = (u16 *)mclist->dmi_addr;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;

	/* Fill the unused entries with the broadcast address. */
	memset(setup_frm, 0xff, (15-i)*12);
	setup_frm = &de->setup_frame[15*6];

	/* Fill the final entry with our physical address. */
	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
static void __de_set_rx_mode (struct net_device *dev)
	struct de_private *de = dev->priv;

	struct de_desc *dummy_txd = NULL;

	macmode = dr32(MacMode) & ~(AcceptAllMulticast | AcceptAllPhys);

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		macmode |= AcceptAllMulticast | AcceptAllPhys;

	if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter well -- accept all multicasts. */
		macmode |= AcceptAllMulticast;

	/* Note that only the low-address shortword of setup_frame is valid!
	   The values are doubled for big-endian architectures. */
	if (dev->mc_count > 14)	/* Must use a multicast hash table. */
		build_setup_frame_hash (de->setup_frame, dev);
	else
		build_setup_frame_perfect (de->setup_frame, dev);

	/*
	 * Now add this frame to the Tx list.
	 */

	/* Avoid a chip errata by prefixing a dummy entry. */
		de->tx_skb[entry].skb = DE_DUMMY_SKB;

		dummy_txd = &de->tx_ring[entry];
		dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ?
				   cpu_to_le32(RingEnd) : 0;
		dummy_txd->addr1 = 0;

		/* Must set DescOwned later to avoid race with chip */

		entry = NEXT_TX(entry);

	de->tx_skb[entry].skb = DE_SETUP_SKB;
	de->tx_skb[entry].mapping = mapping =
	    pci_map_single (de->pdev, de->setup_frame,
			    sizeof (de->setup_frame), PCI_DMA_TODEVICE);

	/* Put the setup frame on the Tx list. */
	txd = &de->tx_ring[entry];
	if (entry == (DE_TX_RING_SIZE - 1))
		txd->opts2 = cpu_to_le32(SetupFrame | RingEnd | sizeof (de->setup_frame));
	else
		txd->opts2 = cpu_to_le32(SetupFrame | sizeof (de->setup_frame));
	txd->addr1 = cpu_to_le32(mapping);

	txd->opts1 = cpu_to_le32(DescOwn);

		dummy_txd->opts1 = cpu_to_le32(DescOwn);

	de->tx_head = NEXT_TX(entry);

	if (TX_BUFFS_AVAIL(de) < 0)
	if (TX_BUFFS_AVAIL(de) == 0)
		netif_stop_queue(dev);

	/* Trigger an immediate transmit demand. */
	dw32(TxPoll, NormalTxPoll);

	if (macmode != dr32(MacMode))
		dw32(MacMode, macmode);
static void de_set_rx_mode (struct net_device *dev)
	struct de_private *de = dev->priv;

	spin_lock_irqsave (&de->lock, flags);
	__de_set_rx_mode(dev);
	spin_unlock_irqrestore (&de->lock, flags);
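/*
 * CSR8 (RxMissed) is the chip's missed-frame counter and clears itself when
 * read.  Bit 16 (RxMissedOver) signals that the 16-bit count wrapped, in
 * which case the full mask value is credited to the statistics instead.
 */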
static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
	if (unlikely(rx_missed & RxMissedOver))
		de->net_stats.rx_missed_errors += RxMissedMask;
	else
		de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask);

static void __de_get_stats(struct de_private *de)
	u32 tmp = dr32(RxMissed); /* self-clearing */
	de_rx_missed(de, tmp);

static struct net_device_stats *de_get_stats(struct net_device *dev)
	struct de_private *de = dev->priv;

	/* The chip only reports frames it silently dropped. */
	spin_lock_irq(&de->lock);
	if (netif_running(dev) && netif_device_present(dev))
	spin_unlock_irq(&de->lock);

	return &de->net_stats;

static inline int de_is_running (struct de_private *de)
	return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0;
static void de_stop_rxtx (struct de_private *de)
	unsigned int work = 1000;

	macmode = dr32(MacMode);
	if (macmode & RxTx) {
		dw32(MacMode, macmode & ~RxTx);

		if (!de_is_running(de))

	printk(KERN_WARNING "%s: timeout expired stopping DMA\n", de->dev->name);

static inline void de_start_rxtx (struct de_private *de)
	macmode = dr32(MacMode);
	if ((macmode & RxTx) != RxTx) {
		dw32(MacMode, macmode | RxTx);

static void de_stop_hw (struct de_private *de)
	dw32(MacStatus, dr32(MacStatus));

	de->tx_head = de->tx_tail = 0;
static void de_link_up(struct de_private *de)
	if (!netif_carrier_ok(de->dev)) {
		netif_carrier_on(de->dev);
		if (netif_msg_link(de))
			printk(KERN_INFO "%s: link up, media %s\n",
			       de->dev->name, media_name[de->media_type]);

static void de_link_down(struct de_private *de)
	if (netif_carrier_ok(de->dev)) {
		netif_carrier_off(de->dev);
		if (netif_msg_link(de))
			printk(KERN_INFO "%s: link down\n", de->dev->name);
static void de_set_media (struct de_private *de)
	unsigned media = de->media_type;
	u32 macmode = dr32(MacMode);

	if (de_is_running(de))

		dw32(CSR11, FULL_DUPLEX_MAGIC);
	dw32(CSR13, 0); /* Reset phy */
	dw32(CSR14, de->media[media].csr14);
	dw32(CSR15, de->media[media].csr15);
	dw32(CSR13, de->media[media].csr13);

	/* must delay 10ms before writing to other registers,

	if (media == DE_MEDIA_TP_FD)
		macmode |= FullDuplex;
	else
		macmode &= ~FullDuplex;

	if (netif_msg_link(de)) {
		printk(KERN_INFO "%s: set link %s\n"
		       KERN_INFO "%s: mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n"
		       KERN_INFO "%s: set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
		       de->dev->name, media_name[media],
		       de->dev->name, dr32(MacMode), dr32(SIAStatus),
		       dr32(CSR13), dr32(CSR14), dr32(CSR15),
		       de->dev->name, macmode, de->media[media].csr13,
		       de->media[media].csr14, de->media[media].csr15);

	if (macmode != dr32(MacMode))
		dw32(MacMode, macmode);
static void de_next_media (struct de_private *de, u32 *media,
			   unsigned int n_media)
	for (i = 0; i < n_media; i++) {
		if (de_ok_to_advertise(de, media[i])) {
			de->media_type = media[i];
static void de21040_media_timer (unsigned long data)
	struct de_private *de = (struct de_private *) data;
	struct net_device *dev = de->dev;
	u32 status = dr32(SIAStatus);
	unsigned int carrier;

	carrier = (status & NetCxnErr) ? 0 : 1;

		if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))

		de->media_timer.expires = jiffies + DE_TIMER_LINK;
		add_timer(&de->media_timer);
		if (!netif_carrier_ok(dev))

		if (netif_msg_timer(de))
			printk(KERN_INFO "%s: %s link ok, status %x\n",
			       dev->name, media_name[de->media_type],

	if (de->media_type == DE_MEDIA_AUI) {
		u32 next_state = DE_MEDIA_TP;
		de_next_media(de, &next_state, 1);
	} else {
		u32 next_state = DE_MEDIA_AUI;
		de_next_media(de, &next_state, 1);

	spin_lock_irqsave(&de->lock, flags);
	spin_unlock_irqrestore(&de->lock, flags);

	de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
	add_timer(&de->media_timer);
	if (netif_msg_timer(de))
		printk(KERN_INFO "%s: no link, trying media %s, status %x\n",
		       dev->name, media_name[de->media_type], status);
static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
	switch (new_media) {
	case DE_MEDIA_TP_AUTO:
		if (!(de->media_advertise & ADVERTISED_Autoneg))
		if (!(de->media_advertise & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full)))
		if (!(de->media_advertise & ADVERTISED_BNC))
		if (!(de->media_advertise & ADVERTISED_AUI))
		if (!(de->media_advertise & ADVERTISED_10baseT_Half))
	case DE_MEDIA_TP_FD:
		if (!(de->media_advertise & ADVERTISED_10baseT_Full))
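/*
 * 21041 link monitor.  While there is no link this timer steps through the
 * advertised media (TP auto, BNC, AUI), using activity reported on the
 * non-selected port as a hint, and reprograms the SIA on each pass; once a
 * link is up it only rearms itself to keep watching for link failure.
 */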
static void de21041_media_timer (unsigned long data)
	struct de_private *de = (struct de_private *) data;
	struct net_device *dev = de->dev;
	u32 status = dr32(SIAStatus);
	unsigned int carrier;
	unsigned long flags;

	carrier = (status & NetCxnErr) ? 0 : 1;

		if ((de->media_type == DE_MEDIA_TP_AUTO ||
		     de->media_type == DE_MEDIA_TP ||
		     de->media_type == DE_MEDIA_TP_FD) &&
		    (status & LinkFailStatus))

		de->media_timer.expires = jiffies + DE_TIMER_LINK;
		add_timer(&de->media_timer);
		if (!netif_carrier_ok(dev))

		if (netif_msg_timer(de))
			printk(KERN_INFO "%s: %s link ok, mode %x status %x\n",
			       dev->name, media_name[de->media_type],
			       dr32(MacMode), status);

	/* if media type locked, don't switch media */

	/* if activity detected, use that as hint for new media type */
	if (status & NonselPortActive) {
		unsigned int have_media = 1;

		/* if AUI/BNC selected, then activity is on TP port */
		if (de->media_type == DE_MEDIA_AUI ||
		    de->media_type == DE_MEDIA_BNC) {
			if (de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))
				de->media_type = DE_MEDIA_TP_AUTO;

		/* TP selected.  If there is only TP and BNC, then it's BNC */
		else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_BNC) &&
			 de_ok_to_advertise(de, DE_MEDIA_BNC))
			de->media_type = DE_MEDIA_BNC;

		/* TP selected.  If there is only TP and AUI, then it's AUI */
		else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_AUI) &&
			 de_ok_to_advertise(de, DE_MEDIA_AUI))
			de->media_type = DE_MEDIA_AUI;

		/* otherwise, ignore the hint */

	/*
	 * Absent or ambiguous activity hint, move to next advertised
	 * media state.  If de->media_type is left unchanged, this
	 * simply resets the PHY and reloads the current media settings.
	 */
	if (de->media_type == DE_MEDIA_AUI) {
		u32 next_states[] = { DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
		de_next_media(de, next_states, ARRAY_SIZE(next_states));
	} else if (de->media_type == DE_MEDIA_BNC) {
		u32 next_states[] = { DE_MEDIA_TP_AUTO, DE_MEDIA_AUI };
		de_next_media(de, next_states, ARRAY_SIZE(next_states));
	} else {
		u32 next_states[] = { DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
		de_next_media(de, next_states, ARRAY_SIZE(next_states));

	spin_lock_irqsave(&de->lock, flags);
	spin_unlock_irqrestore(&de->lock, flags);

	de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
	add_timer(&de->media_timer);
	if (netif_msg_timer(de))
		printk(KERN_INFO "%s: no link, trying media %s, status %x\n",
		       dev->name, media_name[de->media_type], status);
static void de_media_interrupt (struct de_private *de, u32 status)
	if (status & LinkPass) {
		mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);

	if (!(status & LinkFail))

	if (netif_carrier_ok(de->dev)) {
		mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
static int de_reset_mac (struct de_private *de)
	/*
	 * Reset MAC.  de4x5.c and tulip.c examined for "advice"
	 */

	if (dr32(BusMode) == 0xffffffff)

	/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
	dw32 (BusMode, CmdReset);

	dw32 (BusMode, de_bus_mode);

	for (tmp = 0; tmp < 5; tmp++) {

	status = dr32(MacStatus);
	if (status & (RxState | TxState))
	if (status == 0xffffffff)
*de
)
1206 pci_read_config_dword(de
->pdev
, PCIPM
, &pmctl
);
1207 if (pmctl
& PM_Mask
) {
1209 pci_write_config_dword(de
->pdev
, PCIPM
, pmctl
);
1211 /* de4x5.c delays, so we do too */
1216 static void de_adapter_sleep (struct de_private
*de
)
1223 pci_read_config_dword(de
->pdev
, PCIPM
, &pmctl
);
1225 pci_write_config_dword(de
->pdev
, PCIPM
, pmctl
);
static int de_init_hw (struct de_private *de)
	struct net_device *dev = de->dev;

	de_adapter_wake(de);

	macmode = dr32(MacMode) & ~MacModeClear;

	rc = de_reset_mac(de);

	de_set_media(de); /* reset phy */

	dw32(RxRingAddr, de->ring_dma);
	dw32(TxRingAddr, de->ring_dma + (sizeof(struct de_desc) * DE_RX_RING_SIZE));

	dw32(MacMode, RxTx | macmode);

	dr32(RxMissed); /* self-clearing */

	dw32(IntrMask, de_intr_mask);

	de_set_rx_mode(dev);
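/*
 * Rx and Tx descriptors live in one DMA-coherent block: DE_RX_RING_SIZE Rx
 * descriptors followed by DE_TX_RING_SIZE Tx descriptors (see de_alloc_rings
 * and the TxRingAddr setup above).  The last descriptor of each ring carries
 * RingEnd so the chip wraps back to the start instead of running past it.
 */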
static int de_refill_rx (struct de_private *de)
	for (i = 0; i < DE_RX_RING_SIZE; i++) {
		struct sk_buff *skb;

		skb = dev_alloc_skb(de->rx_buf_sz);

		de->rx_skb[i].mapping = pci_map_single(de->pdev,
			skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
		de->rx_skb[i].skb = skb;

		de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
		if (i == (DE_RX_RING_SIZE - 1))
			de->rx_ring[i].opts2 =
				cpu_to_le32(RingEnd | de->rx_buf_sz);
		else
			de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz);
		de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping);
		de->rx_ring[i].addr2 = 0;
static int de_init_rings (struct de_private *de)
	memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
	de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);

	de->tx_head = de->tx_tail = 0;

	return de_refill_rx (de);

static int de_alloc_rings (struct de_private *de)
	de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma);
	de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
	return de_init_rings(de);
static void de_clean_rings (struct de_private *de)
	memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE);
	de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);

	memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
	de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);

	for (i = 0; i < DE_RX_RING_SIZE; i++) {
		if (de->rx_skb[i].skb) {
			pci_unmap_single(de->pdev, de->rx_skb[i].mapping,
					 de->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(de->rx_skb[i].skb);

	for (i = 0; i < DE_TX_RING_SIZE; i++) {
		struct sk_buff *skb = de->tx_skb[i].skb;
		if ((skb) && (skb != DE_DUMMY_SKB)) {
			if (skb != DE_SETUP_SKB) {
				de->net_stats.tx_dropped++;
				pci_unmap_single(de->pdev,
						 de->tx_skb[i].mapping,
						 skb->len, PCI_DMA_TODEVICE);
			} else {
				pci_unmap_single(de->pdev,
						 de->tx_skb[i].mapping,
						 sizeof(de->setup_frame),

	memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE);
	memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE);

static void de_free_rings (struct de_private *de)
	pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma);
static int de_open (struct net_device *dev)
	struct de_private *de = dev->priv;
	unsigned long flags;

	if (netif_msg_ifup(de))
		printk(KERN_DEBUG "%s: enabling interface\n", dev->name);

	de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	rc = de_alloc_rings(de);
		printk(KERN_ERR "%s: ring allocation failure, err=%d\n",

	rc = de_init_hw(de);
		printk(KERN_ERR "%s: h/w init failure, err=%d\n",

	rc = request_irq(dev->irq, de_interrupt, SA_SHIRQ, dev->name, dev);
		printk(KERN_ERR "%s: IRQ %d request failure, err=%d\n",
		       dev->name, dev->irq, rc);

	netif_start_queue(dev);
	mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);

	spin_lock_irqsave(&de->lock, flags);
	spin_unlock_irqrestore(&de->lock, flags);
static int de_close (struct net_device *dev)
	struct de_private *de = dev->priv;
	unsigned long flags;

	if (netif_msg_ifdown(de))
		printk(KERN_DEBUG "%s: disabling interface\n", dev->name);

	del_timer_sync(&de->media_timer);

	spin_lock_irqsave(&de->lock, flags);
	netif_stop_queue(dev);
	netif_carrier_off(dev);
	spin_unlock_irqrestore(&de->lock, flags);

	free_irq(dev->irq, dev);

	de_adapter_sleep(de);
	pci_disable_device(de->pdev);
static void de_tx_timeout (struct net_device *dev)
	struct de_private *de = dev->priv;

	printk(KERN_DEBUG "%s: NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
	       dev->name, dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
	       de->rx_tail, de->tx_head, de->tx_tail);

	del_timer_sync(&de->media_timer);

	disable_irq(dev->irq);
	spin_lock_irq(&de->lock);

	netif_stop_queue(dev);
	netif_carrier_off(dev);

	spin_unlock_irq(&de->lock);
	enable_irq(dev->irq);

	/* Update the error counts. */

	synchronize_irq(dev->irq);

	netif_wake_queue(dev);
static void __de_get_regs(struct de_private *de, u8 *buf)
	u32 *rbuf = (u32 *)buf;

	for (i = 0; i < DE_NUM_REGS; i++)
		rbuf[i] = dr32(i * 8);

	/* handle self-clearing RxMissed counter, CSR8 */
	de_rx_missed(de, rbuf[8]);
static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
	ecmd->supported = de->media_supported;
	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->phy_address = 0;
	ecmd->advertising = de->media_advertise;

	switch (de->media_type) {
		ecmd->port = PORT_AUI;
		ecmd->port = PORT_BNC;
		ecmd->port = PORT_TP;
	ecmd->speed = SPEED_10;

	if (dr32(MacMode) & FullDuplex)
		ecmd->duplex = DUPLEX_FULL;
	else
		ecmd->duplex = DUPLEX_HALF;

		ecmd->autoneg = AUTONEG_DISABLE;
		ecmd->autoneg = AUTONEG_ENABLE;

	/* ignore maxtxpkt, maxrxpkt for now */
static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
	unsigned int media_lock;

	if (ecmd->speed != SPEED_10 && ecmd->speed != 5 && ecmd->speed != 2)
	if (de->de21040 && ecmd->speed == 2)
	if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
	if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC)
	if (de->de21040 && ecmd->port == PORT_BNC)
	if (ecmd->transceiver != XCVR_INTERNAL)
	if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
	if (ecmd->advertising & ~de->media_supported)
	if (ecmd->autoneg == AUTONEG_ENABLE &&
	    (!(ecmd->advertising & ADVERTISED_Autoneg)))

	switch (ecmd->port) {
		new_media = DE_MEDIA_AUI;
		if (!(ecmd->advertising & ADVERTISED_AUI))
		new_media = DE_MEDIA_BNC;
		if (!(ecmd->advertising & ADVERTISED_BNC))
		if (ecmd->autoneg == AUTONEG_ENABLE)
			new_media = DE_MEDIA_TP_AUTO;
		else if (ecmd->duplex == DUPLEX_FULL)
			new_media = DE_MEDIA_TP_FD;
		else
			new_media = DE_MEDIA_TP;
		if (!(ecmd->advertising & ADVERTISED_TP))
		if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half)))

	media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1;

	if ((new_media == de->media_type) &&
	    (media_lock == de->media_lock) &&
	    (ecmd->advertising == de->media_advertise))
		return 0; /* nothing to change */

	de->media_type = new_media;
	de->media_lock = media_lock;
	de->media_advertise = ecmd->advertising;
static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
	struct de_private *de = dev->priv;

	strcpy (info->driver, DRV_NAME);
	strcpy (info->version, DRV_VERSION);
	strcpy (info->bus_info, pci_name(de->pdev));
	info->eedump_len = DE_EEPROM_SIZE;

static int de_get_regs_len(struct net_device *dev)
	return DE_REGS_SIZE;
static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
	struct de_private *de = dev->priv;

	spin_lock_irq(&de->lock);
	rc = __de_get_settings(de, ecmd);
	spin_unlock_irq(&de->lock);

static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
	struct de_private *de = dev->priv;

	spin_lock_irq(&de->lock);
	rc = __de_set_settings(de, ecmd);
	spin_unlock_irq(&de->lock);

static u32 de_get_msglevel(struct net_device *dev)
	struct de_private *de = dev->priv;

	return de->msg_enable;

static void de_set_msglevel(struct net_device *dev, u32 msglvl)
	struct de_private *de = dev->priv;

	de->msg_enable = msglvl;
static int de_get_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data)
	struct de_private *de = dev->priv;

	if ((eeprom->offset != 0) || (eeprom->magic != 0) ||
	    (eeprom->len != DE_EEPROM_SIZE))
	memcpy(data, de->ee_data, eeprom->len);

static int de_nway_reset(struct net_device *dev)
	struct de_private *de = dev->priv;

	if (de->media_type != DE_MEDIA_TP_AUTO)
	if (netif_carrier_ok(de->dev))

	status = dr32(SIAStatus);
	dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
	if (netif_msg_link(de))
		printk(KERN_INFO "%s: link nway restart, status %x,%x\n",
		       de->dev->name, status, dr32(SIAStatus));
static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs,
	struct de_private *de = dev->priv;

	regs->version = (DE_REGS_VER << 2) | de->de21040;

	spin_lock_irq(&de->lock);
	__de_get_regs(de, data);
	spin_unlock_irq(&de->lock);
static struct ethtool_ops de_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.get_drvinfo		= de_get_drvinfo,
	.get_regs_len		= de_get_regs_len,
	.get_settings		= de_get_settings,
	.set_settings		= de_set_settings,
	.get_msglevel		= de_get_msglevel,
	.set_msglevel		= de_set_msglevel,
	.get_eeprom		= de_get_eeprom,
	.nway_reset		= de_nway_reset,
	.get_regs		= de_get_regs,
static void __init de21040_get_mac_address (struct de_private *de)
	dw32 (ROMCmd, 0);	/* Reset the pointer with a dummy write. */

	for (i = 0; i < 6; i++) {
		int value, boguscnt = 100000;
		do
			value = dr32(ROMCmd);
		while (value < 0 && --boguscnt > 0);
		de->dev->dev_addr[i] = value;

			printk(KERN_WARNING PFX "timeout reading 21040 MAC address byte %u\n", i);
static void __init de21040_get_media_info(struct de_private *de)
	de->media_type = DE_MEDIA_TP;
	de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full |
			       SUPPORTED_10baseT_Half | SUPPORTED_AUI;
	de->media_advertise = de->media_supported;

	for (i = 0; i < DE_MAX_MEDIA; i++) {
		case DE_MEDIA_TP_FD:
			de->media[i].type = i;
			de->media[i].csr13 = t21040_csr13[i];
			de->media[i].csr14 = t21040_csr14[i];
			de->media[i].csr15 = t21040_csr15[i];
			de->media[i].type = DE_MEDIA_INVALID;
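/*
 * The SROM is a serial EEPROM bit-banged through the ROMCmd register: chip
 * select and clock are toggled while the read opcode and address are shifted
 * out one bit at a time, then 16 data bits are clocked back in.  The 0xff
 * probe read in de21041_get_srom_info uses the extra returned bits to guess
 * whether the part takes 6- or 8-bit addresses.
 */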
/* Note: this routine returns extra data bits for size detection. */
static unsigned __init tulip_read_eeprom(void __iomem *regs, int location, int addr_len)
	unsigned retval = 0;
	void __iomem *ee_addr = regs + ROMCmd;
	int read_cmd = location | (EE_READ_CMD << addr_len);

	writel(EE_ENB & ~EE_CS, ee_addr);
	writel(EE_ENB, ee_addr);

	/* Shift the read command bits out. */
	for (i = 4 + addr_len; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
		writel(EE_ENB | dataval, ee_addr);
		writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
		retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
	writel(EE_ENB, ee_addr);

	for (i = 16; i > 0; i--) {
		writel(EE_ENB | EE_SHIFT_CLK, ee_addr);
		retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
		writel(EE_ENB, ee_addr);

	/* Terminate the EEPROM access. */
	writel(EE_ENB & ~EE_CS, ee_addr);
static void __init de21041_get_srom_info (struct de_private *de)
	unsigned i, sa_offset = 0, ofs;
	u8 ee_data[DE_EEPROM_SIZE + 6] = {};
	unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6;
	struct de_srom_info_leaf *il;

	/* download entire eeprom */
	for (i = 0; i < DE_EEPROM_WORDS; i++)
		((u16 *)ee_data)[i] =
			le16_to_cpu(tulip_read_eeprom(de->regs, i, ee_addr_size));

	/* DEC now has a specification but early board makers
	   just put the address in the first EEPROM locations. */
	/* This does memcmp(eedata, eedata+16, 8) */

#ifndef CONFIG_MIPS_COBALT

	for (i = 0; i < 8; i++)
		if (ee_data[i] != ee_data[16+i])

	/* store MAC address */
	for (i = 0; i < 6; i++)
		de->dev->dev_addr[i] = ee_data[i + sa_offset];

	/* get offset of controller 0 info leaf.  ignore 2nd byte. */
	ofs = ee_data[SROMC0InfoLeaf];
	if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block)))

	/* get pointer to info leaf */
	il = (struct de_srom_info_leaf *) &ee_data[ofs];

	/* paranoia checks */
	if (il->n_blocks == 0)
	if ((sizeof(ee_data) - ofs) <
	    (sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks)))

	/* get default media type */
	switch (DE_UNALIGNED_16(&il->default_media)) {
	case 0x0001: de->media_type = DE_MEDIA_BNC; break;
	case 0x0002: de->media_type = DE_MEDIA_AUI; break;
	case 0x0204: de->media_type = DE_MEDIA_TP_FD; break;
	default: de->media_type = DE_MEDIA_TP_AUTO; break;

	if (netif_msg_probe(de))
		printk(KERN_INFO "de%d: SROM leaf offset %u, default media %s\n",
		       media_name[de->media_type]);

	/* init SIA register values to defaults */
	for (i = 0; i < DE_MAX_MEDIA; i++) {
		de->media[i].type = DE_MEDIA_INVALID;
		de->media[i].csr13 = 0xffff;
		de->media[i].csr14 = 0xffff;
		de->media[i].csr15 = 0xffff;

	/* parse media blocks to see what medias are supported,
	 * and if any custom CSR values are provided
	 */
	bufp = ((void *)il) + sizeof(*il);
	for (i = 0; i < il->n_blocks; i++) {
		struct de_srom_media_block *ib = bufp;

		/* index based on media type in media block */
		switch(ib->opts & MediaBlockMask) {
		case 0: /* 10baseT */
			de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half
					  | SUPPORTED_Autoneg;
			de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
			de->media_supported |= SUPPORTED_BNC;
			de->media_supported |= SUPPORTED_AUI;
		case 4: /* 10baseT-FD */
			de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full
					  | SUPPORTED_Autoneg;
			idx = DE_MEDIA_TP_FD;
			de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;

		de->media[idx].type = idx;

		if (netif_msg_probe(de))
			printk(KERN_INFO "de%d: media block #%u: %s",
			       media_name[de->media[idx].type]);

		bufp += sizeof (ib->opts);

		if (ib->opts & MediaCustomCSRs) {
			de->media[idx].csr13 = DE_UNALIGNED_16(&ib->csr13);
			de->media[idx].csr14 = DE_UNALIGNED_16(&ib->csr14);
			de->media[idx].csr15 = DE_UNALIGNED_16(&ib->csr15);
			bufp += sizeof(ib->csr13) + sizeof(ib->csr14) +

			if (netif_msg_probe(de))
				printk(" (%x,%x,%x)\n",
				       de->media[idx].csr13,
				       de->media[idx].csr14,
				       de->media[idx].csr15);

		} else if (netif_msg_probe(de))

		if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))

	de->media_advertise = de->media_supported;

	/* fill in defaults, for cases where custom CSRs not used */
	for (i = 0; i < DE_MAX_MEDIA; i++) {
		if (de->media[i].csr13 == 0xffff)
			de->media[i].csr13 = t21041_csr13[i];
		if (de->media[i].csr14 == 0xffff)
			de->media[i].csr14 = t21041_csr14[i];
		if (de->media[i].csr15 == 0xffff)
			de->media[i].csr15 = t21041_csr15[i];

	de->ee_data = kmalloc(DE_EEPROM_SIZE, GFP_KERNEL);
		memcpy(de->ee_data, &ee_data[0], DE_EEPROM_SIZE);

	/* for error cases, it's ok to assume we support all these */
	for (i = 0; i < DE_MAX_MEDIA; i++)
		de->media[i].type = i;
	de->media_supported =
		SUPPORTED_10baseT_Half |
		SUPPORTED_10baseT_Full |
static int __init de_init_one (struct pci_dev *pdev,
			       const struct pci_device_id *ent)
	struct net_device *dev;
	struct de_private *de;
	unsigned long pciaddr;
	static int board_idx = -1;

		printk("%s", version);

	/* allocate a new ethernet device structure, and fill in defaults */
	dev = alloc_etherdev(sizeof(struct de_private));

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	dev->open = de_open;
	dev->stop = de_close;
	dev->set_multicast_list = de_set_rx_mode;
	dev->hard_start_xmit = de_start_xmit;
	dev->get_stats = de_get_stats;
	dev->ethtool_ops = &de_ethtool_ops;
	dev->tx_timeout = de_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;

	de->de21040 = ent->driver_data == 0 ? 1 : 0;

	de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug);
	de->board_idx = board_idx;
	spin_lock_init (&de->lock);
	init_timer(&de->media_timer);
	if (de->de21040)
		de->media_timer.function = de21040_media_timer;
	else
		de->media_timer.function = de21041_media_timer;
	de->media_timer.data = (unsigned long) de;

	netif_carrier_off(dev);
	netif_stop_queue(dev);

	/* wake up device, assign resources */
	rc = pci_enable_device(pdev);

	/* reserve PCI resources to ensure driver atomicity */
	rc = pci_request_regions(pdev, DRV_NAME);
		goto err_out_disable;

	/* check for invalid IRQ value */
	if (pdev->irq < 2) {
		printk(KERN_ERR PFX "invalid irq (%d) for pci dev %s\n",
		       pdev->irq, pci_name(pdev));

	dev->irq = pdev->irq;

	/* obtain and check validity of PCI I/O address */
	pciaddr = pci_resource_start(pdev, 1);
		printk(KERN_ERR PFX "no MMIO resource for pci dev %s\n",
	if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
		printk(KERN_ERR PFX "MMIO resource (%lx) too small on pci dev %s\n",
		       pci_resource_len(pdev, 1), pci_name(pdev));

	/* remap CSR registers */
	regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
		printk(KERN_ERR PFX "Cannot map PCI MMIO (%lx@%lx) on pci dev %s\n",
		       pci_resource_len(pdev, 1), pciaddr, pci_name(pdev));
	dev->base_addr = (unsigned long) regs;

	de_adapter_wake(de);

	/* make sure hardware is not running */
	rc = de_reset_mac(de);
		printk(KERN_ERR PFX "Cannot reset MAC, pci dev %s\n",

	/* get MAC address, initialize default media type and
	 * get list of supported media
	 */
	if (de->de21040) {
		de21040_get_mac_address(de);
		de21040_get_media_info(de);
	} else {
		de21041_get_srom_info(de);
	}

	/* register new network interface with kernel */
	rc = register_netdev(dev);

	/* print info about board and interface just registered */
	printk (KERN_INFO "%s: %s at 0x%lx, "
		"%02x:%02x:%02x:%02x:%02x:%02x, "
		de->de21040 ? "21040" : "21041",
		dev->dev_addr[0], dev->dev_addr[1],
		dev->dev_addr[2], dev->dev_addr[3],
		dev->dev_addr[4], dev->dev_addr[5],

	pci_set_drvdata(pdev, dev);

	/* enable busmastering */
	pci_set_master(pdev);

	/* put adapter to sleep */
	de_adapter_sleep(de);

	pci_release_regions(pdev);

	pci_disable_device(pdev);
static void __exit de_remove_one (struct pci_dev *pdev)
	struct net_device *dev = pci_get_drvdata(pdev);
	struct de_private *de = dev->priv;

	unregister_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
static int de_suspend (struct pci_dev *pdev, pm_message_t state)
	struct net_device *dev = pci_get_drvdata (pdev);
	struct de_private *de = dev->priv;

	if (netif_running (dev)) {
		del_timer_sync(&de->media_timer);

		disable_irq(dev->irq);
		spin_lock_irq(&de->lock);

		netif_stop_queue(dev);
		netif_device_detach(dev);
		netif_carrier_off(dev);

		spin_unlock_irq(&de->lock);
		enable_irq(dev->irq);

		/* Update the error counts. */

		synchronize_irq(dev->irq);

		de_adapter_sleep(de);
		pci_disable_device(pdev);
	} else {
		netif_device_detach(dev);

static int de_resume (struct pci_dev *pdev)
	struct net_device *dev = pci_get_drvdata (pdev);
	struct de_private *de = dev->priv;

	if (netif_device_present(dev))
	if (netif_running(dev)) {
		pci_enable_device(pdev);
		netif_device_attach(dev);
	} else {
		netif_device_attach(dev);

#endif /* CONFIG_PM */
static struct pci_driver de_driver = {
	.id_table	= de_pci_tbl,
	.probe		= de_init_one,
	.remove		= __exit_p(de_remove_one),
	.suspend	= de_suspend,
	.resume		= de_resume,

static int __init de_init (void)
	printk("%s", version);
	return pci_module_init (&de_driver);

static void __exit de_exit (void)
	pci_unregister_driver (&de_driver);

module_init(de_init);
module_exit(de_exit);