/*
 * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
 * All rights reserved.  www.lanmedia.com
 * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This code is written by:
 * Andrew Stanley-Jones (asj@cban.com),
 * Rob Braun (bbraun@vix.com),
 * Michael Graff (explorer@vix.com) and
 * Matt Thomas (matt@3am-software.com).
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License version 2, incorporated herein by reference.
 *
 * Driver for the LanMedia LMC5200, LMC5245, LMC1000, LMC1200 cards.
 *
 * To control link specific options lmcctl is required.
 * It can be obtained from ftp.lanmedia.com.
 *
 * Linux uses the device struct lmc_private to pass private information
 * to the driver.
 *
 * The initialization portion of this driver consists of the lmc_reset()
 * and lmc_dec_reset() functions, the LED controls and the
 * lmc_initcsrs() function.
 *
 * The watchdog function runs every second and checks to see if
 * we still have link, and that the timing source is what we expected
 * it to be.  If link is lost, the interface is marked down, and
 * we no longer can transmit.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hdlc.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/inet.h>
#include <linux/bitops.h>
#include <asm/processor.h>             /* Processor type for cache alignment. */
#include <asm/uaccess.h>
//#include <asm/spinlock.h>
#define DRIVER_MAJOR_VERSION     1
#define DRIVER_MINOR_VERSION    34
#define DRIVER_SUB_VERSION       0

#define DRIVER_VERSION ((DRIVER_MAJOR_VERSION << 8) + DRIVER_MINOR_VERSION)
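/* Worked example: with major 1 and minor 34 this packs to (1 << 8) + 34 =
 * 0x0122, which is the value folded into extra_stats.version_size below. */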
#include "lmc.h"
#include "lmc_var.h"
#include "lmc_ioctl.h"
#include "lmc_debug.h"
#include "lmc_proto.h"
static int LMC_PKT_BUF_SZ = 1542;

static DEFINE_PCI_DEVICE_TABLE(lmc_pci_tbl) = {
    { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
      PCI_VENDOR_ID_LMC, PCI_ANY_ID },
    { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
      PCI_ANY_ID, PCI_VENDOR_ID_LMC },
    { 0 }
};

MODULE_DEVICE_TABLE(pci, lmc_pci_tbl);
MODULE_LICENSE("GPL v2");
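/*
 * The table above matches the DEC 21140 "fast" Tulip with the LMC ID in
 * either subsystem position; some firmware reports the subsystem vendor and
 * device swapped, which lmc_init_one() below also compensates for.
 */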
static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
                                  struct net_device *dev);
static int lmc_rx(struct net_device *dev);
static int lmc_open(struct net_device *dev);
static int lmc_close(struct net_device *dev);
static struct net_device_stats *lmc_get_stats(struct net_device *dev);
static irqreturn_t lmc_interrupt(int irq, void *dev_instance);
static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, size_t csr_size);
static void lmc_softreset(lmc_softc_t * const);
static void lmc_running_reset(struct net_device *dev);
static int lmc_ifdown(struct net_device * const);
static void lmc_watchdog(unsigned long data);
static void lmc_reset(lmc_softc_t * const sc);
static void lmc_dec_reset(lmc_softc_t * const sc);
static void lmc_driver_timeout(struct net_device *dev);
/*
 * linux reserves 16 device specific IOCTLs.  We call them
 * LMCIOC* to control various bits of our world.
 */
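/*
 * Illustrative only (a sketch, not compiled here): a management tool such as
 * lmcctl is expected to drive these ioctls roughly like
 *
 *     struct ifreq ifr;
 *     lmc_ctl_t ctl;
 *     strcpy(ifr.ifr_name, "hdlc0");
 *     ifr.ifr_data = (char *)&ctl;
 *     ioctl(sockfd, LMCIOCGINFO, &ifr);   // read current driver state
 *
 * where sockfd is any AF_INET socket and "hdlc0"/sockfd are placeholders.
 */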
int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
{
    lmc_softc_t *sc = dev_to_sc(dev);
    lmc_ctl_t ctl;
    int ret = -EOPNOTSUPP;
    u16 regVal;
    unsigned long flags;

    lmc_trace(dev, "lmc_ioctl in");

    /*
     * Most functions mess with the structure
     * Disable interrupts while we do the polling
     */

    switch (cmd) {
        /*
         * Return current driver state.  Since we keep this up
         * to date internally, just copy this out to the user.
         */
    case LMCIOCGINFO: /*fold01*/
        if (copy_to_user(ifr->ifr_data, &sc->ictl, sizeof(lmc_ctl_t)))
            ret = -EFAULT;
        else
            ret = 0;
        break;
    case LMCIOCSINFO: /*fold01*/
        if (!capable(CAP_NET_ADMIN)) {
            ret = -EPERM;
            break;
        }

        if (dev->flags & IFF_UP) {
            ret = -EBUSY;
            break;
        }

        if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {
            ret = -EFAULT;
            break;
        }

        spin_lock_irqsave(&sc->lmc_lock, flags);
        sc->lmc_media->set_status(sc, &ctl);

        if (ctl.crc_length != sc->ictl.crc_length) {
            sc->lmc_media->set_crc_length(sc, ctl.crc_length);
            if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16)
                sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE;
            else
                sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE;
        }
        spin_unlock_irqrestore(&sc->lmc_lock, flags);

        ret = 0;
        break;
    case LMCIOCIFTYPE: /*fold01*/
        {
            u16 old_type = sc->if_type;
            u16 new_type;

            if (!capable(CAP_NET_ADMIN)) {
                ret = -EPERM;
                break;
            }

            if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u16))) {
                ret = -EFAULT;
                break;
            }

            if (new_type == old_type) {
                ret = 0;
                break; /* no change */
            }

            spin_lock_irqsave(&sc->lmc_lock, flags);
            lmc_proto_close(sc);

            sc->if_type = new_type;
            lmc_proto_attach(sc);
            ret = lmc_proto_open(sc);
            spin_unlock_irqrestore(&sc->lmc_lock, flags);
            break;
        }
    case LMCIOCGETXINFO: /*fold01*/
        spin_lock_irqsave(&sc->lmc_lock, flags);
        sc->lmc_xinfo.Magic0 = 0xBEEFCAFE;

        sc->lmc_xinfo.PciCardType = sc->lmc_cardtype;
        sc->lmc_xinfo.PciSlotNumber = 0;
        sc->lmc_xinfo.DriverMajorVersion = DRIVER_MAJOR_VERSION;
        sc->lmc_xinfo.DriverMinorVersion = DRIVER_MINOR_VERSION;
        sc->lmc_xinfo.DriverSubVersion = DRIVER_SUB_VERSION;
        sc->lmc_xinfo.XilinxRevisionNumber =
            lmc_mii_readreg(sc, 0, 3) & 0xf;
        sc->lmc_xinfo.MaxFrameSize = LMC_PKT_BUF_SZ;
        sc->lmc_xinfo.link_status = sc->lmc_media->get_link_status(sc);
        sc->lmc_xinfo.mii_reg16 = lmc_mii_readreg(sc, 0, 16);
        spin_unlock_irqrestore(&sc->lmc_lock, flags);

        sc->lmc_xinfo.Magic1 = 0xDEADBEEF;

        if (copy_to_user(ifr->ifr_data, &sc->lmc_xinfo,
                         sizeof(struct lmc_xinfo)))
            ret = -EFAULT;
        else
            ret = 0;

        break;
    case LMCIOCGETLMCSTATS:
        spin_lock_irqsave(&sc->lmc_lock, flags);
        if (sc->lmc_cardtype == LMC_CARDTYPE_T1) {
            lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_LSB);
            sc->extra_stats.framingBitErrorCount +=
                lmc_mii_readreg(sc, 0, 18) & 0xff;
            lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_MSB);
            sc->extra_stats.framingBitErrorCount +=
                (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
            lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_LSB);
            sc->extra_stats.lineCodeViolationCount +=
                lmc_mii_readreg(sc, 0, 18) & 0xff;
            lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_MSB);
            sc->extra_stats.lineCodeViolationCount +=
                (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
            lmc_mii_writereg(sc, 0, 17, T1FRAMER_AERR);
            regVal = lmc_mii_readreg(sc, 0, 18) & 0xff;

            sc->extra_stats.lossOfFrameCount +=
                (regVal & T1FRAMER_LOF_MASK) >> 4;
            sc->extra_stats.changeOfFrameAlignmentCount +=
                (regVal & T1FRAMER_COFA_MASK) >> 2;
            sc->extra_stats.severelyErroredFrameCount +=
                regVal & T1FRAMER_SEF_MASK;
        }
        spin_unlock_irqrestore(&sc->lmc_lock, flags);
        if (copy_to_user(ifr->ifr_data, &sc->lmc_device->stats,
                         sizeof(sc->lmc_device->stats)) ||
            copy_to_user(ifr->ifr_data + sizeof(sc->lmc_device->stats),
                         &sc->extra_stats, sizeof(sc->extra_stats)))
            ret = -EFAULT;
        else
            ret = 0;
        break;
    case LMCIOCCLEARLMCSTATS:
        if (!capable(CAP_NET_ADMIN)) {
            ret = -EPERM;
            break;
        }

        spin_lock_irqsave(&sc->lmc_lock, flags);
        memset(&sc->lmc_device->stats, 0, sizeof(sc->lmc_device->stats));
        memset(&sc->extra_stats, 0, sizeof(sc->extra_stats));
        sc->extra_stats.check = STATCHECK;
        sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
            sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
        sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
        spin_unlock_irqrestore(&sc->lmc_lock, flags);

        ret = 0;
        break;
    case LMCIOCSETCIRCUIT: /*fold01*/
        if (!capable(CAP_NET_ADMIN)) {
            ret = -EPERM;
            break;
        }

        if (dev->flags & IFF_UP) {
            ret = -EBUSY;
            break;
        }

        if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {
            ret = -EFAULT;
            break;
        }
        spin_lock_irqsave(&sc->lmc_lock, flags);
        sc->lmc_media->set_circuit_type(sc, ctl.circuit_type);
        sc->ictl.circuit_type = ctl.circuit_type;
        spin_unlock_irqrestore(&sc->lmc_lock, flags);
        ret = 0;

        break;
    case LMCIOCRESET: /*fold01*/
        if (!capable(CAP_NET_ADMIN)) {
            ret = -EPERM;
            break;
        }

        spin_lock_irqsave(&sc->lmc_lock, flags);
        /* Reset driver and bring back to current state */
        printk(" REG16 before reset +%04x\n", lmc_mii_readreg(sc, 0, 16));
        lmc_running_reset(dev);
        printk(" REG16 after reset +%04x\n", lmc_mii_readreg(sc, 0, 16));

        LMC_EVENT_LOG(LMC_EVENT_FORCEDRESET, LMC_CSR_READ(sc, csr_status), lmc_mii_readreg(sc, 0, 16));
        spin_unlock_irqrestore(&sc->lmc_lock, flags);

        ret = 0;

        break;
#ifdef DEBUG
    case LMCIOCDUMPEVENTLOG:
        if (copy_to_user(ifr->ifr_data, &lmcEventLogIndex, sizeof(u32))) {
            ret = -EFAULT;
            break;
        }
        if (copy_to_user(ifr->ifr_data + sizeof(u32), lmcEventLogBuf,
                         sizeof(lmcEventLogBuf)))
            ret = -EFAULT;
        else
            ret = 0;

        break;
#endif /* end ifdef _DBG_EVENTLOG */
    case LMCIOCT1CONTROL: /*fold01*/
        if (sc->lmc_cardtype != LMC_CARDTYPE_T1) {
            ret = -EOPNOTSUPP;
            break;
        }
        break;
    case LMCIOCXILINX: /*fold01*/
        {
            struct lmc_xilinx_control xc; /*fold02*/

            if (!capable(CAP_NET_ADMIN)) {
                ret = -EPERM;
                break;
            }

            /*
             * Stop the transmitter while we restart the hardware
             */
            netif_stop_queue(dev);

            if (copy_from_user(&xc, ifr->ifr_data, sizeof(struct lmc_xilinx_control))) {
                ret = -EFAULT;
                netif_wake_queue(dev);
                break;
            }

            switch (xc.command) {
            case lmc_xilinx_reset: /*fold02*/
                {
                    u16 mii;
                    int i;

                    spin_lock_irqsave(&sc->lmc_lock, flags);
                    mii = lmc_mii_readreg(sc, 0, 16);

                    /*
                     * Make all of them 0 and make input
                     */
                    lmc_gpio_mkinput(sc, 0xff);

                    /*
                     * make the reset output
                     */
                    lmc_gpio_mkoutput(sc, LMC_GEP_RESET);

                    /*
                     * RESET low to force configuration.  This also forces
                     * the transmitter clock to be internal, but we expect to reset
                     * that later anyway.
                     */
                    sc->lmc_gpio &= ~LMC_GEP_RESET;
                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

                    /*
                     * hold for more than 10 microseconds
                     */
                    udelay(50);

                    sc->lmc_gpio |= LMC_GEP_RESET;
                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

                    /*
                     * stop driving Xilinx-related signals
                     */
                    lmc_gpio_mkinput(sc, 0xff);

                    /* Reset the framer hardware */
                    sc->lmc_media->set_link_status(sc, 1);
                    sc->lmc_media->set_status(sc, NULL);
                    // lmc_softreset(sc);

                    for (i = 0; i < 5; i++) {
                        lmc_led_on(sc, LMC_DS3_LED0);
                        mdelay(100);
                        lmc_led_off(sc, LMC_DS3_LED0);
                        lmc_led_on(sc, LMC_DS3_LED1);
                        mdelay(100);
                        lmc_led_off(sc, LMC_DS3_LED1);
                        lmc_led_on(sc, LMC_DS3_LED3);
                        mdelay(100);
                        lmc_led_off(sc, LMC_DS3_LED3);
                        lmc_led_on(sc, LMC_DS3_LED2);
                        mdelay(100);
                        lmc_led_off(sc, LMC_DS3_LED2);
                    }
                    spin_unlock_irqrestore(&sc->lmc_lock, flags);

                    ret = 0;
                }
                break;
            case lmc_xilinx_load_prom: /*fold02*/
                {
                    u16 mii;
                    int timeout = 500000;

                    spin_lock_irqsave(&sc->lmc_lock, flags);
                    mii = lmc_mii_readreg(sc, 0, 16);

                    /*
                     * Make all of them 0 and make input
                     */
                    lmc_gpio_mkinput(sc, 0xff);

                    /*
                     * make the reset output
                     */
                    lmc_gpio_mkoutput(sc, LMC_GEP_DP | LMC_GEP_RESET);

                    /*
                     * RESET low to force configuration.  This also forces
                     * the transmitter clock to be internal, but we expect to reset
                     * that later anyway.
                     */
                    sc->lmc_gpio &= ~(LMC_GEP_RESET | LMC_GEP_DP);
                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

                    /*
                     * hold for more than 10 microseconds
                     */
                    udelay(50);

                    sc->lmc_gpio |= LMC_GEP_DP | LMC_GEP_RESET;
                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

                    /*
                     * busy wait for the chip to reset
                     */
                    while ((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
                           (timeout-- > 0))
                        ;

                    /*
                     * stop driving Xilinx-related signals
                     */
                    lmc_gpio_mkinput(sc, 0xff);
                    spin_unlock_irqrestore(&sc->lmc_lock, flags);

                    ret = 0;
                }
                break;
            case lmc_xilinx_load: /*fold02*/
                {
                    char *data;
                    int pos;
                    int timeout = 500000;

                    if (!xc.data) {
                        ret = -EINVAL;
                        break;
                    }

                    data = kmalloc(xc.len, GFP_KERNEL);
                    if (!data) {
                        ret = -ENOMEM;
                        break;
                    }

                    if (copy_from_user(data, xc.data, xc.len)) {
                        kfree(data);
                        ret = -EFAULT;
                        break;
                    }

                    printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data);

                    spin_lock_irqsave(&sc->lmc_lock, flags);
                    lmc_gpio_mkinput(sc, 0xff);

                    /*
                     * Clear the Xilinx and start programming from the DEC
                     */

                    /*
                     * Set output as:
                     * Reset: 0 (active)
                     * DP:    0 (active)
                     * Mode:  1
                     */
                    sc->lmc_gpio = 0x00;
                    sc->lmc_gpio &= ~LMC_GEP_DP;
                    sc->lmc_gpio &= ~LMC_GEP_RESET;
                    sc->lmc_gpio |= LMC_GEP_MODE;
                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

                    lmc_gpio_mkoutput(sc, LMC_GEP_MODE | LMC_GEP_DP | LMC_GEP_RESET);

                    /*
                     * Wait at least 10 us 20 to be safe
                     */
                    udelay(50);

                    /*
                     * Clear reset and activate programming lines
                     * Reset: Input
                     * DP:    Input
                     * Mode:  1
                     */
                    lmc_gpio_mkinput(sc, LMC_GEP_DP | LMC_GEP_RESET);

                    /*
                     * Set LOAD, DATA, Clock to 1
                     */
                    sc->lmc_gpio = 0x00;
                    sc->lmc_gpio |= LMC_GEP_MODE;
                    sc->lmc_gpio |= LMC_GEP_DATA;
                    sc->lmc_gpio |= LMC_GEP_CLK;
                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

                    lmc_gpio_mkoutput(sc, LMC_GEP_DATA | LMC_GEP_CLK | LMC_GEP_MODE);

                    /*
                     * busy wait for the chip to reset
                     */
                    while ((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
                           (timeout-- > 0))
                        ;

                    printk(KERN_DEBUG "%s: Waited %d for the Xilinx to clear it's memory\n", dev->name, 500000 - timeout);

                    for (pos = 0; pos < xc.len; pos++) {
                        switch (data[pos]) {
                        case 0:
                            sc->lmc_gpio &= ~LMC_GEP_DATA; /* Data is 0 */
                            break;
                        case 1:
                            sc->lmc_gpio |= LMC_GEP_DATA; /* Data is 1 */
                            break;
                        default:
                            printk(KERN_WARNING "%s Bad data in xilinx programming data at %d, got %d wanted 0 or 1\n", dev->name, pos, data[pos]);
                            sc->lmc_gpio |= LMC_GEP_DATA; /* Assume it's 1 */
                        }
                        sc->lmc_gpio &= ~LMC_GEP_CLK; /* Clock to zero */
                        sc->lmc_gpio |= LMC_GEP_MODE;
                        LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

                        sc->lmc_gpio |= LMC_GEP_CLK; /* Put the clock back to one */
                        sc->lmc_gpio |= LMC_GEP_MODE;
                        LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
                    }
                    if ((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0) {
                        printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (corrupted data)\n", dev->name);
                    }
                    else if ((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_DP) == 0) {
                        printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (done)\n", dev->name);
                    }
                    else {
                        printk(KERN_DEBUG "%s: Done reprogramming Xilinx, %d bits, good luck!\n", dev->name, pos);
                    }

                    lmc_gpio_mkinput(sc, 0xff);

                    sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
                    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);

                    sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
                    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
                    spin_unlock_irqrestore(&sc->lmc_lock, flags);

                    kfree(data);

                    ret = 0;
                }
                break;
            default: /*fold02*/
                ret = -EBADE;
                break;
            }

            netif_wake_queue(dev);
        }
        break;
    default: /*fold01*/
        /* If we don't know what to do, give the protocol a shot. */
        ret = lmc_proto_ioctl(sc, ifr, cmd);
        break;
    }

    lmc_trace(dev, "lmc_ioctl out");

    return ret;
}
/* the watchdog process that cruises around */
static void lmc_watchdog(unsigned long data) /*fold00*/
{
    struct net_device *dev = (struct net_device *)data;
    lmc_softc_t *sc = dev_to_sc(dev);
    int link_status;
    u32 ticks;
    unsigned long flags;

    lmc_trace(dev, "lmc_watchdog in");

    spin_lock_irqsave(&sc->lmc_lock, flags);

    if (sc->check != 0xBEAFCAFE) {
        printk("LMC: Corrupt net_device struct, breaking out\n");
        spin_unlock_irqrestore(&sc->lmc_lock, flags);
        return;
    }

    /* Make sure the tx jabber and rx watchdog are off,
     * and the transmit and receive processes are running.
     */
    LMC_CSR_WRITE(sc, csr_15, 0x00000011);
    sc->lmc_cmdmode |= TULIP_CMD_TXRUN | TULIP_CMD_RXRUN;
    LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);

    if (sc->lmc_ok == 0)
        goto kick_timer;

    LMC_EVENT_LOG(LMC_EVENT_WATCHDOG, LMC_CSR_READ(sc, csr_status), lmc_mii_readreg(sc, 0, 16));

    /* --- begin time out check -----------------------------------
     * check for a transmit interrupt timeout
     * Has the packet xmt vs xmt serviced threshold been exceeded */
    if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
        sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
        sc->tx_TimeoutInd == 0)
    {
        /* wait for the watchdog to come around again */
        sc->tx_TimeoutInd = 1;
    }
    else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
             sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
             sc->tx_TimeoutInd)
    {
        LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ(sc, csr_status), 0);

        sc->tx_TimeoutDisplay = 1;
        sc->extra_stats.tx_TimeoutCnt++;

        /* DEC chip is stuck, hit it with a RESET!!!! */
        lmc_running_reset(dev);

        /* look at receive & transmit process state to make sure they are running */
        LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);

        /* look at: DSR - 02 for Reg 16 */
        LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16), lmc_mii_readreg(sc, 0, 17));

        /* reset the transmit timeout detection flag */
        sc->tx_TimeoutInd = 0;
        sc->lastlmc_taint_tx = sc->lmc_taint_tx;
        sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
    }
    else
    {
        sc->tx_TimeoutInd = 0;
        sc->lastlmc_taint_tx = sc->lmc_taint_tx;
        sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
    }

    /* --- end time out check ----------------------------------- */

    link_status = sc->lmc_media->get_link_status(sc);

    /*
     * hardware level link lost, but the interface is marked as up.
     * Mark it as down.
     */
    if ((link_status == 0) && (sc->last_link_status != 0)) {
        printk(KERN_WARNING "%s: hardware/physical link down\n", dev->name);
        sc->last_link_status = 0;
        /* lmc_reset (sc); Why reset??? The link can go down ok */

        /* Inform the world that link has been lost */
        netif_carrier_off(dev);
    }

    /*
     * hardware link is up, but the interface is marked as down.
     * Bring it back up again.
     */
    if (link_status != 0 && sc->last_link_status == 0) {
        printk(KERN_WARNING "%s: hardware/physical link up\n", dev->name);
        sc->last_link_status = 1;
        /* lmc_reset (sc); Again why reset??? */

        netif_carrier_on(dev);
    }

    /* Call media specific watchdog functions */
    sc->lmc_media->watchdog(sc);

    /*
     * Poke the transmitter to make sure it
     * never stops, even if we run out of mem
     */
    LMC_CSR_WRITE(sc, csr_rxpoll, 0);

    /*
     * Check for code that failed
     * and try and fix it as appropriate
     */
    if (sc->failed_ring == 1) {
        /*
         * Failed to setup the recv/xmit ring
         * Try again
         */
        sc->failed_ring = 0;
        lmc_softreset(sc);
    }
    if (sc->failed_recv_alloc == 1) {
        /*
         * We failed to alloc mem in the
         * interrupt handler, go through the rings
         * and rebuild them
         */
        sc->failed_recv_alloc = 0;
        lmc_softreset(sc);
    }

    /*
     * remember the timer value
     */
kick_timer:

    ticks = LMC_CSR_READ(sc, csr_gp_timer);
    LMC_CSR_WRITE(sc, csr_gp_timer, 0xffffffffUL);
    sc->ictl.ticks = 0x0000ffff - (ticks & 0x0000ffff);

    /*
     * restart this timer.
     */
    sc->timer.expires = jiffies + (HZ);
    add_timer(&sc->timer);

    spin_unlock_irqrestore(&sc->lmc_lock, flags);

    lmc_trace(dev, "lmc_watchdog out");
}
static int lmc_attach(struct net_device *dev, unsigned short encoding,
                      unsigned short parity)
{
    if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
        return 0;
    return -EINVAL;
}
static const struct net_device_ops lmc_ops = {
    .ndo_open       = lmc_open,
    .ndo_stop       = lmc_close,
    .ndo_change_mtu = hdlc_change_mtu,
    .ndo_start_xmit = hdlc_start_xmit,
    .ndo_do_ioctl   = lmc_ioctl,
    .ndo_tx_timeout = lmc_driver_timeout,
    .ndo_get_stats  = lmc_get_stats,
};
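/*
 * Note: .ndo_start_xmit goes through the generic hdlc_start_xmit(), which
 * hands the frame back to this driver via the dev_to_hdlc(dev)->xmit hook
 * that lmc_init_one() points at lmc_start_xmit().
 */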
static int __devinit lmc_init_one(struct pci_dev *pdev,
                                  const struct pci_device_id *ent)
{
    lmc_softc_t *sc;
    struct net_device *dev;
    u16 subdevice;
    u16 AdapModelNum;
    int err;
    static int cards_found;

    /* lmc_trace(dev, "lmc_init_one in"); */

    err = pci_enable_device(pdev);
    if (err) {
        printk(KERN_ERR "lmc: pci enable failed: %d\n", err);
        return err;
    }

    err = pci_request_regions(pdev, "lmc");
    if (err) {
        printk(KERN_ERR "lmc: pci_request_region failed\n");
        goto err_req_io;
    }

    /*
     * Allocate our own device structure
     */
    sc = kzalloc(sizeof(lmc_softc_t), GFP_KERNEL);
    if (!sc) {
        err = -ENOMEM;
        goto err_kzalloc;
    }

    dev = alloc_hdlcdev(sc);
    if (!dev) {
        printk(KERN_ERR "lmc:alloc_netdev for device failed\n");
        err = -ENOMEM;
        goto err_hdlcdev;
    }

    dev->type = ARPHRD_HDLC;
    dev_to_hdlc(dev)->xmit = lmc_start_xmit;
    dev_to_hdlc(dev)->attach = lmc_attach;
    dev->netdev_ops = &lmc_ops;
    dev->watchdog_timeo = HZ; /* 1 second */
    dev->tx_queue_len = 100;
    sc->lmc_device = dev;
    sc->name = dev->name;
    sc->if_type = LMC_PPP;
    sc->check = 0xBEAFCAFE;
    dev->base_addr = pci_resource_start(pdev, 0);
    dev->irq = pdev->irq;
    pci_set_drvdata(pdev, dev);
    SET_NETDEV_DEV(dev, &pdev->dev);

    /*
     * This will get the protocol layer ready and do any 1 time init's
     * Must have a valid sc and dev structure
     */
    lmc_proto_attach(sc);

    /* Init the spin lock so can call it latter */

    spin_lock_init(&sc->lmc_lock);
    pci_set_master(pdev);

    printk(KERN_INFO "%s: detected at %lx, irq %d\n", dev->name,
           dev->base_addr, dev->irq);

    err = register_hdlc_device(dev);
    if (err) {
        printk(KERN_ERR "%s: register_netdev failed.\n", dev->name);
        free_netdev(dev);
        goto err_hdlcdev;
    }

    sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN;
    sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;

    /*
     * Check either the subvendor or the subdevice, some systems reverse
     * the setting in the bios, seems to be version and arch dependent?
     * Fix the error, exchange the two values
     */
    if ((subdevice = pdev->subsystem_device) == PCI_VENDOR_ID_LMC)
        subdevice = pdev->subsystem_vendor;

    switch (subdevice) {
    case PCI_DEVICE_ID_LMC_HSSI:
        printk(KERN_INFO "%s: LMC HSSI\n", dev->name);
        sc->lmc_cardtype = LMC_CARDTYPE_HSSI;
        sc->lmc_media = &lmc_hssi_media;
        break;
    case PCI_DEVICE_ID_LMC_DS3:
        printk(KERN_INFO "%s: LMC DS3\n", dev->name);
        sc->lmc_cardtype = LMC_CARDTYPE_DS3;
        sc->lmc_media = &lmc_ds3_media;
        break;
    case PCI_DEVICE_ID_LMC_SSI:
        printk(KERN_INFO "%s: LMC SSI\n", dev->name);
        sc->lmc_cardtype = LMC_CARDTYPE_SSI;
        sc->lmc_media = &lmc_ssi_media;
        break;
    case PCI_DEVICE_ID_LMC_T1:
        printk(KERN_INFO "%s: LMC T1\n", dev->name);
        sc->lmc_cardtype = LMC_CARDTYPE_T1;
        sc->lmc_media = &lmc_t1_media;
        break;
    default:
        printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name);
        break;
    }

    lmc_initcsrs(sc, dev->base_addr, 8);

    lmc_gpio_mkinput(sc, 0xff);
    sc->lmc_gpio = 0; /* drive no signals yet */

    sc->lmc_media->defaults(sc);

    sc->lmc_media->set_link_status(sc, LMC_LINK_UP);

    /* verify that the PCI Sub System ID matches the Adapter Model number
     * from the MII register
     */
    AdapModelNum = (lmc_mii_readreg(sc, 0, 3) & 0x3f0) >> 4;

    if ((AdapModelNum != LMC_ADAP_T1 || /* detect LMC1200 */
         subdevice != PCI_DEVICE_ID_LMC_T1) &&
        (AdapModelNum != LMC_ADAP_SSI || /* detect LMC1000 */
         subdevice != PCI_DEVICE_ID_LMC_SSI) &&
        (AdapModelNum != LMC_ADAP_DS3 || /* detect LMC5245 */
         subdevice != PCI_DEVICE_ID_LMC_DS3) &&
        (AdapModelNum != LMC_ADAP_HSSI || /* detect LMC5200 */
         subdevice != PCI_DEVICE_ID_LMC_HSSI))
        printk(KERN_WARNING "%s: Model number (%d) miscompare for PCI"
               " Subsystem ID = 0x%04x\n",
               dev->name, AdapModelNum, subdevice);

    LMC_CSR_WRITE(sc, csr_gp_timer, 0xFFFFFFFFUL);

    sc->board_idx = cards_found++;
    sc->extra_stats.check = STATCHECK;
    sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
        sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
    sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;

    sc->lmc_ok = 0;
    sc->last_link_status = 0;

    lmc_trace(dev, "lmc_init_one out");
    return 0;

err_hdlcdev:
    pci_set_drvdata(pdev, NULL);
    kfree(sc);
err_kzalloc:
    pci_release_regions(pdev);
err_req_io:
    pci_disable_device(pdev);
    return err;
}
/*
 * Called from pci when removing module.
 */
static void __devexit lmc_remove_one(struct pci_dev *pdev)
{
    struct net_device *dev = pci_get_drvdata(pdev);

    if (dev) {
        printk(KERN_DEBUG "%s: removing...\n", dev->name);
        unregister_hdlc_device(dev);
        free_netdev(dev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
    }
}
/* After this is called, packets can be sent.
 * Does not initialize the addresses
 */
static int lmc_open(struct net_device *dev)
{
    lmc_softc_t *sc = dev_to_sc(dev);
    int err;

    lmc_trace(dev, "lmc_open in");

    lmc_led_on(sc, LMC_DS3_LED0);

    lmc_dec_reset(sc);
    lmc_reset(sc);

    LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);
    LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16),
                  lmc_mii_readreg(sc, 0, 17));

    if (sc->lmc_ok) {
        lmc_trace(dev, "lmc_open lmc_ok out");
        return 0;
    }

    lmc_softreset(sc);

    /* Since we have to use PCI bus, this should work on x86,alpha,ppc */
    if (request_irq(dev->irq, lmc_interrupt, IRQF_SHARED, dev->name, dev)) {
        printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq);
        lmc_trace(dev, "lmc_open irq failed out");
        return -EAGAIN;
    }
    sc->got_irq = 1;

    /* Assert Terminal Active */
    sc->lmc_miireg16 |= LMC_MII16_LED_ALL;
    sc->lmc_media->set_link_status(sc, LMC_LINK_UP);

    /*
     * reset to last state.
     */
    sc->lmc_media->set_status(sc, NULL);

    /* setup default bits to be used in tulip_desc_t transmit descriptor
     * -baz */
    sc->TxDescriptControlInit = (
                                 LMC_TDES_INTERRUPT_ON_COMPLETION
                                 | LMC_TDES_FIRST_SEGMENT
                                 | LMC_TDES_LAST_SEGMENT
                                 | LMC_TDES_SECOND_ADDR_CHAINED
                                 | LMC_TDES_DISABLE_PADDING
                                );

    if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16) {
        /* disable 32 bit CRC generated by ASIC */
        sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE;
    }
    sc->lmc_media->set_crc_length(sc, sc->ictl.crc_length);
    /* Acknowledge the Terminal Active and light LEDs */

    /* dev->flags |= IFF_UP; */

    if ((err = lmc_proto_open(sc)) != 0)
        return err;

    netif_start_queue(dev);
    sc->extra_stats.tx_tbusy0++;

    /*
     * select what interrupts we want to get
     */
    sc->lmc_intrmask = 0;
    /* Should be using the default interrupt mask defined in the .h file. */
    sc->lmc_intrmask |= (TULIP_STS_NORMALINTR
                         | TULIP_STS_RXINTR
                         | TULIP_STS_TXINTR
                         | TULIP_STS_ABNRMLINTR
                         | TULIP_STS_SYSERROR
                         | TULIP_STS_TXSTOPPED
                         | TULIP_STS_TXUNDERFLOW
                         | TULIP_STS_RXSTOPPED
                        );
    LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask);

    sc->lmc_cmdmode |= TULIP_CMD_TXRUN;
    sc->lmc_cmdmode |= TULIP_CMD_RXRUN;
    LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);

    sc->lmc_ok = 1; /* Run watchdog */

    /*
     * Set the if up now - pfb
     */

    sc->last_link_status = 1;

    /*
     * Setup a timer for the watchdog on probe, and start it running.
     * Since lmc_ok == 0, it will be a NOP for now.
     */
    init_timer(&sc->timer);
    sc->timer.expires = jiffies + HZ;
    sc->timer.data = (unsigned long) dev;
    sc->timer.function = lmc_watchdog;
    add_timer(&sc->timer);

    lmc_trace(dev, "lmc_open out");

    return 0;
}
/* Total reset to compensate for the AdTran DSU doing bad things
 * under heavy load
 */
static void lmc_running_reset(struct net_device *dev) /*fold00*/
{
    lmc_softc_t *sc = dev_to_sc(dev);

    lmc_trace(dev, "lmc_running_reset in");

    /* stop interrupts */
    /* Clear the interrupt mask */
    LMC_CSR_WRITE(sc, csr_intr, 0x00000000);

    lmc_dec_reset(sc);
    lmc_reset(sc);
    lmc_softreset(sc);
    /* sc->lmc_miireg16 |= LMC_MII16_LED_ALL; */
    sc->lmc_media->set_link_status(sc, 1);
    sc->lmc_media->set_status(sc, NULL);

    netif_wake_queue(dev);

    sc->lmc_txfull = 0;
    sc->extra_stats.tx_tbusy0++;

    sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK;
    LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask);

    sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN);
    LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);

    lmc_trace(dev, "lmc_running_reset out");
}
/* This is what is called when you ifconfig down a device.
 * This disables the timer for the watchdog and keepalives,
 * and disables the irq for dev.
 */
static int lmc_close(struct net_device *dev)
{
    /* not calling release_region() as we should */
    lmc_softc_t *sc = dev_to_sc(dev);

    lmc_trace(dev, "lmc_close in");

    sc->lmc_ok = 0;
    sc->lmc_media->set_link_status(sc, 0);
    del_timer(&sc->timer);
    lmc_proto_close(sc);
    lmc_ifdown(dev);

    lmc_trace(dev, "lmc_close out");

    return 0;
}
/* Ends the transfer of packets */
/* When the interface goes down, this is called */
static int lmc_ifdown(struct net_device *dev) /*fold00*/
{
    lmc_softc_t *sc = dev_to_sc(dev);
    u32 csr6;
    int i;

    lmc_trace(dev, "lmc_ifdown in");

    /* Don't let anything else go on right now */
    netif_stop_queue(dev);
    sc->extra_stats.tx_tbusy1++;

    /* stop interrupts */
    /* Clear the interrupt mask */
    LMC_CSR_WRITE(sc, csr_intr, 0x00000000);

    /* Stop Tx and Rx on the chip */
    csr6 = LMC_CSR_READ(sc, csr_command);
    csr6 &= ~LMC_DEC_ST; /* Turn off the Transmission bit */
    csr6 &= ~LMC_DEC_SR; /* Turn off the Receive bit */
    LMC_CSR_WRITE(sc, csr_command, csr6);

    sc->lmc_device->stats.rx_missed_errors +=
        LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;

    /* release the interrupt */
    if (sc->got_irq == 1) {
        free_irq(dev->irq, dev);
        sc->got_irq = 0;
    }

    /* free skbuffs in the Rx queue */
    for (i = 0; i < LMC_RXDESCS; i++)
    {
        struct sk_buff *skb = sc->lmc_rxq[i];
        sc->lmc_rxq[i] = NULL;
        sc->lmc_rxring[i].status = 0;
        sc->lmc_rxring[i].length = 0;
        sc->lmc_rxring[i].buffer1 = 0xDEADBEEF;
        if (skb != NULL)
            dev_kfree_skb(skb);
        sc->lmc_rxq[i] = NULL;
    }

    for (i = 0; i < LMC_TXDESCS; i++)
    {
        if (sc->lmc_txq[i] != NULL)
            dev_kfree_skb(sc->lmc_txq[i]);
        sc->lmc_txq[i] = NULL;
    }

    lmc_led_off(sc, LMC_MII16_LED_ALL);

    netif_wake_queue(dev);
    sc->extra_stats.tx_tbusy0++;

    lmc_trace(dev, "lmc_ifdown out");

    return 0;
}
/* Interrupt handling routine.  This will take an incoming packet, or clean
 * up after a transmit.
 */
static irqreturn_t lmc_interrupt(int irq, void *dev_instance) /*fold00*/
{
    struct net_device *dev = (struct net_device *) dev_instance;
    lmc_softc_t *sc = dev_to_sc(dev);
    u32 csr;
    u32 firstcsr = 0;
    u32 stat;
    u32 badtx;
    int i;
    unsigned int handled = 0;
    int max_work = LMC_RXDESCS;

    lmc_trace(dev, "lmc_interrupt in");

    spin_lock(&sc->lmc_lock);

    /*
     * Read the csr to find what interrupts we have (if any)
     */
    csr = LMC_CSR_READ(sc, csr_status);

    /*
     * Make sure this is our interrupt
     */
    if (!(csr & sc->lmc_intrmask)) {
        goto lmc_int_fail_out;
    }

    firstcsr = csr;

    /* always go through this loop at least once */
    while (csr & sc->lmc_intrmask) {
        handled = 1;

        /*
         * Clear interrupt bits, we handle all case below
         */
        LMC_CSR_WRITE(sc, csr_status, csr);

        /*
         * One of
         *  - Transmit process timed out CSR5<1>
         *  - Transmit jabber timeout    CSR5<3>
         *  - Transmit underflow         CSR5<5>
         *  - Transmit Receiver buffer unavailable CSR5<7>
         *  - Receive process stopped    CSR5<8>
         *  - Receive watchdog timeout   CSR5<9>
         *  - Early transmit interrupt   CSR5<10>
         *
         * Is this really right? Should we do a running reset for jabber?
         * (being a WAN card and all)
         */
        if (csr & TULIP_STS_ABNRMLINTR) {
            lmc_running_reset(dev);
            break;
        }

        if (csr & TULIP_STS_RXINTR) {
            lmc_trace(dev, "rx interrupt");
            lmc_rx(dev);
        }

        if (csr & (TULIP_STS_TXINTR | TULIP_STS_TXNOBUF | TULIP_STS_TXSTOPPED)) {

            int n_compl = 0;

            /* reset the transmit timeout detection flag -baz */
            sc->extra_stats.tx_NoCompleteCnt = 0;

            badtx = sc->lmc_taint_tx;
            i = badtx % LMC_TXDESCS;

            while ((badtx < sc->lmc_next_tx)) {
                stat = sc->lmc_txring[i].status;

                LMC_EVENT_LOG(LMC_EVENT_XMTINT, stat,
                              sc->lmc_txring[i].length);
                /*
                 * If bit 31 is 1 the tulip owns it break out of the loop
                 */
                if (stat & 0x80000000)
                    break;

                n_compl++; /* i.e., have an empty slot in ring */
                /*
                 * If we have no skbuff or have cleared it
                 * Already continue to the next buffer
                 */
                if (sc->lmc_txq[i] == NULL) {
                    badtx++;
                    i = badtx % LMC_TXDESCS;
                    continue;
                }

                /*
                 * Check the total error summary to look for any errors
                 */
                if (stat & 0x8000) {
                    sc->lmc_device->stats.tx_errors++;
                    if (stat & 0x4104)
                        sc->lmc_device->stats.tx_aborted_errors++;
                    if (stat & 0x0C00)
                        sc->lmc_device->stats.tx_carrier_errors++;
                    if (stat & 0x0200)
                        sc->lmc_device->stats.tx_window_errors++;
                    if (stat & 0x0002)
                        sc->lmc_device->stats.tx_fifo_errors++;
                } else {
                    sc->lmc_device->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff;

                    sc->lmc_device->stats.tx_packets++;
                }

                // dev_kfree_skb(sc->lmc_txq[i]);
                dev_kfree_skb_irq(sc->lmc_txq[i]);
                sc->lmc_txq[i] = NULL;

                badtx++;
                i = badtx % LMC_TXDESCS;
            }

            if (sc->lmc_next_tx - badtx > LMC_TXDESCS)
            {
                printk("%s: out of sync pointer\n", dev->name);
                badtx += LMC_TXDESCS;
            }
            LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0);
            sc->lmc_txfull = 0;
            netif_wake_queue(dev);
            sc->extra_stats.tx_tbusy0++;

            sc->extra_stats.dirtyTx = badtx;
            sc->extra_stats.lmc_next_tx = sc->lmc_next_tx;
            sc->extra_stats.lmc_txfull = sc->lmc_txfull;

            sc->lmc_taint_tx = badtx;

            /*
             * Why was there a break here???
             */
        } /* end handle transmit interrupt */

        if (csr & TULIP_STS_SYSERROR) {
            u32 error;
            printk(KERN_WARNING "%s: system bus error csr: %#8.8x\n", dev->name, csr);
            error = csr >> 23 & 0x7;
            switch (error) {
            case 0x000:
                printk(KERN_WARNING "%s: Parity Fault (bad)\n", dev->name);
                break;
            case 0x001:
                printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
                break;
            case 0x002:
                printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
                break;
            default:
                printk(KERN_WARNING "%s: This bus error code was supposed to be reserved!\n", dev->name);
            }

            lmc_dec_reset(sc);
            lmc_running_reset(dev);

            LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);
            LMC_EVENT_LOG(LMC_EVENT_RESET2,
                          lmc_mii_readreg(sc, 0, 16),
                          lmc_mii_readreg(sc, 0, 17));
            break;
        }

        if (max_work-- <= 0)
            break;

        /*
         * Get current csr status to make sure
         * we've cleared all interrupts
         */
        csr = LMC_CSR_READ(sc, csr_status);
    } /* end interrupt loop */
    LMC_EVENT_LOG(LMC_EVENT_INT, firstcsr, csr);

lmc_int_fail_out:

    spin_unlock(&sc->lmc_lock);

    lmc_trace(dev, "lmc_interrupt out");
    return IRQ_RETVAL(handled);
}
static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
                                  struct net_device *dev)
{
    lmc_softc_t *sc = dev_to_sc(dev);
    u32 flag;
    int entry;
    unsigned long flags;

    lmc_trace(dev, "lmc_start_xmit in");

    spin_lock_irqsave(&sc->lmc_lock, flags);

    /* normal path, tbusy known to be zero */

    entry = sc->lmc_next_tx % LMC_TXDESCS;

    sc->lmc_txq[entry] = skb;
    sc->lmc_txring[entry].buffer1 = virt_to_bus(skb->data);

    LMC_CONSOLE_LOG("xmit", skb->data, skb->len);

#ifndef GCOM
    /* If the queue is less than half full, don't interrupt */
    if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS / 2)
    {
        /* Do not interrupt on completion of this packet */
        flag = 0x60000000;
        netif_wake_queue(dev);
    }
    else if (sc->lmc_next_tx - sc->lmc_taint_tx == LMC_TXDESCS / 2)
    {
        /* This generates an interrupt on completion of this packet */
        flag = 0xe0000000;
        netif_wake_queue(dev);
    }
    else if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS - 1)
    {
        /* Do not interrupt on completion of this packet */
        flag = 0x60000000;
        netif_wake_queue(dev);
    }
    else
    {
        /* This generates an interrupt on completion of this packet */
        flag = 0xe0000000;
        sc->lmc_txfull = 1;
        netif_stop_queue(dev);
    }
#else
    flag = LMC_TDES_INTERRUPT_ON_COMPLETION;

    if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1)
    { /* ring full, go busy */
        sc->lmc_txfull = 1;
        netif_stop_queue(dev);
        sc->extra_stats.tx_tbusy1++;
        LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0);
    }
#endif

    if (entry == LMC_TXDESCS - 1) /* last descriptor in ring */
        flag |= LMC_TDES_END_OF_RING; /* flag as such for Tulip */

    /* don't pad small packets either */
    flag = sc->lmc_txring[entry].length = (skb->len) | flag |
        sc->TxDescriptControlInit;

    /* set the transmit timeout flag to be checked in
     * the watchdog timer handler. -baz
     */

    sc->extra_stats.tx_NoCompleteCnt++;
    sc->lmc_next_tx++;

    /* give ownership to the chip */
    LMC_EVENT_LOG(LMC_EVENT_XMT, flag, entry);
    sc->lmc_txring[entry].status = 0x80000000;

    /* send now! */
    LMC_CSR_WRITE(sc, csr_txpoll, 0);

    spin_unlock_irqrestore(&sc->lmc_lock, flags);

    lmc_trace(dev, "lmc_start_xmit_out");
    return NETDEV_TX_OK;
}
static int lmc_rx(struct net_device *dev)
{
    lmc_softc_t *sc = dev_to_sc(dev);
    int i;
    int rx_work_limit = LMC_RXDESCS;
    unsigned int next_rx;
    int rxIntLoopCnt; /* debug -baz */
    int localLengthErrCnt = 0;
    u32 stat;
    u16 len;
    struct sk_buff *skb, *nsb;

    lmc_trace(dev, "lmc_rx in");

    lmc_led_on(sc, LMC_DS3_LED3);

    rxIntLoopCnt = 0; /* debug -baz */

    i = sc->lmc_next_rx % LMC_RXDESCS;
    next_rx = sc->lmc_next_rx;

    while (((stat = sc->lmc_rxring[i].status) & LMC_RDES_OWN_BIT) != DESC_OWNED_BY_DC21X4)
    {
        rxIntLoopCnt++; /* debug -baz */
        len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER);
        if ((stat & 0x0300) != 0x0300) { /* Check first segment and last segment */
            if ((stat & 0x0000ffff) != 0x7fff) {
                /* Oversized frame */
                sc->lmc_device->stats.rx_length_errors++;
                goto skip_packet;
            }
        }

        if (stat & 0x00000008) { /* Catch a dribbling bit error */
            sc->lmc_device->stats.rx_errors++;
            sc->lmc_device->stats.rx_frame_errors++;
            goto skip_packet;
        }

        if (stat & 0x00000004) { /* Catch a CRC error by the Xilinx */
            sc->lmc_device->stats.rx_errors++;
            sc->lmc_device->stats.rx_crc_errors++;
            goto skip_packet;
        }

        if (len > LMC_PKT_BUF_SZ) {
            sc->lmc_device->stats.rx_length_errors++;
            localLengthErrCnt++;
            goto skip_packet;
        }

        if (len < sc->lmc_crcSize + 2) {
            sc->lmc_device->stats.rx_length_errors++;
            sc->extra_stats.rx_SmallPktCnt++;
            localLengthErrCnt++;
            goto skip_packet;
        }

        if (stat & 0x00004000) {
            printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name);
        }

        len -= sc->lmc_crcSize;

        skb = sc->lmc_rxq[i];

        /*
         * We ran out of memory at some point
         * just allocate an skb buff and continue.
         */

        if (!skb) {
            nsb = dev_alloc_skb(LMC_PKT_BUF_SZ + 2);
            if (nsb) {
                sc->lmc_rxq[i] = nsb;
                nsb->dev = dev;
                sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
            }
            sc->failed_recv_alloc = 1;
            goto skip_packet;
        }

        sc->lmc_device->stats.rx_packets++;
        sc->lmc_device->stats.rx_bytes += len;

        LMC_CONSOLE_LOG("recv", skb->data, len);

        /*
         * I'm not sure of the sanity of this
         * Packets could be arriving at a constant
         * 44.210mbits/sec and we're going to copy
         * them into a new buffer??
         */

        if (len > (LMC_MTU - (LMC_MTU>>2))) { /* len > LMC_MTU * 0.75 */
            /*
             * If it's a large packet don't copy it just hand it up
             */
give_it_anyways:

            sc->lmc_rxq[i] = NULL;
            sc->lmc_rxring[i].buffer1 = 0x0;

            skb_put(skb, len);
            skb->protocol = lmc_proto_type(sc, skb);
            skb_reset_mac_header(skb);
            /* skb_reset_network_header(skb); */
            skb->dev = dev;
            lmc_proto_netif(sc, skb);

            /*
             * This skb will be destroyed by the upper layers, make a new one
             */
            nsb = dev_alloc_skb(LMC_PKT_BUF_SZ + 2);
            if (nsb) {
                sc->lmc_rxq[i] = nsb;
                nsb->dev = dev;
                sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
                /* Transferred to 21140 below */
            }
            else {
                /*
                 * We've run out of memory, stop trying to allocate
                 * memory and exit the interrupt handler
                 *
                 * The chip may run out of receivers and stop
                 * in which case we'll try to allocate the buffer
                 * again. (once a second)
                 */
                sc->extra_stats.rx_BuffAllocErr++;
                LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
                sc->failed_recv_alloc = 1;
                goto skip_out_of_mem;
            }
        }
        else {
            nsb = dev_alloc_skb(len);
            if (!nsb) {
                goto give_it_anyways;
            }
            skb_copy_from_linear_data(skb, skb_put(nsb, len), len);

            nsb->protocol = lmc_proto_type(sc, nsb);
            skb_reset_mac_header(nsb);
            /* skb_reset_network_header(nsb); */
            nsb->dev = dev;
            lmc_proto_netif(sc, nsb);
        }

    skip_packet:
        LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
        sc->lmc_rxring[i].status = DESC_OWNED_BY_DC21X4;

        sc->lmc_next_rx++;
        i = sc->lmc_next_rx % LMC_RXDESCS;
        rx_work_limit--;
        if (rx_work_limit < 0)
            break;
    }

    /* detect condition for LMC1000 where DSU cable attaches and fills
     * descriptors with bogus packets
     */
    if (localLengthErrCnt > LMC_RXDESCS - 3) {
        sc->extra_stats.rx_BadPktSurgeCnt++;
        LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, localLengthErrCnt,
                      sc->extra_stats.rx_BadPktSurgeCnt);
    }

    /* save max count of receive descriptors serviced */
    if (rxIntLoopCnt > sc->extra_stats.rxIntLoopCnt)
        sc->extra_stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */

#ifdef DEBUG
    if (rxIntLoopCnt == 0)
    {
        for (i = 0; i < LMC_RXDESCS; i++)
        {
            if ((sc->lmc_rxring[i].status & LMC_RDES_OWN_BIT)
                != DESC_OWNED_BY_DC21X4)
            {
                rxIntLoopCnt++;
            }
        }
        LMC_EVENT_LOG(LMC_EVENT_RCVEND, rxIntLoopCnt, 0);
    }
#endif

skip_out_of_mem:

    lmc_led_off(sc, LMC_DS3_LED3);

    lmc_trace(dev, "lmc_rx out");

    return 0;
}
static struct net_device_stats *lmc_get_stats(struct net_device *dev)
{
    lmc_softc_t *sc = dev_to_sc(dev);
    unsigned long flags;

    lmc_trace(dev, "lmc_get_stats in");

    spin_lock_irqsave(&sc->lmc_lock, flags);

    sc->lmc_device->stats.rx_missed_errors += LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;

    spin_unlock_irqrestore(&sc->lmc_lock, flags);

    lmc_trace(dev, "lmc_get_stats out");

    return &sc->lmc_device->stats;
}
static struct pci_driver lmc_driver = {
    .name      = "lmc",
    .id_table  = lmc_pci_tbl,
    .probe     = lmc_init_one,
    .remove    = __devexit_p(lmc_remove_one),
};

static int __init init_lmc(void)
{
    return pci_register_driver(&lmc_driver);
}

static void __exit exit_lmc(void)
{
    pci_unregister_driver(&lmc_driver);
}

module_init(init_lmc);
module_exit(exit_lmc);
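/*
 * MII register access is bit-banged through CSR9 (the 21140 serial/MII
 * port): each routine below clocks a read or write frame out one bit at a
 * time, which is why the loops touch csr_9 twice per bit.
 */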
unsigned lmc_mii_readreg(lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/
{
    int i;
    int command = (0xf6 << 10) | (devaddr << 5) | regno;
    int retval = 0;

    lmc_trace(sc->lmc_device, "lmc_mii_readreg in");

    LMC_MII_SYNC(sc);

    lmc_trace(sc->lmc_device, "lmc_mii_readreg: done sync");

    for (i = 15; i >= 0; i--)
    {
        int dataval = (command & (1 << i)) ? 0x20000 : 0;

        LMC_CSR_WRITE(sc, csr_9, dataval);
        lmc_delay();
        /* __SLOW_DOWN_IO; */
        LMC_CSR_WRITE(sc, csr_9, dataval | 0x10000);
        lmc_delay();
        /* __SLOW_DOWN_IO; */
    }

    lmc_trace(sc->lmc_device, "lmc_mii_readreg: done1");

    for (i = 19; i > 0; i--)
    {
        LMC_CSR_WRITE(sc, csr_9, 0x40000);
        lmc_delay();
        /* __SLOW_DOWN_IO; */
        retval = (retval << 1) | ((LMC_CSR_READ(sc, csr_9) & 0x80000) ? 1 : 0);
        LMC_CSR_WRITE(sc, csr_9, 0x40000 | 0x10000);
        lmc_delay();
        /* __SLOW_DOWN_IO; */
    }

    lmc_trace(sc->lmc_device, "lmc_mii_readreg out");

    return (retval >> 1) & 0xffff;
}
void lmc_mii_writereg(lmc_softc_t * const sc, unsigned devaddr, unsigned regno, unsigned data) /*fold00*/
{
    int i;
    int command = (0x5002 << 16) | (devaddr << 23) | (regno << 18) | data;

    lmc_trace(sc->lmc_device, "lmc_mii_writereg in");

    LMC_MII_SYNC(sc);

    i = 31;
    while (i >= 0)
    {
        int datav;

        if (command & (1 << i))
            datav = 0x20000;
        else
            datav = 0x00000;

        LMC_CSR_WRITE(sc, csr_9, datav);
        lmc_delay();
        /* __SLOW_DOWN_IO; */
        LMC_CSR_WRITE(sc, csr_9, (datav | 0x10000));
        lmc_delay();
        /* __SLOW_DOWN_IO; */
        i--;
    }

    i = 2;
    while (i > 0)
    {
        LMC_CSR_WRITE(sc, csr_9, 0x40000);
        lmc_delay();
        /* __SLOW_DOWN_IO; */
        LMC_CSR_WRITE(sc, csr_9, 0x50000);
        lmc_delay();
        /* __SLOW_DOWN_IO; */
        i--;
    }

    lmc_trace(sc->lmc_device, "lmc_mii_writereg out");
}
static void lmc_softreset(lmc_softc_t * const sc) /*fold00*/
{
    int i;

    lmc_trace(sc->lmc_device, "lmc_softreset in");

    /* Initialize the receive rings and buffers. */
    sc->lmc_txfull = 0;
    sc->lmc_next_rx = 0;
    sc->lmc_next_tx = 0;
    sc->lmc_taint_rx = 0;
    sc->lmc_taint_tx = 0;

    /*
     * Setup each one of the receiver buffers
     * allocate an skbuff for each one, setup the descriptor table
     * and point each buffer at the next one
     */

    for (i = 0; i < LMC_RXDESCS; i++)
    {
        struct sk_buff *skb;

        if (sc->lmc_rxq[i] == NULL)
        {
            skb = dev_alloc_skb(LMC_PKT_BUF_SZ + 2);
            if (skb == NULL) {
                printk(KERN_WARNING "%s: Failed to allocate receiver ring, will try again\n", sc->name);
                sc->failed_ring = 1;
                break;
            }
            else {
                sc->lmc_rxq[i] = skb;
            }
        }
        else
        {
            skb = sc->lmc_rxq[i];
        }

        skb->dev = sc->lmc_device;

        /* owned by 21140 */
        sc->lmc_rxring[i].status = 0x80000000;

        /* used to be PKT_BUF_SZ now uses skb since we lose some to head room */
        sc->lmc_rxring[i].length = skb_tailroom(skb);

        /* use to be tail which is dumb since you're thinking why write
         * to the end of the packet but since there's nothing there tail == data
         */
        sc->lmc_rxring[i].buffer1 = virt_to_bus(skb->data);

        /* This is fair since the structure is static and we have the next address */
        sc->lmc_rxring[i].buffer2 = virt_to_bus(&sc->lmc_rxring[i + 1]);

    }

    /*
     * Sets end of ring
     */
    sc->lmc_rxring[i - 1].length |= 0x02000000; /* Set end of buffers flag */
    sc->lmc_rxring[i - 1].buffer2 = virt_to_bus(&sc->lmc_rxring[0]); /* Point back to the start */
    LMC_CSR_WRITE(sc, csr_rxlist, virt_to_bus(sc->lmc_rxring)); /* write base address */

    /* Initialize the transmit rings and buffers */
    for (i = 0; i < LMC_TXDESCS; i++)
    {
        if (sc->lmc_txq[i] != NULL) { /* have buffer */
            dev_kfree_skb(sc->lmc_txq[i]); /* free it */
            sc->lmc_device->stats.tx_dropped++; /* We just dropped a packet */
        }
        sc->lmc_txq[i] = NULL;
        sc->lmc_txring[i].status = 0x00000000;
        sc->lmc_txring[i].buffer2 = virt_to_bus(&sc->lmc_txring[i + 1]);
    }
    sc->lmc_txring[i - 1].buffer2 = virt_to_bus(&sc->lmc_txring[0]);
    LMC_CSR_WRITE(sc, csr_txlist, virt_to_bus(sc->lmc_txring));

    lmc_trace(sc->lmc_device, "lmc_softreset out");
}
void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits) /*fold00*/
{
    lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in");
    sc->lmc_gpio_io &= ~bits;
    LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
    lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out");
}

void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits) /*fold00*/
{
    lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in");
    sc->lmc_gpio_io |= bits;
    LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
    lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out");
}
void lmc_led_on(lmc_softc_t * const sc, u32 led) /*fold00*/
{
    lmc_trace(sc->lmc_device, "lmc_led_on in");
    if ((~sc->lmc_miireg16) & led) { /* Already on! */
        lmc_trace(sc->lmc_device, "lmc_led_on aon out");
        return;
    }

    sc->lmc_miireg16 &= ~led;
    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
    lmc_trace(sc->lmc_device, "lmc_led_on out");
}

void lmc_led_off(lmc_softc_t * const sc, u32 led) /*fold00*/
{
    lmc_trace(sc->lmc_device, "lmc_led_off in");
    if (sc->lmc_miireg16 & led) { /* Already set don't do anything */
        lmc_trace(sc->lmc_device, "lmc_led_off aoff out");
        return;
    }

    sc->lmc_miireg16 |= led;
    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
    lmc_trace(sc->lmc_device, "lmc_led_off out");
}
static void lmc_reset(lmc_softc_t * const sc) /*fold00*/
{
    lmc_trace(sc->lmc_device, "lmc_reset in");
    sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);

    sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);

    /*
     * make some of the GPIO pins be outputs
     */
    lmc_gpio_mkoutput(sc, LMC_GEP_RESET);

    /*
     * RESET low to force state reset.  This also forces
     * the transmitter clock to be internal, but we expect to reset
     * that later anyway.
     */
    sc->lmc_gpio &= ~(LMC_GEP_RESET);
    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

    /*
     * hold for more than 10 microseconds
     */
    udelay(50);

    /*
     * stop driving Xilinx-related signals
     */
    lmc_gpio_mkinput(sc, LMC_GEP_RESET);

    /*
     * Call media specific init routine
     */
    sc->lmc_media->init(sc);

    sc->extra_stats.resetCount++;
    lmc_trace(sc->lmc_device, "lmc_reset out");
}
static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/
{
    u32 val;

    lmc_trace(sc->lmc_device, "lmc_dec_reset in");

    /*
     * disable all interrupts
     */
    sc->lmc_intrmask = 0;
    LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask);

    /*
     * Reset the chip with a software reset command.
     * Wait 10 microseconds (actually 50 PCI cycles but at
     * 33MHz that comes to two microseconds but wait a
     * bit longer anyways)
     */
    LMC_CSR_WRITE(sc, csr_busmode, TULIP_BUSMODE_SWRESET);
    udelay(25);

    sc->lmc_busmode = LMC_CSR_READ(sc, csr_busmode);
    sc->lmc_busmode = 0x00100000;
    sc->lmc_busmode &= ~TULIP_BUSMODE_SWRESET;
    LMC_CSR_WRITE(sc, csr_busmode, sc->lmc_busmode);

    sc->lmc_cmdmode = LMC_CSR_READ(sc, csr_command);

    /*
     * We want:
     *   no ethernet address in frames we write
     *   disable padding (txdesc, padding disable)
     *   ignore runt frames (rdes0 bit 15)
     *   no receiver watchdog or transmitter jabber timer
     *       (csr15 bit 0,14 == 1)
     *   if using 16-bit CRC, turn off CRC (trans desc, crc disable)
     */

    sc->lmc_cmdmode |= ( TULIP_CMD_PROMISCUOUS
                         | TULIP_CMD_FULLDUPLEX
                         | TULIP_CMD_PASSBADPKT
                         | TULIP_CMD_NOHEARTBEAT
                         | TULIP_CMD_PORTSELECT
                         | TULIP_CMD_RECEIVEALL
                         | TULIP_CMD_MUSTBEONE
                       );
    sc->lmc_cmdmode &= ~( TULIP_CMD_OPERMODE
                          | TULIP_CMD_THRESHOLDCTL
                          | TULIP_CMD_STOREFWD
                          | TULIP_CMD_TXTHRSHLDCTL
                        );

    LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);

    /*
     * disable receiver watchdog and transmit jabber
     */
    val = LMC_CSR_READ(sc, csr_sia_general);
    val |= (TULIP_WATCHDOG_TXDISABLE | TULIP_WATCHDOG_RXDISABLE);
    LMC_CSR_WRITE(sc, csr_sia_general, val);

    lmc_trace(sc->lmc_device, "lmc_dec_reset out");
}
static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00*/
                         size_t csr_size)
{
    lmc_trace(sc->lmc_device, "lmc_initcsrs in");
    sc->lmc_csrs.csr_busmode        = csr_base +  0 * csr_size;
    sc->lmc_csrs.csr_txpoll         = csr_base +  1 * csr_size;
    sc->lmc_csrs.csr_rxpoll         = csr_base +  2 * csr_size;
    sc->lmc_csrs.csr_rxlist         = csr_base +  3 * csr_size;
    sc->lmc_csrs.csr_txlist         = csr_base +  4 * csr_size;
    sc->lmc_csrs.csr_status         = csr_base +  5 * csr_size;
    sc->lmc_csrs.csr_command        = csr_base +  6 * csr_size;
    sc->lmc_csrs.csr_intr           = csr_base +  7 * csr_size;
    sc->lmc_csrs.csr_missed_frames  = csr_base +  8 * csr_size;
    sc->lmc_csrs.csr_9              = csr_base +  9 * csr_size;
    sc->lmc_csrs.csr_10             = csr_base + 10 * csr_size;
    sc->lmc_csrs.csr_11             = csr_base + 11 * csr_size;
    sc->lmc_csrs.csr_12             = csr_base + 12 * csr_size;
    sc->lmc_csrs.csr_13             = csr_base + 13 * csr_size;
    sc->lmc_csrs.csr_14             = csr_base + 14 * csr_size;
    sc->lmc_csrs.csr_15             = csr_base + 15 * csr_size;
    lmc_trace(sc->lmc_device, "lmc_initcsrs out");
}
static void lmc_driver_timeout(struct net_device *dev)
{
    lmc_softc_t *sc = dev_to_sc(dev);
    u32 csr6;
    unsigned long flags;

    lmc_trace(dev, "lmc_driver_timeout in");

    spin_lock_irqsave(&sc->lmc_lock, flags);

    printk("%s: Xmitter busy|\n", dev->name);

    sc->extra_stats.tx_tbusy_calls++;
    if (jiffies - dev_trans_start(dev) < TX_TIMEOUT)
        goto bug_out;

    /*
     * Chip seems to have locked up
     * Reset it
     * This whips out all our descriptor
     * table and starts from scratch
     */

    LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO,
                  LMC_CSR_READ(sc, csr_status),
                  sc->extra_stats.tx_ProcTimeout);

    lmc_running_reset(dev);

    LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);
    LMC_EVENT_LOG(LMC_EVENT_RESET2,
                  lmc_mii_readreg(sc, 0, 16),
                  lmc_mii_readreg(sc, 0, 17));

    /* restart the tx processes */
    csr6 = LMC_CSR_READ(sc, csr_command);
    LMC_CSR_WRITE(sc, csr_command, csr6 | 0x0002);
    LMC_CSR_WRITE(sc, csr_command, csr6 | 0x2002);

    /* immediate transmit */
    LMC_CSR_WRITE(sc, csr_txpoll, 0);

    sc->lmc_device->stats.tx_errors++;
    sc->extra_stats.tx_ProcTimeout++; /* -baz */

    dev->trans_start = jiffies; /* prevent tx timeout */

bug_out:

    spin_unlock_irqrestore(&sc->lmc_lock, flags);

    lmc_trace(dev, "lmc_driver_timeout out");
}