/*
 * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
 * All rights reserved.  www.lanmedia.com
 *
 * This code is written by:
 * Andrew Stanley-Jones (asj@cban.com)
 * Rob Braun (bbraun@vix.com),
 * Michael Graff (explorer@vix.com) and
 * Matt Thomas (matt@3am-software.com).
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License version 2, incorporated herein by reference.
 *
 * Driver for the LanMedia LMC5200, LMC5245, LMC1000 and LMC1200 cards.
 *
 * To control link-specific options, lmcctl is required.
 * It can be obtained from ftp.lanmedia.com.
 *
 * Linux uses the device struct lmc_private to pass private information
 * to the driver and device procedures.
 *
 * The initialization portion of this driver consists of the lmc_reset()
 * and lmc_dec_reset() functions, as well as the LED controls and the
 * lmc_initcsrs() function.
 *
 * The watchdog function runs every second and checks to see if
 * we still have link, and that the timing source is what we expected
 * it to be.  If link is lost, the interface is marked down, and
 * we can no longer transmit.
 */

/* $Id: lmc_main.c,v 1.36 2000/04/11 05:25:25 asj Exp $ */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>

#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/inet.h>
#include <linux/bitops.h>

#include <net/syncppp.h>

#include <asm/processor.h>             /* Processor type for cache alignment. */
#include <asm/uaccess.h>
//#include <asm/spinlock.h>
#define DRIVER_MAJOR_VERSION     1
#define DRIVER_MINOR_VERSION     34
#define DRIVER_SUB_VERSION       0

#define DRIVER_VERSION  ((DRIVER_MAJOR_VERSION << 8) + DRIVER_MINOR_VERSION)

#include "lmc_ioctl.h"
#include "lmc_debug.h"
#include "lmc_proto.h"
static int lmc_first_load = 0;

static int LMC_PKT_BUF_SZ = 1542;

static struct pci_device_id lmc_pci_tbl[] = {
    { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
      PCI_VENDOR_ID_LMC, PCI_ANY_ID },
    { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
      PCI_ANY_ID, PCI_VENDOR_ID_LMC },
    { 0 }
};

MODULE_DEVICE_TABLE(pci, lmc_pci_tbl);
MODULE_LICENSE("GPL");
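/*
 * Both match entries above are for the DEC 21140 ("fast" Tulip): an LMC WAN
 * card is told apart from an ordinary Tulip Ethernet board by the LMC PCI
 * subsystem vendor/device ID.  Some BIOSes report the subsystem vendor and
 * device fields swapped (see the probe routine below), hence the second entry.
 */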
static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev);
static int lmc_rx (struct net_device *dev);
static int lmc_open(struct net_device *dev);
static int lmc_close(struct net_device *dev);
static struct net_device_stats *lmc_get_stats(struct net_device *dev);
static irqreturn_t lmc_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, size_t csr_size);
static void lmc_softreset(lmc_softc_t * const);
static void lmc_running_reset(struct net_device *dev);
static int lmc_ifdown(struct net_device * const);
static void lmc_watchdog(unsigned long data);
static void lmc_reset(lmc_softc_t * const sc);
static void lmc_dec_reset(lmc_softc_t * const sc);
static void lmc_driver_timeout(struct net_device *dev);
/*
 * linux reserves 16 device specific IOCTLs.  We call them
 * LMCIOC* to control various bits of our world.
 */
int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
{
    lmc_trace(dev, "lmc_ioctl in");

    /*
     * Most functions mess with the structure.
     * Disable interrupts while we do the polling.
     */
    spin_lock_irqsave(&sc->lmc_lock, flags);

    switch (cmd) {
    /*
     * Return current driver state.  Since we keep this up
     * to date internally, just copy this out to the user.
     */
    case LMCIOCGINFO: /*fold01*/
        if (copy_to_user(ifr->ifr_data, &sc->ictl, sizeof(lmc_ctl_t)))

    case LMCIOCSINFO: /*fold01*/
        sp = &((struct ppp_device *) dev)->sppp;

        if (!capable(CAP_NET_ADMIN)) {

        if (dev->flags & IFF_UP) {

        if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t)))

        sc->lmc_media->set_status (sc, &ctl);

        if (ctl.crc_length != sc->ictl.crc_length) {
            sc->lmc_media->set_crc_length(sc, ctl.crc_length);
            if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16)
                sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE;
            else
                sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE;

        if (ctl.keepalive_onoff == LMC_CTL_OFF)
            sp->pp_flags &= ~PP_KEEPALIVE;      /* Turn off */
        else
            sp->pp_flags |= PP_KEEPALIVE;       /* Turn on */
    case LMCIOCIFTYPE: /*fold01*/
        u_int16_t old_type = sc->if_type;

        if (!capable(CAP_NET_ADMIN)) {

        if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u_int16_t)))

        if (new_type == old_type)
            break;                      /* no change */

        lmc_proto_detach(sc);

        sc->if_type = new_type;
//      lmc_proto_init(sc);
        lmc_proto_attach(sc);

    case LMCIOCGETXINFO: /*fold01*/
        sc->lmc_xinfo.Magic0 = 0xBEEFCAFE;

        sc->lmc_xinfo.PciCardType = sc->lmc_cardtype;
        sc->lmc_xinfo.PciSlotNumber = 0;
        sc->lmc_xinfo.DriverMajorVersion = DRIVER_MAJOR_VERSION;
        sc->lmc_xinfo.DriverMinorVersion = DRIVER_MINOR_VERSION;
        sc->lmc_xinfo.DriverSubVersion = DRIVER_SUB_VERSION;
        sc->lmc_xinfo.XilinxRevisionNumber =
            lmc_mii_readreg (sc, 0, 3) & 0xf;
        sc->lmc_xinfo.MaxFrameSize = LMC_PKT_BUF_SZ;
        sc->lmc_xinfo.link_status = sc->lmc_media->get_link_status (sc);
        sc->lmc_xinfo.mii_reg16 = lmc_mii_readreg (sc, 0, 16);

        sc->lmc_xinfo.Magic1 = 0xDEADBEEF;

        if (copy_to_user(ifr->ifr_data, &sc->lmc_xinfo,
                         sizeof(struct lmc_xinfo)))
    case LMCIOCGETLMCSTATS: /*fold01*/
        if (sc->lmc_cardtype == LMC_CARDTYPE_T1) {
            lmc_mii_writereg (sc, 0, 17, T1FRAMER_FERR_LSB);
            sc->stats.framingBitErrorCount +=
                lmc_mii_readreg (sc, 0, 18) & 0xff;
            lmc_mii_writereg (sc, 0, 17, T1FRAMER_FERR_MSB);
            sc->stats.framingBitErrorCount +=
                (lmc_mii_readreg (sc, 0, 18) & 0xff) << 8;
            lmc_mii_writereg (sc, 0, 17, T1FRAMER_LCV_LSB);
            sc->stats.lineCodeViolationCount +=
                lmc_mii_readreg (sc, 0, 18) & 0xff;
            lmc_mii_writereg (sc, 0, 17, T1FRAMER_LCV_MSB);
            sc->stats.lineCodeViolationCount +=
                (lmc_mii_readreg (sc, 0, 18) & 0xff) << 8;
            lmc_mii_writereg (sc, 0, 17, T1FRAMER_AERR);
            regVal = lmc_mii_readreg (sc, 0, 18) & 0xff;

            sc->stats.lossOfFrameCount +=
                (regVal & T1FRAMER_LOF_MASK) >> 4;
            sc->stats.changeOfFrameAlignmentCount +=
                (regVal & T1FRAMER_COFA_MASK) >> 2;
            sc->stats.severelyErroredFrameCount +=
                regVal & T1FRAMER_SEF_MASK;

        if (copy_to_user(ifr->ifr_data, &sc->stats,
                         sizeof(struct lmc_statistics)))

    case LMCIOCCLEARLMCSTATS: /*fold01*/
        if (!capable(CAP_NET_ADMIN)) {

        memset (&sc->stats, 0, sizeof(struct lmc_statistics));
        sc->stats.check = STATCHECK;
        sc->stats.version_size = (DRIVER_VERSION << 16) +
            sizeof(struct lmc_statistics);
        sc->stats.lmc_cardtype = sc->lmc_cardtype;
    case LMCIOCSETCIRCUIT: /*fold01*/
        if (!capable(CAP_NET_ADMIN)) {

        if (dev->flags & IFF_UP) {

        if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t)))

        sc->lmc_media->set_circuit_type(sc, ctl.circuit_type);
        sc->ictl.circuit_type = ctl.circuit_type;

    case LMCIOCRESET: /*fold01*/
        if (!capable(CAP_NET_ADMIN)) {

        /* Reset driver and bring back to current state */
        printk (" REG16 before reset +%04x\n", lmc_mii_readreg (sc, 0, 16));
        lmc_running_reset (dev);
        printk (" REG16 after reset +%04x\n", lmc_mii_readreg (sc, 0, 16));

        LMC_EVENT_LOG(LMC_EVENT_FORCEDRESET, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16));

    case LMCIOCDUMPEVENTLOG:
        if (copy_to_user(ifr->ifr_data, &lmcEventLogIndex, sizeof(u32)))

        if (copy_to_user(ifr->ifr_data + sizeof(u32), lmcEventLogBuf, sizeof(lmcEventLogBuf)))

#endif /* end ifdef _DBG_EVENTLOG */
    case LMCIOCT1CONTROL: /*fold01*/
        if (sc->lmc_cardtype != LMC_CARDTYPE_T1) {
    case LMCIOCXILINX: /*fold01*/
        struct lmc_xilinx_control xc; /*fold02*/

        if (!capable(CAP_NET_ADMIN)) {

        /*
         * Stop the transmitter while we restart the hardware
         */
        netif_stop_queue(dev);

        if (copy_from_user(&xc, ifr->ifr_data, sizeof(struct lmc_xilinx_control)))

        case lmc_xilinx_reset: /*fold02*/
            mii = lmc_mii_readreg (sc, 0, 16);

            /*
             * Make all of them 0 and make input
             */
            lmc_gpio_mkinput(sc, 0xff);

            /*
             * make the reset output
             */
            lmc_gpio_mkoutput(sc, LMC_GEP_RESET);

            /*
             * RESET low to force configuration.  This also forces
             * the transmitter clock to be internal, but we expect to reset
             * that later anyway.
             */
            sc->lmc_gpio &= ~LMC_GEP_RESET;
            LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

            /*
             * hold for more than 10 microseconds
             */
            sc->lmc_gpio |= LMC_GEP_RESET;
            LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

            /*
             * stop driving Xilinx-related signals
             */
            lmc_gpio_mkinput(sc, 0xff);

            /* Reset the framer hardware */
            sc->lmc_media->set_link_status (sc, 1);
            sc->lmc_media->set_status (sc, NULL);
//          lmc_softreset(sc);

            for (i = 0; i < 5; i++) {
                lmc_led_on(sc, LMC_DS3_LED0);
                lmc_led_off(sc, LMC_DS3_LED0);
                lmc_led_on(sc, LMC_DS3_LED1);
                lmc_led_off(sc, LMC_DS3_LED1);
                lmc_led_on(sc, LMC_DS3_LED3);
                lmc_led_off(sc, LMC_DS3_LED3);
                lmc_led_on(sc, LMC_DS3_LED2);
                lmc_led_off(sc, LMC_DS3_LED2);
        case lmc_xilinx_load_prom: /*fold02*/
            int timeout = 500000;

            mii = lmc_mii_readreg (sc, 0, 16);

            /*
             * Make all of them 0 and make input
             */
            lmc_gpio_mkinput(sc, 0xff);

            /*
             * make the reset output
             */
            lmc_gpio_mkoutput(sc, LMC_GEP_DP | LMC_GEP_RESET);

            /*
             * RESET low to force configuration.  This also forces
             * the transmitter clock to be internal, but we expect to reset
             * that later anyway.
             */
            sc->lmc_gpio &= ~(LMC_GEP_RESET | LMC_GEP_DP);
            LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

            /*
             * hold for more than 10 microseconds
             */
            sc->lmc_gpio |= LMC_GEP_DP | LMC_GEP_RESET;
            LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

            /*
             * busy wait for the chip to reset
             */
            while ((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
                   (timeout-- > 0))

            /*
             * stop driving Xilinx-related signals
             */
            lmc_gpio_mkinput(sc, 0xff);
        case lmc_xilinx_load: /*fold02*/
            int timeout = 500000;

            data = kmalloc(xc.len, GFP_KERNEL);
                printk(KERN_WARNING "%s: Failed to allocate memory for copy\n", dev->name);

            if (copy_from_user(data, xc.data, xc.len))

            printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data);

            lmc_gpio_mkinput(sc, 0xff);

            /*
             * Clear the Xilinx and start programming from the DEC
             */
            sc->lmc_gpio &= ~LMC_GEP_DP;
            sc->lmc_gpio &= ~LMC_GEP_RESET;
            sc->lmc_gpio |= LMC_GEP_MODE;
            LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

            lmc_gpio_mkoutput(sc, LMC_GEP_MODE | LMC_GEP_DP | LMC_GEP_RESET);

            /*
             * Wait at least 10 us (20 to be safe)
             */

            /*
             * Clear reset and activate programming lines
             */
            lmc_gpio_mkinput(sc, LMC_GEP_DP | LMC_GEP_RESET);

            /*
             * Set LOAD, DATA, Clock to 1
             */
            sc->lmc_gpio |= LMC_GEP_MODE;
            sc->lmc_gpio |= LMC_GEP_DATA;
            sc->lmc_gpio |= LMC_GEP_CLK;
            LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

            lmc_gpio_mkoutput(sc, LMC_GEP_DATA | LMC_GEP_CLK | LMC_GEP_MODE);

            /*
             * busy wait for the chip to reset
             */
            while ((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
                   (timeout-- > 0))

            printk(KERN_DEBUG "%s: Waited %d for the Xilinx to clear its memory\n", dev->name, 500000-timeout);

            for (pos = 0; pos < xc.len; pos++) {
                    sc->lmc_gpio &= ~LMC_GEP_DATA;      /* Data is 0 */
                    sc->lmc_gpio |= LMC_GEP_DATA;       /* Data is 1 */
                    printk(KERN_WARNING "%s Bad data in xilinx programming data at %d, got %d wanted 0 or 1\n", dev->name, pos, data[pos]);
                    sc->lmc_gpio |= LMC_GEP_DATA;       /* Assume it's 1 */

                sc->lmc_gpio &= ~LMC_GEP_CLK;           /* Clock to zero */
                sc->lmc_gpio |= LMC_GEP_MODE;
                LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

                sc->lmc_gpio |= LMC_GEP_CLK;            /* Put the clock back to one */
                sc->lmc_gpio |= LMC_GEP_MODE;
                LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

            if ((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0) {
                printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (corrupted data)\n", dev->name);
            else if ((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_DP) == 0) {
                printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (done)\n", dev->name);
            printk(KERN_DEBUG "%s: Done reprogramming Xilinx, %d bits, good luck!\n", dev->name, pos);

            lmc_gpio_mkinput(sc, 0xff);

            sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
            lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);

            sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
            lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);

        netif_wake_queue(dev);

    default: /*fold01*/
        /* If we don't know what to do, give the protocol a shot. */
        ret = lmc_proto_ioctl (sc, ifr, cmd);
    }

    spin_unlock_irqrestore(&sc->lmc_lock, flags); /*fold01*/

    lmc_trace(dev, "lmc_ioctl out");
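/*
 * A minimal userspace sketch of driving the LMCIOC* interface (an
 * illustration only, not code taken from lmcctl): the ifreq's ifr_data
 * points at an lmc_ctl_t, matching the copy_to_user()/copy_from_user()
 * calls above.
 *
 *     int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *     struct ifreq ifr;
 *     lmc_ctl_t ctl;
 *
 *     strncpy(ifr.ifr_name, "hdlc0", IFNAMSIZ);
 *     ifr.ifr_data = (char *) &ctl;
 *     if (ioctl(fd, LMCIOCGINFO, &ifr) == 0) {
 *         ctl.keepalive_onoff = LMC_CTL_OFF;
 *         ioctl(fd, LMCIOCSINFO, &ifr);    // apply the change
 *     }
 */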
/* the watchdog process that cruises around */
static void lmc_watchdog (unsigned long data) /*fold00*/
{
    struct net_device *dev = (struct net_device *) data;

    lmc_trace(dev, "lmc_watchdog in");

    spin_lock_irqsave(&sc->lmc_lock, flags);

    if (sc->check != 0xBEAFCAFE) {
        printk("LMC: Corrupt net_device struct, breaking out\n");
        spin_unlock_irqrestore(&sc->lmc_lock, flags);

    /* Make sure the tx jabber and rx watchdog are off,
     * and the transmit and receive processes are running.
     */
    LMC_CSR_WRITE (sc, csr_15, 0x00000011);
    sc->lmc_cmdmode |= TULIP_CMD_TXRUN | TULIP_CMD_RXRUN;
    LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);

    LMC_EVENT_LOG(LMC_EVENT_WATCHDOG, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16));

    /* --- begin time out check -----------------------------------
     * check for a transmit interrupt timeout
     * Has the packet xmt vs xmt serviced threshold been exceeded? */
    if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
        sc->stats.tx_packets > sc->lasttx_packets &&
        sc->tx_TimeoutInd == 0)

        /* wait for the watchdog to come around again */
        sc->tx_TimeoutInd = 1;

    else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
             sc->stats.tx_packets > sc->lasttx_packets &&
             sc->tx_TimeoutInd)

        LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0);

        sc->tx_TimeoutDisplay = 1;
        sc->stats.tx_TimeoutCnt++;

        /* DEC chip is stuck, hit it with a RESET!!!! */
        lmc_running_reset (dev);

        /* look at receive & transmit process state to make sure they are running */
        LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);

        /* look at: DSR - 02 for Reg 16 */
        LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg (sc, 0, 16), lmc_mii_readreg (sc, 0, 17));

        /* reset the transmit timeout detection flag */
        sc->tx_TimeoutInd = 0;
        sc->lastlmc_taint_tx = sc->lmc_taint_tx;
        sc->lasttx_packets = sc->stats.tx_packets;

        sc->tx_TimeoutInd = 0;
        sc->lastlmc_taint_tx = sc->lmc_taint_tx;
        sc->lasttx_packets = sc->stats.tx_packets;

    /* --- end time out check ----------------------------------- */

    link_status = sc->lmc_media->get_link_status (sc);

    /*
     * hardware level link lost, but the interface is marked as up.
     */
    if ((link_status == 0) && (sc->last_link_status != 0)) {
        printk(KERN_WARNING "%s: hardware/physical link down\n", dev->name);
        sc->last_link_status = 0;
        /* lmc_reset (sc); Why reset??? The link can go down ok */

        /* Inform the world that link has been lost */
        dev->flags &= ~IFF_RUNNING;

    /*
     * hardware link is up, but the interface is marked as down.
     * Bring it back up again.
     */
    if (link_status != 0 && sc->last_link_status == 0) {
        printk(KERN_WARNING "%s: hardware/physical link up\n", dev->name);
        sc->last_link_status = 1;
        /* lmc_reset (sc); Again why reset??? */

        /* Inform the world that link protocol is back up. */
        dev->flags |= IFF_RUNNING;

        /* Now we have to tell the syncppp that we had an outage
         * and that it should deal.  Calling sppp_reopen here
         * should do the trick, but we may have to call sppp_close
         * when the link goes down, and call sppp_open here.
         * Subject to more testing.
         */
        lmc_proto_reopen(sc);

    /* Call media specific watchdog functions */
    sc->lmc_media->watchdog(sc);

    /*
     * Poke the transmitter to make sure it
     * never stops, even if we run out of mem
     */
    LMC_CSR_WRITE(sc, csr_rxpoll, 0);

    /*
     * Check for code that failed
     * and try and fix it as appropriate
     */
    if (sc->failed_ring == 1) {
        /*
         * Failed to setup the recv/xmit ring
         */

    if (sc->failed_recv_alloc == 1) {
        /*
         * We failed to alloc mem in the
         * interrupt handler, go through the rings
         */
        sc->failed_recv_alloc = 0;

    /*
     * remember the timer value
     */
    ticks = LMC_CSR_READ (sc, csr_gp_timer);
    LMC_CSR_WRITE (sc, csr_gp_timer, 0xffffffffUL);
    sc->ictl.ticks = 0x0000ffff - (ticks & 0x0000ffff);

    /*
     * restart this timer.
     */
    sc->timer.expires = jiffies + (HZ);
    add_timer (&sc->timer);

    spin_unlock_irqrestore(&sc->lmc_lock, flags);

    lmc_trace(dev, "lmc_watchdog out");
static void lmc_setup(struct net_device * const dev) /*fold00*/
{
    lmc_trace(dev, "lmc_setup in");

    dev->type = ARPHRD_HDLC;
    dev->hard_start_xmit = lmc_start_xmit;
    dev->open = lmc_open;
    dev->stop = lmc_close;
    dev->get_stats = lmc_get_stats;
    dev->do_ioctl = lmc_ioctl;
    dev->tx_timeout = lmc_driver_timeout;
    dev->watchdog_timeo = (HZ);         /* 1 second */

    lmc_trace(dev, "lmc_setup out");
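/*
 * PCI probe: allocate a net_device with an lmc_softc_t private area, enable
 * the PCI device and claim its regions, register the netdev, then work out
 * the card type from the PCI subsystem ID and cross-check it against the
 * adapter model number read back from the Xilinx through MII register 3.
 */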
static int __devinit lmc_init_one(struct pci_dev *pdev,
                                  const struct pci_device_id *ent)
{
    struct net_device *dev;
    u_int16_t AdapModelNum;
    static int cards_found;
#ifndef GCOM
    /* We name by type not by vendor */
    static const char lmcname[] = "hdlc%d";
#else
    /*
     * GCOM uses LMC vendor name so that clients can know which card
     * to attach to.
     */
    static const char lmcname[] = "lmc%d";
#endif

    /*
     * Allocate our own device structure
     */
    dev = alloc_netdev(sizeof(lmc_softc_t), lmcname, lmc_setup);
        printk (KERN_ERR "lmc:alloc_netdev for device failed\n");

    lmc_trace(dev, "lmc_init_one in");

    err = pci_enable_device(pdev);
        printk(KERN_ERR "lmc: pci enable failed:%d\n", err);

    if (pci_request_regions(pdev, "lmc")) {
        printk(KERN_ERR "lmc: pci_request_region failed\n");

    pci_set_drvdata(pdev, dev);

    if (lmc_first_load == 0) {
        printk(KERN_INFO "Lan Media Corporation WAN Driver Version %d.%d.%d\n",
               DRIVER_MAJOR_VERSION, DRIVER_MINOR_VERSION, DRIVER_SUB_VERSION);

    sc->lmc_device = dev;
    sc->name = dev->name;

    /* Initialize the sppp layer */
    /* An ioctl can cause a subsequent detach for raw frame interface */
    sc->if_type = LMC_PPP;
    sc->check = 0xBEAFCAFE;
    dev->base_addr = pci_resource_start(pdev, 0);
    dev->irq = pdev->irq;

    SET_MODULE_OWNER(dev);
    SET_NETDEV_DEV(dev, &pdev->dev);

    /*
     * This will get the protocol layer ready and do any 1 time init's.
     * Must have a valid sc and dev structure.
     */
    lmc_proto_attach(sc);

    /*
     * Why were we changing this???
     * dev->tx_queue_len = 100;
     */

    /* Init the spin lock so we can call it later */
    spin_lock_init(&sc->lmc_lock);
    pci_set_master(pdev);

    printk ("%s: detected at %lx, irq %d\n", dev->name,
            dev->base_addr, dev->irq);

    if (register_netdev (dev) != 0) {
        printk (KERN_ERR "%s: register_netdev failed.\n", dev->name);

    sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN;
    sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;

    /*
     * Check either the subvendor or the subdevice: some systems reverse
     * the setting in the BIOS (seems to be version and arch dependent?).
     * Fix the error by exchanging the two values.
     */
    if ((subdevice = pdev->subsystem_device) == PCI_VENDOR_ID_LMC)
        subdevice = pdev->subsystem_vendor;

    switch (subdevice) {
    case PCI_DEVICE_ID_LMC_HSSI:
        printk ("%s: LMC HSSI\n", dev->name);
        sc->lmc_cardtype = LMC_CARDTYPE_HSSI;
        sc->lmc_media = &lmc_hssi_media;
        break;
    case PCI_DEVICE_ID_LMC_DS3:
        printk ("%s: LMC DS3\n", dev->name);
        sc->lmc_cardtype = LMC_CARDTYPE_DS3;
        sc->lmc_media = &lmc_ds3_media;
        break;
    case PCI_DEVICE_ID_LMC_SSI:
        printk ("%s: LMC SSI\n", dev->name);
        sc->lmc_cardtype = LMC_CARDTYPE_SSI;
        sc->lmc_media = &lmc_ssi_media;
        break;
    case PCI_DEVICE_ID_LMC_T1:
        printk ("%s: LMC T1\n", dev->name);
        sc->lmc_cardtype = LMC_CARDTYPE_T1;
        sc->lmc_media = &lmc_t1_media;
        break;
    default:
        printk (KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name);
        break;
    }

    lmc_initcsrs (sc, dev->base_addr, 8);

    lmc_gpio_mkinput (sc, 0xff);
    sc->lmc_gpio = 0;                   /* drive no signals yet */

    sc->lmc_media->defaults (sc);

    sc->lmc_media->set_link_status (sc, LMC_LINK_UP);

    /* verify that the PCI Sub System ID matches the Adapter Model number
     * from the MII register
     */
    AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4;

    if ((AdapModelNum == LMC_ADAP_T1
         && subdevice == PCI_DEVICE_ID_LMC_T1) ||       /* detect LMC1200 */
        (AdapModelNum == LMC_ADAP_SSI
         && subdevice == PCI_DEVICE_ID_LMC_SSI) ||      /* detect LMC1000 */
        (AdapModelNum == LMC_ADAP_DS3
         && subdevice == PCI_DEVICE_ID_LMC_DS3) ||      /* detect LMC5245 */
        (AdapModelNum == LMC_ADAP_HSSI
         && subdevice == PCI_DEVICE_ID_LMC_HSSI))
    {                                                   /* detect LMC5200 */

        printk ("%s: Model number (%d) miscompare for PCI Subsystem ID = 0x%04x\n",
                dev->name, AdapModelNum, subdevice);

    LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL);

    sc->board_idx = cards_found++;
    sc->stats.check = STATCHECK;
    sc->stats.version_size = (DRIVER_VERSION << 16) +
        sizeof (struct lmc_statistics);
    sc->stats.lmc_cardtype = sc->lmc_cardtype;

    sc->last_link_status = 0;

    lmc_trace(dev, "lmc_init_one out");

    lmc_proto_detach(sc);

    pci_release_regions(pdev);
    pci_set_drvdata(pdev, NULL);
/*
 * Called from pci when removing module.
 */
static void __devexit lmc_remove_one (struct pci_dev *pdev)
{
    struct net_device *dev = pci_get_drvdata(pdev);

    lmc_softc_t *sc = dev->priv;

    printk("%s: removing...\n", dev->name);
    lmc_proto_detach(sc);
    unregister_netdev(dev);

    pci_release_regions(pdev);
    pci_disable_device(pdev);
    pci_set_drvdata(pdev, NULL);
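/*
 * Opening the interface grabs the shared IRQ, programs the default transmit
 * descriptor control bits (including 16-bit CRC handling), unmasks the Tulip
 * interrupts we care about, starts the TX/RX processes, and arms the
 * one-second watchdog timer.
 */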
/* After this is called, packets can be sent.
 * Does not initialize the addresses.
 */
static int lmc_open (struct net_device *dev) /*fold00*/
{
    lmc_softc_t *sc = dev->priv;

    lmc_trace(dev, "lmc_open in");

    lmc_led_on(sc, LMC_DS3_LED0);

    LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
    LMC_EVENT_LOG(LMC_EVENT_RESET2,
                  lmc_mii_readreg (sc, 0, 16),
                  lmc_mii_readreg (sc, 0, 17));

        lmc_trace(dev, "lmc_open lmc_ok out");

    /* Since we have to use PCI bus, this should work on x86, alpha, ppc */
    if (request_irq (dev->irq, &lmc_interrupt, SA_SHIRQ, dev->name, dev)) {
        printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq);
        lmc_trace(dev, "lmc_open irq failed out");

    /* Assert Terminal Active */
    sc->lmc_miireg16 |= LMC_MII16_LED_ALL;
    sc->lmc_media->set_link_status (sc, LMC_LINK_UP);

    /*
     * reset to last state.
     */
    sc->lmc_media->set_status (sc, NULL);

    /* setup default bits to be used in tulip_desc_t transmit descriptor
     */
    sc->TxDescriptControlInit = (
        LMC_TDES_INTERRUPT_ON_COMPLETION
        | LMC_TDES_FIRST_SEGMENT
        | LMC_TDES_LAST_SEGMENT
        | LMC_TDES_SECOND_ADDR_CHAINED
        | LMC_TDES_DISABLE_PADDING
        );

    if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16) {
        /* disable 32 bit CRC generated by ASIC */
        sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE;

    sc->lmc_media->set_crc_length(sc, sc->ictl.crc_length);
    /* Acknowledge the Terminal Active and light LEDs */

    /* dev->flags |= IFF_UP; */

    dev->do_ioctl = lmc_ioctl;

    netif_start_queue(dev);

    sc->stats.tx_tbusy0++;

    /*
     * select what interrupts we want to get
     */
    sc->lmc_intrmask = 0;
    /* Should be using the default interrupt mask defined in the .h file. */
    sc->lmc_intrmask |= (TULIP_STS_NORMALINTR
                         | TULIP_STS_ABNRMLINTR
                         | TULIP_STS_SYSERROR
                         | TULIP_STS_TXSTOPPED
                         | TULIP_STS_TXUNDERFLOW
                         | TULIP_STS_RXSTOPPED
                         );
    LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);

    sc->lmc_cmdmode |= TULIP_CMD_TXRUN;
    sc->lmc_cmdmode |= TULIP_CMD_RXRUN;
    LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);

    sc->lmc_ok = 1;                     /* Run watchdog */

    /*
     * Set the if up now - pfb
     */
    sc->last_link_status = 1;

    /*
     * Setup a timer for the watchdog on probe, and start it running.
     * Since lmc_ok == 0, it will be a NOP for now.
     */
    init_timer (&sc->timer);
    sc->timer.expires = jiffies + HZ;
    sc->timer.data = (unsigned long) dev;
    sc->timer.function = &lmc_watchdog;
    add_timer (&sc->timer);

    lmc_trace(dev, "lmc_open out");
/* Total reset to compensate for the AdTran DSU doing bad things
 */
static void lmc_running_reset (struct net_device *dev) /*fold00*/
{
    lmc_softc_t *sc = (lmc_softc_t *) dev->priv;

    lmc_trace(dev, "lmc_running_reset in");

    /* stop interrupts */
    /* Clear the interrupt mask */
    LMC_CSR_WRITE (sc, csr_intr, 0x00000000);

    /* sc->lmc_miireg16 |= LMC_MII16_LED_ALL; */
    sc->lmc_media->set_link_status (sc, 1);
    sc->lmc_media->set_status (sc, NULL);

    //dev->flags |= IFF_RUNNING;

    netif_wake_queue(dev);

    sc->stats.tx_tbusy0++;

    sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK;
    LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);

    sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN);
    LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);

    lmc_trace(dev, "lmc_running_reset out");
/* This is what is called when you ifconfig down a device.
 * This disables the timer for the watchdog and keepalives,
 * and disables the irq for dev.
 */
static int lmc_close (struct net_device *dev) /*fold00*/
{
    /* not calling release_region() as we should */
    lmc_trace(dev, "lmc_close in");

    sc->lmc_media->set_link_status (sc, 0);
    del_timer (&sc->timer);
    lmc_proto_close(sc);

    lmc_trace(dev, "lmc_close out");
/* Ends the transfer of packets */
/* When the interface goes down, this is called */
static int lmc_ifdown (struct net_device *dev) /*fold00*/
{
    lmc_softc_t *sc = dev->priv;

    lmc_trace(dev, "lmc_ifdown in");

    /* Don't let anything else go on right now */
    netif_stop_queue(dev);
    sc->stats.tx_tbusy1++;

    /* stop interrupts */
    /* Clear the interrupt mask */
    LMC_CSR_WRITE (sc, csr_intr, 0x00000000);

    /* Stop Tx and Rx on the chip */
    csr6 = LMC_CSR_READ (sc, csr_command);
    csr6 &= ~LMC_DEC_ST;                /* Turn off the Transmission bit */
    csr6 &= ~LMC_DEC_SR;                /* Turn off the Receive bit */
    LMC_CSR_WRITE (sc, csr_command, csr6);

    dev->flags &= ~IFF_RUNNING;

    sc->stats.rx_missed_errors +=
        LMC_CSR_READ (sc, csr_missed_frames) & 0xffff;

    /* release the interrupt */
    if (sc->got_irq == 1) {
        free_irq (dev->irq, dev);

    /* free skbuffs in the Rx queue */
    for (i = 0; i < LMC_RXDESCS; i++)
    {
        struct sk_buff *skb = sc->lmc_rxq[i];
        sc->lmc_rxq[i] = NULL;
        sc->lmc_rxring[i].status = 0;
        sc->lmc_rxring[i].length = 0;
        sc->lmc_rxring[i].buffer1 = 0xDEADBEEF;

        sc->lmc_rxq[i] = NULL;
    }

    for (i = 0; i < LMC_TXDESCS; i++)
    {
        if (sc->lmc_txq[i] != NULL)
            dev_kfree_skb(sc->lmc_txq[i]);
        sc->lmc_txq[i] = NULL;
    }

    lmc_led_off (sc, LMC_MII16_LED_ALL);

    netif_wake_queue(dev);
    sc->stats.tx_tbusy0++;

    lmc_trace(dev, "lmc_ifdown out");
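/*
 * The ISR below loops while csr_status still shows bits from our interrupt
 * mask: each pass acknowledges the pending bits, runs a running reset on
 * abnormal interrupts, services receive interrupts, reclaims completed
 * transmit descriptors (stopping at the first descriptor still owned by the
 * chip), and decodes system bus errors.
 */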
/* Interrupt handling routine.  This will take an incoming packet, or clean
 * up after a transmit.
 */
static irqreturn_t lmc_interrupt (int irq, void *dev_instance, struct pt_regs *regs) /*fold00*/
{
    struct net_device *dev = (struct net_device *) dev_instance;

    int max_work = LMC_RXDESCS;

    lmc_trace(dev, "lmc_interrupt in");

    spin_lock(&sc->lmc_lock);

    /*
     * Read the csr to find what interrupts we have (if any)
     */
    csr = LMC_CSR_READ (sc, csr_status);

    /*
     * Make sure this is our interrupt
     */
    if ( ! (csr & sc->lmc_intrmask)) {
        goto lmc_int_fail_out;

    /* always go through this loop at least once */
    while (csr & sc->lmc_intrmask) {

        /*
         * Clear interrupt bits, we handle all cases below
         */
        LMC_CSR_WRITE (sc, csr_status, csr);

        /*
         * One of:
         * - Transmit process timed out CSR5<1>
         * - Transmit jabber timeout    CSR5<3>
         * - Transmit underflow         CSR5<5>
         * - Transmit Receiver buffer unavailable CSR5<7>
         * - Receive process stopped    CSR5<8>
         * - Receive watchdog timeout   CSR5<9>
         * - Early transmit interrupt   CSR5<10>
         *
         * Is this really right? Should we do a running reset for jabber?
         * (being a WAN card and all)
         */
        if (csr & TULIP_STS_ABNRMLINTR) {
            lmc_running_reset (dev);

        if (csr & TULIP_STS_RXINTR) {
            lmc_trace(dev, "rx interrupt");

        if (csr & (TULIP_STS_TXINTR | TULIP_STS_TXNOBUF | TULIP_STS_TXSTOPPED)) {

            /* reset the transmit timeout detection flag -baz */
            sc->stats.tx_NoCompleteCnt = 0;

            badtx = sc->lmc_taint_tx;
            i = badtx % LMC_TXDESCS;

            while ((badtx < sc->lmc_next_tx)) {
                stat = sc->lmc_txring[i].status;

                LMC_EVENT_LOG (LMC_EVENT_XMTINT, stat,
                               sc->lmc_txring[i].length);

                /*
                 * If bit 31 is 1 the tulip owns it: break out of the loop
                 */
                if (stat & 0x80000000)

                n_compl++;              /* i.e., have an empty slot in ring */

                /*
                 * If we have no skbuff, or have already cleared it,
                 * continue to the next buffer
                 */
                if (sc->lmc_txq[i] == NULL)

                /*
                 * Check the total error summary to look for any errors
                 */
                if (stat & 0x8000) {
                    sc->stats.tx_errors++;
                        sc->stats.tx_aborted_errors++;
                        sc->stats.tx_carrier_errors++;
                        sc->stats.tx_window_errors++;
                        sc->stats.tx_fifo_errors++;

                    sc->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff;

                    sc->stats.tx_packets++;

//              dev_kfree_skb(sc->lmc_txq[i]);
                dev_kfree_skb_irq(sc->lmc_txq[i]);
                sc->lmc_txq[i] = NULL;

                i = badtx % LMC_TXDESCS;
            }

            if (sc->lmc_next_tx - badtx > LMC_TXDESCS)
            {
                printk ("%s: out of sync pointer\n", dev->name);
                badtx += LMC_TXDESCS;
            }

            LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0);
            netif_wake_queue(dev);
            sc->stats.tx_tbusy0++;

            sc->stats.dirtyTx = badtx;
            sc->stats.lmc_next_tx = sc->lmc_next_tx;
            sc->stats.lmc_txfull = sc->lmc_txfull;

            sc->lmc_taint_tx = badtx;

            /*
             * Why was there a break here???
             */
        } /* end handle transmit interrupt */

        if (csr & TULIP_STS_SYSERROR) {
            printk (KERN_WARNING "%s: system bus error csr: %#8.8x\n", dev->name, csr);
            error = csr>>23 & 0x7;
                printk(KERN_WARNING "%s: Parity Fault (bad)\n", dev->name);
                printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
                printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
                printk(KERN_WARNING "%s: This bus error code was supposed to be reserved!\n", dev->name);

        LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
        LMC_EVENT_LOG(LMC_EVENT_RESET2,
                      lmc_mii_readreg (sc, 0, 16),
                      lmc_mii_readreg (sc, 0, 17));

        /*
         * Get current csr status to make sure
         * we've cleared all interrupts
         */
        csr = LMC_CSR_READ (sc, csr_status);
    } /* end interrupt loop */

    LMC_EVENT_LOG(LMC_EVENT_INT, firstcsr, csr);

    spin_unlock(&sc->lmc_lock);

    lmc_trace(dev, "lmc_interrupt out");
    return IRQ_RETVAL(handled);
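/*
 * Transmit path: descriptors are claimed in order from lmc_next_tx.  To cut
 * down on interrupts, completion interrupts are only requested when the ring
 * reaches the half-full mark or is nearly full; when the ring does fill we
 * stop the queue and let the completion interrupt wake it again.
 */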
static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00*/
{
    unsigned long flags;

    lmc_trace(dev, "lmc_start_xmit in");

    spin_lock_irqsave(&sc->lmc_lock, flags);

    /* normal path, tbusy known to be zero */

    entry = sc->lmc_next_tx % LMC_TXDESCS;

    sc->lmc_txq[entry] = skb;
    sc->lmc_txring[entry].buffer1 = virt_to_bus (skb->data);

    LMC_CONSOLE_LOG("xmit", skb->data, skb->len);

    /* If the queue is less than half full, don't interrupt */
    if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS / 2)
    {
        /* Do not interrupt on completion of this packet */
        netif_wake_queue(dev);
    }
    else if (sc->lmc_next_tx - sc->lmc_taint_tx == LMC_TXDESCS / 2)
    {
        /* This generates an interrupt on completion of this packet */
        netif_wake_queue(dev);
    }
    else if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS - 1)
    {
        /* Do not interrupt on completion of this packet */
        netif_wake_queue(dev);
    }
    else
    {
        /* This generates an interrupt on completion of this packet */
        netif_stop_queue(dev);
    }

    flag = LMC_TDES_INTERRUPT_ON_COMPLETION;

    if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1)
    {                                   /* ring full, go busy */
        netif_stop_queue(dev);
        sc->stats.tx_tbusy1++;
        LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0);
    }

    if (entry == LMC_TXDESCS - 1)       /* last descriptor in ring */
        flag |= LMC_TDES_END_OF_RING;   /* flag as such for Tulip */

    /* don't pad small packets either */
    flag = sc->lmc_txring[entry].length = (skb->len) | flag |
        sc->TxDescriptControlInit;

    /* set the transmit timeout flag to be checked in
     * the watchdog timer handler. -baz
     */
    sc->stats.tx_NoCompleteCnt++;

    /* give ownership to the chip */
    LMC_EVENT_LOG(LMC_EVENT_XMT, flag, entry);
    sc->lmc_txring[entry].status = 0x80000000;

    LMC_CSR_WRITE (sc, csr_txpoll, 0);

    dev->trans_start = jiffies;

    spin_unlock_irqrestore(&sc->lmc_lock, flags);

    lmc_trace(dev, "lmc_start_xmit_out");
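/*
 * Receive path: walk descriptors until we hit one still owned by the 21140.
 * Bad status bits only bump error counters.  Large frames are handed straight
 * up the stack and the ring slot is re-armed with a freshly allocated skb;
 * smaller frames are copied into a new skb so the ring buffer can be reused.
 */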
static int lmc_rx (struct net_device *dev) /*fold00*/
{
    int rx_work_limit = LMC_RXDESCS;
    unsigned int next_rx;
    int rxIntLoopCnt;                   /* debug -baz */
    int localLengthErrCnt = 0;

    struct sk_buff *skb, *nsb;

    lmc_trace(dev, "lmc_rx in");

    lmc_led_on(sc, LMC_DS3_LED3);

    rxIntLoopCnt = 0;                   /* debug -baz */

    i = sc->lmc_next_rx % LMC_RXDESCS;
    next_rx = sc->lmc_next_rx;

    while (((stat = sc->lmc_rxring[i].status) & LMC_RDES_OWN_BIT) != DESC_OWNED_BY_DC21X4)
    {
        rxIntLoopCnt++;                 /* debug -baz */
        len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER);
        if ((stat & 0x0300) != 0x0300) {        /* Check first segment and last segment */
            if ((stat & 0x0000ffff) != 0x7fff) {
                /* Oversized frame */
                sc->stats.rx_length_errors++;

        if (stat & 0x00000008) {        /* Catch a dribbling bit error */
            sc->stats.rx_errors++;
            sc->stats.rx_frame_errors++;

        if (stat & 0x00000004) {        /* Catch a CRC error by the Xilinx */
            sc->stats.rx_errors++;
            sc->stats.rx_crc_errors++;

        if (len > LMC_PKT_BUF_SZ) {
            sc->stats.rx_length_errors++;
            localLengthErrCnt++;

        if (len < sc->lmc_crcSize + 2) {
            sc->stats.rx_length_errors++;
            sc->stats.rx_SmallPktCnt++;
            localLengthErrCnt++;

        if (stat & 0x00004000) {
            printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name);

        len -= sc->lmc_crcSize;

        skb = sc->lmc_rxq[i];

        /*
         * We ran out of memory at some point,
         * just allocate an skb buff and continue.
         */
            nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
                sc->lmc_rxq[i] = nsb;
                sc->lmc_rxring[i].buffer1 = virt_to_bus (nsb->tail);
            sc->failed_recv_alloc = 1;

        dev->last_rx = jiffies;
        sc->stats.rx_packets++;
        sc->stats.rx_bytes += len;

        LMC_CONSOLE_LOG("recv", skb->data, len);

        /*
         * I'm not sure of the sanity of this:
         * Packets could be arriving at a constant
         * 44.210mbits/sec and we're going to copy
         * them into a new buffer??
         */
        if (len > (LMC_MTU - (LMC_MTU>>2))) {   /* len > LMC_MTU * 0.75 */
            /*
             * If it's a large packet don't copy it, just hand it up
             */
            sc->lmc_rxq[i] = NULL;
            sc->lmc_rxring[i].buffer1 = 0x0;

            skb->protocol = lmc_proto_type(sc, skb);
            skb->protocol = htons(ETH_P_WAN_PPP);
            skb->mac.raw = skb->data;
//          skb->nh.raw = skb->data;

            lmc_proto_netif(sc, skb);

            /*
             * This skb will be destroyed by the upper layers, make a new one
             */
            nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
                sc->lmc_rxq[i] = nsb;
                sc->lmc_rxring[i].buffer1 = virt_to_bus (nsb->tail);
                /* Transferred to 21140 below */

                /*
                 * We've run out of memory, stop trying to allocate
                 * memory and exit the interrupt handler.
                 *
                 * The chip may run out of receivers and stop,
                 * in which case we'll try to allocate the buffer
                 * again.  (once a second)
                 */
                sc->stats.rx_BuffAllocErr++;
                LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
                sc->failed_recv_alloc = 1;
                goto skip_out_of_mem;

            nsb = dev_alloc_skb(len);
                goto give_it_anyways;

            memcpy(skb_put(nsb, len), skb->data, len);

            nsb->protocol = lmc_proto_type(sc, skb);
            nsb->mac.raw = nsb->data;
//          nsb->nh.raw = nsb->data;

            lmc_proto_netif(sc, nsb);

        LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
        sc->lmc_rxring[i].status = DESC_OWNED_BY_DC21X4;

        i = sc->lmc_next_rx % LMC_RXDESCS;

        if (rx_work_limit < 0)

    /* detect condition for LMC1000 where DSU cable attaches and fills
     * descriptors with bogus packets
     */
    if (localLengthErrCnt > LMC_RXDESCS - 3) {
        sc->stats.rx_BadPktSurgeCnt++;
        LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE,
                      localLengthErrCnt,
                      sc->stats.rx_BadPktSurgeCnt);
    }

    /* save max count of receive descriptors serviced */
    if (rxIntLoopCnt > sc->stats.rxIntLoopCnt) {
        sc->stats.rxIntLoopCnt = rxIntLoopCnt;  /* debug -baz */
    }

    if (rxIntLoopCnt == 0)
    {
        for (i = 0; i < LMC_RXDESCS; i++)
        {
            if ((sc->lmc_rxring[i].status & LMC_RDES_OWN_BIT)
                != DESC_OWNED_BY_DC21X4)

    LMC_EVENT_LOG(LMC_EVENT_RCVEND, rxIntLoopCnt, 0);

    lmc_led_off(sc, LMC_DS3_LED3);

    lmc_trace(dev, "lmc_rx out");
static struct net_device_stats *lmc_get_stats (struct net_device *dev) /*fold00*/
{
    lmc_softc_t *sc = dev->priv;
    unsigned long flags;

    lmc_trace(dev, "lmc_get_stats in");

    spin_lock_irqsave(&sc->lmc_lock, flags);

    sc->stats.rx_missed_errors += LMC_CSR_READ (sc, csr_missed_frames) & 0xffff;

    spin_unlock_irqrestore(&sc->lmc_lock, flags);

    lmc_trace(dev, "lmc_get_stats out");

    return (struct net_device_stats *) &sc->stats;
static struct pci_driver lmc_driver = {
    .id_table = lmc_pci_tbl,
    .probe    = lmc_init_one,
    .remove   = __devexit_p(lmc_remove_one),
};

static int __init init_lmc(void)
{
    return pci_module_init(&lmc_driver);
}

static void __exit exit_lmc(void)
{
    pci_unregister_driver(&lmc_driver);
}

module_init(init_lmc);
module_exit(exit_lmc);
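/*
 * MII access is bit-banged through CSR9: lmc_mii_readreg clocks out a 16-bit
 * read command (data on bit 17, clock on bit 16) and then clocks the result
 * back in one bit at a time from bit 19; lmc_mii_writereg clocks out a 32-bit
 * write frame the same way.
 */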
unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/
{
    int command = (0xf6 << 10) | (devaddr << 5) | regno;

    lmc_trace(sc->lmc_device, "lmc_mii_readreg in");

    lmc_trace(sc->lmc_device, "lmc_mii_readreg: done sync");

    for (i = 15; i >= 0; i--)
    {
        int dataval = (command & (1 << i)) ? 0x20000 : 0;

        LMC_CSR_WRITE (sc, csr_9, dataval);
        /* __SLOW_DOWN_IO; */
        LMC_CSR_WRITE (sc, csr_9, dataval | 0x10000);
        /* __SLOW_DOWN_IO; */
    }

    lmc_trace(sc->lmc_device, "lmc_mii_readreg: done1");

    for (i = 19; i > 0; i--)
    {
        LMC_CSR_WRITE (sc, csr_9, 0x40000);
        /* __SLOW_DOWN_IO; */
        retval = (retval << 1) | ((LMC_CSR_READ (sc, csr_9) & 0x80000) ? 1 : 0);
        LMC_CSR_WRITE (sc, csr_9, 0x40000 | 0x10000);
        /* __SLOW_DOWN_IO; */
    }

    lmc_trace(sc->lmc_device, "lmc_mii_readreg out");

    return (retval >> 1) & 0xffff;
void lmc_mii_writereg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno, unsigned data) /*fold00*/
{
    int command = (0x5002 << 16) | (devaddr << 23) | (regno << 18) | data;

    lmc_trace(sc->lmc_device, "lmc_mii_writereg in");

        if (command & (1 << i))

        LMC_CSR_WRITE (sc, csr_9, datav);
        /* __SLOW_DOWN_IO; */
        LMC_CSR_WRITE (sc, csr_9, (datav | 0x10000));
        /* __SLOW_DOWN_IO; */

        LMC_CSR_WRITE (sc, csr_9, 0x40000);
        /* __SLOW_DOWN_IO; */
        LMC_CSR_WRITE (sc, csr_9, 0x50000);
        /* __SLOW_DOWN_IO; */

    lmc_trace(sc->lmc_device, "lmc_mii_writereg out");
static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/
{
    lmc_trace(sc->lmc_device, "lmc_softreset in");

    /* Initialize the receive rings and buffers. */
    sc->lmc_next_rx = 0;
    sc->lmc_next_tx = 0;
    sc->lmc_taint_rx = 0;
    sc->lmc_taint_tx = 0;

    /*
     * Setup each one of the receiver buffers:
     * allocate an skbuff for each one, setup the descriptor table
     * and point each buffer at the next one
     */
    for (i = 0; i < LMC_RXDESCS; i++)
    {
        struct sk_buff *skb;

        if (sc->lmc_rxq[i] == NULL)
        {
            skb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
                printk(KERN_WARNING "%s: Failed to allocate receiver ring, will try again\n", sc->name);
                sc->failed_ring = 1;

            sc->lmc_rxq[i] = skb;
        }
        else
        {
            skb = sc->lmc_rxq[i];
        }

        skb->dev = sc->lmc_device;

        /* owned by 21140 */
        sc->lmc_rxring[i].status = 0x80000000;

        /* used to be PKT_BUF_SZ now uses skb since we lose some to head room */
        sc->lmc_rxring[i].length = skb->end - skb->data;

        /* used to be tail, which is dumb since you're thinking "why write
         * to the end of the packet" -- but since there's nothing there, tail == data
         */
        sc->lmc_rxring[i].buffer1 = virt_to_bus (skb->data);

        /* This is fair since the structure is static and we have the next address */
        sc->lmc_rxring[i].buffer2 = virt_to_bus (&sc->lmc_rxring[i + 1]);
    }

    sc->lmc_rxring[i - 1].length |= 0x02000000;                         /* Set end of buffers flag */
    sc->lmc_rxring[i - 1].buffer2 = virt_to_bus (&sc->lmc_rxring[0]);   /* Point back to the start */
    LMC_CSR_WRITE (sc, csr_rxlist, virt_to_bus (sc->lmc_rxring));       /* write base address */

    /* Initialize the transmit rings and buffers */
    for (i = 0; i < LMC_TXDESCS; i++)
    {
        if (sc->lmc_txq[i] != NULL) {           /* have buffer */
            dev_kfree_skb(sc->lmc_txq[i]);      /* free it */
            sc->stats.tx_dropped++;             /* We just dropped a packet */
        }
        sc->lmc_txq[i] = NULL;
        sc->lmc_txring[i].status = 0x00000000;
        sc->lmc_txring[i].buffer2 = virt_to_bus (&sc->lmc_txring[i + 1]);
    }

    sc->lmc_txring[i - 1].buffer2 = virt_to_bus (&sc->lmc_txring[0]);
    LMC_CSR_WRITE (sc, csr_txlist, virt_to_bus (sc->lmc_txring));

    lmc_trace(sc->lmc_device, "lmc_softreset out");
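/*
 * The GPIO helpers below drive the Tulip general purpose register:
 * mkinput/mkoutput flip direction bits tracked in lmc_gpio_io, and the LED
 * helpers toggle LED bits in MII register 16 (the LEDs are active low, hence
 * the inverted "already on" test in lmc_led_on).
 */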
void lmc_gpio_mkinput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/
{
    lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in");
    sc->lmc_gpio_io &= ~bits;
    LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
    lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out");
}

void lmc_gpio_mkoutput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/
{
    lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in");
    sc->lmc_gpio_io |= bits;
    LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
    lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out");
}

void lmc_led_on(lmc_softc_t * const sc, u_int32_t led) /*fold00*/
{
    lmc_trace(sc->lmc_device, "lmc_led_on in");
    if ((~sc->lmc_miireg16) & led) {    /* Already on! */
        lmc_trace(sc->lmc_device, "lmc_led_on aon out");

    sc->lmc_miireg16 &= ~led;
    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
    lmc_trace(sc->lmc_device, "lmc_led_on out");
}

void lmc_led_off(lmc_softc_t * const sc, u_int32_t led) /*fold00*/
{
    lmc_trace(sc->lmc_device, "lmc_led_off in");
    if (sc->lmc_miireg16 & led) {       /* Already set, don't do anything */
        lmc_trace(sc->lmc_device, "lmc_led_off aoff out");

    sc->lmc_miireg16 |= led;
    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
    lmc_trace(sc->lmc_device, "lmc_led_off out");
}
static void lmc_reset(lmc_softc_t * const sc) /*fold00*/
{
    lmc_trace(sc->lmc_device, "lmc_reset in");
    sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);

    sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);

    /*
     * make some of the GPIO pins be outputs
     */
    lmc_gpio_mkoutput(sc, LMC_GEP_RESET);

    /*
     * RESET low to force state reset.  This also forces
     * the transmitter clock to be internal, but we expect to reset
     * that later anyway.
     */
    sc->lmc_gpio &= ~(LMC_GEP_RESET);
    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

    /*
     * hold for more than 10 microseconds
     */

    /*
     * stop driving Xilinx-related signals
     */
    lmc_gpio_mkinput(sc, LMC_GEP_RESET);

    /*
     * Call media specific init routine
     */
    sc->lmc_media->init(sc);

    sc->stats.resetCount++;
    lmc_trace(sc->lmc_device, "lmc_reset out");
static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/
{
    lmc_trace(sc->lmc_device, "lmc_dec_reset in");

    /*
     * disable all interrupts
     */
    sc->lmc_intrmask = 0;
    LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask);

    /*
     * Reset the chip with a software reset command.
     * Wait 10 microseconds (actually 50 PCI cycles but at
     * 33MHz that comes to two microseconds but wait a
     * bit longer anyways)
     */
    LMC_CSR_WRITE(sc, csr_busmode, TULIP_BUSMODE_SWRESET);

    sc->lmc_busmode = LMC_CSR_READ(sc, csr_busmode);
    sc->lmc_busmode = 0x00100000;
    sc->lmc_busmode &= ~TULIP_BUSMODE_SWRESET;
    LMC_CSR_WRITE(sc, csr_busmode, sc->lmc_busmode);

    sc->lmc_cmdmode = LMC_CSR_READ(sc, csr_command);

    /*
     *  no ethernet address in frames we write
     *  disable padding (txdesc, padding disable)
     *  ignore runt frames (rdes0 bit 15)
     *  no receiver watchdog or transmitter jabber timer
     *      (csr15 bit 0,14 == 1)
     *  if using 16-bit CRC, turn off CRC (trans desc, crc disable)
     */
    sc->lmc_cmdmode |= ( TULIP_CMD_PROMISCUOUS
                         | TULIP_CMD_FULLDUPLEX
                         | TULIP_CMD_PASSBADPKT
                         | TULIP_CMD_NOHEARTBEAT
                         | TULIP_CMD_PORTSELECT
                         | TULIP_CMD_RECEIVEALL
                         | TULIP_CMD_MUSTBEONE
                       );
    sc->lmc_cmdmode &= ~( TULIP_CMD_OPERMODE
                          | TULIP_CMD_THRESHOLDCTL
                          | TULIP_CMD_STOREFWD
                          | TULIP_CMD_TXTHRSHLDCTL
                        );

    LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);

    /*
     * disable receiver watchdog and transmit jabber
     */
    val = LMC_CSR_READ(sc, csr_sia_general);
    val |= (TULIP_WATCHDOG_TXDISABLE | TULIP_WATCHDOG_RXDISABLE);
    LMC_CSR_WRITE(sc, csr_sia_general, val);

    lmc_trace(sc->lmc_device, "lmc_dec_reset out");
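/*
 * lmc_initcsrs simply records where each 21140 CSR lives: CSR n sits at
 * csr_base + n * csr_size (the probe routine passes a stride of 8).
 */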
static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00*/
                         size_t csr_size)
{
    lmc_trace(sc->lmc_device, "lmc_initcsrs in");
    sc->lmc_csrs.csr_busmode        = csr_base +  0 * csr_size;
    sc->lmc_csrs.csr_txpoll         = csr_base +  1 * csr_size;
    sc->lmc_csrs.csr_rxpoll         = csr_base +  2 * csr_size;
    sc->lmc_csrs.csr_rxlist         = csr_base +  3 * csr_size;
    sc->lmc_csrs.csr_txlist         = csr_base +  4 * csr_size;
    sc->lmc_csrs.csr_status         = csr_base +  5 * csr_size;
    sc->lmc_csrs.csr_command        = csr_base +  6 * csr_size;
    sc->lmc_csrs.csr_intr           = csr_base +  7 * csr_size;
    sc->lmc_csrs.csr_missed_frames  = csr_base +  8 * csr_size;
    sc->lmc_csrs.csr_9              = csr_base +  9 * csr_size;
    sc->lmc_csrs.csr_10             = csr_base + 10 * csr_size;
    sc->lmc_csrs.csr_11             = csr_base + 11 * csr_size;
    sc->lmc_csrs.csr_12             = csr_base + 12 * csr_size;
    sc->lmc_csrs.csr_13             = csr_base + 13 * csr_size;
    sc->lmc_csrs.csr_14             = csr_base + 14 * csr_size;
    sc->lmc_csrs.csr_15             = csr_base + 15 * csr_size;
    lmc_trace(sc->lmc_device, "lmc_initcsrs out");
}
static void lmc_driver_timeout(struct net_device *dev) { /*fold00*/
    unsigned long flags;

    lmc_trace(dev, "lmc_driver_timeout in");

    spin_lock_irqsave(&sc->lmc_lock, flags);

    printk("%s: Xmitter busy|\n", dev->name);

    sc->stats.tx_tbusy_calls++;
    if (jiffies - dev->trans_start < TX_TIMEOUT) {

    /*
     * Chip seems to have locked up.
     * This whips out all our descriptor
     * table and starts from scratch.
     */

    LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO,
                  LMC_CSR_READ (sc, csr_status),
                  sc->stats.tx_ProcTimeout);

    lmc_running_reset (dev);

    LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
    LMC_EVENT_LOG(LMC_EVENT_RESET2,
                  lmc_mii_readreg (sc, 0, 16),
                  lmc_mii_readreg (sc, 0, 17));

    /* restart the tx processes */
    csr6 = LMC_CSR_READ (sc, csr_command);
    LMC_CSR_WRITE (sc, csr_command, csr6 | 0x0002);
    LMC_CSR_WRITE (sc, csr_command, csr6 | 0x2002);

    /* immediate transmit */
    LMC_CSR_WRITE (sc, csr_txpoll, 0);

    sc->stats.tx_errors++;
    sc->stats.tx_ProcTimeout++;         /* -baz */

    dev->trans_start = jiffies;

    spin_unlock_irqrestore(&sc->lmc_lock, flags);

    lmc_trace(dev, "lmc_driver_timeout out");