 * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
 * All rights reserved.  www.lanmedia.com
 * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This code is written by:
 * Andrew Stanley-Jones (asj@cban.com),
 * Rob Braun (bbraun@vix.com),
 * Michael Graff (explorer@vix.com) and
 * Matt Thomas (matt@3am-software.com).
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License version 2, incorporated herein by reference.
 *
 * Driver for the LanMedia LMC5200, LMC5245, LMC1000 and LMC1200 cards.
 *
 * To control link-specific options, lmcctl is required.
 * It can be obtained from ftp.lanmedia.com.
 *
 * Linux uses the device struct lmc_private to pass private information
 * to the driver.
 *
 * The initialization portion of this driver consists of the lmc_reset()
 * and lmc_dec_reset() functions, the LED controls and the lmc_initcsrs()
 * function.
 *
 * The watchdog function runs every second and checks that we still have
 * link and that the timing source is what we expected it to be.  If link
 * is lost, the interface is marked down and we can no longer transmit.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/inet.h>
#include <linux/bitops.h>
#include <asm/processor.h>             /* Processor type for cache alignment. */
#include <asm/uaccess.h>
//#include <asm/spinlock.h>

#define DRIVER_MAJOR_VERSION     1
#define DRIVER_MINOR_VERSION     34
#define DRIVER_SUB_VERSION       0

#define DRIVER_VERSION ((DRIVER_MAJOR_VERSION << 8) + DRIVER_MINOR_VERSION)
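
/*
 * DRIVER_VERSION packs the major number into the high byte and the minor
 * into the low byte: 1.34 becomes (1 << 8) + 34 = 0x0122.
 */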
#include "lmc_ioctl.h"
#include "lmc_debug.h"
#include "lmc_proto.h"

static int LMC_PKT_BUF_SZ = 1542;

static const struct pci_device_id lmc_pci_tbl[] = {
    { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
      PCI_VENDOR_ID_LMC, PCI_ANY_ID },
    { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
      PCI_ANY_ID, PCI_VENDOR_ID_LMC },
    { 0 }
};

MODULE_DEVICE_TABLE(pci, lmc_pci_tbl);
MODULE_LICENSE("GPL v2");
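
/*
 * Both table entries match the same DEC 21140 part; some systems program
 * the PCI subsystem vendor/device IDs swapped (see the note in
 * lmc_init_one), so the LMC ID is accepted in either position.
 */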
static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
                                  struct net_device *dev);
static int lmc_rx(struct net_device *dev);
static int lmc_open(struct net_device *dev);
static int lmc_close(struct net_device *dev);
static struct net_device_stats *lmc_get_stats(struct net_device *dev);
static irqreturn_t lmc_interrupt(int irq, void *dev_instance);
static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, size_t csr_size);
static void lmc_softreset(lmc_softc_t * const);
static void lmc_running_reset(struct net_device *dev);
static int lmc_ifdown(struct net_device * const);
static void lmc_watchdog(unsigned long data);
static void lmc_reset(lmc_softc_t * const sc);
static void lmc_dec_reset(lmc_softc_t * const sc);
static void lmc_driver_timeout(struct net_device *dev);
/*
 * Linux reserves 16 device-specific IOCTLs.  We call them
 * LMCIOC* to control various bits of our world.
 */
int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
{
    lmc_softc_t *sc = dev_to_sc(dev);
    lmc_ctl_t ctl;
    int ret = -EOPNOTSUPP;
    u16 regVal;
    unsigned long flags;

    lmc_trace(dev, "lmc_ioctl in");

    /*
     * Most functions mess with the structure
     * Disable interrupts while we do the polling
     */
    switch (cmd) {
        /*
         * Return current driver state.  Since we keep this up
         * to date internally, just copy it out to the user.
         */
    case LMCIOCGINFO: /*fold01*/
        if (copy_to_user(ifr->ifr_data, &sc->ictl, sizeof(lmc_ctl_t)))
            ret = -EFAULT;
        else
            ret = 0;
        break;

    case LMCIOCSINFO: /*fold01*/
        if (!capable(CAP_NET_ADMIN)) {
            ret = -EPERM;
            break;
        }

        if (dev->flags & IFF_UP) {
            ret = -EBUSY;
            break;
        }

        if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {
            ret = -EFAULT;
            break;
        }

        spin_lock_irqsave(&sc->lmc_lock, flags);
        sc->lmc_media->set_status(sc, &ctl);

        if (ctl.crc_length != sc->ictl.crc_length) {
            sc->lmc_media->set_crc_length(sc, ctl.crc_length);
            if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16)
                sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE;
            else
                sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE;
        }
        spin_unlock_irqrestore(&sc->lmc_lock, flags);

        ret = 0;
        break;

    case LMCIOCIFTYPE: /*fold01*/
        {
            u16 old_type = sc->if_type;
            u16 new_type;

            if (!capable(CAP_NET_ADMIN)) {
                ret = -EPERM;
                break;
            }

            if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u16))) {
                ret = -EFAULT;
                break;
            }

            if (new_type == old_type)
                break;                  /* no change */

            spin_lock_irqsave(&sc->lmc_lock, flags);

            sc->if_type = new_type;
            lmc_proto_attach(sc);
            ret = lmc_proto_open(sc);
            spin_unlock_irqrestore(&sc->lmc_lock, flags);
            break;
        }

    case LMCIOCGETXINFO: /*fold01*/
        spin_lock_irqsave(&sc->lmc_lock, flags);
        sc->lmc_xinfo.Magic0 = 0xBEEFCAFE;

        sc->lmc_xinfo.PciCardType = sc->lmc_cardtype;
        sc->lmc_xinfo.PciSlotNumber = 0;
        sc->lmc_xinfo.DriverMajorVersion = DRIVER_MAJOR_VERSION;
        sc->lmc_xinfo.DriverMinorVersion = DRIVER_MINOR_VERSION;
        sc->lmc_xinfo.DriverSubVersion = DRIVER_SUB_VERSION;
        sc->lmc_xinfo.XilinxRevisionNumber =
            lmc_mii_readreg(sc, 0, 3) & 0xf;
        sc->lmc_xinfo.MaxFrameSize = LMC_PKT_BUF_SZ;
        sc->lmc_xinfo.link_status = sc->lmc_media->get_link_status(sc);
        sc->lmc_xinfo.mii_reg16 = lmc_mii_readreg(sc, 0, 16);
        spin_unlock_irqrestore(&sc->lmc_lock, flags);

        sc->lmc_xinfo.Magic1 = 0xDEADBEEF;

        if (copy_to_user(ifr->ifr_data, &sc->lmc_xinfo,
                         sizeof(struct lmc_xinfo)))
            ret = -EFAULT;
        else
            ret = 0;
        break;

    case LMCIOCGETLMCSTATS:
        spin_lock_irqsave(&sc->lmc_lock, flags);
        if (sc->lmc_cardtype == LMC_CARDTYPE_T1) {
            lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_LSB);
            sc->extra_stats.framingBitErrorCount +=
                lmc_mii_readreg(sc, 0, 18) & 0xff;
            lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_MSB);
            sc->extra_stats.framingBitErrorCount +=
                (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
            lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_LSB);
            sc->extra_stats.lineCodeViolationCount +=
                lmc_mii_readreg(sc, 0, 18) & 0xff;
            lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_MSB);
            sc->extra_stats.lineCodeViolationCount +=
                (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
            lmc_mii_writereg(sc, 0, 17, T1FRAMER_AERR);
            regVal = lmc_mii_readreg(sc, 0, 18) & 0xff;

            sc->extra_stats.lossOfFrameCount +=
                (regVal & T1FRAMER_LOF_MASK) >> 4;
            sc->extra_stats.changeOfFrameAlignmentCount +=
                (regVal & T1FRAMER_COFA_MASK) >> 2;
            sc->extra_stats.severelyErroredFrameCount +=
                regVal & T1FRAMER_SEF_MASK;
        }
        spin_unlock_irqrestore(&sc->lmc_lock, flags);
        if (copy_to_user(ifr->ifr_data, &sc->lmc_device->stats,
                         sizeof(sc->lmc_device->stats)) ||
            copy_to_user(ifr->ifr_data + sizeof(sc->lmc_device->stats),
                         &sc->extra_stats, sizeof(sc->extra_stats)))
            ret = -EFAULT;
        else
            ret = 0;
        break;

    case LMCIOCCLEARLMCSTATS:
        if (!capable(CAP_NET_ADMIN)) {
            ret = -EPERM;
            break;
        }

        spin_lock_irqsave(&sc->lmc_lock, flags);
        memset(&sc->lmc_device->stats, 0, sizeof(sc->lmc_device->stats));
        memset(&sc->extra_stats, 0, sizeof(sc->extra_stats));
        sc->extra_stats.check = STATCHECK;
        sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
            sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
        sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
        spin_unlock_irqrestore(&sc->lmc_lock, flags);

        ret = 0;
        break;

    case LMCIOCSETCIRCUIT: /*fold01*/
        if (!capable(CAP_NET_ADMIN)) {
            ret = -EPERM;
            break;
        }

        if (dev->flags & IFF_UP) {
            ret = -EBUSY;
            break;
        }

        if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {
            ret = -EFAULT;
            break;
        }
        spin_lock_irqsave(&sc->lmc_lock, flags);
        sc->lmc_media->set_circuit_type(sc, ctl.circuit_type);
        sc->ictl.circuit_type = ctl.circuit_type;
        spin_unlock_irqrestore(&sc->lmc_lock, flags);
        ret = 0;
        break;

    case LMCIOCRESET: /*fold01*/
        if (!capable(CAP_NET_ADMIN)) {
            ret = -EPERM;
            break;
        }

        spin_lock_irqsave(&sc->lmc_lock, flags);
        /* Reset driver and bring back to current state */
        printk(" REG16 before reset +%04x\n", lmc_mii_readreg(sc, 0, 16));
        lmc_running_reset(dev);
        printk(" REG16 after reset +%04x\n", lmc_mii_readreg(sc, 0, 16));

        LMC_EVENT_LOG(LMC_EVENT_FORCEDRESET, LMC_CSR_READ(sc, csr_status), lmc_mii_readreg(sc, 0, 16));
        spin_unlock_irqrestore(&sc->lmc_lock, flags);

        ret = 0;
        break;

#ifdef DEBUG
    case LMCIOCDUMPEVENTLOG:
        if (copy_to_user(ifr->ifr_data, &lmcEventLogIndex, sizeof(u32))) {
            ret = -EFAULT;
            break;
        }
        if (copy_to_user(ifr->ifr_data + sizeof(u32), lmcEventLogBuf,
                         sizeof(lmcEventLogBuf)))
            ret = -EFAULT;
        else
            ret = 0;
        break;
#endif /* end ifdef _DBG_EVENTLOG */

    case LMCIOCT1CONTROL: /*fold01*/
        if (sc->lmc_cardtype != LMC_CARDTYPE_T1) {
            ret = -EOPNOTSUPP;
            break;
        }
        break;
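
    /*
     * The Xilinx sub-commands below drive the FPGA configuration pins
     * directly through the Tulip general-purpose port (csr_gp):
     * RESET and DP select programming mode, then MODE, CLK and DATA
     * are toggled to clock the bitstream in one bit at a time.
     */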
    case LMCIOCXILINX: /*fold01*/
        {
            struct lmc_xilinx_control xc; /*fold02*/

            if (!capable(CAP_NET_ADMIN)) {
                ret = -EPERM;
                break;
            }

            /*
             * Stop the transmitter while we restart the hardware
             */
            netif_stop_queue(dev);

            if (copy_from_user(&xc, ifr->ifr_data, sizeof(struct lmc_xilinx_control))) {
                ret = -EFAULT;
                break;
            }

            switch (xc.command) {
            case lmc_xilinx_reset: /*fold02*/
                {
                    u16 mii;
                    int i;

                    spin_lock_irqsave(&sc->lmc_lock, flags);
                    mii = lmc_mii_readreg(sc, 0, 16);

                    /*
                     * Make all of them 0 and make input
                     */
                    lmc_gpio_mkinput(sc, 0xff);

                    /*
                     * make the reset output
                     */
                    lmc_gpio_mkoutput(sc, LMC_GEP_RESET);

                    /*
                     * RESET low to force configuration.  This also forces
                     * the transmitter clock to be internal, but we expect to reset
                     * that later anyway.
                     */
                    sc->lmc_gpio &= ~LMC_GEP_RESET;
                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

                    /*
                     * hold for more than 10 microseconds
                     */
                    sc->lmc_gpio |= LMC_GEP_RESET;
                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

                    /*
                     * stop driving Xilinx-related signals
                     */
                    lmc_gpio_mkinput(sc, 0xff);

                    /* Reset the framer hardware */
                    sc->lmc_media->set_link_status(sc, 1);
                    sc->lmc_media->set_status(sc, NULL);
                    /* lmc_softreset(sc); */

                    for (i = 0; i < 5; i++) {
                        lmc_led_on(sc, LMC_DS3_LED0);
                        lmc_led_off(sc, LMC_DS3_LED0);
                        lmc_led_on(sc, LMC_DS3_LED1);
                        lmc_led_off(sc, LMC_DS3_LED1);
                        lmc_led_on(sc, LMC_DS3_LED3);
                        lmc_led_off(sc, LMC_DS3_LED3);
                        lmc_led_on(sc, LMC_DS3_LED2);
                        lmc_led_off(sc, LMC_DS3_LED2);
                    }
                    spin_unlock_irqrestore(&sc->lmc_lock, flags);

                    ret = 0;
                }
                break;

            case lmc_xilinx_load_prom: /*fold02*/
                {
                    u16 mii;
                    int timeout = 500000;

                    spin_lock_irqsave(&sc->lmc_lock, flags);
                    mii = lmc_mii_readreg(sc, 0, 16);

                    /*
                     * Make all of them 0 and make input
                     */
                    lmc_gpio_mkinput(sc, 0xff);

                    /*
                     * make the reset output
                     */
                    lmc_gpio_mkoutput(sc, LMC_GEP_DP | LMC_GEP_RESET);

                    /*
                     * RESET low to force configuration.  This also forces
                     * the transmitter clock to be internal, but we expect to reset
                     * that later anyway.
                     */
                    sc->lmc_gpio &= ~(LMC_GEP_RESET | LMC_GEP_DP);
                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

                    /*
                     * hold for more than 10 microseconds
                     */
                    sc->lmc_gpio |= LMC_GEP_DP | LMC_GEP_RESET;
                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

                    /*
                     * busy wait for the chip to reset
                     */
                    while ((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
                           (timeout-- > 0))
                        ;

                    /*
                     * stop driving Xilinx-related signals
                     */
                    lmc_gpio_mkinput(sc, 0xff);
                    spin_unlock_irqrestore(&sc->lmc_lock, flags);

                    ret = 0;
                }
                break;

            case lmc_xilinx_load: /*fold02*/
                {
                    char *data;
                    int pos;
                    int timeout = 500000;

                    data = kmalloc(xc.len, GFP_KERNEL);
                    if (!data) {
                        ret = -ENOMEM;
                        break;
                    }

                    if (copy_from_user(data, xc.data, xc.len)) {
                        kfree(data);
                        ret = -EFAULT;
                        break;
                    }

                    printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data);

                    spin_lock_irqsave(&sc->lmc_lock, flags);
                    lmc_gpio_mkinput(sc, 0xff);

                    /*
                     * Clear the Xilinx and start programming from the DEC
                     */
                    sc->lmc_gpio &= ~LMC_GEP_DP;
                    sc->lmc_gpio &= ~LMC_GEP_RESET;
                    sc->lmc_gpio |= LMC_GEP_MODE;
                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

                    lmc_gpio_mkoutput(sc, LMC_GEP_MODE | LMC_GEP_DP | LMC_GEP_RESET);

                    /*
                     * Wait at least 10 us 20 to be safe
                     */

                    /*
                     * Clear reset and activate programming lines
                     */
                    lmc_gpio_mkinput(sc, LMC_GEP_DP | LMC_GEP_RESET);

                    /*
                     * Set LOAD, DATA, Clock to 1
                     */
                    sc->lmc_gpio |= LMC_GEP_MODE;
                    sc->lmc_gpio |= LMC_GEP_DATA;
                    sc->lmc_gpio |= LMC_GEP_CLK;
                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

                    lmc_gpio_mkoutput(sc, LMC_GEP_DATA | LMC_GEP_CLK | LMC_GEP_MODE);

                    /*
                     * busy wait for the chip to reset
                     */
                    while ((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
                           (timeout-- > 0))
                        ;

                    printk(KERN_DEBUG "%s: Waited %d for the Xilinx to clear its memory\n", dev->name, 500000 - timeout);

                    for (pos = 0; pos < xc.len; pos++) {
                        switch (data[pos]) {
                        case 0:
                            sc->lmc_gpio &= ~LMC_GEP_DATA;  /* Data is 0 */
                            break;
                        case 1:
                            sc->lmc_gpio |= LMC_GEP_DATA;   /* Data is 1 */
                            break;
                        default:
                            printk(KERN_WARNING "%s Bad data in xilinx programming data at %d, got %d wanted 0 or 1\n", dev->name, pos, data[pos]);
                            sc->lmc_gpio |= LMC_GEP_DATA;   /* Assume it's 1 */
                        }
                        sc->lmc_gpio &= ~LMC_GEP_CLK;       /* Clock to zero */
                        sc->lmc_gpio |= LMC_GEP_MODE;
                        LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

                        sc->lmc_gpio |= LMC_GEP_CLK;        /* Put the clock back to one */
                        sc->lmc_gpio |= LMC_GEP_MODE;
                        LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
                    }
                    if ((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0) {
                        printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (corrupted data)\n", dev->name);
                    }
                    else if ((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_DP) == 0) {
                        printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (done)\n", dev->name);
                    }
                    else {
                        printk(KERN_DEBUG "%s: Done reprogramming Xilinx, %d bits, good luck!\n", dev->name, pos);
                    }

                    lmc_gpio_mkinput(sc, 0xff);

                    sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
                    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);

                    sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
                    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
                    spin_unlock_irqrestore(&sc->lmc_lock, flags);

                    kfree(data);
                    ret = 0;
                }
                break;
            }

            netif_wake_queue(dev);
        }
        break;

    default: /*fold01*/
        /* If we don't know what to do, give the protocol a shot. */
        ret = lmc_proto_ioctl(sc, ifr, cmd);
        break;
    }

    lmc_trace(dev, "lmc_ioctl out");

    return ret;
}
/* the watchdog process that cruises around */
static void lmc_watchdog (unsigned long data) /*fold00*/
{
    struct net_device *dev = (struct net_device *)data;
    lmc_softc_t *sc = dev_to_sc(dev);
    int link_status;
    u32 ticks;
    unsigned long flags;

    lmc_trace(dev, "lmc_watchdog in");

    spin_lock_irqsave(&sc->lmc_lock, flags);

    if (sc->check != 0xBEAFCAFE) {
        printk("LMC: Corrupt net_device struct, breaking out\n");
        spin_unlock_irqrestore(&sc->lmc_lock, flags);
        return;
    }

    /* Make sure the tx jabber and rx watchdog are off,
     * and the transmit and receive processes are running.
     */
    LMC_CSR_WRITE (sc, csr_15, 0x00000011);
    sc->lmc_cmdmode |= TULIP_CMD_TXRUN | TULIP_CMD_RXRUN;
    LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);

    LMC_EVENT_LOG(LMC_EVENT_WATCHDOG, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16));

    /* --- begin time out check -----------------------------------
     * check for a transmit interrupt timeout
     * Has the packet xmt vs xmt serviced threshold been exceeded */
    if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
        sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
        sc->tx_TimeoutInd == 0)
    {
        /* wait for the watchdog to come around again */
        sc->tx_TimeoutInd = 1;
    }
    else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
             sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
             sc->tx_TimeoutInd)
    {
        LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0);

        sc->tx_TimeoutDisplay = 1;
        sc->extra_stats.tx_TimeoutCnt++;

        /* DEC chip is stuck, hit it with a RESET!!!! */
        lmc_running_reset (dev);

        /* look at receive & transmit process state to make sure they are running */
        LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);

        /* look at: DSR - 02 for Reg 16 */
        LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg (sc, 0, 16), lmc_mii_readreg (sc, 0, 17));

        /* reset the transmit timeout detection flag */
        sc->tx_TimeoutInd = 0;
        sc->lastlmc_taint_tx = sc->lmc_taint_tx;
        sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
    }
    else
    {
        sc->tx_TimeoutInd = 0;
        sc->lastlmc_taint_tx = sc->lmc_taint_tx;
        sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
    }

    /* --- end time out check ----------------------------------- */

    link_status = sc->lmc_media->get_link_status (sc);

    /*
     * hardware level link lost, but the interface is marked as up.
     */
    if ((link_status == 0) && (sc->last_link_status != 0)) {
        printk(KERN_WARNING "%s: hardware/physical link down\n", dev->name);
        sc->last_link_status = 0;
        /* lmc_reset (sc); Why reset??? The link can go down ok */

        /* Inform the world that link has been lost */
        netif_carrier_off(dev);
    }

    /*
     * hardware link is up, but the interface is marked as down.
     * Bring it back up again.
     */
    if (link_status != 0 && sc->last_link_status == 0) {
        printk(KERN_WARNING "%s: hardware/physical link up\n", dev->name);
        sc->last_link_status = 1;
        /* lmc_reset (sc); Again why reset??? */

        netif_carrier_on(dev);
    }

    /* Call media specific watchdog functions */
    sc->lmc_media->watchdog(sc);

    /*
     * Poke the transmitter to make sure it
     * never stops, even if we run out of mem
     */
    LMC_CSR_WRITE(sc, csr_rxpoll, 0);

    /*
     * Check for code that failed
     * and try and fix it as appropriate
     */
    if (sc->failed_ring == 1) {
        /*
         * Failed to setup the recv/xmit rings
         */
        sc->failed_ring = 0;
    }

    if (sc->failed_recv_alloc == 1) {
        /*
         * We failed to alloc mem in the
         * interrupt handler, go through the rings
         */
        sc->failed_recv_alloc = 0;
    }

    /*
     * remember the timer value
     */
    ticks = LMC_CSR_READ (sc, csr_gp_timer);
    LMC_CSR_WRITE (sc, csr_gp_timer, 0xffffffffUL);
    sc->ictl.ticks = 0x0000ffff - (ticks & 0x0000ffff);

    /*
     * restart this timer.
     */
    sc->timer.expires = jiffies + (HZ);
    add_timer (&sc->timer);

    spin_unlock_irqrestore(&sc->lmc_lock, flags);

    lmc_trace(dev, "lmc_watchdog out");
}
static int lmc_attach(struct net_device *dev, unsigned short encoding,
                      unsigned short parity)
{
    if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
        return 0;
    return -EINVAL;
}

static const struct net_device_ops lmc_ops = {
    .ndo_open       = lmc_open,
    .ndo_stop       = lmc_close,
    .ndo_change_mtu = hdlc_change_mtu,
    .ndo_start_xmit = hdlc_start_xmit,
    .ndo_do_ioctl   = lmc_ioctl,
    .ndo_tx_timeout = lmc_driver_timeout,
    .ndo_get_stats  = lmc_get_stats,
};
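
/*
 * Probe one Tulip-based LMC card: enable and map it, register it as a
 * generic HDLC device, then identify the media type (HSSI, DS3, SSI or T1)
 * from the PCI subsystem ID.
 */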
static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    lmc_softc_t *sc;
    struct net_device *dev;
    u16 subdevice;
    u16 AdapModelNum;
    int err;
    static int cards_found;

    /* lmc_trace(dev, "lmc_init_one in"); */

    err = pcim_enable_device(pdev);
    if (err) {
        printk(KERN_ERR "lmc: pci enable failed: %d\n", err);
        return err;
    }

    err = pci_request_regions(pdev, "lmc");
    if (err) {
        printk(KERN_ERR "lmc: pci_request_region failed\n");
        return err;
    }

    /*
     * Allocate our own device structure
     */
    sc = devm_kzalloc(&pdev->dev, sizeof(lmc_softc_t), GFP_KERNEL);
    if (!sc)
        return -ENOMEM;

    dev = alloc_hdlcdev(sc);
    if (!dev) {
        printk(KERN_ERR "lmc:alloc_netdev for device failed\n");
        return -ENOMEM;
    }

    dev->type = ARPHRD_HDLC;
    dev_to_hdlc(dev)->xmit = lmc_start_xmit;
    dev_to_hdlc(dev)->attach = lmc_attach;
    dev->netdev_ops = &lmc_ops;
    dev->watchdog_timeo = HZ;           /* 1 second */
    dev->tx_queue_len = 100;
    sc->lmc_device = dev;
    sc->name = dev->name;
    sc->if_type = LMC_PPP;
    sc->check = 0xBEAFCAFE;
    dev->base_addr = pci_resource_start(pdev, 0);
    dev->irq = pdev->irq;
    pci_set_drvdata(pdev, dev);
    SET_NETDEV_DEV(dev, &pdev->dev);

    /*
     * This will get the protocol layer ready and do any 1 time init's
     * Must have a valid sc and dev structure
     */
    lmc_proto_attach(sc);

    /* Init the spin lock so we can call it later */
    spin_lock_init(&sc->lmc_lock);
    pci_set_master(pdev);

    printk(KERN_INFO "%s: detected at %lx, irq %d\n", dev->name,
           dev->base_addr, dev->irq);

    err = register_hdlc_device(dev);
    if (err) {
        printk(KERN_ERR "%s: register_netdev failed.\n", dev->name);
        free_netdev(dev);
        return err;
    }

    sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN;
    sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;

    /*
     * Check either the subvendor or the subdevice; some systems reverse
     * the setting in the BIOS (seems to be version and arch dependent).
     * Work around the error by exchanging the two values.
     */
    if ((subdevice = pdev->subsystem_device) == PCI_VENDOR_ID_LMC)
        subdevice = pdev->subsystem_vendor;

    switch (subdevice) {
    case PCI_DEVICE_ID_LMC_HSSI:
        printk(KERN_INFO "%s: LMC HSSI\n", dev->name);
        sc->lmc_cardtype = LMC_CARDTYPE_HSSI;
        sc->lmc_media = &lmc_hssi_media;
        break;
    case PCI_DEVICE_ID_LMC_DS3:
        printk(KERN_INFO "%s: LMC DS3\n", dev->name);
        sc->lmc_cardtype = LMC_CARDTYPE_DS3;
        sc->lmc_media = &lmc_ds3_media;
        break;
    case PCI_DEVICE_ID_LMC_SSI:
        printk(KERN_INFO "%s: LMC SSI\n", dev->name);
        sc->lmc_cardtype = LMC_CARDTYPE_SSI;
        sc->lmc_media = &lmc_ssi_media;
        break;
    case PCI_DEVICE_ID_LMC_T1:
        printk(KERN_INFO "%s: LMC T1\n", dev->name);
        sc->lmc_cardtype = LMC_CARDTYPE_T1;
        sc->lmc_media = &lmc_t1_media;
        break;
    default:
        printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name);
        break;
    }

    lmc_initcsrs (sc, dev->base_addr, 8);

    lmc_gpio_mkinput (sc, 0xff);
    sc->lmc_gpio = 0;                   /* drive no signals yet */

    sc->lmc_media->defaults (sc);

    sc->lmc_media->set_link_status (sc, LMC_LINK_UP);

    /* verify that the PCI Sub System ID matches the Adapter Model number
     * from the MII register
     */
    AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4;

    if ((AdapModelNum != LMC_ADAP_T1 ||     /* detect LMC1200 */
         subdevice != PCI_DEVICE_ID_LMC_T1) &&
        (AdapModelNum != LMC_ADAP_SSI ||    /* detect LMC1000 */
         subdevice != PCI_DEVICE_ID_LMC_SSI) &&
        (AdapModelNum != LMC_ADAP_DS3 ||    /* detect LMC5245 */
         subdevice != PCI_DEVICE_ID_LMC_DS3) &&
        (AdapModelNum != LMC_ADAP_HSSI ||   /* detect LMC5200 */
         subdevice != PCI_DEVICE_ID_LMC_HSSI))
        printk(KERN_WARNING "%s: Model number (%d) miscompare for PCI"
               " Subsystem ID = 0x%04x\n",
               dev->name, AdapModelNum, subdevice);

    LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL);

    sc->board_idx = cards_found++;
    sc->extra_stats.check = STATCHECK;
    sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
        sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
    sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;

    sc->last_link_status = 0;

    lmc_trace(dev, "lmc_init_one out");
    return 0;
}
/*
 * Called from pci when removing module.
 */
static void lmc_remove_one(struct pci_dev *pdev)
{
    struct net_device *dev = pci_get_drvdata(pdev);

    if (dev) {
        printk(KERN_DEBUG "%s: removing...\n", dev->name);
        unregister_hdlc_device(dev);
        free_netdev(dev);
    }
}
/* After this is called, packets can be sent.
 * Does not initialize the addresses
 */
static int lmc_open(struct net_device *dev)
{
    lmc_softc_t *sc = dev_to_sc(dev);
    int err;

    lmc_trace(dev, "lmc_open in");

    lmc_led_on(sc, LMC_DS3_LED0);

    LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);
    LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16),
                  lmc_mii_readreg(sc, 0, 17));

    if (sc->lmc_ok) {
        lmc_trace(dev, "lmc_open lmc_ok out");
        return 0;
    }

    /* Since we have to use PCI bus, this should work on x86,alpha,ppc */
    if (request_irq (dev->irq, lmc_interrupt, IRQF_SHARED, dev->name, dev)) {
        printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq);
        lmc_trace(dev, "lmc_open irq failed out");
        return -EAGAIN;
    }
    sc->got_irq = 1;

    /* Assert Terminal Active */
    sc->lmc_miireg16 |= LMC_MII16_LED_ALL;
    sc->lmc_media->set_link_status (sc, LMC_LINK_UP);

    /*
     * reset to last state.
     */
    sc->lmc_media->set_status (sc, NULL);

    /* setup default bits to be used in tulip_desc_t transmit descriptor */
    sc->TxDescriptControlInit = ( LMC_TDES_INTERRUPT_ON_COMPLETION
                                  | LMC_TDES_FIRST_SEGMENT
                                  | LMC_TDES_LAST_SEGMENT
                                  | LMC_TDES_SECOND_ADDR_CHAINED
                                  | LMC_TDES_DISABLE_PADDING
                                );

    if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16) {
        /* disable 32 bit CRC generated by ASIC */
        sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE;
    }
    sc->lmc_media->set_crc_length(sc, sc->ictl.crc_length);
    /* Acknowledge the Terminal Active and light LEDs */

    /* dev->flags |= IFF_UP; */

    if ((err = lmc_proto_open(sc)) != 0)
        return err;

    netif_start_queue(dev);
    sc->extra_stats.tx_tbusy0++;

    /*
     * select what interrupts we want to get
     */
    sc->lmc_intrmask = 0;
    /* Should be using the default interrupt mask defined in the .h file. */
    sc->lmc_intrmask |= (TULIP_STS_NORMALINTR
                         | TULIP_STS_ABNRMLINTR
                         | TULIP_STS_SYSERROR
                         | TULIP_STS_TXSTOPPED
                         | TULIP_STS_TXUNDERFLOW
                         | TULIP_STS_RXSTOPPED
                        );
    LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);

    sc->lmc_cmdmode |= TULIP_CMD_TXRUN;
    sc->lmc_cmdmode |= TULIP_CMD_RXRUN;
    LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);

    sc->lmc_ok = 1;                     /* Run watchdog */

    /*
     * Set the if up now - pfb
     */
    sc->last_link_status = 1;

    /*
     * Setup a timer for the watchdog on probe, and start it running.
     * Since lmc_ok == 0, it will be a NOP for now.
     */
    init_timer (&sc->timer);
    sc->timer.expires = jiffies + HZ;
    sc->timer.data = (unsigned long) dev;
    sc->timer.function = lmc_watchdog;
    add_timer (&sc->timer);

    lmc_trace(dev, "lmc_open out");

    return 0;
}
/* Total reset to compensate for the AdTran DSU doing bad things
 */
static void lmc_running_reset (struct net_device *dev) /*fold00*/
{
    lmc_softc_t *sc = dev_to_sc(dev);

    lmc_trace(dev, "lmc_running_reset in");

    /* stop interrupts */
    /* Clear the interrupt mask */
    LMC_CSR_WRITE (sc, csr_intr, 0x00000000);

    lmc_dec_reset (sc);
    lmc_reset (sc);
    lmc_softreset (sc);
    /* sc->lmc_miireg16 |= LMC_MII16_LED_ALL; */
    sc->lmc_media->set_link_status (sc, 1);
    sc->lmc_media->set_status (sc, NULL);

    netif_wake_queue(dev);

    sc->extra_stats.tx_tbusy0++;

    sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK;
    LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);

    sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN);
    LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);

    lmc_trace(dev, "lmc_running_reset out");
}
/* This is what is called when you ifconfig down a device.
 * This disables the timer for the watchdog and keepalives,
 * and disables the irq for dev.
 */
static int lmc_close(struct net_device *dev)
{
    /* not calling release_region() as we should */
    lmc_softc_t *sc = dev_to_sc(dev);

    lmc_trace(dev, "lmc_close in");

    sc->lmc_media->set_link_status (sc, 0);
    del_timer (&sc->timer);
    lmc_proto_close(sc);
    lmc_ifdown (dev);

    lmc_trace(dev, "lmc_close out");

    return 0;
}
/* Ends the transfer of packets */
/* When the interface goes down, this is called */
static int lmc_ifdown (struct net_device *dev) /*fold00*/
{
    lmc_softc_t *sc = dev_to_sc(dev);
    u32 csr6;
    int i;

    lmc_trace(dev, "lmc_ifdown in");

    /* Don't let anything else go on right now */
    netif_stop_queue(dev);
    sc->extra_stats.tx_tbusy1++;

    /* stop interrupts */
    /* Clear the interrupt mask */
    LMC_CSR_WRITE (sc, csr_intr, 0x00000000);

    /* Stop Tx and Rx on the chip */
    csr6 = LMC_CSR_READ (sc, csr_command);
    csr6 &= ~LMC_DEC_ST;                /* Turn off the Transmission bit */
    csr6 &= ~LMC_DEC_SR;                /* Turn off the Receive bit */
    LMC_CSR_WRITE (sc, csr_command, csr6);

    sc->lmc_device->stats.rx_missed_errors +=
        LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;

    /* release the interrupt */
    if (sc->got_irq == 1) {
        free_irq (dev->irq, dev);
        sc->got_irq = 0;
    }

    /* free skbuffs in the Rx queue */
    for (i = 0; i < LMC_RXDESCS; i++)
    {
        struct sk_buff *skb = sc->lmc_rxq[i];
        sc->lmc_rxq[i] = NULL;
        sc->lmc_rxring[i].status = 0;
        sc->lmc_rxring[i].length = 0;
        sc->lmc_rxring[i].buffer1 = 0xDEADBEEF;
        if (skb != NULL)
            dev_kfree_skb(skb);
        sc->lmc_rxq[i] = NULL;
    }

    for (i = 0; i < LMC_TXDESCS; i++)
    {
        if (sc->lmc_txq[i] != NULL)
            dev_kfree_skb(sc->lmc_txq[i]);
        sc->lmc_txq[i] = NULL;
    }

    lmc_led_off (sc, LMC_MII16_LED_ALL);

    netif_wake_queue(dev);
    sc->extra_stats.tx_tbusy0++;

    lmc_trace(dev, "lmc_ifdown out");

    return 0;
}
/* Interrupt handling routine.  This will take an incoming packet, or clean
 * up after a transmit.
 */
static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
{
    struct net_device *dev = (struct net_device *) dev_instance;
    lmc_softc_t *sc = dev_to_sc(dev);
    u32 csr;
    u32 firstcsr;
    int i;
    u32 stat;
    int badtx;
    unsigned int handled = 0;
    int max_work = LMC_RXDESCS;

    lmc_trace(dev, "lmc_interrupt in");

    spin_lock(&sc->lmc_lock);

    /*
     * Read the csr to find what interrupts we have (if any)
     */
    csr = LMC_CSR_READ (sc, csr_status);

    /*
     * Make sure this is our interrupt
     */
    if ( ! (csr & sc->lmc_intrmask)) {
        goto lmc_int_fail_out;
    }

    firstcsr = csr;

    /* always go through this loop at least once */
    while (csr & sc->lmc_intrmask) {
        handled = 1;

        /*
         * Clear interrupt bits, we handle all cases below
         */
        LMC_CSR_WRITE (sc, csr_status, csr);

        /*
         * One of:
         * - Transmit process timed out CSR5<1>
         * - Transmit jabber timeout    CSR5<3>
         * - Transmit underflow         CSR5<5>
         * - Transmit Receiver buffer unavailable CSR5<7>
         * - Receive process stopped    CSR5<8>
         * - Receive watchdog timeout   CSR5<9>
         * - Early transmit interrupt   CSR5<10>
         *
         * Is this really right? Should we do a running reset for jabber?
         * (being a WAN card and all)
         */
        if (csr & TULIP_STS_ABNRMLINTR) {
            lmc_running_reset (dev);
            break;
        }

        if (csr & TULIP_STS_RXINTR) {
            lmc_trace(dev, "rx interrupt");
            lmc_rx (dev);
        }

        if (csr & (TULIP_STS_TXINTR | TULIP_STS_TXNOBUF | TULIP_STS_TXSTOPPED)) {

            int n_compl = 0;
            /* reset the transmit timeout detection flag -baz */
            sc->extra_stats.tx_NoCompleteCnt = 0;

            badtx = sc->lmc_taint_tx;
            i = badtx % LMC_TXDESCS;

            while ((badtx < sc->lmc_next_tx)) {
                stat = sc->lmc_txring[i].status;

                LMC_EVENT_LOG (LMC_EVENT_XMTINT, stat,
                               sc->lmc_txring[i].length);
                /*
                 * If bit 31 is 1 the tulip owns it; break out of the loop.
                 */
                if (stat & 0x80000000)
                    break;

                n_compl++;              /* i.e., have an empty slot in ring */

                /*
                 * If we have no skbuff or have cleared it
                 * already, continue to the next buffer.
                 */
                if (sc->lmc_txq[i] == NULL)
                    continue;

                /*
                 * Check the total error summary to look for any errors
                 */
                if (stat & 0x8000) {
                    sc->lmc_device->stats.tx_errors++;
                    sc->lmc_device->stats.tx_aborted_errors++;
                    sc->lmc_device->stats.tx_carrier_errors++;
                    sc->lmc_device->stats.tx_window_errors++;
                    sc->lmc_device->stats.tx_fifo_errors++;
                } else {
                    sc->lmc_device->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff;

                    sc->lmc_device->stats.tx_packets++;
                }

                /* dev_kfree_skb(sc->lmc_txq[i]); */
                dev_kfree_skb_irq(sc->lmc_txq[i]);
                sc->lmc_txq[i] = NULL;

                badtx++;
                i = badtx % LMC_TXDESCS;
            }

            if (sc->lmc_next_tx - badtx > LMC_TXDESCS)
            {
                printk ("%s: out of sync pointer\n", dev->name);
                badtx += LMC_TXDESCS;
            }
            LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0);
            netif_wake_queue(dev);
            sc->extra_stats.tx_tbusy0++;

            sc->extra_stats.dirtyTx = badtx;
            sc->extra_stats.lmc_next_tx = sc->lmc_next_tx;
            sc->extra_stats.lmc_txfull = sc->lmc_txfull;

            sc->lmc_taint_tx = badtx;

            /*
             * Why was there a break here???
             */
        }                               /* end handle transmit interrupt */

        if (csr & TULIP_STS_SYSERROR) {
            u32 error;

            printk (KERN_WARNING "%s: system bus error csr: %#8.8x\n", dev->name, csr);
            error = csr >> 23 & 0x7;
            if (error == 0x000)
                printk(KERN_WARNING "%s: Parity Fault (bad)\n", dev->name);
            else if (error == 0x001)
                printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
            else if (error == 0x002)
                printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
            else
                printk(KERN_WARNING "%s: This bus error code was supposed to be reserved!\n", dev->name);

            LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
            LMC_EVENT_LOG(LMC_EVENT_RESET2,
                          lmc_mii_readreg (sc, 0, 16),
                          lmc_mii_readreg (sc, 0, 17));
        }

        if (max_work-- <= 0)
            break;

        /*
         * Get current csr status to make sure
         * we've cleared all interrupts
         */
        csr = LMC_CSR_READ (sc, csr_status);
    }                                   /* end interrupt loop */
    LMC_EVENT_LOG(LMC_EVENT_INT, firstcsr, csr);

lmc_int_fail_out:

    spin_unlock(&sc->lmc_lock);

    lmc_trace(dev, "lmc_interrupt out");
    return IRQ_RETVAL(handled);
}
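
/*
 * Queue one frame on the transmit ring.  Per the branch comments inside,
 * a completion interrupt is requested only when the ring reaches the
 * halfway mark or is about to fill; most descriptors are queued without
 * one, which keeps the interrupt rate down under light load.
 */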
static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
                                  struct net_device *dev)
{
    lmc_softc_t *sc = dev_to_sc(dev);
    u32 flag;
    int entry;
    unsigned long flags;

    lmc_trace(dev, "lmc_start_xmit in");

    spin_lock_irqsave(&sc->lmc_lock, flags);

    /* normal path, tbusy known to be zero */

    entry = sc->lmc_next_tx % LMC_TXDESCS;

    sc->lmc_txq[entry] = skb;
    sc->lmc_txring[entry].buffer1 = virt_to_bus (skb->data);

    LMC_CONSOLE_LOG("xmit", skb->data, skb->len);

    /* If the queue is less than half full, don't interrupt */
    if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS / 2)
    {
        /* Do not interrupt on completion of this packet */
        netif_wake_queue(dev);
    }
    else if (sc->lmc_next_tx - sc->lmc_taint_tx == LMC_TXDESCS / 2)
    {
        /* This generates an interrupt on completion of this packet */
        netif_wake_queue(dev);
    }
    else if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS - 1)
    {
        /* Do not interrupt on completion of this packet */
        netif_wake_queue(dev);
    }
    else
    {
        /* This generates an interrupt on completion of this packet */
        netif_stop_queue(dev);
    }

    flag = LMC_TDES_INTERRUPT_ON_COMPLETION;

    if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1)
    {                                   /* ring full, go busy */
        netif_stop_queue(dev);
        sc->extra_stats.tx_tbusy1++;
        LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0);
    }

    if (entry == LMC_TXDESCS - 1)       /* last descriptor in ring */
        flag |= LMC_TDES_END_OF_RING;   /* flag as such for Tulip */

    /* don't pad small packets either */
    flag = sc->lmc_txring[entry].length = (skb->len) | flag |
                                          sc->TxDescriptControlInit;

    /* set the transmit timeout flag to be checked in
     * the watchdog timer handler. -baz
     */
    sc->extra_stats.tx_NoCompleteCnt++;

    /* give ownership to the chip */
    LMC_EVENT_LOG(LMC_EVENT_XMT, flag, entry);
    sc->lmc_txring[entry].status = 0x80000000;
    sc->lmc_next_tx++;

    LMC_CSR_WRITE (sc, csr_txpoll, 0);

    spin_unlock_irqrestore(&sc->lmc_lock, flags);

    lmc_trace(dev, "lmc_start_xmit out");
    return NETDEV_TX_OK;
}
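
/*
 * Service the receive ring.  Frames larger than roughly 3/4 of LMC_MTU are
 * handed straight up and the ring slot is refilled with a fresh skb; smaller
 * frames are copied into a right-sized skb so the full-size ring buffer can
 * be reused in place.
 */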
static int lmc_rx(struct net_device *dev)
{
    lmc_softc_t *sc = dev_to_sc(dev);
    int i;
    int rx_work_limit = LMC_RXDESCS;
    unsigned int next_rx;
    int rxIntLoopCnt;                   /* debug -baz */
    int localLengthErrCnt = 0;
    u32 stat;
    unsigned int len;
    struct sk_buff *skb, *nsb;

    lmc_trace(dev, "lmc_rx in");

    lmc_led_on(sc, LMC_DS3_LED3);

    rxIntLoopCnt = 0;                   /* debug -baz */

    i = sc->lmc_next_rx % LMC_RXDESCS;
    next_rx = sc->lmc_next_rx;

    while (((stat = sc->lmc_rxring[i].status) & LMC_RDES_OWN_BIT) != DESC_OWNED_BY_DC21X4)
    {
        rxIntLoopCnt++;                 /* debug -baz */
        len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER);
        if ((stat & 0x0300) != 0x0300) {  /* Check first segment and last segment */
            if ((stat & 0x0000ffff) != 0x7fff) {
                /* Oversized frame */
                sc->lmc_device->stats.rx_length_errors++;
                goto skip_packet;
            }
        }

        if (stat & 0x00000008) {        /* Catch a dribbling bit error */
            sc->lmc_device->stats.rx_errors++;
            sc->lmc_device->stats.rx_frame_errors++;
            goto skip_packet;
        }

        if (stat & 0x00000004) {        /* Catch a CRC error by the Xilinx */
            sc->lmc_device->stats.rx_errors++;
            sc->lmc_device->stats.rx_crc_errors++;
            goto skip_packet;
        }

        if (len > LMC_PKT_BUF_SZ) {
            sc->lmc_device->stats.rx_length_errors++;
            localLengthErrCnt++;
            goto skip_packet;
        }

        if (len < sc->lmc_crcSize + 2) {
            sc->lmc_device->stats.rx_length_errors++;
            sc->extra_stats.rx_SmallPktCnt++;
            localLengthErrCnt++;
            goto skip_packet;
        }

        if (stat & 0x00004000) {
            printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name);
        }

        len -= sc->lmc_crcSize;

        skb = sc->lmc_rxq[i];

        /*
         * We ran out of memory at some point
         * just allocate an skb buff and continue.
         */
        if (!skb) {
            nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
            if (nsb) {
                sc->lmc_rxq[i] = nsb;
                sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
            }
            sc->failed_recv_alloc = 1;
            goto skip_packet;
        }

        sc->lmc_device->stats.rx_packets++;
        sc->lmc_device->stats.rx_bytes += len;

        LMC_CONSOLE_LOG("recv", skb->data, len);

        /*
         * I'm not sure of the sanity of this
         * Packets could be arriving at a constant
         * 44.210mbits/sec and we're going to copy
         * them into a new buffer??
         */
        if (len > (LMC_MTU - (LMC_MTU >> 2))) {  /* len > LMC_MTU * 0.75 */
            /*
             * If it's a large packet don't copy it just hand it up
             */
give_it_anyways:

            sc->lmc_rxq[i] = NULL;
            sc->lmc_rxring[i].buffer1 = 0x0;

            skb->protocol = lmc_proto_type(sc, skb);
            skb_reset_mac_header(skb);
            /* skb_reset_network_header(skb); */

            lmc_proto_netif(sc, skb);

            /*
             * This skb will be destroyed by the upper layers, make a new one
             */
            nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
            if (nsb) {
                sc->lmc_rxq[i] = nsb;
                sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
                /* Transferred to 21140 below */
            }
            else {
                /*
                 * We've run out of memory, stop trying to allocate
                 * memory and exit the interrupt handler
                 *
                 * The chip may run out of receivers and stop
                 * in which case we'll try to allocate the buffer
                 * again. (once a second)
                 */
                sc->extra_stats.rx_BuffAllocErr++;
                LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
                sc->failed_recv_alloc = 1;
                goto skip_out_of_mem;
            }
        }
        else {
            nsb = dev_alloc_skb(len);
            if (!nsb) {
                goto give_it_anyways;
            }
            skb_copy_from_linear_data(skb, skb_put(nsb, len), len);

            nsb->protocol = lmc_proto_type(sc, nsb);
            skb_reset_mac_header(nsb);
            /* skb_reset_network_header(nsb); */

            lmc_proto_netif(sc, nsb);
        }

skip_packet:
        LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
        sc->lmc_rxring[i].status = DESC_OWNED_BY_DC21X4;

        sc->lmc_next_rx++;
        i = sc->lmc_next_rx % LMC_RXDESCS;
        rx_work_limit--;
        if (rx_work_limit < 0)
            break;
    }

    /* detect condition for LMC1000 where DSU cable attaches and fills
     * descriptors with bogus packets
     *
    if (localLengthErrCnt > LMC_RXDESCS - 3) {
        sc->extra_stats.rx_BadPktSurgeCnt++;
        LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, localLengthErrCnt,
                      sc->extra_stats.rx_BadPktSurgeCnt);
    } */

    /* save max count of receive descriptors serviced */
    if (rxIntLoopCnt > sc->extra_stats.rxIntLoopCnt)
        sc->extra_stats.rxIntLoopCnt = rxIntLoopCnt;    /* debug -baz */

    if (rxIntLoopCnt == 0)
    {
        for (i = 0; i < LMC_RXDESCS; i++)
        {
            if ((sc->lmc_rxring[i].status & LMC_RDES_OWN_BIT)
                != DESC_OWNED_BY_DC21X4)
            {
                rxIntLoopCnt++;
            }
        }
        LMC_EVENT_LOG(LMC_EVENT_RCVEND, rxIntLoopCnt, 0);
    }

    lmc_led_off(sc, LMC_DS3_LED3);

skip_out_of_mem:
    lmc_trace(dev, "lmc_rx out");

    return 0;
}
static struct net_device_stats *lmc_get_stats(struct net_device *dev)
{
    lmc_softc_t *sc = dev_to_sc(dev);
    unsigned long flags;

    lmc_trace(dev, "lmc_get_stats in");

    spin_lock_irqsave(&sc->lmc_lock, flags);

    sc->lmc_device->stats.rx_missed_errors += LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;

    spin_unlock_irqrestore(&sc->lmc_lock, flags);

    lmc_trace(dev, "lmc_get_stats out");

    return &sc->lmc_device->stats;
}
static struct pci_driver lmc_driver = {
    .name       = "lmc",
    .id_table   = lmc_pci_tbl,
    .probe      = lmc_init_one,
    .remove     = lmc_remove_one,
};

module_pci_driver(lmc_driver);
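
/*
 * MII management access is bit-banged through CSR9: each command bit is
 * placed on the MDIO data line and latched by toggling the clock bit
 * (0x10000); lmc_mii_readreg then clocks the 16-bit result back in one bit
 * at a time.
 */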
unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/
{
    int i;
    int command = (0xf6 << 10) | (devaddr << 5) | regno;
    int retval = 0;

    lmc_trace(sc->lmc_device, "lmc_mii_readreg in");

    lmc_trace(sc->lmc_device, "lmc_mii_readreg: done sync");

    for (i = 15; i >= 0; i--)
    {
        int dataval = (command & (1 << i)) ? 0x20000 : 0;

        LMC_CSR_WRITE (sc, csr_9, dataval);
        /* __SLOW_DOWN_IO; */
        LMC_CSR_WRITE (sc, csr_9, dataval | 0x10000);
        /* __SLOW_DOWN_IO; */
    }

    lmc_trace(sc->lmc_device, "lmc_mii_readreg: done1");

    for (i = 19; i > 0; i--)
    {
        LMC_CSR_WRITE (sc, csr_9, 0x40000);
        /* __SLOW_DOWN_IO; */
        retval = (retval << 1) | ((LMC_CSR_READ (sc, csr_9) & 0x80000) ? 1 : 0);
        LMC_CSR_WRITE (sc, csr_9, 0x40000 | 0x10000);
        /* __SLOW_DOWN_IO; */
    }

    lmc_trace(sc->lmc_device, "lmc_mii_readreg out");

    return (retval >> 1) & 0xffff;
}
void lmc_mii_writereg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno, unsigned data) /*fold00*/
{
    int i;
    int command = (0x5002 << 16) | (devaddr << 23) | (regno << 18) | data;

    lmc_trace(sc->lmc_device, "lmc_mii_writereg in");

    for (i = 31; i >= 0; i--)
    {
        int datav;

        if (command & (1 << i))
            datav = 0x20000;
        else
            datav = 0x00000;

        LMC_CSR_WRITE (sc, csr_9, datav);
        /* __SLOW_DOWN_IO; */
        LMC_CSR_WRITE (sc, csr_9, (datav | 0x10000));
        /* __SLOW_DOWN_IO; */
    }

    LMC_CSR_WRITE (sc, csr_9, 0x40000);
    /* __SLOW_DOWN_IO; */
    LMC_CSR_WRITE (sc, csr_9, 0x50000);
    /* __SLOW_DOWN_IO; */

    lmc_trace(sc->lmc_device, "lmc_mii_writereg out");
}
static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/
{
    int i;

    lmc_trace(sc->lmc_device, "lmc_softreset in");

    /* Initialize the receive rings and buffers. */
    sc->lmc_next_rx = 0;
    sc->lmc_next_tx = 0;
    sc->lmc_taint_rx = 0;
    sc->lmc_taint_tx = 0;

    /*
     * Setup each one of the receiver buffers
     * allocate an skbuff for each one, setup the descriptor table
     * and point each buffer at the next one
     */
    for (i = 0; i < LMC_RXDESCS; i++)
    {
        struct sk_buff *skb;

        if (sc->lmc_rxq[i] == NULL)
        {
            skb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
            if (skb == NULL) {
                printk(KERN_WARNING "%s: Failed to allocate receiver ring, will try again\n", sc->name);
                sc->failed_ring = 1;
                break;
            }
            sc->lmc_rxq[i] = skb;
        }
        else
        {
            skb = sc->lmc_rxq[i];
        }

        skb->dev = sc->lmc_device;

        /* owned by 21140 */
        sc->lmc_rxring[i].status = 0x80000000;

        /* used to be PKT_BUF_SZ now uses skb since we lose some to head room */
        sc->lmc_rxring[i].length = skb_tailroom(skb);

        /* use to be tail which is dumb since you're thinking why write
         * to the end of the packet, but since there's nothing there tail == data
         */
        sc->lmc_rxring[i].buffer1 = virt_to_bus (skb->data);

        /* This is fair since the structure is static and we have the next address */
        sc->lmc_rxring[i].buffer2 = virt_to_bus (&sc->lmc_rxring[i + 1]);
    }

    sc->lmc_rxring[i - 1].length |= 0x02000000;                      /* Set end of buffers flag */
    sc->lmc_rxring[i - 1].buffer2 = virt_to_bus(&sc->lmc_rxring[0]); /* Point back to the start */
    LMC_CSR_WRITE (sc, csr_rxlist, virt_to_bus (sc->lmc_rxring));    /* write base address */

    /* Initialize the transmit rings and buffers */
    for (i = 0; i < LMC_TXDESCS; i++)
    {
        if (sc->lmc_txq[i] != NULL) {               /* have buffer */
            dev_kfree_skb(sc->lmc_txq[i]);          /* free it */
            sc->lmc_device->stats.tx_dropped++;     /* We just dropped a packet */
        }
        sc->lmc_txq[i] = NULL;
        sc->lmc_txring[i].status = 0x00000000;
        sc->lmc_txring[i].buffer2 = virt_to_bus (&sc->lmc_txring[i + 1]);
    }
    sc->lmc_txring[i - 1].buffer2 = virt_to_bus (&sc->lmc_txring[0]);
    LMC_CSR_WRITE (sc, csr_txlist, virt_to_bus (sc->lmc_txring));

    lmc_trace(sc->lmc_device, "lmc_softreset out");
}
void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits) /*fold00*/
{
    lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in");
    sc->lmc_gpio_io &= ~bits;
    LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
    lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out");
}
void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits) /*fold00*/
{
    lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in");
    sc->lmc_gpio_io |= bits;
    LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
    lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out");
}
void lmc_led_on(lmc_softc_t * const sc, u32 led) /*fold00*/
{
    lmc_trace(sc->lmc_device, "lmc_led_on in");
    if ((~sc->lmc_miireg16) & led) {    /* Already on! */
        lmc_trace(sc->lmc_device, "lmc_led_on aon out");
        return;
    }

    sc->lmc_miireg16 &= ~led;
    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
    lmc_trace(sc->lmc_device, "lmc_led_on out");
}
void lmc_led_off(lmc_softc_t * const sc, u32 led) /*fold00*/
{
    lmc_trace(sc->lmc_device, "lmc_led_off in");
    if (sc->lmc_miireg16 & led) {       /* Already set, don't do anything */
        lmc_trace(sc->lmc_device, "lmc_led_off aoff out");
        return;
    }

    sc->lmc_miireg16 |= led;
    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
    lmc_trace(sc->lmc_device, "lmc_led_off out");
}
static void lmc_reset(lmc_softc_t * const sc) /*fold00*/
{
    lmc_trace(sc->lmc_device, "lmc_reset in");
    sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);

    sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);

    /*
     * make some of the GPIO pins be outputs
     */
    lmc_gpio_mkoutput(sc, LMC_GEP_RESET);

    /*
     * RESET low to force state reset.  This also forces
     * the transmitter clock to be internal, but we expect to reset
     * that later anyway.
     */
    sc->lmc_gpio &= ~(LMC_GEP_RESET);
    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

    /*
     * hold for more than 10 microseconds
     */

    /*
     * stop driving Xilinx-related signals
     */
    lmc_gpio_mkinput(sc, LMC_GEP_RESET);

    /*
     * Call media specific init routine
     */
    sc->lmc_media->init(sc);

    sc->extra_stats.resetCount++;
    lmc_trace(sc->lmc_device, "lmc_reset out");
}
static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/
{
    u32 val;

    lmc_trace(sc->lmc_device, "lmc_dec_reset in");

    /*
     * disable all interrupts
     */
    sc->lmc_intrmask = 0;
    LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask);

    /*
     * Reset the chip with a software reset command.
     * Wait 10 microseconds (actually 50 PCI cycles but at
     * 33MHz that comes to two microseconds but wait a
     * bit longer anyways)
     */
    LMC_CSR_WRITE(sc, csr_busmode, TULIP_BUSMODE_SWRESET);

    sc->lmc_busmode = LMC_CSR_READ(sc, csr_busmode);
    sc->lmc_busmode = 0x00100000;
    sc->lmc_busmode &= ~TULIP_BUSMODE_SWRESET;
    LMC_CSR_WRITE(sc, csr_busmode, sc->lmc_busmode);

    sc->lmc_cmdmode = LMC_CSR_READ(sc, csr_command);

    /*
     * no ethernet address in frames we write
     * disable padding (txdesc, padding disable)
     * ignore runt frames (rdes0 bit 15)
     * no receiver watchdog or transmitter jabber timer
     *     (csr15 bit 0,14 == 1)
     * if using 16-bit CRC, turn off CRC (trans desc, crc disable)
     */
    sc->lmc_cmdmode |= ( TULIP_CMD_PROMISCUOUS
                         | TULIP_CMD_FULLDUPLEX
                         | TULIP_CMD_PASSBADPKT
                         | TULIP_CMD_NOHEARTBEAT
                         | TULIP_CMD_PORTSELECT
                         | TULIP_CMD_RECEIVEALL
                         | TULIP_CMD_MUSTBEONE
                       );
    sc->lmc_cmdmode &= ~( TULIP_CMD_OPERMODE
                          | TULIP_CMD_THRESHOLDCTL
                          | TULIP_CMD_STOREFWD
                          | TULIP_CMD_TXTHRSHLDCTL
                        );

    LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);

    /*
     * disable receiver watchdog and transmit jabber
     */
    val = LMC_CSR_READ(sc, csr_sia_general);
    val |= (TULIP_WATCHDOG_TXDISABLE | TULIP_WATCHDOG_RXDISABLE);
    LMC_CSR_WRITE(sc, csr_sia_general, val);

    lmc_trace(sc->lmc_device, "lmc_dec_reset out");
}
static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00*/
                         size_t csr_size)
{
    lmc_trace(sc->lmc_device, "lmc_initcsrs in");
    sc->lmc_csrs.csr_busmode        = csr_base +  0 * csr_size;
    sc->lmc_csrs.csr_txpoll         = csr_base +  1 * csr_size;
    sc->lmc_csrs.csr_rxpoll         = csr_base +  2 * csr_size;
    sc->lmc_csrs.csr_rxlist         = csr_base +  3 * csr_size;
    sc->lmc_csrs.csr_txlist         = csr_base +  4 * csr_size;
    sc->lmc_csrs.csr_status         = csr_base +  5 * csr_size;
    sc->lmc_csrs.csr_command        = csr_base +  6 * csr_size;
    sc->lmc_csrs.csr_intr           = csr_base +  7 * csr_size;
    sc->lmc_csrs.csr_missed_frames  = csr_base +  8 * csr_size;
    sc->lmc_csrs.csr_9              = csr_base +  9 * csr_size;
    sc->lmc_csrs.csr_10             = csr_base + 10 * csr_size;
    sc->lmc_csrs.csr_11             = csr_base + 11 * csr_size;
    sc->lmc_csrs.csr_12             = csr_base + 12 * csr_size;
    sc->lmc_csrs.csr_13             = csr_base + 13 * csr_size;
    sc->lmc_csrs.csr_14             = csr_base + 14 * csr_size;
    sc->lmc_csrs.csr_15             = csr_base + 15 * csr_size;
    lmc_trace(sc->lmc_device, "lmc_initcsrs out");
}
static void lmc_driver_timeout(struct net_device *dev)
{
    lmc_softc_t *sc = dev_to_sc(dev);
    u32 csr6;
    unsigned long flags;

    lmc_trace(dev, "lmc_driver_timeout in");

    spin_lock_irqsave(&sc->lmc_lock, flags);

    printk("%s: Xmitter busy|\n", dev->name);

    sc->extra_stats.tx_tbusy_calls++;
    if (jiffies - dev_trans_start(dev) < TX_TIMEOUT)
        goto bug_out;

    /*
     * Chip seems to have locked up
     * Reset it
     * This whips out all our descriptor
     * table and starts from scratch
     */
    LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO,
                  LMC_CSR_READ (sc, csr_status),
                  sc->extra_stats.tx_ProcTimeout);

    lmc_running_reset (dev);

    LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
    LMC_EVENT_LOG(LMC_EVENT_RESET2,
                  lmc_mii_readreg (sc, 0, 16),
                  lmc_mii_readreg (sc, 0, 17));

    /* restart the tx processes */
    csr6 = LMC_CSR_READ (sc, csr_command);
    LMC_CSR_WRITE (sc, csr_command, csr6 | 0x0002);
    LMC_CSR_WRITE (sc, csr_command, csr6 | 0x2002);

    /* immediate transmit */
    LMC_CSR_WRITE (sc, csr_txpoll, 0);

    sc->lmc_device->stats.tx_errors++;
    sc->extra_stats.tx_ProcTimeout++;   /* -baz */

    dev->trans_start = jiffies;         /* prevent tx timeout */

bug_out:

    spin_unlock_irqrestore(&sc->lmc_lock, flags);

    lmc_trace(dev, "lmc_driver_timeout out");
}