// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
 * All rights reserved.  www.lanmedia.com
 * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This code is written by:
 * Andrew Stanley-Jones (asj@cban.com),
 * Rob Braun (bbraun@vix.com),
 * Michael Graff (explorer@vix.com) and
 * Matt Thomas (matt@3am-software.com).
 *
 * Driver for the LanMedia LMC5200, LMC5245, LMC1000, LMC1200 cards.
 *
 * To control link specific options lmcctl is required.
 * It can be obtained from ftp.lanmedia.com.
 *
 * Linux uses the device struct lmc_private to pass private information
 * to the driver.
 *
 * The initialization portion of this driver (the lmc_reset() and the
 * lmc_dec_reset() functions, as well as the led controls and the
 * lmc_initcsrs() functions).
 *
 * The watchdog function runs every second and checks to see if
 * we still have link, and that the timing source is what we expected
 * it to be.  If link is lost, the interface is marked down, and
 * we can no longer transmit.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/inet.h>
#include <linux/bitops.h>
#include <asm/processor.h>             /* Processor type for cache alignment. */
#include <linux/uaccess.h>
//#include <asm/spinlock.h>
#define DRIVER_MAJOR_VERSION     1
#define DRIVER_MINOR_VERSION    34
#define DRIVER_SUB_VERSION       0

#define DRIVER_VERSION ((DRIVER_MAJOR_VERSION << 8) + DRIVER_MINOR_VERSION)
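/*
 * DRIVER_VERSION packs the major and minor numbers into one word:
 * 1.34 encodes as (1 << 8) + 34 = 0x0122.  LMCIOCCLEARLMCSTATS and
 * lmc_init_one() below stash this value, shifted up 16 bits, together
 * with the size of the stats structures in extra_stats.version_size so
 * a user-space reader can verify it is talking to a matching driver.
 */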
70 #include "lmc_ioctl.h"
71 #include "lmc_debug.h"
72 #include "lmc_proto.h"
static int LMC_PKT_BUF_SZ = 1542;

static const struct pci_device_id lmc_pci_tbl[] = {
    { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
      PCI_VENDOR_ID_LMC, PCI_ANY_ID },
    { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
      PCI_ANY_ID, PCI_VENDOR_ID_LMC },
    { 0 }
};

MODULE_DEVICE_TABLE(pci, lmc_pci_tbl);
MODULE_LICENSE("GPL v2");
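/*
 * Both table entries match the DEC 21140 "Tulip FAST" chip the cards are
 * built around; the LMC boards are told apart from ordinary Tulip NICs by
 * the LMC PCI subsystem ID, which some firmware reports in the
 * subsystem-vendor slot and some in the subsystem-device slot (see the
 * subdevice fixup in lmc_init_one() below), hence the two orderings.
 */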
static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
                                  struct net_device *dev);
static int lmc_rx(struct net_device *dev);
static int lmc_open(struct net_device *dev);
static int lmc_close(struct net_device *dev);
static struct net_device_stats *lmc_get_stats(struct net_device *dev);
static irqreturn_t lmc_interrupt(int irq, void *dev_instance);
static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, size_t csr_size);
static void lmc_softreset(lmc_softc_t * const);
static void lmc_running_reset(struct net_device *dev);
static int lmc_ifdown(struct net_device * const);
static void lmc_watchdog(struct timer_list *t);
static void lmc_reset(lmc_softc_t * const sc);
static void lmc_dec_reset(lmc_softc_t * const sc);
static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue);
/*
 * linux reserves 16 device-specific IOCTLs.  We call them
 * LMCIOC* to control various bits of our world.
 */
int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
    lmc_softc_t *sc = dev_to_sc(dev);
    int ret = -EOPNOTSUPP;

    lmc_trace(dev, "lmc_ioctl in");

    /*
     * Most functions mess with the structure.
     * Disable interrupts while we do the polling.
     */

    /*
     * Return current driver state.  Since we keep this up
     * to date internally, just copy this out to the user.
     */
    case LMCIOCGINFO: /*fold01*/
        if (copy_to_user(ifr->ifr_data, &sc->ictl, sizeof(lmc_ctl_t)))

    case LMCIOCSINFO: /*fold01*/
        if (!capable(CAP_NET_ADMIN)) {

        if (dev->flags & IFF_UP) {

        if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {

        spin_lock_irqsave(&sc->lmc_lock, flags);
        sc->lmc_media->set_status(sc, &ctl);

        if (ctl.crc_length != sc->ictl.crc_length) {
            sc->lmc_media->set_crc_length(sc, ctl.crc_length);
            if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16)
                sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE;
            else
                sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE;

        spin_unlock_irqrestore(&sc->lmc_lock, flags);

    case LMCIOCIFTYPE: /*fold01*/
        u16 old_type = sc->if_type;

        if (!capable(CAP_NET_ADMIN)) {

        if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u16))) {

        if (new_type == old_type)
            break;    /* no change */

        spin_lock_irqsave(&sc->lmc_lock, flags);

        sc->if_type = new_type;
        lmc_proto_attach(sc);
        ret = lmc_proto_open(sc);
        spin_unlock_irqrestore(&sc->lmc_lock, flags);
    case LMCIOCGETXINFO: /*fold01*/
        spin_lock_irqsave(&sc->lmc_lock, flags);
        sc->lmc_xinfo.Magic0 = 0xBEEFCAFE;

        sc->lmc_xinfo.PciCardType = sc->lmc_cardtype;
        sc->lmc_xinfo.PciSlotNumber = 0;
        sc->lmc_xinfo.DriverMajorVersion = DRIVER_MAJOR_VERSION;
        sc->lmc_xinfo.DriverMinorVersion = DRIVER_MINOR_VERSION;
        sc->lmc_xinfo.DriverSubVersion = DRIVER_SUB_VERSION;
        sc->lmc_xinfo.XilinxRevisionNumber =
            lmc_mii_readreg(sc, 0, 3) & 0xf;
        sc->lmc_xinfo.MaxFrameSize = LMC_PKT_BUF_SZ;
        sc->lmc_xinfo.link_status = sc->lmc_media->get_link_status(sc);
        sc->lmc_xinfo.mii_reg16 = lmc_mii_readreg(sc, 0, 16);
        spin_unlock_irqrestore(&sc->lmc_lock, flags);

        sc->lmc_xinfo.Magic1 = 0xDEADBEEF;

        if (copy_to_user(ifr->ifr_data, &sc->lmc_xinfo,
                         sizeof(struct lmc_xinfo)))

    case LMCIOCGETLMCSTATS:
        spin_lock_irqsave(&sc->lmc_lock, flags);
        if (sc->lmc_cardtype == LMC_CARDTYPE_T1) {
            lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_LSB);
            sc->extra_stats.framingBitErrorCount +=
                lmc_mii_readreg(sc, 0, 18) & 0xff;
            lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_MSB);
            sc->extra_stats.framingBitErrorCount +=
                (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
            lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_LSB);
            sc->extra_stats.lineCodeViolationCount +=
                lmc_mii_readreg(sc, 0, 18) & 0xff;
            lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_MSB);
            sc->extra_stats.lineCodeViolationCount +=
                (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
            lmc_mii_writereg(sc, 0, 17, T1FRAMER_AERR);
            regVal = lmc_mii_readreg(sc, 0, 18) & 0xff;

            sc->extra_stats.lossOfFrameCount +=
                (regVal & T1FRAMER_LOF_MASK) >> 4;
            sc->extra_stats.changeOfFrameAlignmentCount +=
                (regVal & T1FRAMER_COFA_MASK) >> 2;
            sc->extra_stats.severelyErroredFrameCount +=
                regVal & T1FRAMER_SEF_MASK;
        }
        spin_unlock_irqrestore(&sc->lmc_lock, flags);
        if (copy_to_user(ifr->ifr_data, &sc->lmc_device->stats,
                         sizeof(sc->lmc_device->stats)) ||
            copy_to_user(ifr->ifr_data + sizeof(sc->lmc_device->stats),
                         &sc->extra_stats, sizeof(sc->extra_stats)))
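        /*
         * Note the layout LMCIOCGETLMCSTATS hands back: the generic
         * struct net_device_stats is copied out first, and the driver's
         * private extra_stats block follows immediately at
         * ifr_data + sizeof(stats), so a user-space tool has to read the
         * two structures back to back.
         */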
    case LMCIOCCLEARLMCSTATS:
        if (!capable(CAP_NET_ADMIN)) {

        spin_lock_irqsave(&sc->lmc_lock, flags);
        memset(&sc->lmc_device->stats, 0, sizeof(sc->lmc_device->stats));
        memset(&sc->extra_stats, 0, sizeof(sc->extra_stats));
        sc->extra_stats.check = STATCHECK;
        sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
            sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
        sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
        spin_unlock_irqrestore(&sc->lmc_lock, flags);

    case LMCIOCSETCIRCUIT: /*fold01*/
        if (!capable(CAP_NET_ADMIN)) {

        if (dev->flags & IFF_UP) {

        if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {

        spin_lock_irqsave(&sc->lmc_lock, flags);
        sc->lmc_media->set_circuit_type(sc, ctl.circuit_type);
        sc->ictl.circuit_type = ctl.circuit_type;
        spin_unlock_irqrestore(&sc->lmc_lock, flags);

    case LMCIOCRESET: /*fold01*/
        if (!capable(CAP_NET_ADMIN)) {

        spin_lock_irqsave(&sc->lmc_lock, flags);
        /* Reset driver and bring back to current state */
        printk(" REG16 before reset +%04x\n", lmc_mii_readreg(sc, 0, 16));
        lmc_running_reset(dev);
        printk(" REG16 after reset +%04x\n", lmc_mii_readreg(sc, 0, 16));

        LMC_EVENT_LOG(LMC_EVENT_FORCEDRESET, LMC_CSR_READ(sc, csr_status), lmc_mii_readreg(sc, 0, 16));
        spin_unlock_irqrestore(&sc->lmc_lock, flags);
    case LMCIOCDUMPEVENTLOG:
        if (copy_to_user(ifr->ifr_data, &lmcEventLogIndex, sizeof(u32))) {

        if (copy_to_user(ifr->ifr_data + sizeof(u32), lmcEventLogBuf,
                         sizeof(lmcEventLogBuf)))

#endif /* end ifdef _DBG_EVENTLOG */
    case LMCIOCT1CONTROL: /*fold01*/
        if (sc->lmc_cardtype != LMC_CARDTYPE_T1) {

    case LMCIOCXILINX: /*fold01*/
        struct lmc_xilinx_control xc; /*fold02*/

        if (!capable(CAP_NET_ADMIN)) {

        /*
         * Stop the transmitter while we restart the hardware
         */
        netif_stop_queue(dev);

        if (copy_from_user(&xc, ifr->ifr_data, sizeof(struct lmc_xilinx_control))) {

        case lmc_xilinx_reset: /*fold02*/
            spin_lock_irqsave(&sc->lmc_lock, flags);
            mii = lmc_mii_readreg(sc, 0, 16);

            /*
             * Make all of them 0 and make input
             */
            lmc_gpio_mkinput(sc, 0xff);

            /*
             * make the reset output
             */
            lmc_gpio_mkoutput(sc, LMC_GEP_RESET);

            /*
             * RESET low to force configuration.  This also forces
             * the transmitter clock to be internal, but we expect to reset
             * that later anyway.
             */
            sc->lmc_gpio &= ~LMC_GEP_RESET;
            LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

            /*
             * hold for more than 10 microseconds
             */
            sc->lmc_gpio |= LMC_GEP_RESET;
            LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

            /*
             * stop driving Xilinx-related signals
             */
            lmc_gpio_mkinput(sc, 0xff);

            /* Reset the framer hardware */
            sc->lmc_media->set_link_status(sc, 1);
            sc->lmc_media->set_status(sc, NULL);
            // lmc_softreset(sc);

            for (i = 0; i < 5; i++) {
                lmc_led_on(sc, LMC_DS3_LED0);
                lmc_led_off(sc, LMC_DS3_LED0);
                lmc_led_on(sc, LMC_DS3_LED1);
                lmc_led_off(sc, LMC_DS3_LED1);
                lmc_led_on(sc, LMC_DS3_LED3);
                lmc_led_off(sc, LMC_DS3_LED3);
                lmc_led_on(sc, LMC_DS3_LED2);
                lmc_led_off(sc, LMC_DS3_LED2);
            }
            spin_unlock_irqrestore(&sc->lmc_lock, flags);
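            /*
             * The sequence above is the standard reset dance for the
             * on-board Xilinx: tri-state all GPIO pins, drive only the
             * RESET line, pulse it low (holding for more than 10 us) and
             * high again, then stop driving the pins before telling the
             * media layer to reinitialize the framer.  The LED blinking
             * is purely cosmetic feedback for the operator.
             */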
        case lmc_xilinx_load_prom: /*fold02*/
            int timeout = 500000;

            spin_lock_irqsave(&sc->lmc_lock, flags);
            mii = lmc_mii_readreg(sc, 0, 16);

            /*
             * Make all of them 0 and make input
             */
            lmc_gpio_mkinput(sc, 0xff);

            /*
             * make the reset output
             */
            lmc_gpio_mkoutput(sc, LMC_GEP_DP | LMC_GEP_RESET);

            /*
             * RESET low to force configuration.  This also forces
             * the transmitter clock to be internal, but we expect to reset
             * that later anyway.
             */
            sc->lmc_gpio &= ~(LMC_GEP_RESET | LMC_GEP_DP);
            LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

            /*
             * hold for more than 10 microseconds
             */
            sc->lmc_gpio |= LMC_GEP_DP | LMC_GEP_RESET;
            LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

            /*
             * busy wait for the chip to reset
             */
            while ((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
                   (timeout-- > 0))
                ;

            /*
             * stop driving Xilinx-related signals
             */
            lmc_gpio_mkinput(sc, 0xff);
            spin_unlock_irqrestore(&sc->lmc_lock, flags);
        case lmc_xilinx_load: /*fold02*/
            int timeout = 500000;

            data = memdup_user(xc.data, xc.len);

            printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data);

            spin_lock_irqsave(&sc->lmc_lock, flags);
            lmc_gpio_mkinput(sc, 0xff);

            /*
             * Clear the Xilinx and start programming from the DEC
             */
            sc->lmc_gpio &= ~LMC_GEP_DP;
            sc->lmc_gpio &= ~LMC_GEP_RESET;
            sc->lmc_gpio |= LMC_GEP_MODE;
            LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

            lmc_gpio_mkoutput(sc, LMC_GEP_MODE | LMC_GEP_DP | LMC_GEP_RESET);

            /*
             * Wait at least 10 us, 20 to be safe
             */

            /*
             * Clear reset and activate programming lines
             */
            lmc_gpio_mkinput(sc, LMC_GEP_DP | LMC_GEP_RESET);

            /*
             * Set LOAD, DATA, Clock to 1
             */
            sc->lmc_gpio |= LMC_GEP_MODE;
            sc->lmc_gpio |= LMC_GEP_DATA;
            sc->lmc_gpio |= LMC_GEP_CLK;
            LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

            lmc_gpio_mkoutput(sc, LMC_GEP_DATA | LMC_GEP_CLK | LMC_GEP_MODE);

            /*
             * busy wait for the chip to reset
             */
            while ((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
                   (timeout-- > 0))
                ;

            printk(KERN_DEBUG "%s: Waited %d for the Xilinx to clear its memory\n", dev->name, 500000 - timeout);
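            /*
             * The bitstream is clocked into the Xilinx one bit per byte of
             * the user buffer: each pass drives GEP_DATA to the bit value,
             * pulls GEP_CLK low and then high again while keeping GEP_MODE
             * asserted, so the loop below is a plain GPIO bit-bang of the
             * programming interface.  Bytes other than 0 or 1 are reported
             * and treated as 1.
             */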
            for (pos = 0; pos < xc.len; pos++) {
                    sc->lmc_gpio &= ~LMC_GEP_DATA; /* Data is 0 */
                    sc->lmc_gpio |= LMC_GEP_DATA;  /* Data is 1 */
                    printk(KERN_WARNING "%s Bad data in xilinx programming data at %d, got %d wanted 0 or 1\n", dev->name, pos, data[pos]);
                    sc->lmc_gpio |= LMC_GEP_DATA;  /* Assume it's 1 */

                sc->lmc_gpio &= ~LMC_GEP_CLK; /* Clock to zero */
                sc->lmc_gpio |= LMC_GEP_MODE;
                LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

                sc->lmc_gpio |= LMC_GEP_CLK;  /* Put the clock back to one */
                sc->lmc_gpio |= LMC_GEP_MODE;
                LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
            }

            if ((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0) {
                printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (corrupted data)\n", dev->name);
            }
            else if ((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_DP) == 0) {
                printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (done)\n", dev->name);
            }
            else
                printk(KERN_DEBUG "%s: Done reprogramming Xilinx, %d bits, good luck!\n", dev->name, pos);

            lmc_gpio_mkinput(sc, 0xff);

            sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
            lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);

            sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
            lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
            spin_unlock_irqrestore(&sc->lmc_lock, flags);

        netif_wake_queue(dev);

    /* If we don't know what to do, give the protocol a shot. */
    ret = lmc_proto_ioctl(sc, ifr, cmd);

    lmc_trace(dev, "lmc_ioctl out");
/* the watchdog process that cruises around */
static void lmc_watchdog(struct timer_list *t) /*fold00*/
    lmc_softc_t *sc = from_timer(sc, t, timer);
    struct net_device *dev = sc->lmc_device;

    lmc_trace(dev, "lmc_watchdog in");

    spin_lock_irqsave(&sc->lmc_lock, flags);

    if (sc->check != 0xBEAFCAFE) {
        printk("LMC: Corrupt net_device struct, breaking out\n");
        spin_unlock_irqrestore(&sc->lmc_lock, flags);

    /* Make sure the tx jabber and rx watchdog are off,
     * and the transmit and receive processes are running.
     */
    LMC_CSR_WRITE(sc, csr_15, 0x00000011);
    sc->lmc_cmdmode |= TULIP_CMD_TXRUN | TULIP_CMD_RXRUN;
    LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);

    LMC_EVENT_LOG(LMC_EVENT_WATCHDOG, LMC_CSR_READ(sc, csr_status), lmc_mii_readreg(sc, 0, 16));

    /* --- begin time out check -----------------------------------
     * check for a transmit interrupt timeout
     * Has the packet xmt vs xmt serviced threshold been exceeded */
    if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
        sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
        sc->tx_TimeoutInd == 0)
    {
        /* wait for the watchdog to come around again */
        sc->tx_TimeoutInd = 1;
    }
    else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
             sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
             sc->tx_TimeoutInd)
    {
        LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ(sc, csr_status), 0);

        sc->tx_TimeoutDisplay = 1;
        sc->extra_stats.tx_TimeoutCnt++;

        /* DEC chip is stuck, hit it with a RESET!!!! */
        lmc_running_reset(dev);

        /* look at receive & transmit process state to make sure they are running */
        LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);

        /* look at: DSR - 02 for Reg 16 */
        LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16), lmc_mii_readreg(sc, 0, 17));

        /* reset the transmit timeout detection flag */
        sc->tx_TimeoutInd = 0;
        sc->lastlmc_taint_tx = sc->lmc_taint_tx;
        sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
    }
    else
    {
        sc->tx_TimeoutInd = 0;
        sc->lastlmc_taint_tx = sc->lmc_taint_tx;
        sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
    }

    /* --- end time out check ----------------------------------- */
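    /*
     * The logic above is a two-strike timeout detector: on the first
     * watchdog tick where packets have been queued (tx_packets advanced)
     * but no transmit interrupt has been serviced (lmc_taint_tx unchanged),
     * tx_TimeoutInd is set; if the same condition still holds on the next
     * tick the chip is assumed stuck and lmc_running_reset() is issued,
     * after which the bookkeeping counters are resynchronized.
     */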
    link_status = sc->lmc_media->get_link_status(sc);

    /*
     * hardware level link lost, but the interface is marked as up.
     */
    if ((link_status == 0) && (sc->last_link_status != 0)) {
        printk(KERN_WARNING "%s: hardware/physical link down\n", dev->name);
        sc->last_link_status = 0;
        /* lmc_reset (sc); Why reset??? The link can go down ok */

        /* Inform the world that link has been lost */
        netif_carrier_off(dev);
    }

    /*
     * hardware link is up, but the interface is marked as down.
     * Bring it back up again.
     */
    if (link_status != 0 && sc->last_link_status == 0) {
        printk(KERN_WARNING "%s: hardware/physical link up\n", dev->name);
        sc->last_link_status = 1;
        /* lmc_reset (sc); Again why reset??? */

        netif_carrier_on(dev);
    }

    /* Call media specific watchdog functions */
    sc->lmc_media->watchdog(sc);

    /*
     * Poke the transmitter to make sure it
     * never stops, even if we run out of mem
     */
    LMC_CSR_WRITE(sc, csr_rxpoll, 0);

    /*
     * Check for code that failed
     * and try and fix it as appropriate
     */
    if (sc->failed_ring == 1) {
        /*
         * Failed to setup the recv/xmit ring
         */

    if (sc->failed_recv_alloc == 1) {
        /*
         * We failed to alloc mem in the
         * interrupt handler, go through the rings
         */
        sc->failed_recv_alloc = 0;

    /*
     * remember the timer value
     */
    ticks = LMC_CSR_READ(sc, csr_gp_timer);
    LMC_CSR_WRITE(sc, csr_gp_timer, 0xffffffffUL);
    sc->ictl.ticks = 0x0000ffff - (ticks & 0x0000ffff);

    /*
     * restart this timer.
     */
    sc->timer.expires = jiffies + (HZ);
    add_timer(&sc->timer);

    spin_unlock_irqrestore(&sc->lmc_lock, flags);

    lmc_trace(dev, "lmc_watchdog out");
static int lmc_attach(struct net_device *dev, unsigned short encoding,
                      unsigned short parity)
    if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)

static const struct net_device_ops lmc_ops = {
    .ndo_open       = lmc_open,
    .ndo_stop       = lmc_close,
    .ndo_start_xmit = hdlc_start_xmit,
    .ndo_do_ioctl   = lmc_ioctl,
    .ndo_tx_timeout = lmc_driver_timeout,
    .ndo_get_stats  = lmc_get_stats,
};
static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
    struct net_device *dev;
    static int cards_found;

    /* lmc_trace(dev, "lmc_init_one in"); */

    err = pcim_enable_device(pdev);
        printk(KERN_ERR "lmc: pci enable failed: %d\n", err);

    err = pci_request_regions(pdev, "lmc");
        printk(KERN_ERR "lmc: pci_request_region failed\n");

    /*
     * Allocate our own device structure
     */
    sc = devm_kzalloc(&pdev->dev, sizeof(lmc_softc_t), GFP_KERNEL);

    dev = alloc_hdlcdev(sc);
        printk(KERN_ERR "lmc:alloc_netdev for device failed\n");

    dev->type = ARPHRD_HDLC;
    dev_to_hdlc(dev)->xmit = lmc_start_xmit;
    dev_to_hdlc(dev)->attach = lmc_attach;
    dev->netdev_ops = &lmc_ops;
    dev->watchdog_timeo = HZ; /* 1 second */
    dev->tx_queue_len = 100;
    sc->lmc_device = dev;
    sc->name = dev->name;
    sc->if_type = LMC_PPP;
    sc->check = 0xBEAFCAFE;
    dev->base_addr = pci_resource_start(pdev, 0);
    dev->irq = pdev->irq;
    pci_set_drvdata(pdev, dev);
    SET_NETDEV_DEV(dev, &pdev->dev);

    /*
     * This will get the protocol layer ready and do any one-time inits.
     * Must have a valid sc and dev structure.
     */
    lmc_proto_attach(sc);

    /* Init the spin lock so we can call it later */

    spin_lock_init(&sc->lmc_lock);
    pci_set_master(pdev);

    printk(KERN_INFO "%s: detected at %lx, irq %d\n", dev->name,
           dev->base_addr, dev->irq);

    err = register_hdlc_device(dev);
        printk(KERN_ERR "%s: register_netdev failed.\n", dev->name);

    sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN;
    sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;

    /*
     * Check either the subvendor or the subdevice, some systems reverse
     * the setting in the BIOS, seems to be version and arch dependent?
     * Fix the error, exchange the two values
     */
    if ((subdevice = pdev->subsystem_device) == PCI_VENDOR_ID_LMC)
        subdevice = pdev->subsystem_vendor;
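    /*
     * Example of the swap this guards against: a board that should report
     * PCI_VENDOR_ID_LMC as the subsystem vendor and, say,
     * PCI_DEVICE_ID_LMC_T1 as the subsystem device may report the two the
     * other way around; when the subsystem-device field holds the LMC
     * vendor ID, the card type is taken from the subsystem-vendor field
     * instead before the switch below.
     */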
    case PCI_DEVICE_ID_LMC_HSSI:
        printk(KERN_INFO "%s: LMC HSSI\n", dev->name);
        sc->lmc_cardtype = LMC_CARDTYPE_HSSI;
        sc->lmc_media = &lmc_hssi_media;

    case PCI_DEVICE_ID_LMC_DS3:
        printk(KERN_INFO "%s: LMC DS3\n", dev->name);
        sc->lmc_cardtype = LMC_CARDTYPE_DS3;
        sc->lmc_media = &lmc_ds3_media;

    case PCI_DEVICE_ID_LMC_SSI:
        printk(KERN_INFO "%s: LMC SSI\n", dev->name);
        sc->lmc_cardtype = LMC_CARDTYPE_SSI;
        sc->lmc_media = &lmc_ssi_media;

    case PCI_DEVICE_ID_LMC_T1:
        printk(KERN_INFO "%s: LMC T1\n", dev->name);
        sc->lmc_cardtype = LMC_CARDTYPE_T1;
        sc->lmc_media = &lmc_t1_media;

        printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name);

    lmc_initcsrs(sc, dev->base_addr, 8);

    lmc_gpio_mkinput(sc, 0xff);
    sc->lmc_gpio = 0; /* drive no signals yet */

    sc->lmc_media->defaults(sc);

    sc->lmc_media->set_link_status(sc, LMC_LINK_UP);

    /* verify that the PCI Sub System ID matches the Adapter Model number
     * from the MII register
     */
    AdapModelNum = (lmc_mii_readreg(sc, 0, 3) & 0x3f0) >> 4;

    if ((AdapModelNum != LMC_ADAP_T1 ||   /* detect LMC1200 */
         subdevice != PCI_DEVICE_ID_LMC_T1) &&
        (AdapModelNum != LMC_ADAP_SSI ||  /* detect LMC1000 */
         subdevice != PCI_DEVICE_ID_LMC_SSI) &&
        (AdapModelNum != LMC_ADAP_DS3 ||  /* detect LMC5245 */
         subdevice != PCI_DEVICE_ID_LMC_DS3) &&
        (AdapModelNum != LMC_ADAP_HSSI || /* detect LMC5200 */
         subdevice != PCI_DEVICE_ID_LMC_HSSI))
        printk(KERN_WARNING "%s: Model number (%d) miscompare for PCI"
               " Subsystem ID = 0x%04x\n",
               dev->name, AdapModelNum, subdevice);

    LMC_CSR_WRITE(sc, csr_gp_timer, 0xFFFFFFFFUL);

    sc->board_idx = cards_found++;
    sc->extra_stats.check = STATCHECK;
    sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
        sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
    sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;

    sc->last_link_status = 0;

    lmc_trace(dev, "lmc_init_one out");
/*
 * Called from pci when removing module.
 */
static void lmc_remove_one(struct pci_dev *pdev)
    struct net_device *dev = pci_get_drvdata(pdev);

    printk(KERN_DEBUG "%s: removing...\n", dev->name);
    unregister_hdlc_device(dev);
/* After this is called, packets can be sent.
 * Does not initialize the addresses
 */
static int lmc_open(struct net_device *dev)
    lmc_softc_t *sc = dev_to_sc(dev);

    lmc_trace(dev, "lmc_open in");

    lmc_led_on(sc, LMC_DS3_LED0);

    LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);
    LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16),
                  lmc_mii_readreg(sc, 0, 17));

        lmc_trace(dev, "lmc_open lmc_ok out");

    /* Since we have to use PCI bus, this should work on x86,alpha,ppc */
    if (request_irq(dev->irq, lmc_interrupt, IRQF_SHARED, dev->name, dev)) {
        printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq);
        lmc_trace(dev, "lmc_open irq failed out");

    /* Assert Terminal Active */
    sc->lmc_miireg16 |= LMC_MII16_LED_ALL;
    sc->lmc_media->set_link_status(sc, LMC_LINK_UP);

    /*
     * reset to last state.
     */
    sc->lmc_media->set_status(sc, NULL);

    /* setup default bits to be used in tulip_desc_t transmit descriptor */
    sc->TxDescriptControlInit = (
        LMC_TDES_INTERRUPT_ON_COMPLETION
        | LMC_TDES_FIRST_SEGMENT
        | LMC_TDES_LAST_SEGMENT
        | LMC_TDES_SECOND_ADDR_CHAINED
        | LMC_TDES_DISABLE_PADDING
       );
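    /*
     * Every transmit descriptor is set up as a single, chained segment
     * (first + last segment bits, second-address-chained) with hardware
     * padding disabled and an optional completion interrupt; the CRC bit
     * is adjusted just below when the configured CRC length is 16 bits.
     */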
    if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16) {
        /* disable 32 bit CRC generated by ASIC */
        sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE;

    sc->lmc_media->set_crc_length(sc, sc->ictl.crc_length);
    /* Acknowledge the Terminal Active and light LEDs */

    /* dev->flags |= IFF_UP; */

    if ((err = lmc_proto_open(sc)) != 0)

    netif_start_queue(dev);
    sc->extra_stats.tx_tbusy0++;

    /*
     * select what interrupts we want to get
     */
    sc->lmc_intrmask = 0;
    /* Should be using the default interrupt mask defined in the .h file. */
    sc->lmc_intrmask |= (TULIP_STS_NORMALINTR
                         | TULIP_STS_ABNRMLINTR
                         | TULIP_STS_SYSERROR
                         | TULIP_STS_TXSTOPPED
                         | TULIP_STS_TXUNDERFLOW
                         | TULIP_STS_RXSTOPPED
                        );
    LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask);

    sc->lmc_cmdmode |= TULIP_CMD_TXRUN;
    sc->lmc_cmdmode |= TULIP_CMD_RXRUN;
    LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);

    sc->lmc_ok = 1; /* Run watchdog */

    /*
     * Set the if up now - pfb
     */
    sc->last_link_status = 1;

    /*
     * Setup a timer for the watchdog on probe, and start it running.
     * Since lmc_ok == 0, it will be a NOP for now.
     */
    timer_setup(&sc->timer, lmc_watchdog, 0);
    sc->timer.expires = jiffies + HZ;
    add_timer(&sc->timer);

    lmc_trace(dev, "lmc_open out");
/* Total reset to compensate for the AdTran DSU doing bad things */
static void lmc_running_reset(struct net_device *dev) /*fold00*/
    lmc_softc_t *sc = dev_to_sc(dev);

    lmc_trace(dev, "lmc_running_reset in");

    /* stop interrupts */
    /* Clear the interrupt mask */
    LMC_CSR_WRITE(sc, csr_intr, 0x00000000);

    /* sc->lmc_miireg16 |= LMC_MII16_LED_ALL; */
    sc->lmc_media->set_link_status(sc, 1);
    sc->lmc_media->set_status(sc, NULL);

    netif_wake_queue(dev);

    sc->extra_stats.tx_tbusy0++;

    sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK;
    LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask);

    sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN);
    LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);

    lmc_trace(dev, "lmc_running_reset_out");
/* This is what is called when you ifconfig down a device.
 * This disables the timer for the watchdog and keepalives,
 * and disables the irq for dev.
 */
static int lmc_close(struct net_device *dev)
    /* not calling release_region() as we should */
    lmc_softc_t *sc = dev_to_sc(dev);

    lmc_trace(dev, "lmc_close in");

    sc->lmc_media->set_link_status(sc, 0);
    del_timer(&sc->timer);
    lmc_proto_close(sc);

    lmc_trace(dev, "lmc_close out");
/* Ends the transfer of packets */
/* When the interface goes down, this is called */
static int lmc_ifdown(struct net_device *dev) /*fold00*/
    lmc_softc_t *sc = dev_to_sc(dev);

    lmc_trace(dev, "lmc_ifdown in");

    /* Don't let anything else go on right now */

    netif_stop_queue(dev);
    sc->extra_stats.tx_tbusy1++;

    /* stop interrupts */
    /* Clear the interrupt mask */
    LMC_CSR_WRITE(sc, csr_intr, 0x00000000);

    /* Stop Tx and Rx on the chip */
    csr6 = LMC_CSR_READ(sc, csr_command);
    csr6 &= ~LMC_DEC_ST; /* Turn off the Transmission bit */
    csr6 &= ~LMC_DEC_SR; /* Turn off the Receive bit */
    LMC_CSR_WRITE(sc, csr_command, csr6);

    sc->lmc_device->stats.rx_missed_errors +=
        LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;

    /* release the interrupt */
    if (sc->got_irq == 1) {
        free_irq(dev->irq, dev);

    /* free skbuffs in the Rx queue */
    for (i = 0; i < LMC_RXDESCS; i++)
    {
        struct sk_buff *skb = sc->lmc_rxq[i];
        sc->lmc_rxq[i] = NULL;
        sc->lmc_rxring[i].status = 0;
        sc->lmc_rxring[i].length = 0;
        sc->lmc_rxring[i].buffer1 = 0xDEADBEEF;

        sc->lmc_rxq[i] = NULL;
    }

    for (i = 0; i < LMC_TXDESCS; i++)
    {
        if (sc->lmc_txq[i] != NULL)
            dev_kfree_skb(sc->lmc_txq[i]);
        sc->lmc_txq[i] = NULL;
    }

    lmc_led_off(sc, LMC_MII16_LED_ALL);

    netif_wake_queue(dev);
    sc->extra_stats.tx_tbusy0++;

    lmc_trace(dev, "lmc_ifdown out");
/* Interrupt handling routine.  This will take an incoming packet, or clean
 * up after a transmit.
 */
static irqreturn_t lmc_interrupt(int irq, void *dev_instance) /*fold00*/
    struct net_device *dev = (struct net_device *) dev_instance;
    lmc_softc_t *sc = dev_to_sc(dev);
    int max_work = LMC_RXDESCS;

    lmc_trace(dev, "lmc_interrupt in");

    spin_lock(&sc->lmc_lock);

    /*
     * Read the csr to find what interrupts we have (if any)
     */
    csr = LMC_CSR_READ(sc, csr_status);

    /*
     * Make sure this is our interrupt
     */
    if (!(csr & sc->lmc_intrmask)) {
        goto lmc_int_fail_out;

    /* always go through this loop at least once */
    while (csr & sc->lmc_intrmask) {

        /*
         * Clear interrupt bits, we handle all case below
         */
        LMC_CSR_WRITE(sc, csr_status, csr);

        /*
         * - Transmit process timed out CSR5<1>
         * - Transmit jabber timeout    CSR5<3>
         * - Transmit underflow         CSR5<5>
         * - Transmit Receiver buffer unavailable CSR5<7>
         * - Receive process stopped    CSR5<8>
         * - Receive watchdog timeout   CSR5<9>
         * - Early transmit interrupt   CSR5<10>
         *
         * Is this really right? Should we do a running reset for jabber?
         * (being a WAN card and all)
         */
        if (csr & TULIP_STS_ABNRMLINTR) {
            lmc_running_reset(dev);

        if (csr & TULIP_STS_RXINTR) {
            lmc_trace(dev, "rx interrupt");

        if (csr & (TULIP_STS_TXINTR | TULIP_STS_TXNOBUF | TULIP_STS_TXSTOPPED)) {

            /* reset the transmit timeout detection flag -baz */
            sc->extra_stats.tx_NoCompleteCnt = 0;

            badtx = sc->lmc_taint_tx;
            i = badtx % LMC_TXDESCS;

            while ((badtx < sc->lmc_next_tx)) {
                stat = sc->lmc_txring[i].status;

                LMC_EVENT_LOG(LMC_EVENT_XMTINT, stat,
                              sc->lmc_txring[i].length);
                /*
                 * If bit 31 is 1 the tulip owns it break out of the loop
                 */
                if (stat & 0x80000000)

                n_compl++; /* i.e., have an empty slot in ring */
                /*
                 * If we have no skbuff or have cleared it
                 * Already continue to the next buffer
                 */
                if (sc->lmc_txq[i] == NULL)

                /*
                 * Check the total error summary to look for any errors
                 */
                if (stat & 0x8000) {
                    sc->lmc_device->stats.tx_errors++;
                        sc->lmc_device->stats.tx_aborted_errors++;
                        sc->lmc_device->stats.tx_carrier_errors++;
                        sc->lmc_device->stats.tx_window_errors++;
                        sc->lmc_device->stats.tx_fifo_errors++;

                    sc->lmc_device->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff;

                    sc->lmc_device->stats.tx_packets++;

                dev_consume_skb_irq(sc->lmc_txq[i]);
                sc->lmc_txq[i] = NULL;

                i = badtx % LMC_TXDESCS;

            if (sc->lmc_next_tx - badtx > LMC_TXDESCS)
                printk("%s: out of sync pointer\n", dev->name);
                badtx += LMC_TXDESCS;

            LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0);
            netif_wake_queue(dev);
            sc->extra_stats.tx_tbusy0++;

            sc->extra_stats.dirtyTx = badtx;
            sc->extra_stats.lmc_next_tx = sc->lmc_next_tx;
            sc->extra_stats.lmc_txfull = sc->lmc_txfull;

            sc->lmc_taint_tx = badtx;

            /*
             * Why was there a break here???
             */
        } /* end handle transmit interrupt */
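        /*
         * The loop above reclaims completed transmit descriptors: it walks
         * from lmc_taint_tx (the oldest outstanding descriptor) toward
         * lmc_next_tx, stops as soon as it meets a descriptor still owned
         * by the chip (status bit 31 set), accounts errors and byte/packet
         * counts, and frees the associated skbs before recording the new
         * tail back into lmc_taint_tx.
         */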
        if (csr & TULIP_STS_SYSERROR) {

            printk(KERN_WARNING "%s: system bus error csr: %#8.8x\n", dev->name, csr);
            error = csr >> 23 & 0x7;
                printk(KERN_WARNING "%s: Parity Fault (bad)\n", dev->name);
                printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
                printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
                printk(KERN_WARNING "%s: This bus error code was supposed to be reserved!\n", dev->name);

            LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);
            LMC_EVENT_LOG(LMC_EVENT_RESET2,
                          lmc_mii_readreg(sc, 0, 16),
                          lmc_mii_readreg(sc, 0, 17));

        /*
         * Get current csr status to make sure
         * we've cleared all interrupts
         */
        csr = LMC_CSR_READ(sc, csr_status);
    } /* end interrupt loop */
    LMC_EVENT_LOG(LMC_EVENT_INT, firstcsr, csr);

    spin_unlock(&sc->lmc_lock);

    lmc_trace(dev, "lmc_interrupt out");
    return IRQ_RETVAL(handled);
static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
                                  struct net_device *dev)
    lmc_softc_t *sc = dev_to_sc(dev);
    unsigned long flags;

    lmc_trace(dev, "lmc_start_xmit in");

    spin_lock_irqsave(&sc->lmc_lock, flags);

    /* normal path, tbusy known to be zero */

    entry = sc->lmc_next_tx % LMC_TXDESCS;

    sc->lmc_txq[entry] = skb;
    sc->lmc_txring[entry].buffer1 = virt_to_bus(skb->data);

    LMC_CONSOLE_LOG("xmit", skb->data, skb->len);

    /* If the queue is less than half full, don't interrupt */
    if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS / 2)
    {
        /* Do not interrupt on completion of this packet */
        netif_wake_queue(dev);
    }
    else if (sc->lmc_next_tx - sc->lmc_taint_tx == LMC_TXDESCS / 2)
    {
        /* This generates an interrupt on completion of this packet */
        netif_wake_queue(dev);
    }
    else if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS - 1)
    {
        /* Do not interrupt on completion of this packet */
        netif_wake_queue(dev);
    }
    else
    {
        /* This generates an interrupt on completion of this packet */
        netif_stop_queue(dev);
    }
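    /*
     * The ladder above throttles completion interrupts: while fewer than
     * half of the LMC_TXDESCS descriptors are outstanding no completion
     * interrupt is requested, one is requested at the halfway point so the
     * reclaim in lmc_interrupt() keeps up, and only when the ring is about
     * to fill is the queue stopped.
     */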
    flag = LMC_TDES_INTERRUPT_ON_COMPLETION;

    if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1)
    {   /* ring full, go busy */
        netif_stop_queue(dev);
        sc->extra_stats.tx_tbusy1++;
        LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0);
    }

    if (entry == LMC_TXDESCS - 1)       /* last descriptor in ring */
        flag |= LMC_TDES_END_OF_RING;   /* flag as such for Tulip */

    /* don't pad small packets either */
    flag = sc->lmc_txring[entry].length = (skb->len) | flag |
        sc->TxDescriptControlInit;

    /* set the transmit timeout flag to be checked in
     * the watchdog timer handler. -baz
     */
    sc->extra_stats.tx_NoCompleteCnt++;

    /* give ownership to the chip */
    LMC_EVENT_LOG(LMC_EVENT_XMT, flag, entry);
    sc->lmc_txring[entry].status = 0x80000000;

    LMC_CSR_WRITE(sc, csr_txpoll, 0);

    spin_unlock_irqrestore(&sc->lmc_lock, flags);

    lmc_trace(dev, "lmc_start_xmit_out");
    return NETDEV_TX_OK;
static int lmc_rx(struct net_device *dev)
    lmc_softc_t *sc = dev_to_sc(dev);
    int rx_work_limit = LMC_RXDESCS;
    int rxIntLoopCnt;          /* debug -baz */
    int localLengthErrCnt = 0;
    struct sk_buff *skb, *nsb;

    lmc_trace(dev, "lmc_rx in");

    lmc_led_on(sc, LMC_DS3_LED3);

    rxIntLoopCnt = 0;          /* debug -baz */

    i = sc->lmc_next_rx % LMC_RXDESCS;

    while (((stat = sc->lmc_rxring[i].status) & LMC_RDES_OWN_BIT) != DESC_OWNED_BY_DC21X4)
    {
        rxIntLoopCnt++;        /* debug -baz */
        len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER);
        if ((stat & 0x0300) != 0x0300) {  /* Check first segment and last segment */
            if ((stat & 0x0000ffff) != 0x7fff) {
                /* Oversized frame */
                sc->lmc_device->stats.rx_length_errors++;

        if (stat & 0x00000008) {  /* Catch a dribbling bit error */
            sc->lmc_device->stats.rx_errors++;
            sc->lmc_device->stats.rx_frame_errors++;

        if (stat & 0x00000004) {  /* Catch a CRC error by the Xilinx */
            sc->lmc_device->stats.rx_errors++;
            sc->lmc_device->stats.rx_crc_errors++;

        if (len > LMC_PKT_BUF_SZ) {
            sc->lmc_device->stats.rx_length_errors++;
            localLengthErrCnt++;

        if (len < sc->lmc_crcSize + 2) {
            sc->lmc_device->stats.rx_length_errors++;
            sc->extra_stats.rx_SmallPktCnt++;
            localLengthErrCnt++;

        if (stat & 0x00004000) {
            printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name);

        len -= sc->lmc_crcSize;
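        /*
         * The checks above weed out frames before they are handed up:
         * anything that does not both start and end in this descriptor,
         * dribbling-bit and CRC errors flagged by the Xilinx, and frames
         * longer than the receive buffer or shorter than the CRC plus two
         * bytes are counted against the error statistics.  For frames that
         * survive, the trailing CRC bytes are stripped from len before the
         * skb is passed up.
         */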
        skb = sc->lmc_rxq[i];

        /*
         * We ran out of memory at some point
         * just allocate an skb buff and continue.
         */

            nsb = dev_alloc_skb(LMC_PKT_BUF_SZ + 2);
                sc->lmc_rxq[i] = nsb;
                sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
            sc->failed_recv_alloc = 1;

        sc->lmc_device->stats.rx_packets++;
        sc->lmc_device->stats.rx_bytes += len;

        LMC_CONSOLE_LOG("recv", skb->data, len);

        /*
         * I'm not sure of the sanity of this
         * Packets could be arriving at a constant
         * 44.210mbits/sec and we're going to copy
         * them into a new buffer??
         */

        if (len > (LMC_MTU - (LMC_MTU >> 2))) {  /* len > LMC_MTU * 0.75 */
            /*
             * If it's a large packet don't copy it just hand it up
             */
            sc->lmc_rxq[i] = NULL;
            sc->lmc_rxring[i].buffer1 = 0x0;

            skb->protocol = lmc_proto_type(sc, skb);
            skb_reset_mac_header(skb);
            /* skb_reset_network_header(skb); */
            lmc_proto_netif(sc, skb);

            /*
             * This skb will be destroyed by the upper layers, make a new one
             */
            nsb = dev_alloc_skb(LMC_PKT_BUF_SZ + 2);
                sc->lmc_rxq[i] = nsb;
                sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
                /* Transferred to 21140 below */

                /*
                 * We've run out of memory, stop trying to allocate
                 * memory and exit the interrupt handler
                 *
                 * The chip may run out of receivers and stop
                 * in which case we'll try to allocate the buffer
                 * again. (once a second)
                 */
                sc->extra_stats.rx_BuffAllocErr++;
                LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
                sc->failed_recv_alloc = 1;
                goto skip_out_of_mem;

            nsb = dev_alloc_skb(len);
                goto give_it_anyways;

            skb_copy_from_linear_data(skb, skb_put(nsb, len), len);

            nsb->protocol = lmc_proto_type(sc, nsb);
            skb_reset_mac_header(nsb);
            /* skb_reset_network_header(nsb); */
            lmc_proto_netif(sc, nsb);

        LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
        sc->lmc_rxring[i].status = DESC_OWNED_BY_DC21X4;

        i = sc->lmc_next_rx % LMC_RXDESCS;

        if (rx_work_limit < 0)

    /* detect condition for LMC1000 where DSU cable attaches and fills
     * descriptors with bogus packets
     */
    if (localLengthErrCnt > LMC_RXDESCS - 3) {
        sc->extra_stats.rx_BadPktSurgeCnt++;
        LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, localLengthErrCnt,
                      sc->extra_stats.rx_BadPktSurgeCnt);
    }

    /* save max count of receive descriptors serviced */
    if (rxIntLoopCnt > sc->extra_stats.rxIntLoopCnt)
        sc->extra_stats.rxIntLoopCnt = rxIntLoopCnt;  /* debug -baz */

    if (rxIntLoopCnt == 0)
        for (i = 0; i < LMC_RXDESCS; i++)
            if ((sc->lmc_rxring[i].status & LMC_RDES_OWN_BIT)
                != DESC_OWNED_BY_DC21X4)

    LMC_EVENT_LOG(LMC_EVENT_RCVEND, rxIntLoopCnt, 0);

    lmc_led_off(sc, LMC_DS3_LED3);

    lmc_trace(dev, "lmc_rx out");
static struct net_device_stats *lmc_get_stats(struct net_device *dev)
    lmc_softc_t *sc = dev_to_sc(dev);
    unsigned long flags;

    lmc_trace(dev, "lmc_get_stats in");

    spin_lock_irqsave(&sc->lmc_lock, flags);

    sc->lmc_device->stats.rx_missed_errors += LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;

    spin_unlock_irqrestore(&sc->lmc_lock, flags);

    lmc_trace(dev, "lmc_get_stats out");

    return &sc->lmc_device->stats;
static struct pci_driver lmc_driver = {
    .id_table = lmc_pci_tbl,
    .probe    = lmc_init_one,
    .remove   = lmc_remove_one,
};

module_pci_driver(lmc_driver);
unsigned lmc_mii_readreg(lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/
    int command = (0xf6 << 10) | (devaddr << 5) | regno;

    lmc_trace(sc->lmc_device, "lmc_mii_readreg in");

    lmc_trace(sc->lmc_device, "lmc_mii_readreg: done sync");

    for (i = 15; i >= 0; i--)
    {
        int dataval = (command & (1 << i)) ? 0x20000 : 0;

        LMC_CSR_WRITE(sc, csr_9, dataval);
        /* __SLOW_DOWN_IO; */
        LMC_CSR_WRITE(sc, csr_9, dataval | 0x10000);
        /* __SLOW_DOWN_IO; */
    }

    lmc_trace(sc->lmc_device, "lmc_mii_readreg: done1");

    for (i = 19; i > 0; i--)
    {
        LMC_CSR_WRITE(sc, csr_9, 0x40000);
        /* __SLOW_DOWN_IO; */
        retval = (retval << 1) | ((LMC_CSR_READ(sc, csr_9) & 0x80000) ? 1 : 0);
        LMC_CSR_WRITE(sc, csr_9, 0x40000 | 0x10000);
        /* __SLOW_DOWN_IO; */
    }

    lmc_trace(sc->lmc_device, "lmc_mii_readreg out");

    return (retval >> 1) & 0xffff;
void lmc_mii_writereg(lmc_softc_t * const sc, unsigned devaddr, unsigned regno, unsigned data) /*fold00*/
    int command = (0x5002 << 16) | (devaddr << 23) | (regno << 18) | data;

    lmc_trace(sc->lmc_device, "lmc_mii_writereg in");

        if (command & (1 << i))

        LMC_CSR_WRITE(sc, csr_9, datav);
        /* __SLOW_DOWN_IO; */
        LMC_CSR_WRITE(sc, csr_9, (datav | 0x10000));
        /* __SLOW_DOWN_IO; */

        LMC_CSR_WRITE(sc, csr_9, 0x40000);
        /* __SLOW_DOWN_IO; */
        LMC_CSR_WRITE(sc, csr_9, 0x50000);
        /* __SLOW_DOWN_IO; */

    lmc_trace(sc->lmc_device, "lmc_mii_writereg out");
static void lmc_softreset(lmc_softc_t * const sc) /*fold00*/
    lmc_trace(sc->lmc_device, "lmc_softreset in");

    /* Initialize the receive rings and buffers. */
    sc->lmc_next_rx = 0;
    sc->lmc_next_tx = 0;
    sc->lmc_taint_rx = 0;
    sc->lmc_taint_tx = 0;

    /*
     * Setup each one of the receiver buffers
     * allocate an skbuff for each one, setup the descriptor table
     * and point each buffer at the next one
     */
    for (i = 0; i < LMC_RXDESCS; i++)
    {
        struct sk_buff *skb;

        if (sc->lmc_rxq[i] == NULL)
            skb = dev_alloc_skb(LMC_PKT_BUF_SZ + 2);
                printk(KERN_WARNING "%s: Failed to allocate receiver ring, will try again\n", sc->name);
                sc->failed_ring = 1;
            sc->lmc_rxq[i] = skb;
            skb = sc->lmc_rxq[i];

        skb->dev = sc->lmc_device;

        /* owned by 21140 */
        sc->lmc_rxring[i].status = 0x80000000;

        /* used to be PKT_BUF_SZ now uses skb since we lose some to head room */
        sc->lmc_rxring[i].length = skb_tailroom(skb);

        /* used to be tail which is dumb since you're thinking why write
         * to the end of the packet, but since there's nothing there tail == data
         */
        sc->lmc_rxring[i].buffer1 = virt_to_bus(skb->data);

        /* This is fair since the structure is static and we have the next address */
        sc->lmc_rxring[i].buffer2 = virt_to_bus(&sc->lmc_rxring[i + 1]);
    }

    sc->lmc_rxring[i - 1].length |= 0x02000000;                    /* Set end of buffers flag */
    sc->lmc_rxring[i - 1].buffer2 = virt_to_bus(&sc->lmc_rxring[0]); /* Point back to the start */

    LMC_CSR_WRITE(sc, csr_rxlist, virt_to_bus(sc->lmc_rxring));    /* write base address */
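    /*
     * The receive side is a chained descriptor ring: every descriptor's
     * buffer2 points at the next descriptor, the last one is folded back
     * to the first and marked with the end-of-buffers flag, each buffer1
     * points at a freshly allocated skb, and every descriptor starts out
     * owned by the 21140 (status bit 31) so the chip can fill it.
     */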
    /* Initialize the transmit rings and buffers */
    for (i = 0; i < LMC_TXDESCS; i++)
    {
        if (sc->lmc_txq[i] != NULL) {           /* have buffer */
            dev_kfree_skb(sc->lmc_txq[i]);      /* free it */
            sc->lmc_device->stats.tx_dropped++; /* We just dropped a packet */
        }
        sc->lmc_txq[i] = NULL;
        sc->lmc_txring[i].status = 0x00000000;
        sc->lmc_txring[i].buffer2 = virt_to_bus(&sc->lmc_txring[i + 1]);
    }
    sc->lmc_txring[i - 1].buffer2 = virt_to_bus(&sc->lmc_txring[0]);
    LMC_CSR_WRITE(sc, csr_txlist, virt_to_bus(sc->lmc_txring));

    lmc_trace(sc->lmc_device, "lmc_softreset out");
void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits) /*fold00*/
    lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in");
    sc->lmc_gpio_io &= ~bits;
    LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
    lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out");

void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits) /*fold00*/
    lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in");
    sc->lmc_gpio_io |= bits;
    LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
    lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out");

void lmc_led_on(lmc_softc_t * const sc, u32 led) /*fold00*/
    lmc_trace(sc->lmc_device, "lmc_led_on in");
    if ((~sc->lmc_miireg16) & led) { /* Already on! */
        lmc_trace(sc->lmc_device, "lmc_led_on aon out");

    sc->lmc_miireg16 &= ~led;
    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
    lmc_trace(sc->lmc_device, "lmc_led_on out");

void lmc_led_off(lmc_softc_t * const sc, u32 led) /*fold00*/
    lmc_trace(sc->lmc_device, "lmc_led_off in");
    if (sc->lmc_miireg16 & led) { /* Already set don't do anything */
        lmc_trace(sc->lmc_device, "lmc_led_off aoff out");

    sc->lmc_miireg16 |= led;
    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
    lmc_trace(sc->lmc_device, "lmc_led_off out");
static void lmc_reset(lmc_softc_t * const sc) /*fold00*/
    lmc_trace(sc->lmc_device, "lmc_reset in");
    sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);

    sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);

    /*
     * make some of the GPIO pins be outputs
     */
    lmc_gpio_mkoutput(sc, LMC_GEP_RESET);

    /*
     * RESET low to force state reset.  This also forces
     * the transmitter clock to be internal, but we expect to reset
     * that later anyway.
     */
    sc->lmc_gpio &= ~(LMC_GEP_RESET);
    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

    /*
     * hold for more than 10 microseconds
     */

    /*
     * stop driving Xilinx-related signals
     */
    lmc_gpio_mkinput(sc, LMC_GEP_RESET);

    /*
     * Call media specific init routine
     */
    sc->lmc_media->init(sc);

    sc->extra_stats.resetCount++;
    lmc_trace(sc->lmc_device, "lmc_reset out");
static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/
    lmc_trace(sc->lmc_device, "lmc_dec_reset in");

    /*
     * disable all interrupts
     */
    sc->lmc_intrmask = 0;
    LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask);

    /*
     * Reset the chip with a software reset command.
     * Wait 10 microseconds (actually 50 PCI cycles but at
     * 33MHz that comes to two microseconds but wait a
     * bit longer anyways)
     */
    LMC_CSR_WRITE(sc, csr_busmode, TULIP_BUSMODE_SWRESET);

    sc->lmc_busmode = LMC_CSR_READ(sc, csr_busmode);
    sc->lmc_busmode = 0x00100000;
    sc->lmc_busmode &= ~TULIP_BUSMODE_SWRESET;
    LMC_CSR_WRITE(sc, csr_busmode, sc->lmc_busmode);

    sc->lmc_cmdmode = LMC_CSR_READ(sc, csr_command);

    /*
     * no ethernet address in frames we write
     * disable padding (txdesc, padding disable)
     * ignore runt frames (rdes0 bit 15)
     * no receiver watchdog or transmitter jabber timer
     *       (csr15 bit 0,14 == 1)
     * if using 16-bit CRC, turn off CRC (trans desc, crc disable)
     */
    sc->lmc_cmdmode |= (TULIP_CMD_PROMISCUOUS
                        | TULIP_CMD_FULLDUPLEX
                        | TULIP_CMD_PASSBADPKT
                        | TULIP_CMD_NOHEARTBEAT
                        | TULIP_CMD_PORTSELECT
                        | TULIP_CMD_RECEIVEALL
                        | TULIP_CMD_MUSTBEONE
                       );
    sc->lmc_cmdmode &= ~(TULIP_CMD_OPERMODE
                         | TULIP_CMD_THRESHOLDCTL
                         | TULIP_CMD_STOREFWD
                         | TULIP_CMD_TXTHRSHLDCTL
                        );

    LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);

    /*
     * disable receiver watchdog and transmit jabber
     */
    val = LMC_CSR_READ(sc, csr_sia_general);
    val |= (TULIP_WATCHDOG_TXDISABLE | TULIP_WATCHDOG_RXDISABLE);
    LMC_CSR_WRITE(sc, csr_sia_general, val);

    lmc_trace(sc->lmc_device, "lmc_dec_reset out");
static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00*/
                         size_t csr_size)
    lmc_trace(sc->lmc_device, "lmc_initcsrs in");
    sc->lmc_csrs.csr_busmode        = csr_base +  0 * csr_size;
    sc->lmc_csrs.csr_txpoll         = csr_base +  1 * csr_size;
    sc->lmc_csrs.csr_rxpoll         = csr_base +  2 * csr_size;
    sc->lmc_csrs.csr_rxlist         = csr_base +  3 * csr_size;
    sc->lmc_csrs.csr_txlist         = csr_base +  4 * csr_size;
    sc->lmc_csrs.csr_status         = csr_base +  5 * csr_size;
    sc->lmc_csrs.csr_command        = csr_base +  6 * csr_size;
    sc->lmc_csrs.csr_intr           = csr_base +  7 * csr_size;
    sc->lmc_csrs.csr_missed_frames  = csr_base +  8 * csr_size;
    sc->lmc_csrs.csr_9              = csr_base +  9 * csr_size;
    sc->lmc_csrs.csr_10             = csr_base + 10 * csr_size;
    sc->lmc_csrs.csr_11             = csr_base + 11 * csr_size;
    sc->lmc_csrs.csr_12             = csr_base + 12 * csr_size;
    sc->lmc_csrs.csr_13             = csr_base + 13 * csr_size;
    sc->lmc_csrs.csr_14             = csr_base + 14 * csr_size;
    sc->lmc_csrs.csr_15             = csr_base + 15 * csr_size;
    lmc_trace(sc->lmc_device, "lmc_initcsrs out");
static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue)
    lmc_softc_t *sc = dev_to_sc(dev);
    unsigned long flags;

    lmc_trace(dev, "lmc_driver_timeout in");

    spin_lock_irqsave(&sc->lmc_lock, flags);

    printk("%s: Xmitter busy|\n", dev->name);

    sc->extra_stats.tx_tbusy_calls++;
    if (jiffies - dev_trans_start(dev) < TX_TIMEOUT)

    /*
     * Chip seems to have locked up
     * This whips out all our descriptor
     * table and starts from scratch
     */

    LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO,
                  LMC_CSR_READ(sc, csr_status),
                  sc->extra_stats.tx_ProcTimeout);

    lmc_running_reset(dev);

    LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);
    LMC_EVENT_LOG(LMC_EVENT_RESET2,
                  lmc_mii_readreg(sc, 0, 16),
                  lmc_mii_readreg(sc, 0, 17));

    /* restart the tx processes */
    csr6 = LMC_CSR_READ(sc, csr_command);
    LMC_CSR_WRITE(sc, csr_command, csr6 | 0x0002);
    LMC_CSR_WRITE(sc, csr_command, csr6 | 0x2002);

    /* immediate transmit */
    LMC_CSR_WRITE(sc, csr_txpoll, 0);

    sc->lmc_device->stats.tx_errors++;
    sc->extra_stats.tx_ProcTimeout++; /* -baz */

    netif_trans_update(dev); /* prevent tx timeout */

    spin_unlock_irqrestore(&sc->lmc_lock, flags);

    lmc_trace(dev, "lmc_driver_timeout out");