/*
 * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
 * All rights reserved.  www.lanmedia.com
 * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This code is written by:
 * Andrew Stanley-Jones (asj@cban.com)
 * Rob Braun (bbraun@vix.com),
 * Michael Graff (explorer@vix.com) and
 * Matt Thomas (matt@3am-software.com).
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License version 2, incorporated herein by reference.
 *
 * Driver for the LanMedia LMC5200, LMC5245, LMC1000, LMC1200 cards.
 *
 * To control link specific options lmcctl is required.
 * It can be obtained from ftp.lanmedia.com.
 *
 * Linux uses the device struct lmc_private to pass private information
 *
 * The initialization portion of this driver consists of the lmc_reset() and
 * lmc_dec_reset() functions, as well as the LED controls and the
 * lmc_initcsrs() function.
 *
 * The watchdog function runs every second and checks to see if
 * we still have link, and that the timing source is what we expected
 * it to be.  If link is lost, the interface is marked down, and
 * we no longer can transmit.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/inet.h>
#include <linux/bitops.h>
#include <asm/processor.h>             /* Processor type for cache alignment. */
#include <linux/uaccess.h>
//#include <asm/spinlock.h>
#define DRIVER_MAJOR_VERSION     1
#define DRIVER_MINOR_VERSION    34
#define DRIVER_SUB_VERSION       0

#define DRIVER_VERSION ((DRIVER_MAJOR_VERSION << 8) + DRIVER_MINOR_VERSION)

#include "lmc_ioctl.h"
#include "lmc_debug.h"
#include "lmc_proto.h"
static int LMC_PKT_BUF_SZ = 1542;

static const struct pci_device_id lmc_pci_tbl[] = {
    { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
      PCI_VENDOR_ID_LMC, PCI_ANY_ID },
    { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
      PCI_ANY_ID, PCI_VENDOR_ID_LMC },
};

MODULE_DEVICE_TABLE(pci, lmc_pci_tbl);
MODULE_LICENSE("GPL v2");
static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
                                  struct net_device *dev);
static int lmc_rx(struct net_device *dev);
static int lmc_open(struct net_device *dev);
static int lmc_close(struct net_device *dev);
static struct net_device_stats *lmc_get_stats(struct net_device *dev);
static irqreturn_t lmc_interrupt(int irq, void *dev_instance);
static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, size_t csr_size);
static void lmc_softreset(lmc_softc_t * const);
static void lmc_running_reset(struct net_device *dev);
static int lmc_ifdown(struct net_device * const);
static void lmc_watchdog(struct timer_list *t);
static void lmc_reset(lmc_softc_t * const sc);
static void lmc_dec_reset(lmc_softc_t * const sc);
static void lmc_driver_timeout(struct net_device *dev);
/*
 * linux reserves 16 device specific IOCTLs.  We call them
 * LMCIOC* to control various bits of our world.
 */
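/*
 * Editorial sketch (not part of the original driver): roughly how a
 * userspace tool such as lmcctl would issue these ioctls, passing the
 * payload through ifr_data.  The interface name "hdlc0" and the
 * socket()/printf() details are assumptions for illustration only.
 *
 *      #include <sys/ioctl.h>
 *      #include <net/if.h>
 *
 *      struct ifreq ifr;
 *      lmc_ctl_t ctl;
 *      int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *      memset(&ifr, 0, sizeof(ifr));
 *      strncpy(ifr.ifr_name, "hdlc0", IFNAMSIZ - 1);
 *      ifr.ifr_data = (char *)&ctl;
 *      if (ioctl(fd, LMCIOCGINFO, &ifr) == 0)
 *              printf("crc length: %d\n", ctl.crc_length);
 */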
int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
{
    lmc_softc_t *sc = dev_to_sc(dev);
    int ret = -EOPNOTSUPP;

    lmc_trace(dev, "lmc_ioctl in");

    /*
     * Most functions mess with the structure
     * Disable interrupts while we do the polling
     */

    /*
     * Return current driver state.  Since we keep this up
     * to date internally, just copy this out to the user.
     */
    case LMCIOCGINFO: /*fold01*/
        if (copy_to_user(ifr->ifr_data, &sc->ictl, sizeof(lmc_ctl_t)))

    case LMCIOCSINFO: /*fold01*/
        if (!capable(CAP_NET_ADMIN)) {

        if (dev->flags & IFF_UP) {

        if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {

        spin_lock_irqsave(&sc->lmc_lock, flags);
        sc->lmc_media->set_status(sc, &ctl);

        if (ctl.crc_length != sc->ictl.crc_length) {
            sc->lmc_media->set_crc_length(sc, ctl.crc_length);
            if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16)
                sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE;
            else
                sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE;
        }
        spin_unlock_irqrestore(&sc->lmc_lock, flags);

    case LMCIOCIFTYPE: /*fold01*/
        u16 old_type = sc->if_type;

        if (!capable(CAP_NET_ADMIN)) {

        if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u16))) {

        if (new_type == old_type)
            break;  /* no change */

        spin_lock_irqsave(&sc->lmc_lock, flags);

        sc->if_type = new_type;
        lmc_proto_attach(sc);
        ret = lmc_proto_open(sc);
        spin_unlock_irqrestore(&sc->lmc_lock, flags);
    case LMCIOCGETXINFO: /*fold01*/
        spin_lock_irqsave(&sc->lmc_lock, flags);
        sc->lmc_xinfo.Magic0 = 0xBEEFCAFE;

        sc->lmc_xinfo.PciCardType = sc->lmc_cardtype;
        sc->lmc_xinfo.PciSlotNumber = 0;
        sc->lmc_xinfo.DriverMajorVersion = DRIVER_MAJOR_VERSION;
        sc->lmc_xinfo.DriverMinorVersion = DRIVER_MINOR_VERSION;
        sc->lmc_xinfo.DriverSubVersion = DRIVER_SUB_VERSION;
        sc->lmc_xinfo.XilinxRevisionNumber =
            lmc_mii_readreg(sc, 0, 3) & 0xf;
        sc->lmc_xinfo.MaxFrameSize = LMC_PKT_BUF_SZ;
        sc->lmc_xinfo.link_status = sc->lmc_media->get_link_status(sc);
        sc->lmc_xinfo.mii_reg16 = lmc_mii_readreg(sc, 0, 16);
        spin_unlock_irqrestore(&sc->lmc_lock, flags);

        sc->lmc_xinfo.Magic1 = 0xDEADBEEF;

        if (copy_to_user(ifr->ifr_data, &sc->lmc_xinfo,
                         sizeof(struct lmc_xinfo)))

    case LMCIOCGETLMCSTATS:
        spin_lock_irqsave(&sc->lmc_lock, flags);
        if (sc->lmc_cardtype == LMC_CARDTYPE_T1) {
            lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_LSB);
            sc->extra_stats.framingBitErrorCount +=
                lmc_mii_readreg(sc, 0, 18) & 0xff;
            lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_MSB);
            sc->extra_stats.framingBitErrorCount +=
                (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
            lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_LSB);
            sc->extra_stats.lineCodeViolationCount +=
                lmc_mii_readreg(sc, 0, 18) & 0xff;
            lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_MSB);
            sc->extra_stats.lineCodeViolationCount +=
                (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
            lmc_mii_writereg(sc, 0, 17, T1FRAMER_AERR);
            regVal = lmc_mii_readreg(sc, 0, 18) & 0xff;

            sc->extra_stats.lossOfFrameCount +=
                (regVal & T1FRAMER_LOF_MASK) >> 4;
            sc->extra_stats.changeOfFrameAlignmentCount +=
                (regVal & T1FRAMER_COFA_MASK) >> 2;
            sc->extra_stats.severelyErroredFrameCount +=
                regVal & T1FRAMER_SEF_MASK;
        }
        spin_unlock_irqrestore(&sc->lmc_lock, flags);
        if (copy_to_user(ifr->ifr_data, &sc->lmc_device->stats,
                         sizeof(sc->lmc_device->stats)) ||
            copy_to_user(ifr->ifr_data + sizeof(sc->lmc_device->stats),
                         &sc->extra_stats, sizeof(sc->extra_stats)))
    case LMCIOCCLEARLMCSTATS:
        if (!capable(CAP_NET_ADMIN)) {

        spin_lock_irqsave(&sc->lmc_lock, flags);
        memset(&sc->lmc_device->stats, 0, sizeof(sc->lmc_device->stats));
        memset(&sc->extra_stats, 0, sizeof(sc->extra_stats));
        sc->extra_stats.check = STATCHECK;
        sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
            sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
        sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
        spin_unlock_irqrestore(&sc->lmc_lock, flags);

    case LMCIOCSETCIRCUIT: /*fold01*/
        if (!capable(CAP_NET_ADMIN)) {

        if (dev->flags & IFF_UP) {

        if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {

        spin_lock_irqsave(&sc->lmc_lock, flags);
        sc->lmc_media->set_circuit_type(sc, ctl.circuit_type);
        sc->ictl.circuit_type = ctl.circuit_type;
        spin_unlock_irqrestore(&sc->lmc_lock, flags);

    case LMCIOCRESET: /*fold01*/
        if (!capable(CAP_NET_ADMIN)) {

        spin_lock_irqsave(&sc->lmc_lock, flags);
        /* Reset driver and bring back to current state */
        printk(" REG16 before reset +%04x\n", lmc_mii_readreg(sc, 0, 16));
        lmc_running_reset(dev);
        printk(" REG16 after reset +%04x\n", lmc_mii_readreg(sc, 0, 16));

        LMC_EVENT_LOG(LMC_EVENT_FORCEDRESET, LMC_CSR_READ(sc, csr_status), lmc_mii_readreg(sc, 0, 16));
        spin_unlock_irqrestore(&sc->lmc_lock, flags);

    case LMCIOCDUMPEVENTLOG:
        if (copy_to_user(ifr->ifr_data, &lmcEventLogIndex, sizeof(u32))) {

        if (copy_to_user(ifr->ifr_data + sizeof(u32), lmcEventLogBuf,
                         sizeof(lmcEventLogBuf)))

#endif /* end ifdef _DBG_EVENTLOG */
    case LMCIOCT1CONTROL: /*fold01*/
        if (sc->lmc_cardtype != LMC_CARDTYPE_T1) {
    case LMCIOCXILINX: /*fold01*/
        {
        struct lmc_xilinx_control xc; /*fold02*/

        if (!capable(CAP_NET_ADMIN)) {

        /*
         * Stop the transmitter while we restart the hardware
         */
        netif_stop_queue(dev);

        if (copy_from_user(&xc, ifr->ifr_data, sizeof(struct lmc_xilinx_control))) {

        case lmc_xilinx_reset: /*fold02*/
            spin_lock_irqsave(&sc->lmc_lock, flags);
            mii = lmc_mii_readreg(sc, 0, 16);

            /*
             * Make all of them 0 and make input
             */
            lmc_gpio_mkinput(sc, 0xff);

            /*
             * make the reset output
             */
            lmc_gpio_mkoutput(sc, LMC_GEP_RESET);

            /*
             * RESET low to force configuration.  This also forces
             * the transmitter clock to be internal, but we expect to reset
             * that later anyway.
             */
            sc->lmc_gpio &= ~LMC_GEP_RESET;
            LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

            /*
             * hold for more than 10 microseconds
             */
            sc->lmc_gpio |= LMC_GEP_RESET;
            LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

            /*
             * stop driving Xilinx-related signals
             */
            lmc_gpio_mkinput(sc, 0xff);

            /* Reset the framer hardware */
            sc->lmc_media->set_link_status(sc, 1);
            sc->lmc_media->set_status(sc, NULL);
            // lmc_softreset(sc);

            for (i = 0; i < 5; i++) {
                lmc_led_on(sc, LMC_DS3_LED0);
                lmc_led_off(sc, LMC_DS3_LED0);
                lmc_led_on(sc, LMC_DS3_LED1);
                lmc_led_off(sc, LMC_DS3_LED1);
                lmc_led_on(sc, LMC_DS3_LED3);
                lmc_led_off(sc, LMC_DS3_LED3);
                lmc_led_on(sc, LMC_DS3_LED2);
                lmc_led_off(sc, LMC_DS3_LED2);
            }
            spin_unlock_irqrestore(&sc->lmc_lock, flags);
        case lmc_xilinx_load_prom: /*fold02*/
            {
            int timeout = 500000;
            spin_lock_irqsave(&sc->lmc_lock, flags);
            mii = lmc_mii_readreg(sc, 0, 16);

            /*
             * Make all of them 0 and make input
             */
            lmc_gpio_mkinput(sc, 0xff);

            /*
             * make the reset output
             */
            lmc_gpio_mkoutput(sc, LMC_GEP_DP | LMC_GEP_RESET);

            /*
             * RESET low to force configuration.  This also forces
             * the transmitter clock to be internal, but we expect to reset
             * that later anyway.
             */
            sc->lmc_gpio &= ~(LMC_GEP_RESET | LMC_GEP_DP);
            LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

            /*
             * hold for more than 10 microseconds
             */
            sc->lmc_gpio |= LMC_GEP_DP | LMC_GEP_RESET;
            LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

            /*
             * busy wait for the chip to reset
             */
            while ((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&

            /*
             * stop driving Xilinx-related signals
             */
            lmc_gpio_mkinput(sc, 0xff);
            spin_unlock_irqrestore(&sc->lmc_lock, flags);
        case lmc_xilinx_load: /*fold02*/
            {
            int timeout = 500000;

            data = memdup_user(xc.data, xc.len);

            printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data);

            spin_lock_irqsave(&sc->lmc_lock, flags);
            lmc_gpio_mkinput(sc, 0xff);

            /*
             * Clear the Xilinx and start programming from the DEC
             */
            sc->lmc_gpio &= ~LMC_GEP_DP;
            sc->lmc_gpio &= ~LMC_GEP_RESET;
            sc->lmc_gpio |= LMC_GEP_MODE;
            LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

            lmc_gpio_mkoutput(sc, LMC_GEP_MODE | LMC_GEP_DP | LMC_GEP_RESET);

            /*
             * Wait at least 10 us 20 to be safe
             */

            /*
             * Clear reset and activate programming lines
             */
            lmc_gpio_mkinput(sc, LMC_GEP_DP | LMC_GEP_RESET);

            /*
             * Set LOAD, DATA, Clock to 1
             */
            sc->lmc_gpio |= LMC_GEP_MODE;
            sc->lmc_gpio |= LMC_GEP_DATA;
            sc->lmc_gpio |= LMC_GEP_CLK;
            LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

            lmc_gpio_mkoutput(sc, LMC_GEP_DATA | LMC_GEP_CLK | LMC_GEP_MODE);

            /*
             * busy wait for the chip to reset
             */
            while ((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&

            printk(KERN_DEBUG "%s: Waited %d for the Xilinx to clear its memory\n", dev->name, 500000 - timeout);

            for (pos = 0; pos < xc.len; pos++) {
                    sc->lmc_gpio &= ~LMC_GEP_DATA; /* Data is 0 */
                    sc->lmc_gpio |= LMC_GEP_DATA; /* Data is 1 */
                    printk(KERN_WARNING "%s Bad data in xilinx programming data at %d, got %d wanted 0 or 1\n", dev->name, pos, data[pos]);
                    sc->lmc_gpio |= LMC_GEP_DATA; /* Assume it's 1 */
                sc->lmc_gpio &= ~LMC_GEP_CLK; /* Clock to zero */
                sc->lmc_gpio |= LMC_GEP_MODE;
                LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

                sc->lmc_gpio |= LMC_GEP_CLK; /* Put the clock back to one */
                sc->lmc_gpio |= LMC_GEP_MODE;
                LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
            }
            if ((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0) {
                printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (corrupted data)\n", dev->name);
            }
            else if ((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_DP) == 0) {
                printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (done)\n", dev->name);
            }
            else {
                printk(KERN_DEBUG "%s: Done reprogramming Xilinx, %d bits, good luck!\n", dev->name, pos);
            }
            lmc_gpio_mkinput(sc, 0xff);

            sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
            lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);

            sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
            lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
            spin_unlock_irqrestore(&sc->lmc_lock, flags);

        netif_wake_queue(dev);

        /* If we don't know what to do, give the protocol a shot. */
        ret = lmc_proto_ioctl(sc, ifr, cmd);

    lmc_trace(dev, "lmc_ioctl out");
/* the watchdog process that cruises around */
static void lmc_watchdog(struct timer_list *t) /*fold00*/
{
    lmc_softc_t *sc = from_timer(sc, t, timer);
    struct net_device *dev = sc->lmc_device;

    lmc_trace(dev, "lmc_watchdog in");

    spin_lock_irqsave(&sc->lmc_lock, flags);

    if (sc->check != 0xBEAFCAFE) {
        printk("LMC: Corrupt net_device struct, breaking out\n");
        spin_unlock_irqrestore(&sc->lmc_lock, flags);

    /* Make sure the tx jabber and rx watchdog are off,
     * and the transmit and receive processes are running.
     */
    LMC_CSR_WRITE(sc, csr_15, 0x00000011);
    sc->lmc_cmdmode |= TULIP_CMD_TXRUN | TULIP_CMD_RXRUN;
    LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);

    LMC_EVENT_LOG(LMC_EVENT_WATCHDOG, LMC_CSR_READ(sc, csr_status), lmc_mii_readreg(sc, 0, 16));

    /* --- begin time out check -----------------------------------
     * check for a transmit interrupt timeout
     * Has the packet xmt vs xmt serviced threshold been exceeded */
    if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
        sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
        sc->tx_TimeoutInd == 0)
    {
        /* wait for the watchdog to come around again */
        sc->tx_TimeoutInd = 1;
    }
    else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
             sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&

        LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ(sc, csr_status), 0);

        sc->tx_TimeoutDisplay = 1;
        sc->extra_stats.tx_TimeoutCnt++;

        /* DEC chip is stuck, hit it with a RESET!!!! */
        lmc_running_reset(dev);

        /* look at receive & transmit process state to make sure they are running */
        LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);

        /* look at: DSR - 02 for Reg 16 */
        LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16), lmc_mii_readreg(sc, 0, 17));

        /* reset the transmit timeout detection flag */
        sc->tx_TimeoutInd = 0;
        sc->lastlmc_taint_tx = sc->lmc_taint_tx;
        sc->lasttx_packets = sc->lmc_device->stats.tx_packets;

        sc->tx_TimeoutInd = 0;
        sc->lastlmc_taint_tx = sc->lmc_taint_tx;
        sc->lasttx_packets = sc->lmc_device->stats.tx_packets;

    /* --- end time out check ----------------------------------- */

    link_status = sc->lmc_media->get_link_status(sc);

    /*
     * hardware level link lost, but the interface is marked as up.
     */
    if ((link_status == 0) && (sc->last_link_status != 0)) {
        printk(KERN_WARNING "%s: hardware/physical link down\n", dev->name);
        sc->last_link_status = 0;
        /* lmc_reset (sc); Why reset??? The link can go down ok */

        /* Inform the world that link has been lost */
        netif_carrier_off(dev);
    }

    /*
     * hardware link is up, but the interface is marked as down.
     * Bring it back up again.
     */
    if (link_status != 0 && sc->last_link_status == 0) {
        printk(KERN_WARNING "%s: hardware/physical link up\n", dev->name);
        sc->last_link_status = 1;
        /* lmc_reset (sc); Again why reset??? */

        netif_carrier_on(dev);
    }

    /* Call media specific watchdog functions */
    sc->lmc_media->watchdog(sc);

    /*
     * Poke the transmitter to make sure it
     * never stops, even if we run out of mem
     */
    LMC_CSR_WRITE(sc, csr_rxpoll, 0);

    /*
     * Check for code that failed
     * and try and fix it as appropriate
     */
    if (sc->failed_ring == 1) {
        /*
         * Failed to setup the recv/xmit rings
         */

    if (sc->failed_recv_alloc == 1) {
        /*
         * We failed to alloc mem in the
         * interrupt handler, go through the rings
         */
        sc->failed_recv_alloc = 0;

    /*
     * remember the timer value
     */
    ticks = LMC_CSR_READ(sc, csr_gp_timer);
    LMC_CSR_WRITE(sc, csr_gp_timer, 0xffffffffUL);
    sc->ictl.ticks = 0x0000ffff - (ticks & 0x0000ffff);

    /*
     * restart this timer.
     */
    sc->timer.expires = jiffies + (HZ);
    add_timer(&sc->timer);

    spin_unlock_irqrestore(&sc->lmc_lock, flags);

    lmc_trace(dev, "lmc_watchdog out");
static int lmc_attach(struct net_device *dev, unsigned short encoding,
                      unsigned short parity)
{
    if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)

static const struct net_device_ops lmc_ops = {
    .ndo_open       = lmc_open,
    .ndo_stop       = lmc_close,
    .ndo_start_xmit = hdlc_start_xmit,
    .ndo_do_ioctl   = lmc_ioctl,
    .ndo_tx_timeout = lmc_driver_timeout,
    .ndo_get_stats  = lmc_get_stats,
};
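/*
 * Editorial note (not part of the original driver): .ndo_start_xmit is the
 * generic HDLC layer's hdlc_start_xmit(); as far as I can tell it forwards
 * frames to the hardware xmit hook registered in lmc_init_one() below
 * (dev_to_hdlc(dev)->xmit = lmc_start_xmit), so transmitted packets still
 * end up in this driver's lmc_start_xmit().
 */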
static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    struct net_device *dev;

    static int cards_found;

    /* lmc_trace(dev, "lmc_init_one in"); */

    err = pcim_enable_device(pdev);
    if (err) {
        printk(KERN_ERR "lmc: pci enable failed: %d\n", err);

    err = pci_request_regions(pdev, "lmc");
    if (err) {
        printk(KERN_ERR "lmc: pci_request_region failed\n");

    /*
     * Allocate our own device structure
     */
    sc = devm_kzalloc(&pdev->dev, sizeof(lmc_softc_t), GFP_KERNEL);

    dev = alloc_hdlcdev(sc);
    if (!dev) {
        printk(KERN_ERR "lmc:alloc_netdev for device failed\n");

    dev->type = ARPHRD_HDLC;
    dev_to_hdlc(dev)->xmit = lmc_start_xmit;
    dev_to_hdlc(dev)->attach = lmc_attach;
    dev->netdev_ops = &lmc_ops;
    dev->watchdog_timeo = HZ; /* 1 second */
    dev->tx_queue_len = 100;
    sc->lmc_device = dev;
    sc->name = dev->name;
    sc->if_type = LMC_PPP;
    sc->check = 0xBEAFCAFE;
    dev->base_addr = pci_resource_start(pdev, 0);
    dev->irq = pdev->irq;
    pci_set_drvdata(pdev, dev);
    SET_NETDEV_DEV(dev, &pdev->dev);

    /*
     * This will get the protocol layer ready and do any 1 time init's
     * Must have a valid sc and dev structure
     */
    lmc_proto_attach(sc);

    /* Init the spin lock so we can call it later */

    spin_lock_init(&sc->lmc_lock);
    pci_set_master(pdev);

    printk(KERN_INFO "%s: detected at %lx, irq %d\n", dev->name,
           dev->base_addr, dev->irq);

    err = register_hdlc_device(dev);
    if (err) {
        printk(KERN_ERR "%s: register_netdev failed.\n", dev->name);

    sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN;
    sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;

    /*
     * Check either the subvendor or the subdevice, some systems reverse
     * the setting in the BIOS, seems to be version and arch dependent?
     * Fix the error, exchange the two values
     */
    if ((subdevice = pdev->subsystem_device) == PCI_VENDOR_ID_LMC)
        subdevice = pdev->subsystem_vendor;

    case PCI_DEVICE_ID_LMC_HSSI:
        printk(KERN_INFO "%s: LMC HSSI\n", dev->name);
        sc->lmc_cardtype = LMC_CARDTYPE_HSSI;
        sc->lmc_media = &lmc_hssi_media;

    case PCI_DEVICE_ID_LMC_DS3:
        printk(KERN_INFO "%s: LMC DS3\n", dev->name);
        sc->lmc_cardtype = LMC_CARDTYPE_DS3;
        sc->lmc_media = &lmc_ds3_media;

    case PCI_DEVICE_ID_LMC_SSI:
        printk(KERN_INFO "%s: LMC SSI\n", dev->name);
        sc->lmc_cardtype = LMC_CARDTYPE_SSI;
        sc->lmc_media = &lmc_ssi_media;

    case PCI_DEVICE_ID_LMC_T1:
        printk(KERN_INFO "%s: LMC T1\n", dev->name);
        sc->lmc_cardtype = LMC_CARDTYPE_T1;
        sc->lmc_media = &lmc_t1_media;

        printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name);

    lmc_initcsrs(sc, dev->base_addr, 8);

    lmc_gpio_mkinput(sc, 0xff);
    sc->lmc_gpio = 0; /* drive no signals yet */

    sc->lmc_media->defaults(sc);

    sc->lmc_media->set_link_status(sc, LMC_LINK_UP);

    /* verify that the PCI Sub System ID matches the Adapter Model number
     * from the MII register
     */
    AdapModelNum = (lmc_mii_readreg(sc, 0, 3) & 0x3f0) >> 4;

    if ((AdapModelNum != LMC_ADAP_T1 ||   /* detect LMC1200 */
         subdevice != PCI_DEVICE_ID_LMC_T1) &&
        (AdapModelNum != LMC_ADAP_SSI ||  /* detect LMC1000 */
         subdevice != PCI_DEVICE_ID_LMC_SSI) &&
        (AdapModelNum != LMC_ADAP_DS3 ||  /* detect LMC5245 */
         subdevice != PCI_DEVICE_ID_LMC_DS3) &&
        (AdapModelNum != LMC_ADAP_HSSI || /* detect LMC5200 */
         subdevice != PCI_DEVICE_ID_LMC_HSSI))
        printk(KERN_WARNING "%s: Model number (%d) miscompare for PCI"
               " Subsystem ID = 0x%04x\n",
               dev->name, AdapModelNum, subdevice);

    LMC_CSR_WRITE(sc, csr_gp_timer, 0xFFFFFFFFUL);

    sc->board_idx = cards_found++;
    sc->extra_stats.check = STATCHECK;
    sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
        sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
    sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;

    sc->last_link_status = 0;

    lmc_trace(dev, "lmc_init_one out");
/*
 * Called from pci when removing module.
 */
static void lmc_remove_one(struct pci_dev *pdev)
{
    struct net_device *dev = pci_get_drvdata(pdev);

    printk(KERN_DEBUG "%s: removing...\n", dev->name);
    unregister_hdlc_device(dev);
/* After this is called, packets can be sent.
 * Does not initialize the addresses
 */
static int lmc_open(struct net_device *dev)
{
    lmc_softc_t *sc = dev_to_sc(dev);

    lmc_trace(dev, "lmc_open in");

    lmc_led_on(sc, LMC_DS3_LED0);

    LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);
    LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16),
                  lmc_mii_readreg(sc, 0, 17));

        lmc_trace(dev, "lmc_open lmc_ok out");

    /* Since we have to use PCI bus, this should work on x86,alpha,ppc */
    if (request_irq(dev->irq, lmc_interrupt, IRQF_SHARED, dev->name, dev)) {
        printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq);
        lmc_trace(dev, "lmc_open irq failed out");

    /* Assert Terminal Active */
    sc->lmc_miireg16 |= LMC_MII16_LED_ALL;
    sc->lmc_media->set_link_status(sc, LMC_LINK_UP);

    /*
     * reset to last state.
     */
    sc->lmc_media->set_status(sc, NULL);

    /* setup default bits to be used in tulip_desc_t transmit descriptor */
    sc->TxDescriptControlInit = (
        LMC_TDES_INTERRUPT_ON_COMPLETION
        | LMC_TDES_FIRST_SEGMENT
        | LMC_TDES_LAST_SEGMENT
        | LMC_TDES_SECOND_ADDR_CHAINED
        | LMC_TDES_DISABLE_PADDING
        );

    if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16) {
        /* disable 32 bit CRC generated by ASIC */
        sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE;
    }
    sc->lmc_media->set_crc_length(sc, sc->ictl.crc_length);
    /* Acknowledge the Terminal Active and light LEDs */

    /* dev->flags |= IFF_UP; */

    if ((err = lmc_proto_open(sc)) != 0)
        return err;

    netif_start_queue(dev);
    sc->extra_stats.tx_tbusy0++;

    /*
     * select what interrupts we want to get
     */
    sc->lmc_intrmask = 0;
    /* Should be using the default interrupt mask defined in the .h file. */
    sc->lmc_intrmask |= (TULIP_STS_NORMALINTR
                         | TULIP_STS_ABNRMLINTR
                         | TULIP_STS_SYSERROR
                         | TULIP_STS_TXSTOPPED
                         | TULIP_STS_TXUNDERFLOW
                         | TULIP_STS_RXSTOPPED
        );
    LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask);

    sc->lmc_cmdmode |= TULIP_CMD_TXRUN;
    sc->lmc_cmdmode |= TULIP_CMD_RXRUN;
    LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);

    sc->lmc_ok = 1; /* Run watchdog */

    /*
     * Set the if up now - pfb
     */
    sc->last_link_status = 1;

    /*
     * Setup a timer for the watchdog on probe, and start it running.
     * Since lmc_ok == 0, it will be a NOP for now.
     */
    timer_setup(&sc->timer, lmc_watchdog, 0);
    sc->timer.expires = jiffies + HZ;
    add_timer(&sc->timer);

    lmc_trace(dev, "lmc_open out");
/* Total reset to compensate for the AdTran DSU doing bad things */
static void lmc_running_reset(struct net_device *dev) /*fold00*/
{
    lmc_softc_t *sc = dev_to_sc(dev);

    lmc_trace(dev, "lmc_running_reset in");

    /* stop interrupts */
    /* Clear the interrupt mask */
    LMC_CSR_WRITE(sc, csr_intr, 0x00000000);

    /* sc->lmc_miireg16 |= LMC_MII16_LED_ALL; */
    sc->lmc_media->set_link_status(sc, 1);
    sc->lmc_media->set_status(sc, NULL);

    netif_wake_queue(dev);

    sc->extra_stats.tx_tbusy0++;

    sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK;
    LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask);

    sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN);
    LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);

    lmc_trace(dev, "lmc_running_reset out");
/* This is what is called when you ifconfig down a device.
 * This disables the timer for the watchdog and keepalives,
 * and disables the irq for dev.
 */
static int lmc_close(struct net_device *dev)
{
    /* not calling release_region() as we should */
    lmc_softc_t *sc = dev_to_sc(dev);

    lmc_trace(dev, "lmc_close in");

    sc->lmc_media->set_link_status(sc, 0);
    del_timer(&sc->timer);
    lmc_proto_close(sc);

    lmc_trace(dev, "lmc_close out");
/* Ends the transfer of packets */
/* When the interface goes down, this is called */
static int lmc_ifdown(struct net_device *dev) /*fold00*/
{
    lmc_softc_t *sc = dev_to_sc(dev);

    lmc_trace(dev, "lmc_ifdown in");

    /* Don't let anything else go on right now */
    netif_stop_queue(dev);
    sc->extra_stats.tx_tbusy1++;

    /* stop interrupts */
    /* Clear the interrupt mask */
    LMC_CSR_WRITE(sc, csr_intr, 0x00000000);

    /* Stop Tx and Rx on the chip */
    csr6 = LMC_CSR_READ(sc, csr_command);
    csr6 &= ~LMC_DEC_ST;        /* Turn off the Transmission bit */
    csr6 &= ~LMC_DEC_SR;        /* Turn off the Receive bit */
    LMC_CSR_WRITE(sc, csr_command, csr6);

    sc->lmc_device->stats.rx_missed_errors +=
        LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;

    /* release the interrupt */
    if (sc->got_irq == 1) {
        free_irq(dev->irq, dev);
    }

    /* free skbuffs in the Rx queue */
    for (i = 0; i < LMC_RXDESCS; i++)
    {
        struct sk_buff *skb = sc->lmc_rxq[i];
        sc->lmc_rxq[i] = NULL;
        sc->lmc_rxring[i].status = 0;
        sc->lmc_rxring[i].length = 0;
        sc->lmc_rxring[i].buffer1 = 0xDEADBEEF;

        sc->lmc_rxq[i] = NULL;
    }

    for (i = 0; i < LMC_TXDESCS; i++)
    {
        if (sc->lmc_txq[i] != NULL)
            dev_kfree_skb(sc->lmc_txq[i]);
        sc->lmc_txq[i] = NULL;
    }

    lmc_led_off(sc, LMC_MII16_LED_ALL);

    netif_wake_queue(dev);
    sc->extra_stats.tx_tbusy0++;

    lmc_trace(dev, "lmc_ifdown out");
/* Interrupt handling routine.  This will take an incoming packet, or clean
 * up after a transmit.
 */
static irqreturn_t lmc_interrupt(int irq, void *dev_instance) /*fold00*/
{
    struct net_device *dev = (struct net_device *) dev_instance;
    lmc_softc_t *sc = dev_to_sc(dev);

    int max_work = LMC_RXDESCS;

    lmc_trace(dev, "lmc_interrupt in");

    spin_lock(&sc->lmc_lock);

    /*
     * Read the csr to find what interrupts we have (if any)
     */
    csr = LMC_CSR_READ(sc, csr_status);

    /*
     * Make sure this is our interrupt
     */
    if (!(csr & sc->lmc_intrmask)) {
        goto lmc_int_fail_out;
    }

    /* always go through this loop at least once */
    while (csr & sc->lmc_intrmask) {

        /*
         * Clear interrupt bits, we handle all case below
         */
        LMC_CSR_WRITE(sc, csr_status, csr);

        /*
         * - Transmit process timed out CSR5<1>
         * - Transmit jabber timeout    CSR5<3>
         * - Transmit underflow         CSR5<5>
         * - Transmit Receiver buffer unavailable CSR5<7>
         * - Receive process stopped    CSR5<8>
         * - Receive watchdog timeout   CSR5<9>
         * - Early transmit interrupt   CSR5<10>
         *
         * Is this really right? Should we do a running reset for jabber?
         * (being a WAN card and all)
         */
        if (csr & TULIP_STS_ABNRMLINTR) {
            lmc_running_reset(dev);
        }

        if (csr & TULIP_STS_RXINTR) {
            lmc_trace(dev, "rx interrupt");
        }

        if (csr & (TULIP_STS_TXINTR | TULIP_STS_TXNOBUF | TULIP_STS_TXSTOPPED)) {

            /* reset the transmit timeout detection flag -baz */
            sc->extra_stats.tx_NoCompleteCnt = 0;

            badtx = sc->lmc_taint_tx;
            i = badtx % LMC_TXDESCS;

            while ((badtx < sc->lmc_next_tx)) {
                stat = sc->lmc_txring[i].status;

                LMC_EVENT_LOG(LMC_EVENT_XMTINT, stat,
                              sc->lmc_txring[i].length);
                /*
                 * If bit 31 is 1 the tulip owns it break out of the loop
                 */
                if (stat & 0x80000000)
                    break;

                n_compl++; /* i.e., have an empty slot in ring */

                /*
                 * If we have no skbuff or have cleared it
                 * Already continue to the next buffer
                 */
                if (sc->lmc_txq[i] == NULL)
                    continue;

                /*
                 * Check the total error summary to look for any errors
                 */
                if (stat & 0x8000) {
                    sc->lmc_device->stats.tx_errors++;
                        sc->lmc_device->stats.tx_aborted_errors++;
                        sc->lmc_device->stats.tx_carrier_errors++;
                        sc->lmc_device->stats.tx_window_errors++;
                        sc->lmc_device->stats.tx_fifo_errors++;

                    sc->lmc_device->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff;

                    sc->lmc_device->stats.tx_packets++;

                //      dev_kfree_skb(sc->lmc_txq[i]);
                dev_kfree_skb_irq(sc->lmc_txq[i]);
                sc->lmc_txq[i] = NULL;

                i = badtx % LMC_TXDESCS;

            if (sc->lmc_next_tx - badtx > LMC_TXDESCS)
            {
                printk("%s: out of sync pointer\n", dev->name);
                badtx += LMC_TXDESCS;

            LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0);

            netif_wake_queue(dev);
            sc->extra_stats.tx_tbusy0++;

            sc->extra_stats.dirtyTx = badtx;
            sc->extra_stats.lmc_next_tx = sc->lmc_next_tx;
            sc->extra_stats.lmc_txfull = sc->lmc_txfull;

            sc->lmc_taint_tx = badtx;

            /*
             * Why was there a break here???
             */
        } /* end handle transmit interrupt */

        if (csr & TULIP_STS_SYSERROR) {
            printk(KERN_WARNING "%s: system bus error csr: %#8.8x\n", dev->name, csr);
            error = csr >> 23 & 0x7;
                printk(KERN_WARNING "%s: Parity Fault (bad)\n", dev->name);
                printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
                printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
                printk(KERN_WARNING "%s: This bus error code was supposed to be reserved!\n", dev->name);

            LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);
            LMC_EVENT_LOG(LMC_EVENT_RESET2,
                          lmc_mii_readreg(sc, 0, 16),
                          lmc_mii_readreg(sc, 0, 17));

        /*
         * Get current csr status to make sure
         * we've cleared all interrupts
         */
        csr = LMC_CSR_READ(sc, csr_status);
    } /* end interrupt loop */
    LMC_EVENT_LOG(LMC_EVENT_INT, firstcsr, csr);

    spin_unlock(&sc->lmc_lock);

    lmc_trace(dev, "lmc_interrupt out");
    return IRQ_RETVAL(handled);
static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
                                  struct net_device *dev)
{
    lmc_softc_t *sc = dev_to_sc(dev);

    unsigned long flags;

    lmc_trace(dev, "lmc_start_xmit in");

    spin_lock_irqsave(&sc->lmc_lock, flags);

    /* normal path, tbusy known to be zero */

    entry = sc->lmc_next_tx % LMC_TXDESCS;

    sc->lmc_txq[entry] = skb;
    sc->lmc_txring[entry].buffer1 = virt_to_bus(skb->data);

    LMC_CONSOLE_LOG("xmit", skb->data, skb->len);

    /* If the queue is less than half full, don't interrupt */
    if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS / 2)
    {
        /* Do not interrupt on completion of this packet */
        netif_wake_queue(dev);
    }
    else if (sc->lmc_next_tx - sc->lmc_taint_tx == LMC_TXDESCS / 2)
    {
        /* This generates an interrupt on completion of this packet */
        netif_wake_queue(dev);
    }
    else if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS - 1)
    {
        /* Do not interrupt on completion of this packet */
        netif_wake_queue(dev);
    }
    else
    {
        /* This generates an interrupt on completion of this packet */
        netif_stop_queue(dev);
    }

    flag = LMC_TDES_INTERRUPT_ON_COMPLETION;

    if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1)
    {   /* ring full, go busy */
        netif_stop_queue(dev);
        sc->extra_stats.tx_tbusy1++;
        LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0);
    }

    if (entry == LMC_TXDESCS - 1)   /* last descriptor in ring */
        flag |= LMC_TDES_END_OF_RING;   /* flag as such for Tulip */

    /* don't pad small packets either */
    flag = sc->lmc_txring[entry].length = (skb->len) | flag |
        sc->TxDescriptControlInit;

    /* set the transmit timeout flag to be checked in
     * the watchdog timer handler. -baz
     */
    sc->extra_stats.tx_NoCompleteCnt++;

    /* give ownership to the chip */
    LMC_EVENT_LOG(LMC_EVENT_XMT, flag, entry);
    sc->lmc_txring[entry].status = 0x80000000;

    LMC_CSR_WRITE(sc, csr_txpoll, 0);

    spin_unlock_irqrestore(&sc->lmc_lock, flags);

    lmc_trace(dev, "lmc_start_xmit_out");
    return NETDEV_TX_OK;
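/*
 * Editorial note (not part of the original driver): the descriptor is handed
 * to the DEC 21140 by writing 0x80000000 (the ownership bit) into its status
 * word, and the write to csr_txpoll asks the chip to rescan the transmit
 * list.  The if/else ladder above decides, based on how full the ring is,
 * whether this packet should request a completion interrupt, which is why
 * the watchdog separately checks for transmits that were never serviced.
 */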
static int lmc_rx(struct net_device *dev)
{
    lmc_softc_t *sc = dev_to_sc(dev);

    int rx_work_limit = LMC_RXDESCS;
    unsigned int next_rx;
    int rxIntLoopCnt;       /* debug -baz */
    int localLengthErrCnt = 0;

    struct sk_buff *skb, *nsb;

    lmc_trace(dev, "lmc_rx in");

    lmc_led_on(sc, LMC_DS3_LED3);

    rxIntLoopCnt = 0;       /* debug -baz */

    i = sc->lmc_next_rx % LMC_RXDESCS;
    next_rx = sc->lmc_next_rx;

    while (((stat = sc->lmc_rxring[i].status) & LMC_RDES_OWN_BIT) != DESC_OWNED_BY_DC21X4)
    {
        rxIntLoopCnt++;     /* debug -baz */
        len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER);
        if ((stat & 0x0300) != 0x0300) {  /* Check first segment and last segment */
            if ((stat & 0x0000ffff) != 0x7fff) {
                /* Oversized frame */
                sc->lmc_device->stats.rx_length_errors++;
            }
        }

        if (stat & 0x00000008) { /* Catch a dribbling bit error */
            sc->lmc_device->stats.rx_errors++;
            sc->lmc_device->stats.rx_frame_errors++;
        }

        if (stat & 0x00000004) { /* Catch a CRC error by the Xilinx */
            sc->lmc_device->stats.rx_errors++;
            sc->lmc_device->stats.rx_crc_errors++;
        }

        if (len > LMC_PKT_BUF_SZ) {
            sc->lmc_device->stats.rx_length_errors++;
            localLengthErrCnt++;
        }

        if (len < sc->lmc_crcSize + 2) {
            sc->lmc_device->stats.rx_length_errors++;
            sc->extra_stats.rx_SmallPktCnt++;
            localLengthErrCnt++;
        }

        if (stat & 0x00004000) {
            printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name);
        }

        len -= sc->lmc_crcSize;

        skb = sc->lmc_rxq[i];

        /*
         * We ran out of memory at some point
         * just allocate an skb buff and continue.
         */
            nsb = dev_alloc_skb(LMC_PKT_BUF_SZ + 2);
                sc->lmc_rxq[i] = nsb;

                sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));

            sc->failed_recv_alloc = 1;

        sc->lmc_device->stats.rx_packets++;
        sc->lmc_device->stats.rx_bytes += len;

        LMC_CONSOLE_LOG("recv", skb->data, len);

        /*
         * I'm not sure of the sanity of this
         * Packets could be arriving at a constant
         * 44.210mbits/sec and we're going to copy
         * them into a new buffer??
         */
        if (len > (LMC_MTU - (LMC_MTU >> 2))) { /* len > LMC_MTU * 0.75 */
            /*
             * If it's a large packet don't copy it just hand it up
             */
            sc->lmc_rxq[i] = NULL;
            sc->lmc_rxring[i].buffer1 = 0x0;

            skb->protocol = lmc_proto_type(sc, skb);
            skb_reset_mac_header(skb);
            /* skb_reset_network_header(skb); */

            lmc_proto_netif(sc, skb);

            /*
             * This skb will be destroyed by the upper layers, make a new one
             */
            nsb = dev_alloc_skb(LMC_PKT_BUF_SZ + 2);
                sc->lmc_rxq[i] = nsb;

                sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
                /* Transferred to 21140 below */

                /*
                 * We've run out of memory, stop trying to allocate
                 * memory and exit the interrupt handler
                 *
                 * The chip may run out of receivers and stop
                 * in which case we'll try to allocate the buffer
                 * again. (once a second)
                 */
                sc->extra_stats.rx_BuffAllocErr++;
                LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
                sc->failed_recv_alloc = 1;
                goto skip_out_of_mem;
        }
        else {
            nsb = dev_alloc_skb(len);
                goto give_it_anyways;

            skb_copy_from_linear_data(skb, skb_put(nsb, len), len);

            nsb->protocol = lmc_proto_type(sc, nsb);
            skb_reset_mac_header(nsb);
            /* skb_reset_network_header(nsb); */

            lmc_proto_netif(sc, nsb);
        }

        LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
        sc->lmc_rxring[i].status = DESC_OWNED_BY_DC21X4;

        i = sc->lmc_next_rx % LMC_RXDESCS;

        if (rx_work_limit < 0)
            break;
    }

    /* detect condition for LMC1000 where DSU cable attaches and fills
     * descriptors with bogus packets
     */
    if (localLengthErrCnt > LMC_RXDESCS - 3) {
        sc->extra_stats.rx_BadPktSurgeCnt++;
        LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, localLengthErrCnt,
                      sc->extra_stats.rx_BadPktSurgeCnt);
    }

    /* save max count of receive descriptors serviced */
    if (rxIntLoopCnt > sc->extra_stats.rxIntLoopCnt)
        sc->extra_stats.rxIntLoopCnt = rxIntLoopCnt;    /* debug -baz */

    if (rxIntLoopCnt == 0)
    {
        for (i = 0; i < LMC_RXDESCS; i++)
        {
            if ((sc->lmc_rxring[i].status & LMC_RDES_OWN_BIT)
                != DESC_OWNED_BY_DC21X4)

    LMC_EVENT_LOG(LMC_EVENT_RCVEND, rxIntLoopCnt, 0);

    lmc_led_off(sc, LMC_DS3_LED3);

    lmc_trace(dev, "lmc_rx out");
static struct net_device_stats *lmc_get_stats(struct net_device *dev)
{
    lmc_softc_t *sc = dev_to_sc(dev);
    unsigned long flags;

    lmc_trace(dev, "lmc_get_stats in");

    spin_lock_irqsave(&sc->lmc_lock, flags);

    sc->lmc_device->stats.rx_missed_errors += LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;

    spin_unlock_irqrestore(&sc->lmc_lock, flags);

    lmc_trace(dev, "lmc_get_stats out");

    return &sc->lmc_device->stats;
static struct pci_driver lmc_driver = {
    .id_table   = lmc_pci_tbl,
    .probe      = lmc_init_one,
    .remove     = lmc_remove_one,
};

module_pci_driver(lmc_driver);
unsigned lmc_mii_readreg(lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/
{
    int command = (0xf6 << 10) | (devaddr << 5) | regno;

    lmc_trace(sc->lmc_device, "lmc_mii_readreg in");

    lmc_trace(sc->lmc_device, "lmc_mii_readreg: done sync");

    for (i = 15; i >= 0; i--)
    {
        int dataval = (command & (1 << i)) ? 0x20000 : 0;

        LMC_CSR_WRITE(sc, csr_9, dataval);
        /* __SLOW_DOWN_IO; */
        LMC_CSR_WRITE(sc, csr_9, dataval | 0x10000);
        /* __SLOW_DOWN_IO; */
    }

    lmc_trace(sc->lmc_device, "lmc_mii_readreg: done1");

    for (i = 19; i > 0; i--)
    {
        LMC_CSR_WRITE(sc, csr_9, 0x40000);
        /* __SLOW_DOWN_IO; */
        retval = (retval << 1) | ((LMC_CSR_READ(sc, csr_9) & 0x80000) ? 1 : 0);
        LMC_CSR_WRITE(sc, csr_9, 0x40000 | 0x10000);
        /* __SLOW_DOWN_IO; */
    }

    lmc_trace(sc->lmc_device, "lmc_mii_readreg out");

    return (retval >> 1) & 0xffff;
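/*
 * Editorial note (not part of the original driver): MII access is bit-banged
 * through CSR9.  The command word built above carries the device (PHY)
 * address and register number; the first loop shifts it out one bit at a
 * time, toggling the clock bit (0x10000) around each data bit (0x20000),
 * and the second loop clocks the reply back in by sampling bit 0x80000,
 * accumulating the 16-bit register value in retval.
 */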
void lmc_mii_writereg(lmc_softc_t * const sc, unsigned devaddr, unsigned regno, unsigned data) /*fold00*/
{
    int command = (0x5002 << 16) | (devaddr << 23) | (regno << 18) | data;

    lmc_trace(sc->lmc_device, "lmc_mii_writereg in");

        if (command & (1 << i))

        LMC_CSR_WRITE(sc, csr_9, datav);
        /* __SLOW_DOWN_IO; */
        LMC_CSR_WRITE(sc, csr_9, (datav | 0x10000));
        /* __SLOW_DOWN_IO; */

        LMC_CSR_WRITE(sc, csr_9, 0x40000);
        /* __SLOW_DOWN_IO; */
        LMC_CSR_WRITE(sc, csr_9, 0x50000);
        /* __SLOW_DOWN_IO; */

    lmc_trace(sc->lmc_device, "lmc_mii_writereg out");
static void lmc_softreset(lmc_softc_t * const sc) /*fold00*/
{
    lmc_trace(sc->lmc_device, "lmc_softreset in");

    /* Initialize the receive rings and buffers. */
    sc->lmc_next_rx = 0;
    sc->lmc_next_tx = 0;
    sc->lmc_taint_rx = 0;
    sc->lmc_taint_tx = 0;

    /*
     * Setup each one of the receiver buffers
     * allocate an skbuff for each one, setup the descriptor table
     * and point each buffer at the next one
     */
    for (i = 0; i < LMC_RXDESCS; i++)
    {
        struct sk_buff *skb;

        if (sc->lmc_rxq[i] == NULL)
        {
            skb = dev_alloc_skb(LMC_PKT_BUF_SZ + 2);
            if (!skb) {
                printk(KERN_WARNING "%s: Failed to allocate receiver ring, will try again\n", sc->name);
                sc->failed_ring = 1;
            }
            sc->lmc_rxq[i] = skb;
        }
        else
        {
            skb = sc->lmc_rxq[i];
        }

        skb->dev = sc->lmc_device;

        /* owned by 21140 */
        sc->lmc_rxring[i].status = 0x80000000;

        /* used to be PKT_BUF_SZ now uses skb since we lose some to head room */
        sc->lmc_rxring[i].length = skb_tailroom(skb);

        /* use to be tail which is dumb since you're thinking why write
         * to the end of the packet, but since there's nothing there tail == data
         */
        sc->lmc_rxring[i].buffer1 = virt_to_bus(skb->data);

        /* This is fair since the structure is static and we have the next address */
        sc->lmc_rxring[i].buffer2 = virt_to_bus(&sc->lmc_rxring[i + 1]);
    }

    sc->lmc_rxring[i - 1].length |= 0x02000000; /* Set end of buffers flag */
    sc->lmc_rxring[i - 1].buffer2 = virt_to_bus(&sc->lmc_rxring[0]); /* Point back to the start */

    LMC_CSR_WRITE(sc, csr_rxlist, virt_to_bus(sc->lmc_rxring)); /* write base address */

    /* Initialize the transmit rings and buffers */
    for (i = 0; i < LMC_TXDESCS; i++)
    {
        if (sc->lmc_txq[i] != NULL) {       /* have buffer */
            dev_kfree_skb(sc->lmc_txq[i]);  /* free it */
            sc->lmc_device->stats.tx_dropped++; /* We just dropped a packet */
        }
        sc->lmc_txq[i] = NULL;
        sc->lmc_txring[i].status = 0x00000000;
        sc->lmc_txring[i].buffer2 = virt_to_bus(&sc->lmc_txring[i + 1]);
    }
    sc->lmc_txring[i - 1].buffer2 = virt_to_bus(&sc->lmc_txring[0]);
    LMC_CSR_WRITE(sc, csr_txlist, virt_to_bus(sc->lmc_txring));

    lmc_trace(sc->lmc_device, "lmc_softreset out");
void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits) /*fold00*/
{
    lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in");
    sc->lmc_gpio_io &= ~bits;
    LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
    lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out");

void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits) /*fold00*/
{
    lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in");
    sc->lmc_gpio_io |= bits;
    LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
    lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out");

void lmc_led_on(lmc_softc_t * const sc, u32 led) /*fold00*/
{
    lmc_trace(sc->lmc_device, "lmc_led_on in");
    if ((~sc->lmc_miireg16) & led) { /* Already on! */
        lmc_trace(sc->lmc_device, "lmc_led_on aon out");
    }

    sc->lmc_miireg16 &= ~led;
    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
    lmc_trace(sc->lmc_device, "lmc_led_on out");

void lmc_led_off(lmc_softc_t * const sc, u32 led) /*fold00*/
{
    lmc_trace(sc->lmc_device, "lmc_led_off in");
    if (sc->lmc_miireg16 & led) { /* Already set don't do anything */
        lmc_trace(sc->lmc_device, "lmc_led_off aoff out");
    }

    sc->lmc_miireg16 |= led;
    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
    lmc_trace(sc->lmc_device, "lmc_led_off out");
static void lmc_reset(lmc_softc_t * const sc) /*fold00*/
{
    lmc_trace(sc->lmc_device, "lmc_reset in");
    sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);

    sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);

    /*
     * make some of the GPIO pins be outputs
     */
    lmc_gpio_mkoutput(sc, LMC_GEP_RESET);

    /*
     * RESET low to force state reset.  This also forces
     * the transmitter clock to be internal, but we expect to reset
     * that later anyway.
     */
    sc->lmc_gpio &= ~(LMC_GEP_RESET);
    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

    /*
     * hold for more than 10 microseconds
     */

    /*
     * stop driving Xilinx-related signals
     */
    lmc_gpio_mkinput(sc, LMC_GEP_RESET);

    /*
     * Call media specific init routine
     */
    sc->lmc_media->init(sc);

    sc->extra_stats.resetCount++;
    lmc_trace(sc->lmc_device, "lmc_reset out");
static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/
{
    lmc_trace(sc->lmc_device, "lmc_dec_reset in");

    /*
     * disable all interrupts
     */
    sc->lmc_intrmask = 0;
    LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask);

    /*
     * Reset the chip with a software reset command.
     * Wait 10 microseconds (actually 50 PCI cycles but at
     * 33MHz that comes to two microseconds but wait a
     * bit longer anyways)
     */
    LMC_CSR_WRITE(sc, csr_busmode, TULIP_BUSMODE_SWRESET);

    sc->lmc_busmode = LMC_CSR_READ(sc, csr_busmode);
    sc->lmc_busmode = 0x00100000;
    sc->lmc_busmode &= ~TULIP_BUSMODE_SWRESET;
    LMC_CSR_WRITE(sc, csr_busmode, sc->lmc_busmode);

    sc->lmc_cmdmode = LMC_CSR_READ(sc, csr_command);

    /*
     * no ethernet address in frames we write
     * disable padding (txdesc, padding disable)
     * ignore runt frames (rdes0 bit 15)
     * no receiver watchdog or transmitter jabber timer
     *       (csr15 bit 0,14 == 1)
     * if using 16-bit CRC, turn off CRC (trans desc, crc disable)
     */
    sc->lmc_cmdmode |= ( TULIP_CMD_PROMISCUOUS
                         | TULIP_CMD_FULLDUPLEX
                         | TULIP_CMD_PASSBADPKT
                         | TULIP_CMD_NOHEARTBEAT
                         | TULIP_CMD_PORTSELECT
                         | TULIP_CMD_RECEIVEALL
                         | TULIP_CMD_MUSTBEONE
        );
    sc->lmc_cmdmode &= ~( TULIP_CMD_OPERMODE
                          | TULIP_CMD_THRESHOLDCTL
                          | TULIP_CMD_STOREFWD
                          | TULIP_CMD_TXTHRSHLDCTL
        );

    LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);

    /*
     * disable receiver watchdog and transmit jabber
     */
    val = LMC_CSR_READ(sc, csr_sia_general);
    val |= (TULIP_WATCHDOG_TXDISABLE | TULIP_WATCHDOG_RXDISABLE);
    LMC_CSR_WRITE(sc, csr_sia_general, val);

    lmc_trace(sc->lmc_device, "lmc_dec_reset out");
static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00*/
                         size_t csr_size)
{
    lmc_trace(sc->lmc_device, "lmc_initcsrs in");
    sc->lmc_csrs.csr_busmode        = csr_base +  0 * csr_size;
    sc->lmc_csrs.csr_txpoll         = csr_base +  1 * csr_size;
    sc->lmc_csrs.csr_rxpoll         = csr_base +  2 * csr_size;
    sc->lmc_csrs.csr_rxlist         = csr_base +  3 * csr_size;
    sc->lmc_csrs.csr_txlist         = csr_base +  4 * csr_size;
    sc->lmc_csrs.csr_status         = csr_base +  5 * csr_size;
    sc->lmc_csrs.csr_command        = csr_base +  6 * csr_size;
    sc->lmc_csrs.csr_intr           = csr_base +  7 * csr_size;
    sc->lmc_csrs.csr_missed_frames  = csr_base +  8 * csr_size;
    sc->lmc_csrs.csr_9              = csr_base +  9 * csr_size;
    sc->lmc_csrs.csr_10             = csr_base + 10 * csr_size;
    sc->lmc_csrs.csr_11             = csr_base + 11 * csr_size;
    sc->lmc_csrs.csr_12             = csr_base + 12 * csr_size;
    sc->lmc_csrs.csr_13             = csr_base + 13 * csr_size;
    sc->lmc_csrs.csr_14             = csr_base + 14 * csr_size;
    sc->lmc_csrs.csr_15             = csr_base + 15 * csr_size;
    lmc_trace(sc->lmc_device, "lmc_initcsrs out");
static void lmc_driver_timeout(struct net_device *dev)
{
    lmc_softc_t *sc = dev_to_sc(dev);

    unsigned long flags;

    lmc_trace(dev, "lmc_driver_timeout in");

    spin_lock_irqsave(&sc->lmc_lock, flags);

    printk("%s: Xmitter busy|\n", dev->name);

    sc->extra_stats.tx_tbusy_calls++;
    if (jiffies - dev_trans_start(dev) < TX_TIMEOUT)

    /*
     * Chip seems to have locked up
     * This wipes out all our descriptor
     * table and starts from scratch
     */

    LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO,
                  LMC_CSR_READ(sc, csr_status),
                  sc->extra_stats.tx_ProcTimeout);

    lmc_running_reset(dev);

    LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);
    LMC_EVENT_LOG(LMC_EVENT_RESET2,
                  lmc_mii_readreg(sc, 0, 16),
                  lmc_mii_readreg(sc, 0, 17));

    /* restart the tx processes */
    csr6 = LMC_CSR_READ(sc, csr_command);
    LMC_CSR_WRITE(sc, csr_command, csr6 | 0x0002);
    LMC_CSR_WRITE(sc, csr_command, csr6 | 0x2002);

    /* immediate transmit */
    LMC_CSR_WRITE(sc, csr_txpoll, 0);

    sc->lmc_device->stats.tx_errors++;
    sc->extra_stats.tx_ProcTimeout++; /* -baz */

    netif_trans_update(dev); /* prevent tx timeout */

    spin_unlock_irqrestore(&sc->lmc_lock, flags);

    lmc_trace(dev, "lmc_driver_timeout out");