/*
 * QLogic QLA3xxx NIC HBA Driver
 * Copyright (c) 2003-2006 QLogic Corporation
 *
 * See LICENSE.qla3xxx for copyright and licensing details.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>

#include "qla3xxx.h"
#define DRV_NAME	"qla3xxx"
#define DRV_STRING	"QLogic ISP3XXX Network Driver"
#define DRV_VERSION	"v2.02.00-k36"
#define PFX		DRV_NAME " "

static const char ql3xxx_driver_name[] = DRV_NAME;
static const char ql3xxx_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static const u32 default_msg
    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);
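/*
 * The chip's semaphore register arbitrates resources that are shared
 * between the functions on the chip.  A caller writes (sem_mask | sem_bits)
 * and then reads the register back; the upper half of sem_mask selects the
 * semaphore field, and the grab succeeded only if that field reads back
 * as sem_bits.
 */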
/*
 * Caller must take hw_lock.
 */
static int ql_sem_spinlock(struct ql3_adapter *qdev,
			   u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value;
	unsigned int seconds = 3;

	do {
		writel((sem_mask | sem_bits),
		       &port_regs->CommonRegs.semaphoreReg);
		value = readl(&port_regs->CommonRegs.semaphoreReg);
		if ((value & (sem_mask >> 16)) == sem_bits)
			return 0;
		ssleep(1);
	} while (--seconds);
	return -1;
}

static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
	readl(&port_regs->CommonRegs.semaphoreReg);
}

static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value;

	writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
	value = readl(&port_regs->CommonRegs.semaphoreReg);
	return ((value & (sem_mask >> 16)) == sem_bits);
}

/*
 * Caller holds hw_lock.
 */
static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
{
	int i = 0;

	while (1) {
		if (!ql_sem_lock(qdev,
				 QL_DRVR_SEM_MASK,
				 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
				  * 2) << 1)) {
			if (i < 10) {
				ssleep(1);
				i++;
			} else {
				printk(KERN_ERR PFX "%s: Timed out waiting for "
				       "driver lock...\n",
				       qdev->ndev->name);
				return 0;
			}
		} else {
			printk(KERN_DEBUG PFX
			       "%s: driver lock acquired.\n",
			       qdev->ndev->name);
			return 1;
		}
	}
}
static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	writel(((ISP_CONTROL_NP_MASK << 16) | page),
	       &port_regs->CommonRegs.ispControlStatus);
	readl(&port_regs->CommonRegs.ispControlStatus);
	qdev->current_page = page;
}
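/*
 * Most of the port register space is paged; the page in use is selected
 * through ispControlStatus (see ql_set_register_page() above).  The
 * accessors below switch to the required page on demand, and the _l
 * variants additionally take hw_lock around the access.
 */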
static u32 ql_read_common_reg_l(struct ql3_adapter *qdev,
				u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	value = readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return value;
}

static u32 ql_read_common_reg(struct ql3_adapter *qdev,
			      u32 __iomem *reg)
{
	return readl(reg);
}

static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	value = readl(reg);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return value;
}

static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	return readl(reg);
}

static void ql_write_common_reg_l(struct ql3_adapter *qdev,
				  u32 __iomem *reg, u32 value)
{
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	writel(value, reg);
	readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}

static void ql_write_common_reg(struct ql3_adapter *qdev,
				u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
}

static void ql_write_nvram_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
	udelay(1);
}

static void ql_write_page0_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page1_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 1)
		ql_set_register_page(qdev, 1);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page2_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 2)
		ql_set_register_page(qdev, 2);
	writel(value, reg);
	readl(reg);
}
static void ql_disable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      (ISP_IMR_ENABLE_INT << 16));
}

static void ql_enable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      ((0xff << 16) | ISP_IMR_ENABLE_INT));
}
static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
					    struct ql_rcv_buf_cb *lrg_buf_cb)
{
	u64 map;

	lrg_buf_cb->next = NULL;

	if (qdev->lrg_buf_free_tail == NULL) {	/* The list is empty  */
		qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
	} else {
		qdev->lrg_buf_free_tail->next = lrg_buf_cb;
		qdev->lrg_buf_free_tail = lrg_buf_cb;
	}

	if (!lrg_buf_cb->skb) {
		lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len);
		if (unlikely(!lrg_buf_cb->skb)) {
			printk(KERN_ERR PFX "%s: failed dev_alloc_skb().\n",
			       qdev->ndev->name);
			qdev->lrg_buf_skb_check++;
		} else {
			/*
			 * We save some space to copy the ethhdr from first
			 * buffer
			 */
			skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     lrg_buf_cb->skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);
			lrg_buf_cb->buf_phy_addr_low =
			    cpu_to_le32(LS_64BITS(map));
			lrg_buf_cb->buf_phy_addr_high =
			    cpu_to_le32(MS_64BITS(map));
			pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
			pci_unmap_len_set(lrg_buf_cb, maplen,
					  qdev->lrg_buffer_len -
					  QL_HEADER_SPACE);
		}
	}

	qdev->lrg_buf_free_count++;
}

static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
							   *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb;

	if ((lrg_buf_cb = qdev->lrg_buf_free_head) != NULL) {
		if ((qdev->lrg_buf_free_head = lrg_buf_cb->next) == NULL)
			qdev->lrg_buf_free_tail = NULL;
		qdev->lrg_buf_free_count--;
	}

	return lrg_buf_cb;
}
static u32 addrBits = EEPROM_NO_ADDR_BITS;
static u32 dataBits = EEPROM_NO_DATA_BITS;

static void fm93c56a_deselect(struct ql3_adapter *qdev);
static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
			    unsigned short *value);
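/*
 * The NVRAM parameters live in an FM93C56A serial EEPROM that is
 * bit-banged through the serial port interface register: the routines
 * below toggle chip select, clock edges, and the data-out bit one
 * register write at a time, and sample data-in after each clock cycle.
 */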
/*
 * Caller holds hw_lock.
 */
static void fm93c56a_select(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
{
	int i;
	u32 mask;
	u32 dataBit;
	u32 previousBit;
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	/* Clock in a zero, then do the start bit */
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			   AUBURN_EEPROM_DO_1);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->
			   eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
			   AUBURN_EEPROM_CLK_RISE);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->
			   eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
			   AUBURN_EEPROM_CLK_FALL);

	mask = 1 << (FM93C56A_CMD_BITS - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
		dataBit =
		    (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev,
					   &port_regs->CommonRegs.
					   serialPortInterfaceReg,
					   ISP_NVRAM_MASK | qdev->
					   eeprom_cmd_data | dataBit);
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->
				   eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->
				   eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_FALL);
		cmd = cmd << 1;
	}

	mask = 1 << (addrBits - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < addrBits; i++) {
		dataBit =
		    (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 :
		    AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev,
					   &port_regs->CommonRegs.
					   serialPortInterfaceReg,
					   ISP_NVRAM_MASK | qdev->
					   eeprom_cmd_data | dataBit);
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->
				   eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->
				   eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_FALL);
		eepromAddr = eepromAddr << 1;
	}
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_deselect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
{
	int i;
	u32 data = 0;
	u32 dataBit;
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	/* Read the data bits */
	/* The first bit is a dummy.  Clock right over it. */
	for (i = 0; i < dataBits; i++) {
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_FALL);
		dataBit =
		    (ql_read_common_reg
		     (qdev,
		      &port_regs->CommonRegs.
		      serialPortInterfaceReg) & AUBURN_EEPROM_DI_1) ? 1 : 0;
		data = (data << 1) | dataBit;
	}
	*value = (u16) data;
}

/*
 * Caller holds hw_lock.
 */
static void eeprom_readword(struct ql3_adapter *qdev,
			    u32 eepromAddr, unsigned short *value)
{
	fm93c56a_select(qdev);
	fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
	fm93c56a_datain(qdev, value);
	fm93c56a_deselect(qdev);
}
static void ql_swap_mac_addr(u8 * macAddress)
{
#ifdef __BIG_ENDIAN
	u8 temp;
	temp = macAddress[0];
	macAddress[0] = macAddress[1];
	macAddress[1] = temp;
	temp = macAddress[2];
	macAddress[2] = macAddress[3];
	macAddress[3] = temp;
	temp = macAddress[4];
	macAddress[4] = macAddress[5];
	macAddress[5] = temp;
#endif
}
static int ql_get_nvram_params(struct ql3_adapter *qdev)
{
	u16 *pEEPROMData;
	u16 checksum = 0;
	u32 index;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	pEEPROMData = (u16 *) & qdev->nvram_data;
	qdev->eeprom_cmd_data = 0;
	if(ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			 2) << 10)) {
		printk(KERN_ERR PFX "%s: Failed ql_sem_spinlock().\n",
		       qdev->ndev->name);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	for (index = 0; index < EEPROM_SIZE; index++) {
		eeprom_readword(qdev, index, pEEPROMData);
		checksum += *pEEPROMData;
		pEEPROMData++;
	}
	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);

	if (checksum != 0) {
		printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n",
		       qdev->ndev->name, checksum);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	/*
	 * We have a problem with endianness for the MAC addresses
	 * and the two 8-bit values version, and numPorts.  We
	 * have to swap them on big endian systems.
	 */
	ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn0.macAddress);
	ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn1.macAddress);
	ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn2.macAddress);
	ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn3.macAddress);
	pEEPROMData = (u16 *) & qdev->nvram_data.version;
	*pEEPROMData = le16_to_cpu(*pEEPROMData);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return checksum;
}
static const u32 PHYAddr[2] = {
	PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
};
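/*
 * MII management accesses follow a common pattern: take the PHY out of
 * autoscan mode, wait for the management port to go non-busy, program the
 * address (and data or read-cycle) registers, wait for completion, and
 * finally restore scan mode.
 */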
static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 temp;
	int count = 1000;

	while (count) {
		temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
		if (!(temp & MAC_MII_STATUS_BSY))
			return 0;
		udelay(10);
		count--;
	}
	return -1;
}

static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 scanControl;

	if (qdev->numPorts > 1) {
		/* Auto scan will cycle through multiple ports */
		scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
	} else {
		scanControl = MAC_MII_CONTROL_SC;
	}

	/*
	 * Scan register 1 of PHY/PETBI,
	 * Set up to scan both devices
	 * The autoscan starts from the first register, completes
	 * the last one before rolling over to the first
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (scanControl) |
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
}

static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
{
	u8 scanWasEnabled;
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	/* See if scan mode is enabled before we turn it off */
	if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
	    (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
		/* Scan is enabled */
		scanWasEnabled = 1;
	} else {
		/* Scan is disabled */
		scanWasEnabled = 0;
	}

	/*
	 * When disabling scan mode you must first change the MII register
	 * address
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
			     MAC_MII_CONTROL_RC) << 16));

	return scanWasEnabled;
}

static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
			       u16 regAddr, u16 value, u32 mac_index)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u8 scanWasEnabled;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[mac_index] | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete 9/10/04 SJP */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
			      u16 * value, u32 mac_index)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u8 scanWasEnabled;
	u32 temp;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[mac_index] | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free after issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16) temp;

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete. */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
{
	u32 temp;
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16) temp;

	ql_mii_enable_scan_mode(qdev);

	return 0;
}
static void ql_petbi_reset(struct ql3_adapter *qdev)
{
	ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
}

static void ql_petbi_start_neg(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);

	ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
			 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);

	ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
			 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
}

static void ql_petbi_reset_ex(struct ql3_adapter *qdev, u32 mac_index)
{
	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
			    mac_index);
}

static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev, u32 mac_index)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg, mac_index);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg, mac_index);

	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
			    PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX, mac_index);

	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
			    PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			    PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
			    mac_index);
}

static void ql_petbi_init(struct ql3_adapter *qdev)
{
	ql_petbi_reset(qdev);
	ql_petbi_start_neg(qdev);
}

static void ql_petbi_init_ex(struct ql3_adapter *qdev, u32 mac_index)
{
	ql_petbi_reset_ex(qdev, mac_index);
	ql_petbi_start_neg_ex(qdev, mac_index);
}

static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
}

static int ql_phy_get_speed(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
		return 0;

	reg = (((reg & 0x18) >> 3) & 3);

	if (reg == 2)
		return SPEED_1000;
	else if (reg == 1)
		return SPEED_100;
	else if (reg == 0)
		return SPEED_10;
	else
		return -1;
}

static int ql_is_full_dup(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
		return 0;

	return (reg & PHY_AUX_DUPLEX_STAT) != 0;
}

static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PHY_NEG_PAUSE) != 0;
}
/*
 * Caller holds hw_lock.
 */
static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
	else
		value = (MAC_CONFIG_REG_PE << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
	else
		value = (MAC_CONFIG_REG_SR << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
	else
		value = (MAC_CONFIG_REG_GM << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
	else
		value = (MAC_CONFIG_REG_FD << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	if (enable)
		value =
		    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
		     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
	else
		value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}
/*
 * Caller holds hw_lock.
 */
static int ql_is_fiber(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_SM0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_SM1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static int ql_is_auto_cfg(struct ql3_adapter *qdev)
{
	u16 reg;
	ql_mii_read_reg(qdev, 0x00, &reg);
	return (reg & 0x1000) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AC0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AC1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Auto-Negotiate complete.\n",
			       qdev->ndev->name);
		return 1;
	} else {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Auto-Negotiate incomplete.\n",
			       qdev->ndev->name);
		return 0;
	}
}

/*
 *  ql_is_neg_pause() returns 1 if pause was negotiated to be on
 */
static int ql_is_neg_pause(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return ql_is_petbi_neg_pause(qdev);
	else
		return ql_is_phy_neg_pause(qdev);
}

static int ql_auto_neg_error(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AE0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AE1;
		break;
	}
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static u32 ql_get_link_speed(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return SPEED_1000;
	else
		return ql_phy_get_speed(qdev);
}

static int ql_is_link_full_dup(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return 1;
	else
		return ql_is_full_dup(qdev);
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = ISP_CONTROL_LINK_DN_0;
		break;
	case 1:
		bitToCheck = ISP_CONTROL_LINK_DN_1;
		break;
	}

	temp =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	return (temp & bitToCheck) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	switch (qdev->mac_index) {
	case 0:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_0) |
				    (ISP_CONTROL_LINK_DN_0 << 16));
		break;
	case 1:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_1) |
				    (ISP_CONTROL_LINK_DN_1 << 16));
		break;
	default:
		return 1;
	}

	return 0;
}
/*
 * Caller holds hw_lock.
 */
static int ql_this_adapter_controls_port(struct ql3_adapter *qdev,
					 u32 mac_index)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_F1_ENABLED;
		break;
	case 1:
		bitToCheck = PORT_STATUS_F3_ENABLED;
		break;
	default:
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: is not link master.\n", qdev->ndev->name);
		return 0;
	} else {
		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: is link master.\n", qdev->ndev->name);
		return 1;
	}
}

static void ql_phy_reset_ex(struct ql3_adapter *qdev, u32 mac_index)
{
	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET, mac_index);
}

static void ql_phy_start_neg_ex(struct ql3_adapter *qdev, u32 mac_index)
{
	u16 reg;

	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER,
			    PHY_NEG_PAUSE | PHY_NEG_ADV_SPEED | 1, mac_index);

	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, mac_index);
	ql_mii_write_reg_ex(qdev, CONTROL_REG, reg | PHY_CTRL_RESTART_NEG,
			    mac_index);
}

static void ql_phy_init_ex(struct ql3_adapter *qdev, u32 mac_index)
{
	ql_phy_reset_ex(qdev, mac_index);
	ql_phy_start_neg_ex(qdev, mac_index);
}
/*
 * Caller holds hw_lock.
 */
static u32 ql_get_link_state(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp, linkState;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_UP0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_UP1;
		break;
	}
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		linkState = LS_UP;
	} else {
		linkState = LS_DOWN;
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Link is down.\n", qdev->ndev->name);
	}
	return linkState;
}
static int ql_port_start(struct ql3_adapter *qdev)
{
	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
		 2) << 7))
		return -1;

	if (ql_is_fiber(qdev)) {
		ql_petbi_init(qdev);
	} else {
		/* Copper port */
		ql_phy_init_ex(qdev, qdev->mac_index);
	}

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

static int ql_finish_auto_neg(struct ql3_adapter *qdev)
{

	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
		 2) << 7))
		return -1;

	if (!ql_auto_neg_error(qdev)) {
		if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
			/* configure the MAC */
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Configuring link.\n",
				       qdev->ndev->name);
			ql_mac_cfg_soft_reset(qdev, 1);
			ql_mac_cfg_gig(qdev,
				       (ql_get_link_speed(qdev) ==
					SPEED_1000));
			ql_mac_cfg_full_dup(qdev,
					    ql_is_link_full_dup(qdev));
			ql_mac_cfg_pause(qdev,
					 ql_is_neg_pause(qdev));
			ql_mac_cfg_soft_reset(qdev, 0);

			/* enable the MAC */
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Enabling mac.\n",
				       qdev->ndev->name);
			ql_mac_enable(qdev, 1);
		}

		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: Change port_link_state LS_DOWN to LS_UP.\n",
			       qdev->ndev->name);
		qdev->port_link_state = LS_UP;
		netif_start_queue(qdev->ndev);
		netif_carrier_on(qdev->ndev);
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Link is up at %d Mbps, %s duplex.\n",
			       qdev->ndev->name,
			       ql_get_link_speed(qdev),
			       ql_is_link_full_dup(qdev)
			       ? "full" : "half");

	} else {	/* Remote error detected */

		if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Remote error detected. "
				       "Calling ql_port_start().\n",
				       qdev->ndev->name);
			/*
			 * ql_port_start() is shared code and needs
			 * to lock the PHY on it's own.
			 */
			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
			if(ql_port_start(qdev))	/* Restart port */
				return -1;
			else
				return 0;
		}
	}
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}
static void ql_link_state_machine(struct ql3_adapter *qdev)
{
	u32 curr_link_state;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	curr_link_state = ql_get_link_state(qdev);

	if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) {
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Reset in progress, skip processing link "
			       "state.\n", qdev->ndev->name);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return;
	}

	switch (qdev->port_link_state) {
	default:
		if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
			ql_port_start(qdev);
		}
		qdev->port_link_state = LS_DOWN;
		/* Fall Through */

	case LS_DOWN:
		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: port_link_state = LS_DOWN.\n",
			       qdev->ndev->name);
		if (curr_link_state == LS_UP) {
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: curr_link_state = LS_UP.\n",
				       qdev->ndev->name);
			if (ql_is_auto_neg_complete(qdev))
				ql_finish_auto_neg(qdev);

			if (qdev->port_link_state == LS_UP)
				ql_link_down_detect_clear(qdev);
		}
		break;

	case LS_UP:
		/*
		 * See if the link is currently down or went down and came
		 * back up
		 */
		if ((curr_link_state == LS_DOWN) || ql_link_down_detect(qdev)) {
			if (netif_msg_link(qdev))
				printk(KERN_INFO PFX "%s: Link is down.\n",
				       qdev->ndev->name);
			qdev->port_link_state = LS_DOWN;
		}
		break;
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}
/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_get_phy_owner(struct ql3_adapter *qdev)
{
	if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
		set_bit(QL_LINK_MASTER,&qdev->flags);
	else
		clear_bit(QL_LINK_MASTER,&qdev->flags);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_init_scan_mode(struct ql3_adapter *qdev)
{
	ql_mii_enable_scan_mode(qdev);

	if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
		if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
			ql_petbi_init_ex(qdev, qdev->mac_index);
	} else {
		if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
			ql_phy_init_ex(qdev, qdev->mac_index);
	}
}
/*
 * MII_Setup needs to be called before taking the PHY out of reset so that the
 * management interface clock speed can be set properly.  It would be better if
 * we had a way to disable MDC until after the PHY is out of reset, but we
 * don't have that capability.
 */
static int ql_mii_setup(struct ql3_adapter *qdev)
{
	u32 reg;
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
		 2) << 7))
		return -1;

	/* Divide 125MHz clock by 28 to meet PHY timing requirements */
	reg = MAC_MII_CONTROL_CLK_SEL_DIV28;

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}
static u32 ql_supported_modes(struct ql3_adapter *qdev)
{
	u32 supported;

	if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
		supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
		    | SUPPORTED_Autoneg;
	} else {
		supported = SUPPORTED_10baseT_Half
		    | SUPPORTED_10baseT_Full
		    | SUPPORTED_100baseT_Half
		    | SUPPORTED_100baseT_Full
		    | SUPPORTED_1000baseT_Half
		    | SUPPORTED_1000baseT_Full
		    | SUPPORTED_Autoneg | SUPPORTED_TP;
	}

	return supported;
}

static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;
	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
		 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_auto_cfg(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static u32 ql_get_speed(struct ql3_adapter *qdev)
{
	u32 status;
	unsigned long hw_flags;
	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
		 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_get_link_speed(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_full_dup(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;
	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
		 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_link_full_dup(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}
static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = ql_supported_modes(qdev);

	if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
		ecmd->port = PORT_FIBRE;
	} else {
		ecmd->port = PORT_TP;
		ecmd->phy_address = qdev->PHYAddr;
	}
	ecmd->advertising = ql_supported_modes(qdev);
	ecmd->autoneg = ql_get_auto_cfg_status(qdev);
	ecmd->speed = ql_get_speed(qdev);
	ecmd->duplex = ql_get_full_dup(qdev);
	return 0;
}

static void ql_get_drvinfo(struct net_device *ndev,
			   struct ethtool_drvinfo *drvinfo)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	strncpy(drvinfo->driver, ql3xxx_driver_name, 32);
	strncpy(drvinfo->version, ql3xxx_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
	drvinfo->n_stats = 0;
	drvinfo->testinfo_len = 0;
	drvinfo->regdump_len = 0;
	drvinfo->eedump_len = 0;
}

static u32 ql_get_msglevel(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	return qdev->msg_enable;
}

static void ql_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	qdev->msg_enable = value;
}

static const struct ethtool_ops ql3xxx_ethtool_ops = {
	.get_settings = ql_get_settings,
	.get_drvinfo = ql_get_drvinfo,
	.get_perm_addr = ethtool_op_get_perm_addr,
	.get_link = ethtool_op_get_link,
	.get_msglevel = ql_get_msglevel,
	.set_msglevel = ql_set_msglevel,
};
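/*
 * Receive buffer handling.  Completed large buffers are returned to a
 * singly-linked free list and handed back to the chip eight address
 * elements at a time through the large buffer queue; the new producer
 * index is then written to the chip (see
 * ql_update_lrg_bufq_prod_index() below).
 */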
static int ql_populate_free_queue(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
	u64 map;

	while (lrg_buf_cb) {
		if (!lrg_buf_cb->skb) {
			lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len);
			if (unlikely(!lrg_buf_cb->skb)) {
				printk(KERN_DEBUG PFX
				       "%s: Failed dev_alloc_skb().\n",
				       qdev->ndev->name);
				break;
			} else {
				/*
				 * We save some space to copy the ethhdr from
				 * first buffer
				 */
				skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
				map = pci_map_single(qdev->pdev,
						     lrg_buf_cb->skb->data,
						     qdev->lrg_buffer_len -
						     QL_HEADER_SPACE,
						     PCI_DMA_FROMDEVICE);
				lrg_buf_cb->buf_phy_addr_low =
				    cpu_to_le32(LS_64BITS(map));
				lrg_buf_cb->buf_phy_addr_high =
				    cpu_to_le32(MS_64BITS(map));
				pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
				pci_unmap_len_set(lrg_buf_cb, maplen,
						  qdev->lrg_buffer_len -
						  QL_HEADER_SPACE);
				--qdev->lrg_buf_skb_check;
				if (!qdev->lrg_buf_skb_check)
					return 1;
			}
		}
		lrg_buf_cb = lrg_buf_cb->next;
	}
	return 0;
}

/*
 * Caller holds hw_lock.
 */
static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct bufq_addr_element *lrg_buf_q_ele;
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	if ((qdev->lrg_buf_free_count >= 8)
	    && (qdev->lrg_buf_release_cnt >= 16)) {

		if (qdev->lrg_buf_skb_check)
			if (!ql_populate_free_queue(qdev))
				return;

		lrg_buf_q_ele = qdev->lrg_buf_next_free;

		while ((qdev->lrg_buf_release_cnt >= 16)
		       && (qdev->lrg_buf_free_count >= 8)) {

			for (i = 0; i < 8; i++) {
				lrg_buf_cb =
				    ql_get_from_lrg_buf_free_list(qdev);
				lrg_buf_q_ele->addr_high =
				    lrg_buf_cb->buf_phy_addr_high;
				lrg_buf_q_ele->addr_low =
				    lrg_buf_cb->buf_phy_addr_low;
				lrg_buf_q_ele++;

				qdev->lrg_buf_release_cnt--;
			}

			qdev->lrg_buf_q_producer_index++;

			if (qdev->lrg_buf_q_producer_index == NUM_LBUFQ_ENTRIES)
				qdev->lrg_buf_q_producer_index = 0;

			if (qdev->lrg_buf_q_producer_index ==
			    (NUM_LBUFQ_ENTRIES - 1)) {
				lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
			}
		}

		qdev->lrg_buf_next_free = lrg_buf_q_ele;

		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.
				    rxLargeQProducerIndex,
				    qdev->lrg_buf_q_producer_index);
	}
}
static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct ql_tx_buf_cb *tx_cb;

	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(tx_cb, mapaddr),
			 pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE);
	/* Update the stats before freeing the skb. */
	qdev->stats.tx_packets++;
	qdev->stats.tx_bytes += tx_cb->skb->len;
	dev_kfree_skb_irq(tx_cb->skb);
	tx_cb->skb = NULL;
	atomic_inc(&qdev->tx_count);
}
static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
				   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
{
	long int offset;
	u32 lrg_buf_phy_addr_low = 0;
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	u32 *curr_ial_ptr;
	struct sk_buff *skb;
	u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);

	/*
	 * Get the inbound address list (small buffer).
	 */
	offset = qdev->small_buf_index * QL_SMALL_BUFFER_SIZE;
	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
		qdev->small_buf_index = 0;

	curr_ial_ptr = (u32 *) (qdev->small_buf_virt_addr + offset);
	qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
	qdev->small_buf_release_cnt++;

	/* start of first buffer */
	lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
	lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
	qdev->lrg_buf_release_cnt++;
	if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
		qdev->lrg_buf_index = 0;
	curr_ial_ptr++;		/* 64-bit pointers require two incs. */
	curr_ial_ptr++;

	/* start of second buffer */
	lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
	lrg_buf_cb2 = &qdev->lrg_buf[qdev->lrg_buf_index];

	/*
	 * Second buffer gets sent up the stack.
	 */
	qdev->lrg_buf_release_cnt++;
	if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
		qdev->lrg_buf_index = 0;
	skb = lrg_buf_cb2->skb;

	qdev->stats.rx_packets++;
	qdev->stats.rx_bytes += length;

	skb_put(skb, length);
	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(lrg_buf_cb2, mapaddr),
			 pci_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb->data);
	skb->dev = qdev->ndev;
	skb->ip_summed = CHECKSUM_NONE;
	skb->protocol = eth_type_trans(skb, qdev->ndev);

	netif_receive_skb(skb);
	qdev->ndev->last_rx = jiffies;
	lrg_buf_cb2->skb = NULL;

	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}

static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
				     struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
{
	long int offset;
	u32 lrg_buf_phy_addr_low = 0;
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	u32 *curr_ial_ptr;
	struct sk_buff *skb1, *skb2;
	struct net_device *ndev = qdev->ndev;
	u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
	u16 size = 0;

	/*
	 * Get the inbound address list (small buffer).
	 */
	offset = qdev->small_buf_index * QL_SMALL_BUFFER_SIZE;
	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
		qdev->small_buf_index = 0;
	curr_ial_ptr = (u32 *) (qdev->small_buf_virt_addr + offset);
	qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
	qdev->small_buf_release_cnt++;

	/* start of first buffer */
	lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
	lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];

	qdev->lrg_buf_release_cnt++;
	if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
		qdev->lrg_buf_index = 0;
	skb1 = lrg_buf_cb1->skb;
	curr_ial_ptr++;		/* 64-bit pointers require two incs. */
	curr_ial_ptr++;

	/* start of second buffer */
	lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
	lrg_buf_cb2 = &qdev->lrg_buf[qdev->lrg_buf_index];
	skb2 = lrg_buf_cb2->skb;
	qdev->lrg_buf_release_cnt++;
	if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
		qdev->lrg_buf_index = 0;

	qdev->stats.rx_packets++;
	qdev->stats.rx_bytes += length;

	/*
	 * Copy the ethhdr from first buffer to second. This
	 * is necessary for IP completions.
	 */
	if (*((u16 *) skb1->data) != 0xFFFF)
		size = VLAN_ETH_HLEN;
	else
		size = ETH_HLEN;

	skb_put(skb2, length);	/* Just the second buffer length here. */
	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(lrg_buf_cb2, mapaddr),
			 pci_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb2->data);

	memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size);
	skb2->dev = qdev->ndev;
	skb2->ip_summed = CHECKSUM_NONE;
	skb2->protocol = eth_type_trans(skb2, qdev->ndev);

	netif_receive_skb(skb2);
	ndev->last_rx = jiffies;
	lrg_buf_cb2->skb = NULL;

	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}
static int ql_tx_rx_clean(struct ql3_adapter *qdev,
			  int *tx_cleaned, int *rx_cleaned, int work_to_do)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	struct net_rsp_iocb *net_rsp;
	struct net_device *ndev = qdev->ndev;
	unsigned long hw_flags;

	/* While there are entries in the completion queue. */
	while ((cpu_to_le32(*(qdev->prsp_producer_index)) !=
		qdev->rsp_consumer_index) && (*rx_cleaned < work_to_do)) {

		net_rsp = qdev->rsp_current;
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_IOCB_FN0:
		case OPCODE_OB_MAC_IOCB_FN2:
			ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
					       net_rsp);
			(*tx_cleaned)++;
			break;

		case OPCODE_IB_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
					       net_rsp);
			(*rx_cleaned)++;
			break;

		case OPCODE_IB_IP_IOCB:
			ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
						 net_rsp);
			(*rx_cleaned)++;
			break;
		default:
			{
				u32 *tmp = (u32 *) net_rsp;
				printk(KERN_ERR PFX
				       "%s: Hit default case, not "
				       "handled!\n"
				       "	dropping the packet, opcode = "
				       "%x.\n",
				       ndev->name, net_rsp->opcode);
				printk(KERN_ERR PFX
				       "0x%08lx 0x%08lx 0x%08lx 0x%08lx \n",
				       (unsigned long int)tmp[0],
				       (unsigned long int)tmp[1],
				       (unsigned long int)tmp[2],
				       (unsigned long int)tmp[3]);
			}
		}

		qdev->rsp_consumer_index++;

		if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
			qdev->rsp_consumer_index = 0;
			qdev->rsp_current = qdev->rsp_q_virt_addr;
		} else {
			qdev->rsp_current++;
		}
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	ql_update_lrg_bufq_prod_index(qdev);

	if (qdev->small_buf_release_cnt >= 16) {
		while (qdev->small_buf_release_cnt >= 16) {
			qdev->small_buf_q_producer_index++;

			if (qdev->small_buf_q_producer_index ==
			    NUM_SBUFQ_ENTRIES)
				qdev->small_buf_q_producer_index = 0;
			qdev->small_buf_release_cnt -= 8;
		}

		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.
				    rxSmallQProducerIndex,
				    qdev->small_buf_q_producer_index);
	}

	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.rspQConsumerIndex,
			    qdev->rsp_consumer_index);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	if (unlikely(netif_queue_stopped(qdev->ndev))) {
		if (netif_queue_stopped(qdev->ndev) &&
		    (atomic_read(&qdev->tx_count) > (NUM_REQ_Q_ENTRIES / 4)))
			netif_wake_queue(qdev->ndev);
	}

	return *tx_cleaned + *rx_cleaned;
}
static int ql_poll(struct net_device *ndev, int *budget)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	int work_to_do = min(*budget, ndev->quota);
	int rx_cleaned = 0, tx_cleaned = 0;

	if (!netif_carrier_ok(ndev))
		goto quit_polling;

	ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, work_to_do);
	*budget -= rx_cleaned;
	ndev->quota -= rx_cleaned;

	if ((!tx_cleaned && !rx_cleaned) || !netif_running(ndev)) {
quit_polling:
		netif_rx_complete(ndev);
		ql_enable_interrupts(qdev);
		return 0;
	}
	return 1;
}
static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
{

	struct net_device *ndev = dev_id;
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value;
	int handled = 1;
	u32 var;

	value =
	    ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);

	if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
		spin_lock(&qdev->adapter_lock);
		netif_stop_queue(qdev->ndev);
		netif_carrier_off(qdev->ndev);
		ql_disable_interrupts(qdev);
		qdev->port_link_state = LS_DOWN;
		set_bit(QL_RESET_ACTIVE,&qdev->flags) ;

		if (value & ISP_CONTROL_FE) {
			/*
			 * Chip Fatal Error.
			 */
			var =
			    ql_read_page0_reg_l(qdev,
						&port_regs->PortFatalErrStatus);
			printk(KERN_WARNING PFX
			       "%s: Resetting chip. PortFatalErrStatus "
			       "register = 0x%x\n", ndev->name, var);
			set_bit(QL_RESET_START,&qdev->flags) ;
		} else {
			/*
			 * Soft Reset Requested.
			 */
			set_bit(QL_RESET_PER_SCSI,&qdev->flags) ;
			printk(KERN_ERR PFX
			       "%s: Another function issued a reset to the "
			       "chip. ISR value = %x.\n", ndev->name, value);
		}
		queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
		spin_unlock(&qdev->adapter_lock);
	} else if (value & ISP_IMR_DISABLE_CMPL_INT) {
		ql_disable_interrupts(qdev);
		if (likely(netif_rx_schedule_prep(ndev)))
			__netif_rx_schedule(ndev);
		else
			ql_enable_interrupts(qdev);
	} else {
		return IRQ_NONE;
	}

	return IRQ_RETVAL(handled);
}
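/*
 * Transmit path.  Each outbound packet is described by a single-fragment
 * MAC IOCB placed in the request ring; the doorbell is rung by writing
 * the new request-queue producer index to the chip.
 */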
static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	struct ql_tx_buf_cb *tx_cb;
	struct ob_mac_iocb_req *mac_iocb_ptr;
	u64 map;

	if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
		if (!netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}
	tx_cb = &qdev->tx_buf[qdev->req_producer_index];
	mac_iocb_ptr = tx_cb->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
	mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
	mac_iocb_ptr->flags |= qdev->mb_bit_mask;
	mac_iocb_ptr->transaction_id = qdev->req_producer_index;
	mac_iocb_ptr->data_len = cpu_to_le16((u16) skb->len);
	tx_cb->skb = skb;
	map = pci_map_single(qdev->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
	mac_iocb_ptr->buf_addr0_low = cpu_to_le32(LS_64BITS(map));
	mac_iocb_ptr->buf_addr0_high = cpu_to_le32(MS_64BITS(map));
	mac_iocb_ptr->buf_0_len = cpu_to_le32(skb->len | OB_MAC_IOCB_REQ_E);
	pci_unmap_addr_set(tx_cb, mapaddr, map);
	pci_unmap_len_set(tx_cb, maplen, skb->len);
	atomic_dec(&qdev->tx_count);

	qdev->req_producer_index++;
	if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
		qdev->req_producer_index = 0;
	ql_write_common_reg_l(qdev,
			      &port_regs->CommonRegs.reqQProducerIndex,
			      qdev->req_producer_index);

	ndev->trans_start = jiffies;
	if (netif_msg_tx_queued(qdev))
		printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
		       ndev->name, qdev->req_producer_index, skb->len);

	return NETDEV_TX_OK;
}
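/*
 * Shared-memory allocation.  Note: the request and response rings are
 * checked to be naturally aligned -- the hardware apparently requires
 * that a ring not cross an address boundary of its own size, hence the
 * (phys_addr & (size - 1)) tests below.
 */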
static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
{
	qdev->req_q_size =
	    (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));

	qdev->req_q_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 (size_t) qdev->req_q_size,
				 &qdev->req_q_phy_addr);

	if ((qdev->req_q_virt_addr == NULL) ||
	    LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
		printk(KERN_ERR PFX "%s: reqQ failed.\n",
		       qdev->ndev->name);
		return -ENOMEM;
	}

	qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);

	qdev->rsp_q_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 (size_t) qdev->rsp_q_size,
				 &qdev->rsp_q_phy_addr);

	if ((qdev->rsp_q_virt_addr == NULL) ||
	    LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
		printk(KERN_ERR PFX
		       "%s: rspQ allocation failed\n",
		       qdev->ndev->name);
		pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
				    qdev->req_q_virt_addr,
				    qdev->req_q_phy_addr);
		return -ENOMEM;
	}

	set_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags);

	return 0;
}

static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
{
	if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: Already done.\n", qdev->ndev->name);
		return;
	}

	pci_free_consistent(qdev->pdev,
			    qdev->req_q_size,
			    qdev->req_q_virt_addr, qdev->req_q_phy_addr);

	qdev->req_q_virt_addr = NULL;

	pci_free_consistent(qdev->pdev,
			    qdev->rsp_q_size,
			    qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);

	qdev->rsp_q_virt_addr = NULL;

	clear_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags);
}
static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
{
	/* Create Large Buffer Queue */
	qdev->lrg_buf_q_size =
	    NUM_LBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
	if (qdev->lrg_buf_q_size < PAGE_SIZE)
		qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
	else
		qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;

	qdev->lrg_buf_q_alloc_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 qdev->lrg_buf_q_alloc_size,
				 &qdev->lrg_buf_q_alloc_phy_addr);

	if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
		printk(KERN_ERR PFX
		       "%s: lBufQ failed\n", qdev->ndev->name);
		return -ENOMEM;
	}
	qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
	qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;

	/* Create Small Buffer Queue */
	qdev->small_buf_q_size =
	    NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
	if (qdev->small_buf_q_size < PAGE_SIZE)
		qdev->small_buf_q_alloc_size = PAGE_SIZE;
	else
		qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;

	qdev->small_buf_q_alloc_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 qdev->small_buf_q_alloc_size,
				 &qdev->small_buf_q_alloc_phy_addr);

	if (qdev->small_buf_q_alloc_virt_addr == NULL) {
		printk(KERN_ERR PFX
		       "%s: Small Buffer Queue allocation failed.\n",
		       qdev->ndev->name);
		pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
				    qdev->lrg_buf_q_alloc_virt_addr,
				    qdev->lrg_buf_q_alloc_phy_addr);
		return -ENOMEM;
	}

	qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
	qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
	set_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags);
	return 0;
}

static void ql_free_buffer_queues(struct ql3_adapter *qdev)
{
	if (!test_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: Already done.\n", qdev->ndev->name);
		return;
	}

	pci_free_consistent(qdev->pdev,
			    qdev->lrg_buf_q_alloc_size,
			    qdev->lrg_buf_q_alloc_virt_addr,
			    qdev->lrg_buf_q_alloc_phy_addr);

	qdev->lrg_buf_q_virt_addr = NULL;

	pci_free_consistent(qdev->pdev,
			    qdev->small_buf_q_alloc_size,
			    qdev->small_buf_q_alloc_virt_addr,
			    qdev->small_buf_q_alloc_phy_addr);

	qdev->small_buf_q_virt_addr = NULL;

	clear_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags);
}
static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
{
	int i;
	struct bufq_addr_element *small_buf_q_entry;

	/* Currently we allocate one chunk of memory and use it for small buffers */
	qdev->small_buf_total_size =
	    (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
	     QL_SMALL_BUFFER_SIZE);

	qdev->small_buf_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 qdev->small_buf_total_size,
				 &qdev->small_buf_phy_addr);

	if (qdev->small_buf_virt_addr == NULL) {
		printk(KERN_ERR PFX
		       "%s: Failed to get small buffer memory.\n",
		       qdev->ndev->name);
		return -ENOMEM;
	}

	qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
	qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);

	small_buf_q_entry = qdev->small_buf_q_virt_addr;

	qdev->last_rsp_offset = qdev->small_buf_phy_addr_low;

	/* Initialize the small buffer queue. */
	for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
		small_buf_q_entry->addr_high =
		    cpu_to_le32(qdev->small_buf_phy_addr_high);
		small_buf_q_entry->addr_low =
		    cpu_to_le32(qdev->small_buf_phy_addr_low +
				(i * QL_SMALL_BUFFER_SIZE));
		small_buf_q_entry++;
	}
	qdev->small_buf_index = 0;
	set_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags);
	return 0;
}

static void ql_free_small_buffers(struct ql3_adapter *qdev)
{
	if (!test_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: Already done.\n", qdev->ndev->name);
		return;
	}
	if (qdev->small_buf_virt_addr != NULL) {
		pci_free_consistent(qdev->pdev,
				    qdev->small_buf_total_size,
				    qdev->small_buf_virt_addr,
				    qdev->small_buf_phy_addr);

		qdev->small_buf_virt_addr = NULL;
	}
}

static void ql_free_large_buffers(struct ql3_adapter *qdev)
{
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;

	for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
		lrg_buf_cb = &qdev->lrg_buf[i];
		if (lrg_buf_cb->skb) {
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(lrg_buf_cb, mapaddr),
					 pci_unmap_len(lrg_buf_cb, maplen),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(lrg_buf_cb->skb);
			memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
		}
	}
}
2299 static void ql_init_large_buffers(struct ql3_adapter
*qdev
)
2302 struct ql_rcv_buf_cb
*lrg_buf_cb
;
2303 struct bufq_addr_element
*buf_addr_ele
= qdev
->lrg_buf_q_virt_addr
;
2305 for (i
= 0; i
< NUM_LARGE_BUFFERS
; i
++) {
2306 lrg_buf_cb
= &qdev
->lrg_buf
[i
];
2307 buf_addr_ele
->addr_high
= lrg_buf_cb
->buf_phy_addr_high
;
2308 buf_addr_ele
->addr_low
= lrg_buf_cb
->buf_phy_addr_low
;
2311 qdev
->lrg_buf_index
= 0;
2312 qdev
->lrg_buf_skb_check
= 0;
static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
{
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct sk_buff *skb;
	u64 map;

	for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
		skb = dev_alloc_skb(qdev->lrg_buffer_len);
		if (unlikely(!skb)) {
			/* Better luck next round */
			printk(KERN_ERR PFX
			       "%s: large buff alloc failed, "
			       "for %d bytes at index %d.\n",
			       qdev->ndev->name,
			       qdev->lrg_buffer_len, i);
			ql_free_large_buffers(qdev);
			return -ENOMEM;
		}

		lrg_buf_cb = &qdev->lrg_buf[i];
		memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
		lrg_buf_cb->index = i;
		lrg_buf_cb->skb = skb;
		/*
		 * We save some space to copy the ethhdr from the
		 * first buffer.
		 */
		skb_reserve(skb, QL_HEADER_SPACE);
		map = pci_map_single(qdev->pdev,
				     skb->data,
				     qdev->lrg_buffer_len -
				     QL_HEADER_SPACE,
				     PCI_DMA_FROMDEVICE);
		pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
		pci_unmap_len_set(lrg_buf_cb, maplen,
				  qdev->lrg_buffer_len -
				  QL_HEADER_SPACE);
		lrg_buf_cb->buf_phy_addr_low =
		    cpu_to_le32(LS_64BITS(map));
		lrg_buf_cb->buf_phy_addr_high =
		    cpu_to_le32(MS_64BITS(map));
	}
	return 0;
}
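
/*
 * Transmit control blocks are paired one-to-one with request queue
 * entries: tx_buf[i] always points at request queue slot i, so a
 * completed IOCB can be matched back to its skb by index alone.
 */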
static void ql_create_send_free_list(struct ql3_adapter *qdev)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;
	struct ob_mac_iocb_req *req_q_curr =
	    qdev->req_q_virt_addr;

	/* Create free list of transmit buffers */
	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
		tx_cb = &qdev->tx_buf[i];
		tx_cb->skb = NULL;
		tx_cb->queue_entry = req_q_curr;
		req_q_curr++;
	}
}
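
/*
 * Rough layout of the shadow register page allocated below (a single
 * PAGE_SIZE coherent allocation, updated by the chip via DMA):
 *
 *   offset 0: request queue consumer index  (read through
 *             qdev->preq_consumer_index)
 *   offset 8: response queue producer index (read through
 *             qdev->prsp_producer_index)
 *
 * The low/high halves of the page's bus address are programmed into the
 * chip by ql_adapter_initialize().
 */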
static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
{
	if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
		qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
	} else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
		qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
	} else {
		printk(KERN_ERR PFX
		       "%s: Invalid mtu size. Only 1500 and 9000 are accepted.\n",
		       qdev->ndev->name);
		return -ENOMEM;
	}
	qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
	qdev->max_frame_size =
	    (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;

	/*
	 * First allocate a page of shared memory and use it for shadow
	 * locations of the Network Request Queue Consumer Address Register
	 * and the Network Completion Queue Producer Index Register.
	 */
	qdev->shadow_reg_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 PAGE_SIZE, &qdev->shadow_reg_phy_addr);

	if (qdev->shadow_reg_virt_addr != NULL) {
		qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr;
		qdev->req_consumer_index_phy_addr_high =
		    MS_64BITS(qdev->shadow_reg_phy_addr);
		qdev->req_consumer_index_phy_addr_low =
		    LS_64BITS(qdev->shadow_reg_phy_addr);

		qdev->prsp_producer_index =
		    (u32 *) (((u8 *) qdev->preq_consumer_index) + 8);
		qdev->rsp_producer_index_phy_addr_high =
		    qdev->req_consumer_index_phy_addr_high;
		qdev->rsp_producer_index_phy_addr_low =
		    qdev->req_consumer_index_phy_addr_low + 8;
	} else {
		printk(KERN_ERR PFX
		       "%s: shadowReg Alloc failed.\n", qdev->ndev->name);
		return -ENOMEM;
	}

	if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
		printk(KERN_ERR PFX
		       "%s: ql_alloc_net_req_rsp_queues failed.\n",
		       qdev->ndev->name);
		goto err_req_rsp;
	}

	if (ql_alloc_buffer_queues(qdev) != 0) {
		printk(KERN_ERR PFX
		       "%s: ql_alloc_buffer_queues failed.\n",
		       qdev->ndev->name);
		goto err_buffer_queues;
	}

	if (ql_alloc_small_buffers(qdev) != 0) {
		printk(KERN_ERR PFX
		       "%s: ql_alloc_small_buffers failed\n", qdev->ndev->name);
		goto err_small_buffers;
	}

	if (ql_alloc_large_buffers(qdev) != 0) {
		printk(KERN_ERR PFX
		       "%s: ql_alloc_large_buffers failed\n", qdev->ndev->name);
		goto err_small_buffers;
	}

	/* Initialize the large buffer queue. */
	ql_init_large_buffers(qdev);
	ql_create_send_free_list(qdev);

	qdev->rsp_current = qdev->rsp_q_virt_addr;

	return 0;

err_small_buffers:
	ql_free_buffer_queues(qdev);
err_buffer_queues:
	ql_free_net_req_rsp_queues(qdev);
err_req_rsp:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->shadow_reg_virt_addr,
			    qdev->shadow_reg_phy_addr);

	return -ENOMEM;
}
static void ql_free_mem_resources(struct ql3_adapter *qdev)
{
	ql_free_large_buffers(qdev);
	ql_free_small_buffers(qdev);
	ql_free_buffer_queues(qdev);
	ql_free_net_req_rsp_queues(qdev);
	if (qdev->shadow_reg_virt_addr != NULL) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->shadow_reg_virt_addr,
				    qdev->shadow_reg_phy_addr);
		qdev->shadow_reg_virt_addr = NULL;
	}
}
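
/*
 * Program the chip's local RAM (page 2 registers) with the buflet, hash
 * table, NCB and DRB geometry taken from NVRAM.  The DDR RAM semaphore
 * is held for the duration, which serializes this against the other
 * network function sharing the RAM.
 */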
static int ql_init_misc_registers(struct ql3_adapter *qdev)
{
	struct ql3xxx_local_ram_registers __iomem *local_ram =
	    (void __iomem *)qdev->mem_map_registers;

	if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 4))
		return -1;

	ql_write_page2_reg(qdev,
			   &local_ram->bufletSize,
			   qdev->nvram_data.bufletSize);
	ql_write_page2_reg(qdev,
			   &local_ram->maxBufletCount,
			   qdev->nvram_data.bufletCount);
	ql_write_page2_reg(qdev,
			   &local_ram->freeBufletThresholdLow,
			   (qdev->nvram_data.tcpWindowThreshold25 << 16) |
			   (qdev->nvram_data.tcpWindowThreshold0));
	ql_write_page2_reg(qdev,
			   &local_ram->freeBufletThresholdHigh,
			   qdev->nvram_data.tcpWindowThreshold50);
	ql_write_page2_reg(qdev,
			   &local_ram->ipHashTableBase,
			   (qdev->nvram_data.ipHashTableBaseHi << 16) |
			   qdev->nvram_data.ipHashTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->ipHashTableCount,
			   qdev->nvram_data.ipHashTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->tcpHashTableBase,
			   (qdev->nvram_data.tcpHashTableBaseHi << 16) |
			   qdev->nvram_data.tcpHashTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->tcpHashTableCount,
			   qdev->nvram_data.tcpHashTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->ncbBase,
			   (qdev->nvram_data.ncbTableBaseHi << 16) |
			   qdev->nvram_data.ncbTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->maxNcbCount,
			   qdev->nvram_data.ncbTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->drbBase,
			   (qdev->nvram_data.drbTableBaseHi << 16) |
			   qdev->nvram_data.drbTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->maxDrbCount,
			   qdev->nvram_data.drbTableSize);
	ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
	return 0;
}
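
/*
 * One-time hardware bring-up, called with hw_lock held and the driver
 * semaphore owned.  In order: MII setup, PHY out of reset, request and
 * response queue programming, buffer queue programming, then (only when
 * PORT_STATUS_IC is not yet set) local RAM and external hardware
 * configuration, and finally the MAC address load and the Configuration
 * Complete handshake.
 */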
static int ql_adapter_initialize(struct ql3_adapter *qdev)
{
	u32 value;
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	struct ql3xxx_host_memory_registers __iomem *hmem_regs =
	    (void __iomem *)port_regs;
	u32 delay = 10;
	int status = 0;

	if (ql_mii_setup(qdev))
		return -1;

	/* Bring the PHY out of reset. */
	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			    (ISP_SERIAL_PORT_IF_WE |
			     (ISP_SERIAL_PORT_IF_WE << 16)));

	qdev->port_link_state = LS_DOWN;
	netif_carrier_off(qdev->ndev);

	/* V2 chip fix for ARS-39168. */
	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			    (ISP_SERIAL_PORT_IF_SDE |
			     (ISP_SERIAL_PORT_IF_SDE << 16)));

	/* Request Queue Registers */
	*((u32 *) (qdev->preq_consumer_index)) = 0;
	atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
	qdev->req_producer_index = 0;

	ql_write_page1_reg(qdev,
			   &hmem_regs->reqConsumerIndexAddrHigh,
			   qdev->req_consumer_index_phy_addr_high);
	ql_write_page1_reg(qdev,
			   &hmem_regs->reqConsumerIndexAddrLow,
			   qdev->req_consumer_index_phy_addr_low);

	ql_write_page1_reg(qdev,
			   &hmem_regs->reqBaseAddrHigh,
			   MS_64BITS(qdev->req_q_phy_addr));
	ql_write_page1_reg(qdev,
			   &hmem_regs->reqBaseAddrLow,
			   LS_64BITS(qdev->req_q_phy_addr));
	ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);

	/* Response Queue Registers */
	*((u16 *) (qdev->prsp_producer_index)) = 0;
	qdev->rsp_consumer_index = 0;
	qdev->rsp_current = qdev->rsp_q_virt_addr;

	ql_write_page1_reg(qdev,
			   &hmem_regs->rspProducerIndexAddrHigh,
			   qdev->rsp_producer_index_phy_addr_high);
	ql_write_page1_reg(qdev,
			   &hmem_regs->rspProducerIndexAddrLow,
			   qdev->rsp_producer_index_phy_addr_low);

	ql_write_page1_reg(qdev,
			   &hmem_regs->rspBaseAddrHigh,
			   MS_64BITS(qdev->rsp_q_phy_addr));
	ql_write_page1_reg(qdev,
			   &hmem_regs->rspBaseAddrLow,
			   LS_64BITS(qdev->rsp_q_phy_addr));
	ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);

	/* Large Buffer Queue */
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeQBaseAddrHigh,
			   MS_64BITS(qdev->lrg_buf_q_phy_addr));
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeQBaseAddrLow,
			   LS_64BITS(qdev->lrg_buf_q_phy_addr));
	ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, NUM_LBUFQ_ENTRIES);
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeBufferLength,
			   qdev->lrg_buffer_len);

	/* Small Buffer Queue */
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxSmallQBaseAddrHigh,
			   MS_64BITS(qdev->small_buf_q_phy_addr));
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxSmallQBaseAddrLow,
			   LS_64BITS(qdev->small_buf_q_phy_addr));
	ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxSmallBufferLength,
			   QL_SMALL_BUFFER_SIZE);

	qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
	qdev->small_buf_release_cnt = 8;
	qdev->lrg_buf_q_producer_index = NUM_LBUFQ_ENTRIES - 1;
	qdev->lrg_buf_release_cnt = 8;
	qdev->lrg_buf_next_free =
	    (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
	qdev->small_buf_index = 0;
	qdev->lrg_buf_index = 0;
	qdev->lrg_buf_free_count = 0;
	qdev->lrg_buf_free_head = NULL;
	qdev->lrg_buf_free_tail = NULL;

	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.rxSmallQProducerIndex,
			    qdev->small_buf_q_producer_index);
	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.rxLargeQProducerIndex,
			    qdev->lrg_buf_q_producer_index);

	/*
	 * Find out if the chip has already been initialized.  If it has, then
	 * we skip some of the initialization.
	 */
	clear_bit(QL_LINK_MASTER, &qdev->flags);
	value = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if ((value & PORT_STATUS_IC) == 0) {

		/* Chip has not been configured yet, so let it rip. */
		if (ql_init_misc_registers(qdev)) {
			status = -1;
			goto out;
		}

		if (qdev->mac_index)
			ql_write_page0_reg(qdev,
					   &port_regs->mac1MaxFrameLengthReg,
					   qdev->max_frame_size);
		else
			ql_write_page0_reg(qdev,
					   &port_regs->mac0MaxFrameLengthReg,
					   qdev->max_frame_size);

		value = qdev->nvram_data.tcpMaxWindowSize;
		ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);

		value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;

		if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
				    (QL_RESOURCE_BITS_BASE_CODE |
				     (qdev->mac_index) * 2) << 13)) {
			status = -1;
			goto out;
		}
		ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
		ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
				   (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
				     16) | (INTERNAL_CHIP_SD |
					    INTERNAL_CHIP_WE)));
		ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
	}

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		status = -1;
		goto out;
	}

	ql_init_scan_mode(qdev);
	ql_get_phy_owner(qdev);

	/* Load the MAC Configuration */

	/* Program lower 32 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((qdev->ndev->dev_addr[2] << 24)
			    | (qdev->ndev->dev_addr[3] << 16)
			    | (qdev->ndev->dev_addr[4] << 8)
			    | qdev->ndev->dev_addr[5]));

	/* Program top 16 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((qdev->ndev->dev_addr[0] << 8)
			    | qdev->ndev->dev_addr[1]));

	/* Enable Primary MAC */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
			    MAC_ADDR_INDIRECT_PTR_REG_PE));

	/* Clear Primary and Secondary IP addresses */
	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
			   ((IP_ADDR_INDEX_REG_MASK << 16) |
			    (qdev->mac_index << 2)));
	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);

	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
			   ((IP_ADDR_INDEX_REG_MASK << 16) |
			    ((qdev->mac_index << 2) + 1)));
	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);

	/* Indicate Configuration Complete */
	ql_write_page0_reg(qdev,
			   &port_regs->portControl,
			   ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));

	do {
		value = ql_read_page0_reg(qdev, &port_regs->portStatus);
		if (value & PORT_STATUS_IC)
			break;
		msleep(500);
	} while (--delay);

	if (delay == 0) {
		printk(KERN_ERR PFX
		       "%s: Hw Initialization timeout.\n", qdev->ndev->name);
		status = -1;
		goto out;
	}

	/* Enable Ethernet Function */
	value =
	    (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
	     PORT_CONTROL_HH);
	ql_write_page0_reg(qdev, &port_regs->portControl,
			   ((value << 16) | value));

out:
	return status;
}
/*
 * Caller holds hw_lock.
 */
static int ql_adapter_reset(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	int status = 0;
	u16 value;
	int max_wait_time;

	set_bit(QL_RESET_ACTIVE, &qdev->flags);
	clear_bit(QL_RESET_DONE, &qdev->flags);

	/*
	 * Issue soft reset to chip.
	 */
	printk(KERN_DEBUG PFX
	       "%s: Issue soft reset to chip.\n",
	       qdev->ndev->name);
	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.ispControlStatus,
			    ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));

	/* Wait up to five seconds for the reset to complete. */
	printk(KERN_DEBUG PFX
	       "%s: Waiting for reset to complete.\n",
	       qdev->ndev->name);

	/* Wait until the firmware tells us the Soft Reset is done */
	max_wait_time = 5;
	do {
		value =
		    ql_read_common_reg(qdev,
				       &port_regs->CommonRegs.ispControlStatus);
		if ((value & ISP_CONTROL_SR) == 0)
			break;

		ssleep(1);
	} while ((--max_wait_time));

	/*
	 * Also, make sure that the Network Reset Interrupt bit has been
	 * cleared after the soft reset has taken place.
	 */
	value =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	if (value & ISP_CONTROL_RI) {
		printk(KERN_DEBUG PFX
		       "ql_adapter_reset: clearing RI after reset.\n");
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
	}

	if (max_wait_time == 0) {
		/* Issue Force Soft Reset */
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    ((ISP_CONTROL_FSR << 16) |
				     ISP_CONTROL_FSR));
		/*
		 * Wait until the firmware tells us the Force Soft Reset is
		 * done.
		 */
		max_wait_time = 5;
		do {
			value =
			    ql_read_common_reg(qdev,
					       &port_regs->CommonRegs.
					       ispControlStatus);
			if ((value & ISP_CONTROL_FSR) == 0)
				break;
			ssleep(1);
		} while ((--max_wait_time));
	}
	if (max_wait_time == 0)
		status = -1;

	clear_bit(QL_RESET_ACTIVE, &qdev->flags);
	set_bit(QL_RESET_DONE, &qdev->flags);
	return status;
}
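
/*
 * Work out which of the chip's two network functions this instance is
 * driving by decoding ISP_CONTROL_FN_MASK from ispControlStatus, then
 * derive the per-function IOCB opcodes, mailbox bit mask and PHY
 * address from that.
 */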
static void ql_set_mac_info(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value, port_status;
	u8 func_number;

	/* Get the function number */
	value =
	    ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
	func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
	port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
	switch (value & ISP_CONTROL_FN_MASK) {
	case ISP_CONTROL_FN0_NET:
		qdev->mac_index = 0;
		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
		qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
		qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
		qdev->mb_bit_mask = FN0_MA_BITS_MASK;
		qdev->PHYAddr = PORT0_PHY_ADDRESS;
		if (port_status & PORT_STATUS_SM0)
			set_bit(QL_LINK_OPTICAL, &qdev->flags);
		else
			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
		break;

	case ISP_CONTROL_FN1_NET:
		qdev->mac_index = 1;
		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
		qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
		qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
		qdev->mb_bit_mask = FN1_MA_BITS_MASK;
		qdev->PHYAddr = PORT1_PHY_ADDRESS;
		if (port_status & PORT_STATUS_SM1)
			set_bit(QL_LINK_OPTICAL, &qdev->flags);
		else
			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
		break;

	case ISP_CONTROL_FN0_SCSI:
	case ISP_CONTROL_FN1_SCSI:
	default:
		printk(KERN_DEBUG PFX
		       "%s: Invalid function number, ispControlStatus = 0x%x\n",
		       qdev->ndev->name, value);
		break;
	}
	qdev->numPorts = qdev->nvram_data.numPorts;
}
static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
	struct pci_dev *pdev = qdev->pdev;

	printk(KERN_INFO PFX
	       "\n%s Adapter %d RevisionID %d found on PCI slot %d.\n",
	       DRV_NAME, qdev->index, qdev->chip_rev_id, qdev->pci_slot);
	printk(KERN_INFO PFX
	       "%s Interface.\n",
	       test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");

	/*
	 * Print PCI bus width/type.
	 */
	printk(KERN_INFO PFX
	       "Bus interface is %s %s.\n",
	       ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
	       ((qdev->pci_x) ? "PCI-X" : "PCI"));

	printk(KERN_INFO PFX
	       "mem IO base address adjusted = 0x%p\n",
	       qdev->mem_map_registers);
	printk(KERN_INFO PFX "Interrupt number = %d\n", pdev->irq);

	if (netif_msg_probe(qdev))
		printk(KERN_INFO PFX
		       "%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
		       ndev->name, ndev->dev_addr[0], ndev->dev_addr[1],
		       ndev->dev_addr[2], ndev->dev_addr[3], ndev->dev_addr[4],
		       ndev->dev_addr[5]);
}
static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
{
	struct net_device *ndev = qdev->ndev;
	int retval = 0;

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	clear_bit(QL_LINK_MASTER, &qdev->flags);

	ql_disable_interrupts(qdev);

	free_irq(qdev->pdev->irq, ndev);

	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: calling pci_disable_msi().\n", qdev->ndev->name);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
		pci_disable_msi(qdev->pdev);
	}

	del_timer_sync(&qdev->adapter_timer);

	netif_poll_disable(ndev);

	if (do_reset) {
		int soft_reset;
		unsigned long hw_flags;

		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		if (ql_wait_for_drvr_lock(qdev)) {
			if ((soft_reset = ql_adapter_reset(qdev))) {
				printk(KERN_ERR PFX
				       "%s: ql_adapter_reset(%d) FAILED!\n",
				       ndev->name, qdev->index);
			}
			printk(KERN_ERR PFX
			       "%s: Releasing driver lock via chip reset.\n",
			       ndev->name);
		} else {
			printk(KERN_ERR PFX
			       "%s: Could not acquire driver lock to do "
			       "reset!\n", ndev->name);
			retval = -1;
		}
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	}
	ql_free_mem_resources(qdev);
	return retval;
}
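
/*
 * Bring-up order matters here: memory resources first, then optional
 * MSI, then the IRQ, then hardware initialization under the driver
 * semaphore; the watchdog timer, polling and interrupts are enabled
 * last.  Note that ql_wait_for_drvr_lock() returns nonzero on success,
 * which is why its result landing in 'err' selects the success branch.
 */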
static int ql_adapter_up(struct ql3_adapter *qdev)
{
	struct net_device *ndev = qdev->ndev;
	int err;
	unsigned long irq_flags = SA_SAMPLE_RANDOM | SA_SHIRQ;
	unsigned long hw_flags;

	if (ql_alloc_mem_resources(qdev)) {
		printk(KERN_ERR PFX
		       "%s: Unable to allocate buffers.\n", ndev->name);
		return -ENOMEM;
	}

	if (qdev->msi) {
		if (pci_enable_msi(qdev->pdev)) {
			printk(KERN_ERR PFX
			       "%s: User requested MSI, but MSI failed to "
			       "initialize.  Continuing without MSI.\n",
			       qdev->ndev->name);
			qdev->msi = 0;
		} else {
			printk(KERN_INFO PFX "%s: MSI Enabled...\n",
			       qdev->ndev->name);
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			irq_flags &= ~SA_SHIRQ;
		}
	}

	if ((err = request_irq(qdev->pdev->irq,
			       ql3xxx_isr,
			       irq_flags, ndev->name, ndev))) {
		printk(KERN_ERR PFX
		       "%s: Failed to reserve interrupt %d already in use.\n",
		       ndev->name, qdev->pdev->irq);
		goto err_irq;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	/* ql_wait_for_drvr_lock() returns nonzero on success. */
	if ((err = ql_wait_for_drvr_lock(qdev))) {
		if ((err = ql_adapter_initialize(qdev))) {
			printk(KERN_ERR PFX
			       "%s: Unable to initialize adapter.\n",
			       ndev->name);
			goto err_init;
		}
		printk(KERN_ERR PFX
		       "%s: Releasing driver lock.\n", ndev->name);
		ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
	} else {
		printk(KERN_ERR PFX
		       "%s: Could not acquire driver lock.\n",
		       ndev->name);
		err = -1;
		goto err_lock;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	set_bit(QL_ADAPTER_UP, &qdev->flags);

	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);

	netif_poll_enable(ndev);
	ql_enable_interrupts(qdev);
	return 0;

err_init:
	ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
err_lock:
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	free_irq(qdev->pdev->irq, ndev);
err_irq:
	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: calling pci_disable_msi().\n",
		       qdev->ndev->name);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
		pci_disable_msi(qdev->pdev);
	}
	return err;
}
static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
{
	if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
		printk(KERN_ERR PFX
		       "%s: Driver up/down cycle failed, "
		       "closing device\n", qdev->ndev->name);
		dev_close(qdev->ndev);
		return -1;
	}
	return 0;
}
static int ql3xxx_close(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(50);

	ql_adapter_down(qdev, QL_DO_RESET);
	return 0;
}
static int ql3xxx_open(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	return ql_adapter_up(qdev);
}
static struct net_device_stats *ql3xxx_get_stats(struct net_device *dev)
{
	struct ql3_adapter *qdev = netdev_priv(dev);
	return &qdev->stats;
}
static int ql3xxx_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	printk(KERN_ERR PFX "%s: new mtu size = %d.\n", ndev->name, new_mtu);
	if (new_mtu != NORMAL_MTU_SIZE && new_mtu != JUMBO_MTU_SIZE) {
		printk(KERN_ERR PFX
		       "%s: mtu size of %d is not valid. Use exactly %d or "
		       "%d.\n", ndev->name, new_mtu, NORMAL_MTU_SIZE,
		       JUMBO_MTU_SIZE);
		return -EINVAL;
	}

	if (!netif_running(ndev)) {
		ndev->mtu = new_mtu;
		return 0;
	}

	ndev->mtu = new_mtu;
	return ql_cycle_adapter(qdev, QL_DO_RESET);
}
static void ql3xxx_set_multicast_list(struct net_device *ndev)
{
	/*
	 * We are manually parsing the list in the net_device structure.
	 */
	return;
}
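
/*
 * MAC address programming uses an indirect register pair: a write to
 * macAddrIndirectPtrReg selects which word of the address is being
 * loaded, and the following write to macAddrDataReg supplies that word
 * (low 32 bits first, then the top 16).
 */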
static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	struct sockaddr *addr = p;
	unsigned long hw_flags;

	if (netif_running(ndev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	/* Program lower 32 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((ndev->dev_addr[2] << 24) |
			    (ndev->dev_addr[3] << 16) |
			    (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));

	/* Program top 16 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return 0;
}
static void ql3xxx_tx_timeout(struct net_device *ndev)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);

	printk(KERN_ERR PFX "%s: Resetting...\n", ndev->name);
	/*
	 * Stop the queues, we've got a problem.
	 */
	netif_stop_queue(ndev);

	/*
	 * Wake up the worker to process this event.
	 */
	queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
}
static void ql_reset_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, reset_work.work);
	struct net_device *ndev = qdev->ndev;
	u32 value;
	struct ql_tx_buf_cb *tx_cb;
	int max_wait_time, i;
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	unsigned long hw_flags;

	/* Test each reset flag individually; test_bit() takes a bit
	 * number, not a mask. */
	if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
	    test_bit(QL_RESET_START, &qdev->flags)) {
		clear_bit(QL_LINK_MASTER, &qdev->flags);

		/*
		 * Loop through the active list and return the skb.
		 */
		for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
			tx_cb = &qdev->tx_buf[i];
			if (tx_cb->skb) {
				printk(KERN_DEBUG PFX
				       "%s: Freeing lost SKB.\n",
				       qdev->ndev->name);
				pci_unmap_single(qdev->pdev,
					pci_unmap_addr(tx_cb, mapaddr),
					pci_unmap_len(tx_cb, maplen),
					PCI_DMA_TODEVICE);
				dev_kfree_skb(tx_cb->skb);
				tx_cb->skb = NULL;
			}
		}

		printk(KERN_ERR PFX
		       "%s: Clearing NRI after reset.\n", qdev->ndev->name);
		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
		/*
		 * Wait for the Soft Reset to complete.
		 */
		max_wait_time = 10;
		do {
			value = ql_read_common_reg(qdev,
						   &port_regs->CommonRegs.
						   ispControlStatus);
			if ((value & ISP_CONTROL_SR) == 0) {
				printk(KERN_DEBUG PFX
				       "%s: reset completed.\n",
				       qdev->ndev->name);
				break;
			}

			if (value & ISP_CONTROL_RI) {
				printk(KERN_DEBUG PFX
				       "%s: clearing NRI after reset.\n",
				       qdev->ndev->name);
				ql_write_common_reg(qdev,
						    &port_regs->CommonRegs.
						    ispControlStatus,
						    ((ISP_CONTROL_RI <<
						      16) | ISP_CONTROL_RI));
			}

			ssleep(1);
		} while (--max_wait_time);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		if (value & ISP_CONTROL_SR) {
			/*
			 * Set the reset flags and clear the board again.
			 * Nothing else to do...
			 */
			printk(KERN_ERR PFX
			       "%s: Timed out waiting for reset to "
			       "complete.\n", ndev->name);
			printk(KERN_ERR PFX
			       "%s: Do a reset.\n", ndev->name);
			clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
			clear_bit(QL_RESET_START, &qdev->flags);
			ql_cycle_adapter(qdev, QL_DO_RESET);
			return;
		}

		clear_bit(QL_RESET_ACTIVE, &qdev->flags);
		clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
		clear_bit(QL_RESET_START, &qdev->flags);
		ql_cycle_adapter(qdev, QL_NO_RESET);
	}
}
static void ql_tx_timeout_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, tx_timeout_work.work);

	ql_cycle_adapter(qdev, QL_DO_RESET);
}
static void ql_get_board_info(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value;

	value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);

	qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
	if (value & PORT_STATUS_64)
		qdev->pci_width = 64;
	else
		qdev->pci_width = 32;
	if (value & PORT_STATUS_X)
		qdev->pci_x = 1;
	else
		qdev->pci_x = 0;
	qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
}
static void ql3xxx_timer(unsigned long ptr)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;

	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
		printk(KERN_DEBUG PFX
		       "%s: Reset in progress.\n",
		       qdev->ndev->name);
		goto end;
	}

	ql_link_state_machine(qdev);

	/* Restart timer on 1 second interval. */
end:
	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
}
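
/*
 * PCI probe path: enable the device, claim its regions, pick a 64-bit
 * or 32-bit DMA mask, allocate the etherdev, map the register BAR,
 * validate NVRAM, wire up the net_device entry points and register.
 * The link is treated as down until the adapter timer runs the link
 * state machine.
 */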
static int __devinit ql3xxx_probe(struct pci_dev *pdev,
				  const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql3_adapter *qdev = NULL;
	static int cards_found = 0;
	int pci_using_dac, err;

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "%s cannot enable PCI device\n",
		       pci_name(pdev));
		goto err_out;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
		       pci_name(pdev));
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	} else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
		pci_using_dac = 0;
		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	}

	if (err) {
		printk(KERN_ERR PFX "%s no usable DMA configuration\n",
		       pci_name(pdev));
		goto err_out_free_regions;
	}

	ndev = alloc_etherdev(sizeof(struct ql3_adapter));
	if (!ndev)
		goto err_out_free_regions;

	SET_MODULE_OWNER(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);

	if (pci_using_dac)
		ndev->features |= NETIF_F_HIGHDMA;

	pci_set_drvdata(pdev, ndev);

	qdev = netdev_priv(ndev);
	qdev->index = cards_found;
	qdev->ndev = ndev;
	qdev->pdev = pdev;
	qdev->port_link_state = LS_DOWN;
	if (msi)
		qdev->msi = 1;

	qdev->msg_enable = netif_msg_init(debug, default_msg);

	qdev->mem_map_registers =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(qdev->pdev, 1));
	if (!qdev->mem_map_registers) {
		printk(KERN_ERR PFX "%s: cannot map device registers\n",
		       pci_name(pdev));
		goto err_out_free_ndev;
	}

	spin_lock_init(&qdev->adapter_lock);
	spin_lock_init(&qdev->hw_lock);

	/* Set driver entry points */
	ndev->open = ql3xxx_open;
	ndev->hard_start_xmit = ql3xxx_send;
	ndev->stop = ql3xxx_close;
	ndev->get_stats = ql3xxx_get_stats;
	ndev->change_mtu = ql3xxx_change_mtu;
	ndev->set_multicast_list = ql3xxx_set_multicast_list;
	SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
	ndev->set_mac_address = ql3xxx_set_mac_address;
	ndev->tx_timeout = ql3xxx_tx_timeout;
	ndev->watchdog_timeo = 5 * HZ;

	ndev->poll = &ql_poll;
	ndev->weight = 64;

	ndev->irq = pdev->irq;

	/* make sure the EEPROM is good */
	if (ql_get_nvram_params(qdev)) {
		printk(KERN_ALERT PFX
		       "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n",
		       qdev->index);
		goto err_out_iounmap;
	}

	ql_set_mac_info(qdev);

	/* Validate and set parameters */
	if (qdev->mac_index) {
		memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn2.macAddress,
		       ETH_ALEN);
	} else {
		memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn0.macAddress,
		       ETH_ALEN);
	}
	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;

	/* Turn off support for multicasting */
	ndev->flags &= ~IFF_MULTICAST;

	/* Record PCI bus information. */
	ql_get_board_info(qdev);

	/*
	 * Set the Maximum Memory Read Byte Count value.  We do this to handle
	 * jumbo frames.
	 */
	if (qdev->pci_x)
		pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);

	err = register_netdev(ndev);
	if (err) {
		printk(KERN_ERR PFX "%s: cannot register net device\n",
		       pci_name(pdev));
		goto err_out_iounmap;
	}

	/* we're going to reset, so assume we have no link for now */
	netif_carrier_off(ndev);
	netif_stop_queue(ndev);

	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
	INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);

	init_timer(&qdev->adapter_timer);
	qdev->adapter_timer.function = ql3xxx_timer;
	qdev->adapter_timer.expires = jiffies + HZ * 2;	/* two second delay */
	qdev->adapter_timer.data = (unsigned long)qdev;

	if (!cards_found) {
		printk(KERN_ALERT PFX "%s\n", DRV_STRING);
		printk(KERN_ALERT PFX "Driver name: %s, Version: %s.\n",
		       DRV_NAME, DRV_VERSION);
	}
	ql_display_dev_info(ndev);

	cards_found++;
	return 0;

err_out_iounmap:
	iounmap(qdev->mem_map_registers);
err_out_free_ndev:
	free_netdev(ndev);
err_out_free_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
err_out:
	return err;
}
static void __devexit ql3xxx_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql3_adapter *qdev = netdev_priv(ndev);

	unregister_netdev(ndev);

	ql_disable_interrupts(qdev);

	if (qdev->workqueue) {
		cancel_delayed_work(&qdev->reset_work);
		cancel_delayed_work(&qdev->tx_timeout_work);
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	iounmap(qdev->mem_map_registers);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(ndev);
}
static struct pci_driver ql3xxx_driver = {
	.name = DRV_NAME,
	.id_table = ql3xxx_pci_tbl,
	.probe = ql3xxx_probe,
	.remove = __devexit_p(ql3xxx_remove),
};

static int __init ql3xxx_init_module(void)
{
	return pci_register_driver(&ql3xxx_driver);
}

static void __exit ql3xxx_exit(void)
{
	pci_unregister_driver(&ql3xxx_driver);
}

module_init(ql3xxx_init_module);
module_exit(ql3xxx_exit);