/* drivers/net/ethernet/qlogic/qla3xxx.c */
/*
 * QLogic QLA3xxx NIC HBA Driver
 * Copyright (c) 2003-2006 QLogic Corporation
 *
 * See LICENSE.qla3xxx for copyright and licensing details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/prefetch.h>

#include "qla3xxx.h"

#define DRV_NAME	"qla3xxx"
#define DRV_STRING	"QLogic ISP3XXX Network Driver"
#define DRV_VERSION	"v2.03.00-k5"

static const char ql3xxx_driver_name[] = DRV_NAME;
static const char ql3xxx_driver_version[] = DRV_VERSION;

#define TIMED_OUT_MSG \
"Timed out waiting for management port to get free before issuing command\n"

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg
    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");

static const struct pci_device_id ql3xxx_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);

/*
 * These are the known PHYs that are used.
 */
enum PHY_DEVICE_TYPE {
	PHY_TYPE_UNKNOWN = 0,
	PHY_VITESSE_VSC8211,
	PHY_AGERE_ET1011C,
	MAX_PHY_DEV_TYPES
};

struct PHY_DEVICE_INFO {
	const enum PHY_DEVICE_TYPE phyDevice;
	const u32 phyIdOUI;
	const u16 phyIdModel;
	const char *name;
};

static const struct PHY_DEVICE_INFO PHY_DEVICES[] = {
	{PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
	{PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
	{PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
};

/*
 * Caller must take hw_lock.
 */
static int ql_sem_spinlock(struct ql3_adapter *qdev,
			   u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;
	unsigned int seconds = 3;

	do {
		writel((sem_mask | sem_bits),
		       &port_regs->CommonRegs.semaphoreReg);
		value = readl(&port_regs->CommonRegs.semaphoreReg);
		if ((value & (sem_mask >> 16)) == sem_bits)
			return 0;
		ssleep(1);
	} while (--seconds);
	return -1;
}

static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
	readl(&port_regs->CommonRegs.semaphoreReg);
}

static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
	value = readl(&port_regs->CommonRegs.semaphoreReg);
	return ((value & (sem_mask >> 16)) == sem_bits);
}
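/*
 * Note on the semaphore register layout (a reading of the code above, not
 * authoritative hardware documentation): the upper 16 bits of sem_mask
 * select which semaphore field the write may modify, while sem_bits
 * carries the ownership code in the lower 16 bits.  Reading the register
 * back and comparing (value & (sem_mask >> 16)) against sem_bits tells us
 * whether our write actually took, i.e. whether we now own the semaphore.
 * ql_sem_lock() tries once and returns a boolean; ql_sem_spinlock()
 * retries for roughly 3 seconds and returns 0 on success or -1 on timeout.
 */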
/*
 * Caller holds hw_lock.
 */
static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
{
	int i = 0;

	do {
		if (ql_sem_lock(qdev,
				QL_DRVR_SEM_MASK,
				(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
				 * 2) << 1)) {
			netdev_printk(KERN_DEBUG, qdev->ndev,
				      "driver lock acquired\n");
			return 1;
		}
		ssleep(1);
	} while (++i < 10);

	netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
	return 0;
}

static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	writel(((ISP_CONTROL_NP_MASK << 16) | page),
	       &port_regs->CommonRegs.ispControlStatus);
	readl(&port_regs->CommonRegs.ispControlStatus);
	qdev->current_page = page;
}
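/*
 * Explanatory note, derived from the code rather than a datasheet: the
 * chip exposes its registers through a banked window, and the page-select
 * bits in ispControlStatus choose which register page the per-port
 * addresses decode to.  qdev->current_page caches the last page written
 * so that the ql_read/write_pageN_reg() helpers below can skip the
 * page-select write when the window is already positioned correctly.
 */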
static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	value = readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return value;
}

static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	return readl(reg);
}

static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	value = readl(reg);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return value;
}

static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	return readl(reg);
}

static void ql_write_common_reg_l(struct ql3_adapter *qdev,
				  u32 __iomem *reg, u32 value)
{
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	writel(value, reg);
	readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}

static void ql_write_common_reg(struct ql3_adapter *qdev,
				u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
}

static void ql_write_nvram_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
	udelay(1);
}

static void ql_write_page0_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page1_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 1)
		ql_set_register_page(qdev, 1);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page2_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 2)
		ql_set_register_page(qdev, 2);
	writel(value, reg);
	readl(reg);
}

static void ql_disable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      (ISP_IMR_ENABLE_INT << 16));
}

static void ql_enable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      ((0xff << 16) | ISP_IMR_ENABLE_INT));
}
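/*
 * Many control registers on this chip follow a "write mask in the upper
 * 16 bits" convention: a bit in the low half is only updated when the
 * corresponding bit in the high half is also set.  That is why
 * ql_disable_interrupts() writes (ISP_IMR_ENABLE_INT << 16) -- unmask the
 * enable bit but leave it clear -- while ql_enable_interrupts() writes the
 * mask and the bit together.  The MAC config helpers further down use the
 * same idiom.  (Inferred from usage throughout this file.)
 */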
static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
					    struct ql_rcv_buf_cb *lrg_buf_cb)
{
	dma_addr_t map;
	int err;
	lrg_buf_cb->next = NULL;

	if (qdev->lrg_buf_free_tail == NULL) {	/* The list is empty */
		qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
	} else {
		qdev->lrg_buf_free_tail->next = lrg_buf_cb;
		qdev->lrg_buf_free_tail = lrg_buf_cb;
	}

	if (!lrg_buf_cb->skb) {
		lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
						   qdev->lrg_buffer_len);
		if (unlikely(!lrg_buf_cb->skb)) {
			qdev->lrg_buf_skb_check++;
		} else {
			/*
			 * We save some space to copy the ethhdr from first
			 * buffer
			 */
			skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     lrg_buf_cb->skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netdev_err(qdev->ndev,
					   "PCI mapping failed with error: %d\n",
					   err);
				dev_kfree_skb(lrg_buf_cb->skb);
				lrg_buf_cb->skb = NULL;

				qdev->lrg_buf_skb_check++;
				return;
			}

			lrg_buf_cb->buf_phy_addr_low =
			    cpu_to_le32(LS_64BITS(map));
			lrg_buf_cb->buf_phy_addr_high =
			    cpu_to_le32(MS_64BITS(map));
			dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
			dma_unmap_len_set(lrg_buf_cb, maplen,
					  qdev->lrg_buffer_len -
					  QL_HEADER_SPACE);
		}
	}

	qdev->lrg_buf_free_count++;
}

static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
							   *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;

	if (lrg_buf_cb != NULL) {
		qdev->lrg_buf_free_head = lrg_buf_cb->next;
		if (qdev->lrg_buf_free_head == NULL)
			qdev->lrg_buf_free_tail = NULL;
		qdev->lrg_buf_free_count--;
	}

	return lrg_buf_cb;
}
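/*
 * Free-list bookkeeping note (derived from the two helpers above): the
 * large receive buffers are kept on a singly linked list threaded through
 * lrg_buf_cb->next, with head/tail pointers and a count in the adapter
 * structure.  When an skb allocation or DMA mapping fails on release, the
 * control block stays on the list without an skb and lrg_buf_skb_check is
 * bumped; ql_populate_free_queue() later retries those allocations.
 */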
static u32 addrBits = EEPROM_NO_ADDR_BITS;
static u32 dataBits = EEPROM_NO_DATA_BITS;

static void fm93c56a_deselect(struct ql3_adapter *qdev);
static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
			    unsigned short *value);

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_select(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
{
	int i;
	u32 mask;
	u32 dataBit;
	u32 previousBit;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	/* Clock in a zero, then do the start bit */
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1));
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE));
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL));

	mask = 1 << (FM93C56A_CMD_BITS - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
		dataBit = (cmd & mask)
			? AUBURN_EEPROM_DO_1
			: AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/* If the bit changed, change the DO state to match */
			ql_write_nvram_reg(qdev, spir,
					   (ISP_NVRAM_MASK |
					    qdev->eeprom_cmd_data | dataBit));
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_RISE));
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_FALL));
		cmd = cmd << 1;
	}

	mask = 1 << (addrBits - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < addrBits; i++) {
		dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1
			: AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev, spir,
					   (ISP_NVRAM_MASK |
					    qdev->eeprom_cmd_data | dataBit));
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_RISE));
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_FALL));
		eepromAddr = eepromAddr << 1;
	}
}
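/*
 * Bit-banging sketch (informational): each MSB-first bit of the command
 * and address is shifted out on the DO line in up to three register
 * writes -- set DO only if its level changed, then pulse the clock high,
 * then low.  The read-back plus udelay(1) inside ql_write_nvram_reg()
 * paces the waveform so the FM93C56A's sampling requirements are met.
 */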
/*
 * Caller holds hw_lock.
 */
static void fm93c56a_deselect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
{
	int i;
	u32 data = 0;
	u32 dataBit;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	/* Read the data bits */
	/* The first bit is a dummy. Clock right over it. */
	for (i = 0; i < dataBits; i++) {
		ql_write_nvram_reg(qdev, spir,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev, spir,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_FALL);
		dataBit = (ql_read_common_reg(qdev, spir) &
			   AUBURN_EEPROM_DI_1) ? 1 : 0;
		data = (data << 1) | dataBit;
	}
	*value = (u16)data;
}

/*
 * Caller holds hw_lock.
 */
static void eeprom_readword(struct ql3_adapter *qdev,
			    u32 eepromAddr, unsigned short *value)
{
	fm93c56a_select(qdev);
	fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
	fm93c56a_datain(qdev, value);
	fm93c56a_deselect(qdev);
}

static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
{
	__le16 *p = (__le16 *)ndev->dev_addr;
	p[0] = cpu_to_le16(addr[0]);
	p[1] = cpu_to_le16(addr[1]);
	p[2] = cpu_to_le16(addr[2]);
}

static int ql_get_nvram_params(struct ql3_adapter *qdev)
{
	u16 *pEEPROMData;
	u16 checksum = 0;
	u32 index;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	pEEPROMData = (u16 *)&qdev->nvram_data;
	qdev->eeprom_cmd_data = 0;
	if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 10)) {
		pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	for (index = 0; index < EEPROM_SIZE; index++) {
		eeprom_readword(qdev, index, pEEPROMData);
		checksum += *pEEPROMData;
		pEEPROMData++;
	}
	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);

	if (checksum != 0) {
		netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
			   checksum);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return checksum;
}
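/*
 * NVRAM integrity convention (as the check above implies): the EEPROM is
 * programmed so that the 16-bit sum of all EEPROM_SIZE words is 0 modulo
 * 0x10000.  For example, if the payload words summed to 0xfffe, the
 * vendor would have stored a fix-up word of 0x0002 so the total wraps to
 * zero.  A non-zero sum therefore indicates corrupt or unprogrammed
 * NVRAM, and the function returns -1; on success it returns the (zero)
 * checksum.
 */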
static const u32 PHYAddr[2] = {
	PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
};

static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 temp;
	int count = 1000;

	while (count) {
		temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
		if (!(temp & MAC_MII_STATUS_BSY))
			return 0;
		udelay(10);
		count--;
	}
	return -1;
}

static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 scanControl;

	if (qdev->numPorts > 1) {
		/* Auto scan will cycle through multiple ports */
		scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
	} else {
		scanControl = MAC_MII_CONTROL_SC;
	}

	/*
	 * Scan register 1 of PHY/PETBI,
	 * Set up to scan both devices
	 * The autoscan starts from the first register, completes
	 * the last one before rolling over to the first
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (scanControl) |
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
}

static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
{
	u8 ret;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	/* See if scan mode is enabled before we turn it off */
	if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
	    (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
		/* Scan is enabled */
		ret = 1;
	} else {
		/* Scan is disabled */
		ret = 0;
	}

	/*
	 * When disabling scan mode you must first change the MII register
	 * address
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
			     MAC_MII_CONTROL_RC) << 16));

	return ret;
}

static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
			       u16 regAddr, u16 value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u8 scanWasEnabled;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete 9/10/04 SJP */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
			      u16 *value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u8 scanWasEnabled;
	u32 temp;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16)temp;

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete. */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
{
	u32 temp;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16)temp;

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

static void ql_petbi_reset(struct ql3_adapter *qdev)
{
	ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
}

static void ql_petbi_start_neg(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);

	ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
			 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);

	ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
			 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
}

static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
			    PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
			    PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			    PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_init(struct ql3_adapter *qdev)
{
	ql_petbi_reset(qdev);
	ql_petbi_start_neg(qdev);
}

static void ql_petbi_init_ex(struct ql3_adapter *qdev)
{
	ql_petbi_reset_ex(qdev);
	ql_petbi_start_neg_ex(qdev);
}

static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
}

static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
{
	netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
	/* power down device bit 11 = 1 */
	ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
	/* enable diagnostic mode bit 2 = 1 */
	ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
	/* point to hidden reg 0x2806 */
	ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
	/* Write new PHYAD w/bit 5 set */
	ql_mii_write_reg_ex(qdev, 0x11,
			    0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
	/*
	 * Disable diagnostic mode bit 2 = 0
	 * Power up device bit 11 = 0
	 * Link up (on) and activity (blink)
	 */
	ql_mii_write_reg(qdev, 0x12, 0x840a);
	ql_mii_write_reg(qdev, 0x00, 0x1140);
	ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
}

static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
				       u16 phyIdReg0, u16 phyIdReg1)
{
	enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN;
	u32 oui;
	u16 model;
	int i;

	if (phyIdReg0 == 0xffff)
		return result;

	if (phyIdReg1 == 0xffff)
		return result;

	/* oui is split between two registers */
	oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);

	model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;

	/* Scan table for this PHY */
	for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
		if ((oui == PHY_DEVICES[i].phyIdOUI) &&
		    (model == PHY_DEVICES[i].phyIdModel)) {
			netdev_info(qdev->ndev, "Phy: %s\n",
				    PHY_DEVICES[i].name);
			result = PHY_DEVICES[i].phyDevice;
			break;
		}
	}

	return result;
}
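/*
 * Worked example of the ID decode above (illustrative only, assuming
 * PHY_OUI_1_MASK covers bits 15:10 and PHY_MODEL_MASK bits 9:4 of ID
 * register 1, as the shifts suggest): the Vitesse VSC8211 table entry has
 * OUI 0x0003f1 and model 0xb, so a chip reporting phyIdReg0 = 0x000f and
 * phyIdReg1 = 0xc4bN (N = revision) decodes to
 * oui = (0x000f << 6) | 0x31 = 0x3f1 and model = 0xb -- a match.
 */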
static int ql_phy_get_speed(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C: {
		if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
			return 0;

		reg = (reg >> 8) & 3;
		break;
	}
	default:
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;

		reg = (((reg & 0x18) >> 3) & 3);
	}

	switch (reg) {
	case 2:
		return SPEED_1000;
	case 1:
		return SPEED_100;
	case 0:
		return SPEED_10;
	default:
		return -1;
	}
}

static int ql_is_full_dup(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C: {
		if (ql_mii_read_reg(qdev, 0x1A, &reg))
			return 0;

		return ((reg & 0x0080) && (reg & 0x1000)) != 0;
	}
	case PHY_VITESSE_VSC8211:
	default: {
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;
		return (reg & PHY_AUX_DUPLEX_STAT) != 0;
	}
	}
}

static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PHY_NEG_PAUSE) != 0;
}

static int PHY_Setup(struct ql3_adapter *qdev)
{
	u16 reg1;
	u16 reg2;
	bool agereAddrChangeNeeded = false;
	u32 miiAddr = 0;
	int err;

	/* Determine the PHY we are using by reading the IDs */
	err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
	if (err != 0) {
		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
		return err;
	}

	err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
	if (err != 0) {
		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
		return err;
	}

	/* Check if we have an Agere PHY */
	if ((reg1 == 0xffff) || (reg2 == 0xffff)) {

		/* Determine which MII address we should be using
		   determined by the index of the card */
		if (qdev->mac_index == 0)
			miiAddr = MII_AGERE_ADDR_1;
		else
			miiAddr = MII_AGERE_ADDR_2;

		err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
		if (err != 0) {
			netdev_err(qdev->ndev,
				   "Could not read from reg PHY_ID_0_REG after Agere detected\n");
			return err;
		}

		err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
		if (err != 0) {
			netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n");
			return err;
		}

		/* We need to remember to initialize the Agere PHY */
		agereAddrChangeNeeded = true;
	}

	/* Determine the particular PHY we have on board to apply
	   PHY specific initializations */
	qdev->phyType = getPhyType(qdev, reg1, reg2);

	if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
		/* need this here so address gets changed */
		phyAgereSpecificInit(qdev, miiAddr);
	} else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
		netdev_err(qdev->ndev, "PHY is unknown\n");
		return -EIO;
	}

	return 0;
}
/*
 * Caller holds hw_lock.
 */
static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
	else
		value = (MAC_CONFIG_REG_PE << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
	else
		value = (MAC_CONFIG_REG_SR << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
	else
		value = (MAC_CONFIG_REG_GM << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
	else
		value = (MAC_CONFIG_REG_FD << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value =
		    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
		     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
	else
		value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_fiber(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_SM0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_SM1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static int ql_is_auto_cfg(struct ql3_adapter *qdev)
{
	u16 reg;
	ql_mii_read_reg(qdev, 0x00, &reg);
	return (reg & 0x1000) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AC0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AC1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
		return 1;
	}

	netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
	return 0;
}

/*
 * ql_is_neg_pause() returns 1 if pause was negotiated to be on
 */
static int ql_is_neg_pause(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return ql_is_petbi_neg_pause(qdev);
	else
		return ql_is_phy_neg_pause(qdev);
}

static int ql_auto_neg_error(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AE0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AE1;
		break;
	}
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static u32 ql_get_link_speed(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return SPEED_1000;
	else
		return ql_phy_get_speed(qdev);
}

static int ql_is_link_full_dup(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return 1;
	else
		return ql_is_full_dup(qdev);
}
/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = ISP_CONTROL_LINK_DN_0;
		break;
	case 1:
		bitToCheck = ISP_CONTROL_LINK_DN_1;
		break;
	}

	temp =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	return (temp & bitToCheck) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	switch (qdev->mac_index) {
	case 0:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_0) |
				    (ISP_CONTROL_LINK_DN_0 << 16));
		break;

	case 1:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_1) |
				    (ISP_CONTROL_LINK_DN_1 << 16));
		break;

	default:
		return 1;
	}

	return 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_F1_ENABLED;
		break;
	case 1:
		bitToCheck = PORT_STATUS_F3_ENABLED;
		break;
	default:
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
			     "not link master\n");
		return 0;
	}

	netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
	return 1;
}

static void ql_phy_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;
	u16 portConfiguration;

	if (qdev->phyType == PHY_AGERE_ET1011C)
		ql_mii_write_reg(qdev, 0x13, 0x0000);
					/* turn off external loopback */

	if (qdev->mac_index == 0)
		portConfiguration =
			qdev->nvram_data.macCfg_port0.portConfiguration;
	else
		portConfiguration =
			qdev->nvram_data.macCfg_port1.portConfiguration;

	/* Some HBAs in the field are set to 0 and they need to
	   be reinterpreted with a default value */
	if (portConfiguration == 0)
		portConfiguration = PORT_CONFIG_DEFAULT;

	/* Set the 1000 advertisements */
	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_GIG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
		if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
			reg |= PHY_GIG_ADV_1000F;
		else
			reg |= PHY_GIG_ADV_1000H;
	}

	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
			    PHYAddr[qdev->mac_index]);

	/* Set the 10/100 & pause negotiation advertisements */
	ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_NEG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
		reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;

	if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100F;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10F;
	}

	if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100H;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10H;
	}

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
		reg |= 1;

	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, CONTROL_REG,
			    reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_init_ex(struct ql3_adapter *qdev)
{
	ql_phy_reset_ex(qdev);
	PHY_Setup(qdev);
	ql_phy_start_neg_ex(qdev);
}
/*
 * Caller holds hw_lock.
 */
static u32 ql_get_link_state(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp, linkState;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_UP0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_UP1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck)
		linkState = LS_UP;
	else
		linkState = LS_DOWN;

	return linkState;
}

static int ql_port_start(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
		return -1;
	}

	if (ql_is_fiber(qdev)) {
		ql_petbi_init(qdev);
	} else {
		/* Copper port */
		ql_phy_init_ex(qdev);
	}

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

static int ql_finish_auto_neg(struct ql3_adapter *qdev)
{

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (!ql_auto_neg_error(qdev)) {
		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			/* configure the MAC */
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Configuring link\n");
			ql_mac_cfg_soft_reset(qdev, 1);
			ql_mac_cfg_gig(qdev,
				       (ql_get_link_speed(qdev) ==
					SPEED_1000));
			ql_mac_cfg_full_dup(qdev,
					    ql_is_link_full_dup(qdev));
			ql_mac_cfg_pause(qdev,
					 ql_is_neg_pause(qdev));
			ql_mac_cfg_soft_reset(qdev, 0);

			/* enable the MAC */
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Enabling mac\n");
			ql_mac_enable(qdev, 1);
		}

		qdev->port_link_state = LS_UP;
		netif_start_queue(qdev->ndev);
		netif_carrier_on(qdev->ndev);
		netif_info(qdev, link, qdev->ndev,
			   "Link is up at %d Mbps, %s duplex\n",
			   ql_get_link_speed(qdev),
			   ql_is_link_full_dup(qdev) ? "full" : "half");

	} else {	/* Remote error detected */

		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Remote error detected. Calling ql_port_start()\n");
			/*
			 * ql_port_start() is shared code and needs
			 * to lock the PHY on its own.
			 */
			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
			if (ql_port_start(qdev))	/* Restart port */
				return -1;
			return 0;
		}
	}
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}
static void ql_link_state_machine_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, link_state_work.work);

	u32 curr_link_state;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	curr_link_state = ql_get_link_state(qdev);

	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
		netif_info(qdev, link, qdev->ndev,
			   "Reset in progress, skip processing link state\n");

		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		/* Restart timer on 1 second interval. */
		mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);

		return;
	}

	switch (qdev->port_link_state) {
	default:
		if (test_bit(QL_LINK_MASTER, &qdev->flags))
			ql_port_start(qdev);
		qdev->port_link_state = LS_DOWN;
		/* Fall Through */

	case LS_DOWN:
		if (curr_link_state == LS_UP) {
			netif_info(qdev, link, qdev->ndev, "Link is up\n");
			if (ql_is_auto_neg_complete(qdev))
				ql_finish_auto_neg(qdev);

			if (qdev->port_link_state == LS_UP)
				ql_link_down_detect_clear(qdev);

			qdev->port_link_state = LS_UP;
		}
		break;

	case LS_UP:
		/*
		 * See if the link is currently down or went down and came
		 * back up
		 */
		if (curr_link_state == LS_DOWN) {
			netif_info(qdev, link, qdev->ndev, "Link is down\n");
			qdev->port_link_state = LS_DOWN;
		}
		if (ql_link_down_detect(qdev))
			qdev->port_link_state = LS_DOWN;
		break;
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	/* Restart timer on 1 second interval. */
	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
}
/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_get_phy_owner(struct ql3_adapter *qdev)
{
	if (ql_this_adapter_controls_port(qdev))
		set_bit(QL_LINK_MASTER, &qdev->flags);
	else
		clear_bit(QL_LINK_MASTER, &qdev->flags);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_init_scan_mode(struct ql3_adapter *qdev)
{
	ql_mii_enable_scan_mode(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		if (ql_this_adapter_controls_port(qdev))
			ql_petbi_init_ex(qdev);
	} else {
		if (ql_this_adapter_controls_port(qdev))
			ql_phy_init_ex(qdev);
	}
}

/*
 * MII_Setup needs to be called before taking the PHY out of reset
 * so that the management interface clock speed can be set properly.
 * It would be better if we had a way to disable MDC until after the
 * PHY is out of reset, but we don't have that capability.
 */
static int ql_mii_setup(struct ql3_adapter *qdev)
{
	u32 reg;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (qdev->device_id == QL3032_DEVICE_ID)
		ql_write_page0_reg(qdev,
				   &port_regs->macMIIMgmtControlReg, 0x0f00000);

	/* Divide 125MHz clock by 28 to meet PHY timing requirements */
	reg = MAC_MII_CONTROL_CLK_SEL_DIV28;

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

#define SUPPORTED_OPTICAL_MODES	(SUPPORTED_1000baseT_Full |	\
				 SUPPORTED_FIBRE |		\
				 SUPPORTED_Autoneg)
#define SUPPORTED_TP_MODES	(SUPPORTED_10baseT_Half |	\
				 SUPPORTED_10baseT_Full |	\
				 SUPPORTED_100baseT_Half |	\
				 SUPPORTED_100baseT_Full |	\
				 SUPPORTED_1000baseT_Half |	\
				 SUPPORTED_1000baseT_Full |	\
				 SUPPORTED_Autoneg |		\
				 SUPPORTED_TP)

static u32 ql_supported_modes(struct ql3_adapter *qdev)
{
	if (test_bit(QL_LINK_OPTICAL, &qdev->flags))
		return SUPPORTED_OPTICAL_MODES;

	return SUPPORTED_TP_MODES;
}
static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;
	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_auto_cfg(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static u32 ql_get_speed(struct ql3_adapter *qdev)
{
	u32 status;
	unsigned long hw_flags;
	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_get_link_speed(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_full_dup(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;
	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_link_full_dup(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_link_ksettings(struct net_device *ndev,
				 struct ethtool_link_ksettings *cmd)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	u32 supported, advertising;

	supported = ql_supported_modes(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		cmd->base.port = PORT_FIBRE;
	} else {
		cmd->base.port = PORT_TP;
		cmd->base.phy_address = qdev->PHYAddr;
	}
	advertising = ql_supported_modes(qdev);
	cmd->base.autoneg = ql_get_auto_cfg_status(qdev);
	cmd->base.speed = ql_get_speed(qdev);
	cmd->base.duplex = ql_get_full_dup(qdev);

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

static void ql_get_drvinfo(struct net_device *ndev,
			   struct ethtool_drvinfo *drvinfo)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ql3xxx_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
		sizeof(drvinfo->bus_info));
}

static u32 ql_get_msglevel(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	return qdev->msg_enable;
}

static void ql_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	qdev->msg_enable = value;
}

static void ql_get_pauseparam(struct net_device *ndev,
			      struct ethtool_pauseparam *pause)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	u32 reg;
	if (qdev->mac_index == 0)
		reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
	else
		reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);

	pause->autoneg = ql_get_auto_cfg_status(qdev);
	pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2;
	pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;
}

static const struct ethtool_ops ql3xxx_ethtool_ops = {
	.get_drvinfo = ql_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_msglevel = ql_get_msglevel,
	.set_msglevel = ql_set_msglevel,
	.get_pauseparam = ql_get_pauseparam,
	.get_link_ksettings = ql_get_link_ksettings,
};
static int ql_populate_free_queue(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
	dma_addr_t map;
	int err;

	while (lrg_buf_cb) {
		if (!lrg_buf_cb->skb) {
			lrg_buf_cb->skb =
				netdev_alloc_skb(qdev->ndev,
						 qdev->lrg_buffer_len);
			if (unlikely(!lrg_buf_cb->skb)) {
				netdev_printk(KERN_DEBUG, qdev->ndev,
					      "Failed netdev_alloc_skb()\n");
				break;
			} else {
				/*
				 * We save some space to copy the ethhdr from
				 * first buffer
				 */
				skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
				map = pci_map_single(qdev->pdev,
						     lrg_buf_cb->skb->data,
						     qdev->lrg_buffer_len -
						     QL_HEADER_SPACE,
						     PCI_DMA_FROMDEVICE);

				err = pci_dma_mapping_error(qdev->pdev, map);
				if (err) {
					netdev_err(qdev->ndev,
						   "PCI mapping failed with error: %d\n",
						   err);
					dev_kfree_skb(lrg_buf_cb->skb);
					lrg_buf_cb->skb = NULL;
					break;
				}

				lrg_buf_cb->buf_phy_addr_low =
					cpu_to_le32(LS_64BITS(map));
				lrg_buf_cb->buf_phy_addr_high =
					cpu_to_le32(MS_64BITS(map));
				dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
				dma_unmap_len_set(lrg_buf_cb, maplen,
						  qdev->lrg_buffer_len -
						  QL_HEADER_SPACE);
				--qdev->lrg_buf_skb_check;
				if (!qdev->lrg_buf_skb_check)
					return 1;
			}
		}
		lrg_buf_cb = lrg_buf_cb->next;
	}
	return 0;
}
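/*
 * Return-value note (restating the loop above): 1 means every control
 * block that was missing an skb has been replenished (lrg_buf_skb_check
 * reached zero); 0 means an allocation or mapping failure left at least
 * one buffer unbacked, so the caller should not hand the queue back to
 * the hardware yet.
 */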
/*
 * Caller holds hw_lock.
 */
static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if (qdev->small_buf_release_cnt >= 16) {
		while (qdev->small_buf_release_cnt >= 16) {
			qdev->small_buf_q_producer_index++;

			if (qdev->small_buf_q_producer_index ==
			    NUM_SBUFQ_ENTRIES)
				qdev->small_buf_q_producer_index = 0;
			qdev->small_buf_release_cnt -= 8;
		}
		wmb();
		writel_relaxed(qdev->small_buf_q_producer_index,
			       &port_regs->CommonRegs.rxSmallQProducerIndex);
	}
}

/*
 * Caller holds hw_lock.
 */
static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct bufq_addr_element *lrg_buf_q_ele;
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if ((qdev->lrg_buf_free_count >= 8) &&
	    (qdev->lrg_buf_release_cnt >= 16)) {

		if (qdev->lrg_buf_skb_check)
			if (!ql_populate_free_queue(qdev))
				return;

		lrg_buf_q_ele = qdev->lrg_buf_next_free;

		while ((qdev->lrg_buf_release_cnt >= 16) &&
		       (qdev->lrg_buf_free_count >= 8)) {

			for (i = 0; i < 8; i++) {
				lrg_buf_cb =
				    ql_get_from_lrg_buf_free_list(qdev);
				lrg_buf_q_ele->addr_high =
				    lrg_buf_cb->buf_phy_addr_high;
				lrg_buf_q_ele->addr_low =
				    lrg_buf_cb->buf_phy_addr_low;
				lrg_buf_q_ele++;

				qdev->lrg_buf_release_cnt--;
			}

			qdev->lrg_buf_q_producer_index++;

			if (qdev->lrg_buf_q_producer_index ==
			    qdev->num_lbufq_entries)
				qdev->lrg_buf_q_producer_index = 0;

			if (qdev->lrg_buf_q_producer_index ==
			    (qdev->num_lbufq_entries - 1)) {
				lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
			}
		}
		wmb();
		qdev->lrg_buf_next_free = lrg_buf_q_ele;
		writel(qdev->lrg_buf_q_producer_index,
		       &port_regs->CommonRegs.rxLargeQProducerIndex);
	}
}
static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;

	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		netdev_warn(qdev->ndev,
			    "Frame too short but it was padded and sent\n");
	}

	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];

	/* Check the transmit response flags for any errors */
	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		netdev_err(qdev->ndev,
			   "Frame too short to be legal, frame not sent\n");

		qdev->ndev->stats.tx_errors++;
		goto frame_not_sent;
	}

	if (tx_cb->seg_count == 0) {
		netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
			   mac_rsp->transaction_id);

		qdev->ndev->stats.tx_errors++;
		goto invalid_seg_count;
	}

	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
			 dma_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);
	tx_cb->seg_count--;
	if (tx_cb->seg_count) {
		for (i = 1; i < tx_cb->seg_count; i++) {
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_cb->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_cb->map[i], maplen),
				       PCI_DMA_TODEVICE);
		}
	}
	qdev->ndev->stats.tx_packets++;
	qdev->ndev->stats.tx_bytes += tx_cb->skb->len;

frame_not_sent:
	dev_kfree_skb_irq(tx_cb->skb);
	tx_cb->skb = NULL;

invalid_seg_count:
	atomic_inc(&qdev->tx_count);
}
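/*
 * The two labels above form tiered cleanup: the success path falls
 * through frame_not_sent (freeing the skb) and then invalid_seg_count
 * (returning the transmit slot via tx_count), while each error path
 * jumps past exactly the statistics update or DMA unmapping it must
 * skip.
 */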
static void ql_get_sbuf(struct ql3_adapter *qdev)
{
	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
		qdev->small_buf_index = 0;
	qdev->small_buf_release_cnt++;
}

static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = NULL;
	lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
	qdev->lrg_buf_release_cnt++;
	if (++qdev->lrg_buf_index == qdev->num_large_buffers)
		qdev->lrg_buf_index = 0;
	return lrg_buf_cb;
}

/*
 * The difference between 3022 and 3032 for inbound completions:
 * 3022 uses two buffers per completion.  The first buffer contains
 * (some) header info, the second the remainder of the headers plus
 * the data.  For this chip we reserve some space at the top of the
 * receive buffer so that the header info in buffer one can be
 * prepended to buffer two.  Buffer two is then sent up while
 * buffer one is returned to the hardware to be reused.
 * 3032 receives all of its data and headers in one buffer for a
 * simpler process.  3032 also supports checksum verification as
 * can be seen in ql_process_macip_rx_intr().
 */
static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
				   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb;
	u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);

	/*
	 * Get the inbound address list (small buffer).
	 */
	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID)
		lrg_buf_cb1 = ql_get_lbuf(qdev);

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb = lrg_buf_cb2->skb;

	qdev->ndev->stats.rx_packets++;
	qdev->ndev->stats.rx_bytes += length;

	skb_put(skb, length);
	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(lrg_buf_cb2, mapaddr),
			 dma_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb->data);
	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, qdev->ndev);

	napi_gro_receive(&qdev->napi, skb);
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}
static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
				     struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb1 = NULL, *skb2;
	struct net_device *ndev = qdev->ndev;
	u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
	u16 size = 0;

	/*
	 * Get the inbound address list (small buffer).
	 */
	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID) {
		/* start of first buffer on 3022 */
		lrg_buf_cb1 = ql_get_lbuf(qdev);
		skb1 = lrg_buf_cb1->skb;
		size = ETH_HLEN;
		if (*((u16 *) skb1->data) != 0xFFFF)
			size += VLAN_ETH_HLEN - ETH_HLEN;
	}

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb2 = lrg_buf_cb2->skb;

	skb_put(skb2, length);	/* Just the second buffer length here. */
	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(lrg_buf_cb2, mapaddr),
			 dma_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb2->data);

	skb_checksum_none_assert(skb2);
	if (qdev->device_id == QL3022_DEVICE_ID) {
		/*
		 * Copy the ethhdr from first buffer to second. This
		 * is necessary for 3022 IP completions.
		 */
		skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
						 skb_push(skb2, size), size);
	} else {
		u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
		if (checksum &
		    (IB_IP_IOCB_RSP_3032_ICE |
		     IB_IP_IOCB_RSP_3032_CE)) {
			netdev_err(ndev,
				   "%s: Bad checksum for this %s packet, checksum = %x\n",
				   __func__,
				   ((checksum & IB_IP_IOCB_RSP_3032_TCP) ?
				    "TCP" : "UDP"), checksum);
		} else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
			   (checksum & IB_IP_IOCB_RSP_3032_UDP &&
			    !(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
			skb2->ip_summed = CHECKSUM_UNNECESSARY;
		}
	}
	skb2->protocol = eth_type_trans(skb2, qdev->ndev);

	napi_gro_receive(&qdev->napi, skb2);
	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += length;
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}
static int ql_tx_rx_clean(struct ql3_adapter *qdev, int budget)
{
	struct net_rsp_iocb *net_rsp;
	struct net_device *ndev = qdev->ndev;
	int work_done = 0;

	/* While there are entries in the completion queue. */
	while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
		qdev->rsp_consumer_index) && (work_done < budget)) {

		net_rsp = qdev->rsp_current;
		rmb();
		/*
		 * Fix 3032 chip's undocumented "feature" where bit-8 is set
		 * if the inbound completion is for a VLAN.
		 */
		if (qdev->device_id == QL3032_DEVICE_ID)
			net_rsp->opcode &= 0x7f;
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_IOCB_FN0:
		case OPCODE_OB_MAC_IOCB_FN2:
			ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
					       net_rsp);
			break;

		case OPCODE_IB_MAC_IOCB:
		case OPCODE_IB_3032_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
					       net_rsp);
			work_done++;
			break;

		case OPCODE_IB_IP_IOCB:
		case OPCODE_IB_3032_IP_IOCB:
			ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
						 net_rsp);
			work_done++;
			break;
		default: {
			u32 *tmp = (u32 *)net_rsp;
			netdev_err(ndev,
				   "Hit default case, not handled!\n"
				   "	dropping the packet, opcode = %x\n"
				   "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
				   net_rsp->opcode,
				   (unsigned long int)tmp[0],
				   (unsigned long int)tmp[1],
				   (unsigned long int)tmp[2],
				   (unsigned long int)tmp[3]);
		}
		}

		qdev->rsp_consumer_index++;

		if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
			qdev->rsp_consumer_index = 0;
			qdev->rsp_current = qdev->rsp_q_virt_addr;
		} else {
			qdev->rsp_current++;
		}
	}

	return work_done;
}
2180 static int ql_poll(struct napi_struct *napi, int budget)
2182 struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
2183 struct ql3xxx_port_registers __iomem *port_regs =
2184 qdev->mem_map_registers;
2185 int work_done;
2187 work_done = ql_tx_rx_clean(qdev, budget);
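/*
 * The poll routine follows the standard NAPI contract: only when the
 * budget was not exhausted (and napi_complete_done() agrees polling may
 * stop) does the driver publish its buffer-queue producer indices,
 * acknowledge the response queue consumer index and re-enable
 * interrupts; otherwise the core simply calls ql_poll() again.
 */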
2189 if (work_done < budget && napi_complete_done(napi, work_done)) {
2190 unsigned long flags;
2192 spin_lock_irqsave(&qdev->hw_lock, flags);
2193 ql_update_small_bufq_prod_index(qdev);
2194 ql_update_lrg_bufq_prod_index(qdev);
2195 writel(qdev->rsp_consumer_index,
2196 &port_regs->CommonRegs.rspQConsumerIndex);
2197 spin_unlock_irqrestore(&qdev->hw_lock, flags);
2199 ql_enable_interrupts(qdev);
2201 return work_done;
2204 static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
2207 struct net_device *ndev = dev_id;
2208 struct ql3_adapter *qdev = netdev_priv(ndev);
2209 struct ql3xxx_port_registers __iomem *port_regs =
2210 qdev->mem_map_registers;
2211 u32 value;
2212 int handled = 1;
2213 u32 var;
2215 value = ql_read_common_reg_l(qdev,
2216 &port_regs->CommonRegs.ispControlStatus);
2218 if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
2219 spin_lock(&qdev->adapter_lock);
2220 netif_stop_queue(qdev->ndev);
2221 netif_carrier_off(qdev->ndev);
2222 ql_disable_interrupts(qdev);
2223 qdev->port_link_state = LS_DOWN;
2224 set_bit(QL_RESET_ACTIVE, &qdev->flags);
2226 if (value & ISP_CONTROL_FE) {
2228 /* Chip Fatal Error. */
2230 var =
2231 ql_read_page0_reg_l(qdev,
2232 &port_regs->PortFatalErrStatus);
2233 netdev_warn(ndev,
2234 "Resetting chip. PortFatalErrStatus register = 0x%x\n",
2235 var);
2236 set_bit(QL_RESET_START, &qdev->flags);
2237 } else {
2239 /* Soft Reset Requested. */
2241 set_bit(QL_RESET_PER_SCSI, &qdev->flags);
2242 netdev_err(ndev,
2243 "Another function issued a reset to the chip. ISR value = %x\n",
2244 value);
2246 queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
2247 spin_unlock(&qdev->adapter_lock);
2248 } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
2249 ql_disable_interrupts(qdev);
2250 if (likely(napi_schedule_prep(&qdev->napi)))
2251 __napi_schedule(&qdev->napi);
2252 } else
2253 return IRQ_NONE;
2255 return IRQ_RETVAL(handled);
2259 /* Get the total number of segments needed for the given number of fragments.
2260 * This is necessary because outbound address lists (OAL) will be used when
2261 * more than two frags are given. Each address list has 5 addr/len pairs.
2262 * The 5th pair in each OAL is used to point to the next OAL if more frags
2263 * are coming. That is why the frags:segment count ratio is not linear. */
2265 static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags)
2267 if (qdev->device_id == QL3022_DEVICE_ID)
2268 return 1;
2270 if (frags <= 2)
2271 return frags + 1;
2272 else if (frags <= 6)
2273 return frags + 2;
2274 else if (frags <= 10)
2275 return frags + 3;
2276 else if (frags <= 14)
2277 return frags + 4;
2278 else if (frags <= 18)
2279 return frags + 5;
2280 return -1;
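/*
 * A worked example of the mapping above: 4 fragments yield 4 + 2 = 6
 * segments -- one for the linear skb data, four for the fragments and
 * one continuation entry. The IOCB's three addr/len pairs then hold the
 * linear data, the first fragment and a pointer to an OAL whose entries
 * hold the remaining three fragments. Each additional OAL costs one more
 * continuation entry, which is why the segment count steps up after
 * 2, 6, 10, 14 and 18 fragments.
 */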
2283 static void ql_hw_csum_setup(const struct sk_buff *skb,
2284 struct ob_mac_iocb_req *mac_iocb_ptr)
2286 const struct iphdr *ip = ip_hdr(skb);
2288 mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb);
2289 mac_iocb_ptr->ip_hdr_len = ip->ihl;
2291 if (ip->protocol == IPPROTO_TCP) {
2292 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
2293 OB_3032MAC_IOCB_REQ_IC;
2294 } else {
2295 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
2296 OB_3032MAC_IOCB_REQ_IC;
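/*
 * As an illustration (typical values, not figures from the hardware
 * manual): a TCP-over-IPv4 skb sent with CHECKSUM_PARTIAL on a plain
 * Ethernet frame would have ip_hdr_off = 14 and ip_hdr_len = 5
 * (20 bytes, no IP options) and gets TC | IC so the 3032 inserts both
 * the TCP and IP checksums; the non-TCP case gets UC | IC for UDP
 * instead.
 */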
2302 /* Map the buffers for this transmit.
2303 * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success. */
2305 static int ql_send_map(struct ql3_adapter *qdev,
2306 struct ob_mac_iocb_req *mac_iocb_ptr,
2307 struct ql_tx_buf_cb *tx_cb,
2308 struct sk_buff *skb)
2310 struct oal *oal;
2311 struct oal_entry *oal_entry;
2312 int len = skb_headlen(skb);
2313 dma_addr_t map;
2314 int err;
2315 int completed_segs, i;
2316 int seg_cnt, seg = 0;
2317 int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
2319 seg_cnt = tx_cb->seg_count;
2321 /* Map the skb buffer first. */
2323 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
2325 err = pci_dma_mapping_error(qdev->pdev, map);
2326 if (err) {
2327 netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",
2328 err);
2330 return NETDEV_TX_BUSY;
2333 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2334 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2335 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2336 oal_entry->len = cpu_to_le32(len);
2337 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2338 dma_unmap_len_set(&tx_cb->map[seg], maplen, len);
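/*
 * LS_64BITS()/MS_64BITS() split the 64-bit DMA address into the two
 * 32-bit halves the hardware expects; for example, a mapping at
 * 0x0000000123456789 would be written as dma_lo = 0x23456789 and
 * dma_hi = 0x00000001.
 */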
2339 seg++;
2341 if (seg_cnt == 1) {
2342 /* Terminate the last segment. */
2343 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
2344 return NETDEV_TX_OK;
2346 oal = tx_cb->oal;
2347 for (completed_segs = 0;
2348 completed_segs < frag_cnt;
2349 completed_segs++, seg++) {
2350 skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
2351 oal_entry++;
2353 /* Check for continuation requirements.
2354 * It's strange but necessary.
2355 * Continuation entry points to outbound address list. */
2357 if ((seg == 2 && seg_cnt > 3) ||
2358 (seg == 7 && seg_cnt > 8) ||
2359 (seg == 12 && seg_cnt > 13) ||
2360 (seg == 17 && seg_cnt > 18)) {
2361 map = pci_map_single(qdev->pdev, oal,
2362 sizeof(struct oal),
2363 PCI_DMA_TODEVICE);
2365 err = pci_dma_mapping_error(qdev->pdev, map);
2366 if (err) {
2367 netdev_err(qdev->ndev,
2368 "PCI mapping outbound address list with error: %d\n",
2369 err);
2370 goto map_error;
2373 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2374 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2375 oal_entry->len = cpu_to_le32(sizeof(struct oal) |
2376 OAL_CONT_ENTRY);
2377 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2378 dma_unmap_len_set(&tx_cb->map[seg], maplen,
2379 sizeof(struct oal));
2380 oal_entry = (struct oal_entry *)oal;
2381 oal++;
2382 seg++;
2385 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
2386 DMA_TO_DEVICE);
2388 err = dma_mapping_error(&qdev->pdev->dev, map);
2389 if (err) {
2390 netdev_err(qdev->ndev,
2391 "PCI mapping frags failed with error: %d\n",
2392 err);
2393 goto map_error;
2396 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2397 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2398 oal_entry->len = cpu_to_le32(skb_frag_size(frag));
2399 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2400 dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag));
2402 /* Terminate the last segment. */
2403 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
2404 return NETDEV_TX_OK;
2406 map_error:
2407 /* A PCI mapping failed, so we need to back out: traverse the
2408 * OALs and associated pages that have already been mapped and
2409 * unmap them to clean up properly. */
2412 seg = 1;
2413 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2414 oal = tx_cb->oal;
2415 for (i = 0; i < completed_segs; i++, seg++) {
2416 oal_entry++;
2419 /* Check for continuation requirements.
2420 * It's strange but necessary. */
2423 if ((seg == 2 && seg_cnt > 3) ||
2424 (seg == 7 && seg_cnt > 8) ||
2425 (seg == 12 && seg_cnt > 13) ||
2426 (seg == 17 && seg_cnt > 18)) {
2427 pci_unmap_single(qdev->pdev,
2428 dma_unmap_addr(&tx_cb->map[seg], mapaddr),
2429 dma_unmap_len(&tx_cb->map[seg], maplen),
2430 PCI_DMA_TODEVICE);
2431 oal++;
2432 seg++;
2435 pci_unmap_page(qdev->pdev,
2436 dma_unmap_addr(&tx_cb->map[seg], mapaddr),
2437 dma_unmap_len(&tx_cb->map[seg], maplen),
2438 PCI_DMA_TODEVICE);
2441 pci_unmap_single(qdev->pdev,
2442 dma_unmap_addr(&tx_cb->map[0], mapaddr),
2443 dma_unmap_len(&tx_cb->map[0], maplen),
2444 PCI_DMA_TODEVICE);
2446 return NETDEV_TX_BUSY;
2451 /* The difference between 3022 and 3032 sends:
2452 * 3022 only supports a simple single segment transmission.
2453 * 3032 supports checksumming and scatter/gather lists (fragments).
2454 * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
2455 * in the IOCB plus a chain of outbound address lists (OAL) that
2456 * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
2457 * will be used to point to an OAL when more ALP entries are required.
2458 * The IOCB is always the top of the chain followed by one or more
2459 * OALs (when necessary). */
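/*
 * For example (a sketch, not an exhaustive description): a 3032 send
 * with linear data plus four page fragments -- six segments per
 * ql_get_seg_count() -- lays out as IOCB ALP1 = linear data,
 * ALP2 = frag 0, ALP3 = OAL_CONT_ENTRY pointing at the OAL, and
 * OAL entries 1-3 = frags 1-3, the last flagged OAL_LAST_ENTRY.
 */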
2461 static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
2462 struct net_device *ndev)
2464 struct ql3_adapter *qdev = netdev_priv(ndev);
2465 struct ql3xxx_port_registers __iomem *port_regs =
2466 qdev->mem_map_registers;
2467 struct ql_tx_buf_cb *tx_cb;
2468 u32 tot_len = skb->len;
2469 struct ob_mac_iocb_req *mac_iocb_ptr;
2471 if (unlikely(atomic_read(&qdev->tx_count) < 2))
2472 return NETDEV_TX_BUSY;
2474 tx_cb = &qdev->tx_buf[qdev->req_producer_index];
2475 tx_cb->seg_count = ql_get_seg_count(qdev,
2476 skb_shinfo(skb)->nr_frags);
2477 if (tx_cb->seg_count == -1) {
2478 netdev_err(ndev, "%s: invalid segment count!\n", __func__);
dev_kfree_skb_any(skb);
2479 return NETDEV_TX_OK;
2482 mac_iocb_ptr = tx_cb->queue_entry;
2483 memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
2484 mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
2485 mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
2486 mac_iocb_ptr->flags |= qdev->mb_bit_mask;
2487 mac_iocb_ptr->transaction_id = qdev->req_producer_index;
2488 mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
2489 tx_cb->skb = skb;
2490 if (qdev->device_id == QL3032_DEVICE_ID &&
2491 skb->ip_summed == CHECKSUM_PARTIAL)
2492 ql_hw_csum_setup(skb, mac_iocb_ptr);
2494 if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
2495 netdev_err(ndev, "%s: Could not map the segments!\n", __func__);
2496 return NETDEV_TX_BUSY;
2499 wmb();
2500 qdev->req_producer_index++;
2501 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
2502 qdev->req_producer_index = 0;
2503 wmb();
2504 ql_write_common_reg_l(qdev,
2505 &port_regs->CommonRegs.reqQProducerIndex,
2506 qdev->req_producer_index);
2508 netif_printk(qdev, tx_queued, KERN_DEBUG, ndev,
2509 "tx queued, slot %d, len %d\n",
2510 qdev->req_producer_index, skb->len);
2512 atomic_dec(&qdev->tx_count);
2513 return NETDEV_TX_OK;
2516 static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2518 qdev->req_q_size =
2519 (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
2521 qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
2523 /* The barrier is required to ensure that the request and response
2524 * queue address writes reach the registers. */
2526 wmb();
2528 qdev->req_q_virt_addr =
2529 pci_alloc_consistent(qdev->pdev,
2530 (size_t) qdev->req_q_size,
2531 &qdev->req_q_phy_addr);
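/*
 * The alignment test below assumes req_q_size is a power of two:
 * "addr & (size - 1)" is zero only when the queue starts on a
 * size-aligned boundary, e.g. for a 4096-byte queue the low 12 bits
 * of the DMA address must be clear, which the page-granular
 * allocation above normally guarantees.
 */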
2533 if ((qdev->req_q_virt_addr == NULL) ||
2534 LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
2535 netdev_err(qdev->ndev, "reqQ failed\n");
2536 return -ENOMEM;
2539 qdev->rsp_q_virt_addr =
2540 pci_alloc_consistent(qdev->pdev,
2541 (size_t) qdev->rsp_q_size,
2542 &qdev->rsp_q_phy_addr);
2544 if ((qdev->rsp_q_virt_addr == NULL) ||
2545 LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
2546 netdev_err(qdev->ndev, "rspQ allocation failed\n");
2547 pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
2548 qdev->req_q_virt_addr,
2549 qdev->req_q_phy_addr);
2550 return -ENOMEM;
2553 set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
2555 return 0;
2558 static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
2560 if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) {
2561 netdev_info(qdev->ndev, "Already done\n");
2562 return;
2565 pci_free_consistent(qdev->pdev,
2566 qdev->req_q_size,
2567 qdev->req_q_virt_addr, qdev->req_q_phy_addr);
2569 qdev->req_q_virt_addr = NULL;
2571 pci_free_consistent(qdev->pdev,
2572 qdev->rsp_q_size,
2573 qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
2575 qdev->rsp_q_virt_addr = NULL;
2577 clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
2580 static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2582 /* Create Large Buffer Queue */
2583 qdev->lrg_buf_q_size =
2584 qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
2585 if (qdev->lrg_buf_q_size < PAGE_SIZE)
2586 qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
2587 else
2588 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
2590 qdev->lrg_buf = kmalloc_array(qdev->num_large_buffers,
2591 sizeof(struct ql_rcv_buf_cb),
2592 GFP_KERNEL);
2593 if (qdev->lrg_buf == NULL)
2594 return -ENOMEM;
2596 qdev->lrg_buf_q_alloc_virt_addr =
2597 pci_alloc_consistent(qdev->pdev,
2598 qdev->lrg_buf_q_alloc_size,
2599 &qdev->lrg_buf_q_alloc_phy_addr);
2601 if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
2602 netdev_err(qdev->ndev, "lBufQ failed\n");
2603 return -ENOMEM;
2605 qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
2606 qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
2608 /* Create Small Buffer Queue */
2609 qdev->small_buf_q_size =
2610 NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
2611 if (qdev->small_buf_q_size < PAGE_SIZE)
2612 qdev->small_buf_q_alloc_size = PAGE_SIZE;
2613 else
2614 qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
2616 qdev->small_buf_q_alloc_virt_addr =
2617 pci_alloc_consistent(qdev->pdev,
2618 qdev->small_buf_q_alloc_size,
2619 &qdev->small_buf_q_alloc_phy_addr);
2621 if (qdev->small_buf_q_alloc_virt_addr == NULL) {
2622 netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
2623 pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
2624 qdev->lrg_buf_q_alloc_virt_addr,
2625 qdev->lrg_buf_q_alloc_phy_addr);
2626 return -ENOMEM;
2629 qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
2630 qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
2631 set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
2632 return 0;
2635 static void ql_free_buffer_queues(struct ql3_adapter *qdev)
2637 if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
2638 netdev_info(qdev->ndev, "Already done\n");
2639 return;
2641 kfree(qdev->lrg_buf);
2642 pci_free_consistent(qdev->pdev,
2643 qdev->lrg_buf_q_alloc_size,
2644 qdev->lrg_buf_q_alloc_virt_addr,
2645 qdev->lrg_buf_q_alloc_phy_addr);
2647 qdev->lrg_buf_q_virt_addr = NULL;
2649 pci_free_consistent(qdev->pdev,
2650 qdev->small_buf_q_alloc_size,
2651 qdev->small_buf_q_alloc_virt_addr,
2652 qdev->small_buf_q_alloc_phy_addr);
2654 qdev->small_buf_q_virt_addr = NULL;
2656 clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
2659 static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
2661 int i;
2662 struct bufq_addr_element *small_buf_q_entry;
2664 /* Currently we allocate one chunk of memory and use it for the small buffers */
2665 qdev->small_buf_total_size =
2666 (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
2667 QL_SMALL_BUFFER_SIZE);
2669 qdev->small_buf_virt_addr =
2670 pci_alloc_consistent(qdev->pdev,
2671 qdev->small_buf_total_size,
2672 &qdev->small_buf_phy_addr);
2674 if (qdev->small_buf_virt_addr == NULL) {
2675 netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
2676 return -ENOMEM;
2679 qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
2680 qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
2682 small_buf_q_entry = qdev->small_buf_q_virt_addr;
2684 /* Initialize the small buffer queue. */
2685 for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
2686 small_buf_q_entry->addr_high =
2687 cpu_to_le32(qdev->small_buf_phy_addr_high);
2688 small_buf_q_entry->addr_low =
2689 cpu_to_le32(qdev->small_buf_phy_addr_low +
2690 (i * QL_SMALL_BUFFER_SIZE));
2691 small_buf_q_entry++;
2693 qdev->small_buf_index = 0;
2694 set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags);
2695 return 0;
2698 static void ql_free_small_buffers(struct ql3_adapter *qdev)
2700 if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) {
2701 netdev_info(qdev->ndev, "Already done\n");
2702 return;
2704 if (qdev->small_buf_virt_addr != NULL) {
2705 pci_free_consistent(qdev->pdev,
2706 qdev->small_buf_total_size,
2707 qdev->small_buf_virt_addr,
2708 qdev->small_buf_phy_addr);
2710 qdev->small_buf_virt_addr = NULL;
2714 static void ql_free_large_buffers(struct ql3_adapter *qdev)
2716 int i = 0;
2717 struct ql_rcv_buf_cb *lrg_buf_cb;
2719 for (i = 0; i < qdev->num_large_buffers; i++) {
2720 lrg_buf_cb = &qdev->lrg_buf[i];
2721 if (lrg_buf_cb->skb) {
2722 dev_kfree_skb(lrg_buf_cb->skb);
2723 pci_unmap_single(qdev->pdev,
2724 dma_unmap_addr(lrg_buf_cb, mapaddr),
2725 dma_unmap_len(lrg_buf_cb, maplen),
2726 PCI_DMA_FROMDEVICE);
2727 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2728 } else {
2729 break;
2734 static void ql_init_large_buffers(struct ql3_adapter *qdev)
2736 int i;
2737 struct ql_rcv_buf_cb *lrg_buf_cb;
2738 struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
2740 for (i = 0; i < qdev->num_large_buffers; i++) {
2741 lrg_buf_cb = &qdev->lrg_buf[i];
2742 buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
2743 buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
2744 buf_addr_ele++;
2746 qdev->lrg_buf_index = 0;
2747 qdev->lrg_buf_skb_check = 0;
2750 static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2752 int i;
2753 struct ql_rcv_buf_cb *lrg_buf_cb;
2754 struct sk_buff *skb;
2755 dma_addr_t map;
2756 int err;
2758 for (i = 0; i < qdev->num_large_buffers; i++) {
2759 lrg_buf_cb = &qdev->lrg_buf[i];
2760 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2762 skb = netdev_alloc_skb(qdev->ndev,
2763 qdev->lrg_buffer_len);
2764 if (unlikely(!skb)) {
2765 /* Better luck next round */
2766 netdev_err(qdev->ndev,
2767 "large buff alloc failed for %d bytes at index %d\n",
2768 qdev->lrg_buffer_len * 2, i);
2769 ql_free_large_buffers(qdev);
2770 return -ENOMEM;
2771 } else {
2772 lrg_buf_cb->index = i;
2774 /* We save some space to copy the ethhdr from the first
2775 * buffer. */
2777 skb_reserve(skb, QL_HEADER_SPACE);
2778 map = pci_map_single(qdev->pdev,
2779 skb->data,
2780 qdev->lrg_buffer_len -
2781 QL_HEADER_SPACE,
2782 PCI_DMA_FROMDEVICE);
2784 err = pci_dma_mapping_error(qdev->pdev, map);
2785 if (err) {
2786 netdev_err(qdev->ndev,
2787 "PCI mapping failed with error: %d\n",
2788 err);
2789 dev_kfree_skb_irq(skb);
2790 ql_free_large_buffers(qdev);
2791 return -ENOMEM;
2794 lrg_buf_cb->skb = skb;
2795 dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
2796 dma_unmap_len_set(lrg_buf_cb, maplen,
2797 qdev->lrg_buffer_len -
2798 QL_HEADER_SPACE);
2799 lrg_buf_cb->buf_phy_addr_low =
2800 cpu_to_le32(LS_64BITS(map));
2801 lrg_buf_cb->buf_phy_addr_high =
2802 cpu_to_le32(MS_64BITS(map));
2805 return 0;
2808 static void ql_free_send_free_list(struct ql3_adapter *qdev)
2810 struct ql_tx_buf_cb *tx_cb;
2811 int i;
2813 tx_cb = &qdev->tx_buf[0];
2814 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2815 kfree(tx_cb->oal);
2816 tx_cb->oal = NULL;
2817 tx_cb++;
2821 static int ql_create_send_free_list(struct ql3_adapter *qdev)
2823 struct ql_tx_buf_cb *tx_cb;
2824 int i;
2825 struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr;
2827 /* Create free list of transmit buffers */
2828 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2830 tx_cb = &qdev->tx_buf[i];
2831 tx_cb->skb = NULL;
2832 tx_cb->queue_entry = req_q_curr;
2833 req_q_curr++;
2834 tx_cb->oal = kmalloc(512, GFP_KERNEL);
2835 if (tx_cb->oal == NULL)
2836 return -ENOMEM;
2838 return 0;
2841 static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
2843 if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
2844 qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
2845 qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
2846 } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
2848 /* Bigger buffers, so fewer of them. */
2850 qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
2851 qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
2852 } else {
2853 netdev_err(qdev->ndev, "Invalid mtu size: %d. Only %d and %d are accepted.\n",
2854 qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE);
2855 return -ENOMEM;
2857 qdev->num_large_buffers =
2858 qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
2859 qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
2860 qdev->max_frame_size =
2861 (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
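/*
 * Rough arithmetic, assuming the conventional values (NORMAL_MTU_SIZE
 * of 1500, VLAN_ETH_HLEN of 18): lrg_buffer_len becomes
 * 1500 + 18 + VLAN_ID_LEN + QL_HEADER_SPACE, while max_frame_size
 * strips QL_HEADER_SPACE back off and adds the CRC, since the header
 * space is reserved skb headroom (see ql_alloc_large_buffers()), not
 * received data.
 */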
2864 /* First allocate a page of shared memory and use it for shadow
2865 * locations of Network Request Queue Consumer Address Register and
2866 * Network Completion Queue Producer Index Register. */
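/*
 * Resulting layout of that page, per the pointer arithmetic below:
 * offset 0 holds the request queue consumer index shadow and offset 8
 * the response queue producer index shadow, so both shadows share a
 * single DMA allocation and the chip can update them without the
 * driver having to poll its registers.
 */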
2868 qdev->shadow_reg_virt_addr =
2869 pci_alloc_consistent(qdev->pdev,
2870 PAGE_SIZE, &qdev->shadow_reg_phy_addr);
2872 if (qdev->shadow_reg_virt_addr != NULL) {
2873 qdev->preq_consumer_index = qdev->shadow_reg_virt_addr;
2874 qdev->req_consumer_index_phy_addr_high =
2875 MS_64BITS(qdev->shadow_reg_phy_addr);
2876 qdev->req_consumer_index_phy_addr_low =
2877 LS_64BITS(qdev->shadow_reg_phy_addr);
2879 qdev->prsp_producer_index =
2880 (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
2881 qdev->rsp_producer_index_phy_addr_high =
2882 qdev->req_consumer_index_phy_addr_high;
2883 qdev->rsp_producer_index_phy_addr_low =
2884 qdev->req_consumer_index_phy_addr_low + 8;
2885 } else {
2886 netdev_err(qdev->ndev, "shadowReg Alloc failed\n");
2887 return -ENOMEM;
2890 if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
2891 netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n");
2892 goto err_req_rsp;
2895 if (ql_alloc_buffer_queues(qdev) != 0) {
2896 netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n");
2897 goto err_buffer_queues;
2900 if (ql_alloc_small_buffers(qdev) != 0) {
2901 netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n");
2902 goto err_small_buffers;
2905 if (ql_alloc_large_buffers(qdev) != 0) {
2906 netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n");
2907 goto err_large_buffers;
2910 /* Initialize the large buffer queue. */
2911 ql_init_large_buffers(qdev);
2912 if (ql_create_send_free_list(qdev))
2913 goto err_free_list;
2915 qdev->rsp_current = qdev->rsp_q_virt_addr;
2917 return 0;
2918 err_free_list:
2919 ql_free_send_free_list(qdev);
ql_free_large_buffers(qdev);
err_large_buffers:
ql_free_small_buffers(qdev);
2920 err_small_buffers:
2921 ql_free_buffer_queues(qdev);
2922 err_buffer_queues:
2923 ql_free_net_req_rsp_queues(qdev);
2924 err_req_rsp:
2925 pci_free_consistent(qdev->pdev,
2926 PAGE_SIZE,
2927 qdev->shadow_reg_virt_addr,
2928 qdev->shadow_reg_phy_addr);
2930 return -ENOMEM;
2933 static void ql_free_mem_resources(struct ql3_adapter *qdev)
2935 ql_free_send_free_list(qdev);
2936 ql_free_large_buffers(qdev);
2937 ql_free_small_buffers(qdev);
2938 ql_free_buffer_queues(qdev);
2939 ql_free_net_req_rsp_queues(qdev);
2940 if (qdev->shadow_reg_virt_addr != NULL) {
2941 pci_free_consistent(qdev->pdev,
2942 PAGE_SIZE,
2943 qdev->shadow_reg_virt_addr,
2944 qdev->shadow_reg_phy_addr);
2945 qdev->shadow_reg_virt_addr = NULL;
2949 static int ql_init_misc_registers(struct ql3_adapter *qdev)
2951 struct ql3xxx_local_ram_registers __iomem *local_ram =
2952 (void __iomem *)qdev->mem_map_registers;
2954 if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
2955 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
2956 2) << 4))
2957 return -1;
2959 ql_write_page2_reg(qdev,
2960 &local_ram->bufletSize, qdev->nvram_data.bufletSize);
2962 ql_write_page2_reg(qdev,
2963 &local_ram->maxBufletCount,
2964 qdev->nvram_data.bufletCount);
2966 ql_write_page2_reg(qdev,
2967 &local_ram->freeBufletThresholdLow,
2968 (qdev->nvram_data.tcpWindowThreshold25 << 16) |
2969 (qdev->nvram_data.tcpWindowThreshold0));
2971 ql_write_page2_reg(qdev,
2972 &local_ram->freeBufletThresholdHigh,
2973 qdev->nvram_data.tcpWindowThreshold50);
2975 ql_write_page2_reg(qdev,
2976 &local_ram->ipHashTableBase,
2977 (qdev->nvram_data.ipHashTableBaseHi << 16) |
2978 qdev->nvram_data.ipHashTableBaseLo);
2979 ql_write_page2_reg(qdev,
2980 &local_ram->ipHashTableCount,
2981 qdev->nvram_data.ipHashTableSize);
2982 ql_write_page2_reg(qdev,
2983 &local_ram->tcpHashTableBase,
2984 (qdev->nvram_data.tcpHashTableBaseHi << 16) |
2985 qdev->nvram_data.tcpHashTableBaseLo);
2986 ql_write_page2_reg(qdev,
2987 &local_ram->tcpHashTableCount,
2988 qdev->nvram_data.tcpHashTableSize);
2989 ql_write_page2_reg(qdev,
2990 &local_ram->ncbBase,
2991 (qdev->nvram_data.ncbTableBaseHi << 16) |
2992 qdev->nvram_data.ncbTableBaseLo);
2993 ql_write_page2_reg(qdev,
2994 &local_ram->maxNcbCount,
2995 qdev->nvram_data.ncbTableSize);
2996 ql_write_page2_reg(qdev,
2997 &local_ram->drbBase,
2998 (qdev->nvram_data.drbTableBaseHi << 16) |
2999 qdev->nvram_data.drbTableBaseLo);
3000 ql_write_page2_reg(qdev,
3001 &local_ram->maxDrbCount,
3002 qdev->nvram_data.drbTableSize);
3003 ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
3004 return 0;
3007 static int ql_adapter_initialize(struct ql3_adapter *qdev)
3009 u32 value;
3010 struct ql3xxx_port_registers __iomem *port_regs =
3011 qdev->mem_map_registers;
3012 u32 __iomem *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
3013 struct ql3xxx_host_memory_registers __iomem *hmem_regs =
3014 (void __iomem *)port_regs;
3015 u32 delay = 10;
3016 int status = 0;
3018 if (ql_mii_setup(qdev))
3019 return -1;
3021 /* Bring the PHY out of reset */
3022 ql_write_common_reg(qdev, spir,
3023 (ISP_SERIAL_PORT_IF_WE |
3024 (ISP_SERIAL_PORT_IF_WE << 16)));
3025 /* Give the PHY time to come out of reset. */
3026 mdelay(100);
3027 qdev->port_link_state = LS_DOWN;
3028 netif_carrier_off(qdev->ndev);
3030 /* V2 chip fix for ARS-39168. */
3031 ql_write_common_reg(qdev, spir,
3032 (ISP_SERIAL_PORT_IF_SDE |
3033 (ISP_SERIAL_PORT_IF_SDE << 16)));
3035 /* Request Queue Registers */
3036 *((u32 *)(qdev->preq_consumer_index)) = 0;
3037 atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
3038 qdev->req_producer_index = 0;
3040 ql_write_page1_reg(qdev,
3041 &hmem_regs->reqConsumerIndexAddrHigh,
3042 qdev->req_consumer_index_phy_addr_high);
3043 ql_write_page1_reg(qdev,
3044 &hmem_regs->reqConsumerIndexAddrLow,
3045 qdev->req_consumer_index_phy_addr_low);
3047 ql_write_page1_reg(qdev,
3048 &hmem_regs->reqBaseAddrHigh,
3049 MS_64BITS(qdev->req_q_phy_addr));
3050 ql_write_page1_reg(qdev,
3051 &hmem_regs->reqBaseAddrLow,
3052 LS_64BITS(qdev->req_q_phy_addr));
3053 ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
3055 /* Response Queue Registers */
3056 *((__le16 *) (qdev->prsp_producer_index)) = 0;
3057 qdev->rsp_consumer_index = 0;
3058 qdev->rsp_current = qdev->rsp_q_virt_addr;
3060 ql_write_page1_reg(qdev,
3061 &hmem_regs->rspProducerIndexAddrHigh,
3062 qdev->rsp_producer_index_phy_addr_high);
3064 ql_write_page1_reg(qdev,
3065 &hmem_regs->rspProducerIndexAddrLow,
3066 qdev->rsp_producer_index_phy_addr_low);
3068 ql_write_page1_reg(qdev,
3069 &hmem_regs->rspBaseAddrHigh,
3070 MS_64BITS(qdev->rsp_q_phy_addr));
3072 ql_write_page1_reg(qdev,
3073 &hmem_regs->rspBaseAddrLow,
3074 LS_64BITS(qdev->rsp_q_phy_addr));
3076 ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
3078 /* Large Buffer Queue */
3079 ql_write_page1_reg(qdev,
3080 &hmem_regs->rxLargeQBaseAddrHigh,
3081 MS_64BITS(qdev->lrg_buf_q_phy_addr));
3083 ql_write_page1_reg(qdev,
3084 &hmem_regs->rxLargeQBaseAddrLow,
3085 LS_64BITS(qdev->lrg_buf_q_phy_addr));
3087 ql_write_page1_reg(qdev,
3088 &hmem_regs->rxLargeQLength,
3089 qdev->num_lbufq_entries);
3091 ql_write_page1_reg(qdev,
3092 &hmem_regs->rxLargeBufferLength,
3093 qdev->lrg_buffer_len);
3095 /* Small Buffer Queue */
3096 ql_write_page1_reg(qdev,
3097 &hmem_regs->rxSmallQBaseAddrHigh,
3098 MS_64BITS(qdev->small_buf_q_phy_addr));
3100 ql_write_page1_reg(qdev,
3101 &hmem_regs->rxSmallQBaseAddrLow,
3102 LS_64BITS(qdev->small_buf_q_phy_addr));
3104 ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
3105 ql_write_page1_reg(qdev,
3106 &hmem_regs->rxSmallBufferLength,
3107 QL_SMALL_BUFFER_SIZE);
3109 qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
3110 qdev->small_buf_release_cnt = 8;
3111 qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
3112 qdev->lrg_buf_release_cnt = 8;
3113 qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr;
3114 qdev->small_buf_index = 0;
3115 qdev->lrg_buf_index = 0;
3116 qdev->lrg_buf_free_count = 0;
3117 qdev->lrg_buf_free_head = NULL;
3118 qdev->lrg_buf_free_tail = NULL;
3120 ql_write_common_reg(qdev,
3121 &port_regs->CommonRegs.
3122 rxSmallQProducerIndex,
3123 qdev->small_buf_q_producer_index);
3124 ql_write_common_reg(qdev,
3125 &port_regs->CommonRegs.
3126 rxLargeQProducerIndex,
3127 qdev->lrg_buf_q_producer_index);
3130 /* Find out if the chip has already been initialized. If it has, then
3131 * we skip some of the initialization. */
3133 clear_bit(QL_LINK_MASTER, &qdev->flags);
3134 value = ql_read_page0_reg(qdev, &port_regs->portStatus);
3135 if ((value & PORT_STATUS_IC) == 0) {
3137 /* Chip has not been configured yet, so let it rip. */
3138 if (ql_init_misc_registers(qdev)) {
3139 status = -1;
3140 goto out;
3143 value = qdev->nvram_data.tcpMaxWindowSize;
3144 ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);
3146 value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
3148 if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
3149 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
3150 * 2) << 13)) {
3151 status = -1;
3152 goto out;
3154 ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
3155 ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
3156 (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
3157 16) | (INTERNAL_CHIP_SD |
3158 INTERNAL_CHIP_WE)));
3159 ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
3162 if (qdev->mac_index)
3163 ql_write_page0_reg(qdev,
3164 &port_regs->mac1MaxFrameLengthReg,
3165 qdev->max_frame_size);
3166 else
3167 ql_write_page0_reg(qdev,
3168 &port_regs->mac0MaxFrameLengthReg,
3169 qdev->max_frame_size);
3171 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
3172 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
3173 2) << 7)) {
3174 status = -1;
3175 goto out;
3178 PHY_Setup(qdev);
3179 ql_init_scan_mode(qdev);
3180 ql_get_phy_owner(qdev);
3182 /* Load the MAC Configuration */
3184 /* Program lower 32 bits of the MAC address */
3185 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3186 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
3187 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3188 ((qdev->ndev->dev_addr[2] << 24)
3189 | (qdev->ndev->dev_addr[3] << 16)
3190 | (qdev->ndev->dev_addr[4] << 8)
3191 | qdev->ndev->dev_addr[5]));
3193 /* Program top 16 bits of the MAC address */
3194 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3195 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
3196 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3197 ((qdev->ndev->dev_addr[0] << 8)
3198 | qdev->ndev->dev_addr[1]));
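/*
 * With an illustrative address of 00:0e:1e:aa:bb:cc, the first write
 * deposits bytes 2-5 as 0x1eaabbcc in the data register, and the
 * second (entry "| 1") deposits bytes 0-1 as 0x000e.
 */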
3200 /* Enable Primary MAC */
3201 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3202 ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
3203 MAC_ADDR_INDIRECT_PTR_REG_PE));
3205 /* Clear Primary and Secondary IP addresses */
3206 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
3207 ((IP_ADDR_INDEX_REG_MASK << 16) |
3208 (qdev->mac_index << 2)));
3209 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
3211 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
3212 ((IP_ADDR_INDEX_REG_MASK << 16) |
3213 ((qdev->mac_index << 2) + 1)));
3214 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
3216 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
3218 /* Indicate Configuration Complete */
3219 ql_write_page0_reg(qdev,
3220 &port_regs->portControl,
3221 ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));
3223 do {
3224 value = ql_read_page0_reg(qdev, &port_regs->portStatus);
3225 if (value & PORT_STATUS_IC)
3226 break;
3227 spin_unlock_irq(&qdev->hw_lock);
3228 msleep(500);
3229 spin_lock_irq(&qdev->hw_lock);
3230 } while (--delay);
3232 if (delay == 0) {
3233 netdev_err(qdev->ndev, "Hw Initialization timeout\n");
3234 status = -1;
3235 goto out;
3238 /* Enable Ethernet Function */
3239 if (qdev->device_id == QL3032_DEVICE_ID) {
3240 value =
3241 (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
3242 QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 |
3243 QL3032_PORT_CONTROL_ET);
3244 ql_write_page0_reg(qdev, &port_regs->functionControl,
3245 ((value << 16) | value));
3246 } else {
3247 value =
3248 (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
3249 PORT_CONTROL_HH);
3250 ql_write_page0_reg(qdev, &port_regs->portControl,
3251 ((value << 16) | value));
3255 out:
3256 return status;
3260 /* Caller holds hw_lock. */
3262 static int ql_adapter_reset(struct ql3_adapter *qdev)
3264 struct ql3xxx_port_registers __iomem *port_regs =
3265 qdev->mem_map_registers;
3266 int status = 0;
3267 u16 value;
3268 int max_wait_time;
3270 set_bit(QL_RESET_ACTIVE, &qdev->flags);
3271 clear_bit(QL_RESET_DONE, &qdev->flags);
3274 /* Issue soft reset to chip. */
3276 netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n");
3277 ql_write_common_reg(qdev,
3278 &port_regs->CommonRegs.ispControlStatus,
3279 ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
3281 /* Wait up to 5 seconds for the reset to complete. */
3282 netdev_printk(KERN_DEBUG, qdev->ndev,
3283 "Waiting up to 5 seconds for reset to complete\n");
3285 /* Wait until the firmware tells us the Soft Reset is done */
3286 max_wait_time = 5;
3287 do {
3288 value =
3289 ql_read_common_reg(qdev,
3290 &port_regs->CommonRegs.ispControlStatus);
3291 if ((value & ISP_CONTROL_SR) == 0)
3292 break;
3294 ssleep(1);
3295 } while ((--max_wait_time));
3298 /* Also, make sure that the Network Reset Interrupt bit has been
3299 * cleared after the soft reset has taken place. */
3301 value =
3302 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
3303 if (value & ISP_CONTROL_RI) {
3304 netdev_printk(KERN_DEBUG, qdev->ndev,
3305 "clearing RI after reset\n");
3306 ql_write_common_reg(qdev,
3307 &port_regs->CommonRegs.
3308 ispControlStatus,
3309 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
3312 if (max_wait_time == 0) {
3313 /* Issue Force Soft Reset */
3314 ql_write_common_reg(qdev,
3315 &port_regs->CommonRegs.
3316 ispControlStatus,
3317 ((ISP_CONTROL_FSR << 16) |
3318 ISP_CONTROL_FSR));
3320 /* Wait until the firmware tells us the Force Soft Reset is
3321 * done. */
3323 max_wait_time = 5;
3324 do {
3325 value = ql_read_common_reg(qdev,
3326 &port_regs->CommonRegs.
3327 ispControlStatus);
3328 if ((value & ISP_CONTROL_FSR) == 0)
3329 break;
3330 ssleep(1);
3331 } while ((--max_wait_time));
3333 if (max_wait_time == 0)
3334 status = 1;
3336 clear_bit(QL_RESET_ACTIVE, &qdev->flags);
3337 set_bit(QL_RESET_DONE, &qdev->flags);
3338 return status;
3341 static void ql_set_mac_info(struct ql3_adapter *qdev)
3343 struct ql3xxx_port_registers __iomem *port_regs =
3344 qdev->mem_map_registers;
3345 u32 value, port_status;
3346 u8 func_number;
3348 /* Get the function number */
3349 value =
3350 ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
3351 func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
3352 port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
3353 switch (value & ISP_CONTROL_FN_MASK) {
3354 case ISP_CONTROL_FN0_NET:
3355 qdev->mac_index = 0;
3356 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3357 qdev->mb_bit_mask = FN0_MA_BITS_MASK;
3358 qdev->PHYAddr = PORT0_PHY_ADDRESS;
3359 if (port_status & PORT_STATUS_SM0)
3360 set_bit(QL_LINK_OPTICAL, &qdev->flags);
3361 else
3362 clear_bit(QL_LINK_OPTICAL, &qdev->flags);
3363 break;
3365 case ISP_CONTROL_FN1_NET:
3366 qdev->mac_index = 1;
3367 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3368 qdev->mb_bit_mask = FN1_MA_BITS_MASK;
3369 qdev->PHYAddr = PORT1_PHY_ADDRESS;
3370 if (port_status & PORT_STATUS_SM1)
3371 set_bit(QL_LINK_OPTICAL, &qdev->flags);
3372 else
3373 clear_bit(QL_LINK_OPTICAL, &qdev->flags);
3374 break;
3376 case ISP_CONTROL_FN0_SCSI:
3377 case ISP_CONTROL_FN1_SCSI:
3378 default:
3379 netdev_printk(KERN_DEBUG, qdev->ndev,
3380 "Invalid function number, ispControlStatus = 0x%x\n",
3381 value);
3382 break;
3384 qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
3387 static void ql_display_dev_info(struct net_device *ndev)
3389 struct ql3_adapter *qdev = netdev_priv(ndev);
3390 struct pci_dev *pdev = qdev->pdev;
3392 netdev_info(ndev,
3393 "%s Adapter %d RevisionID %d found %s on PCI slot %d\n",
3394 DRV_NAME, qdev->index, qdev->chip_rev_id,
3395 qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022",
3396 qdev->pci_slot);
3397 netdev_info(ndev, "%s Interface\n",
3398 test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");
3401 /* Print PCI bus width/type. */
3403 netdev_info(ndev, "Bus interface is %s %s\n",
3404 ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
3405 ((qdev->pci_x) ? "PCI-X" : "PCI"));
3407 netdev_info(ndev, "mem IO base address adjusted = 0x%p\n",
3408 qdev->mem_map_registers);
3409 netdev_info(ndev, "Interrupt number = %d\n", pdev->irq);
3411 netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr);
3414 static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
3416 struct net_device *ndev = qdev->ndev;
3417 int retval = 0;
3419 netif_stop_queue(ndev);
3420 netif_carrier_off(ndev);
3422 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3423 clear_bit(QL_LINK_MASTER, &qdev->flags);
3425 ql_disable_interrupts(qdev);
3427 free_irq(qdev->pdev->irq, ndev);
3429 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3430 netdev_info(qdev->ndev, "calling pci_disable_msi()\n");
3431 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3432 pci_disable_msi(qdev->pdev);
3435 del_timer_sync(&qdev->adapter_timer);
3437 napi_disable(&qdev->napi);
3439 if (do_reset) {
3440 int soft_reset;
3441 unsigned long hw_flags;
3443 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3444 if (ql_wait_for_drvr_lock(qdev)) {
3445 soft_reset = ql_adapter_reset(qdev);
3446 if (soft_reset) {
3447 netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n",
3448 qdev->index);
3450 netdev_err(ndev,
3451 "Releasing driver lock via chip reset\n");
3452 } else {
3453 netdev_err(ndev,
3454 "Could not acquire driver lock to do reset!\n");
3455 retval = -1;
3457 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3459 ql_free_mem_resources(qdev);
3460 return retval;
3463 static int ql_adapter_up(struct ql3_adapter *qdev)
3465 struct net_device *ndev = qdev->ndev;
3466 int err;
3467 unsigned long irq_flags = IRQF_SHARED;
3468 unsigned long hw_flags;
3470 if (ql_alloc_mem_resources(qdev)) {
3471 netdev_err(ndev, "Unable to allocate buffers\n");
3472 return -ENOMEM;
3475 if (qdev->msi) {
3476 if (pci_enable_msi(qdev->pdev)) {
3477 netdev_err(ndev,
3478 "User requested MSI, but MSI failed to initialize. Continuing without MSI.\n");
3479 qdev->msi = 0;
3480 } else {
3481 netdev_info(ndev, "MSI Enabled...\n");
3482 set_bit(QL_MSI_ENABLED, &qdev->flags);
3483 irq_flags &= ~IRQF_SHARED;
3487 err = request_irq(qdev->pdev->irq, ql3xxx_isr,
3488 irq_flags, ndev->name, ndev);
3489 if (err) {
3490 netdev_err(ndev,
3491 "Failed to reserve interrupt %d - already in use\n",
3492 qdev->pdev->irq);
3493 goto err_irq;
3496 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3498 err = ql_wait_for_drvr_lock(qdev);
3499 if (err) {
3500 err = ql_adapter_initialize(qdev);
3501 if (err) {
3502 netdev_err(ndev, "Unable to initialize adapter\n");
3503 goto err_init;
3505 netdev_err(ndev, "Releasing driver lock\n");
3506 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3507 } else {
3508 netdev_err(ndev, "Could not acquire driver lock\n");
3509 goto err_lock;
3512 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3514 set_bit(QL_ADAPTER_UP, &qdev->flags);
3516 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
3518 napi_enable(&qdev->napi);
3519 ql_enable_interrupts(qdev);
3520 return 0;
3522 err_init:
3523 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3524 err_lock:
3525 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3526 free_irq(qdev->pdev->irq, ndev);
3527 err_irq:
3528 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3529 netdev_info(ndev, "calling pci_disable_msi()\n");
3530 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3531 pci_disable_msi(qdev->pdev);
3533 return err;
3536 static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
3538 if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
3539 netdev_err(qdev->ndev,
3540 "Driver up/down cycle failed, closing device\n");
3541 rtnl_lock();
3542 dev_close(qdev->ndev);
3543 rtnl_unlock();
3544 return -1;
3546 return 0;
3549 static int ql3xxx_close(struct net_device *ndev)
3551 struct ql3_adapter *qdev = netdev_priv(ndev);
3554 /* Wait for device to recover from a reset.
3555 * (Rarely happens, but possible.) */
3557 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3558 msleep(50);
3560 ql_adapter_down(qdev, QL_DO_RESET);
3561 return 0;
3564 static int ql3xxx_open(struct net_device *ndev)
3566 struct ql3_adapter *qdev = netdev_priv(ndev);
3567 return ql_adapter_up(qdev);
3570 static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
3572 struct ql3_adapter *qdev = netdev_priv(ndev);
3573 struct ql3xxx_port_registers __iomem *port_regs =
3574 qdev->mem_map_registers;
3575 struct sockaddr *addr = p;
3576 unsigned long hw_flags;
3578 if (netif_running(ndev))
3579 return -EBUSY;
3581 if (!is_valid_ether_addr(addr->sa_data))
3582 return -EADDRNOTAVAIL;
3584 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3586 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3587 /* Program lower 32 bits of the MAC address */
3588 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3589 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
3590 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3591 ((ndev->dev_addr[2] << 24) |
3592 (ndev->dev_addr[3] << 16) |
3593 (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));
3595 /* Program top 16 bits of the MAC address */
3596 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3597 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
3598 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3599 ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
3600 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3602 return 0;
3605 static void ql3xxx_tx_timeout(struct net_device *ndev, unsigned int txqueue)
3607 struct ql3_adapter *qdev = netdev_priv(ndev);
3609 netdev_err(ndev, "Resetting...\n");
3611 /* Stop the queues, we've got a problem. */
3613 netif_stop_queue(ndev);
3616 /* Wake up the worker to process this event. */
3618 queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
3621 static void ql_reset_work(struct work_struct *work)
3623 struct ql3_adapter *qdev =
3624 container_of(work, struct ql3_adapter, reset_work.work);
3625 struct net_device *ndev = qdev->ndev;
3626 u32 value;
3627 struct ql_tx_buf_cb *tx_cb;
3628 int max_wait_time, i;
3629 struct ql3xxx_port_registers __iomem *port_regs =
3630 qdev->mem_map_registers;
3631 unsigned long hw_flags;
3633 if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
test_bit(QL_RESET_START, &qdev->flags)) {
3634 clear_bit(QL_LINK_MASTER, &qdev->flags);
3637 /* Loop through the active list and return the skb. */
3639 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
3640 int j;
3641 tx_cb = &qdev->tx_buf[i];
3642 if (tx_cb->skb) {
3643 netdev_printk(KERN_DEBUG, ndev,
3644 "Freeing lost SKB\n");
3645 pci_unmap_single(qdev->pdev,
3646 dma_unmap_addr(&tx_cb->map[0],
3647 mapaddr),
3648 dma_unmap_len(&tx_cb->map[0], maplen),
3649 PCI_DMA_TODEVICE);
3650 for (j = 1; j < tx_cb->seg_count; j++) {
3651 pci_unmap_page(qdev->pdev,
3652 dma_unmap_addr(&tx_cb->map[j],
3653 mapaddr),
3654 dma_unmap_len(&tx_cb->map[j],
3655 maplen),
3656 PCI_DMA_TODEVICE);
3658 dev_kfree_skb(tx_cb->skb);
3659 tx_cb->skb = NULL;
3663 netdev_err(ndev, "Clearing NRI after reset\n");
3664 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3665 ql_write_common_reg(qdev,
3666 &port_regs->CommonRegs.
3667 ispControlStatus,
3668 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
3670 /* Wait for the Soft Reset to complete. */
3672 max_wait_time = 10;
3673 do {
3674 value = ql_read_common_reg(qdev,
3675 &port_regs->CommonRegs.
3677 ispControlStatus);
3678 if ((value & ISP_CONTROL_SR) == 0) {
3679 netdev_printk(KERN_DEBUG, ndev,
3680 "reset completed\n");
3681 break;
3684 if (value & ISP_CONTROL_RI) {
3685 netdev_printk(KERN_DEBUG, ndev,
3686 "clearing NRI after reset\n");
3687 ql_write_common_reg(qdev,
3688 &port_regs->
3689 CommonRegs.
3690 ispControlStatus,
3691 ((ISP_CONTROL_RI <<
3692 16) | ISP_CONTROL_RI));
3695 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3696 ssleep(1);
3697 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3698 } while (--max_wait_time);
3699 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3701 if (value & ISP_CONTROL_SR) {
3704 /* Set the reset flags and clear the board again.
3705 * Nothing else to do... */
3707 netdev_err(ndev,
3708 "Timed out waiting for reset to complete\n");
3709 netdev_err(ndev, "Do a reset\n");
3710 clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
3711 clear_bit(QL_RESET_START, &qdev->flags);
3712 ql_cycle_adapter(qdev, QL_DO_RESET);
3713 return;
3716 clear_bit(QL_RESET_ACTIVE, &qdev->flags);
3717 clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
3718 clear_bit(QL_RESET_START, &qdev->flags);
3719 ql_cycle_adapter(qdev, QL_NO_RESET);
3723 static void ql_tx_timeout_work(struct work_struct *work)
3725 struct ql3_adapter *qdev =
3726 container_of(work, struct ql3_adapter, tx_timeout_work.work);
3728 ql_cycle_adapter(qdev, QL_DO_RESET);
3731 static void ql_get_board_info(struct ql3_adapter *qdev)
3733 struct ql3xxx_port_registers __iomem *port_regs =
3734 qdev->mem_map_registers;
3735 u32 value;
3737 value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
3739 qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
3740 if (value & PORT_STATUS_64)
3741 qdev->pci_width = 64;
3742 else
3743 qdev->pci_width = 32;
3744 if (value & PORT_STATUS_X)
3745 qdev->pci_x = 1;
3746 else
3747 qdev->pci_x = 0;
3748 qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
3751 static void ql3xxx_timer(struct timer_list *t)
3753 struct ql3_adapter *qdev = from_timer(qdev, t, adapter_timer);
3754 queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
3757 static const struct net_device_ops ql3xxx_netdev_ops = {
3758 .ndo_open = ql3xxx_open,
3759 .ndo_start_xmit = ql3xxx_send,
3760 .ndo_stop = ql3xxx_close,
3761 .ndo_validate_addr = eth_validate_addr,
3762 .ndo_set_mac_address = ql3xxx_set_mac_address,
3763 .ndo_tx_timeout = ql3xxx_tx_timeout,
3766 static int ql3xxx_probe(struct pci_dev *pdev,
3767 const struct pci_device_id *pci_entry)
3769 struct net_device *ndev = NULL;
3770 struct ql3_adapter *qdev = NULL;
3771 static int cards_found;
3772 int uninitialized_var(pci_using_dac), err;
3774 err = pci_enable_device(pdev);
3775 if (err) {
3776 pr_err("%s cannot enable PCI device\n", pci_name(pdev));
3777 goto err_out;
3780 err = pci_request_regions(pdev, DRV_NAME);
3781 if (err) {
3782 pr_err("%s cannot obtain PCI resources\n", pci_name(pdev));
3783 goto err_out_disable_pdev;
3786 pci_set_master(pdev);
3788 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3789 pci_using_dac = 1;
3790 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3791 } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
3792 pci_using_dac = 0;
3793 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3796 if (err) {
3797 pr_err("%s no usable DMA configuration\n", pci_name(pdev));
3798 goto err_out_free_regions;
3801 ndev = alloc_etherdev(sizeof(struct ql3_adapter));
3802 if (!ndev) {
3803 err = -ENOMEM;
3804 goto err_out_free_regions;
3807 SET_NETDEV_DEV(ndev, &pdev->dev);
3809 pci_set_drvdata(pdev, ndev);
3811 qdev = netdev_priv(ndev);
3812 qdev->index = cards_found;
3813 qdev->ndev = ndev;
3814 qdev->pdev = pdev;
3815 qdev->device_id = pci_entry->device;
3816 qdev->port_link_state = LS_DOWN;
3817 if (msi)
3818 qdev->msi = 1;
3820 qdev->msg_enable = netif_msg_init(debug, default_msg);
3822 if (pci_using_dac)
3823 ndev->features |= NETIF_F_HIGHDMA;
3824 if (qdev->device_id == QL3032_DEVICE_ID)
3825 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
3827 qdev->mem_map_registers = pci_ioremap_bar(pdev, 1);
3828 if (!qdev->mem_map_registers) {
3829 pr_err("%s: cannot map device registers\n", pci_name(pdev));
3830 err = -EIO;
3831 goto err_out_free_ndev;
3834 spin_lock_init(&qdev->adapter_lock);
3835 spin_lock_init(&qdev->hw_lock);
3837 /* Set driver entry points */
3838 ndev->netdev_ops = &ql3xxx_netdev_ops;
3839 ndev->ethtool_ops = &ql3xxx_ethtool_ops;
3840 ndev->watchdog_timeo = 5 * HZ;
3842 netif_napi_add(ndev, &qdev->napi, ql_poll, 64);
3844 ndev->irq = pdev->irq;
3846 /* make sure the EEPROM is good */
3847 if (ql_get_nvram_params(qdev)) {
3848 pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n",
3849 __func__, qdev->index);
3850 err = -EIO;
3851 goto err_out_iounmap;
3854 ql_set_mac_info(qdev);
3856 /* Validate and set parameters */
3857 if (qdev->mac_index) {
3858 ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac;
3859 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress);
3860 } else {
3861 ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac;
3862 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress);
3865 ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;
3867 /* Record PCI bus information. */
3868 ql_get_board_info(qdev);
3871 /* Set the Maximum Memory Read Byte Count value. We do this to handle
3872 * jumbo frames. */
3874 if (qdev->pci_x)
3875 pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);
3877 err = register_netdev(ndev);
3878 if (err) {
3879 pr_err("%s: cannot register net device\n", pci_name(pdev));
3880 goto err_out_iounmap;
3883 /* we're going to reset, so assume we have no link for now */
3885 netif_carrier_off(ndev);
3886 netif_stop_queue(ndev);
3888 qdev->workqueue = create_singlethread_workqueue(ndev->name);
3889 if (!qdev->workqueue) {
3890 unregister_netdev(ndev);
3891 err = -ENOMEM;
3892 goto err_out_iounmap;
3895 INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
3896 INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
3897 INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);
3899 timer_setup(&qdev->adapter_timer, ql3xxx_timer, 0);
3900 qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
3902 if (!cards_found) {
3903 pr_alert("%s\n", DRV_STRING);
3904 pr_alert("Driver name: %s, Version: %s\n",
3905 DRV_NAME, DRV_VERSION);
3907 ql_display_dev_info(ndev);
3909 cards_found++;
3910 return 0;
3912 err_out_iounmap:
3913 iounmap(qdev->mem_map_registers);
3914 err_out_free_ndev:
3915 free_netdev(ndev);
3916 err_out_free_regions:
3917 pci_release_regions(pdev);
3918 err_out_disable_pdev:
3919 pci_disable_device(pdev);
3920 err_out:
3921 return err;
3924 static void ql3xxx_remove(struct pci_dev *pdev)
3926 struct net_device *ndev = pci_get_drvdata(pdev);
3927 struct ql3_adapter *qdev = netdev_priv(ndev);
3929 unregister_netdev(ndev);
3931 ql_disable_interrupts(qdev);
3933 if (qdev->workqueue) {
3934 cancel_delayed_work(&qdev->reset_work);
3935 cancel_delayed_work(&qdev->tx_timeout_work);
3936 destroy_workqueue(qdev->workqueue);
3937 qdev->workqueue = NULL;
3940 iounmap(qdev->mem_map_registers);
3941 pci_release_regions(pdev);
3942 free_netdev(ndev);
3945 static struct pci_driver ql3xxx_driver = {
3947 .name = DRV_NAME,
3948 .id_table = ql3xxx_pci_tbl,
3949 .probe = ql3xxx_probe,
3950 .remove = ql3xxx_remove,
3953 module_pci_driver(ql3xxx_driver);