/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author:     Linux qlge network device driver by
 *             Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <net/ip6_checksum.h>

#include "qlge.h"
char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = 0x00007fff;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, MSIX_IRQ);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - Do Not allocate memory. ");
static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - Do not allow.");
static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}
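/* Note: as used here, ql_sem_trylock() returns zero when the SEM_SET
 * pattern sticks in the SEM register (the semaphore was acquired) and
 * non-zero when the read-back shows the bits did not take, i.e. another
 * function already owns the resource.  Callers such as ql_sem_spinlock()
 * therefore treat a zero return as success.
 */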
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}
void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}
/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!.\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}
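/* The wait above is a simple bounded poll: up to UDELAY_COUNT reads of
 * the register, busy-waiting UDELAY_DELAY microseconds between attempts.
 * A set err_bit aborts immediately, a set target bit reports success,
 * and exhausting the budget reports a timeout.
 */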
/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}
/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		return status;

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}
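/* Typical control-block download as implemented above (sketch):
 *
 *	map = pci_map_single(pdev, cb, size, direction);
 *	ICB_L/ICB_H = lower/upper 32 bits of map;
 *	CFG = CFG_Q_MASK | (bit << 16) | bit | (q_id << CFG_Q_SHIFT);
 *	... poll CFG until 'bit' clears, then unmap.
 *
 * The SEM_ICB hardware semaphore is held for the whole sequence so the
 * MPI firmware cannot issue a competing download.
 */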
/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
					MAC_ADDR_MR, 0);
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
	return status;
}
/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
					(addr[4] << 8) | (addr[5]);

			ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "Adding %s address %pM at index %d in the CAM.\n",
				     type == MAC_ADDR_TYPE_MULTI_MAC ?
				     "MULTICAST" : "UNICAST",
				     addr, index);

			ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			/* This field should also include the queue id
			   and possibly the function id.  Right now we hardcode
			   the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->func << CAM_OUT_FUNC_SHIFT) |
				      (0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->vlgrp)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			netif_info(qdev, ifup, qdev->ndev,
				   "%s VLAN ID %d %s the CAM.\n",
				   enable_bit ? "Adding" : "Removing",
				   index,
				   enable_bit ? "to" : "from");

			ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
	return status;
}
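/* A CAM MAC entry as written above is three consecutive 32-bit words at
 * the same index: the lower four address octets, the upper two octets,
 * and a routing word (cam_output) that currently points everything at
 * the NIC core and completion queue zero.  The VLAN case writes a single
 * word whose MAC_ADDR_E bit enables or disables the given VLAN ID.
 */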
/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}
void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}
/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}
/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "%s %s mask %s the routing reg.\n",
		     enable ? "Adding" : "Removing",
		     index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
		     index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
		     index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
		     index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
		     index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
		     index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
		     index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
		     index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
		     index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
		     index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
		     index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
		     index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
		     index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
		     index == RT_IDX_UNUSED013 ? "UNUSED13" :
		     index == RT_IDX_UNUSED014 ? "UNUSED14" :
		     index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
		     "(Bad index != RT_IDX)",
		     enable ? "to" : "from");

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}
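/* Example of the value composed above for the CAM-hit slot:
 *
 *	value = RT_IDX_DST_CAM_Q			(destination)
 *	      | RT_IDX_TYPE_NICQ			(type)
 *	      | (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);  (slot index)
 *
 * RT_IDX gets this value (plus RT_IDX_E when enabling) and RT_DATA gets
 * the caller's mask, so each of the 16 slots steers one frame class.
 */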
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}
/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write32(qdev, INTR_EN, ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN, ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}
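/* irq_cnt bookkeeping: for non-MSI-X operation (or vector zero) the
 * counter is incremented by ql_disable_completion_interrupt() and the
 * enable above only touches INTR_EN once atomic_dec_and_test() brings it
 * back to zero, so nested disable/enable pairs issued by different
 * workers balance out before the interrupt is really re-armed.
 */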
static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN, ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}
static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}
}
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}
static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}
static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size, offset;
	int status = 0;
	__le32 *p = (__le32 *)&qdev->flash;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8000) / sizeof(u16),
			"8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr1,
			qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr,
			qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}
static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status = 0;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8012) / sizeof(u16),
			"8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		qdev->flash.flash_params_8012.mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}
/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}
/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}
/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}
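/* The 64-bit statistics registers are read as two 32-bit halves, the low
 * word at 'reg' and the high word at 'reg + 4', then recombined:
 *
 *	*data = (u64) lo | ((u64) hi << 32);
 */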
static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}
/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status)
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));

	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}
static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}
/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}
static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
		struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
				    pci_unmap_addr(lbq_desc, mapaddr),
				    rx_ring->lbq_buf_size,
				    PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
					== ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
				lbq_desc->p.pg_chunk.map,
				ql_lbq_block_size(qdev),
				PCI_DMA_FROMDEVICE);
	return lbq_desc;
}
/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}
/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}
static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}
static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
						struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;
		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						GFP_ATOMIC,
						qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			netif_err(qdev, drv, qdev->ndev,
				  "page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
					0, ql_lbq_block_size(qdev),
					PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
					qdev->lbq_buf_order);
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}
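/* Large receive buffers are carved out of one "master" compound page of
 * ql_lbq_block_size() bytes.  Each chunk handed to a descriptor takes an
 * extra page reference (get_page()) except for the last chunk, which
 * inherits the original reference and is marked with last_flag; the DMA
 * unmap for the whole page then happens when that last chunk is consumed
 * in ql_get_curr_lchunk().
 */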
/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 32) {
		for (i = 0; i < 16; i++) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "lbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Could not get a page chunk.\n");
				return;
			}

			map = lbq_desc->p.pg_chunk.map +
				lbq_desc->p.pg_chunk.offset;
			pci_unmap_addr_set(lbq_desc, mapaddr, map);
			pci_unmap_len_set(lbq_desc, maplen,
					rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);

			pci_dma_sync_single_for_device(qdev->pdev, map,
						rx_ring->lbq_buf_size,
						PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "lbq: updating prod idx = %d.\n",
			     rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}
/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "sbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			if (sbq_desc->p.skb == NULL) {
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "sbq: getting new skb for index %d.\n",
					     sbq_desc->index);
				sbq_desc->p.skb =
					netdev_alloc_skb(qdev->ndev,
							 SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					netif_err(qdev, probe, qdev->ndev,
						  "Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					netif_err(qdev, ifup, qdev->ndev,
						  "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				pci_unmap_addr_set(sbq_desc, mapaddr, map);
				pci_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "sbq: updating prod idx = %d.\n",
			     rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}
static void ql_update_buffer_queues(struct ql_adapter *qdev,
					struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}
/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroeth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there is more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				netif_printk(qdev, tx_done, KERN_DEBUG,
					     qdev->ndev,
					     "unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 pci_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
				     "unmapping frag %d.\n", i);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       pci_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}
}
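/* Mapping layout assumed here: slot 0 of tx_ring_desc->map[] covers the
 * linear skb->data area, slots 1..6 cover page fragments, and slot 7
 * (only present when more than seven fragments were queued) covers the
 * external OAL sglist; hence the pci_unmap_single() for i == 0 or the
 * i == 7 overflow case, and pci_unmap_page() for everything else.
 */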
/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt)
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "frag_cnt = %d.\n", frag_cnt);
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netif_err(qdev, tx_queued, qdev->ndev,
					  "PCI mapping outbound address list with error: %d\n",
					  err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map =
		    pci_map_page(qdev->pdev, frag->page,
				 frag->page_offset, frag->size,
				 PCI_DMA_TODEVICE);

		err = pci_dma_mapping_error(qdev->pdev, map);
		if (err) {
			netif_err(qdev, tx_queued, qdev->ndev,
				  "PCI mapping frags failed with error: %d.\n",
				  err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(frag->size);
		pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  frag->size);
	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}
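/* Rough shape of the resulting IOCB when an skb has more than seven
 * fragments (this just restates the layout comment in the loop above):
 *
 *	iocb->seg[0]   -> skb->data
 *	iocb->seg[1-6] -> frags 0-5
 *	iocb->seg[7]   -> OAL, whose entries hold the remaining frags
 *
 * The final descriptor written (in the IOCB or the OAL) is tagged with
 * TX_DESC_E, and the OAL pointer itself carries TX_DESC_C plus the byte
 * count of the remaining fragment descriptors.
 */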
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
					struct rx_ring *rx_ring,
					struct ib_mac_iocb_rsp *ib_mac_rsp,
					u32 length,
					u16 vlan_id)
{
	struct sk_buff *skb;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct skb_frag_struct *rx_frag;
	int nr_frags;
	struct napi_struct *napi = &rx_ring->napi;

	napi->dev = qdev->ndev;

	skb = napi_get_frags(napi);
	if (!skb) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, exiting.\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	prefetch(lbq_desc->p.pg_chunk.va);
	rx_frag = skb_shinfo(skb)->frags;
	nr_frags = skb_shinfo(skb)->nr_frags;
	rx_frag += nr_frags;
	rx_frag->page = lbq_desc->p.pg_chunk.page;
	rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
	rx_frag->size = length;

	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
	skb_shinfo(skb)->nr_frags++;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += length;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (qdev->vlgrp && (vlan_id != 0xffff))
		vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
	else
		napi_gro_frags(napi);
}
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_page(struct ql_adapter *qdev,
					struct rx_ring *rx_ring,
					struct ib_mac_iocb_rsp *ib_mac_rsp,
					u32 length,
					u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	void *addr;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;

	skb = netdev_alloc_skb(ndev, length);
	if (!skb) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, need to unwind!.\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}

	addr = lbq_desc->p.pg_chunk.va;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		netif_err(qdev, drv, qdev->ndev,
			  "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
		rx_ring->rx_errors++;
		goto err_out;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		netif_err(qdev, drv, qdev->ndev,
			  "Segment too small, dropping.\n");
		rx_ring->rx_dropped++;
		goto err_out;
	}
	memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
		     length);
	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
				lbq_desc->p.pg_chunk.offset+ETH_HLEN,
				length-ETH_HLEN);
	skb->len += length-ETH_HLEN;
	skb->data_len += length-ETH_HLEN;
	skb->truesize += length-ETH_HLEN;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb->ip_summed = CHECKSUM_NONE;

	if (qdev->rx_csum &&
		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
				cpu_to_be16(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "TCP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (qdev->vlgrp && (vlan_id != 0xffff))
			vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
		else
			napi_gro_receive(napi, skb);
	} else {
		if (qdev->vlgrp && (vlan_id != 0xffff))
			vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
		else
			netif_receive_skb(skb);
	}
	return;
err_out:
	dev_kfree_skb_any(skb);
	put_page(lbq_desc->p.pg_chunk.page);
}
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
					struct rx_ring *rx_ring,
					struct ib_mac_iocb_rsp *ib_mac_rsp,
					u32 length,
					u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	struct sk_buff *new_skb = NULL;
	struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);

	skb = sbq_desc->p.skb;
	/* Allocate new_skb and copy */
	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
	if (new_skb == NULL) {
		netif_err(qdev, probe, qdev->ndev,
			  "No skb available, drop the packet.\n");
		rx_ring->rx_dropped++;
		return;
	}
	skb_reserve(new_skb, NET_IP_ALIGN);
	memcpy(skb_put(new_skb, length), skb->data, length);
	skb = new_skb;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		netif_err(qdev, drv, qdev->ndev,
			  "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
		dev_kfree_skb_any(skb);
		rx_ring->rx_errors++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb->ip_summed = CHECKSUM_NONE;

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if (qdev->rx_csum &&
		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
				cpu_to_be16(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "TCP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (qdev->vlgrp && (vlan_id != 0xffff))
			vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
					 vlan_id, skb);
		else
			napi_gro_receive(&rx_ring->napi, skb);
	} else {
		if (qdev->vlgrp && (vlan_id != 0xffff))
			vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
		else
			netif_receive_skb(skb);
	}
}
static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb_copy_to_linear_data(skb, temp_addr,
		(unsigned int)len);
}
/*
 * This function builds an skb for the given inbound
 * completion.  It will be rewritten for readability in the near
 * future, but for now it works well.
 */
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct bq_desc *lbq_desc;
	struct bq_desc *sbq_desc;
	struct sk_buff *skb = NULL;
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);

	/*
	 * Handle the header buffer if present.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				pci_unmap_addr(sbq_desc, mapaddr),
				pci_unmap_len(sbq_desc, maplen),
				PCI_DMA_FROMDEVICE);
		skb = sbq_desc->p.skb;
		ql_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		sbq_desc->p.skb = NULL;
	}

	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No Data buffer in this packet.\n");
		return skb;
	}

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Headers in small, data of %d bytes in small, combine them.\n",
				     length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			pci_dma_sync_single_for_cpu(qdev->pdev,
					pci_unmap_addr(sbq_desc, mapaddr),
					pci_unmap_len(sbq_desc, maplen),
					PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, length),
			       sbq_desc->p.skb->data, length);
			pci_dma_sync_single_for_device(qdev->pdev,
					pci_unmap_addr(sbq_desc, mapaddr),
					pci_unmap_len(sbq_desc, maplen),
					PCI_DMA_FROMDEVICE);
		} else {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes in a single small buffer.\n",
				     length);
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			skb = sbq_desc->p.skb;
			ql_realign_skb(skb, length);
			skb_put(skb, length);
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(sbq_desc, mapaddr),
					 pci_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Header in small, %d bytes in large. Chain large to small!\n",
				     length);
			/*
			 * The data is in a single large buffer.  We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Chaining page at offset = %d, for %d bytes to skb.\n",
				     lbq_desc->p.pg_chunk.offset, length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
						lbq_desc->p.pg_chunk.offset,
						length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		} else {
			/*
			 * The headers and data are in a single large buffer. We
			 * copy it to a new skb and let it go. This can happen with
			 * jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (skb == NULL) {
				netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
					     "No skb available, drop the packet.\n");
				return NULL;
			}
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc, mapaddr),
				       pci_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			skb_reserve(skb, NET_IP_ALIGN);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
				     length);
			skb_fill_page_desc(skb, 0,
						lbq_desc->p.pg_chunk.page,
						lbq_desc->p.pg_chunk.offset,
						length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			__pskb_pull_tail(skb,
				(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				VLAN_ETH_HLEN : ETH_HLEN);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer.  We loop
		 * thru and chain them to our small header
		 * buffer's skb.
		 * frags:  There are 18 max frags and our small
		 *         buffer will hold 32 of them. The thing is,
		 *         we'll use 3 max for our 9000 byte jumbo
		 *         frames.  If the MTU goes up we could
		 *         eventually be in trouble.
		 */
		int size, i = 0;
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 pci_unmap_addr(sbq_desc, mapaddr),
				 pci_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non-TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer.  We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs. Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers & data in chain of large.\n",
				     length);
			skb = sbq_desc->p.skb;
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		}
		while (length > 0) {
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			size = (length < rx_ring->lbq_buf_size) ? length :
				rx_ring->lbq_buf_size;

			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Adding page %d to skb for %d bytes.\n",
				     i, size);
			skb_fill_page_desc(skb, i,
						lbq_desc->p.pg_chunk.page,
						lbq_desc->p.pg_chunk.offset,
						size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			i++;
		}
		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				VLAN_ETH_HLEN : ETH_HLEN);
	}
	return skb;
}
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
					 struct rx_ring *rx_ring,
					 struct ib_mac_iocb_rsp *ib_mac_rsp,
					 u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No skb available, drop packet.\n");
		rx_ring->rx_dropped++;
		return;
	}

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		netif_err(qdev, drv, qdev->ndev,
			  "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
		dev_kfree_skb_any(skb);
		rx_ring->rx_errors++;
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
		rx_ring->rx_multicast++;
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");
	}

	skb->protocol = eth_type_trans(skb, ndev);
	skb->ip_summed = CHECKSUM_NONE;

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if (qdev->rx_csum &&
		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
				cpu_to_be16(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
					     "TCP checksum done!\n");
			}
		}
	}

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (qdev->vlgrp &&
			(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
			(vlan_id != 0))
			vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
					 vlan_id, skb);
		else
			napi_gro_receive(&rx_ring->napi, skb);
	} else {
		if (qdev->vlgrp &&
			(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
			(vlan_id != 0))
			vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
		else
			netif_receive_skb(skb);
	}
}
/* Process an inbound completion from an rx ring. */
static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
					    struct rx_ring *rx_ring,
					    struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
			((le16_to_cpu(ib_mac_rsp->vlan_id) &
			IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		/* The data and headers are split into
		 * separate buffers.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
						vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		/* The data fit in a single small buffer.
		 * Allocate a new skb, copy the data and
		 * return the buffer to the free pool.
		 */
		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
						length, vlan_id);
	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
		(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
		/* TCP packet in a page chunk that's been checksummed.
		 * Tack it on to our GRO skb and let it go.
		 */
		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
						length, vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		/* Non-TCP packet in a page chunk. Allocate an
		 * skb, tack it on frags, and send it up.
		 */
		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
						length, vlan_id);
	} else {
		/* Non-TCP/UDP large frames that span multiple buffers
		 * can be processed correctly by the split frame logic.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
						vlan_id);
	}

	return (unsigned long)length;
}
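/* Dispatch summary for the completion above: header/data split frames
 * (IB_MAC_IOCB_RSP_HV) and any unrecognized layout go through the
 * generic split handler, small-buffer-only frames are copied into a
 * fresh skb, checksummed TCP page chunks ride the GRO path, and other
 * page-chunk frames get a new skb with the page attached as a fragment.
 */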
/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	QL_DUMP_OB_MAC_RSP(mac_rsp);
	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
	tx_ring->tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;

	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too short to be valid, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too long, but sent anyway.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "PCI backplane error. Frame not sent.\n");
		}
	}
	atomic_inc(&tx_ring->tx_count);
}
/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}

void ql_queue_asic_error(struct ql_adapter *qdev)
{
	ql_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}
static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
				    struct ib_ae_iocb_rsp *ib_ae_rsp)
{
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		netif_err(qdev, rx_err, qdev->ndev,
			  "Management Processor Fatal Error.\n");
		ql_queue_fw_error(qdev);
		return;

	case CAM_LOOKUP_ERR_EVENT:
		netif_err(qdev, link, qdev->ndev,
			  "Multiple CAM hits lookup occurred.\n");
		netif_err(qdev, drv, qdev->ndev,
			  "This event shouldn't occur.\n");
		ql_queue_asic_error(qdev);
		return;

	case SOFT_ECC_ERROR_EVENT:
		netif_err(qdev, rx_err, qdev->ndev,
			  "Soft ECC error detected.\n");
		ql_queue_asic_error(qdev);
		break;

	case PCI_ERR_ANON_BUF_RD:
		netif_err(qdev, rx_err, qdev->ndev,
			  "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
			  ib_ae_rsp->q_id);
		ql_queue_asic_error(qdev);
		break;

	default:
		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
			  ib_ae_rsp->event);
		ql_queue_asic_error(qdev);
		break;
	}
}
static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	int count = 0;
	struct tx_ring *tx_ring;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n.",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	ql_write_cq_idx(rx_ring);
	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id) &&
					net_rsp != NULL) {
		if (atomic_read(&tx_ring->queue_stopped) &&
		    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}

	return count;
}
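/* Note: the 25% threshold above is a heuristic -- the subqueue is only
 * restarted once at least a quarter of the work queue entries have been
 * completed, which avoids waking the stack for every freed descriptor.
 */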
static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ql_net_rsp_iocb *net_rsp;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n.",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, rx_ring,
					       (struct ib_mac_iocb_rsp *)
					       net_rsp);
			break;

		case OPCODE_IB_AE_IOCB:
			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
						net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
			break;
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
		if (count == budget)
			break;
	}
	ql_update_buffer_queues(qdev, rx_ring);
	ql_write_cq_idx(rx_ring);
	return count;
}
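/* Note: the return value is the number of inbound completions consumed,
 * which the NAPI poll routine compares against its budget to decide whether
 * to call napi_complete() and re-enable the completion interrupt.
 */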
static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct ql_adapter *qdev = rx_ring->qdev;
	struct rx_ring *trx_ring;
	int i, work_done = 0;
	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];

	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);

	/* Service the TX rings first. They start
	 * right after the RSS rings. */
	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
		trx_ring = &qdev->rx_ring[i];
		/* If this TX completion ring belongs to this vector and
		 * it's not empty then service it.
		 */
		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
		    (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
					trx_ring->cnsmr_idx)) {
			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
				     "%s: Servicing TX completion ring %d.\n",
				     __func__, trx_ring->cq_id);
			ql_clean_outbound_rx_ring(trx_ring);
		}
	}

	/*
	 * Now service the RSS ring if it's active.
	 */
	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
					rx_ring->cnsmr_idx) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "%s: Servicing RX completion ring %d.\n",
			     __func__, rx_ring->cq_id);
		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
	}

	if (work_done < budget) {
		napi_complete(napi);
		ql_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}
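/* Note: each MSI-X vector owns one RSS ring plus the TX completion rings
 * whose bits are set in ctx->irq_mask (see ql_set_irq_mask()), so the TX
 * rings are drained first and only the RSS ring's inbound work is counted
 * against the NAPI budget.
 */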
static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	qdev->vlgrp = grp;
	if (grp) {
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Turning on VLAN in NIC_RCV_CFG.\n");
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
			   NIC_RCV_CFG_VLAN_MATCH_AND_NON);
	} else {
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Turning off VLAN in NIC_RCV_CFG.\n");
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
	}
}

static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	u32 enable_bit = MAC_ADDR_E;
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;
	if (ql_set_mac_addr_reg
	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init vlan address.\n");
	}
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}

static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	u32 enable_bit = 0;
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;

	if (ql_set_mac_addr_reg
	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to clear vlan address.\n");
	}
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}
/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	napi_schedule(&rx_ring->napi);
	return IRQ_HANDLED;
}

/* This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple vector environment.
 * In MSI/Legacy environment it also process the rest of
 * the rx_rings.
 */
static irqreturn_t qlge_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	struct ql_adapter *qdev = rx_ring->qdev;
	struct intr_context *intr_context = &qdev->intr_context[0];
	u32 var;
	int work_done = 0;

	spin_lock(&qdev->hw_lock);
	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "Shared Interrupt, Not ours!\n");
		spin_unlock(&qdev->hw_lock);
		return IRQ_NONE;
	}
	spin_unlock(&qdev->hw_lock);

	var = ql_disable_completion_interrupt(qdev, intr_context->intr);

	/*
	 * Check for fatal error.
	 */
	if (var & STS_FE) {
		ql_queue_asic_error(qdev);
		netif_err(qdev, intr, qdev->ndev,
			  "Got fatal error, STS = %x.\n", var);
		var = ql_read32(qdev, ERR_STS);
		netif_err(qdev, intr, qdev->ndev,
			  "Resetting chip. Error Status Register = 0x%x\n", var);
		return IRQ_HANDLED;
	}

	/*
	 * Check MPI processor activity.
	 */
	if ((var & STS_PI) &&
	    (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
		/*
		 * We've got an async event or mailbox completion.
		 * Handle it and clear the source of the interrupt.
		 */
		netif_err(qdev, intr, qdev->ndev,
			  "Got MPI processor interrupt.\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
		queue_delayed_work_on(smp_processor_id(),
				      qdev->workqueue, &qdev->mpi_work, 0);
		work_done++;
	}

	/*
	 * Get the bit-mask that shows the active queues for this
	 * pass. Compare it to the queues that this irq services
	 * and call napi if there's a match.
	 */
	var = ql_read32(qdev, ISR1);
	if (var & intr_context->irq_mask) {
		netif_info(qdev, intr, qdev->ndev,
			   "Waking handler for rx_ring[0].\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		napi_schedule(&rx_ring->napi);
		work_done++;
	}
	ql_enable_completion_interrupt(qdev, intr_context->intr);
	return work_done ? IRQ_HANDLED : IRQ_NONE;
}
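/* Note: in MSI/legacy mode this handler can be shared with other devices;
 * irq_cnt guards against servicing a foreign interrupt, and ISR1 is checked
 * against this vector's irq_mask before NAPI is scheduled.  work_done records
 * whether any source was ours so IRQ_NONE can be returned otherwise.
 */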
static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{

	if (skb_is_gso(skb)) {
		int err;
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
		mac_iocb_ptr->net_trans_offset =
		    cpu_to_le16(skb_network_offset(skb) |
				skb_transport_offset(skb)
				<< OB_MAC_TRANSPORT_HDR_SHIFT);
		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
		if (likely(skb->protocol == htons(ETH_P_IP))) {
			struct iphdr *iph = ip_hdr(skb);
			iph->check = 0;
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					     &ipv6_hdr(skb)->daddr,
					     0, IPPROTO_TCP, 0);
		}
		return 1;
	}
	return 0;
}

static void ql_hw_csum_setup(struct sk_buff *skb,
			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	int len;
	struct iphdr *iph = ip_hdr(skb);
	__sum16 *check;

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
	mac_iocb_ptr->net_trans_offset =
		cpu_to_le16(skb_network_offset(skb) |
		skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);

	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
	if (likely(iph->protocol == IPPROTO_TCP)) {
		check = &(tcp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) +
				(tcp_hdr(skb)->doff << 2));
	} else {
		check = &(udp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) +
				sizeof(struct udphdr));
	}
	*check = ~csum_tcpudp_magic(iph->saddr,
				    iph->daddr, len, iph->protocol, 0);
}
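/* Note: for checksum offload the hardware expects the TCP/UDP checksum field
 * to be seeded with the complemented pseudo-header sum (~csum_tcpudp_magic
 * over saddr/daddr/len/protocol); the chip then folds in the payload sum.
 */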
static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct tx_ring_desc *tx_ring_desc;
	struct ob_mac_iocb_req *mac_iocb_ptr;
	struct ql_adapter *qdev = netdev_priv(ndev);
	int tso;
	struct tx_ring *tx_ring;
	u32 tx_ring_idx = (u32) skb->queue_mapping;

	tx_ring = &qdev->tx_ring[tx_ring_idx];

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_info(qdev, tx_queued, qdev->ndev,
			   "%s: shutting down tx queue %d due to lack of resources.\n",
			   __func__, tx_ring_idx);
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		atomic_inc(&tx_ring->queue_stopped);
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
	mac_iocb_ptr = tx_ring_desc->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
	mac_iocb_ptr->tid = tx_ring_desc->index;
	/* We use the upper 32-bits to store the tx queue for this IO.
	 * When we get the completion we can use it to establish the context.
	 */
	mac_iocb_ptr->txq_idx = tx_ring_idx;
	tx_ring_desc->skb = skb;

	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);

	if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
		mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
	}
	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
		ql_hw_csum_setup(skb,
				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	}
	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
			NETDEV_TX_OK) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "Could not map the segments.\n");
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
	tx_ring->prod_idx++;
	if (tx_ring->prod_idx == tx_ring->wq_len)
		tx_ring->prod_idx = 0;
	wmb();

	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
		     "tx queued, slot %d, len %d\n",
		     tx_ring->prod_idx, skb->len);

	atomic_dec(&tx_ring->tx_count);
	return NETDEV_TX_OK;
}
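/* Note: wmb() above orders the IOCB writes ahead of the doorbell so the chip
 * never fetches a partially written request; the producer index is advanced
 * and the doorbell rung only after the descriptor is fully built.
 */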
static void ql_free_shadow_space(struct ql_adapter *qdev)
{
	if (qdev->rx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->rx_ring_shadow_reg_area,
				    qdev->rx_ring_shadow_reg_dma);
		qdev->rx_ring_shadow_reg_area = NULL;
	}
	if (qdev->tx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->tx_ring_shadow_reg_area,
				    qdev->tx_ring_shadow_reg_dma);
		qdev->tx_ring_shadow_reg_area = NULL;
	}
}

static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
	qdev->rx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev,
				 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
	if (qdev->rx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of RX shadow space failed.\n");
		return -ENOMEM;
	}
	memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
	qdev->tx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
				 &qdev->tx_ring_shadow_reg_dma);
	if (qdev->tx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of TX shadow space failed.\n");
		goto err_wqp_sh_area;
	}
	memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
	return 0;

err_wqp_sh_area:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->rx_ring_shadow_reg_area,
			    qdev->rx_ring_shadow_reg_dma);
	return -ENOMEM;
}
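/* Note: the shadow pages allocated above are carved up per ring in
 * ql_start_rx_ring()/ql_start_tx_ring(); the chip DMAs its producer and
 * consumer indices into them so the driver can poll ring state with plain
 * memory reads (see ql_read_sh_reg()) instead of MMIO accesses.
 */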
2643 static void ql_init_tx_ring(struct ql_adapter
*qdev
, struct tx_ring
*tx_ring
)
2645 struct tx_ring_desc
*tx_ring_desc
;
2647 struct ob_mac_iocb_req
*mac_iocb_ptr
;
2649 mac_iocb_ptr
= tx_ring
->wq_base
;
2650 tx_ring_desc
= tx_ring
->q
;
2651 for (i
= 0; i
< tx_ring
->wq_len
; i
++) {
2652 tx_ring_desc
->index
= i
;
2653 tx_ring_desc
->skb
= NULL
;
2654 tx_ring_desc
->queue_entry
= mac_iocb_ptr
;
2658 atomic_set(&tx_ring
->tx_count
, tx_ring
->wq_len
);
2659 atomic_set(&tx_ring
->queue_stopped
, 0);
2662 static void ql_free_tx_resources(struct ql_adapter
*qdev
,
2663 struct tx_ring
*tx_ring
)
2665 if (tx_ring
->wq_base
) {
2666 pci_free_consistent(qdev
->pdev
, tx_ring
->wq_size
,
2667 tx_ring
->wq_base
, tx_ring
->wq_base_dma
);
2668 tx_ring
->wq_base
= NULL
;
2674 static int ql_alloc_tx_resources(struct ql_adapter
*qdev
,
2675 struct tx_ring
*tx_ring
)
2678 pci_alloc_consistent(qdev
->pdev
, tx_ring
->wq_size
,
2679 &tx_ring
->wq_base_dma
);
2681 if ((tx_ring
->wq_base
== NULL
) ||
2682 tx_ring
->wq_base_dma
& WQ_ADDR_ALIGN
) {
2683 netif_err(qdev
, ifup
, qdev
->ndev
, "tx_ring alloc failed.\n");
2687 kmalloc(tx_ring
->wq_len
* sizeof(struct tx_ring_desc
), GFP_KERNEL
);
2688 if (tx_ring
->q
== NULL
)
2693 pci_free_consistent(qdev
->pdev
, tx_ring
->wq_size
,
2694 tx_ring
->wq_base
, tx_ring
->wq_base_dma
);
2698 static void ql_free_lbq_buffers(struct ql_adapter
*qdev
, struct rx_ring
*rx_ring
)
2700 struct bq_desc
*lbq_desc
;
2702 uint32_t curr_idx
, clean_idx
;
2704 curr_idx
= rx_ring
->lbq_curr_idx
;
2705 clean_idx
= rx_ring
->lbq_clean_idx
;
2706 while (curr_idx
!= clean_idx
) {
2707 lbq_desc
= &rx_ring
->lbq
[curr_idx
];
2709 if (lbq_desc
->p
.pg_chunk
.last_flag
) {
2710 pci_unmap_page(qdev
->pdev
,
2711 lbq_desc
->p
.pg_chunk
.map
,
2712 ql_lbq_block_size(qdev
),
2713 PCI_DMA_FROMDEVICE
);
2714 lbq_desc
->p
.pg_chunk
.last_flag
= 0;
2717 put_page(lbq_desc
->p
.pg_chunk
.page
);
2718 lbq_desc
->p
.pg_chunk
.page
= NULL
;
2720 if (++curr_idx
== rx_ring
->lbq_len
)
2726 static void ql_free_sbq_buffers(struct ql_adapter
*qdev
, struct rx_ring
*rx_ring
)
2729 struct bq_desc
*sbq_desc
;
2731 for (i
= 0; i
< rx_ring
->sbq_len
; i
++) {
2732 sbq_desc
= &rx_ring
->sbq
[i
];
2733 if (sbq_desc
== NULL
) {
2734 netif_err(qdev
, ifup
, qdev
->ndev
,
2735 "sbq_desc %d is NULL.\n", i
);
2738 if (sbq_desc
->p
.skb
) {
2739 pci_unmap_single(qdev
->pdev
,
2740 pci_unmap_addr(sbq_desc
, mapaddr
),
2741 pci_unmap_len(sbq_desc
, maplen
),
2742 PCI_DMA_FROMDEVICE
);
2743 dev_kfree_skb(sbq_desc
->p
.skb
);
2744 sbq_desc
->p
.skb
= NULL
;
2749 /* Free all large and small rx buffers associated
2750 * with the completion queues for this device.
2752 static void ql_free_rx_buffers(struct ql_adapter
*qdev
)
2755 struct rx_ring
*rx_ring
;
2757 for (i
= 0; i
< qdev
->rx_ring_count
; i
++) {
2758 rx_ring
= &qdev
->rx_ring
[i
];
2760 ql_free_lbq_buffers(qdev
, rx_ring
);
2762 ql_free_sbq_buffers(qdev
, rx_ring
);
2766 static void ql_alloc_rx_buffers(struct ql_adapter
*qdev
)
2768 struct rx_ring
*rx_ring
;
2771 for (i
= 0; i
< qdev
->rx_ring_count
; i
++) {
2772 rx_ring
= &qdev
->rx_ring
[i
];
2773 if (rx_ring
->type
!= TX_Q
)
2774 ql_update_buffer_queues(qdev
, rx_ring
);
2778 static void ql_init_lbq_ring(struct ql_adapter
*qdev
,
2779 struct rx_ring
*rx_ring
)
2782 struct bq_desc
*lbq_desc
;
2783 __le64
*bq
= rx_ring
->lbq_base
;
2785 memset(rx_ring
->lbq
, 0, rx_ring
->lbq_len
* sizeof(struct bq_desc
));
2786 for (i
= 0; i
< rx_ring
->lbq_len
; i
++) {
2787 lbq_desc
= &rx_ring
->lbq
[i
];
2788 memset(lbq_desc
, 0, sizeof(*lbq_desc
));
2789 lbq_desc
->index
= i
;
2790 lbq_desc
->addr
= bq
;
2795 static void ql_init_sbq_ring(struct ql_adapter
*qdev
,
2796 struct rx_ring
*rx_ring
)
2799 struct bq_desc
*sbq_desc
;
2800 __le64
*bq
= rx_ring
->sbq_base
;
2802 memset(rx_ring
->sbq
, 0, rx_ring
->sbq_len
* sizeof(struct bq_desc
));
2803 for (i
= 0; i
< rx_ring
->sbq_len
; i
++) {
2804 sbq_desc
= &rx_ring
->sbq
[i
];
2805 memset(sbq_desc
, 0, sizeof(*sbq_desc
));
2806 sbq_desc
->index
= i
;
2807 sbq_desc
->addr
= bq
;
2812 static void ql_free_rx_resources(struct ql_adapter
*qdev
,
2813 struct rx_ring
*rx_ring
)
2815 /* Free the small buffer queue. */
2816 if (rx_ring
->sbq_base
) {
2817 pci_free_consistent(qdev
->pdev
,
2819 rx_ring
->sbq_base
, rx_ring
->sbq_base_dma
);
2820 rx_ring
->sbq_base
= NULL
;
2823 /* Free the small buffer queue control blocks. */
2824 kfree(rx_ring
->sbq
);
2825 rx_ring
->sbq
= NULL
;
2827 /* Free the large buffer queue. */
2828 if (rx_ring
->lbq_base
) {
2829 pci_free_consistent(qdev
->pdev
,
2831 rx_ring
->lbq_base
, rx_ring
->lbq_base_dma
);
2832 rx_ring
->lbq_base
= NULL
;
2835 /* Free the large buffer queue control blocks. */
2836 kfree(rx_ring
->lbq
);
2837 rx_ring
->lbq
= NULL
;
2839 /* Free the rx queue. */
2840 if (rx_ring
->cq_base
) {
2841 pci_free_consistent(qdev
->pdev
,
2843 rx_ring
->cq_base
, rx_ring
->cq_base_dma
);
2844 rx_ring
->cq_base
= NULL
;
2848 /* Allocate queues and buffers for this completions queue based
2849 * on the values in the parameter structure. */
2850 static int ql_alloc_rx_resources(struct ql_adapter
*qdev
,
2851 struct rx_ring
*rx_ring
)
2855 * Allocate the completion queue for this rx_ring.
2858 pci_alloc_consistent(qdev
->pdev
, rx_ring
->cq_size
,
2859 &rx_ring
->cq_base_dma
);
2861 if (rx_ring
->cq_base
== NULL
) {
2862 netif_err(qdev
, ifup
, qdev
->ndev
, "rx_ring alloc failed.\n");
2866 if (rx_ring
->sbq_len
) {
2868 * Allocate small buffer queue.
2871 pci_alloc_consistent(qdev
->pdev
, rx_ring
->sbq_size
,
2872 &rx_ring
->sbq_base_dma
);
2874 if (rx_ring
->sbq_base
== NULL
) {
2875 netif_err(qdev
, ifup
, qdev
->ndev
,
2876 "Small buffer queue allocation failed.\n");
2881 * Allocate small buffer queue control blocks.
2884 kmalloc(rx_ring
->sbq_len
* sizeof(struct bq_desc
),
2886 if (rx_ring
->sbq
== NULL
) {
2887 netif_err(qdev
, ifup
, qdev
->ndev
,
2888 "Small buffer queue control block allocation failed.\n");
2892 ql_init_sbq_ring(qdev
, rx_ring
);
2895 if (rx_ring
->lbq_len
) {
2897 * Allocate large buffer queue.
2900 pci_alloc_consistent(qdev
->pdev
, rx_ring
->lbq_size
,
2901 &rx_ring
->lbq_base_dma
);
2903 if (rx_ring
->lbq_base
== NULL
) {
2904 netif_err(qdev
, ifup
, qdev
->ndev
,
2905 "Large buffer queue allocation failed.\n");
2909 * Allocate large buffer queue control blocks.
2912 kmalloc(rx_ring
->lbq_len
* sizeof(struct bq_desc
),
2914 if (rx_ring
->lbq
== NULL
) {
2915 netif_err(qdev
, ifup
, qdev
->ndev
,
2916 "Large buffer queue control block allocation failed.\n");
2920 ql_init_lbq_ring(qdev
, rx_ring
);
2926 ql_free_rx_resources(qdev
, rx_ring
);
2930 static void ql_tx_ring_clean(struct ql_adapter
*qdev
)
2932 struct tx_ring
*tx_ring
;
2933 struct tx_ring_desc
*tx_ring_desc
;
2937 * Loop through all queues and free
2940 for (j
= 0; j
< qdev
->tx_ring_count
; j
++) {
2941 tx_ring
= &qdev
->tx_ring
[j
];
2942 for (i
= 0; i
< tx_ring
->wq_len
; i
++) {
2943 tx_ring_desc
= &tx_ring
->q
[i
];
2944 if (tx_ring_desc
&& tx_ring_desc
->skb
) {
2945 netif_err(qdev
, ifdown
, qdev
->ndev
,
2946 "Freeing lost SKB %p, from queue %d, index %d.\n",
2947 tx_ring_desc
->skb
, j
,
2948 tx_ring_desc
->index
);
2949 ql_unmap_send(qdev
, tx_ring_desc
,
2950 tx_ring_desc
->map_cnt
);
2951 dev_kfree_skb(tx_ring_desc
->skb
);
2952 tx_ring_desc
->skb
= NULL
;
2958 static void ql_free_mem_resources(struct ql_adapter
*qdev
)
2962 for (i
= 0; i
< qdev
->tx_ring_count
; i
++)
2963 ql_free_tx_resources(qdev
, &qdev
->tx_ring
[i
]);
2964 for (i
= 0; i
< qdev
->rx_ring_count
; i
++)
2965 ql_free_rx_resources(qdev
, &qdev
->rx_ring
[i
]);
2966 ql_free_shadow_space(qdev
);
2969 static int ql_alloc_mem_resources(struct ql_adapter
*qdev
)
2973 /* Allocate space for our shadow registers and such. */
2974 if (ql_alloc_shadow_space(qdev
))
2977 for (i
= 0; i
< qdev
->rx_ring_count
; i
++) {
2978 if (ql_alloc_rx_resources(qdev
, &qdev
->rx_ring
[i
]) != 0) {
2979 netif_err(qdev
, ifup
, qdev
->ndev
,
2980 "RX resource allocation failed.\n");
2984 /* Allocate tx queue resources */
2985 for (i
= 0; i
< qdev
->tx_ring_count
; i
++) {
2986 if (ql_alloc_tx_resources(qdev
, &qdev
->tx_ring
[i
]) != 0) {
2987 netif_err(qdev
, ifup
, qdev
->ndev
,
2988 "TX resource allocation failed.\n");
2995 ql_free_mem_resources(qdev
);
2999 /* Set up the rx ring control block and pass it to the chip.
3000 * The control block is defined as
3001 * "Completion Queue Initialization Control Block", or cqicb.
3003 static int ql_start_rx_ring(struct ql_adapter
*qdev
, struct rx_ring
*rx_ring
)
3005 struct cqicb
*cqicb
= &rx_ring
->cqicb
;
3006 void *shadow_reg
= qdev
->rx_ring_shadow_reg_area
+
3007 (rx_ring
->cq_id
* RX_RING_SHADOW_SPACE
);
3008 u64 shadow_reg_dma
= qdev
->rx_ring_shadow_reg_dma
+
3009 (rx_ring
->cq_id
* RX_RING_SHADOW_SPACE
);
3010 void __iomem
*doorbell_area
=
3011 qdev
->doorbell_area
+ (DB_PAGE_SIZE
* (128 + rx_ring
->cq_id
));
3015 __le64
*base_indirect_ptr
;
3018 /* Set up the shadow registers for this ring. */
3019 rx_ring
->prod_idx_sh_reg
= shadow_reg
;
3020 rx_ring
->prod_idx_sh_reg_dma
= shadow_reg_dma
;
3021 *rx_ring
->prod_idx_sh_reg
= 0;
3022 shadow_reg
+= sizeof(u64
);
3023 shadow_reg_dma
+= sizeof(u64
);
3024 rx_ring
->lbq_base_indirect
= shadow_reg
;
3025 rx_ring
->lbq_base_indirect_dma
= shadow_reg_dma
;
3026 shadow_reg
+= (sizeof(u64
) * MAX_DB_PAGES_PER_BQ(rx_ring
->lbq_len
));
3027 shadow_reg_dma
+= (sizeof(u64
) * MAX_DB_PAGES_PER_BQ(rx_ring
->lbq_len
));
3028 rx_ring
->sbq_base_indirect
= shadow_reg
;
3029 rx_ring
->sbq_base_indirect_dma
= shadow_reg_dma
;
3031 /* PCI doorbell mem area + 0x00 for consumer index register */
3032 rx_ring
->cnsmr_idx_db_reg
= (u32 __iomem
*) doorbell_area
;
3033 rx_ring
->cnsmr_idx
= 0;
3034 rx_ring
->curr_entry
= rx_ring
->cq_base
;
3036 /* PCI doorbell mem area + 0x04 for valid register */
3037 rx_ring
->valid_db_reg
= doorbell_area
+ 0x04;
3039 /* PCI doorbell mem area + 0x18 for large buffer consumer */
3040 rx_ring
->lbq_prod_idx_db_reg
= (u32 __iomem
*) (doorbell_area
+ 0x18);
3042 /* PCI doorbell mem area + 0x1c */
3043 rx_ring
->sbq_prod_idx_db_reg
= (u32 __iomem
*) (doorbell_area
+ 0x1c);
3045 memset((void *)cqicb
, 0, sizeof(struct cqicb
));
3046 cqicb
->msix_vect
= rx_ring
->irq
;
3048 bq_len
= (rx_ring
->cq_len
== 65536) ? 0 : (u16
) rx_ring
->cq_len
;
3049 cqicb
->len
= cpu_to_le16(bq_len
| LEN_V
| LEN_CPP_CONT
);
3051 cqicb
->addr
= cpu_to_le64(rx_ring
->cq_base_dma
);
3053 cqicb
->prod_idx_addr
= cpu_to_le64(rx_ring
->prod_idx_sh_reg_dma
);
3056 * Set up the control block load flags.
3058 cqicb
->flags
= FLAGS_LC
| /* Load queue base address */
3059 FLAGS_LV
| /* Load MSI-X vector */
3060 FLAGS_LI
; /* Load irq delay values */
3061 if (rx_ring
->lbq_len
) {
3062 cqicb
->flags
|= FLAGS_LL
; /* Load lbq values */
3063 tmp
= (u64
)rx_ring
->lbq_base_dma
;
3064 base_indirect_ptr
= (__le64
*) rx_ring
->lbq_base_indirect
;
3067 *base_indirect_ptr
= cpu_to_le64(tmp
);
3068 tmp
+= DB_PAGE_SIZE
;
3069 base_indirect_ptr
++;
3071 } while (page_entries
< MAX_DB_PAGES_PER_BQ(rx_ring
->lbq_len
));
3073 cpu_to_le64(rx_ring
->lbq_base_indirect_dma
);
3074 bq_len
= (rx_ring
->lbq_buf_size
== 65536) ? 0 :
3075 (u16
) rx_ring
->lbq_buf_size
;
3076 cqicb
->lbq_buf_size
= cpu_to_le16(bq_len
);
3077 bq_len
= (rx_ring
->lbq_len
== 65536) ? 0 :
3078 (u16
) rx_ring
->lbq_len
;
3079 cqicb
->lbq_len
= cpu_to_le16(bq_len
);
3080 rx_ring
->lbq_prod_idx
= 0;
3081 rx_ring
->lbq_curr_idx
= 0;
3082 rx_ring
->lbq_clean_idx
= 0;
3083 rx_ring
->lbq_free_cnt
= rx_ring
->lbq_len
;
3085 if (rx_ring
->sbq_len
) {
3086 cqicb
->flags
|= FLAGS_LS
; /* Load sbq values */
3087 tmp
= (u64
)rx_ring
->sbq_base_dma
;
3088 base_indirect_ptr
= (__le64
*) rx_ring
->sbq_base_indirect
;
3091 *base_indirect_ptr
= cpu_to_le64(tmp
);
3092 tmp
+= DB_PAGE_SIZE
;
3093 base_indirect_ptr
++;
3095 } while (page_entries
< MAX_DB_PAGES_PER_BQ(rx_ring
->sbq_len
));
3097 cpu_to_le64(rx_ring
->sbq_base_indirect_dma
);
3098 cqicb
->sbq_buf_size
=
3099 cpu_to_le16((u16
)(rx_ring
->sbq_buf_size
));
3100 bq_len
= (rx_ring
->sbq_len
== 65536) ? 0 :
3101 (u16
) rx_ring
->sbq_len
;
3102 cqicb
->sbq_len
= cpu_to_le16(bq_len
);
3103 rx_ring
->sbq_prod_idx
= 0;
3104 rx_ring
->sbq_curr_idx
= 0;
3105 rx_ring
->sbq_clean_idx
= 0;
3106 rx_ring
->sbq_free_cnt
= rx_ring
->sbq_len
;
3108 switch (rx_ring
->type
) {
3110 cqicb
->irq_delay
= cpu_to_le16(qdev
->tx_coalesce_usecs
);
3111 cqicb
->pkt_delay
= cpu_to_le16(qdev
->tx_max_coalesced_frames
);
3114 /* Inbound completion handling rx_rings run in
3115 * separate NAPI contexts.
3117 netif_napi_add(qdev
->ndev
, &rx_ring
->napi
, ql_napi_poll_msix
,
3119 cqicb
->irq_delay
= cpu_to_le16(qdev
->rx_coalesce_usecs
);
3120 cqicb
->pkt_delay
= cpu_to_le16(qdev
->rx_max_coalesced_frames
);
3123 netif_printk(qdev
, ifup
, KERN_DEBUG
, qdev
->ndev
,
3124 "Invalid rx_ring->type = %d.\n", rx_ring
->type
);
3126 netif_printk(qdev
, ifup
, KERN_DEBUG
, qdev
->ndev
,
3127 "Initializing rx work queue.\n");
3128 err
= ql_write_cfg(qdev
, cqicb
, sizeof(struct cqicb
),
3129 CFG_LCQ
, rx_ring
->cq_id
);
3131 netif_err(qdev
, ifup
, qdev
->ndev
, "Failed to load CQICB.\n");
3137 static int ql_start_tx_ring(struct ql_adapter
*qdev
, struct tx_ring
*tx_ring
)
3139 struct wqicb
*wqicb
= (struct wqicb
*)tx_ring
;
3140 void __iomem
*doorbell_area
=
3141 qdev
->doorbell_area
+ (DB_PAGE_SIZE
* tx_ring
->wq_id
);
3142 void *shadow_reg
= qdev
->tx_ring_shadow_reg_area
+
3143 (tx_ring
->wq_id
* sizeof(u64
));
3144 u64 shadow_reg_dma
= qdev
->tx_ring_shadow_reg_dma
+
3145 (tx_ring
->wq_id
* sizeof(u64
));
3149 * Assign doorbell registers for this tx_ring.
3151 /* TX PCI doorbell mem area for tx producer index */
3152 tx_ring
->prod_idx_db_reg
= (u32 __iomem
*) doorbell_area
;
3153 tx_ring
->prod_idx
= 0;
3154 /* TX PCI doorbell mem area + 0x04 */
3155 tx_ring
->valid_db_reg
= doorbell_area
+ 0x04;
3158 * Assign shadow registers for this tx_ring.
3160 tx_ring
->cnsmr_idx_sh_reg
= shadow_reg
;
3161 tx_ring
->cnsmr_idx_sh_reg_dma
= shadow_reg_dma
;
3163 wqicb
->len
= cpu_to_le16(tx_ring
->wq_len
| Q_LEN_V
| Q_LEN_CPP_CONT
);
3164 wqicb
->flags
= cpu_to_le16(Q_FLAGS_LC
|
3165 Q_FLAGS_LB
| Q_FLAGS_LI
| Q_FLAGS_LO
);
3166 wqicb
->cq_id_rss
= cpu_to_le16(tx_ring
->cq_id
);
3168 wqicb
->addr
= cpu_to_le64(tx_ring
->wq_base_dma
);
3170 wqicb
->cnsmr_idx_addr
= cpu_to_le64(tx_ring
->cnsmr_idx_sh_reg_dma
);
3172 ql_init_tx_ring(qdev
, tx_ring
);
3174 err
= ql_write_cfg(qdev
, wqicb
, sizeof(*wqicb
), CFG_LRQ
,
3175 (u16
) tx_ring
->wq_id
);
3177 netif_err(qdev
, ifup
, qdev
->ndev
, "Failed to load tx_ring.\n");
3180 netif_printk(qdev
, ifup
, KERN_DEBUG
, qdev
->ndev
,
3181 "Successfully loaded WQICB.\n");
3185 static void ql_disable_msix(struct ql_adapter
*qdev
)
3187 if (test_bit(QL_MSIX_ENABLED
, &qdev
->flags
)) {
3188 pci_disable_msix(qdev
->pdev
);
3189 clear_bit(QL_MSIX_ENABLED
, &qdev
->flags
);
3190 kfree(qdev
->msi_x_entry
);
3191 qdev
->msi_x_entry
= NULL
;
3192 } else if (test_bit(QL_MSI_ENABLED
, &qdev
->flags
)) {
3193 pci_disable_msi(qdev
->pdev
);
3194 clear_bit(QL_MSI_ENABLED
, &qdev
->flags
);
3198 /* We start by trying to get the number of vectors
3199 * stored in qdev->intr_count. If we don't get that
3200 * many then we reduce the count and try again.
3202 static void ql_enable_msix(struct ql_adapter
*qdev
)
3206 /* Get the MSIX vectors. */
3207 if (qlge_irq_type
== MSIX_IRQ
) {
3208 /* Try to alloc space for the msix struct,
3209 * if it fails then go to MSI/legacy.
3211 qdev
->msi_x_entry
= kcalloc(qdev
->intr_count
,
3212 sizeof(struct msix_entry
),
3214 if (!qdev
->msi_x_entry
) {
3215 qlge_irq_type
= MSI_IRQ
;
3219 for (i
= 0; i
< qdev
->intr_count
; i
++)
3220 qdev
->msi_x_entry
[i
].entry
= i
;
3222 /* Loop to get our vectors. We start with
3223 * what we want and settle for what we get.
3226 err
= pci_enable_msix(qdev
->pdev
,
3227 qdev
->msi_x_entry
, qdev
->intr_count
);
3229 qdev
->intr_count
= err
;
3233 kfree(qdev
->msi_x_entry
);
3234 qdev
->msi_x_entry
= NULL
;
3235 netif_warn(qdev
, ifup
, qdev
->ndev
,
3236 "MSI-X Enable failed, trying MSI.\n");
3237 qdev
->intr_count
= 1;
3238 qlge_irq_type
= MSI_IRQ
;
3239 } else if (err
== 0) {
3240 set_bit(QL_MSIX_ENABLED
, &qdev
->flags
);
3241 netif_info(qdev
, ifup
, qdev
->ndev
,
3242 "MSI-X Enabled, got %d vectors.\n",
3248 qdev
->intr_count
= 1;
3249 if (qlge_irq_type
== MSI_IRQ
) {
3250 if (!pci_enable_msi(qdev
->pdev
)) {
3251 set_bit(QL_MSI_ENABLED
, &qdev
->flags
);
3252 netif_info(qdev
, ifup
, qdev
->ndev
,
3253 "Running with MSI interrupts.\n");
3257 qlge_irq_type
= LEG_IRQ
;
3258 netif_printk(qdev
, ifup
, KERN_DEBUG
, qdev
->ndev
,
3259 "Running with legacy interrupts.\n");
3262 /* Each vector services 1 RSS ring and and 1 or more
3263 * TX completion rings. This function loops through
3264 * the TX completion rings and assigns the vector that
3265 * will service it. An example would be if there are
3266 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3267 * This would mean that vector 0 would service RSS ring 0
3268 * and TX competion rings 0,1,2 and 3. Vector 1 would
3269 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3271 static void ql_set_tx_vect(struct ql_adapter
*qdev
)
3274 u32 tx_rings_per_vector
= qdev
->tx_ring_count
/ qdev
->intr_count
;
3276 if (likely(test_bit(QL_MSIX_ENABLED
, &qdev
->flags
))) {
3277 /* Assign irq vectors to TX rx_rings.*/
3278 for (vect
= 0, j
= 0, i
= qdev
->rss_ring_count
;
3279 i
< qdev
->rx_ring_count
; i
++) {
3280 if (j
== tx_rings_per_vector
) {
3284 qdev
->rx_ring
[i
].irq
= vect
;
3288 /* For single vector all rings have an irq
3291 for (i
= 0; i
< qdev
->rx_ring_count
; i
++)
3292 qdev
->rx_ring
[i
].irq
= 0;
3296 /* Set the interrupt mask for this vector. Each vector
3297 * will service 1 RSS ring and 1 or more TX completion
3298 * rings. This function sets up a bit mask per vector
3299 * that indicates which rings it services.
3301 static void ql_set_irq_mask(struct ql_adapter
*qdev
, struct intr_context
*ctx
)
3303 int j
, vect
= ctx
->intr
;
3304 u32 tx_rings_per_vector
= qdev
->tx_ring_count
/ qdev
->intr_count
;
3306 if (likely(test_bit(QL_MSIX_ENABLED
, &qdev
->flags
))) {
3307 /* Add the RSS ring serviced by this vector
3310 ctx
->irq_mask
= (1 << qdev
->rx_ring
[vect
].cq_id
);
3311 /* Add the TX ring(s) serviced by this vector
3313 for (j
= 0; j
< tx_rings_per_vector
; j
++) {
3315 (1 << qdev
->rx_ring
[qdev
->rss_ring_count
+
3316 (vect
* tx_rings_per_vector
) + j
].cq_id
);
3319 /* For single vector we just shift each queue's
3322 for (j
= 0; j
< qdev
->rx_ring_count
; j
++)
3323 ctx
->irq_mask
|= (1 << qdev
->rx_ring
[j
].cq_id
);
3328 * Here we build the intr_context structures based on
3329 * our rx_ring count and intr vector count.
3330 * The intr_context structure is used to hook each vector
3331 * to possibly different handlers.
3333 static void ql_resolve_queues_to_irqs(struct ql_adapter
*qdev
)
3336 struct intr_context
*intr_context
= &qdev
->intr_context
[0];
3338 if (likely(test_bit(QL_MSIX_ENABLED
, &qdev
->flags
))) {
3339 /* Each rx_ring has it's
3340 * own intr_context since we have separate
3341 * vectors for each queue.
3343 for (i
= 0; i
< qdev
->intr_count
; i
++, intr_context
++) {
3344 qdev
->rx_ring
[i
].irq
= i
;
3345 intr_context
->intr
= i
;
3346 intr_context
->qdev
= qdev
;
3347 /* Set up this vector's bit-mask that indicates
3348 * which queues it services.
3350 ql_set_irq_mask(qdev
, intr_context
);
3352 * We set up each vectors enable/disable/read bits so
3353 * there's no bit/mask calculations in the critical path.
3355 intr_context
->intr_en_mask
=
3356 INTR_EN_TYPE_MASK
| INTR_EN_INTR_MASK
|
3357 INTR_EN_TYPE_ENABLE
| INTR_EN_IHD_MASK
| INTR_EN_IHD
3359 intr_context
->intr_dis_mask
=
3360 INTR_EN_TYPE_MASK
| INTR_EN_INTR_MASK
|
3361 INTR_EN_TYPE_DISABLE
| INTR_EN_IHD_MASK
|
3363 intr_context
->intr_read_mask
=
3364 INTR_EN_TYPE_MASK
| INTR_EN_INTR_MASK
|
3365 INTR_EN_TYPE_READ
| INTR_EN_IHD_MASK
| INTR_EN_IHD
|
3368 /* The first vector/queue handles
3369 * broadcast/multicast, fatal errors,
3370 * and firmware events. This in addition
3371 * to normal inbound NAPI processing.
3373 intr_context
->handler
= qlge_isr
;
3374 sprintf(intr_context
->name
, "%s-rx-%d",
3375 qdev
->ndev
->name
, i
);
3378 * Inbound queues handle unicast frames only.
3380 intr_context
->handler
= qlge_msix_rx_isr
;
3381 sprintf(intr_context
->name
, "%s-rx-%d",
3382 qdev
->ndev
->name
, i
);
3387 * All rx_rings use the same intr_context since
3388 * there is only one vector.
3390 intr_context
->intr
= 0;
3391 intr_context
->qdev
= qdev
;
3393 * We set up each vectors enable/disable/read bits so
3394 * there's no bit/mask calculations in the critical path.
3396 intr_context
->intr_en_mask
=
3397 INTR_EN_TYPE_MASK
| INTR_EN_INTR_MASK
| INTR_EN_TYPE_ENABLE
;
3398 intr_context
->intr_dis_mask
=
3399 INTR_EN_TYPE_MASK
| INTR_EN_INTR_MASK
|
3400 INTR_EN_TYPE_DISABLE
;
3401 intr_context
->intr_read_mask
=
3402 INTR_EN_TYPE_MASK
| INTR_EN_INTR_MASK
| INTR_EN_TYPE_READ
;
3404 * Single interrupt means one handler for all rings.
3406 intr_context
->handler
= qlge_isr
;
3407 sprintf(intr_context
->name
, "%s-single_irq", qdev
->ndev
->name
);
3408 /* Set up this vector's bit-mask that indicates
3409 * which queues it services. In this case there is
3410 * a single vector so it will service all RSS and
3411 * TX completion rings.
3413 ql_set_irq_mask(qdev
, intr_context
);
3415 /* Tell the TX completion rings which MSIx vector
3416 * they will be using.
3418 ql_set_tx_vect(qdev
);
3421 static void ql_free_irq(struct ql_adapter
*qdev
)
3424 struct intr_context
*intr_context
= &qdev
->intr_context
[0];
3426 for (i
= 0; i
< qdev
->intr_count
; i
++, intr_context
++) {
3427 if (intr_context
->hooked
) {
3428 if (test_bit(QL_MSIX_ENABLED
, &qdev
->flags
)) {
3429 free_irq(qdev
->msi_x_entry
[i
].vector
,
3431 netif_printk(qdev
, ifdown
, KERN_DEBUG
, qdev
->ndev
,
3432 "freeing msix interrupt %d.\n", i
);
3434 free_irq(qdev
->pdev
->irq
, &qdev
->rx_ring
[0]);
3435 netif_printk(qdev
, ifdown
, KERN_DEBUG
, qdev
->ndev
,
3436 "freeing msi interrupt %d.\n", i
);
3440 ql_disable_msix(qdev
);
3443 static int ql_request_irq(struct ql_adapter
*qdev
)
3447 struct pci_dev
*pdev
= qdev
->pdev
;
3448 struct intr_context
*intr_context
= &qdev
->intr_context
[0];
3450 ql_resolve_queues_to_irqs(qdev
);
3452 for (i
= 0; i
< qdev
->intr_count
; i
++, intr_context
++) {
3453 atomic_set(&intr_context
->irq_cnt
, 0);
3454 if (test_bit(QL_MSIX_ENABLED
, &qdev
->flags
)) {
3455 status
= request_irq(qdev
->msi_x_entry
[i
].vector
,
3456 intr_context
->handler
,
3461 netif_err(qdev
, ifup
, qdev
->ndev
,
3462 "Failed request for MSIX interrupt %d.\n",
3466 netif_printk(qdev
, ifup
, KERN_DEBUG
, qdev
->ndev
,
3467 "Hooked intr %d, queue type %s, with name %s.\n",
3469 qdev
->rx_ring
[i
].type
== DEFAULT_Q
?
3471 qdev
->rx_ring
[i
].type
== TX_Q
?
3473 qdev
->rx_ring
[i
].type
== RX_Q
?
3475 intr_context
->name
);
3478 netif_printk(qdev
, ifup
, KERN_DEBUG
, qdev
->ndev
,
3479 "trying msi or legacy interrupts.\n");
3480 netif_printk(qdev
, ifup
, KERN_DEBUG
, qdev
->ndev
,
3481 "%s: irq = %d.\n", __func__
, pdev
->irq
);
3482 netif_printk(qdev
, ifup
, KERN_DEBUG
, qdev
->ndev
,
3483 "%s: context->name = %s.\n", __func__
,
3484 intr_context
->name
);
3485 netif_printk(qdev
, ifup
, KERN_DEBUG
, qdev
->ndev
,
3486 "%s: dev_id = 0x%p.\n", __func__
,
3489 request_irq(pdev
->irq
, qlge_isr
,
3490 test_bit(QL_MSI_ENABLED
,
3492 flags
) ? 0 : IRQF_SHARED
,
3493 intr_context
->name
, &qdev
->rx_ring
[0]);
3497 netif_err(qdev
, ifup
, qdev
->ndev
,
3498 "Hooked intr %d, queue type %s, with name %s.\n",
3500 qdev
->rx_ring
[0].type
== DEFAULT_Q
?
3502 qdev
->rx_ring
[0].type
== TX_Q
? "TX_Q" :
3503 qdev
->rx_ring
[0].type
== RX_Q
? "RX_Q" : "",
3504 intr_context
->name
);
3506 intr_context
->hooked
= 1;
3510 netif_err(qdev
, ifup
, qdev
->ndev
, "Failed to get the interrupts!!!/n");
static int ql_start_rss(struct ql_adapter *qdev)
{
	u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
				0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
				0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
				0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
				0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
				0xbe, 0xac, 0x01, 0xfa};
	struct ricb *ricb = &qdev->ricb;
	int status = 0;
	int i;
	u8 *hash_id = (u8 *) ricb->hash_cq_id;

	memset((void *)ricb, 0, sizeof(*ricb));

	ricb->base_cq = RSS_L4K;
	ricb->flags =
		(RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
	ricb->mask = cpu_to_le16((u16)(0x3ff));

	/*
	 * Fill out the Indirection Table.
	 */
	for (i = 0; i < 1024; i++)
		hash_id[i] = (i & (qdev->rss_ring_count - 1));

	memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
	memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);

	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");

	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
		return status;
	}
	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "Successfully loaded RICB.\n");
	return status;
}
3556 static int ql_clear_routing_entries(struct ql_adapter
*qdev
)
3560 status
= ql_sem_spinlock(qdev
, SEM_RT_IDX_MASK
);
3563 /* Clear all the entries in the routing table. */
3564 for (i
= 0; i
< 16; i
++) {
3565 status
= ql_set_routing_reg(qdev
, i
, 0, 0);
3567 netif_err(qdev
, ifup
, qdev
->ndev
,
3568 "Failed to init routing register for CAM packets.\n");
3572 ql_sem_unlock(qdev
, SEM_RT_IDX_MASK
);
3576 /* Initialize the frame-to-queue routing. */
3577 static int ql_route_initialize(struct ql_adapter
*qdev
)
3581 /* Clear all the entries in the routing table. */
3582 status
= ql_clear_routing_entries(qdev
);
3586 status
= ql_sem_spinlock(qdev
, SEM_RT_IDX_MASK
);
3590 status
= ql_set_routing_reg(qdev
, RT_IDX_ALL_ERR_SLOT
, RT_IDX_ERR
, 1);
3592 netif_err(qdev
, ifup
, qdev
->ndev
,
3593 "Failed to init routing register for error packets.\n");
3596 status
= ql_set_routing_reg(qdev
, RT_IDX_BCAST_SLOT
, RT_IDX_BCAST
, 1);
3598 netif_err(qdev
, ifup
, qdev
->ndev
,
3599 "Failed to init routing register for broadcast packets.\n");
3602 /* If we have more than one inbound queue, then turn on RSS in the
3605 if (qdev
->rss_ring_count
> 1) {
3606 status
= ql_set_routing_reg(qdev
, RT_IDX_RSS_MATCH_SLOT
,
3607 RT_IDX_RSS_MATCH
, 1);
3609 netif_err(qdev
, ifup
, qdev
->ndev
,
3610 "Failed to init routing register for MATCH RSS packets.\n");
3615 status
= ql_set_routing_reg(qdev
, RT_IDX_CAM_HIT_SLOT
,
3618 netif_err(qdev
, ifup
, qdev
->ndev
,
3619 "Failed to init routing register for CAM packets.\n");
3621 ql_sem_unlock(qdev
, SEM_RT_IDX_MASK
);
3625 int ql_cam_route_initialize(struct ql_adapter
*qdev
)
3629 /* If check if the link is up and use to
3630 * determine if we are setting or clearing
3631 * the MAC address in the CAM.
3633 set
= ql_read32(qdev
, STS
);
3634 set
&= qdev
->port_link_up
;
3635 status
= ql_set_mac_addr(qdev
, set
);
3637 netif_err(qdev
, ifup
, qdev
->ndev
, "Failed to init mac address.\n");
3641 status
= ql_route_initialize(qdev
);
3643 netif_err(qdev
, ifup
, qdev
->ndev
, "Failed to init routing table.\n");
3648 static int ql_adapter_initialize(struct ql_adapter
*qdev
)
3655 * Set up the System register to halt on errors.
3657 value
= SYS_EFE
| SYS_FAE
;
3659 ql_write32(qdev
, SYS
, mask
| value
);
3661 /* Set the default queue, and VLAN behavior. */
3662 value
= NIC_RCV_CFG_DFQ
| NIC_RCV_CFG_RV
;
3663 mask
= NIC_RCV_CFG_DFQ_MASK
| (NIC_RCV_CFG_RV
<< 16);
3664 ql_write32(qdev
, NIC_RCV_CFG
, (mask
| value
));
3666 /* Set the MPI interrupt to enabled. */
3667 ql_write32(qdev
, INTR_MASK
, (INTR_MASK_PI
<< 16) | INTR_MASK_PI
);
3669 /* Enable the function, set pagesize, enable error checking. */
3670 value
= FSC_FE
| FSC_EPC_INBOUND
| FSC_EPC_OUTBOUND
|
3671 FSC_EC
| FSC_VM_PAGE_4K
;
3672 value
|= SPLT_SETTING
;
3674 /* Set/clear header splitting. */
3675 mask
= FSC_VM_PAGESIZE_MASK
|
3676 FSC_DBL_MASK
| FSC_DBRST_MASK
| (value
<< 16);
3677 ql_write32(qdev
, FSC
, mask
| value
);
3679 ql_write32(qdev
, SPLT_HDR
, SPLT_LEN
);
3681 /* Set RX packet routing to use port/pci function on which the
3682 * packet arrived on in addition to usual frame routing.
3683 * This is helpful on bonding where both interfaces can have
3684 * the same MAC address.
3686 ql_write32(qdev
, RST_FO
, RST_FO_RR_MASK
| RST_FO_RR_RCV_FUNC_CQ
);
3687 /* Reroute all packets to our Interface.
3688 * They may have been routed to MPI firmware
3691 value
= ql_read32(qdev
, MGMT_RCV_CFG
);
3692 value
&= ~MGMT_RCV_CFG_RM
;
3695 /* Sticky reg needs clearing due to WOL. */
3696 ql_write32(qdev
, MGMT_RCV_CFG
, mask
);
3697 ql_write32(qdev
, MGMT_RCV_CFG
, mask
| value
);
3699 /* Default WOL is enable on Mezz cards */
3700 if (qdev
->pdev
->subsystem_device
== 0x0068 ||
3701 qdev
->pdev
->subsystem_device
== 0x0180)
3702 qdev
->wol
= WAKE_MAGIC
;
3704 /* Start up the rx queues. */
3705 for (i
= 0; i
< qdev
->rx_ring_count
; i
++) {
3706 status
= ql_start_rx_ring(qdev
, &qdev
->rx_ring
[i
]);
3708 netif_err(qdev
, ifup
, qdev
->ndev
,
3709 "Failed to start rx ring[%d].\n", i
);
3714 /* If there is more than one inbound completion queue
3715 * then download a RICB to configure RSS.
3717 if (qdev
->rss_ring_count
> 1) {
3718 status
= ql_start_rss(qdev
);
3720 netif_err(qdev
, ifup
, qdev
->ndev
, "Failed to start RSS.\n");
3725 /* Start up the tx queues. */
3726 for (i
= 0; i
< qdev
->tx_ring_count
; i
++) {
3727 status
= ql_start_tx_ring(qdev
, &qdev
->tx_ring
[i
]);
3729 netif_err(qdev
, ifup
, qdev
->ndev
,
3730 "Failed to start tx ring[%d].\n", i
);
3735 /* Initialize the port and set the max framesize. */
3736 status
= qdev
->nic_ops
->port_initialize(qdev
);
3738 netif_err(qdev
, ifup
, qdev
->ndev
, "Failed to start port.\n");
3740 /* Set up the MAC address and frame routing filter. */
3741 status
= ql_cam_route_initialize(qdev
);
3743 netif_err(qdev
, ifup
, qdev
->ndev
,
3744 "Failed to init CAM/Routing tables.\n");
3748 /* Start NAPI for the RSS queues. */
3749 for (i
= 0; i
< qdev
->rss_ring_count
; i
++) {
3750 netif_printk(qdev
, ifup
, KERN_DEBUG
, qdev
->ndev
,
3751 "Enabling NAPI for rx_ring[%d].\n", i
);
3752 napi_enable(&qdev
->rx_ring
[i
].napi
);
3758 /* Issue soft reset to chip. */
3759 static int ql_adapter_reset(struct ql_adapter
*qdev
)
3763 unsigned long end_jiffies
;
3765 /* Clear all the entries in the routing table. */
3766 status
= ql_clear_routing_entries(qdev
);
3768 netif_err(qdev
, ifup
, qdev
->ndev
, "Failed to clear routing bits.\n");
3772 end_jiffies
= jiffies
+
3773 max((unsigned long)1, usecs_to_jiffies(30));
3775 /* Stop management traffic. */
3776 ql_mb_set_mgmnt_traffic_ctl(qdev
, MB_SET_MPI_TFK_STOP
);
3778 /* Wait for the NIC and MGMNT FIFOs to empty. */
3779 ql_wait_fifo_empty(qdev
);
3781 ql_write32(qdev
, RST_FO
, (RST_FO_FR
<< 16) | RST_FO_FR
);
3784 value
= ql_read32(qdev
, RST_FO
);
3785 if ((value
& RST_FO_FR
) == 0)
3788 } while (time_before(jiffies
, end_jiffies
));
3790 if (value
& RST_FO_FR
) {
3791 netif_err(qdev
, ifdown
, qdev
->ndev
,
3792 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3793 status
= -ETIMEDOUT
;
3796 /* Resume management traffic. */
3797 ql_mb_set_mgmnt_traffic_ctl(qdev
, MB_SET_MPI_TFK_RESUME
);
3801 static void ql_display_dev_info(struct net_device
*ndev
)
3803 struct ql_adapter
*qdev
= (struct ql_adapter
*)netdev_priv(ndev
);
3805 netif_info(qdev
, probe
, qdev
->ndev
,
3806 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3807 "XG Roll = %d, XG Rev = %d.\n",
3810 qdev
->chip_rev_id
& 0x0000000f,
3811 qdev
->chip_rev_id
>> 4 & 0x0000000f,
3812 qdev
->chip_rev_id
>> 8 & 0x0000000f,
3813 qdev
->chip_rev_id
>> 12 & 0x0000000f);
3814 netif_info(qdev
, probe
, qdev
->ndev
,
3815 "MAC address %pM\n", ndev
->dev_addr
);
3818 int ql_wol(struct ql_adapter
*qdev
)
3821 u32 wol
= MB_WOL_DISABLE
;
3823 /* The CAM is still intact after a reset, but if we
3824 * are doing WOL, then we may need to program the
3825 * routing regs. We would also need to issue the mailbox
3826 * commands to instruct the MPI what to do per the ethtool
3830 if (qdev
->wol
& (WAKE_ARP
| WAKE_MAGICSECURE
| WAKE_PHY
| WAKE_UCAST
|
3831 WAKE_MCAST
| WAKE_BCAST
)) {
3832 netif_err(qdev
, ifdown
, qdev
->ndev
,
3833 "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
3838 if (qdev
->wol
& WAKE_MAGIC
) {
3839 status
= ql_mb_wol_set_magic(qdev
, 1);
3841 netif_err(qdev
, ifdown
, qdev
->ndev
,
3842 "Failed to set magic packet on %s.\n",
3846 netif_info(qdev
, drv
, qdev
->ndev
,
3847 "Enabled magic packet successfully on %s.\n",
3850 wol
|= MB_WOL_MAGIC_PKT
;
3854 wol
|= MB_WOL_MODE_ON
;
3855 status
= ql_mb_wol_mode(qdev
, wol
);
3856 netif_err(qdev
, drv
, qdev
->ndev
,
3857 "WOL %s (wol code 0x%x) on %s\n",
3858 (status
== 0) ? "Successfully set" : "Failed",
3859 wol
, qdev
->ndev
->name
);
3865 static int ql_adapter_down(struct ql_adapter
*qdev
)
3871 /* Don't kill the reset worker thread if we
3872 * are in the process of recovery.
3874 if (test_bit(QL_ADAPTER_UP
, &qdev
->flags
))
3875 cancel_delayed_work_sync(&qdev
->asic_reset_work
);
3876 cancel_delayed_work_sync(&qdev
->mpi_reset_work
);
3877 cancel_delayed_work_sync(&qdev
->mpi_work
);
3878 cancel_delayed_work_sync(&qdev
->mpi_idc_work
);
3879 cancel_delayed_work_sync(&qdev
->mpi_core_to_log
);
3880 cancel_delayed_work_sync(&qdev
->mpi_port_cfg_work
);
3882 for (i
= 0; i
< qdev
->rss_ring_count
; i
++)
3883 napi_disable(&qdev
->rx_ring
[i
].napi
);
3885 clear_bit(QL_ADAPTER_UP
, &qdev
->flags
);
3887 ql_disable_interrupts(qdev
);
3889 ql_tx_ring_clean(qdev
);
3891 /* Call netif_napi_del() from common point.
3893 for (i
= 0; i
< qdev
->rss_ring_count
; i
++)
3894 netif_napi_del(&qdev
->rx_ring
[i
].napi
);
3896 ql_free_rx_buffers(qdev
);
3898 status
= ql_adapter_reset(qdev
);
3900 netif_err(qdev
, ifdown
, qdev
->ndev
, "reset(func #%d) FAILED!\n",
3905 static int ql_adapter_up(struct ql_adapter
*qdev
)
3909 err
= ql_adapter_initialize(qdev
);
3911 netif_info(qdev
, ifup
, qdev
->ndev
, "Unable to initialize adapter.\n");
3914 set_bit(QL_ADAPTER_UP
, &qdev
->flags
);
3915 ql_alloc_rx_buffers(qdev
);
3916 /* If the port is initialized and the
3917 * link is up the turn on the carrier.
3919 if ((ql_read32(qdev
, STS
) & qdev
->port_init
) &&
3920 (ql_read32(qdev
, STS
) & qdev
->port_link_up
))
3922 ql_enable_interrupts(qdev
);
3923 ql_enable_all_completion_interrupts(qdev
);
3924 netif_tx_start_all_queues(qdev
->ndev
);
3928 ql_adapter_reset(qdev
);
3932 static void ql_release_adapter_resources(struct ql_adapter
*qdev
)
3934 ql_free_mem_resources(qdev
);
3938 static int ql_get_adapter_resources(struct ql_adapter
*qdev
)
3942 if (ql_alloc_mem_resources(qdev
)) {
3943 netif_err(qdev
, ifup
, qdev
->ndev
, "Unable to allocate memory.\n");
3946 status
= ql_request_irq(qdev
);
3950 static int qlge_close(struct net_device
*ndev
)
3952 struct ql_adapter
*qdev
= netdev_priv(ndev
);
3954 /* If we hit pci_channel_io_perm_failure
3955 * failure condition, then we already
3956 * brought the adapter down.
3958 if (test_bit(QL_EEH_FATAL
, &qdev
->flags
)) {
3959 netif_err(qdev
, drv
, qdev
->ndev
, "EEH fatal did unload.\n");
3960 clear_bit(QL_EEH_FATAL
, &qdev
->flags
);
3965 * Wait for device to recover from a reset.
3966 * (Rarely happens, but possible.)
3968 while (!test_bit(QL_ADAPTER_UP
, &qdev
->flags
))
3970 ql_adapter_down(qdev
);
3971 ql_release_adapter_resources(qdev
);
3975 static int ql_configure_rings(struct ql_adapter
*qdev
)
3978 struct rx_ring
*rx_ring
;
3979 struct tx_ring
*tx_ring
;
3980 int cpu_cnt
= min(MAX_CPUS
, (int)num_online_cpus());
3981 unsigned int lbq_buf_len
= (qdev
->ndev
->mtu
> 1500) ?
3982 LARGE_BUFFER_MAX_SIZE
: LARGE_BUFFER_MIN_SIZE
;
3984 qdev
->lbq_buf_order
= get_order(lbq_buf_len
);
3986 /* In a perfect world we have one RSS ring for each CPU
3987 * and each has it's own vector. To do that we ask for
3988 * cpu_cnt vectors. ql_enable_msix() will adjust the
3989 * vector count to what we actually get. We then
3990 * allocate an RSS ring for each.
3991 * Essentially, we are doing min(cpu_count, msix_vector_count).
3993 qdev
->intr_count
= cpu_cnt
;
3994 ql_enable_msix(qdev
);
3995 /* Adjust the RSS ring count to the actual vector count. */
3996 qdev
->rss_ring_count
= qdev
->intr_count
;
3997 qdev
->tx_ring_count
= cpu_cnt
;
3998 qdev
->rx_ring_count
= qdev
->tx_ring_count
+ qdev
->rss_ring_count
;
4000 for (i
= 0; i
< qdev
->tx_ring_count
; i
++) {
4001 tx_ring
= &qdev
->tx_ring
[i
];
4002 memset((void *)tx_ring
, 0, sizeof(*tx_ring
));
4003 tx_ring
->qdev
= qdev
;
4005 tx_ring
->wq_len
= qdev
->tx_ring_size
;
4007 tx_ring
->wq_len
* sizeof(struct ob_mac_iocb_req
);
4010 * The completion queue ID for the tx rings start
4011 * immediately after the rss rings.
4013 tx_ring
->cq_id
= qdev
->rss_ring_count
+ i
;
4016 for (i
= 0; i
< qdev
->rx_ring_count
; i
++) {
4017 rx_ring
= &qdev
->rx_ring
[i
];
4018 memset((void *)rx_ring
, 0, sizeof(*rx_ring
));
4019 rx_ring
->qdev
= qdev
;
4021 rx_ring
->cpu
= i
% cpu_cnt
; /* CPU to run handler on. */
4022 if (i
< qdev
->rss_ring_count
) {
4024 * Inbound (RSS) queues.
4026 rx_ring
->cq_len
= qdev
->rx_ring_size
;
4028 rx_ring
->cq_len
* sizeof(struct ql_net_rsp_iocb
);
4029 rx_ring
->lbq_len
= NUM_LARGE_BUFFERS
;
4031 rx_ring
->lbq_len
* sizeof(__le64
);
4032 rx_ring
->lbq_buf_size
= (u16
)lbq_buf_len
;
4033 netif_printk(qdev
, ifup
, KERN_DEBUG
, qdev
->ndev
,
4034 "lbq_buf_size %d, order = %d\n",
4035 rx_ring
->lbq_buf_size
,
4036 qdev
->lbq_buf_order
);
4037 rx_ring
->sbq_len
= NUM_SMALL_BUFFERS
;
4039 rx_ring
->sbq_len
* sizeof(__le64
);
4040 rx_ring
->sbq_buf_size
= SMALL_BUF_MAP_SIZE
;
4041 rx_ring
->type
= RX_Q
;
4044 * Outbound queue handles outbound completions only.
4046 /* outbound cq is same size as tx_ring it services. */
4047 rx_ring
->cq_len
= qdev
->tx_ring_size
;
4049 rx_ring
->cq_len
* sizeof(struct ql_net_rsp_iocb
);
4050 rx_ring
->lbq_len
= 0;
4051 rx_ring
->lbq_size
= 0;
4052 rx_ring
->lbq_buf_size
= 0;
4053 rx_ring
->sbq_len
= 0;
4054 rx_ring
->sbq_size
= 0;
4055 rx_ring
->sbq_buf_size
= 0;
4056 rx_ring
->type
= TX_Q
;
4062 static int qlge_open(struct net_device
*ndev
)
4065 struct ql_adapter
*qdev
= netdev_priv(ndev
);
4067 err
= ql_adapter_reset(qdev
);
4071 err
= ql_configure_rings(qdev
);
4075 err
= ql_get_adapter_resources(qdev
);
4079 err
= ql_adapter_up(qdev
);
4086 ql_release_adapter_resources(qdev
);
4090 static int ql_change_rx_buffers(struct ql_adapter
*qdev
)
4092 struct rx_ring
*rx_ring
;
4096 /* Wait for an oustanding reset to complete. */
4097 if (!test_bit(QL_ADAPTER_UP
, &qdev
->flags
)) {
4099 while (i
-- && !test_bit(QL_ADAPTER_UP
, &qdev
->flags
)) {
4100 netif_err(qdev
, ifup
, qdev
->ndev
,
4101 "Waiting for adapter UP...\n");
4106 netif_err(qdev
, ifup
, qdev
->ndev
,
4107 "Timed out waiting for adapter UP\n");
4112 status
= ql_adapter_down(qdev
);
4116 /* Get the new rx buffer size. */
4117 lbq_buf_len
= (qdev
->ndev
->mtu
> 1500) ?
4118 LARGE_BUFFER_MAX_SIZE
: LARGE_BUFFER_MIN_SIZE
;
4119 qdev
->lbq_buf_order
= get_order(lbq_buf_len
);
4121 for (i
= 0; i
< qdev
->rss_ring_count
; i
++) {
4122 rx_ring
= &qdev
->rx_ring
[i
];
4123 /* Set the new size. */
4124 rx_ring
->lbq_buf_size
= lbq_buf_len
;
4127 status
= ql_adapter_up(qdev
);
4133 netif_alert(qdev
, ifup
, qdev
->ndev
,
4134 "Driver up/down cycle failed, closing device.\n");
4135 set_bit(QL_ADAPTER_UP
, &qdev
->flags
);
4136 dev_close(qdev
->ndev
);
4140 static int qlge_change_mtu(struct net_device
*ndev
, int new_mtu
)
4142 struct ql_adapter
*qdev
= netdev_priv(ndev
);
4145 if (ndev
->mtu
== 1500 && new_mtu
== 9000) {
4146 netif_err(qdev
, ifup
, qdev
->ndev
, "Changing to jumbo MTU.\n");
4147 } else if (ndev
->mtu
== 9000 && new_mtu
== 1500) {
4148 netif_err(qdev
, ifup
, qdev
->ndev
, "Changing to normal MTU.\n");
4152 queue_delayed_work(qdev
->workqueue
,
4153 &qdev
->mpi_port_cfg_work
, 3*HZ
);
4155 ndev
->mtu
= new_mtu
;
4157 if (!netif_running(qdev
->ndev
)) {
4161 status
= ql_change_rx_buffers(qdev
);
4163 netif_err(qdev
, ifup
, qdev
->ndev
,
4164 "Changing MTU failed.\n");
static struct net_device_stats *qlge_get_stats(struct net_device
					       *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct rx_ring *rx_ring = &qdev->rx_ring[0];
	struct tx_ring *tx_ring = &qdev->tx_ring[0];
	unsigned long pkts, mcast, dropped, errors, bytes;
	int i;

	/* Gather up the rx ring stats. */
	pkts = mcast = dropped = errors = bytes = 0;
	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
		pkts += rx_ring->rx_packets;
		bytes += rx_ring->rx_bytes;
		dropped += rx_ring->rx_dropped;
		errors += rx_ring->rx_errors;
		mcast += rx_ring->rx_multicast;
	}
	ndev->stats.rx_packets = pkts;
	ndev->stats.rx_bytes = bytes;
	ndev->stats.rx_dropped = dropped;
	ndev->stats.rx_errors = errors;
	ndev->stats.multicast = mcast;

	/* Gather up the tx ring stats. */
	pkts = errors = bytes = 0;
	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
		pkts += tx_ring->tx_packets;
		bytes += tx_ring->tx_bytes;
		errors += tx_ring->tx_errors;
	}
	ndev->stats.tx_packets = pkts;
	ndev->stats.tx_bytes = bytes;
	ndev->stats.tx_errors = errors;
	return &ndev->stats;
}
*ndev
)
4209 struct ql_adapter
*qdev
= (struct ql_adapter
*)netdev_priv(ndev
);
4210 struct dev_mc_list
*mc_ptr
;
4213 status
= ql_sem_spinlock(qdev
, SEM_RT_IDX_MASK
);
4217 * Set or clear promiscuous mode if a
4218 * transition is taking place.
4220 if (ndev
->flags
& IFF_PROMISC
) {
4221 if (!test_bit(QL_PROMISCUOUS
, &qdev
->flags
)) {
4222 if (ql_set_routing_reg
4223 (qdev
, RT_IDX_PROMISCUOUS_SLOT
, RT_IDX_VALID
, 1)) {
4224 netif_err(qdev
, hw
, qdev
->ndev
,
4225 "Failed to set promiscous mode.\n");
4227 set_bit(QL_PROMISCUOUS
, &qdev
->flags
);
4231 if (test_bit(QL_PROMISCUOUS
, &qdev
->flags
)) {
4232 if (ql_set_routing_reg
4233 (qdev
, RT_IDX_PROMISCUOUS_SLOT
, RT_IDX_VALID
, 0)) {
4234 netif_err(qdev
, hw
, qdev
->ndev
,
4235 "Failed to clear promiscous mode.\n");
4237 clear_bit(QL_PROMISCUOUS
, &qdev
->flags
);
4243 * Set or clear all multicast mode if a
4244 * transition is taking place.
4246 if ((ndev
->flags
& IFF_ALLMULTI
) ||
4247 (netdev_mc_count(ndev
) > MAX_MULTICAST_ENTRIES
)) {
4248 if (!test_bit(QL_ALLMULTI
, &qdev
->flags
)) {
4249 if (ql_set_routing_reg
4250 (qdev
, RT_IDX_ALLMULTI_SLOT
, RT_IDX_MCAST
, 1)) {
4251 netif_err(qdev
, hw
, qdev
->ndev
,
4252 "Failed to set all-multi mode.\n");
4254 set_bit(QL_ALLMULTI
, &qdev
->flags
);
4258 if (test_bit(QL_ALLMULTI
, &qdev
->flags
)) {
4259 if (ql_set_routing_reg
4260 (qdev
, RT_IDX_ALLMULTI_SLOT
, RT_IDX_MCAST
, 0)) {
4261 netif_err(qdev
, hw
, qdev
->ndev
,
4262 "Failed to clear all-multi mode.\n");
4264 clear_bit(QL_ALLMULTI
, &qdev
->flags
);
4269 if (!netdev_mc_empty(ndev
)) {
4270 status
= ql_sem_spinlock(qdev
, SEM_MAC_ADDR_MASK
);
4274 netdev_for_each_mc_addr(mc_ptr
, ndev
) {
4275 if (ql_set_mac_addr_reg(qdev
, (u8
*) mc_ptr
->dmi_addr
,
4276 MAC_ADDR_TYPE_MULTI_MAC
, i
)) {
4277 netif_err(qdev
, hw
, qdev
->ndev
,
4278 "Failed to loadmulticast address.\n");
4279 ql_sem_unlock(qdev
, SEM_MAC_ADDR_MASK
);
4284 ql_sem_unlock(qdev
, SEM_MAC_ADDR_MASK
);
4285 if (ql_set_routing_reg
4286 (qdev
, RT_IDX_MCAST_MATCH_SLOT
, RT_IDX_MCAST_MATCH
, 1)) {
4287 netif_err(qdev
, hw
, qdev
->ndev
,
4288 "Failed to set multicast match mode.\n");
4290 set_bit(QL_ALLMULTI
, &qdev
->flags
);
4294 ql_sem_unlock(qdev
, SEM_RT_IDX_MASK
);
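/* Promiscuous, all-multi and exact multicast filtering are all expressed
 * through the routing register slots, which is why the whole routine runs
 * under the SEM_RT_IDX_MASK hardware semaphore; the MAC CAM writes for the
 * exact-match list additionally take SEM_MAC_ADDR_MASK, mirroring
 * qlge_set_mac_address() below.
 */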
static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
	struct sockaddr *addr = p;
	int status;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	/* Update local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
				     MAC_ADDR_TYPE_CAM_MAC,
				     qdev->func * MAX_CQ);
	if (status)
		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}
static void qlge_tx_timeout(struct net_device *ndev)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);

	ql_queue_asic_error(qdev);
}
static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
	    container_of(work, struct ql_adapter, asic_reset_work.work);
	int status;

	rtnl_lock();
	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	rtnl_unlock();
	return;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device\n");

	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	rtnl_unlock();
}
static struct nic_operations qla8012_nic_ops = {
	.get_flash		= ql_get_8012_flash_params,
	.port_initialize	= ql_8012_port_initialize,
};

static struct nic_operations qla8000_nic_ops = {
	.get_flash		= ql_get_8000_flash_params,
	.port_initialize	= ql_8000_port_initialize,
};
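/* Chip-specific bring-up is dispatched through qdev->nic_ops, which is
 * selected by device ID in ql_get_board_info() below.  Illustrative use
 * only (the port_initialize call happens elsewhere in the driver):
 *
 *	err = qdev->nic_ops->get_flash(qdev);
 *	err = qdev->nic_ops->port_initialize(qdev);
 */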
/* Find the pcie function number for the other NIC
 * on this chip.  Since both NIC functions share a
 * common firmware we have the lowest enabled function
 * do any common work.  Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
				 &temp);
	if (status)
		return status;

	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;

	return status;
}
static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;

	qdev->func =
	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}
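/* With both NIC function numbers known, the lower-numbered function is
 * treated as port 0 and the other as port 1; everything port-specific
 * (XGMAC semaphore, link/init status bits, mailbox offsets) is derived
 * from that single qdev->port decision above.
 */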
static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int __devinit ql_init_device(struct pci_dev *pdev,
				    struct net_device *ndev, int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_out1;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		return err;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out2;
	}

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
	    ioremap_nocache(pci_resource_start(pdev, 3),
			    pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (qdev->mpi_coredump == NULL) {
			dev_err(&pdev->dev, "Coredump alloc failed.\n");
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}
static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_multicast_list	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_vlan_rx_register	= qlge_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};
static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var = 0;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}

	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
}
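/* The deferrable timer above re-arms itself every 5 seconds purely to read
 * the STS register; that periodic MMIO read is what lets EEH notice a dead
 * PCI bus.  Once the channel is reported offline the timer is allowed to
 * die, and recovery is driven by the pci_error_handlers further down
 * rather than by this poll.
 */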
static int __devinit qlge_probe(struct pci_dev *pdev,
				const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found = 0;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
				 min(MAX_CPUS, (int)num_online_cpus()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->features = (0
			  | NETIF_F_IP_CSUM
			  | NETIF_F_SG
			  | NETIF_F_TSO
			  | NETIF_F_TSO6
			  | NETIF_F_TSO_ECN
			  | NETIF_F_HW_VLAN_TX
			  | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
	ndev->features |= NETIF_F_GRO;

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}
static void __devexit qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	del_timer_sync(&qdev->timer);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}
/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
		cancel_delayed_work_sync(&qdev->asic_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_work);
	cancel_delayed_work_sync(&qdev->mpi_idc_work);
	cancel_delayed_work_sync(&qdev->mpi_core_to_log);
	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}
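/* ql_eeh_close() roughly mirrors the normal down path minus any register
 * access: work items are cancelled, NAPI contexts removed and buffers
 * freed, so it is safe to call while the PCI channel is frozen or
 * permanently failed.
 */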
/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * it resembles the first-half of the qlge_probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	netif_device_attach(ndev);
}
static struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};
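/* EEH recovery therefore follows the standard PCI error-recovery sequence:
 * the core calls .error_detected (detach and quiesce), then .slot_reset
 * (re-enable the device and reset the ASIC), and finally .resume (re-open
 * the interface and re-arm the watchdog timer) once the slot is usable
 * again.
 */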
#ifdef CONFIG_PM
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */
static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}
static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};
static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);