/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author:     Linux qlge network device driver by
 *                      Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"
char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
/* NETIF_MSG_TX_QUEUED | */
/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - Do Not allocate memory. ");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - Do not allow.");

static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
static int ql_wol(struct ql_adapter *qdev);
static void qlge_set_multicast_list(struct net_device *ndev);
/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}
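
/* Illustrative sketch (editorial addition, not part of the upstream driver):
 * a typical caller grabs one of the hardware semaphores around an access to a
 * shared resource and releases it on every exit path.  The helper name below
 * is hypothetical; it only demonstrates the ql_sem_spinlock()/ql_sem_unlock()
 * pairing shown above.
 */
static int __maybe_unused ql_example_read_shared_reg(struct ql_adapter *qdev,
						     u32 *value)
{
	int status;

	/* Spin (up to ~30 tries) until the flash semaphore is ours. */
	status = ql_sem_spinlock(qdev, SEM_FLASH_MASK);
	if (status)
		return status;	/* -ETIMEDOUT: another function holds it. */

	*value = ql_read32(qdev, FLASH_DATA);	/* touch the shared resource */

	ql_sem_unlock(qdev, SEM_FLASH_MASK);	/* always release */
	return 0;
}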
/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!.\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}
/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}
/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		return status;

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}
/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status = ql_wait_reg_rdy(qdev,
						MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}
/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
				    (addr[4] << 8) | (addr[5]);

			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				   (index << MAC_ADDR_IDX_SHIFT) |
				   type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				   (index << MAC_ADDR_IDX_SHIFT) |
				   type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "Adding %s address %pM at index %d in the CAM.\n",
				     type == MAC_ADDR_TYPE_MULTI_MAC ?
				     "MULTICAST" : "UNICAST",
				     addr, index);

			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			/* This field should also include the queue id
			   and possibly the function id.  Right now we hardcode
			   the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->func << CAM_OUT_FUNC_SHIFT) |
				      (0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			netif_info(qdev, ifup, qdev->ndev,
				   "%s VLAN ID %d %s the CAM.\n",
				   enable_bit ? "Adding" : "Removing",
				   index,
				   enable_bit ? "to" : "from");

			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}
/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}
void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}
/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}
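
/* Illustrative sketch (editorial addition, not part of the upstream driver):
 * walking all 16 routing-index slots with ql_get_routing_reg(), e.g. for a
 * debug dump.  The 16-slot count comes from the comment below; the helper
 * name is hypothetical.
 */
static void __maybe_unused ql_example_dump_routing(struct ql_adapter *qdev)
{
	u32 value, i;

	for (i = 0; i < 16; i++) {
		if (ql_get_routing_reg(qdev, i, &value))
			break;		/* register never came ready */
		netif_info(qdev, drv, qdev->ndev,
			   "RT_IDX slot %d = 0x%.08x\n", i, value);
	}
}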
/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "%s %s mask %s the routing reg.\n",
		     enable ? "Adding" : "Removing",
		     index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
		     index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
		     index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
		     index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
		     index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
		     index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
		     index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
		     index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
		     index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
		     index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
		     index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
		     index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
		     index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
		     index == RT_IDX_UNUSED013 ? "UNUSED13" :
		     index == RT_IDX_UNUSED014 ? "UNUSED14" :
		     index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
		     "(Bad index != RT_IDX)",
		     enable ? "to" : "from");

	switch (mask) {
	case RT_IDX_CAM_HIT:
		value = RT_IDX_DST_CAM_Q |	/* dest */
		    RT_IDX_TYPE_NICQ |	/* type */
		    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
		break;
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		value = RT_IDX_DST_DFLT_Q |	/* dest */
		    RT_IDX_TYPE_NICQ |	/* type */
		    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
		break;
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		value = RT_IDX_DST_DFLT_Q |	/* dest */
		    RT_IDX_TYPE_NICQ |	/* type */
		    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
		break;
	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
		value = RT_IDX_DST_DFLT_Q | /* dest */
		    RT_IDX_TYPE_NICQ | /* type */
		    (RT_IDX_IP_CSUM_ERR_SLOT <<
		    RT_IDX_IDX_SHIFT); /* index */
		break;
	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
		value = RT_IDX_DST_DFLT_Q | /* dest */
		    RT_IDX_TYPE_NICQ | /* type */
		    (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
		    RT_IDX_IDX_SHIFT); /* index */
		break;
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		value = RT_IDX_DST_DFLT_Q |	/* dest */
		    RT_IDX_TYPE_NICQ |	/* type */
		    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
		break;
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		value = RT_IDX_DST_DFLT_Q |	/* dest */
		    RT_IDX_TYPE_NICQ |	/* type */
		    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
		break;
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		value = RT_IDX_DST_DFLT_Q |	/* dest */
		    RT_IDX_TYPE_NICQ |	/* type */
		    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
		break;
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		value = RT_IDX_DST_RSS |	/* dest */
		    RT_IDX_TYPE_NICQ |	/* type */
		    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
		break;
	case 0:			/* Clear the E-bit on an entry. */
		value = RT_IDX_DST_DFLT_Q |	/* dest */
		    RT_IDX_TYPE_NICQ |	/* type */
		    (index << RT_IDX_IDX_SHIFT);/* index */
		break;
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}
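
/* Note (editorial, hedged): the two INTR_EN writes above appear to follow the
 * common "mask in the upper 16 bits" register idiom - the high half selects
 * which bits the write affects, the low half supplies their new value - so
 * enable writes (INTR_EN_EI << 16) | INTR_EN_EI while disable writes only the
 * mask with the value bit clear.  This reading is inferred from the two call
 * sites, not from the hardware manual.
 */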
/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}
static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}
static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does a atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}
}
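
/* Illustrative sketch (editorial addition, not part of the upstream driver):
 * how the irq_cnt reference count described above is typically balanced
 * around deferred work - disable (which increments the count) when work is
 * queued, enable (which decrements and re-arms at zero) when the worker
 * finishes.  The helper name is hypothetical.
 */
static void __maybe_unused ql_example_deferred_work(struct ql_adapter *qdev,
						    u32 intr)
{
	/* Queueing side: keep the interrupt off while work is pending. */
	ql_disable_completion_interrupt(qdev, intr);	/* irq_cnt++ */

	/* ... hand the real work to a workqueue here ... */

	/* Completion side: the last finisher drops irq_cnt to zero and
	 * re-enables the completion interrupt.
	 */
	ql_enable_completion_interrupt(qdev, intr);
}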
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}
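
/* Illustrative sketch (editorial addition, not part of the upstream driver):
 * ql_validate_flash() treats the parameter block as valid when its 16-bit
 * words sum to zero, so a correct image carries a checksum word equal to the
 * two's complement of the sum of the other words.  The helper below, and the
 * assumption that the checksum word sits in the last slot, are hypothetical.
 */
static u16 __maybe_unused ql_example_flash_csum_word(const __le16 *words,
						     u32 nwords)
{
	u16 sum = 0;
	u32 i;

	/* Sum every word except the checksum slot itself (assumed last). */
	for (i = 0; i < nwords - 1; i++)
		sum += le16_to_cpu(words[i]);

	/* Two's complement makes the full sum (including this word) zero. */
	return (u16)(0 - sum);
}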
static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}
779 static int ql_get_8000_flash_params(struct ql_adapter
*qdev
)
783 __le32
*p
= (__le32
*)&qdev
->flash
;
787 /* Get flash offset for function and adjust
791 offset
= FUNC0_FLASH_OFFSET
/ sizeof(u32
);
793 offset
= FUNC1_FLASH_OFFSET
/ sizeof(u32
);
795 if (ql_sem_spinlock(qdev
, SEM_FLASH_MASK
))
798 size
= sizeof(struct flash_params_8000
) / sizeof(u32
);
799 for (i
= 0; i
< size
; i
++, p
++) {
800 status
= ql_read_flash_word(qdev
, i
+offset
, p
);
802 netif_err(qdev
, ifup
, qdev
->ndev
,
803 "Error reading flash.\n");
808 status
= ql_validate_flash(qdev
,
809 sizeof(struct flash_params_8000
) / sizeof(u16
),
812 netif_err(qdev
, ifup
, qdev
->ndev
, "Invalid flash.\n");
817 /* Extract either manufacturer or BOFM modified
820 if (qdev
->flash
.flash_params_8000
.data_type1
== 2)
822 qdev
->flash
.flash_params_8000
.mac_addr1
,
823 qdev
->ndev
->addr_len
);
826 qdev
->flash
.flash_params_8000
.mac_addr
,
827 qdev
->ndev
->addr_len
);
829 if (!is_valid_ether_addr(mac_addr
)) {
830 netif_err(qdev
, ifup
, qdev
->ndev
, "Invalid MAC address.\n");
835 memcpy(qdev
->ndev
->dev_addr
,
837 qdev
->ndev
->addr_len
);
840 ql_sem_unlock(qdev
, SEM_FLASH_MASK
);
844 static int ql_get_8012_flash_params(struct ql_adapter
*qdev
)
848 __le32
*p
= (__le32
*)&qdev
->flash
;
850 u32 size
= sizeof(struct flash_params_8012
) / sizeof(u32
);
852 /* Second function's parameters follow the first
858 if (ql_sem_spinlock(qdev
, SEM_FLASH_MASK
))
861 for (i
= 0; i
< size
; i
++, p
++) {
862 status
= ql_read_flash_word(qdev
, i
+offset
, p
);
864 netif_err(qdev
, ifup
, qdev
->ndev
,
865 "Error reading flash.\n");
871 status
= ql_validate_flash(qdev
,
872 sizeof(struct flash_params_8012
) / sizeof(u16
),
875 netif_err(qdev
, ifup
, qdev
->ndev
, "Invalid flash.\n");
880 if (!is_valid_ether_addr(qdev
->flash
.flash_params_8012
.mac_addr
)) {
885 memcpy(qdev
->ndev
->dev_addr
,
886 qdev
->flash
.flash_params_8012
.mac_addr
,
887 qdev
->ndev
->addr_len
);
890 ql_sem_unlock(qdev
, SEM_FLASH_MASK
);
894 /* xgmac register are located behind the xgmac_addr and xgmac_data
895 * register pair. Each read/write requires us to wait for the ready
896 * bit before reading/writing the data.
898 static int ql_write_xgmac_reg(struct ql_adapter
*qdev
, u32 reg
, u32 data
)
901 /* wait for reg to come ready */
902 status
= ql_wait_reg_rdy(qdev
,
903 XGMAC_ADDR
, XGMAC_ADDR_RDY
, XGMAC_ADDR_XME
);
906 /* write the data to the data reg */
907 ql_write32(qdev
, XGMAC_DATA
, data
);
908 /* trigger the write */
909 ql_write32(qdev
, XGMAC_ADDR
, reg
);
913 /* xgmac register are located behind the xgmac_addr and xgmac_data
914 * register pair. Each read/write requires us to wait for the ready
915 * bit before reading/writing the data.
917 int ql_read_xgmac_reg(struct ql_adapter
*qdev
, u32 reg
, u32
*data
)
920 /* wait for reg to come ready */
921 status
= ql_wait_reg_rdy(qdev
,
922 XGMAC_ADDR
, XGMAC_ADDR_RDY
, XGMAC_ADDR_XME
);
925 /* set up for reg read */
926 ql_write32(qdev
, XGMAC_ADDR
, reg
| XGMAC_ADDR_R
);
927 /* wait for reg to come ready */
928 status
= ql_wait_reg_rdy(qdev
,
929 XGMAC_ADDR
, XGMAC_ADDR_RDY
, XGMAC_ADDR_XME
);
933 *data
= ql_read32(qdev
, XGMAC_DATA
);
938 /* This is used for reading the 64-bit statistics regs. */
939 int ql_read_xgmac_reg64(struct ql_adapter
*qdev
, u32 reg
, u64
*data
)
945 status
= ql_read_xgmac_reg(qdev
, reg
, &lo
);
949 status
= ql_read_xgmac_reg(qdev
, reg
+ 4, &hi
);
953 *data
= (u64
) lo
| ((u64
) hi
<< 32);
959 static int ql_8000_port_initialize(struct ql_adapter
*qdev
)
963 * Get MPI firmware version for driver banner
966 status
= ql_mb_about_fw(qdev
);
969 status
= ql_mb_get_fw_state(qdev
);
972 /* Wake up a worker to get/set the TX/RX frame sizes. */
973 queue_delayed_work(qdev
->workqueue
, &qdev
->mpi_port_cfg_work
, 0);
978 /* Take the MAC Core out of reset.
979 * Enable statistics counting.
980 * Take the transmitter/receiver out of reset.
981 * This functionality may be done in the MPI firmware at a
984 static int ql_8012_port_initialize(struct ql_adapter
*qdev
)
989 if (ql_sem_trylock(qdev
, qdev
->xg_sem_mask
)) {
990 /* Another function has the semaphore, so
991 * wait for the port init bit to come ready.
993 netif_info(qdev
, link
, qdev
->ndev
,
994 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
995 status
= ql_wait_reg_rdy(qdev
, STS
, qdev
->port_init
, 0);
997 netif_crit(qdev
, link
, qdev
->ndev
,
998 "Port initialize timed out.\n");
1003 netif_info(qdev
, link
, qdev
->ndev
, "Got xgmac semaphore!.\n");
1004 /* Set the core reset. */
1005 status
= ql_read_xgmac_reg(qdev
, GLOBAL_CFG
, &data
);
1008 data
|= GLOBAL_CFG_RESET
;
1009 status
= ql_write_xgmac_reg(qdev
, GLOBAL_CFG
, data
);
1013 /* Clear the core reset and turn on jumbo for receiver. */
1014 data
&= ~GLOBAL_CFG_RESET
; /* Clear core reset. */
1015 data
|= GLOBAL_CFG_JUMBO
; /* Turn on jumbo. */
1016 data
|= GLOBAL_CFG_TX_STAT_EN
;
1017 data
|= GLOBAL_CFG_RX_STAT_EN
;
1018 status
= ql_write_xgmac_reg(qdev
, GLOBAL_CFG
, data
);
1022 /* Enable transmitter, and clear it's reset. */
1023 status
= ql_read_xgmac_reg(qdev
, TX_CFG
, &data
);
1026 data
&= ~TX_CFG_RESET
; /* Clear the TX MAC reset. */
1027 data
|= TX_CFG_EN
; /* Enable the transmitter. */
1028 status
= ql_write_xgmac_reg(qdev
, TX_CFG
, data
);
1032 /* Enable receiver and clear it's reset. */
1033 status
= ql_read_xgmac_reg(qdev
, RX_CFG
, &data
);
1036 data
&= ~RX_CFG_RESET
; /* Clear the RX MAC reset. */
1037 data
|= RX_CFG_EN
; /* Enable the receiver. */
1038 status
= ql_write_xgmac_reg(qdev
, RX_CFG
, data
);
1042 /* Turn on jumbo. */
1044 ql_write_xgmac_reg(qdev
, MAC_TX_PARAMS
, MAC_TX_PARAMS_JUMBO
| (0x2580 << 16));
1048 ql_write_xgmac_reg(qdev
, MAC_RX_PARAMS
, 0x2580);
1052 /* Signal to the world that the port is enabled. */
1053 ql_write32(qdev
, STS
, ((qdev
->port_init
<< 16) | qdev
->port_init
));
1055 ql_sem_unlock(qdev
, qdev
->xg_sem_mask
);
1059 static inline unsigned int ql_lbq_block_size(struct ql_adapter
*qdev
)
1061 return PAGE_SIZE
<< qdev
->lbq_buf_order
;
1064 /* Get the next large buffer. */
1065 static struct bq_desc
*ql_get_curr_lbuf(struct rx_ring
*rx_ring
)
1067 struct bq_desc
*lbq_desc
= &rx_ring
->lbq
[rx_ring
->lbq_curr_idx
];
1068 rx_ring
->lbq_curr_idx
++;
1069 if (rx_ring
->lbq_curr_idx
== rx_ring
->lbq_len
)
1070 rx_ring
->lbq_curr_idx
= 0;
1071 rx_ring
->lbq_free_cnt
++;
1075 static struct bq_desc
*ql_get_curr_lchunk(struct ql_adapter
*qdev
,
1076 struct rx_ring
*rx_ring
)
1078 struct bq_desc
*lbq_desc
= ql_get_curr_lbuf(rx_ring
);
1080 pci_dma_sync_single_for_cpu(qdev
->pdev
,
1081 dma_unmap_addr(lbq_desc
, mapaddr
),
1082 rx_ring
->lbq_buf_size
,
1083 PCI_DMA_FROMDEVICE
);
1085 /* If it's the last chunk of our master page then
1088 if ((lbq_desc
->p
.pg_chunk
.offset
+ rx_ring
->lbq_buf_size
)
1089 == ql_lbq_block_size(qdev
))
1090 pci_unmap_page(qdev
->pdev
,
1091 lbq_desc
->p
.pg_chunk
.map
,
1092 ql_lbq_block_size(qdev
),
1093 PCI_DMA_FROMDEVICE
);
1097 /* Get the next small buffer. */
1098 static struct bq_desc
*ql_get_curr_sbuf(struct rx_ring
*rx_ring
)
1100 struct bq_desc
*sbq_desc
= &rx_ring
->sbq
[rx_ring
->sbq_curr_idx
];
1101 rx_ring
->sbq_curr_idx
++;
1102 if (rx_ring
->sbq_curr_idx
== rx_ring
->sbq_len
)
1103 rx_ring
->sbq_curr_idx
= 0;
1104 rx_ring
->sbq_free_cnt
++;
1108 /* Update an rx ring index. */
1109 static void ql_update_cq(struct rx_ring
*rx_ring
)
1111 rx_ring
->cnsmr_idx
++;
1112 rx_ring
->curr_entry
++;
1113 if (unlikely(rx_ring
->cnsmr_idx
== rx_ring
->cq_len
)) {
1114 rx_ring
->cnsmr_idx
= 0;
1115 rx_ring
->curr_entry
= rx_ring
->cq_base
;
1119 static void ql_write_cq_idx(struct rx_ring
*rx_ring
)
1121 ql_write_db_reg(rx_ring
->cnsmr_idx
, rx_ring
->cnsmr_idx_db_reg
);
1124 static int ql_get_next_chunk(struct ql_adapter
*qdev
, struct rx_ring
*rx_ring
,
1125 struct bq_desc
*lbq_desc
)
1127 if (!rx_ring
->pg_chunk
.page
) {
1129 rx_ring
->pg_chunk
.page
= alloc_pages(__GFP_COLD
| __GFP_COMP
|
1131 qdev
->lbq_buf_order
);
1132 if (unlikely(!rx_ring
->pg_chunk
.page
)) {
1133 netif_err(qdev
, drv
, qdev
->ndev
,
1134 "page allocation failed.\n");
1137 rx_ring
->pg_chunk
.offset
= 0;
1138 map
= pci_map_page(qdev
->pdev
, rx_ring
->pg_chunk
.page
,
1139 0, ql_lbq_block_size(qdev
),
1140 PCI_DMA_FROMDEVICE
);
1141 if (pci_dma_mapping_error(qdev
->pdev
, map
)) {
1142 __free_pages(rx_ring
->pg_chunk
.page
,
1143 qdev
->lbq_buf_order
);
1144 netif_err(qdev
, drv
, qdev
->ndev
,
1145 "PCI mapping failed.\n");
1148 rx_ring
->pg_chunk
.map
= map
;
1149 rx_ring
->pg_chunk
.va
= page_address(rx_ring
->pg_chunk
.page
);
1152 /* Copy the current master pg_chunk info
1153 * to the current descriptor.
1155 lbq_desc
->p
.pg_chunk
= rx_ring
->pg_chunk
;
1157 /* Adjust the master page chunk for next
1160 rx_ring
->pg_chunk
.offset
+= rx_ring
->lbq_buf_size
;
1161 if (rx_ring
->pg_chunk
.offset
== ql_lbq_block_size(qdev
)) {
1162 rx_ring
->pg_chunk
.page
= NULL
;
1163 lbq_desc
->p
.pg_chunk
.last_flag
= 1;
1165 rx_ring
->pg_chunk
.va
+= rx_ring
->lbq_buf_size
;
1166 get_page(rx_ring
->pg_chunk
.page
);
1167 lbq_desc
->p
.pg_chunk
.last_flag
= 0;
1171 /* Process (refill) a large buffer queue. */
1172 static void ql_update_lbq(struct ql_adapter
*qdev
, struct rx_ring
*rx_ring
)
1174 u32 clean_idx
= rx_ring
->lbq_clean_idx
;
1175 u32 start_idx
= clean_idx
;
1176 struct bq_desc
*lbq_desc
;
1180 while (rx_ring
->lbq_free_cnt
> 32) {
1181 for (i
= 0; i
< 16; i
++) {
1182 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1183 "lbq: try cleaning clean_idx = %d.\n",
1185 lbq_desc
= &rx_ring
->lbq
[clean_idx
];
1186 if (ql_get_next_chunk(qdev
, rx_ring
, lbq_desc
)) {
1187 netif_err(qdev
, ifup
, qdev
->ndev
,
1188 "Could not get a page chunk.\n");
1192 map
= lbq_desc
->p
.pg_chunk
.map
+
1193 lbq_desc
->p
.pg_chunk
.offset
;
1194 dma_unmap_addr_set(lbq_desc
, mapaddr
, map
);
1195 dma_unmap_len_set(lbq_desc
, maplen
,
1196 rx_ring
->lbq_buf_size
);
1197 *lbq_desc
->addr
= cpu_to_le64(map
);
1199 pci_dma_sync_single_for_device(qdev
->pdev
, map
,
1200 rx_ring
->lbq_buf_size
,
1201 PCI_DMA_FROMDEVICE
);
1203 if (clean_idx
== rx_ring
->lbq_len
)
1207 rx_ring
->lbq_clean_idx
= clean_idx
;
1208 rx_ring
->lbq_prod_idx
+= 16;
1209 if (rx_ring
->lbq_prod_idx
== rx_ring
->lbq_len
)
1210 rx_ring
->lbq_prod_idx
= 0;
1211 rx_ring
->lbq_free_cnt
-= 16;
1214 if (start_idx
!= clean_idx
) {
1215 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1216 "lbq: updating prod idx = %d.\n",
1217 rx_ring
->lbq_prod_idx
);
1218 ql_write_db_reg(rx_ring
->lbq_prod_idx
,
1219 rx_ring
->lbq_prod_idx_db_reg
);
1223 /* Process (refill) a small buffer queue. */
1224 static void ql_update_sbq(struct ql_adapter
*qdev
, struct rx_ring
*rx_ring
)
1226 u32 clean_idx
= rx_ring
->sbq_clean_idx
;
1227 u32 start_idx
= clean_idx
;
1228 struct bq_desc
*sbq_desc
;
1232 while (rx_ring
->sbq_free_cnt
> 16) {
1233 for (i
= 0; i
< 16; i
++) {
1234 sbq_desc
= &rx_ring
->sbq
[clean_idx
];
1235 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1236 "sbq: try cleaning clean_idx = %d.\n",
1238 if (sbq_desc
->p
.skb
== NULL
) {
1239 netif_printk(qdev
, rx_status
, KERN_DEBUG
,
1241 "sbq: getting new skb for index %d.\n",
1244 netdev_alloc_skb(qdev
->ndev
,
1246 if (sbq_desc
->p
.skb
== NULL
) {
1247 netif_err(qdev
, probe
, qdev
->ndev
,
1248 "Couldn't get an skb.\n");
1249 rx_ring
->sbq_clean_idx
= clean_idx
;
1252 skb_reserve(sbq_desc
->p
.skb
, QLGE_SB_PAD
);
1253 map
= pci_map_single(qdev
->pdev
,
1254 sbq_desc
->p
.skb
->data
,
1255 rx_ring
->sbq_buf_size
,
1256 PCI_DMA_FROMDEVICE
);
1257 if (pci_dma_mapping_error(qdev
->pdev
, map
)) {
1258 netif_err(qdev
, ifup
, qdev
->ndev
,
1259 "PCI mapping failed.\n");
1260 rx_ring
->sbq_clean_idx
= clean_idx
;
1261 dev_kfree_skb_any(sbq_desc
->p
.skb
);
1262 sbq_desc
->p
.skb
= NULL
;
1265 dma_unmap_addr_set(sbq_desc
, mapaddr
, map
);
1266 dma_unmap_len_set(sbq_desc
, maplen
,
1267 rx_ring
->sbq_buf_size
);
1268 *sbq_desc
->addr
= cpu_to_le64(map
);
1272 if (clean_idx
== rx_ring
->sbq_len
)
1275 rx_ring
->sbq_clean_idx
= clean_idx
;
1276 rx_ring
->sbq_prod_idx
+= 16;
1277 if (rx_ring
->sbq_prod_idx
== rx_ring
->sbq_len
)
1278 rx_ring
->sbq_prod_idx
= 0;
1279 rx_ring
->sbq_free_cnt
-= 16;
1282 if (start_idx
!= clean_idx
) {
1283 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1284 "sbq: updating prod idx = %d.\n",
1285 rx_ring
->sbq_prod_idx
);
1286 ql_write_db_reg(rx_ring
->sbq_prod_idx
,
1287 rx_ring
->sbq_prod_idx_db_reg
);
1291 static void ql_update_buffer_queues(struct ql_adapter
*qdev
,
1292 struct rx_ring
*rx_ring
)
1294 ql_update_sbq(qdev
, rx_ring
);
1295 ql_update_lbq(qdev
, rx_ring
);
1298 /* Unmaps tx buffers. Can be called from send() if a pci mapping
1299 * fails at some stage, or from the interrupt when a tx completes.
1301 static void ql_unmap_send(struct ql_adapter
*qdev
,
1302 struct tx_ring_desc
*tx_ring_desc
, int mapped
)
1305 for (i
= 0; i
< mapped
; i
++) {
1306 if (i
== 0 || (i
== 7 && mapped
> 7)) {
1308 * Unmap the skb->data area, or the
1309 * external sglist (AKA the Outbound
1310 * Address List (OAL)).
1311 * If its the zeroeth element, then it's
1312 * the skb->data area. If it's the 7th
1313 * element and there is more than 6 frags,
1317 netif_printk(qdev
, tx_done
, KERN_DEBUG
,
1319 "unmapping OAL area.\n");
1321 pci_unmap_single(qdev
->pdev
,
1322 dma_unmap_addr(&tx_ring_desc
->map
[i
],
1324 dma_unmap_len(&tx_ring_desc
->map
[i
],
1328 netif_printk(qdev
, tx_done
, KERN_DEBUG
, qdev
->ndev
,
1329 "unmapping frag %d.\n", i
);
1330 pci_unmap_page(qdev
->pdev
,
1331 dma_unmap_addr(&tx_ring_desc
->map
[i
],
1333 dma_unmap_len(&tx_ring_desc
->map
[i
],
1334 maplen
), PCI_DMA_TODEVICE
);
1340 /* Map the buffers for this transmit. This will return
1341 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1343 static int ql_map_send(struct ql_adapter
*qdev
,
1344 struct ob_mac_iocb_req
*mac_iocb_ptr
,
1345 struct sk_buff
*skb
, struct tx_ring_desc
*tx_ring_desc
)
1347 int len
= skb_headlen(skb
);
1349 int frag_idx
, err
, map_idx
= 0;
1350 struct tx_buf_desc
*tbd
= mac_iocb_ptr
->tbd
;
1351 int frag_cnt
= skb_shinfo(skb
)->nr_frags
;
1354 netif_printk(qdev
, tx_queued
, KERN_DEBUG
, qdev
->ndev
,
1355 "frag_cnt = %d.\n", frag_cnt
);
1358 * Map the skb buffer first.
1360 map
= pci_map_single(qdev
->pdev
, skb
->data
, len
, PCI_DMA_TODEVICE
);
1362 err
= pci_dma_mapping_error(qdev
->pdev
, map
);
1364 netif_err(qdev
, tx_queued
, qdev
->ndev
,
1365 "PCI mapping failed with error: %d\n", err
);
1367 return NETDEV_TX_BUSY
;
1370 tbd
->len
= cpu_to_le32(len
);
1371 tbd
->addr
= cpu_to_le64(map
);
1372 dma_unmap_addr_set(&tx_ring_desc
->map
[map_idx
], mapaddr
, map
);
1373 dma_unmap_len_set(&tx_ring_desc
->map
[map_idx
], maplen
, len
);
1377 * This loop fills the remainder of the 8 address descriptors
1378 * in the IOCB. If there are more than 7 fragments, then the
1379 * eighth address desc will point to an external list (OAL).
1380 * When this happens, the remainder of the frags will be stored
1383 for (frag_idx
= 0; frag_idx
< frag_cnt
; frag_idx
++, map_idx
++) {
1384 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[frag_idx
];
1386 if (frag_idx
== 6 && frag_cnt
> 7) {
1387 /* Let's tack on an sglist.
1388 * Our control block will now
1390 * iocb->seg[0] = skb->data
1391 * iocb->seg[1] = frag[0]
1392 * iocb->seg[2] = frag[1]
1393 * iocb->seg[3] = frag[2]
1394 * iocb->seg[4] = frag[3]
1395 * iocb->seg[5] = frag[4]
1396 * iocb->seg[6] = frag[5]
1397 * iocb->seg[7] = ptr to OAL (external sglist)
1398 * oal->seg[0] = frag[6]
1399 * oal->seg[1] = frag[7]
1400 * oal->seg[2] = frag[8]
1401 * oal->seg[3] = frag[9]
1402 * oal->seg[4] = frag[10]
1405 /* Tack on the OAL in the eighth segment of IOCB. */
1406 map
= pci_map_single(qdev
->pdev
, &tx_ring_desc
->oal
,
1409 err
= pci_dma_mapping_error(qdev
->pdev
, map
);
1411 netif_err(qdev
, tx_queued
, qdev
->ndev
,
1412 "PCI mapping outbound address list with error: %d\n",
1417 tbd
->addr
= cpu_to_le64(map
);
1419 * The length is the number of fragments
1420 * that remain to be mapped times the length
1421 * of our sglist (OAL).
1424 cpu_to_le32((sizeof(struct tx_buf_desc
) *
1425 (frag_cnt
- frag_idx
)) | TX_DESC_C
);
1426 dma_unmap_addr_set(&tx_ring_desc
->map
[map_idx
], mapaddr
,
1428 dma_unmap_len_set(&tx_ring_desc
->map
[map_idx
], maplen
,
1429 sizeof(struct oal
));
1430 tbd
= (struct tx_buf_desc
*)&tx_ring_desc
->oal
;
1435 pci_map_page(qdev
->pdev
, frag
->page
,
1436 frag
->page_offset
, frag
->size
,
1439 err
= pci_dma_mapping_error(qdev
->pdev
, map
);
1441 netif_err(qdev
, tx_queued
, qdev
->ndev
,
1442 "PCI mapping frags failed with error: %d.\n",
1447 tbd
->addr
= cpu_to_le64(map
);
1448 tbd
->len
= cpu_to_le32(frag
->size
);
1449 dma_unmap_addr_set(&tx_ring_desc
->map
[map_idx
], mapaddr
, map
);
1450 dma_unmap_len_set(&tx_ring_desc
->map
[map_idx
], maplen
,
1454 /* Save the number of segments we've mapped. */
1455 tx_ring_desc
->map_cnt
= map_idx
;
1456 /* Terminate the last segment. */
1457 tbd
->len
= cpu_to_le32(le32_to_cpu(tbd
->len
) | TX_DESC_E
);
1458 return NETDEV_TX_OK
;
1462 * If the first frag mapping failed, then i will be zero.
1463 * This causes the unmap of the skb->data area. Otherwise
1464 * we pass in the number of frags that mapped successfully
1465 * so they can be umapped.
1467 ql_unmap_send(qdev
, tx_ring_desc
, map_idx
);
1468 return NETDEV_TX_BUSY
;
1471 /* Process an inbound completion from an rx ring. */
1472 static void ql_process_mac_rx_gro_page(struct ql_adapter
*qdev
,
1473 struct rx_ring
*rx_ring
,
1474 struct ib_mac_iocb_rsp
*ib_mac_rsp
,
1478 struct sk_buff
*skb
;
1479 struct bq_desc
*lbq_desc
= ql_get_curr_lchunk(qdev
, rx_ring
);
1480 struct skb_frag_struct
*rx_frag
;
1482 struct napi_struct
*napi
= &rx_ring
->napi
;
1484 napi
->dev
= qdev
->ndev
;
1486 skb
= napi_get_frags(napi
);
1488 netif_err(qdev
, drv
, qdev
->ndev
,
1489 "Couldn't get an skb, exiting.\n");
1490 rx_ring
->rx_dropped
++;
1491 put_page(lbq_desc
->p
.pg_chunk
.page
);
1494 prefetch(lbq_desc
->p
.pg_chunk
.va
);
1495 rx_frag
= skb_shinfo(skb
)->frags
;
1496 nr_frags
= skb_shinfo(skb
)->nr_frags
;
1497 rx_frag
+= nr_frags
;
1498 rx_frag
->page
= lbq_desc
->p
.pg_chunk
.page
;
1499 rx_frag
->page_offset
= lbq_desc
->p
.pg_chunk
.offset
;
1500 rx_frag
->size
= length
;
1503 skb
->data_len
+= length
;
1504 skb
->truesize
+= length
;
1505 skb_shinfo(skb
)->nr_frags
++;
1507 rx_ring
->rx_packets
++;
1508 rx_ring
->rx_bytes
+= length
;
1509 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1510 skb_record_rx_queue(skb
, rx_ring
->cq_id
);
1511 if (vlan_id
!= 0xffff)
1512 __vlan_hwaccel_put_tag(skb
, vlan_id
);
1513 napi_gro_frags(napi
);
1516 /* Process an inbound completion from an rx ring. */
1517 static void ql_process_mac_rx_page(struct ql_adapter
*qdev
,
1518 struct rx_ring
*rx_ring
,
1519 struct ib_mac_iocb_rsp
*ib_mac_rsp
,
1523 struct net_device
*ndev
= qdev
->ndev
;
1524 struct sk_buff
*skb
= NULL
;
1526 struct bq_desc
*lbq_desc
= ql_get_curr_lchunk(qdev
, rx_ring
);
1527 struct napi_struct
*napi
= &rx_ring
->napi
;
1529 skb
= netdev_alloc_skb(ndev
, length
);
1531 netif_err(qdev
, drv
, qdev
->ndev
,
1532 "Couldn't get an skb, need to unwind!.\n");
1533 rx_ring
->rx_dropped
++;
1534 put_page(lbq_desc
->p
.pg_chunk
.page
);
1538 addr
= lbq_desc
->p
.pg_chunk
.va
;
1542 /* Frame error, so drop the packet. */
1543 if (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_ERR_MASK
) {
1544 netif_info(qdev
, drv
, qdev
->ndev
,
1545 "Receive error, flags2 = 0x%x\n", ib_mac_rsp
->flags2
);
1546 rx_ring
->rx_errors
++;
1550 /* The max framesize filter on this chip is set higher than
1551 * MTU since FCoE uses 2k frames.
1553 if (skb
->len
> ndev
->mtu
+ ETH_HLEN
) {
1554 netif_err(qdev
, drv
, qdev
->ndev
,
1555 "Segment too small, dropping.\n");
1556 rx_ring
->rx_dropped
++;
1559 memcpy(skb_put(skb
, ETH_HLEN
), addr
, ETH_HLEN
);
1560 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1561 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1563 skb_fill_page_desc(skb
, 0, lbq_desc
->p
.pg_chunk
.page
,
1564 lbq_desc
->p
.pg_chunk
.offset
+ETH_HLEN
,
1566 skb
->len
+= length
-ETH_HLEN
;
1567 skb
->data_len
+= length
-ETH_HLEN
;
1568 skb
->truesize
+= length
-ETH_HLEN
;
1570 rx_ring
->rx_packets
++;
1571 rx_ring
->rx_bytes
+= skb
->len
;
1572 skb
->protocol
= eth_type_trans(skb
, ndev
);
1573 skb_checksum_none_assert(skb
);
1575 if ((ndev
->features
& NETIF_F_RXCSUM
) &&
1576 !(ib_mac_rsp
->flags1
& IB_MAC_CSUM_ERR_MASK
)) {
1578 if (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_T
) {
1579 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1580 "TCP checksum done!\n");
1581 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1582 } else if ((ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_U
) &&
1583 (ib_mac_rsp
->flags3
& IB_MAC_IOCB_RSP_V4
)) {
1584 /* Unfragmented ipv4 UDP frame. */
1585 struct iphdr
*iph
= (struct iphdr
*) skb
->data
;
1586 if (!(iph
->frag_off
&
1587 cpu_to_be16(IP_MF
|IP_OFFSET
))) {
1588 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1589 netif_printk(qdev
, rx_status
, KERN_DEBUG
,
1591 "TCP checksum done!\n");
1596 skb_record_rx_queue(skb
, rx_ring
->cq_id
);
1597 if (vlan_id
!= 0xffff)
1598 __vlan_hwaccel_put_tag(skb
, vlan_id
);
1599 if (skb
->ip_summed
== CHECKSUM_UNNECESSARY
)
1600 napi_gro_receive(napi
, skb
);
1602 netif_receive_skb(skb
);
1605 dev_kfree_skb_any(skb
);
1606 put_page(lbq_desc
->p
.pg_chunk
.page
);
1609 /* Process an inbound completion from an rx ring. */
1610 static void ql_process_mac_rx_skb(struct ql_adapter
*qdev
,
1611 struct rx_ring
*rx_ring
,
1612 struct ib_mac_iocb_rsp
*ib_mac_rsp
,
1616 struct net_device
*ndev
= qdev
->ndev
;
1617 struct sk_buff
*skb
= NULL
;
1618 struct sk_buff
*new_skb
= NULL
;
1619 struct bq_desc
*sbq_desc
= ql_get_curr_sbuf(rx_ring
);
1621 skb
= sbq_desc
->p
.skb
;
1622 /* Allocate new_skb and copy */
1623 new_skb
= netdev_alloc_skb(qdev
->ndev
, length
+ NET_IP_ALIGN
);
1624 if (new_skb
== NULL
) {
1625 netif_err(qdev
, probe
, qdev
->ndev
,
1626 "No skb available, drop the packet.\n");
1627 rx_ring
->rx_dropped
++;
1630 skb_reserve(new_skb
, NET_IP_ALIGN
);
1631 memcpy(skb_put(new_skb
, length
), skb
->data
, length
);
1634 /* Frame error, so drop the packet. */
1635 if (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_ERR_MASK
) {
1636 netif_info(qdev
, drv
, qdev
->ndev
,
1637 "Receive error, flags2 = 0x%x\n", ib_mac_rsp
->flags2
);
1638 dev_kfree_skb_any(skb
);
1639 rx_ring
->rx_errors
++;
1643 /* loopback self test for ethtool */
1644 if (test_bit(QL_SELFTEST
, &qdev
->flags
)) {
1645 ql_check_lb_frame(qdev
, skb
);
1646 dev_kfree_skb_any(skb
);
1650 /* The max framesize filter on this chip is set higher than
1651 * MTU since FCoE uses 2k frames.
1653 if (skb
->len
> ndev
->mtu
+ ETH_HLEN
) {
1654 dev_kfree_skb_any(skb
);
1655 rx_ring
->rx_dropped
++;
1659 prefetch(skb
->data
);
1661 if (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) {
1662 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1664 (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) ==
1665 IB_MAC_IOCB_RSP_M_HASH
? "Hash" :
1666 (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) ==
1667 IB_MAC_IOCB_RSP_M_REG
? "Registered" :
1668 (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) ==
1669 IB_MAC_IOCB_RSP_M_PROM
? "Promiscuous" : "");
1671 if (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_P
)
1672 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1673 "Promiscuous Packet.\n");
1675 rx_ring
->rx_packets
++;
1676 rx_ring
->rx_bytes
+= skb
->len
;
1677 skb
->protocol
= eth_type_trans(skb
, ndev
);
1678 skb_checksum_none_assert(skb
);
1680 /* If rx checksum is on, and there are no
1681 * csum or frame errors.
1683 if ((ndev
->features
& NETIF_F_RXCSUM
) &&
1684 !(ib_mac_rsp
->flags1
& IB_MAC_CSUM_ERR_MASK
)) {
1686 if (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_T
) {
1687 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1688 "TCP checksum done!\n");
1689 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1690 } else if ((ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_U
) &&
1691 (ib_mac_rsp
->flags3
& IB_MAC_IOCB_RSP_V4
)) {
1692 /* Unfragmented ipv4 UDP frame. */
1693 struct iphdr
*iph
= (struct iphdr
*) skb
->data
;
1694 if (!(iph
->frag_off
&
1695 ntohs(IP_MF
|IP_OFFSET
))) {
1696 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1697 netif_printk(qdev
, rx_status
, KERN_DEBUG
,
1699 "TCP checksum done!\n");
1704 skb_record_rx_queue(skb
, rx_ring
->cq_id
);
1705 if (vlan_id
!= 0xffff)
1706 __vlan_hwaccel_put_tag(skb
, vlan_id
);
1707 if (skb
->ip_summed
== CHECKSUM_UNNECESSARY
)
1708 napi_gro_receive(&rx_ring
->napi
, skb
);
1710 netif_receive_skb(skb
);
1713 static void ql_realign_skb(struct sk_buff
*skb
, int len
)
1715 void *temp_addr
= skb
->data
;
1717 /* Undo the skb_reserve(skb,32) we did before
1718 * giving to hardware, and realign data on
1719 * a 2-byte boundary.
1721 skb
->data
-= QLGE_SB_PAD
- NET_IP_ALIGN
;
1722 skb
->tail
-= QLGE_SB_PAD
- NET_IP_ALIGN
;
1723 skb_copy_to_linear_data(skb
, temp_addr
,
1728 * This function builds an skb for the given inbound
1729 * completion. It will be rewritten for readability in the near
1730 * future, but for not it works well.
1732 static struct sk_buff
*ql_build_rx_skb(struct ql_adapter
*qdev
,
1733 struct rx_ring
*rx_ring
,
1734 struct ib_mac_iocb_rsp
*ib_mac_rsp
)
1736 struct bq_desc
*lbq_desc
;
1737 struct bq_desc
*sbq_desc
;
1738 struct sk_buff
*skb
= NULL
;
1739 u32 length
= le32_to_cpu(ib_mac_rsp
->data_len
);
1740 u32 hdr_len
= le32_to_cpu(ib_mac_rsp
->hdr_len
);
1743 * Handle the header buffer if present.
1745 if (ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HV
&&
1746 ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HS
) {
1747 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1748 "Header of %d bytes in small buffer.\n", hdr_len
);
1750 * Headers fit nicely into a small buffer.
1752 sbq_desc
= ql_get_curr_sbuf(rx_ring
);
1753 pci_unmap_single(qdev
->pdev
,
1754 dma_unmap_addr(sbq_desc
, mapaddr
),
1755 dma_unmap_len(sbq_desc
, maplen
),
1756 PCI_DMA_FROMDEVICE
);
1757 skb
= sbq_desc
->p
.skb
;
1758 ql_realign_skb(skb
, hdr_len
);
1759 skb_put(skb
, hdr_len
);
1760 sbq_desc
->p
.skb
= NULL
;
1764 * Handle the data buffer(s).
1766 if (unlikely(!length
)) { /* Is there data too? */
1767 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1768 "No Data buffer in this packet.\n");
1772 if (ib_mac_rsp
->flags3
& IB_MAC_IOCB_RSP_DS
) {
1773 if (ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HS
) {
1774 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1775 "Headers in small, data of %d bytes in small, combine them.\n",
1778 * Data is less than small buffer size so it's
1779 * stuffed in a small buffer.
1780 * For this case we append the data
1781 * from the "data" small buffer to the "header" small
1784 sbq_desc
= ql_get_curr_sbuf(rx_ring
);
1785 pci_dma_sync_single_for_cpu(qdev
->pdev
,
1787 (sbq_desc
, mapaddr
),
1790 PCI_DMA_FROMDEVICE
);
1791 memcpy(skb_put(skb
, length
),
1792 sbq_desc
->p
.skb
->data
, length
);
1793 pci_dma_sync_single_for_device(qdev
->pdev
,
1800 PCI_DMA_FROMDEVICE
);
1802 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1803 "%d bytes in a single small buffer.\n",
1805 sbq_desc
= ql_get_curr_sbuf(rx_ring
);
1806 skb
= sbq_desc
->p
.skb
;
1807 ql_realign_skb(skb
, length
);
1808 skb_put(skb
, length
);
1809 pci_unmap_single(qdev
->pdev
,
1810 dma_unmap_addr(sbq_desc
,
1812 dma_unmap_len(sbq_desc
,
1814 PCI_DMA_FROMDEVICE
);
1815 sbq_desc
->p
.skb
= NULL
;
1817 } else if (ib_mac_rsp
->flags3
& IB_MAC_IOCB_RSP_DL
) {
1818 if (ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HS
) {
1819 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1820 "Header in small, %d bytes in large. Chain large to small!\n",
1823 * The data is in a single large buffer. We
1824 * chain it to the header buffer's skb and let
1827 lbq_desc
= ql_get_curr_lchunk(qdev
, rx_ring
);
1828 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1829 "Chaining page at offset = %d, for %d bytes to skb.\n",
1830 lbq_desc
->p
.pg_chunk
.offset
, length
);
1831 skb_fill_page_desc(skb
, 0, lbq_desc
->p
.pg_chunk
.page
,
1832 lbq_desc
->p
.pg_chunk
.offset
,
1835 skb
->data_len
+= length
;
1836 skb
->truesize
+= length
;
1839 * The headers and data are in a single large buffer. We
1840 * copy it to a new skb and let it go. This can happen with
1841 * jumbo mtu on a non-TCP/UDP frame.
1843 lbq_desc
= ql_get_curr_lchunk(qdev
, rx_ring
);
1844 skb
= netdev_alloc_skb(qdev
->ndev
, length
);
1846 netif_printk(qdev
, probe
, KERN_DEBUG
, qdev
->ndev
,
1847 "No skb available, drop the packet.\n");
1850 pci_unmap_page(qdev
->pdev
,
1851 dma_unmap_addr(lbq_desc
,
1853 dma_unmap_len(lbq_desc
, maplen
),
1854 PCI_DMA_FROMDEVICE
);
1855 skb_reserve(skb
, NET_IP_ALIGN
);
1856 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1857 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1859 skb_fill_page_desc(skb
, 0,
1860 lbq_desc
->p
.pg_chunk
.page
,
1861 lbq_desc
->p
.pg_chunk
.offset
,
1864 skb
->data_len
+= length
;
1865 skb
->truesize
+= length
;
1867 __pskb_pull_tail(skb
,
1868 (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_V
) ?
1869 VLAN_ETH_HLEN
: ETH_HLEN
);
1873 * The data is in a chain of large buffers
1874 * pointed to by a small buffer. We loop
1875 * thru and chain them to the our small header
1877 * frags: There are 18 max frags and our small
1878 * buffer will hold 32 of them. The thing is,
1879 * we'll use 3 max for our 9000 byte jumbo
1880 * frames. If the MTU goes up we could
1881 * eventually be in trouble.
1884 sbq_desc
= ql_get_curr_sbuf(rx_ring
);
1885 pci_unmap_single(qdev
->pdev
,
1886 dma_unmap_addr(sbq_desc
, mapaddr
),
1887 dma_unmap_len(sbq_desc
, maplen
),
1888 PCI_DMA_FROMDEVICE
);
1889 if (!(ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HS
)) {
1891 * This is an non TCP/UDP IP frame, so
1892 * the headers aren't split into a small
1893 * buffer. We have to use the small buffer
1894 * that contains our sg list as our skb to
1895 * send upstairs. Copy the sg list here to
1896 * a local buffer and use it to find the
1899 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1900 "%d bytes of headers & data in chain of large.\n",
1902 skb
= sbq_desc
->p
.skb
;
1903 sbq_desc
->p
.skb
= NULL
;
1904 skb_reserve(skb
, NET_IP_ALIGN
);
1906 while (length
> 0) {
1907 lbq_desc
= ql_get_curr_lchunk(qdev
, rx_ring
);
1908 size
= (length
< rx_ring
->lbq_buf_size
) ? length
:
1909 rx_ring
->lbq_buf_size
;
1911 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1912 "Adding page %d to skb for %d bytes.\n",
1914 skb_fill_page_desc(skb
, i
,
1915 lbq_desc
->p
.pg_chunk
.page
,
1916 lbq_desc
->p
.pg_chunk
.offset
,
1919 skb
->data_len
+= size
;
1920 skb
->truesize
+= size
;
1924 __pskb_pull_tail(skb
, (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_V
) ?
1925 VLAN_ETH_HLEN
: ETH_HLEN
);
1930 /* Process an inbound completion from an rx ring. */
1931 static void ql_process_mac_split_rx_intr(struct ql_adapter
*qdev
,
1932 struct rx_ring
*rx_ring
,
1933 struct ib_mac_iocb_rsp
*ib_mac_rsp
,
1936 struct net_device
*ndev
= qdev
->ndev
;
1937 struct sk_buff
*skb
= NULL
;
1939 QL_DUMP_IB_MAC_RSP(ib_mac_rsp
);
1941 skb
= ql_build_rx_skb(qdev
, rx_ring
, ib_mac_rsp
);
1942 if (unlikely(!skb
)) {
1943 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1944 "No skb available, drop packet.\n");
1945 rx_ring
->rx_dropped
++;
1949 /* Frame error, so drop the packet. */
1950 if (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_ERR_MASK
) {
1951 netif_info(qdev
, drv
, qdev
->ndev
,
1952 "Receive error, flags2 = 0x%x\n", ib_mac_rsp
->flags2
);
1953 dev_kfree_skb_any(skb
);
1954 rx_ring
->rx_errors
++;
1958 /* The max framesize filter on this chip is set higher than
1959 * MTU since FCoE uses 2k frames.
1961 if (skb
->len
> ndev
->mtu
+ ETH_HLEN
) {
1962 dev_kfree_skb_any(skb
);
1963 rx_ring
->rx_dropped
++;
1967 /* loopback self test for ethtool */
1968 if (test_bit(QL_SELFTEST
, &qdev
->flags
)) {
1969 ql_check_lb_frame(qdev
, skb
);
1970 dev_kfree_skb_any(skb
);
1974 prefetch(skb
->data
);
1976 if (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) {
1977 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
, "%s Multicast.\n",
1978 (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) ==
1979 IB_MAC_IOCB_RSP_M_HASH
? "Hash" :
1980 (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) ==
1981 IB_MAC_IOCB_RSP_M_REG
? "Registered" :
1982 (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) ==
1983 IB_MAC_IOCB_RSP_M_PROM
? "Promiscuous" : "");
1984 rx_ring
->rx_multicast
++;
1986 if (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_P
) {
1987 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1988 "Promiscuous Packet.\n");
1991 skb
->protocol
= eth_type_trans(skb
, ndev
);
1992 skb_checksum_none_assert(skb
);
1994 /* If rx checksum is on, and there are no
1995 * csum or frame errors.
1997 if ((ndev
->features
& NETIF_F_RXCSUM
) &&
1998 !(ib_mac_rsp
->flags1
& IB_MAC_CSUM_ERR_MASK
)) {
2000 if (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_T
) {
2001 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
2002 "TCP checksum done!\n");
2003 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
2004 } else if ((ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_U
) &&
2005 (ib_mac_rsp
->flags3
& IB_MAC_IOCB_RSP_V4
)) {
2006 /* Unfragmented ipv4 UDP frame. */
2007 struct iphdr
*iph
= (struct iphdr
*) skb
->data
;
2008 if (!(iph
->frag_off
&
2009 ntohs(IP_MF
|IP_OFFSET
))) {
2010 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
2011 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
2012 "TCP checksum done!\n");
2017 rx_ring
->rx_packets
++;
2018 rx_ring
->rx_bytes
+= skb
->len
;
2019 skb_record_rx_queue(skb
, rx_ring
->cq_id
);
2020 if ((ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_V
) && (vlan_id
!= 0))
2021 __vlan_hwaccel_put_tag(skb
, vlan_id
);
2022 if (skb
->ip_summed
== CHECKSUM_UNNECESSARY
)
2023 napi_gro_receive(&rx_ring
->napi
, skb
);
2025 netif_receive_skb(skb
);
2028 /* Process an inbound completion from an rx ring. */
2029 static unsigned long ql_process_mac_rx_intr(struct ql_adapter
*qdev
,
2030 struct rx_ring
*rx_ring
,
2031 struct ib_mac_iocb_rsp
*ib_mac_rsp
)
2033 u32 length
= le32_to_cpu(ib_mac_rsp
->data_len
);
2034 u16 vlan_id
= (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_V
) ?
2035 ((le16_to_cpu(ib_mac_rsp
->vlan_id
) &
2036 IB_MAC_IOCB_RSP_VLAN_MASK
)) : 0xffff;
2038 QL_DUMP_IB_MAC_RSP(ib_mac_rsp
);
2040 if (ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HV
) {
2041 /* The data and headers are split into
2044 ql_process_mac_split_rx_intr(qdev
, rx_ring
, ib_mac_rsp
,
2046 } else if (ib_mac_rsp
->flags3
& IB_MAC_IOCB_RSP_DS
) {
2047 /* The data fit in a single small buffer.
2048 * Allocate a new skb, copy the data and
2049 * return the buffer to the free pool.
2051 ql_process_mac_rx_skb(qdev
, rx_ring
, ib_mac_rsp
,
2053 } else if ((ib_mac_rsp
->flags3
& IB_MAC_IOCB_RSP_DL
) &&
2054 !(ib_mac_rsp
->flags1
& IB_MAC_CSUM_ERR_MASK
) &&
2055 (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_T
)) {
2056 /* TCP packet in a page chunk that's been checksummed.
2057 * Tack it on to our GRO skb and let it go.
2059 ql_process_mac_rx_gro_page(qdev
, rx_ring
, ib_mac_rsp
,
2061 } else if (ib_mac_rsp
->flags3
& IB_MAC_IOCB_RSP_DL
) {
2062 /* Non-TCP packet in a page chunk. Allocate an
2063 * skb, tack it on frags, and send it up.
2065 ql_process_mac_rx_page(qdev
, rx_ring
, ib_mac_rsp
,
2068 /* Non-TCP/UDP large frames that span multiple buffers
2069 * can be processed corrrectly by the split frame logic.
2071 ql_process_mac_split_rx_intr(qdev
, rx_ring
, ib_mac_rsp
,
2075 return (unsigned long)length
;
2078 /* Process an outbound completion from an rx ring. */
2079 static void ql_process_mac_tx_intr(struct ql_adapter
*qdev
,
2080 struct ob_mac_iocb_rsp
*mac_rsp
)
2082 struct tx_ring
*tx_ring
;
2083 struct tx_ring_desc
*tx_ring_desc
;
2085 QL_DUMP_OB_MAC_RSP(mac_rsp
);
2086 tx_ring
= &qdev
->tx_ring
[mac_rsp
->txq_idx
];
2087 tx_ring_desc
= &tx_ring
->q
[mac_rsp
->tid
];
2088 ql_unmap_send(qdev
, tx_ring_desc
, tx_ring_desc
->map_cnt
);
2089 tx_ring
->tx_bytes
+= (tx_ring_desc
->skb
)->len
;
2090 tx_ring
->tx_packets
++;
2091 dev_kfree_skb(tx_ring_desc
->skb
);
2092 tx_ring_desc
->skb
= NULL
;
2094 if (unlikely(mac_rsp
->flags1
& (OB_MAC_IOCB_RSP_E
|
2097 OB_MAC_IOCB_RSP_P
| OB_MAC_IOCB_RSP_B
))) {
2098 if (mac_rsp
->flags1
& OB_MAC_IOCB_RSP_E
) {
2099 netif_warn(qdev
, tx_done
, qdev
->ndev
,
2100 "Total descriptor length did not match transfer length.\n");
2102 if (mac_rsp
->flags1
& OB_MAC_IOCB_RSP_S
) {
2103 netif_warn(qdev
, tx_done
, qdev
->ndev
,
2104 "Frame too short to be valid, not sent.\n");
2106 if (mac_rsp
->flags1
& OB_MAC_IOCB_RSP_L
) {
2107 netif_warn(qdev
, tx_done
, qdev
->ndev
,
2108 "Frame too long, but sent anyway.\n");
2110 if (mac_rsp
->flags1
& OB_MAC_IOCB_RSP_B
) {
2111 netif_warn(qdev
, tx_done
, qdev
->ndev
,
2112 "PCI backplane error. Frame not sent.\n");
2115 atomic_inc(&tx_ring
->tx_count
);
2118 /* Fire up a handler to reset the MPI processor. */
2119 void ql_queue_fw_error(struct ql_adapter
*qdev
)
2122 queue_delayed_work(qdev
->workqueue
, &qdev
->mpi_reset_work
, 0);
2125 void ql_queue_asic_error(struct ql_adapter
*qdev
)
2128 ql_disable_interrupts(qdev
);
2129 /* Clear adapter up bit to signal the recovery
2130 * process that it shouldn't kill the reset worker
2133 clear_bit(QL_ADAPTER_UP
, &qdev
->flags
);
2134 /* Set asic recovery bit to indicate reset process that we are
2135 * in fatal error recovery process rather than normal close
2137 set_bit(QL_ASIC_RECOVERY
, &qdev
->flags
);
2138 queue_delayed_work(qdev
->workqueue
, &qdev
->asic_reset_work
, 0);
static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
				    struct ib_ae_iocb_rsp *ib_ae_rsp)
{
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		netif_err(qdev, rx_err, qdev->ndev,
			  "Management Processor Fatal Error.\n");
		ql_queue_fw_error(qdev);
		break;

	case CAM_LOOKUP_ERR_EVENT:
		netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
		netdev_err(qdev->ndev, "This event shouldn't occur.\n");
		ql_queue_asic_error(qdev);
		break;

	case SOFT_ECC_ERROR_EVENT:
		netdev_err(qdev->ndev, "Soft ECC error detected.\n");
		ql_queue_asic_error(qdev);
		break;

	case PCI_ERR_ANON_BUF_RD:
		netdev_err(qdev->ndev, "PCI error occurred when reading "
			   "anonymous buffers from rx_ring %d.\n",
			   ib_ae_rsp->q_id);
		ql_queue_asic_error(qdev);
		break;

	default:
		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
			  ib_ae_rsp->event);
		ql_queue_asic_error(qdev);
		break;
	}
}

static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	int count = 0;

	struct tx_ring *tx_ring;
	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n.",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	if (!net_rsp)
		return 0;
	ql_write_cq_idx(rx_ring);
	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
		if (atomic_read(&tx_ring->queue_stopped) &&
		    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}

	return count;
}

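/* A brief worked example of the wake-up threshold above (the ring size is
 * hypothetical): tx_count holds the number of free slots, so with a wq_len
 * of 256 the stopped subqueue is only woken once more than 256 / 4 = 64
 * slots have been completed and freed, i.e. once the ring is at least 25%
 * empty.  Waking earlier would just let the queue stop again immediately.
 */
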
static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ql_net_rsp_iocb *net_rsp;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n.",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, rx_ring,
					       (struct ib_mac_iocb_rsp *)
					       net_rsp);
			break;

		case OPCODE_IB_AE_IOCB:
			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
						net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
			break;
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
		if (count == budget)
			break;
	}
	ql_update_buffer_queues(qdev, rx_ring);
	ql_write_cq_idx(rx_ring);
	return count;
}

static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct ql_adapter *qdev = rx_ring->qdev;
	struct rx_ring *trx_ring;
	int i, work_done = 0;
	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];

	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);

	/* Service the TX rings first. They start
	 * right after the RSS rings. */
	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
		trx_ring = &qdev->rx_ring[i];
		/* If this TX completion ring belongs to this vector and
		 * it's not empty then service it.
		 */
		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
		    (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
		     trx_ring->cnsmr_idx)) {
			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
				     "%s: Servicing TX completion ring %d.\n",
				     __func__, trx_ring->cq_id);
			ql_clean_outbound_rx_ring(trx_ring);
		}
	}

	/*
	 * Now service the RSS ring if it's active.
	 */
	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
	    rx_ring->cnsmr_idx) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "%s: Servicing RX completion ring %d.\n",
			     __func__, rx_ring->cq_id);
		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
	}

	if (work_done < budget) {
		napi_complete(napi);
		ql_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}

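/* Note on the NAPI budget handling above: ql_clean_inbound_rx_ring() stops
 * after at most 'budget' inbound completions, so returning work_done ==
 * budget tells the NAPI core to poll this ring again without re-enabling
 * the interrupt.  Only when work_done < budget is napi_complete() called
 * and the completion interrupt for this vector re-armed.
 */
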
static void qlge_vlan_mode(struct net_device *ndev, u32 features)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (features & NETIF_F_HW_VLAN_RX) {
		netif_printk(qdev, ifup, KERN_DEBUG, ndev,
			     "Turning on VLAN in NIC_RCV_CFG.\n");
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
			   NIC_RCV_CFG_VLAN_MATCH_AND_NON);
	} else {
		netif_printk(qdev, ifup, KERN_DEBUG, ndev,
			     "Turning off VLAN in NIC_RCV_CFG.\n");
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
	}
}

static u32 qlge_fix_features(struct net_device *ndev, u32 features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}

static int qlge_set_features(struct net_device *ndev, u32 features)
{
	u32 changed = ndev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_RX)
		qlge_vlan_mode(ndev, features);

	return 0;
}

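/* Example of the feature plumbing above: if the stack toggles only
 * NETIF_F_HW_VLAN_RX, qlge_fix_features() forces NETIF_F_HW_VLAN_TX into
 * the same state, and qlge_set_features() sees the RX bit in 'changed'
 * (ndev->features ^ features) and reprograms NIC_RCV_CFG through
 * qlge_vlan_mode().  Toggling unrelated feature bits leaves the VLAN
 * hardware setting untouched.
 */
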
static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = MAC_ADDR_E;

	if (ql_set_mac_addr_reg
	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init vlan address.\n");
	}
}

static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;

	__qlge_vlan_rx_add_vid(qdev, vid);
	set_bit(vid, qdev->active_vlans);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}

static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = 0;

	if (ql_set_mac_addr_reg
	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to clear vlan address.\n");
	}
}

static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;

	__qlge_vlan_rx_kill_vid(qdev, vid);
	clear_bit(vid, qdev->active_vlans);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}

static void qlge_restore_vlan(struct ql_adapter *qdev)
{
	int status;
	u16 vid;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;

	for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
		__qlge_vlan_rx_add_vid(qdev, vid);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}

/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	napi_schedule(&rx_ring->napi);
	return IRQ_HANDLED;
}

/* This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple vector environment.
 * In MSI/Legacy environment it also processes the rest of
 * the rx_rings.
 */
static irqreturn_t qlge_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	struct ql_adapter *qdev = rx_ring->qdev;
	struct intr_context *intr_context = &qdev->intr_context[0];
	u32 var;
	int work_done = 0;

	spin_lock(&qdev->hw_lock);
	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "Shared Interrupt, Not ours!\n");
		spin_unlock(&qdev->hw_lock);
		return IRQ_NONE;
	}
	spin_unlock(&qdev->hw_lock);

	var = ql_disable_completion_interrupt(qdev, intr_context->intr);

	/*
	 * Check for fatal error.
	 */
	if (var & STS_FE) {
		ql_queue_asic_error(qdev);
		netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
		var = ql_read32(qdev, ERR_STS);
		netdev_err(qdev->ndev, "Resetting chip. "
			   "Error Status Register = 0x%x\n", var);
		return IRQ_HANDLED;
	}

	/*
	 * Check MPI processor activity.
	 */
	if ((var & STS_PI) &&
	    (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
		/*
		 * We've got an async event or mailbox completion.
		 * Handle it and clear the source of the interrupt.
		 */
		netif_err(qdev, intr, qdev->ndev,
			  "Got MPI processor interrupt.\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
		queue_delayed_work_on(smp_processor_id(),
				      qdev->workqueue, &qdev->mpi_work, 0);
		work_done++;
	}

	/*
	 * Get the bit-mask that shows the active queues for this
	 * pass. Compare it to the queues that this irq services
	 * and call napi if there's a match.
	 */
	var = ql_read32(qdev, ISR1);
	if (var & intr_context->irq_mask) {
		netif_info(qdev, intr, qdev->ndev,
			   "Waking handler for rx_ring[0].\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		napi_schedule(&rx_ring->napi);
		work_done++;
	}
	ql_enable_completion_interrupt(qdev, intr_context->intr);
	return work_done ? IRQ_HANDLED : IRQ_NONE;
}

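/* Returning IRQ_NONE when none of the sources above needed service lets the
 * kernel's shared/spurious interrupt accounting behave correctly when this
 * legacy INTx line is shared with another device; IRQ_HANDLED is returned
 * only if fatal-error, MPI or completion work was actually found.
 */
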
static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{

	if (skb_is_gso(skb)) {
		int err;
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
		mac_iocb_ptr->net_trans_offset =
		    cpu_to_le16(skb_network_offset(skb) |
				skb_transport_offset(skb)
				<< OB_MAC_TRANSPORT_HDR_SHIFT);
		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
		if (likely(skb->protocol == htons(ETH_P_IP))) {
			struct iphdr *iph = ip_hdr(skb);
			iph->check = 0;
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					     &ipv6_hdr(skb)->daddr,
					     0, IPPROTO_TCP, 0);
		}
		return 1;
	}
	return 0;
}

static void ql_hw_csum_setup(struct sk_buff *skb,
			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	int len;
	struct iphdr *iph = ip_hdr(skb);
	__sum16 *check;

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
	mac_iocb_ptr->net_trans_offset =
	    cpu_to_le16(skb_network_offset(skb) |
			skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);

	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
	if (likely(iph->protocol == IPPROTO_TCP)) {
		check = &(tcp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) +
				(tcp_hdr(skb)->doff << 2));
	} else {
		check = &(udp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) +
				sizeof(struct udphdr));
	}
	*check = ~csum_tcpudp_magic(iph->saddr,
				    iph->daddr, len, iph->protocol, 0);
}

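/* Rough sketch of the checksum hand-off above: for TCP the driver seeds
 * tcp_hdr(skb)->check with ~csum_tcpudp_magic(saddr, daddr, len, proto, 0),
 * the complemented pseudo-header sum over the addresses, protocol and
 * payload length, and the OB_MAC_TSO_IOCB_TC flag asks the hardware to fold
 * the checksum over the TCP header and data on top of that seed.  The UDP
 * path is identical except for the OB_MAC_TSO_IOCB_UC flag and header size.
 */
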
static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct tx_ring_desc *tx_ring_desc;
	struct ob_mac_iocb_req *mac_iocb_ptr;
	struct ql_adapter *qdev = netdev_priv(ndev);
	int tso;
	struct tx_ring *tx_ring;
	u32 tx_ring_idx = (u32) skb->queue_mapping;

	tx_ring = &qdev->tx_ring[tx_ring_idx];

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_info(qdev, tx_queued, qdev->ndev,
			   "%s: shutting down tx queue %d due to lack of resources.\n",
			   __func__, tx_ring_idx);
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		atomic_inc(&tx_ring->queue_stopped);
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
	mac_iocb_ptr = tx_ring_desc->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
	mac_iocb_ptr->tid = tx_ring_desc->index;
	/* We use the upper 32-bits to store the tx queue for this IO.
	 * When we get the completion we can use it to establish the context.
	 */
	mac_iocb_ptr->txq_idx = tx_ring_idx;
	tx_ring_desc->skb = skb;

	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);

	if (vlan_tx_tag_present(skb)) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
		mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
	}
	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
		ql_hw_csum_setup(skb,
				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	}
	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
	    NETDEV_TX_OK) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "Could not map the segments.\n");
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
	tx_ring->prod_idx++;
	if (tx_ring->prod_idx == tx_ring->wq_len)
		tx_ring->prod_idx = 0;
	wmb();

	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
		     "tx queued, slot %d, len %d\n",
		     tx_ring->prod_idx, skb->len);

	atomic_dec(&tx_ring->tx_count);
	return NETDEV_TX_OK;
}

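/* Producer-index bookkeeping above, by way of a (hypothetical) example: with
 * wq_len == 128 the prod_idx walks 0, 1, ..., 127 and then wraps to 0.  The
 * wmb() guarantees the IOCB contents are globally visible before the
 * doorbell write that hands the slot to the chip, and tx_count (the count
 * of free slots) is decremented only after the doorbell has been rung.
 */
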
static void ql_free_shadow_space(struct ql_adapter *qdev)
{
	if (qdev->rx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->rx_ring_shadow_reg_area,
				    qdev->rx_ring_shadow_reg_dma);
		qdev->rx_ring_shadow_reg_area = NULL;
	}
	if (qdev->tx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->tx_ring_shadow_reg_area,
				    qdev->tx_ring_shadow_reg_dma);
		qdev->tx_ring_shadow_reg_area = NULL;
	}
}

static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
	qdev->rx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev,
				 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
	if (qdev->rx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of RX shadow space failed.\n");
		return -ENOMEM;
	}
	memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
	qdev->tx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
				 &qdev->tx_ring_shadow_reg_dma);
	if (qdev->tx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of TX shadow space failed.\n");
		goto err_wqp_sh_area;
	}
	memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
	return 0;

err_wqp_sh_area:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->rx_ring_shadow_reg_area,
			    qdev->rx_ring_shadow_reg_dma);
	return -ENOMEM;
}

2697 static void ql_init_tx_ring(struct ql_adapter
*qdev
, struct tx_ring
*tx_ring
)
2699 struct tx_ring_desc
*tx_ring_desc
;
2701 struct ob_mac_iocb_req
*mac_iocb_ptr
;
2703 mac_iocb_ptr
= tx_ring
->wq_base
;
2704 tx_ring_desc
= tx_ring
->q
;
2705 for (i
= 0; i
< tx_ring
->wq_len
; i
++) {
2706 tx_ring_desc
->index
= i
;
2707 tx_ring_desc
->skb
= NULL
;
2708 tx_ring_desc
->queue_entry
= mac_iocb_ptr
;
2712 atomic_set(&tx_ring
->tx_count
, tx_ring
->wq_len
);
2713 atomic_set(&tx_ring
->queue_stopped
, 0);
2716 static void ql_free_tx_resources(struct ql_adapter
*qdev
,
2717 struct tx_ring
*tx_ring
)
2719 if (tx_ring
->wq_base
) {
2720 pci_free_consistent(qdev
->pdev
, tx_ring
->wq_size
,
2721 tx_ring
->wq_base
, tx_ring
->wq_base_dma
);
2722 tx_ring
->wq_base
= NULL
;
2728 static int ql_alloc_tx_resources(struct ql_adapter
*qdev
,
2729 struct tx_ring
*tx_ring
)
2732 pci_alloc_consistent(qdev
->pdev
, tx_ring
->wq_size
,
2733 &tx_ring
->wq_base_dma
);
2735 if ((tx_ring
->wq_base
== NULL
) ||
2736 tx_ring
->wq_base_dma
& WQ_ADDR_ALIGN
) {
2737 netif_err(qdev
, ifup
, qdev
->ndev
, "tx_ring alloc failed.\n");
2741 kmalloc(tx_ring
->wq_len
* sizeof(struct tx_ring_desc
), GFP_KERNEL
);
2742 if (tx_ring
->q
== NULL
)
2747 pci_free_consistent(qdev
->pdev
, tx_ring
->wq_size
,
2748 tx_ring
->wq_base
, tx_ring
->wq_base_dma
);
2752 static void ql_free_lbq_buffers(struct ql_adapter
*qdev
, struct rx_ring
*rx_ring
)
2754 struct bq_desc
*lbq_desc
;
2756 uint32_t curr_idx
, clean_idx
;
2758 curr_idx
= rx_ring
->lbq_curr_idx
;
2759 clean_idx
= rx_ring
->lbq_clean_idx
;
2760 while (curr_idx
!= clean_idx
) {
2761 lbq_desc
= &rx_ring
->lbq
[curr_idx
];
2763 if (lbq_desc
->p
.pg_chunk
.last_flag
) {
2764 pci_unmap_page(qdev
->pdev
,
2765 lbq_desc
->p
.pg_chunk
.map
,
2766 ql_lbq_block_size(qdev
),
2767 PCI_DMA_FROMDEVICE
);
2768 lbq_desc
->p
.pg_chunk
.last_flag
= 0;
2771 put_page(lbq_desc
->p
.pg_chunk
.page
);
2772 lbq_desc
->p
.pg_chunk
.page
= NULL
;
2774 if (++curr_idx
== rx_ring
->lbq_len
)
2780 static void ql_free_sbq_buffers(struct ql_adapter
*qdev
, struct rx_ring
*rx_ring
)
2783 struct bq_desc
*sbq_desc
;
2785 for (i
= 0; i
< rx_ring
->sbq_len
; i
++) {
2786 sbq_desc
= &rx_ring
->sbq
[i
];
2787 if (sbq_desc
== NULL
) {
2788 netif_err(qdev
, ifup
, qdev
->ndev
,
2789 "sbq_desc %d is NULL.\n", i
);
2792 if (sbq_desc
->p
.skb
) {
2793 pci_unmap_single(qdev
->pdev
,
2794 dma_unmap_addr(sbq_desc
, mapaddr
),
2795 dma_unmap_len(sbq_desc
, maplen
),
2796 PCI_DMA_FROMDEVICE
);
2797 dev_kfree_skb(sbq_desc
->p
.skb
);
2798 sbq_desc
->p
.skb
= NULL
;
2803 /* Free all large and small rx buffers associated
2804 * with the completion queues for this device.
2806 static void ql_free_rx_buffers(struct ql_adapter
*qdev
)
2809 struct rx_ring
*rx_ring
;
2811 for (i
= 0; i
< qdev
->rx_ring_count
; i
++) {
2812 rx_ring
= &qdev
->rx_ring
[i
];
2814 ql_free_lbq_buffers(qdev
, rx_ring
);
2816 ql_free_sbq_buffers(qdev
, rx_ring
);
2820 static void ql_alloc_rx_buffers(struct ql_adapter
*qdev
)
2822 struct rx_ring
*rx_ring
;
2825 for (i
= 0; i
< qdev
->rx_ring_count
; i
++) {
2826 rx_ring
= &qdev
->rx_ring
[i
];
2827 if (rx_ring
->type
!= TX_Q
)
2828 ql_update_buffer_queues(qdev
, rx_ring
);
2832 static void ql_init_lbq_ring(struct ql_adapter
*qdev
,
2833 struct rx_ring
*rx_ring
)
2836 struct bq_desc
*lbq_desc
;
2837 __le64
*bq
= rx_ring
->lbq_base
;
2839 memset(rx_ring
->lbq
, 0, rx_ring
->lbq_len
* sizeof(struct bq_desc
));
2840 for (i
= 0; i
< rx_ring
->lbq_len
; i
++) {
2841 lbq_desc
= &rx_ring
->lbq
[i
];
2842 memset(lbq_desc
, 0, sizeof(*lbq_desc
));
2843 lbq_desc
->index
= i
;
2844 lbq_desc
->addr
= bq
;
2849 static void ql_init_sbq_ring(struct ql_adapter
*qdev
,
2850 struct rx_ring
*rx_ring
)
2853 struct bq_desc
*sbq_desc
;
2854 __le64
*bq
= rx_ring
->sbq_base
;
2856 memset(rx_ring
->sbq
, 0, rx_ring
->sbq_len
* sizeof(struct bq_desc
));
2857 for (i
= 0; i
< rx_ring
->sbq_len
; i
++) {
2858 sbq_desc
= &rx_ring
->sbq
[i
];
2859 memset(sbq_desc
, 0, sizeof(*sbq_desc
));
2860 sbq_desc
->index
= i
;
2861 sbq_desc
->addr
= bq
;
2866 static void ql_free_rx_resources(struct ql_adapter
*qdev
,
2867 struct rx_ring
*rx_ring
)
2869 /* Free the small buffer queue. */
2870 if (rx_ring
->sbq_base
) {
2871 pci_free_consistent(qdev
->pdev
,
2873 rx_ring
->sbq_base
, rx_ring
->sbq_base_dma
);
2874 rx_ring
->sbq_base
= NULL
;
2877 /* Free the small buffer queue control blocks. */
2878 kfree(rx_ring
->sbq
);
2879 rx_ring
->sbq
= NULL
;
2881 /* Free the large buffer queue. */
2882 if (rx_ring
->lbq_base
) {
2883 pci_free_consistent(qdev
->pdev
,
2885 rx_ring
->lbq_base
, rx_ring
->lbq_base_dma
);
2886 rx_ring
->lbq_base
= NULL
;
2889 /* Free the large buffer queue control blocks. */
2890 kfree(rx_ring
->lbq
);
2891 rx_ring
->lbq
= NULL
;
2893 /* Free the rx queue. */
2894 if (rx_ring
->cq_base
) {
2895 pci_free_consistent(qdev
->pdev
,
2897 rx_ring
->cq_base
, rx_ring
->cq_base_dma
);
2898 rx_ring
->cq_base
= NULL
;
2902 /* Allocate queues and buffers for this completions queue based
2903 * on the values in the parameter structure. */
2904 static int ql_alloc_rx_resources(struct ql_adapter
*qdev
,
2905 struct rx_ring
*rx_ring
)
2909 * Allocate the completion queue for this rx_ring.
2912 pci_alloc_consistent(qdev
->pdev
, rx_ring
->cq_size
,
2913 &rx_ring
->cq_base_dma
);
2915 if (rx_ring
->cq_base
== NULL
) {
2916 netif_err(qdev
, ifup
, qdev
->ndev
, "rx_ring alloc failed.\n");
2920 if (rx_ring
->sbq_len
) {
2922 * Allocate small buffer queue.
2925 pci_alloc_consistent(qdev
->pdev
, rx_ring
->sbq_size
,
2926 &rx_ring
->sbq_base_dma
);
2928 if (rx_ring
->sbq_base
== NULL
) {
2929 netif_err(qdev
, ifup
, qdev
->ndev
,
2930 "Small buffer queue allocation failed.\n");
2935 * Allocate small buffer queue control blocks.
2938 kmalloc(rx_ring
->sbq_len
* sizeof(struct bq_desc
),
2940 if (rx_ring
->sbq
== NULL
) {
2941 netif_err(qdev
, ifup
, qdev
->ndev
,
2942 "Small buffer queue control block allocation failed.\n");
2946 ql_init_sbq_ring(qdev
, rx_ring
);
2949 if (rx_ring
->lbq_len
) {
2951 * Allocate large buffer queue.
2954 pci_alloc_consistent(qdev
->pdev
, rx_ring
->lbq_size
,
2955 &rx_ring
->lbq_base_dma
);
2957 if (rx_ring
->lbq_base
== NULL
) {
2958 netif_err(qdev
, ifup
, qdev
->ndev
,
2959 "Large buffer queue allocation failed.\n");
2963 * Allocate large buffer queue control blocks.
2966 kmalloc(rx_ring
->lbq_len
* sizeof(struct bq_desc
),
2968 if (rx_ring
->lbq
== NULL
) {
2969 netif_err(qdev
, ifup
, qdev
->ndev
,
2970 "Large buffer queue control block allocation failed.\n");
2974 ql_init_lbq_ring(qdev
, rx_ring
);
2980 ql_free_rx_resources(qdev
, rx_ring
);
2984 static void ql_tx_ring_clean(struct ql_adapter
*qdev
)
2986 struct tx_ring
*tx_ring
;
2987 struct tx_ring_desc
*tx_ring_desc
;
2991 * Loop through all queues and free
2994 for (j
= 0; j
< qdev
->tx_ring_count
; j
++) {
2995 tx_ring
= &qdev
->tx_ring
[j
];
2996 for (i
= 0; i
< tx_ring
->wq_len
; i
++) {
2997 tx_ring_desc
= &tx_ring
->q
[i
];
2998 if (tx_ring_desc
&& tx_ring_desc
->skb
) {
2999 netif_err(qdev
, ifdown
, qdev
->ndev
,
3000 "Freeing lost SKB %p, from queue %d, index %d.\n",
3001 tx_ring_desc
->skb
, j
,
3002 tx_ring_desc
->index
);
3003 ql_unmap_send(qdev
, tx_ring_desc
,
3004 tx_ring_desc
->map_cnt
);
3005 dev_kfree_skb(tx_ring_desc
->skb
);
3006 tx_ring_desc
->skb
= NULL
;
3012 static void ql_free_mem_resources(struct ql_adapter
*qdev
)
3016 for (i
= 0; i
< qdev
->tx_ring_count
; i
++)
3017 ql_free_tx_resources(qdev
, &qdev
->tx_ring
[i
]);
3018 for (i
= 0; i
< qdev
->rx_ring_count
; i
++)
3019 ql_free_rx_resources(qdev
, &qdev
->rx_ring
[i
]);
3020 ql_free_shadow_space(qdev
);
3023 static int ql_alloc_mem_resources(struct ql_adapter
*qdev
)
3027 /* Allocate space for our shadow registers and such. */
3028 if (ql_alloc_shadow_space(qdev
))
3031 for (i
= 0; i
< qdev
->rx_ring_count
; i
++) {
3032 if (ql_alloc_rx_resources(qdev
, &qdev
->rx_ring
[i
]) != 0) {
3033 netif_err(qdev
, ifup
, qdev
->ndev
,
3034 "RX resource allocation failed.\n");
3038 /* Allocate tx queue resources */
3039 for (i
= 0; i
< qdev
->tx_ring_count
; i
++) {
3040 if (ql_alloc_tx_resources(qdev
, &qdev
->tx_ring
[i
]) != 0) {
3041 netif_err(qdev
, ifup
, qdev
->ndev
,
3042 "TX resource allocation failed.\n");
3049 ql_free_mem_resources(qdev
);
3053 /* Set up the rx ring control block and pass it to the chip.
3054 * The control block is defined as
3055 * "Completion Queue Initialization Control Block", or cqicb.
3057 static int ql_start_rx_ring(struct ql_adapter
*qdev
, struct rx_ring
*rx_ring
)
3059 struct cqicb
*cqicb
= &rx_ring
->cqicb
;
3060 void *shadow_reg
= qdev
->rx_ring_shadow_reg_area
+
3061 (rx_ring
->cq_id
* RX_RING_SHADOW_SPACE
);
3062 u64 shadow_reg_dma
= qdev
->rx_ring_shadow_reg_dma
+
3063 (rx_ring
->cq_id
* RX_RING_SHADOW_SPACE
);
3064 void __iomem
*doorbell_area
=
3065 qdev
->doorbell_area
+ (DB_PAGE_SIZE
* (128 + rx_ring
->cq_id
));
3069 __le64
*base_indirect_ptr
;
3072 /* Set up the shadow registers for this ring. */
3073 rx_ring
->prod_idx_sh_reg
= shadow_reg
;
3074 rx_ring
->prod_idx_sh_reg_dma
= shadow_reg_dma
;
3075 *rx_ring
->prod_idx_sh_reg
= 0;
3076 shadow_reg
+= sizeof(u64
);
3077 shadow_reg_dma
+= sizeof(u64
);
3078 rx_ring
->lbq_base_indirect
= shadow_reg
;
3079 rx_ring
->lbq_base_indirect_dma
= shadow_reg_dma
;
3080 shadow_reg
+= (sizeof(u64
) * MAX_DB_PAGES_PER_BQ(rx_ring
->lbq_len
));
3081 shadow_reg_dma
+= (sizeof(u64
) * MAX_DB_PAGES_PER_BQ(rx_ring
->lbq_len
));
3082 rx_ring
->sbq_base_indirect
= shadow_reg
;
3083 rx_ring
->sbq_base_indirect_dma
= shadow_reg_dma
;
3085 /* PCI doorbell mem area + 0x00 for consumer index register */
3086 rx_ring
->cnsmr_idx_db_reg
= (u32 __iomem
*) doorbell_area
;
3087 rx_ring
->cnsmr_idx
= 0;
3088 rx_ring
->curr_entry
= rx_ring
->cq_base
;
3090 /* PCI doorbell mem area + 0x04 for valid register */
3091 rx_ring
->valid_db_reg
= doorbell_area
+ 0x04;
3093 /* PCI doorbell mem area + 0x18 for large buffer consumer */
3094 rx_ring
->lbq_prod_idx_db_reg
= (u32 __iomem
*) (doorbell_area
+ 0x18);
3096 /* PCI doorbell mem area + 0x1c */
3097 rx_ring
->sbq_prod_idx_db_reg
= (u32 __iomem
*) (doorbell_area
+ 0x1c);
3099 memset((void *)cqicb
, 0, sizeof(struct cqicb
));
3100 cqicb
->msix_vect
= rx_ring
->irq
;
3102 bq_len
= (rx_ring
->cq_len
== 65536) ? 0 : (u16
) rx_ring
->cq_len
;
3103 cqicb
->len
= cpu_to_le16(bq_len
| LEN_V
| LEN_CPP_CONT
);
3105 cqicb
->addr
= cpu_to_le64(rx_ring
->cq_base_dma
);
3107 cqicb
->prod_idx_addr
= cpu_to_le64(rx_ring
->prod_idx_sh_reg_dma
);
3110 * Set up the control block load flags.
3112 cqicb
->flags
= FLAGS_LC
| /* Load queue base address */
3113 FLAGS_LV
| /* Load MSI-X vector */
3114 FLAGS_LI
; /* Load irq delay values */
3115 if (rx_ring
->lbq_len
) {
3116 cqicb
->flags
|= FLAGS_LL
; /* Load lbq values */
3117 tmp
= (u64
)rx_ring
->lbq_base_dma
;
3118 base_indirect_ptr
= rx_ring
->lbq_base_indirect
;
3121 *base_indirect_ptr
= cpu_to_le64(tmp
);
3122 tmp
+= DB_PAGE_SIZE
;
3123 base_indirect_ptr
++;
3125 } while (page_entries
< MAX_DB_PAGES_PER_BQ(rx_ring
->lbq_len
));
3127 cpu_to_le64(rx_ring
->lbq_base_indirect_dma
);
3128 bq_len
= (rx_ring
->lbq_buf_size
== 65536) ? 0 :
3129 (u16
) rx_ring
->lbq_buf_size
;
3130 cqicb
->lbq_buf_size
= cpu_to_le16(bq_len
);
3131 bq_len
= (rx_ring
->lbq_len
== 65536) ? 0 :
3132 (u16
) rx_ring
->lbq_len
;
3133 cqicb
->lbq_len
= cpu_to_le16(bq_len
);
3134 rx_ring
->lbq_prod_idx
= 0;
3135 rx_ring
->lbq_curr_idx
= 0;
3136 rx_ring
->lbq_clean_idx
= 0;
3137 rx_ring
->lbq_free_cnt
= rx_ring
->lbq_len
;
3139 if (rx_ring
->sbq_len
) {
3140 cqicb
->flags
|= FLAGS_LS
; /* Load sbq values */
3141 tmp
= (u64
)rx_ring
->sbq_base_dma
;
3142 base_indirect_ptr
= rx_ring
->sbq_base_indirect
;
3145 *base_indirect_ptr
= cpu_to_le64(tmp
);
3146 tmp
+= DB_PAGE_SIZE
;
3147 base_indirect_ptr
++;
3149 } while (page_entries
< MAX_DB_PAGES_PER_BQ(rx_ring
->sbq_len
));
3151 cpu_to_le64(rx_ring
->sbq_base_indirect_dma
);
3152 cqicb
->sbq_buf_size
=
3153 cpu_to_le16((u16
)(rx_ring
->sbq_buf_size
));
3154 bq_len
= (rx_ring
->sbq_len
== 65536) ? 0 :
3155 (u16
) rx_ring
->sbq_len
;
3156 cqicb
->sbq_len
= cpu_to_le16(bq_len
);
3157 rx_ring
->sbq_prod_idx
= 0;
3158 rx_ring
->sbq_curr_idx
= 0;
3159 rx_ring
->sbq_clean_idx
= 0;
3160 rx_ring
->sbq_free_cnt
= rx_ring
->sbq_len
;
3162 switch (rx_ring
->type
) {
3164 cqicb
->irq_delay
= cpu_to_le16(qdev
->tx_coalesce_usecs
);
3165 cqicb
->pkt_delay
= cpu_to_le16(qdev
->tx_max_coalesced_frames
);
3168 /* Inbound completion handling rx_rings run in
3169 * separate NAPI contexts.
3171 netif_napi_add(qdev
->ndev
, &rx_ring
->napi
, ql_napi_poll_msix
,
3173 cqicb
->irq_delay
= cpu_to_le16(qdev
->rx_coalesce_usecs
);
3174 cqicb
->pkt_delay
= cpu_to_le16(qdev
->rx_max_coalesced_frames
);
3177 netif_printk(qdev
, ifup
, KERN_DEBUG
, qdev
->ndev
,
3178 "Invalid rx_ring->type = %d.\n", rx_ring
->type
);
3180 netif_printk(qdev
, ifup
, KERN_DEBUG
, qdev
->ndev
,
3181 "Initializing rx work queue.\n");
3182 err
= ql_write_cfg(qdev
, cqicb
, sizeof(struct cqicb
),
3183 CFG_LCQ
, rx_ring
->cq_id
);
3185 netif_err(qdev
, ifup
, qdev
->ndev
, "Failed to load CQICB.\n");
3191 static int ql_start_tx_ring(struct ql_adapter
*qdev
, struct tx_ring
*tx_ring
)
3193 struct wqicb
*wqicb
= (struct wqicb
*)tx_ring
;
3194 void __iomem
*doorbell_area
=
3195 qdev
->doorbell_area
+ (DB_PAGE_SIZE
* tx_ring
->wq_id
);
3196 void *shadow_reg
= qdev
->tx_ring_shadow_reg_area
+
3197 (tx_ring
->wq_id
* sizeof(u64
));
3198 u64 shadow_reg_dma
= qdev
->tx_ring_shadow_reg_dma
+
3199 (tx_ring
->wq_id
* sizeof(u64
));
3203 * Assign doorbell registers for this tx_ring.
3205 /* TX PCI doorbell mem area for tx producer index */
3206 tx_ring
->prod_idx_db_reg
= (u32 __iomem
*) doorbell_area
;
3207 tx_ring
->prod_idx
= 0;
3208 /* TX PCI doorbell mem area + 0x04 */
3209 tx_ring
->valid_db_reg
= doorbell_area
+ 0x04;
3212 * Assign shadow registers for this tx_ring.
3214 tx_ring
->cnsmr_idx_sh_reg
= shadow_reg
;
3215 tx_ring
->cnsmr_idx_sh_reg_dma
= shadow_reg_dma
;
3217 wqicb
->len
= cpu_to_le16(tx_ring
->wq_len
| Q_LEN_V
| Q_LEN_CPP_CONT
);
3218 wqicb
->flags
= cpu_to_le16(Q_FLAGS_LC
|
3219 Q_FLAGS_LB
| Q_FLAGS_LI
| Q_FLAGS_LO
);
3220 wqicb
->cq_id_rss
= cpu_to_le16(tx_ring
->cq_id
);
3222 wqicb
->addr
= cpu_to_le64(tx_ring
->wq_base_dma
);
3224 wqicb
->cnsmr_idx_addr
= cpu_to_le64(tx_ring
->cnsmr_idx_sh_reg_dma
);
3226 ql_init_tx_ring(qdev
, tx_ring
);
3228 err
= ql_write_cfg(qdev
, wqicb
, sizeof(*wqicb
), CFG_LRQ
,
3229 (u16
) tx_ring
->wq_id
);
3231 netif_err(qdev
, ifup
, qdev
->ndev
, "Failed to load tx_ring.\n");
3234 netif_printk(qdev
, ifup
, KERN_DEBUG
, qdev
->ndev
,
3235 "Successfully loaded WQICB.\n");
3239 static void ql_disable_msix(struct ql_adapter
*qdev
)
3241 if (test_bit(QL_MSIX_ENABLED
, &qdev
->flags
)) {
3242 pci_disable_msix(qdev
->pdev
);
3243 clear_bit(QL_MSIX_ENABLED
, &qdev
->flags
);
3244 kfree(qdev
->msi_x_entry
);
3245 qdev
->msi_x_entry
= NULL
;
3246 } else if (test_bit(QL_MSI_ENABLED
, &qdev
->flags
)) {
3247 pci_disable_msi(qdev
->pdev
);
3248 clear_bit(QL_MSI_ENABLED
, &qdev
->flags
);
3252 /* We start by trying to get the number of vectors
3253 * stored in qdev->intr_count. If we don't get that
3254 * many then we reduce the count and try again.
3256 static void ql_enable_msix(struct ql_adapter
*qdev
)
3260 /* Get the MSIX vectors. */
3261 if (qlge_irq_type
== MSIX_IRQ
) {
3262 /* Try to alloc space for the msix struct,
3263 * if it fails then go to MSI/legacy.
3265 qdev
->msi_x_entry
= kcalloc(qdev
->intr_count
,
3266 sizeof(struct msix_entry
),
3268 if (!qdev
->msi_x_entry
) {
3269 qlge_irq_type
= MSI_IRQ
;
3273 for (i
= 0; i
< qdev
->intr_count
; i
++)
3274 qdev
->msi_x_entry
[i
].entry
= i
;
3276 /* Loop to get our vectors. We start with
3277 * what we want and settle for what we get.
3280 err
= pci_enable_msix(qdev
->pdev
,
3281 qdev
->msi_x_entry
, qdev
->intr_count
);
3283 qdev
->intr_count
= err
;
3287 kfree(qdev
->msi_x_entry
);
3288 qdev
->msi_x_entry
= NULL
;
3289 netif_warn(qdev
, ifup
, qdev
->ndev
,
3290 "MSI-X Enable failed, trying MSI.\n");
3291 qdev
->intr_count
= 1;
3292 qlge_irq_type
= MSI_IRQ
;
3293 } else if (err
== 0) {
3294 set_bit(QL_MSIX_ENABLED
, &qdev
->flags
);
3295 netif_info(qdev
, ifup
, qdev
->ndev
,
3296 "MSI-X Enabled, got %d vectors.\n",
3302 qdev
->intr_count
= 1;
3303 if (qlge_irq_type
== MSI_IRQ
) {
3304 if (!pci_enable_msi(qdev
->pdev
)) {
3305 set_bit(QL_MSI_ENABLED
, &qdev
->flags
);
3306 netif_info(qdev
, ifup
, qdev
->ndev
,
3307 "Running with MSI interrupts.\n");
3311 qlge_irq_type
= LEG_IRQ
;
3312 netif_printk(qdev
, ifup
, KERN_DEBUG
, qdev
->ndev
,
3313 "Running with legacy interrupts.\n");
3316 /* Each vector services 1 RSS ring and and 1 or more
3317 * TX completion rings. This function loops through
3318 * the TX completion rings and assigns the vector that
3319 * will service it. An example would be if there are
3320 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3321 * This would mean that vector 0 would service RSS ring 0
3322 * and TX completion rings 0,1,2 and 3. Vector 1 would
3323 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3325 static void ql_set_tx_vect(struct ql_adapter
*qdev
)
3328 u32 tx_rings_per_vector
= qdev
->tx_ring_count
/ qdev
->intr_count
;
3330 if (likely(test_bit(QL_MSIX_ENABLED
, &qdev
->flags
))) {
3331 /* Assign irq vectors to TX rx_rings.*/
3332 for (vect
= 0, j
= 0, i
= qdev
->rss_ring_count
;
3333 i
< qdev
->rx_ring_count
; i
++) {
3334 if (j
== tx_rings_per_vector
) {
3338 qdev
->rx_ring
[i
].irq
= vect
;
3342 /* For single vector all rings have an irq
3345 for (i
= 0; i
< qdev
->rx_ring_count
; i
++)
3346 qdev
->rx_ring
[i
].irq
= 0;
3350 /* Set the interrupt mask for this vector. Each vector
3351 * will service 1 RSS ring and 1 or more TX completion
3352 * rings. This function sets up a bit mask per vector
3353 * that indicates which rings it services.
3355 static void ql_set_irq_mask(struct ql_adapter
*qdev
, struct intr_context
*ctx
)
3357 int j
, vect
= ctx
->intr
;
3358 u32 tx_rings_per_vector
= qdev
->tx_ring_count
/ qdev
->intr_count
;
3360 if (likely(test_bit(QL_MSIX_ENABLED
, &qdev
->flags
))) {
3361 /* Add the RSS ring serviced by this vector
3364 ctx
->irq_mask
= (1 << qdev
->rx_ring
[vect
].cq_id
);
3365 /* Add the TX ring(s) serviced by this vector
3367 for (j
= 0; j
< tx_rings_per_vector
; j
++) {
3369 (1 << qdev
->rx_ring
[qdev
->rss_ring_count
+
3370 (vect
* tx_rings_per_vector
) + j
].cq_id
);
3373 /* For single vector we just shift each queue's
3376 for (j
= 0; j
< qdev
->rx_ring_count
; j
++)
3377 ctx
->irq_mask
|= (1 << qdev
->rx_ring
[j
].cq_id
);
3382 * Here we build the intr_context structures based on
3383 * our rx_ring count and intr vector count.
3384 * The intr_context structure is used to hook each vector
3385 * to possibly different handlers.
3387 static void ql_resolve_queues_to_irqs(struct ql_adapter
*qdev
)
3390 struct intr_context
*intr_context
= &qdev
->intr_context
[0];
3392 if (likely(test_bit(QL_MSIX_ENABLED
, &qdev
->flags
))) {
3393 /* Each rx_ring has it's
3394 * own intr_context since we have separate
3395 * vectors for each queue.
3397 for (i
= 0; i
< qdev
->intr_count
; i
++, intr_context
++) {
3398 qdev
->rx_ring
[i
].irq
= i
;
3399 intr_context
->intr
= i
;
3400 intr_context
->qdev
= qdev
;
3401 /* Set up this vector's bit-mask that indicates
3402 * which queues it services.
3404 ql_set_irq_mask(qdev
, intr_context
);
3406 * We set up each vectors enable/disable/read bits so
3407 * there's no bit/mask calculations in the critical path.
3409 intr_context
->intr_en_mask
=
3410 INTR_EN_TYPE_MASK
| INTR_EN_INTR_MASK
|
3411 INTR_EN_TYPE_ENABLE
| INTR_EN_IHD_MASK
| INTR_EN_IHD
3413 intr_context
->intr_dis_mask
=
3414 INTR_EN_TYPE_MASK
| INTR_EN_INTR_MASK
|
3415 INTR_EN_TYPE_DISABLE
| INTR_EN_IHD_MASK
|
3417 intr_context
->intr_read_mask
=
3418 INTR_EN_TYPE_MASK
| INTR_EN_INTR_MASK
|
3419 INTR_EN_TYPE_READ
| INTR_EN_IHD_MASK
| INTR_EN_IHD
|
3422 /* The first vector/queue handles
3423 * broadcast/multicast, fatal errors,
3424 * and firmware events. This in addition
3425 * to normal inbound NAPI processing.
3427 intr_context
->handler
= qlge_isr
;
3428 sprintf(intr_context
->name
, "%s-rx-%d",
3429 qdev
->ndev
->name
, i
);
3432 * Inbound queues handle unicast frames only.
3434 intr_context
->handler
= qlge_msix_rx_isr
;
3435 sprintf(intr_context
->name
, "%s-rx-%d",
3436 qdev
->ndev
->name
, i
);
3441 * All rx_rings use the same intr_context since
3442 * there is only one vector.
3444 intr_context
->intr
= 0;
3445 intr_context
->qdev
= qdev
;
3447 * We set up each vectors enable/disable/read bits so
3448 * there's no bit/mask calculations in the critical path.
3450 intr_context
->intr_en_mask
=
3451 INTR_EN_TYPE_MASK
| INTR_EN_INTR_MASK
| INTR_EN_TYPE_ENABLE
;
3452 intr_context
->intr_dis_mask
=
3453 INTR_EN_TYPE_MASK
| INTR_EN_INTR_MASK
|
3454 INTR_EN_TYPE_DISABLE
;
3455 intr_context
->intr_read_mask
=
3456 INTR_EN_TYPE_MASK
| INTR_EN_INTR_MASK
| INTR_EN_TYPE_READ
;
3458 * Single interrupt means one handler for all rings.
3460 intr_context
->handler
= qlge_isr
;
3461 sprintf(intr_context
->name
, "%s-single_irq", qdev
->ndev
->name
);
3462 /* Set up this vector's bit-mask that indicates
3463 * which queues it services. In this case there is
3464 * a single vector so it will service all RSS and
3465 * TX completion rings.
3467 ql_set_irq_mask(qdev
, intr_context
);
3469 /* Tell the TX completion rings which MSIx vector
3470 * they will be using.
3472 ql_set_tx_vect(qdev
);
3475 static void ql_free_irq(struct ql_adapter
*qdev
)
3478 struct intr_context
*intr_context
= &qdev
->intr_context
[0];
3480 for (i
= 0; i
< qdev
->intr_count
; i
++, intr_context
++) {
3481 if (intr_context
->hooked
) {
3482 if (test_bit(QL_MSIX_ENABLED
, &qdev
->flags
)) {
3483 free_irq(qdev
->msi_x_entry
[i
].vector
,
3485 netif_printk(qdev
, ifdown
, KERN_DEBUG
, qdev
->ndev
,
3486 "freeing msix interrupt %d.\n", i
);
3488 free_irq(qdev
->pdev
->irq
, &qdev
->rx_ring
[0]);
3489 netif_printk(qdev
, ifdown
, KERN_DEBUG
, qdev
->ndev
,
3490 "freeing msi interrupt %d.\n", i
);
3494 ql_disable_msix(qdev
);
3497 static int ql_request_irq(struct ql_adapter
*qdev
)
3501 struct pci_dev
*pdev
= qdev
->pdev
;
3502 struct intr_context
*intr_context
= &qdev
->intr_context
[0];
3504 ql_resolve_queues_to_irqs(qdev
);
3506 for (i
= 0; i
< qdev
->intr_count
; i
++, intr_context
++) {
3507 atomic_set(&intr_context
->irq_cnt
, 0);
3508 if (test_bit(QL_MSIX_ENABLED
, &qdev
->flags
)) {
3509 status
= request_irq(qdev
->msi_x_entry
[i
].vector
,
3510 intr_context
->handler
,
3515 netif_err(qdev
, ifup
, qdev
->ndev
,
3516 "Failed request for MSIX interrupt %d.\n",
3520 netif_printk(qdev
, ifup
, KERN_DEBUG
, qdev
->ndev
,
3521 "Hooked intr %d, queue type %s, with name %s.\n",
3523 qdev
->rx_ring
[i
].type
== DEFAULT_Q
?
3525 qdev
->rx_ring
[i
].type
== TX_Q
?
3527 qdev
->rx_ring
[i
].type
== RX_Q
?
3529 intr_context
->name
);
3532 netif_printk(qdev
, ifup
, KERN_DEBUG
, qdev
->ndev
,
3533 "trying msi or legacy interrupts.\n");
3534 netif_printk(qdev
, ifup
, KERN_DEBUG
, qdev
->ndev
,
3535 "%s: irq = %d.\n", __func__
, pdev
->irq
);
3536 netif_printk(qdev
, ifup
, KERN_DEBUG
, qdev
->ndev
,
3537 "%s: context->name = %s.\n", __func__
,
3538 intr_context
->name
);
3539 netif_printk(qdev
, ifup
, KERN_DEBUG
, qdev
->ndev
,
3540 "%s: dev_id = 0x%p.\n", __func__
,
3543 request_irq(pdev
->irq
, qlge_isr
,
3544 test_bit(QL_MSI_ENABLED
,
3546 flags
) ? 0 : IRQF_SHARED
,
3547 intr_context
->name
, &qdev
->rx_ring
[0]);
3551 netif_err(qdev
, ifup
, qdev
->ndev
,
3552 "Hooked intr %d, queue type %s, with name %s.\n",
3554 qdev
->rx_ring
[0].type
== DEFAULT_Q
?
3556 qdev
->rx_ring
[0].type
== TX_Q
? "TX_Q" :
3557 qdev
->rx_ring
[0].type
== RX_Q
? "RX_Q" : "",
3558 intr_context
->name
);
3560 intr_context
->hooked
= 1;
3564 netif_err(qdev
, ifup
, qdev
->ndev
, "Failed to get the interrupts!!!/n");
3569 static int ql_start_rss(struct ql_adapter
*qdev
)
3571 static const u8 init_hash_seed
[] = {
3572 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3573 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3574 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3575 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3576 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3578 struct ricb
*ricb
= &qdev
->ricb
;
3581 u8
*hash_id
= (u8
*) ricb
->hash_cq_id
;
3583 memset((void *)ricb
, 0, sizeof(*ricb
));
3585 ricb
->base_cq
= RSS_L4K
;
3587 (RSS_L6K
| RSS_LI
| RSS_LB
| RSS_LM
| RSS_RT4
| RSS_RT6
);
3588 ricb
->mask
= cpu_to_le16((u16
)(0x3ff));
3591 * Fill out the Indirection Table.
3593 for (i
= 0; i
< 1024; i
++)
3594 hash_id
[i
] = (i
& (qdev
->rss_ring_count
- 1));
3596 memcpy((void *)&ricb
->ipv6_hash_key
[0], init_hash_seed
, 40);
3597 memcpy((void *)&ricb
->ipv4_hash_key
[0], init_hash_seed
, 16);
3599 netif_printk(qdev
, ifup
, KERN_DEBUG
, qdev
->ndev
, "Initializing RSS.\n");
3601 status
= ql_write_cfg(qdev
, ricb
, sizeof(*ricb
), CFG_LR
, 0);
3603 netif_err(qdev
, ifup
, qdev
->ndev
, "Failed to load RICB.\n");
3606 netif_printk(qdev
, ifup
, KERN_DEBUG
, qdev
->ndev
,
3607 "Successfully loaded RICB.\n");
3611 static int ql_clear_routing_entries(struct ql_adapter
*qdev
)
3615 status
= ql_sem_spinlock(qdev
, SEM_RT_IDX_MASK
);
3618 /* Clear all the entries in the routing table. */
3619 for (i
= 0; i
< 16; i
++) {
3620 status
= ql_set_routing_reg(qdev
, i
, 0, 0);
3622 netif_err(qdev
, ifup
, qdev
->ndev
,
3623 "Failed to init routing register for CAM packets.\n");
3627 ql_sem_unlock(qdev
, SEM_RT_IDX_MASK
);
3631 /* Initialize the frame-to-queue routing. */
3632 static int ql_route_initialize(struct ql_adapter
*qdev
)
3636 /* Clear all the entries in the routing table. */
3637 status
= ql_clear_routing_entries(qdev
);
3641 status
= ql_sem_spinlock(qdev
, SEM_RT_IDX_MASK
);
3645 status
= ql_set_routing_reg(qdev
, RT_IDX_IP_CSUM_ERR_SLOT
,
3646 RT_IDX_IP_CSUM_ERR
, 1);
3648 netif_err(qdev
, ifup
, qdev
->ndev
,
3649 "Failed to init routing register "
3650 "for IP CSUM error packets.\n");
3653 status
= ql_set_routing_reg(qdev
, RT_IDX_TCP_UDP_CSUM_ERR_SLOT
,
3654 RT_IDX_TU_CSUM_ERR
, 1);
3656 netif_err(qdev
, ifup
, qdev
->ndev
,
3657 "Failed to init routing register "
3658 "for TCP/UDP CSUM error packets.\n");
3661 status
= ql_set_routing_reg(qdev
, RT_IDX_BCAST_SLOT
, RT_IDX_BCAST
, 1);
3663 netif_err(qdev
, ifup
, qdev
->ndev
,
3664 "Failed to init routing register for broadcast packets.\n");
3667 /* If we have more than one inbound queue, then turn on RSS in the
3670 if (qdev
->rss_ring_count
> 1) {
3671 status
= ql_set_routing_reg(qdev
, RT_IDX_RSS_MATCH_SLOT
,
3672 RT_IDX_RSS_MATCH
, 1);
3674 netif_err(qdev
, ifup
, qdev
->ndev
,
3675 "Failed to init routing register for MATCH RSS packets.\n");
3680 status
= ql_set_routing_reg(qdev
, RT_IDX_CAM_HIT_SLOT
,
3683 netif_err(qdev
, ifup
, qdev
->ndev
,
3684 "Failed to init routing register for CAM packets.\n");
3686 ql_sem_unlock(qdev
, SEM_RT_IDX_MASK
);
3690 int ql_cam_route_initialize(struct ql_adapter
*qdev
)
3694 /* If check if the link is up and use to
3695 * determine if we are setting or clearing
3696 * the MAC address in the CAM.
3698 set
= ql_read32(qdev
, STS
);
3699 set
&= qdev
->port_link_up
;
3700 status
= ql_set_mac_addr(qdev
, set
);
3702 netif_err(qdev
, ifup
, qdev
->ndev
, "Failed to init mac address.\n");
3706 status
= ql_route_initialize(qdev
);
3708 netif_err(qdev
, ifup
, qdev
->ndev
, "Failed to init routing table.\n");
3713 static int ql_adapter_initialize(struct ql_adapter
*qdev
)
3720 * Set up the System register to halt on errors.
3722 value
= SYS_EFE
| SYS_FAE
;
3724 ql_write32(qdev
, SYS
, mask
| value
);
3726 /* Set the default queue, and VLAN behavior. */
3727 value
= NIC_RCV_CFG_DFQ
| NIC_RCV_CFG_RV
;
3728 mask
= NIC_RCV_CFG_DFQ_MASK
| (NIC_RCV_CFG_RV
<< 16);
3729 ql_write32(qdev
, NIC_RCV_CFG
, (mask
| value
));
3731 /* Set the MPI interrupt to enabled. */
3732 ql_write32(qdev
, INTR_MASK
, (INTR_MASK_PI
<< 16) | INTR_MASK_PI
);
3734 /* Enable the function, set pagesize, enable error checking. */
3735 value
= FSC_FE
| FSC_EPC_INBOUND
| FSC_EPC_OUTBOUND
|
3736 FSC_EC
| FSC_VM_PAGE_4K
;
3737 value
|= SPLT_SETTING
;
3739 /* Set/clear header splitting. */
3740 mask
= FSC_VM_PAGESIZE_MASK
|
3741 FSC_DBL_MASK
| FSC_DBRST_MASK
| (value
<< 16);
3742 ql_write32(qdev
, FSC
, mask
| value
);
3744 ql_write32(qdev
, SPLT_HDR
, SPLT_LEN
);
3746 /* Set RX packet routing to use port/pci function on which the
3747 * packet arrived on in addition to usual frame routing.
3748 * This is helpful on bonding where both interfaces can have
3749 * the same MAC address.
3751 ql_write32(qdev
, RST_FO
, RST_FO_RR_MASK
| RST_FO_RR_RCV_FUNC_CQ
);
3752 /* Reroute all packets to our Interface.
3753 * They may have been routed to MPI firmware
3756 value
= ql_read32(qdev
, MGMT_RCV_CFG
);
3757 value
&= ~MGMT_RCV_CFG_RM
;
3760 /* Sticky reg needs clearing due to WOL. */
3761 ql_write32(qdev
, MGMT_RCV_CFG
, mask
);
3762 ql_write32(qdev
, MGMT_RCV_CFG
, mask
| value
);
3764 /* Default WOL is enable on Mezz cards */
3765 if (qdev
->pdev
->subsystem_device
== 0x0068 ||
3766 qdev
->pdev
->subsystem_device
== 0x0180)
3767 qdev
->wol
= WAKE_MAGIC
;
3769 /* Start up the rx queues. */
3770 for (i
= 0; i
< qdev
->rx_ring_count
; i
++) {
3771 status
= ql_start_rx_ring(qdev
, &qdev
->rx_ring
[i
]);
3773 netif_err(qdev
, ifup
, qdev
->ndev
,
3774 "Failed to start rx ring[%d].\n", i
);
3779 /* If there is more than one inbound completion queue
3780 * then download a RICB to configure RSS.
3782 if (qdev
->rss_ring_count
> 1) {
3783 status
= ql_start_rss(qdev
);
3785 netif_err(qdev
, ifup
, qdev
->ndev
, "Failed to start RSS.\n");
3790 /* Start up the tx queues. */
3791 for (i
= 0; i
< qdev
->tx_ring_count
; i
++) {
3792 status
= ql_start_tx_ring(qdev
, &qdev
->tx_ring
[i
]);
3794 netif_err(qdev
, ifup
, qdev
->ndev
,
3795 "Failed to start tx ring[%d].\n", i
);
3800 /* Initialize the port and set the max framesize. */
3801 status
= qdev
->nic_ops
->port_initialize(qdev
);
3803 netif_err(qdev
, ifup
, qdev
->ndev
, "Failed to start port.\n");
3805 /* Set up the MAC address and frame routing filter. */
3806 status
= ql_cam_route_initialize(qdev
);
3808 netif_err(qdev
, ifup
, qdev
->ndev
,
3809 "Failed to init CAM/Routing tables.\n");
3813 /* Start NAPI for the RSS queues. */
3814 for (i
= 0; i
< qdev
->rss_ring_count
; i
++) {
3815 netif_printk(qdev
, ifup
, KERN_DEBUG
, qdev
->ndev
,
3816 "Enabling NAPI for rx_ring[%d].\n", i
);
3817 napi_enable(&qdev
->rx_ring
[i
].napi
);
3823 /* Issue soft reset to chip. */
3824 static int ql_adapter_reset(struct ql_adapter
*qdev
)
3828 unsigned long end_jiffies
;
3830 /* Clear all the entries in the routing table. */
3831 status
= ql_clear_routing_entries(qdev
);
3833 netif_err(qdev
, ifup
, qdev
->ndev
, "Failed to clear routing bits.\n");
3837 end_jiffies
= jiffies
+
3838 max((unsigned long)1, usecs_to_jiffies(30));
3840 /* Check if bit is set then skip the mailbox command and
3841 * clear the bit, else we are in normal reset process.
3843 if (!test_bit(QL_ASIC_RECOVERY
, &qdev
->flags
)) {
3844 /* Stop management traffic. */
3845 ql_mb_set_mgmnt_traffic_ctl(qdev
, MB_SET_MPI_TFK_STOP
);
3847 /* Wait for the NIC and MGMNT FIFOs to empty. */
3848 ql_wait_fifo_empty(qdev
);
3850 clear_bit(QL_ASIC_RECOVERY
, &qdev
->flags
);
3852 ql_write32(qdev
, RST_FO
, (RST_FO_FR
<< 16) | RST_FO_FR
);
3855 value
= ql_read32(qdev
, RST_FO
);
3856 if ((value
& RST_FO_FR
) == 0)
3859 } while (time_before(jiffies
, end_jiffies
));
3861 if (value
& RST_FO_FR
) {
3862 netif_err(qdev
, ifdown
, qdev
->ndev
,
3863 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3864 status
= -ETIMEDOUT
;
3867 /* Resume management traffic. */
3868 ql_mb_set_mgmnt_traffic_ctl(qdev
, MB_SET_MPI_TFK_RESUME
);
3872 static void ql_display_dev_info(struct net_device
*ndev
)
3874 struct ql_adapter
*qdev
= netdev_priv(ndev
);
3876 netif_info(qdev
, probe
, qdev
->ndev
,
3877 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3878 "XG Roll = %d, XG Rev = %d.\n",
3881 qdev
->chip_rev_id
& 0x0000000f,
3882 qdev
->chip_rev_id
>> 4 & 0x0000000f,
3883 qdev
->chip_rev_id
>> 8 & 0x0000000f,
3884 qdev
->chip_rev_id
>> 12 & 0x0000000f);
3885 netif_info(qdev
, probe
, qdev
->ndev
,
3886 "MAC address %pM\n", ndev
->dev_addr
);
3889 static int ql_wol(struct ql_adapter
*qdev
)
3892 u32 wol
= MB_WOL_DISABLE
;
3894 /* The CAM is still intact after a reset, but if we
3895 * are doing WOL, then we may need to program the
3896 * routing regs. We would also need to issue the mailbox
3897 * commands to instruct the MPI what to do per the ethtool
3901 if (qdev
->wol
& (WAKE_ARP
| WAKE_MAGICSECURE
| WAKE_PHY
| WAKE_UCAST
|
3902 WAKE_MCAST
| WAKE_BCAST
)) {
3903 netif_err(qdev
, ifdown
, qdev
->ndev
,
3904 "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
3909 if (qdev
->wol
& WAKE_MAGIC
) {
3910 status
= ql_mb_wol_set_magic(qdev
, 1);
3912 netif_err(qdev
, ifdown
, qdev
->ndev
,
3913 "Failed to set magic packet on %s.\n",
3917 netif_info(qdev
, drv
, qdev
->ndev
,
3918 "Enabled magic packet successfully on %s.\n",
3921 wol
|= MB_WOL_MAGIC_PKT
;
3925 wol
|= MB_WOL_MODE_ON
;
3926 status
= ql_mb_wol_mode(qdev
, wol
);
3927 netif_err(qdev
, drv
, qdev
->ndev
,
3928 "WOL %s (wol code 0x%x) on %s\n",
3929 (status
== 0) ? "Successfully set" : "Failed",
3930 wol
, qdev
->ndev
->name
);
3936 static void ql_cancel_all_work_sync(struct ql_adapter
*qdev
)
3939 /* Don't kill the reset worker thread if we
3940 * are in the process of recovery.
3942 if (test_bit(QL_ADAPTER_UP
, &qdev
->flags
))
3943 cancel_delayed_work_sync(&qdev
->asic_reset_work
);
3944 cancel_delayed_work_sync(&qdev
->mpi_reset_work
);
3945 cancel_delayed_work_sync(&qdev
->mpi_work
);
3946 cancel_delayed_work_sync(&qdev
->mpi_idc_work
);
3947 cancel_delayed_work_sync(&qdev
->mpi_core_to_log
);
3948 cancel_delayed_work_sync(&qdev
->mpi_port_cfg_work
);
3951 static int ql_adapter_down(struct ql_adapter
*qdev
)
3957 ql_cancel_all_work_sync(qdev
);
3959 for (i
= 0; i
< qdev
->rss_ring_count
; i
++)
3960 napi_disable(&qdev
->rx_ring
[i
].napi
);
3962 clear_bit(QL_ADAPTER_UP
, &qdev
->flags
);
3964 ql_disable_interrupts(qdev
);
3966 ql_tx_ring_clean(qdev
);
3968 /* Call netif_napi_del() from common point.
3970 for (i
= 0; i
< qdev
->rss_ring_count
; i
++)
3971 netif_napi_del(&qdev
->rx_ring
[i
].napi
);
3973 status
= ql_adapter_reset(qdev
);
3975 netif_err(qdev
, ifdown
, qdev
->ndev
, "reset(func #%d) FAILED!\n",
3977 ql_free_rx_buffers(qdev
);
3982 static int ql_adapter_up(struct ql_adapter
*qdev
)
3986 err
= ql_adapter_initialize(qdev
);
3988 netif_info(qdev
, ifup
, qdev
->ndev
, "Unable to initialize adapter.\n");
3991 set_bit(QL_ADAPTER_UP
, &qdev
->flags
);
3992 ql_alloc_rx_buffers(qdev
);
3993 /* If the port is initialized and the
3994 * link is up the turn on the carrier.
3996 if ((ql_read32(qdev
, STS
) & qdev
->port_init
) &&
3997 (ql_read32(qdev
, STS
) & qdev
->port_link_up
))
3999 /* Restore rx mode. */
4000 clear_bit(QL_ALLMULTI
, &qdev
->flags
);
4001 clear_bit(QL_PROMISCUOUS
, &qdev
->flags
);
4002 qlge_set_multicast_list(qdev
->ndev
);
4004 /* Restore vlan setting. */
4005 qlge_restore_vlan(qdev
);
4007 ql_enable_interrupts(qdev
);
4008 ql_enable_all_completion_interrupts(qdev
);
4009 netif_tx_start_all_queues(qdev
->ndev
);
4013 ql_adapter_reset(qdev
);
4017 static void ql_release_adapter_resources(struct ql_adapter
*qdev
)
4019 ql_free_mem_resources(qdev
);
4023 static int ql_get_adapter_resources(struct ql_adapter
*qdev
)
4027 if (ql_alloc_mem_resources(qdev
)) {
4028 netif_err(qdev
, ifup
, qdev
->ndev
, "Unable to allocate memory.\n");
4031 status
= ql_request_irq(qdev
);
4035 static int qlge_close(struct net_device
*ndev
)
4037 struct ql_adapter
*qdev
= netdev_priv(ndev
);
4039 /* If we hit pci_channel_io_perm_failure
4040 * failure condition, then we already
4041 * brought the adapter down.
4043 if (test_bit(QL_EEH_FATAL
, &qdev
->flags
)) {
4044 netif_err(qdev
, drv
, qdev
->ndev
, "EEH fatal did unload.\n");
4045 clear_bit(QL_EEH_FATAL
, &qdev
->flags
);
4050 * Wait for device to recover from a reset.
4051 * (Rarely happens, but possible.)
4053 while (!test_bit(QL_ADAPTER_UP
, &qdev
->flags
))
4055 ql_adapter_down(qdev
);
4056 ql_release_adapter_resources(qdev
);
4060 static int ql_configure_rings(struct ql_adapter
*qdev
)
4063 struct rx_ring
*rx_ring
;
4064 struct tx_ring
*tx_ring
;
4065 int cpu_cnt
= min(MAX_CPUS
, (int)num_online_cpus());
4066 unsigned int lbq_buf_len
= (qdev
->ndev
->mtu
> 1500) ?
4067 LARGE_BUFFER_MAX_SIZE
: LARGE_BUFFER_MIN_SIZE
;
4069 qdev
->lbq_buf_order
= get_order(lbq_buf_len
);
4071 /* In a perfect world we have one RSS ring for each CPU
4072 * and each has it's own vector. To do that we ask for
4073 * cpu_cnt vectors. ql_enable_msix() will adjust the
4074 * vector count to what we actually get. We then
4075 * allocate an RSS ring for each.
4076 * Essentially, we are doing min(cpu_count, msix_vector_count).
4078 qdev
->intr_count
= cpu_cnt
;
4079 ql_enable_msix(qdev
);
4080 /* Adjust the RSS ring count to the actual vector count. */
4081 qdev
->rss_ring_count
= qdev
->intr_count
;
4082 qdev
->tx_ring_count
= cpu_cnt
;
4083 qdev
->rx_ring_count
= qdev
->tx_ring_count
+ qdev
->rss_ring_count
;
4085 for (i
= 0; i
< qdev
->tx_ring_count
; i
++) {
4086 tx_ring
= &qdev
->tx_ring
[i
];
4087 memset((void *)tx_ring
, 0, sizeof(*tx_ring
));
4088 tx_ring
->qdev
= qdev
;
4090 tx_ring
->wq_len
= qdev
->tx_ring_size
;
4092 tx_ring
->wq_len
* sizeof(struct ob_mac_iocb_req
);
4095 * The completion queue ID for the tx rings start
4096 * immediately after the rss rings.
4098 tx_ring
->cq_id
= qdev
->rss_ring_count
+ i
;
4101 for (i
= 0; i
< qdev
->rx_ring_count
; i
++) {
4102 rx_ring
= &qdev
->rx_ring
[i
];
4103 memset((void *)rx_ring
, 0, sizeof(*rx_ring
));
4104 rx_ring
->qdev
= qdev
;
4106 rx_ring
->cpu
= i
% cpu_cnt
; /* CPU to run handler on. */
4107 if (i
< qdev
->rss_ring_count
) {
4109 * Inbound (RSS) queues.
4111 rx_ring
->cq_len
= qdev
->rx_ring_size
;
4113 rx_ring
->cq_len
* sizeof(struct ql_net_rsp_iocb
);
4114 rx_ring
->lbq_len
= NUM_LARGE_BUFFERS
;
4116 rx_ring
->lbq_len
* sizeof(__le64
);
4117 rx_ring
->lbq_buf_size
= (u16
)lbq_buf_len
;
4118 netif_printk(qdev
, ifup
, KERN_DEBUG
, qdev
->ndev
,
4119 "lbq_buf_size %d, order = %d\n",
4120 rx_ring
->lbq_buf_size
,
4121 qdev
->lbq_buf_order
);
4122 rx_ring
->sbq_len
= NUM_SMALL_BUFFERS
;
4124 rx_ring
->sbq_len
* sizeof(__le64
);
4125 rx_ring
->sbq_buf_size
= SMALL_BUF_MAP_SIZE
;
4126 rx_ring
->type
= RX_Q
;
4129 * Outbound queue handles outbound completions only.
4131 /* outbound cq is same size as tx_ring it services. */
4132 rx_ring
->cq_len
= qdev
->tx_ring_size
;
4134 rx_ring
->cq_len
* sizeof(struct ql_net_rsp_iocb
);
4135 rx_ring
->lbq_len
= 0;
4136 rx_ring
->lbq_size
= 0;
4137 rx_ring
->lbq_buf_size
= 0;
4138 rx_ring
->sbq_len
= 0;
4139 rx_ring
->sbq_size
= 0;
4140 rx_ring
->sbq_buf_size
= 0;
4141 rx_ring
->type
= TX_Q
;
4147 static int qlge_open(struct net_device
*ndev
)
4150 struct ql_adapter
*qdev
= netdev_priv(ndev
);
4152 err
= ql_adapter_reset(qdev
);
4156 err
= ql_configure_rings(qdev
);
4160 err
= ql_get_adapter_resources(qdev
);
4164 err
= ql_adapter_up(qdev
);
4171 ql_release_adapter_resources(qdev
);
4175 static int ql_change_rx_buffers(struct ql_adapter
*qdev
)
4177 struct rx_ring
*rx_ring
;
4181 /* Wait for an outstanding reset to complete. */
4182 if (!test_bit(QL_ADAPTER_UP
, &qdev
->flags
)) {
4184 while (i
-- && !test_bit(QL_ADAPTER_UP
, &qdev
->flags
)) {
4185 netif_err(qdev
, ifup
, qdev
->ndev
,
4186 "Waiting for adapter UP...\n");
4191 netif_err(qdev
, ifup
, qdev
->ndev
,
4192 "Timed out waiting for adapter UP\n");
4197 status
= ql_adapter_down(qdev
);
4201 /* Get the new rx buffer size. */
4202 lbq_buf_len
= (qdev
->ndev
->mtu
> 1500) ?
4203 LARGE_BUFFER_MAX_SIZE
: LARGE_BUFFER_MIN_SIZE
;
4204 qdev
->lbq_buf_order
= get_order(lbq_buf_len
);
4206 for (i
= 0; i
< qdev
->rss_ring_count
; i
++) {
4207 rx_ring
= &qdev
->rx_ring
[i
];
4208 /* Set the new size. */
4209 rx_ring
->lbq_buf_size
= lbq_buf_len
;
4212 status
= ql_adapter_up(qdev
);
4218 netif_alert(qdev
, ifup
, qdev
->ndev
,
4219 "Driver up/down cycle failed, closing device.\n");
4220 set_bit(QL_ADAPTER_UP
, &qdev
->flags
);
4221 dev_close(qdev
->ndev
);
4225 static int qlge_change_mtu(struct net_device
*ndev
, int new_mtu
)
4227 struct ql_adapter
*qdev
= netdev_priv(ndev
);
4230 if (ndev
->mtu
== 1500 && new_mtu
== 9000) {
4231 netif_err(qdev
, ifup
, qdev
->ndev
, "Changing to jumbo MTU.\n");
4232 } else if (ndev
->mtu
== 9000 && new_mtu
== 1500) {
4233 netif_err(qdev
, ifup
, qdev
->ndev
, "Changing to normal MTU.\n");
4237 queue_delayed_work(qdev
->workqueue
,
4238 &qdev
->mpi_port_cfg_work
, 3*HZ
);
4240 ndev
->mtu
= new_mtu
;
4242 if (!netif_running(qdev
->ndev
)) {
4246 status
= ql_change_rx_buffers(qdev
);
4248 netif_err(qdev
, ifup
, qdev
->ndev
,
4249 "Changing MTU failed.\n");
static struct net_device_stats *qlge_get_stats(struct net_device
					       *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct rx_ring *rx_ring = &qdev->rx_ring[0];
	struct tx_ring *tx_ring = &qdev->tx_ring[0];
	unsigned long pkts, mcast, dropped, errors, bytes;
	int i;

	/* Get RX stats. */
	pkts = mcast = dropped = errors = bytes = 0;
	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
		pkts += rx_ring->rx_packets;
		bytes += rx_ring->rx_bytes;
		dropped += rx_ring->rx_dropped;
		errors += rx_ring->rx_errors;
		mcast += rx_ring->rx_multicast;
	}
	ndev->stats.rx_packets = pkts;
	ndev->stats.rx_bytes = bytes;
	ndev->stats.rx_dropped = dropped;
	ndev->stats.rx_errors = errors;
	ndev->stats.multicast = mcast;

	/* Get TX stats. */
	pkts = errors = bytes = 0;
	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
		pkts += tx_ring->tx_packets;
		bytes += tx_ring->tx_bytes;
		errors += tx_ring->tx_errors;
	}
	ndev->stats.tx_packets = pkts;
	ndev->stats.tx_bytes = bytes;
	ndev->stats.tx_errors = errors;
	return &ndev->stats;
}
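/* Program the receive filtering mode.  Promiscuous and all-multicast
 * transitions are handled through routing-register slots; individual
 * multicast addresses are loaded into the MAC address CAM under the
 * hardware MAC-address semaphore.
 */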
static void qlge_set_multicast_list(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	int i, status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return;
	/*
	 * Set or clear promiscuous mode if a
	 * transition is taking place.
	 */
	if (ndev->flags & IFF_PROMISC) {
		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set promiscuous mode.\n");
			} else {
				set_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear promiscuous mode.\n");
			} else {
				clear_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	}

	/*
	 * Set or clear all multicast mode if a
	 * transition is taking place.
	 */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set all-multi mode.\n");
			} else {
				set_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear all-multi mode.\n");
			} else {
				clear_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	}

	if (!netdev_mc_empty(ndev)) {
		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
		if (status)
			goto exit;
		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
						MAC_ADDR_TYPE_MULTI_MAC, i)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to load multicast address.\n");
				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
				goto exit;
			}
			i++;
		}
		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
		if (ql_set_routing_reg
		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
			netif_err(qdev, hw, qdev->ndev,
				  "Failed to set multicast match mode.\n");
		} else {
			set_bit(QL_ALLMULTI, &qdev->flags);
		}
	}
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
}
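/* Set a new station address: validate it, keep a local copy so it can
 * be restored after resets, then write it into the CAM slot owned by
 * this PCI function.
 */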
static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int status;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	/* Update local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	if (status)
		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}
static void qlge_tx_timeout(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	ql_queue_asic_error(qdev);
}
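/* Deferred-work handler run after an ASIC error has been queued: cycle
 * the adapter down and back up, then reprogram the RX mode since the
 * promiscuous/all-multi state is lost across the reset.
 */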
static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
	    container_of(work, struct ql_adapter, asic_reset_work.work);
	int status;
	rtnl_lock();
	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	rtnl_unlock();
	return;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device\n");

	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	rtnl_unlock();
}
static const struct nic_operations qla8012_nic_ops = {
	.get_flash		= ql_get_8012_flash_params,
	.port_initialize	= ql_8012_port_initialize,
};

static const struct nic_operations qla8000_nic_ops = {
	.get_flash		= ql_get_8000_flash_params,
	.port_initialize	= ql_8000_port_initialize,
};
/* Find the pcie function number for the other NIC
 * on this chip.  Since both NIC functions share a
 * common firmware we have the lowest enabled function
 * do any common work.  Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
			&temp);
	if (status)
		return status;

	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
			MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
			MPI_TEST_NIC_FUNC_MASK);

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;

	return status;
}
static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;
	qdev->func =
	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func > 3)
		return -EIO;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}
static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}
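/* One-time per-device setup shared by the probe path: enable the PCI
 * device, set up DMA masking, map the control-register and doorbell
 * BARs, read the board and flash configuration, and initialize locks,
 * the workqueue and the default ring and coalescing parameters.
 */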
static int __devinit ql_init_device(struct pci_dev *pdev,
				    struct net_device *ndev, int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_out1;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		return err;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out2;
	}

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
	    ioremap_nocache(pci_resource_start(pdev, 3),
			    pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (qdev->mpi_coredump == NULL) {
			dev_err(&pdev->dev, "Coredump alloc failed.\n");
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);
	mutex_init(&qdev->mpi_mutex);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}
static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_multicast_list	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_fix_features	= qlge_fix_features,
	.ndo_set_features	= qlge_set_features,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};
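/* Periodic heartbeat used for EEH detection (see the comment in
 * qlge_probe()): the STS read exercises the device; if the PCI channel
 * has gone offline the timer is not rearmed, which appears intended to
 * let EEH recovery take over.
 */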
static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var = 0;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
}
static int __devinit qlge_probe(struct pci_dev *pdev,
				const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found = 0;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
			min(MAX_CPUS, (int)num_online_cpus()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
		NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}
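/* Thin wrappers around the normal transmit and RX-completion paths;
 * they are non-static and, judging by the "lb" prefix and the lb_count
 * counter above, presumably serve the ethtool loopback self-test code.
 */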
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}
static void __devexit qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}
/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* Disabling the timer */
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}
/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * it resembles the first-half of the qlge_probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
}
static struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */
static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}
static struct pci_driver qlge_driver = {
	.name = qlge_driver_name,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};
static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);