/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author:     Linux qlge network device driver by
 *                      Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"
char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - Do Not allocate memory. ");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - Do not allow.");
static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
static int ql_wol(struct ql_adapter *qdev);
static void qlge_set_multicast_list(struct net_device *ndev);
/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
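/* ql_sem_trylock() returns 0 when the semaphore was acquired and
 * non-zero when it is already held (or the mask is invalid), in which
 * case the caller must retry or give up.
 */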
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}
/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!.\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}
/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}
/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		return status;

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}
/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						    MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}
/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
					(addr[4] << 8) | (addr[5]);

			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);

			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "Adding %s address %pM at index %d in the CAM.\n",
				     type == MAC_ADDR_TYPE_MULTI_MAC ?
				     "MULTICAST" : "UNICAST",
				     addr, index);

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			/* This field should also include the queue id
			   and possibly the function id.  Right now we hardcode
			   the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->func << CAM_OUT_FUNC_SHIFT) |
				      (0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			netif_info(qdev, ifup, qdev->ndev,
				   "%s VLAN ID %d %s the CAM.\n",
				   enable_bit ? "Adding" : "Removing",
				   index,
				   enable_bit ? "to" : "from");

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}
/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}
void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}
/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}
/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "%s %s mask %s the routing reg.\n",
		     enable ? "Adding" : "Removing",
		     index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
		     index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
		     index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
		     index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
		     index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
		     index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
		     index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
		     index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
		     index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
		     index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
		     index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
		     index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
		     index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
		     index == RT_IDX_UNUSED013 ? "UNUSED13" :
		     index == RT_IDX_UNUSED014 ? "UNUSED14" :
		     index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
		     "(Bad index != RT_IDX)",
		     enable ? "to" : "from");

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_IP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}
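/* Note on the INTR_EN writes below: as with several other registers on
 * this chip, the upper 16 bits act as a write mask for the lower 16.
 * Writing (INTR_EN_EI << 16) | INTR_EN_EI sets the global enable bit,
 * while writing only (INTR_EN_EI << 16) clears it.
 */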
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}
/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	struct intr_context *ctx;
	u32 var = 0;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does a atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}
}
733 static int ql_validate_flash(struct ql_adapter
*qdev
, u32 size
, const char *str
)
737 __le16
*flash
= (__le16
*)&qdev
->flash
;
739 status
= strncmp((char *)&qdev
->flash
, str
, 4);
741 netif_err(qdev
, ifup
, qdev
->ndev
, "Invalid flash signature.\n");
745 for (i
= 0; i
< size
; i
++)
746 csum
+= le16_to_cpu(*flash
++);
749 netif_err(qdev
, ifup
, qdev
->ndev
,
750 "Invalid flash checksum, csum = 0x%.04x.\n", csum
);
755 static int ql_read_flash_word(struct ql_adapter
*qdev
, int offset
, __le32
*data
)
758 /* wait for reg to come ready */
759 status
= ql_wait_reg_rdy(qdev
,
760 FLASH_ADDR
, FLASH_ADDR_RDY
, FLASH_ADDR_ERR
);
763 /* set up for reg read */
764 ql_write32(qdev
, FLASH_ADDR
, FLASH_ADDR_R
| offset
);
765 /* wait for reg to come ready */
766 status
= ql_wait_reg_rdy(qdev
,
767 FLASH_ADDR
, FLASH_ADDR_RDY
, FLASH_ADDR_ERR
);
770 /* This data is stored on flash as an array of
771 * __le32. Since ql_read32() returns cpu endian
772 * we need to swap it back.
774 *data
= cpu_to_le32(ql_read32(qdev
, FLASH_DATA
));
779 static int ql_get_8000_flash_params(struct ql_adapter
*qdev
)
783 __le32
*p
= (__le32
*)&qdev
->flash
;
787 /* Get flash offset for function and adjust
791 offset
= FUNC0_FLASH_OFFSET
/ sizeof(u32
);
793 offset
= FUNC1_FLASH_OFFSET
/ sizeof(u32
);
795 if (ql_sem_spinlock(qdev
, SEM_FLASH_MASK
))
798 size
= sizeof(struct flash_params_8000
) / sizeof(u32
);
799 for (i
= 0; i
< size
; i
++, p
++) {
800 status
= ql_read_flash_word(qdev
, i
+offset
, p
);
802 netif_err(qdev
, ifup
, qdev
->ndev
,
803 "Error reading flash.\n");
808 status
= ql_validate_flash(qdev
,
809 sizeof(struct flash_params_8000
) / sizeof(u16
),
812 netif_err(qdev
, ifup
, qdev
->ndev
, "Invalid flash.\n");
817 /* Extract either manufacturer or BOFM modified
820 if (qdev
->flash
.flash_params_8000
.data_type1
== 2)
822 qdev
->flash
.flash_params_8000
.mac_addr1
,
823 qdev
->ndev
->addr_len
);
826 qdev
->flash
.flash_params_8000
.mac_addr
,
827 qdev
->ndev
->addr_len
);
829 if (!is_valid_ether_addr(mac_addr
)) {
830 netif_err(qdev
, ifup
, qdev
->ndev
, "Invalid MAC address.\n");
835 memcpy(qdev
->ndev
->dev_addr
,
837 qdev
->ndev
->addr_len
);
840 ql_sem_unlock(qdev
, SEM_FLASH_MASK
);
844 static int ql_get_8012_flash_params(struct ql_adapter
*qdev
)
848 __le32
*p
= (__le32
*)&qdev
->flash
;
850 u32 size
= sizeof(struct flash_params_8012
) / sizeof(u32
);
852 /* Second function's parameters follow the first
858 if (ql_sem_spinlock(qdev
, SEM_FLASH_MASK
))
861 for (i
= 0; i
< size
; i
++, p
++) {
862 status
= ql_read_flash_word(qdev
, i
+offset
, p
);
864 netif_err(qdev
, ifup
, qdev
->ndev
,
865 "Error reading flash.\n");
871 status
= ql_validate_flash(qdev
,
872 sizeof(struct flash_params_8012
) / sizeof(u16
),
875 netif_err(qdev
, ifup
, qdev
->ndev
, "Invalid flash.\n");
880 if (!is_valid_ether_addr(qdev
->flash
.flash_params_8012
.mac_addr
)) {
885 memcpy(qdev
->ndev
->dev_addr
,
886 qdev
->flash
.flash_params_8012
.mac_addr
,
887 qdev
->ndev
->addr_len
);
890 ql_sem_unlock(qdev
, SEM_FLASH_MASK
);
894 /* xgmac register are located behind the xgmac_addr and xgmac_data
895 * register pair. Each read/write requires us to wait for the ready
896 * bit before reading/writing the data.
898 static int ql_write_xgmac_reg(struct ql_adapter
*qdev
, u32 reg
, u32 data
)
901 /* wait for reg to come ready */
902 status
= ql_wait_reg_rdy(qdev
,
903 XGMAC_ADDR
, XGMAC_ADDR_RDY
, XGMAC_ADDR_XME
);
906 /* write the data to the data reg */
907 ql_write32(qdev
, XGMAC_DATA
, data
);
908 /* trigger the write */
909 ql_write32(qdev
, XGMAC_ADDR
, reg
);
913 /* xgmac register are located behind the xgmac_addr and xgmac_data
914 * register pair. Each read/write requires us to wait for the ready
915 * bit before reading/writing the data.
917 int ql_read_xgmac_reg(struct ql_adapter
*qdev
, u32 reg
, u32
*data
)
920 /* wait for reg to come ready */
921 status
= ql_wait_reg_rdy(qdev
,
922 XGMAC_ADDR
, XGMAC_ADDR_RDY
, XGMAC_ADDR_XME
);
925 /* set up for reg read */
926 ql_write32(qdev
, XGMAC_ADDR
, reg
| XGMAC_ADDR_R
);
927 /* wait for reg to come ready */
928 status
= ql_wait_reg_rdy(qdev
,
929 XGMAC_ADDR
, XGMAC_ADDR_RDY
, XGMAC_ADDR_XME
);
933 *data
= ql_read32(qdev
, XGMAC_DATA
);
938 /* This is used for reading the 64-bit statistics regs. */
939 int ql_read_xgmac_reg64(struct ql_adapter
*qdev
, u32 reg
, u64
*data
)
945 status
= ql_read_xgmac_reg(qdev
, reg
, &lo
);
949 status
= ql_read_xgmac_reg(qdev
, reg
+ 4, &hi
);
953 *data
= (u64
) lo
| ((u64
) hi
<< 32);
959 static int ql_8000_port_initialize(struct ql_adapter
*qdev
)
963 * Get MPI firmware version for driver banner
966 status
= ql_mb_about_fw(qdev
);
969 status
= ql_mb_get_fw_state(qdev
);
972 /* Wake up a worker to get/set the TX/RX frame sizes. */
973 queue_delayed_work(qdev
->workqueue
, &qdev
->mpi_port_cfg_work
, 0);
978 /* Take the MAC Core out of reset.
979 * Enable statistics counting.
980 * Take the transmitter/receiver out of reset.
981 * This functionality may be done in the MPI firmware at a
984 static int ql_8012_port_initialize(struct ql_adapter
*qdev
)
989 if (ql_sem_trylock(qdev
, qdev
->xg_sem_mask
)) {
990 /* Another function has the semaphore, so
991 * wait for the port init bit to come ready.
993 netif_info(qdev
, link
, qdev
->ndev
,
994 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
995 status
= ql_wait_reg_rdy(qdev
, STS
, qdev
->port_init
, 0);
997 netif_crit(qdev
, link
, qdev
->ndev
,
998 "Port initialize timed out.\n");
1003 netif_info(qdev
, link
, qdev
->ndev
, "Got xgmac semaphore!.\n");
1004 /* Set the core reset. */
1005 status
= ql_read_xgmac_reg(qdev
, GLOBAL_CFG
, &data
);
1008 data
|= GLOBAL_CFG_RESET
;
1009 status
= ql_write_xgmac_reg(qdev
, GLOBAL_CFG
, data
);
1013 /* Clear the core reset and turn on jumbo for receiver. */
1014 data
&= ~GLOBAL_CFG_RESET
; /* Clear core reset. */
1015 data
|= GLOBAL_CFG_JUMBO
; /* Turn on jumbo. */
1016 data
|= GLOBAL_CFG_TX_STAT_EN
;
1017 data
|= GLOBAL_CFG_RX_STAT_EN
;
1018 status
= ql_write_xgmac_reg(qdev
, GLOBAL_CFG
, data
);
1022 /* Enable transmitter, and clear it's reset. */
1023 status
= ql_read_xgmac_reg(qdev
, TX_CFG
, &data
);
1026 data
&= ~TX_CFG_RESET
; /* Clear the TX MAC reset. */
1027 data
|= TX_CFG_EN
; /* Enable the transmitter. */
1028 status
= ql_write_xgmac_reg(qdev
, TX_CFG
, data
);
1032 /* Enable receiver and clear it's reset. */
1033 status
= ql_read_xgmac_reg(qdev
, RX_CFG
, &data
);
1036 data
&= ~RX_CFG_RESET
; /* Clear the RX MAC reset. */
1037 data
|= RX_CFG_EN
; /* Enable the receiver. */
1038 status
= ql_write_xgmac_reg(qdev
, RX_CFG
, data
);
1042 /* Turn on jumbo. */
1044 ql_write_xgmac_reg(qdev
, MAC_TX_PARAMS
, MAC_TX_PARAMS_JUMBO
| (0x2580 << 16));
1048 ql_write_xgmac_reg(qdev
, MAC_RX_PARAMS
, 0x2580);
1052 /* Signal to the world that the port is enabled. */
1053 ql_write32(qdev
, STS
, ((qdev
->port_init
<< 16) | qdev
->port_init
));
1055 ql_sem_unlock(qdev
, qdev
->xg_sem_mask
);
1059 static inline unsigned int ql_lbq_block_size(struct ql_adapter
*qdev
)
1061 return PAGE_SIZE
<< qdev
->lbq_buf_order
;
1064 /* Get the next large buffer. */
1065 static struct bq_desc
*ql_get_curr_lbuf(struct rx_ring
*rx_ring
)
1067 struct bq_desc
*lbq_desc
= &rx_ring
->lbq
[rx_ring
->lbq_curr_idx
];
1068 rx_ring
->lbq_curr_idx
++;
1069 if (rx_ring
->lbq_curr_idx
== rx_ring
->lbq_len
)
1070 rx_ring
->lbq_curr_idx
= 0;
1071 rx_ring
->lbq_free_cnt
++;
1075 static struct bq_desc
*ql_get_curr_lchunk(struct ql_adapter
*qdev
,
1076 struct rx_ring
*rx_ring
)
1078 struct bq_desc
*lbq_desc
= ql_get_curr_lbuf(rx_ring
);
1080 pci_dma_sync_single_for_cpu(qdev
->pdev
,
1081 dma_unmap_addr(lbq_desc
, mapaddr
),
1082 rx_ring
->lbq_buf_size
,
1083 PCI_DMA_FROMDEVICE
);
1085 /* If it's the last chunk of our master page then
1088 if ((lbq_desc
->p
.pg_chunk
.offset
+ rx_ring
->lbq_buf_size
)
1089 == ql_lbq_block_size(qdev
))
1090 pci_unmap_page(qdev
->pdev
,
1091 lbq_desc
->p
.pg_chunk
.map
,
1092 ql_lbq_block_size(qdev
),
1093 PCI_DMA_FROMDEVICE
);
1097 /* Get the next small buffer. */
1098 static struct bq_desc
*ql_get_curr_sbuf(struct rx_ring
*rx_ring
)
1100 struct bq_desc
*sbq_desc
= &rx_ring
->sbq
[rx_ring
->sbq_curr_idx
];
1101 rx_ring
->sbq_curr_idx
++;
1102 if (rx_ring
->sbq_curr_idx
== rx_ring
->sbq_len
)
1103 rx_ring
->sbq_curr_idx
= 0;
1104 rx_ring
->sbq_free_cnt
++;
1108 /* Update an rx ring index. */
1109 static void ql_update_cq(struct rx_ring
*rx_ring
)
1111 rx_ring
->cnsmr_idx
++;
1112 rx_ring
->curr_entry
++;
1113 if (unlikely(rx_ring
->cnsmr_idx
== rx_ring
->cq_len
)) {
1114 rx_ring
->cnsmr_idx
= 0;
1115 rx_ring
->curr_entry
= rx_ring
->cq_base
;
1119 static void ql_write_cq_idx(struct rx_ring
*rx_ring
)
1121 ql_write_db_reg(rx_ring
->cnsmr_idx
, rx_ring
->cnsmr_idx_db_reg
);
1124 static int ql_get_next_chunk(struct ql_adapter
*qdev
, struct rx_ring
*rx_ring
,
1125 struct bq_desc
*lbq_desc
)
1127 if (!rx_ring
->pg_chunk
.page
) {
1129 rx_ring
->pg_chunk
.page
= alloc_pages(__GFP_COLD
| __GFP_COMP
|
1131 qdev
->lbq_buf_order
);
1132 if (unlikely(!rx_ring
->pg_chunk
.page
)) {
1133 netif_err(qdev
, drv
, qdev
->ndev
,
1134 "page allocation failed.\n");
1137 rx_ring
->pg_chunk
.offset
= 0;
1138 map
= pci_map_page(qdev
->pdev
, rx_ring
->pg_chunk
.page
,
1139 0, ql_lbq_block_size(qdev
),
1140 PCI_DMA_FROMDEVICE
);
1141 if (pci_dma_mapping_error(qdev
->pdev
, map
)) {
1142 __free_pages(rx_ring
->pg_chunk
.page
,
1143 qdev
->lbq_buf_order
);
1144 netif_err(qdev
, drv
, qdev
->ndev
,
1145 "PCI mapping failed.\n");
1148 rx_ring
->pg_chunk
.map
= map
;
1149 rx_ring
->pg_chunk
.va
= page_address(rx_ring
->pg_chunk
.page
);
1152 /* Copy the current master pg_chunk info
1153 * to the current descriptor.
1155 lbq_desc
->p
.pg_chunk
= rx_ring
->pg_chunk
;
1157 /* Adjust the master page chunk for next
1160 rx_ring
->pg_chunk
.offset
+= rx_ring
->lbq_buf_size
;
1161 if (rx_ring
->pg_chunk
.offset
== ql_lbq_block_size(qdev
)) {
1162 rx_ring
->pg_chunk
.page
= NULL
;
1163 lbq_desc
->p
.pg_chunk
.last_flag
= 1;
1165 rx_ring
->pg_chunk
.va
+= rx_ring
->lbq_buf_size
;
1166 get_page(rx_ring
->pg_chunk
.page
);
1167 lbq_desc
->p
.pg_chunk
.last_flag
= 0;
1171 /* Process (refill) a large buffer queue. */
1172 static void ql_update_lbq(struct ql_adapter
*qdev
, struct rx_ring
*rx_ring
)
1174 u32 clean_idx
= rx_ring
->lbq_clean_idx
;
1175 u32 start_idx
= clean_idx
;
1176 struct bq_desc
*lbq_desc
;
1180 while (rx_ring
->lbq_free_cnt
> 32) {
1181 for (i
= 0; i
< 16; i
++) {
1182 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1183 "lbq: try cleaning clean_idx = %d.\n",
1185 lbq_desc
= &rx_ring
->lbq
[clean_idx
];
1186 if (ql_get_next_chunk(qdev
, rx_ring
, lbq_desc
)) {
1187 netif_err(qdev
, ifup
, qdev
->ndev
,
1188 "Could not get a page chunk.\n");
1192 map
= lbq_desc
->p
.pg_chunk
.map
+
1193 lbq_desc
->p
.pg_chunk
.offset
;
1194 dma_unmap_addr_set(lbq_desc
, mapaddr
, map
);
1195 dma_unmap_len_set(lbq_desc
, maplen
,
1196 rx_ring
->lbq_buf_size
);
1197 *lbq_desc
->addr
= cpu_to_le64(map
);
1199 pci_dma_sync_single_for_device(qdev
->pdev
, map
,
1200 rx_ring
->lbq_buf_size
,
1201 PCI_DMA_FROMDEVICE
);
1203 if (clean_idx
== rx_ring
->lbq_len
)
1207 rx_ring
->lbq_clean_idx
= clean_idx
;
1208 rx_ring
->lbq_prod_idx
+= 16;
1209 if (rx_ring
->lbq_prod_idx
== rx_ring
->lbq_len
)
1210 rx_ring
->lbq_prod_idx
= 0;
1211 rx_ring
->lbq_free_cnt
-= 16;
1214 if (start_idx
!= clean_idx
) {
1215 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1216 "lbq: updating prod idx = %d.\n",
1217 rx_ring
->lbq_prod_idx
);
1218 ql_write_db_reg(rx_ring
->lbq_prod_idx
,
1219 rx_ring
->lbq_prod_idx_db_reg
);
1223 /* Process (refill) a small buffer queue. */
1224 static void ql_update_sbq(struct ql_adapter
*qdev
, struct rx_ring
*rx_ring
)
1226 u32 clean_idx
= rx_ring
->sbq_clean_idx
;
1227 u32 start_idx
= clean_idx
;
1228 struct bq_desc
*sbq_desc
;
1232 while (rx_ring
->sbq_free_cnt
> 16) {
1233 for (i
= 0; i
< 16; i
++) {
1234 sbq_desc
= &rx_ring
->sbq
[clean_idx
];
1235 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1236 "sbq: try cleaning clean_idx = %d.\n",
1238 if (sbq_desc
->p
.skb
== NULL
) {
1239 netif_printk(qdev
, rx_status
, KERN_DEBUG
,
1241 "sbq: getting new skb for index %d.\n",
1244 netdev_alloc_skb(qdev
->ndev
,
1246 if (sbq_desc
->p
.skb
== NULL
) {
1247 netif_err(qdev
, probe
, qdev
->ndev
,
1248 "Couldn't get an skb.\n");
1249 rx_ring
->sbq_clean_idx
= clean_idx
;
1252 skb_reserve(sbq_desc
->p
.skb
, QLGE_SB_PAD
);
1253 map
= pci_map_single(qdev
->pdev
,
1254 sbq_desc
->p
.skb
->data
,
1255 rx_ring
->sbq_buf_size
,
1256 PCI_DMA_FROMDEVICE
);
1257 if (pci_dma_mapping_error(qdev
->pdev
, map
)) {
1258 netif_err(qdev
, ifup
, qdev
->ndev
,
1259 "PCI mapping failed.\n");
1260 rx_ring
->sbq_clean_idx
= clean_idx
;
1261 dev_kfree_skb_any(sbq_desc
->p
.skb
);
1262 sbq_desc
->p
.skb
= NULL
;
1265 dma_unmap_addr_set(sbq_desc
, mapaddr
, map
);
1266 dma_unmap_len_set(sbq_desc
, maplen
,
1267 rx_ring
->sbq_buf_size
);
1268 *sbq_desc
->addr
= cpu_to_le64(map
);
1272 if (clean_idx
== rx_ring
->sbq_len
)
1275 rx_ring
->sbq_clean_idx
= clean_idx
;
1276 rx_ring
->sbq_prod_idx
+= 16;
1277 if (rx_ring
->sbq_prod_idx
== rx_ring
->sbq_len
)
1278 rx_ring
->sbq_prod_idx
= 0;
1279 rx_ring
->sbq_free_cnt
-= 16;
1282 if (start_idx
!= clean_idx
) {
1283 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1284 "sbq: updating prod idx = %d.\n",
1285 rx_ring
->sbq_prod_idx
);
1286 ql_write_db_reg(rx_ring
->sbq_prod_idx
,
1287 rx_ring
->sbq_prod_idx_db_reg
);
1291 static void ql_update_buffer_queues(struct ql_adapter
*qdev
,
1292 struct rx_ring
*rx_ring
)
1294 ql_update_sbq(qdev
, rx_ring
);
1295 ql_update_lbq(qdev
, rx_ring
);
1298 /* Unmaps tx buffers. Can be called from send() if a pci mapping
1299 * fails at some stage, or from the interrupt when a tx completes.
1301 static void ql_unmap_send(struct ql_adapter
*qdev
,
1302 struct tx_ring_desc
*tx_ring_desc
, int mapped
)
1305 for (i
= 0; i
< mapped
; i
++) {
1306 if (i
== 0 || (i
== 7 && mapped
> 7)) {
1308 * Unmap the skb->data area, or the
1309 * external sglist (AKA the Outbound
1310 * Address List (OAL)).
1311 * If its the zeroeth element, then it's
1312 * the skb->data area. If it's the 7th
1313 * element and there is more than 6 frags,
1317 netif_printk(qdev
, tx_done
, KERN_DEBUG
,
1319 "unmapping OAL area.\n");
1321 pci_unmap_single(qdev
->pdev
,
1322 dma_unmap_addr(&tx_ring_desc
->map
[i
],
1324 dma_unmap_len(&tx_ring_desc
->map
[i
],
1328 netif_printk(qdev
, tx_done
, KERN_DEBUG
, qdev
->ndev
,
1329 "unmapping frag %d.\n", i
);
1330 pci_unmap_page(qdev
->pdev
,
1331 dma_unmap_addr(&tx_ring_desc
->map
[i
],
1333 dma_unmap_len(&tx_ring_desc
->map
[i
],
1334 maplen
), PCI_DMA_TODEVICE
);
1340 /* Map the buffers for this transmit. This will return
1341 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1343 static int ql_map_send(struct ql_adapter
*qdev
,
1344 struct ob_mac_iocb_req
*mac_iocb_ptr
,
1345 struct sk_buff
*skb
, struct tx_ring_desc
*tx_ring_desc
)
1347 int len
= skb_headlen(skb
);
1349 int frag_idx
, err
, map_idx
= 0;
1350 struct tx_buf_desc
*tbd
= mac_iocb_ptr
->tbd
;
1351 int frag_cnt
= skb_shinfo(skb
)->nr_frags
;
1354 netif_printk(qdev
, tx_queued
, KERN_DEBUG
, qdev
->ndev
,
1355 "frag_cnt = %d.\n", frag_cnt
);
1358 * Map the skb buffer first.
1360 map
= pci_map_single(qdev
->pdev
, skb
->data
, len
, PCI_DMA_TODEVICE
);
1362 err
= pci_dma_mapping_error(qdev
->pdev
, map
);
1364 netif_err(qdev
, tx_queued
, qdev
->ndev
,
1365 "PCI mapping failed with error: %d\n", err
);
1367 return NETDEV_TX_BUSY
;
1370 tbd
->len
= cpu_to_le32(len
);
1371 tbd
->addr
= cpu_to_le64(map
);
1372 dma_unmap_addr_set(&tx_ring_desc
->map
[map_idx
], mapaddr
, map
);
1373 dma_unmap_len_set(&tx_ring_desc
->map
[map_idx
], maplen
, len
);
1377 * This loop fills the remainder of the 8 address descriptors
1378 * in the IOCB. If there are more than 7 fragments, then the
1379 * eighth address desc will point to an external list (OAL).
1380 * When this happens, the remainder of the frags will be stored
1383 for (frag_idx
= 0; frag_idx
< frag_cnt
; frag_idx
++, map_idx
++) {
1384 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[frag_idx
];
1386 if (frag_idx
== 6 && frag_cnt
> 7) {
1387 /* Let's tack on an sglist.
1388 * Our control block will now
1390 * iocb->seg[0] = skb->data
1391 * iocb->seg[1] = frag[0]
1392 * iocb->seg[2] = frag[1]
1393 * iocb->seg[3] = frag[2]
1394 * iocb->seg[4] = frag[3]
1395 * iocb->seg[5] = frag[4]
1396 * iocb->seg[6] = frag[5]
1397 * iocb->seg[7] = ptr to OAL (external sglist)
1398 * oal->seg[0] = frag[6]
1399 * oal->seg[1] = frag[7]
1400 * oal->seg[2] = frag[8]
1401 * oal->seg[3] = frag[9]
1402 * oal->seg[4] = frag[10]
1405 /* Tack on the OAL in the eighth segment of IOCB. */
1406 map
= pci_map_single(qdev
->pdev
, &tx_ring_desc
->oal
,
1409 err
= pci_dma_mapping_error(qdev
->pdev
, map
);
1411 netif_err(qdev
, tx_queued
, qdev
->ndev
,
1412 "PCI mapping outbound address list with error: %d\n",
1417 tbd
->addr
= cpu_to_le64(map
);
1419 * The length is the number of fragments
1420 * that remain to be mapped times the length
1421 * of our sglist (OAL).
1424 cpu_to_le32((sizeof(struct tx_buf_desc
) *
1425 (frag_cnt
- frag_idx
)) | TX_DESC_C
);
1426 dma_unmap_addr_set(&tx_ring_desc
->map
[map_idx
], mapaddr
,
1428 dma_unmap_len_set(&tx_ring_desc
->map
[map_idx
], maplen
,
1429 sizeof(struct oal
));
1430 tbd
= (struct tx_buf_desc
*)&tx_ring_desc
->oal
;
1434 map
= skb_frag_dma_map(&qdev
->pdev
->dev
, frag
, 0, skb_frag_size(frag
),
1437 err
= dma_mapping_error(&qdev
->pdev
->dev
, map
);
1439 netif_err(qdev
, tx_queued
, qdev
->ndev
,
1440 "PCI mapping frags failed with error: %d.\n",
1445 tbd
->addr
= cpu_to_le64(map
);
1446 tbd
->len
= cpu_to_le32(skb_frag_size(frag
));
1447 dma_unmap_addr_set(&tx_ring_desc
->map
[map_idx
], mapaddr
, map
);
1448 dma_unmap_len_set(&tx_ring_desc
->map
[map_idx
], maplen
,
1449 skb_frag_size(frag
));
1452 /* Save the number of segments we've mapped. */
1453 tx_ring_desc
->map_cnt
= map_idx
;
1454 /* Terminate the last segment. */
1455 tbd
->len
= cpu_to_le32(le32_to_cpu(tbd
->len
) | TX_DESC_E
);
1456 return NETDEV_TX_OK
;
1460 * If the first frag mapping failed, then i will be zero.
1461 * This causes the unmap of the skb->data area. Otherwise
1462 * we pass in the number of frags that mapped successfully
1463 * so they can be umapped.
1465 ql_unmap_send(qdev
, tx_ring_desc
, map_idx
);
1466 return NETDEV_TX_BUSY
;
1469 /* Process an inbound completion from an rx ring. */
1470 static void ql_process_mac_rx_gro_page(struct ql_adapter
*qdev
,
1471 struct rx_ring
*rx_ring
,
1472 struct ib_mac_iocb_rsp
*ib_mac_rsp
,
1476 struct sk_buff
*skb
;
1477 struct bq_desc
*lbq_desc
= ql_get_curr_lchunk(qdev
, rx_ring
);
1478 struct napi_struct
*napi
= &rx_ring
->napi
;
1480 napi
->dev
= qdev
->ndev
;
1482 skb
= napi_get_frags(napi
);
1484 netif_err(qdev
, drv
, qdev
->ndev
,
1485 "Couldn't get an skb, exiting.\n");
1486 rx_ring
->rx_dropped
++;
1487 put_page(lbq_desc
->p
.pg_chunk
.page
);
1490 prefetch(lbq_desc
->p
.pg_chunk
.va
);
1491 __skb_fill_page_desc(skb
, skb_shinfo(skb
)->nr_frags
,
1492 lbq_desc
->p
.pg_chunk
.page
,
1493 lbq_desc
->p
.pg_chunk
.offset
,
1497 skb
->data_len
+= length
;
1498 skb
->truesize
+= length
;
1499 skb_shinfo(skb
)->nr_frags
++;
1501 rx_ring
->rx_packets
++;
1502 rx_ring
->rx_bytes
+= length
;
1503 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1504 skb_record_rx_queue(skb
, rx_ring
->cq_id
);
1505 if (vlan_id
!= 0xffff)
1506 __vlan_hwaccel_put_tag(skb
, vlan_id
);
1507 napi_gro_frags(napi
);
1510 /* Process an inbound completion from an rx ring. */
1511 static void ql_process_mac_rx_page(struct ql_adapter
*qdev
,
1512 struct rx_ring
*rx_ring
,
1513 struct ib_mac_iocb_rsp
*ib_mac_rsp
,
1517 struct net_device
*ndev
= qdev
->ndev
;
1518 struct sk_buff
*skb
= NULL
;
1520 struct bq_desc
*lbq_desc
= ql_get_curr_lchunk(qdev
, rx_ring
);
1521 struct napi_struct
*napi
= &rx_ring
->napi
;
1523 skb
= netdev_alloc_skb(ndev
, length
);
1525 netif_err(qdev
, drv
, qdev
->ndev
,
1526 "Couldn't get an skb, need to unwind!.\n");
1527 rx_ring
->rx_dropped
++;
1528 put_page(lbq_desc
->p
.pg_chunk
.page
);
1532 addr
= lbq_desc
->p
.pg_chunk
.va
;
1536 /* Frame error, so drop the packet. */
1537 if (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_ERR_MASK
) {
1538 netif_info(qdev
, drv
, qdev
->ndev
,
1539 "Receive error, flags2 = 0x%x\n", ib_mac_rsp
->flags2
);
1540 rx_ring
->rx_errors
++;
1544 /* The max framesize filter on this chip is set higher than
1545 * MTU since FCoE uses 2k frames.
1547 if (skb
->len
> ndev
->mtu
+ ETH_HLEN
) {
1548 netif_err(qdev
, drv
, qdev
->ndev
,
1549 "Segment too small, dropping.\n");
1550 rx_ring
->rx_dropped
++;
1553 memcpy(skb_put(skb
, ETH_HLEN
), addr
, ETH_HLEN
);
1554 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1555 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1557 skb_fill_page_desc(skb
, 0, lbq_desc
->p
.pg_chunk
.page
,
1558 lbq_desc
->p
.pg_chunk
.offset
+ETH_HLEN
,
1560 skb
->len
+= length
-ETH_HLEN
;
1561 skb
->data_len
+= length
-ETH_HLEN
;
1562 skb
->truesize
+= length
-ETH_HLEN
;
1564 rx_ring
->rx_packets
++;
1565 rx_ring
->rx_bytes
+= skb
->len
;
1566 skb
->protocol
= eth_type_trans(skb
, ndev
);
1567 skb_checksum_none_assert(skb
);
1569 if ((ndev
->features
& NETIF_F_RXCSUM
) &&
1570 !(ib_mac_rsp
->flags1
& IB_MAC_CSUM_ERR_MASK
)) {
1572 if (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_T
) {
1573 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1574 "TCP checksum done!\n");
1575 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1576 } else if ((ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_U
) &&
1577 (ib_mac_rsp
->flags3
& IB_MAC_IOCB_RSP_V4
)) {
1578 /* Unfragmented ipv4 UDP frame. */
1579 struct iphdr
*iph
= (struct iphdr
*) skb
->data
;
1580 if (!(iph
->frag_off
&
1581 cpu_to_be16(IP_MF
|IP_OFFSET
))) {
1582 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1583 netif_printk(qdev
, rx_status
, KERN_DEBUG
,
1585 "TCP checksum done!\n");
1590 skb_record_rx_queue(skb
, rx_ring
->cq_id
);
1591 if (vlan_id
!= 0xffff)
1592 __vlan_hwaccel_put_tag(skb
, vlan_id
);
1593 if (skb
->ip_summed
== CHECKSUM_UNNECESSARY
)
1594 napi_gro_receive(napi
, skb
);
1596 netif_receive_skb(skb
);
1599 dev_kfree_skb_any(skb
);
1600 put_page(lbq_desc
->p
.pg_chunk
.page
);
1603 /* Process an inbound completion from an rx ring. */
1604 static void ql_process_mac_rx_skb(struct ql_adapter
*qdev
,
1605 struct rx_ring
*rx_ring
,
1606 struct ib_mac_iocb_rsp
*ib_mac_rsp
,
1610 struct net_device
*ndev
= qdev
->ndev
;
1611 struct sk_buff
*skb
= NULL
;
1612 struct sk_buff
*new_skb
= NULL
;
1613 struct bq_desc
*sbq_desc
= ql_get_curr_sbuf(rx_ring
);
1615 skb
= sbq_desc
->p
.skb
;
1616 /* Allocate new_skb and copy */
1617 new_skb
= netdev_alloc_skb(qdev
->ndev
, length
+ NET_IP_ALIGN
);
1618 if (new_skb
== NULL
) {
1619 netif_err(qdev
, probe
, qdev
->ndev
,
1620 "No skb available, drop the packet.\n");
1621 rx_ring
->rx_dropped
++;
1624 skb_reserve(new_skb
, NET_IP_ALIGN
);
1625 memcpy(skb_put(new_skb
, length
), skb
->data
, length
);
1628 /* Frame error, so drop the packet. */
1629 if (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_ERR_MASK
) {
1630 netif_info(qdev
, drv
, qdev
->ndev
,
1631 "Receive error, flags2 = 0x%x\n", ib_mac_rsp
->flags2
);
1632 dev_kfree_skb_any(skb
);
1633 rx_ring
->rx_errors
++;
1637 /* loopback self test for ethtool */
1638 if (test_bit(QL_SELFTEST
, &qdev
->flags
)) {
1639 ql_check_lb_frame(qdev
, skb
);
1640 dev_kfree_skb_any(skb
);
1644 /* The max framesize filter on this chip is set higher than
1645 * MTU since FCoE uses 2k frames.
1647 if (skb
->len
> ndev
->mtu
+ ETH_HLEN
) {
1648 dev_kfree_skb_any(skb
);
1649 rx_ring
->rx_dropped
++;
1653 prefetch(skb
->data
);
1655 if (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) {
1656 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1658 (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) ==
1659 IB_MAC_IOCB_RSP_M_HASH
? "Hash" :
1660 (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) ==
1661 IB_MAC_IOCB_RSP_M_REG
? "Registered" :
1662 (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) ==
1663 IB_MAC_IOCB_RSP_M_PROM
? "Promiscuous" : "");
1665 if (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_P
)
1666 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1667 "Promiscuous Packet.\n");
1669 rx_ring
->rx_packets
++;
1670 rx_ring
->rx_bytes
+= skb
->len
;
1671 skb
->protocol
= eth_type_trans(skb
, ndev
);
1672 skb_checksum_none_assert(skb
);
1674 /* If rx checksum is on, and there are no
1675 * csum or frame errors.
1677 if ((ndev
->features
& NETIF_F_RXCSUM
) &&
1678 !(ib_mac_rsp
->flags1
& IB_MAC_CSUM_ERR_MASK
)) {
1680 if (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_T
) {
1681 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1682 "TCP checksum done!\n");
1683 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1684 } else if ((ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_U
) &&
1685 (ib_mac_rsp
->flags3
& IB_MAC_IOCB_RSP_V4
)) {
1686 /* Unfragmented ipv4 UDP frame. */
1687 struct iphdr
*iph
= (struct iphdr
*) skb
->data
;
1688 if (!(iph
->frag_off
&
1689 ntohs(IP_MF
|IP_OFFSET
))) {
1690 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1691 netif_printk(qdev
, rx_status
, KERN_DEBUG
,
1693 "TCP checksum done!\n");
1698 skb_record_rx_queue(skb
, rx_ring
->cq_id
);
1699 if (vlan_id
!= 0xffff)
1700 __vlan_hwaccel_put_tag(skb
, vlan_id
);
1701 if (skb
->ip_summed
== CHECKSUM_UNNECESSARY
)
1702 napi_gro_receive(&rx_ring
->napi
, skb
);
1704 netif_receive_skb(skb
);
1707 static void ql_realign_skb(struct sk_buff
*skb
, int len
)
1709 void *temp_addr
= skb
->data
;
1711 /* Undo the skb_reserve(skb,32) we did before
1712 * giving to hardware, and realign data on
1713 * a 2-byte boundary.
1715 skb
->data
-= QLGE_SB_PAD
- NET_IP_ALIGN
;
1716 skb
->tail
-= QLGE_SB_PAD
- NET_IP_ALIGN
;
1717 skb_copy_to_linear_data(skb
, temp_addr
,
1722 * This function builds an skb for the given inbound
1723 * completion. It will be rewritten for readability in the near
1724 * future, but for not it works well.
1726 static struct sk_buff
*ql_build_rx_skb(struct ql_adapter
*qdev
,
1727 struct rx_ring
*rx_ring
,
1728 struct ib_mac_iocb_rsp
*ib_mac_rsp
)
1730 struct bq_desc
*lbq_desc
;
1731 struct bq_desc
*sbq_desc
;
1732 struct sk_buff
*skb
= NULL
;
1733 u32 length
= le32_to_cpu(ib_mac_rsp
->data_len
);
1734 u32 hdr_len
= le32_to_cpu(ib_mac_rsp
->hdr_len
);
1737 * Handle the header buffer if present.
1739 if (ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HV
&&
1740 ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HS
) {
1741 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1742 "Header of %d bytes in small buffer.\n", hdr_len
);
1744 * Headers fit nicely into a small buffer.
1746 sbq_desc
= ql_get_curr_sbuf(rx_ring
);
1747 pci_unmap_single(qdev
->pdev
,
1748 dma_unmap_addr(sbq_desc
, mapaddr
),
1749 dma_unmap_len(sbq_desc
, maplen
),
1750 PCI_DMA_FROMDEVICE
);
1751 skb
= sbq_desc
->p
.skb
;
1752 ql_realign_skb(skb
, hdr_len
);
1753 skb_put(skb
, hdr_len
);
1754 sbq_desc
->p
.skb
= NULL
;
1758 * Handle the data buffer(s).
1760 if (unlikely(!length
)) { /* Is there data too? */
1761 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1762 "No Data buffer in this packet.\n");
1766 if (ib_mac_rsp
->flags3
& IB_MAC_IOCB_RSP_DS
) {
1767 if (ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HS
) {
1768 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1769 "Headers in small, data of %d bytes in small, combine them.\n",
1772 * Data is less than small buffer size so it's
1773 * stuffed in a small buffer.
1774 * For this case we append the data
1775 * from the "data" small buffer to the "header" small
1778 sbq_desc
= ql_get_curr_sbuf(rx_ring
);
1779 pci_dma_sync_single_for_cpu(qdev
->pdev
,
1781 (sbq_desc
, mapaddr
),
1784 PCI_DMA_FROMDEVICE
);
1785 memcpy(skb_put(skb
, length
),
1786 sbq_desc
->p
.skb
->data
, length
);
1787 pci_dma_sync_single_for_device(qdev
->pdev
,
1794 PCI_DMA_FROMDEVICE
);
1796 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1797 "%d bytes in a single small buffer.\n",
1799 sbq_desc
= ql_get_curr_sbuf(rx_ring
);
1800 skb
= sbq_desc
->p
.skb
;
1801 ql_realign_skb(skb
, length
);
1802 skb_put(skb
, length
);
1803 pci_unmap_single(qdev
->pdev
,
1804 dma_unmap_addr(sbq_desc
,
1806 dma_unmap_len(sbq_desc
,
1808 PCI_DMA_FROMDEVICE
);
1809 sbq_desc
->p
.skb
= NULL
;
1811 } else if (ib_mac_rsp
->flags3
& IB_MAC_IOCB_RSP_DL
) {
1812 if (ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HS
) {
1813 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1814 "Header in small, %d bytes in large. Chain large to small!\n",
1817 * The data is in a single large buffer. We
1818 * chain it to the header buffer's skb and let
1821 lbq_desc
= ql_get_curr_lchunk(qdev
, rx_ring
);
1822 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1823 "Chaining page at offset = %d, for %d bytes to skb.\n",
1824 lbq_desc
->p
.pg_chunk
.offset
, length
);
1825 skb_fill_page_desc(skb
, 0, lbq_desc
->p
.pg_chunk
.page
,
1826 lbq_desc
->p
.pg_chunk
.offset
,
1829 skb
->data_len
+= length
;
1830 skb
->truesize
+= length
;
1833 * The headers and data are in a single large buffer. We
1834 * copy it to a new skb and let it go. This can happen with
1835 * jumbo mtu on a non-TCP/UDP frame.
1837 lbq_desc
= ql_get_curr_lchunk(qdev
, rx_ring
);
1838 skb
= netdev_alloc_skb(qdev
->ndev
, length
);
1840 netif_printk(qdev
, probe
, KERN_DEBUG
, qdev
->ndev
,
1841 "No skb available, drop the packet.\n");
1844 pci_unmap_page(qdev
->pdev
,
1845 dma_unmap_addr(lbq_desc
,
1847 dma_unmap_len(lbq_desc
, maplen
),
1848 PCI_DMA_FROMDEVICE
);
1849 skb_reserve(skb
, NET_IP_ALIGN
);
1850 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1851 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1853 skb_fill_page_desc(skb
, 0,
1854 lbq_desc
->p
.pg_chunk
.page
,
1855 lbq_desc
->p
.pg_chunk
.offset
,
1858 skb
->data_len
+= length
;
1859 skb
->truesize
+= length
;
1861 __pskb_pull_tail(skb
,
1862 (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_V
) ?
1863 VLAN_ETH_HLEN
: ETH_HLEN
);
1867 * The data is in a chain of large buffers
1868 * pointed to by a small buffer. We loop
1869 * thru and chain them to the our small header
1871 * frags: There are 18 max frags and our small
1872 * buffer will hold 32 of them. The thing is,
1873 * we'll use 3 max for our 9000 byte jumbo
1874 * frames. If the MTU goes up we could
1875 * eventually be in trouble.
1878 sbq_desc
= ql_get_curr_sbuf(rx_ring
);
1879 pci_unmap_single(qdev
->pdev
,
1880 dma_unmap_addr(sbq_desc
, mapaddr
),
1881 dma_unmap_len(sbq_desc
, maplen
),
1882 PCI_DMA_FROMDEVICE
);
1883 if (!(ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HS
)) {
1885 * This is an non TCP/UDP IP frame, so
1886 * the headers aren't split into a small
1887 * buffer. We have to use the small buffer
1888 * that contains our sg list as our skb to
1889 * send upstairs. Copy the sg list here to
1890 * a local buffer and use it to find the
1893 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1894 "%d bytes of headers & data in chain of large.\n",
1896 skb
= sbq_desc
->p
.skb
;
1897 sbq_desc
->p
.skb
= NULL
;
1898 skb_reserve(skb
, NET_IP_ALIGN
);
1900 while (length
> 0) {
1901 lbq_desc
= ql_get_curr_lchunk(qdev
, rx_ring
);
1902 size
= (length
< rx_ring
->lbq_buf_size
) ? length
:
1903 rx_ring
->lbq_buf_size
;
1905 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1906 "Adding page %d to skb for %d bytes.\n",
1908 skb_fill_page_desc(skb
, i
,
1909 lbq_desc
->p
.pg_chunk
.page
,
1910 lbq_desc
->p
.pg_chunk
.offset
,
1913 skb
->data_len
+= size
;
1914 skb
->truesize
+= size
;
1918 __pskb_pull_tail(skb
, (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_V
) ?
1919 VLAN_ETH_HLEN
: ETH_HLEN
);
1924 /* Process an inbound completion from an rx ring. */
1925 static void ql_process_mac_split_rx_intr(struct ql_adapter
*qdev
,
1926 struct rx_ring
*rx_ring
,
1927 struct ib_mac_iocb_rsp
*ib_mac_rsp
,
1930 struct net_device
*ndev
= qdev
->ndev
;
1931 struct sk_buff
*skb
= NULL
;
1933 QL_DUMP_IB_MAC_RSP(ib_mac_rsp
);
1935 skb
= ql_build_rx_skb(qdev
, rx_ring
, ib_mac_rsp
);
1936 if (unlikely(!skb
)) {
1937 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1938 "No skb available, drop packet.\n");
1939 rx_ring
->rx_dropped
++;
1943 /* Frame error, so drop the packet. */
1944 if (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_ERR_MASK
) {
1945 netif_info(qdev
, drv
, qdev
->ndev
,
1946 "Receive error, flags2 = 0x%x\n", ib_mac_rsp
->flags2
);
1947 dev_kfree_skb_any(skb
);
1948 rx_ring
->rx_errors
++;
1952 /* The max framesize filter on this chip is set higher than
1953 * MTU since FCoE uses 2k frames.
1955 if (skb
->len
> ndev
->mtu
+ ETH_HLEN
) {
1956 dev_kfree_skb_any(skb
);
1957 rx_ring
->rx_dropped
++;
1961 /* loopback self test for ethtool */
1962 if (test_bit(QL_SELFTEST
, &qdev
->flags
)) {
1963 ql_check_lb_frame(qdev
, skb
);
1964 dev_kfree_skb_any(skb
);
1968 prefetch(skb
->data
);
1970 if (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) {
1971 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
, "%s Multicast.\n",
1972 (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) ==
1973 IB_MAC_IOCB_RSP_M_HASH
? "Hash" :
1974 (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) ==
1975 IB_MAC_IOCB_RSP_M_REG
? "Registered" :
1976 (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) ==
1977 IB_MAC_IOCB_RSP_M_PROM
? "Promiscuous" : "");
1978 rx_ring
->rx_multicast
++;
1980 if (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_P
) {
1981 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1982 "Promiscuous Packet.\n");
1985 skb
->protocol
= eth_type_trans(skb
, ndev
);
1986 skb_checksum_none_assert(skb
);
1988 /* If rx checksum is on, and there are no
1989 * csum or frame errors.
1991 if ((ndev
->features
& NETIF_F_RXCSUM
) &&
1992 !(ib_mac_rsp
->flags1
& IB_MAC_CSUM_ERR_MASK
)) {
1994 if (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_T
) {
1995 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
1996 "TCP checksum done!\n");
1997 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1998 } else if ((ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_U
) &&
1999 (ib_mac_rsp
->flags3
& IB_MAC_IOCB_RSP_V4
)) {
2000 /* Unfragmented ipv4 UDP frame. */
2001 struct iphdr
*iph
= (struct iphdr
*) skb
->data
;
2002 if (!(iph
->frag_off
&
2003 ntohs(IP_MF
|IP_OFFSET
))) {
2004 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
2005 netif_printk(qdev
, rx_status
, KERN_DEBUG
, qdev
->ndev
,
2006 "TCP checksum done!\n");
2011 rx_ring
->rx_packets
++;
2012 rx_ring
->rx_bytes
+= skb
->len
;
2013 skb_record_rx_queue(skb
, rx_ring
->cq_id
);
2014 if ((ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_V
) && (vlan_id
!= 0))
2015 __vlan_hwaccel_put_tag(skb
, vlan_id
);
2016 if (skb
->ip_summed
== CHECKSUM_UNNECESSARY
)
2017 napi_gro_receive(&rx_ring
->napi
, skb
);
2019 netif_receive_skb(skb
);
2022 /* Process an inbound completion from an rx ring. */
2023 static unsigned long ql_process_mac_rx_intr(struct ql_adapter
*qdev
,
2024 struct rx_ring
*rx_ring
,
2025 struct ib_mac_iocb_rsp
*ib_mac_rsp
)
2027 u32 length
= le32_to_cpu(ib_mac_rsp
->data_len
);
2028 u16 vlan_id
= (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_V
) ?
2029 ((le16_to_cpu(ib_mac_rsp
->vlan_id
) &
2030 IB_MAC_IOCB_RSP_VLAN_MASK
)) : 0xffff;
2032 QL_DUMP_IB_MAC_RSP(ib_mac_rsp
);
2034 if (ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HV
) {
2035 /* The data and headers are split into
2038 ql_process_mac_split_rx_intr(qdev
, rx_ring
, ib_mac_rsp
,
2040 } else if (ib_mac_rsp
->flags3
& IB_MAC_IOCB_RSP_DS
) {
2041 /* The data fit in a single small buffer.
2042 * Allocate a new skb, copy the data and
2043 * return the buffer to the free pool.
2045 ql_process_mac_rx_skb(qdev
, rx_ring
, ib_mac_rsp
,
2047 } else if ((ib_mac_rsp
->flags3
& IB_MAC_IOCB_RSP_DL
) &&
2048 !(ib_mac_rsp
->flags1
& IB_MAC_CSUM_ERR_MASK
) &&
2049 (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_T
)) {
2050 /* TCP packet in a page chunk that's been checksummed.
2051 * Tack it on to our GRO skb and let it go.
2053 ql_process_mac_rx_gro_page(qdev
, rx_ring
, ib_mac_rsp
,
2055 } else if (ib_mac_rsp
->flags3
& IB_MAC_IOCB_RSP_DL
) {
2056 /* Non-TCP packet in a page chunk. Allocate an
2057 * skb, tack it on frags, and send it up.
2059 ql_process_mac_rx_page(qdev
, rx_ring
, ib_mac_rsp
,
2062 /* Non-TCP/UDP large frames that span multiple buffers
2063 * can be processed corrrectly by the split frame logic.
2065 ql_process_mac_split_rx_intr(qdev
, rx_ring
, ib_mac_rsp
,
2069 return (unsigned long)length
;
2072 /* Process an outbound completion from an rx ring. */
2073 static void ql_process_mac_tx_intr(struct ql_adapter
*qdev
,
2074 struct ob_mac_iocb_rsp
*mac_rsp
)
2076 struct tx_ring
*tx_ring
;
2077 struct tx_ring_desc
*tx_ring_desc
;
2079 QL_DUMP_OB_MAC_RSP(mac_rsp
);
2080 tx_ring
= &qdev
->tx_ring
[mac_rsp
->txq_idx
];
2081 tx_ring_desc
= &tx_ring
->q
[mac_rsp
->tid
];
2082 ql_unmap_send(qdev
, tx_ring_desc
, tx_ring_desc
->map_cnt
);
2083 tx_ring
->tx_bytes
+= (tx_ring_desc
->skb
)->len
;
2084 tx_ring
->tx_packets
++;
2085 dev_kfree_skb(tx_ring_desc
->skb
);
2086 tx_ring_desc
->skb
= NULL
;
2088 if (unlikely(mac_rsp
->flags1
& (OB_MAC_IOCB_RSP_E
|
2091 OB_MAC_IOCB_RSP_P
| OB_MAC_IOCB_RSP_B
))) {
2092 if (mac_rsp
->flags1
& OB_MAC_IOCB_RSP_E
) {
2093 netif_warn(qdev
, tx_done
, qdev
->ndev
,
2094 "Total descriptor length did not match transfer length.\n");
2096 if (mac_rsp
->flags1
& OB_MAC_IOCB_RSP_S
) {
2097 netif_warn(qdev
, tx_done
, qdev
->ndev
,
2098 "Frame too short to be valid, not sent.\n");
2100 if (mac_rsp
->flags1
& OB_MAC_IOCB_RSP_L
) {
2101 netif_warn(qdev
, tx_done
, qdev
->ndev
,
2102 "Frame too long, but sent anyway.\n");
2104 if (mac_rsp
->flags1
& OB_MAC_IOCB_RSP_B
) {
2105 netif_warn(qdev
, tx_done
, qdev
->ndev
,
2106 "PCI backplane error. Frame not sent.\n");
2109 atomic_inc(&tx_ring
->tx_count
);
2112 /* Fire up a handler to reset the MPI processor. */
2113 void ql_queue_fw_error(struct ql_adapter
*qdev
)
2116 queue_delayed_work(qdev
->workqueue
, &qdev
->mpi_reset_work
, 0);
2119 void ql_queue_asic_error(struct ql_adapter
*qdev
)
2122 ql_disable_interrupts(qdev
);
2123 /* Clear adapter up bit to signal the recovery
2124 * process that it shouldn't kill the reset worker
2127 clear_bit(QL_ADAPTER_UP
, &qdev
->flags
);
2128 /* Set asic recovery bit to indicate reset process that we are
2129 * in fatal error recovery process rather than normal close
2131 set_bit(QL_ASIC_RECOVERY
, &qdev
->flags
);
2132 queue_delayed_work(qdev
->workqueue
, &qdev
->asic_reset_work
, 0);
static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
				    struct ib_ae_iocb_rsp *ib_ae_rsp)
{
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		netif_err(qdev, rx_err, qdev->ndev,
			  "Management Processor Fatal Error.\n");
		ql_queue_fw_error(qdev);
		return;

	case CAM_LOOKUP_ERR_EVENT:
		netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
		netdev_err(qdev->ndev, "This event shouldn't occur.\n");
		ql_queue_asic_error(qdev);
		return;

	case SOFT_ECC_ERROR_EVENT:
		netdev_err(qdev->ndev, "Soft ECC error detected.\n");
		ql_queue_asic_error(qdev);
		break;

	case PCI_ERR_ANON_BUF_RD:
		netdev_err(qdev->ndev, "PCI error occurred when reading "
			   "anonymous buffers from rx_ring %d.\n",
			   ib_ae_rsp->q_id);
		ql_queue_asic_error(qdev);
		break;

	default:
		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
			  ib_ae_rsp->event);
		ql_queue_asic_error(qdev);
		break;
	}
}
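
/* Service completions on an outbound (TX completion) queue: reclaim
 * the transmitted skbs and, if the TX subqueue was stopped for lack
 * of descriptors, wake it once the ring is at least 25% empty.
 */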
static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	int count = 0;

	struct tx_ring *tx_ring;
	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n.",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	if (!net_rsp)
		return 0;
	ql_write_cq_idx(rx_ring);
	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
		if (atomic_read(&tx_ring->queue_stopped) &&
		    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}

	return count;
}
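
/* Service completions on an inbound (RSS) queue, up to the NAPI
 * budget, then replenish the buffer queues and update the consumer
 * index register.
 */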
static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ql_net_rsp_iocb *net_rsp;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n.",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = rx_ring->curr_entry;
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, rx_ring,
					       (struct ib_mac_iocb_rsp *)
					       net_rsp);
			break;

		case OPCODE_IB_AE_IOCB:
			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
						net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
			break;
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
		if (count == budget)
			break;
	}
	ql_update_buffer_queues(qdev, rx_ring);
	ql_write_cq_idx(rx_ring);
	return count;
}
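
/* NAPI poll handler used with MSI-X. It first drains any TX
 * completion rings owned by this vector, then services the RSS ring
 * and re-enables the completion interrupt when under budget.
 */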
static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct ql_adapter *qdev = rx_ring->qdev;
	struct rx_ring *trx_ring;
	int i, work_done = 0;
	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];

	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);

	/* Service the TX rings first. They start
	 * right after the RSS rings. */
	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
		trx_ring = &qdev->rx_ring[i];
		/* If this TX completion ring belongs to this vector and
		 * it's not empty then service it.
		 */
		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
		    (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
		     trx_ring->cnsmr_idx)) {
			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
				     "%s: Servicing TX completion ring %d.\n",
				     __func__, trx_ring->cq_id);
			ql_clean_outbound_rx_ring(trx_ring);
		}
	}

	/*
	 * Now service the RSS ring if it's active.
	 */
	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
					rx_ring->cnsmr_idx) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "%s: Servicing RX completion ring %d.\n",
			     __func__, rx_ring->cq_id);
		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
	}

	if (work_done < budget) {
		napi_complete(napi);
		ql_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}
static void qlge_vlan_mode(struct net_device *ndev, u32 features)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (features & NETIF_F_HW_VLAN_RX) {
		netif_printk(qdev, ifup, KERN_DEBUG, ndev,
			     "Turning on VLAN in NIC_RCV_CFG.\n");
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
			   NIC_RCV_CFG_VLAN_MATCH_AND_NON);
	} else {
		netif_printk(qdev, ifup, KERN_DEBUG, ndev,
			     "Turning off VLAN in NIC_RCV_CFG.\n");
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
	}
}
static u32 qlge_fix_features(struct net_device *ndev, u32 features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}

static int qlge_set_features(struct net_device *ndev, u32 features)
{
	u32 changed = ndev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_RX)
		qlge_vlan_mode(ndev, features);

	return 0;
}
static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = MAC_ADDR_E;

	if (ql_set_mac_addr_reg
	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init vlan address.\n");
	}
}

static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;

	__qlge_vlan_rx_add_vid(qdev, vid);
	set_bit(vid, qdev->active_vlans);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}

static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = 0;

	if (ql_set_mac_addr_reg
	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to clear vlan address.\n");
	}
}

static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;

	__qlge_vlan_rx_kill_vid(qdev, vid);
	clear_bit(vid, qdev->active_vlans);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}

static void qlge_restore_vlan(struct ql_adapter *qdev)
{
	int status;
	u16 vid;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;

	for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
		__qlge_vlan_rx_add_vid(qdev, vid);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}
/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	napi_schedule(&rx_ring->napi);
	return IRQ_HANDLED;
}

/* This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple vector environment.
 * In MSI/Legacy environment it also processes the rest of
 * the rx_rings.
 */
static irqreturn_t qlge_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	struct ql_adapter *qdev = rx_ring->qdev;
	struct intr_context *intr_context = &qdev->intr_context[0];
	u32 var;
	int work_done = 0;

	spin_lock(&qdev->hw_lock);
	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "Shared Interrupt, Not ours!\n");
		spin_unlock(&qdev->hw_lock);
		return IRQ_NONE;
	}
	spin_unlock(&qdev->hw_lock);

	var = ql_disable_completion_interrupt(qdev, intr_context->intr);

	/*
	 * Check for fatal error.
	 */
	if (var & STS_FE) {
		ql_queue_asic_error(qdev);
		netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
		var = ql_read32(qdev, ERR_STS);
		netdev_err(qdev->ndev, "Resetting chip. "
			   "Error Status Register = 0x%x\n", var);
		return IRQ_HANDLED;
	}

	/*
	 * Check MPI processor activity.
	 */
	if ((var & STS_PI) &&
		(ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
		/*
		 * We've got an async event or mailbox completion.
		 * Handle it and clear the source of the interrupt.
		 */
		netif_err(qdev, intr, qdev->ndev,
			  "Got MPI processor interrupt.\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
		queue_delayed_work_on(smp_processor_id(),
				qdev->workqueue, &qdev->mpi_work, 0);
		work_done++;
	}

	/*
	 * Get the bit-mask that shows the active queues for this
	 * pass. Compare it to the queues that this irq services
	 * and call napi if there's a match.
	 */
	var = ql_read32(qdev, ISR1);
	if (var & intr_context->irq_mask) {
		netif_info(qdev, intr, qdev->ndev,
			   "Waking handler for rx_ring[0].\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		napi_schedule(&rx_ring->napi);
		work_done++;
	}
	ql_enable_completion_interrupt(qdev, intr_context->intr);
	return work_done ? IRQ_HANDLED : IRQ_NONE;
}
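
/* Build the TSO fields of the IOCB for a GSO skb. Returns 1 if TSO
 * was set up, 0 if the frame is not GSO, or a negative errno.
 */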
static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{

	if (skb_is_gso(skb)) {
		int err;
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
		mac_iocb_ptr->net_trans_offset =
		    cpu_to_le16(skb_network_offset(skb) |
				skb_transport_offset(skb)
				<< OB_MAC_TRANSPORT_HDR_SHIFT);
		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
		if (likely(skb->protocol == htons(ETH_P_IP))) {
			struct iphdr *iph = ip_hdr(skb);
			iph->check = 0;
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					     &ipv6_hdr(skb)->daddr,
					     0, IPPROTO_TCP, 0);
		}
		return 1;
	}
	return 0;
}
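
/* Set up the IOCB fields for TX checksum offload (TCP or UDP over
 * IPv4) when the stack hands us CHECKSUM_PARTIAL.
 */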
static void ql_hw_csum_setup(struct sk_buff *skb,
			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	int len;
	struct iphdr *iph = ip_hdr(skb);
	__sum16 *check;

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
	mac_iocb_ptr->net_trans_offset =
		cpu_to_le16(skb_network_offset(skb) |
			    skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);

	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
	if (likely(iph->protocol == IPPROTO_TCP)) {
		check = &(tcp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) +
				(tcp_hdr(skb)->doff << 2));
	} else {
		check = &(udp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) +
				sizeof(struct udphdr));
	}
	*check = ~csum_tcpudp_magic(iph->saddr,
				    iph->daddr, len, iph->protocol, 0);
}
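
/* Main transmit entry point: reserve a descriptor, build the MAC
 * IOCB (VLAN tag, TSO or checksum offload), map the skb and ring the
 * producer-index doorbell.
 */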
static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct tx_ring_desc *tx_ring_desc;
	struct ob_mac_iocb_req *mac_iocb_ptr;
	struct ql_adapter *qdev = netdev_priv(ndev);
	int tso;
	struct tx_ring *tx_ring;
	u32 tx_ring_idx = (u32) skb->queue_mapping;

	tx_ring = &qdev->tx_ring[tx_ring_idx];

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_info(qdev, tx_queued, qdev->ndev,
			   "%s: shutting down tx queue %d due to lack of resources.\n",
			   __func__, tx_ring_idx);
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		atomic_inc(&tx_ring->queue_stopped);
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
	mac_iocb_ptr = tx_ring_desc->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
	mac_iocb_ptr->tid = tx_ring_desc->index;
	/* We use the upper 32-bits to store the tx queue for this IO.
	 * When we get the completion we can use it to establish the context.
	 */
	mac_iocb_ptr->txq_idx = tx_ring_idx;
	tx_ring_desc->skb = skb;

	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);

	if (vlan_tx_tag_present(skb)) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
		mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
	}
	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
		ql_hw_csum_setup(skb,
				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	}
	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
			NETDEV_TX_OK) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "Could not map the segments.\n");
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
	tx_ring->prod_idx++;
	if (tx_ring->prod_idx == tx_ring->wq_len)
		tx_ring->prod_idx = 0;

	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
		     "tx queued, slot %d, len %d\n",
		     tx_ring->prod_idx, skb->len);

	atomic_dec(&tx_ring->tx_count);
	return NETDEV_TX_OK;
}
static void ql_free_shadow_space(struct ql_adapter *qdev)
{
	if (qdev->rx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->rx_ring_shadow_reg_area,
				    qdev->rx_ring_shadow_reg_dma);
		qdev->rx_ring_shadow_reg_area = NULL;
	}
	if (qdev->tx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->tx_ring_shadow_reg_area,
				    qdev->tx_ring_shadow_reg_dma);
		qdev->tx_ring_shadow_reg_area = NULL;
	}
}

static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
	qdev->rx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev,
				 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
	if (qdev->rx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of RX shadow space failed.\n");
		return -ENOMEM;
	}
	memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
	qdev->tx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
				 &qdev->tx_ring_shadow_reg_dma);
	if (qdev->tx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of TX shadow space failed.\n");
		goto err_wqp_sh_area;
	}
	memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
	return 0;

err_wqp_sh_area:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->rx_ring_shadow_reg_area,
			    qdev->rx_ring_shadow_reg_dma);
	return -ENOMEM;
}
static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct tx_ring_desc *tx_ring_desc;
	int i;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	mac_iocb_ptr = tx_ring->wq_base;
	tx_ring_desc = tx_ring->q;
	for (i = 0; i < tx_ring->wq_len; i++) {
		tx_ring_desc->index = i;
		tx_ring_desc->skb = NULL;
		tx_ring_desc->queue_entry = mac_iocb_ptr;
		mac_iocb_ptr++;
		tx_ring_desc++;
	}
	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
	atomic_set(&tx_ring->queue_stopped, 0);
}
static void ql_free_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	if (tx_ring->wq_base) {
		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
				    tx_ring->wq_base, tx_ring->wq_base_dma);
		tx_ring->wq_base = NULL;
	}
	kfree(tx_ring->q);
	tx_ring->q = NULL;
}

static int ql_alloc_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	tx_ring->wq_base =
	    pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
				 &tx_ring->wq_base_dma);

	if ((tx_ring->wq_base == NULL) ||
	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
		netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
		return -ENOMEM;
	}
	tx_ring->q =
	    kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
	if (tx_ring->q == NULL)
		goto err;

	return 0;
err:
	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
			    tx_ring->wq_base, tx_ring->wq_base_dma);
	return -ENOMEM;
}
static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc;

	uint32_t curr_idx, clean_idx;

	curr_idx = rx_ring->lbq_curr_idx;
	clean_idx = rx_ring->lbq_clean_idx;
	while (curr_idx != clean_idx) {
		lbq_desc = &rx_ring->lbq[curr_idx];

		if (lbq_desc->p.pg_chunk.last_flag) {
			pci_unmap_page(qdev->pdev,
				lbq_desc->p.pg_chunk.map,
				ql_lbq_block_size(qdev),
				PCI_DMA_FROMDEVICE);
			lbq_desc->p.pg_chunk.last_flag = 0;
		}

		put_page(lbq_desc->p.pg_chunk.page);
		lbq_desc->p.pg_chunk.page = NULL;

		if (++curr_idx == rx_ring->lbq_len)
			curr_idx = 0;
	}
}
static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;

	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		if (sbq_desc == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "sbq_desc %d is NULL.\n", i);
			return;
		}
		if (sbq_desc->p.skb) {
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc, mapaddr),
					 dma_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(sbq_desc->p.skb);
			sbq_desc->p.skb = NULL;
		}
	}
}

/* Free all large and small rx buffers associated
 * with the completion queues for this device.
 */
static void ql_free_rx_buffers(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->lbq)
			ql_free_lbq_buffers(qdev, rx_ring);
		if (rx_ring->sbq)
			ql_free_sbq_buffers(qdev, rx_ring);
	}
}
static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->type != TX_Q)
			ql_update_buffer_queues(qdev, rx_ring);
	}
}
static void ql_init_lbq_ring(struct ql_adapter *qdev,
				struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *lbq_desc;
	__le64 *bq = rx_ring->lbq_base;

	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->lbq_len; i++) {
		lbq_desc = &rx_ring->lbq[i];
		memset(lbq_desc, 0, sizeof(*lbq_desc));
		lbq_desc->index = i;
		lbq_desc->addr = bq;
		bq++;
	}
}

static void ql_init_sbq_ring(struct ql_adapter *qdev,
				struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;
	__le64 *bq = rx_ring->sbq_base;

	memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		memset(sbq_desc, 0, sizeof(*sbq_desc));
		sbq_desc->index = i;
		sbq_desc->addr = bq;
		bq++;
	}
}
static void ql_free_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/* Free the small buffer queue. */
	if (rx_ring->sbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->sbq_size,
				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
		rx_ring->sbq_base = NULL;
	}

	/* Free the small buffer queue control blocks. */
	kfree(rx_ring->sbq);
	rx_ring->sbq = NULL;

	/* Free the large buffer queue. */
	if (rx_ring->lbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->lbq_size,
				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
		rx_ring->lbq_base = NULL;
	}

	/* Free the large buffer queue control blocks. */
	kfree(rx_ring->lbq);
	rx_ring->lbq = NULL;

	/* Free the rx queue. */
	if (rx_ring->cq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->cq_size,
				    rx_ring->cq_base, rx_ring->cq_base_dma);
		rx_ring->cq_base = NULL;
	}
}
/* Allocate queues and buffers for this completions queue based
 * on the values in the parameter structure. */
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{

	/*
	 * Allocate the completion queue for this rx_ring.
	 */
	rx_ring->cq_base =
	    pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
				 &rx_ring->cq_base_dma);

	if (rx_ring->cq_base == NULL) {
		netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
		return -ENOMEM;
	}

	if (rx_ring->sbq_len) {
		/*
		 * Allocate small buffer queue.
		 */
		rx_ring->sbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
					 &rx_ring->sbq_base_dma);

		if (rx_ring->sbq_base == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Small buffer queue allocation failed.\n");
			goto err_mem;
		}

		/*
		 * Allocate small buffer queue control blocks.
		 */
		rx_ring->sbq =
		    kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->sbq == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Small buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		ql_init_sbq_ring(qdev, rx_ring);
	}

	if (rx_ring->lbq_len) {
		/*
		 * Allocate large buffer queue.
		 */
		rx_ring->lbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
					 &rx_ring->lbq_base_dma);

		if (rx_ring->lbq_base == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Large buffer queue allocation failed.\n");
			goto err_mem;
		}
		/*
		 * Allocate large buffer queue control blocks.
		 */
		rx_ring->lbq =
		    kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->lbq == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Large buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		ql_init_lbq_ring(qdev, rx_ring);
	}

	return 0;

err_mem:
	ql_free_rx_resources(qdev, rx_ring);
	return -ENOMEM;
}
static void ql_tx_ring_clean(struct ql_adapter *qdev)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;
	int i, j;

	/*
	 * Loop through all queues and free
	 * any resources.
	 */
	for (j = 0; j < qdev->tx_ring_count; j++) {
		tx_ring = &qdev->tx_ring[j];
		for (i = 0; i < tx_ring->wq_len; i++) {
			tx_ring_desc = &tx_ring->q[i];
			if (tx_ring_desc && tx_ring_desc->skb) {
				netif_err(qdev, ifdown, qdev->ndev,
					  "Freeing lost SKB %p, from queue %d, index %d.\n",
					  tx_ring_desc->skb, j,
					  tx_ring_desc->index);
				ql_unmap_send(qdev, tx_ring_desc,
					      tx_ring_desc->map_cnt);
				dev_kfree_skb(tx_ring_desc->skb);
				tx_ring_desc->skb = NULL;
			}
		}
	}
}

static void ql_free_mem_resources(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->tx_ring_count; i++)
		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
	for (i = 0; i < qdev->rx_ring_count; i++)
		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
	ql_free_shadow_space(qdev);
}
static int ql_alloc_mem_resources(struct ql_adapter *qdev)
{
	int i;

	/* Allocate space for our shadow registers and such. */
	if (ql_alloc_shadow_space(qdev))
		return -ENOMEM;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "RX resource allocation failed.\n");
			goto err_mem;
		}
	}
	/* Allocate tx queue resources */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "TX resource allocation failed.\n");
			goto err_mem;
		}
	}
	return 0;

err_mem:
	ql_free_mem_resources(qdev);
	return -ENOMEM;
}
/* Set up the rx ring control block and pass it to the chip.
 * The control block is defined as
 * "Completion Queue Initialization Control Block", or cqicb.
 */
static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct cqicb *cqicb = &rx_ring->cqicb;
	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
	int err = 0;
	u16 bq_len;
	u64 tmp;
	__le64 *base_indirect_ptr;
	int page_entries;

	/* Set up the shadow registers for this ring. */
	rx_ring->prod_idx_sh_reg = shadow_reg;
	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
	*rx_ring->prod_idx_sh_reg = 0;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->lbq_base_indirect = shadow_reg;
	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	rx_ring->sbq_base_indirect = shadow_reg;
	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;

	/* PCI doorbell mem area + 0x00 for consumer index register */
	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
	rx_ring->cnsmr_idx = 0;
	rx_ring->curr_entry = rx_ring->cq_base;

	/* PCI doorbell mem area + 0x04 for valid register */
	rx_ring->valid_db_reg = doorbell_area + 0x04;

	/* PCI doorbell mem area + 0x18 for large buffer consumer */
	rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);

	/* PCI doorbell mem area + 0x1c */
	rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);

	memset((void *)cqicb, 0, sizeof(struct cqicb));
	cqicb->msix_vect = rx_ring->irq;

	bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
	cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);

	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);

	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);

	/*
	 * Set up the control block load flags.
	 */
	cqicb->flags = FLAGS_LC |	/* Load queue base address */
	    FLAGS_LV |			/* Load MSI-X vector */
	    FLAGS_LI;			/* Load irq delay values */
	if (rx_ring->lbq_len) {
		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
		tmp = (u64)rx_ring->lbq_base_dma;
		base_indirect_ptr = rx_ring->lbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
		cqicb->lbq_addr =
		    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
		bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
			(u16) rx_ring->lbq_buf_size;
		cqicb->lbq_buf_size = cpu_to_le16(bq_len);
		bq_len = (rx_ring->lbq_len == 65536) ? 0 :
			(u16) rx_ring->lbq_len;
		cqicb->lbq_len = cpu_to_le16(bq_len);
		rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_curr_idx = 0;
		rx_ring->lbq_clean_idx = 0;
		rx_ring->lbq_free_cnt = rx_ring->lbq_len;
	}
	if (rx_ring->sbq_len) {
		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
		tmp = (u64)rx_ring->sbq_base_dma;
		base_indirect_ptr = rx_ring->sbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
		cqicb->sbq_addr =
		    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
		cqicb->sbq_buf_size =
		    cpu_to_le16((u16)(rx_ring->sbq_buf_size));
		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
			(u16) rx_ring->sbq_len;
		cqicb->sbq_len = cpu_to_le16(bq_len);
		rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_curr_idx = 0;
		rx_ring->sbq_clean_idx = 0;
		rx_ring->sbq_free_cnt = rx_ring->sbq_len;
	}
	switch (rx_ring->type) {
	case TX_Q:
		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
		break;
	case RX_Q:
		/* Inbound completion handling rx_rings run in
		 * separate NAPI contexts.
		 */
		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
			       64);
		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
		break;
	default:
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Invalid rx_ring->type = %d.\n", rx_ring->type);
	}
	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "Initializing rx work queue.\n");
	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
			   CFG_LCQ, rx_ring->cq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
		return err;
	}
	return err;
}
static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct wqicb *wqicb = (struct wqicb *)tx_ring;
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
	    (tx_ring->wq_id * sizeof(u64));
	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
	    (tx_ring->wq_id * sizeof(u64));
	int err = 0;

	/*
	 * Assign doorbell registers for this tx_ring.
	 */
	/* TX PCI doorbell mem area for tx producer index */
	tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
	tx_ring->prod_idx = 0;
	/* TX PCI doorbell mem area + 0x04 */
	tx_ring->valid_db_reg = doorbell_area + 0x04;

	/*
	 * Assign shadow registers for this tx_ring.
	 */
	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;

	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);

	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);

	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);

	ql_init_tx_ring(qdev, tx_ring);

	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
			   (u16) tx_ring->wq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
		return err;
	}
	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "Successfully loaded WQICB.\n");
	return err;
}
static void ql_disable_msix(struct ql_adapter *qdev)
{
	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		pci_disable_msix(qdev->pdev);
		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
		kfree(qdev->msi_x_entry);
		qdev->msi_x_entry = NULL;
	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		pci_disable_msi(qdev->pdev);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
	}
}

/* We start by trying to get the number of vectors
 * stored in qdev->intr_count. If we don't get that
 * many then we reduce the count and try again.
 */
static void ql_enable_msix(struct ql_adapter *qdev)
{
	int i, err;

	/* Get the MSIX vectors. */
	if (qlge_irq_type == MSIX_IRQ) {
		/* Try to alloc space for the msix struct,
		 * if it fails then go to MSI/legacy.
		 */
		qdev->msi_x_entry = kcalloc(qdev->intr_count,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!qdev->msi_x_entry) {
			qlge_irq_type = MSI_IRQ;
			goto msi;
		}

		for (i = 0; i < qdev->intr_count; i++)
			qdev->msi_x_entry[i].entry = i;

		/* Loop to get our vectors. We start with
		 * what we want and settle for what we get.
		 */
		do {
			err = pci_enable_msix(qdev->pdev,
				qdev->msi_x_entry, qdev->intr_count);
			if (err > 0)
				qdev->intr_count = err;
		} while (err > 0);

		if (err < 0) {
			kfree(qdev->msi_x_entry);
			qdev->msi_x_entry = NULL;
			netif_warn(qdev, ifup, qdev->ndev,
				   "MSI-X Enable failed, trying MSI.\n");
			qdev->intr_count = 1;
			qlge_irq_type = MSI_IRQ;
		} else if (err == 0) {
			set_bit(QL_MSIX_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "MSI-X Enabled, got %d vectors.\n",
				   qdev->intr_count);
			return;
		}
	}
msi:
	qdev->intr_count = 1;
	if (qlge_irq_type == MSI_IRQ) {
		if (!pci_enable_msi(qdev->pdev)) {
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "Running with MSI interrupts.\n");
			return;
		}
	}
	qlge_irq_type = LEG_IRQ;
	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "Running with legacy interrupts.\n");
}
/* Each vector services 1 RSS ring and 1 or more
 * TX completion rings.  This function loops through
 * the TX completion rings and assigns the vector that
 * will service it.  An example would be if there are
 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
 * This would mean that vector 0 would service RSS ring 0
 * and TX completion rings 0,1,2 and 3.  Vector 1 would
 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
 */
static void ql_set_tx_vect(struct ql_adapter *qdev)
{
	int i, j, vect;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Assign irq vectors to TX rx_rings.*/
		for (vect = 0, j = 0, i = qdev->rss_ring_count;
		     i < qdev->rx_ring_count; i++) {
			if (j == tx_rings_per_vector) {
				vect++;
				j = 0;
			}
			qdev->rx_ring[i].irq = vect;
			j++;
		}
	} else {
		/* For single vector all rings have an irq
		 * of zero.
		 */
		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->rx_ring[i].irq = 0;
	}
}

/* Set the interrupt mask for this vector.  Each vector
 * will service 1 RSS ring and 1 or more TX completion
 * rings.  This function sets up a bit mask per vector
 * that indicates which rings it services.
 */
static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
{
	int j, vect = ctx->intr;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Add the RSS ring serviced by this vector
		 * to the mask.
		 */
		ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
		/* Add the TX ring(s) serviced by this vector
		 * to the mask.
		 */
		for (j = 0; j < tx_rings_per_vector; j++) {
			ctx->irq_mask |=
			(1 << qdev->rx_ring[qdev->rss_ring_count +
			(vect * tx_rings_per_vector) + j].cq_id);
		}
	} else {
		/* For single vector we just shift each queue's
		 * ID into the mask.
		 */
		for (j = 0; j < qdev->rx_ring_count; j++)
			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
	}
}
/*
 * Here we build the intr_context structures based on
 * our rx_ring count and intr vector count.
 * The intr_context structure is used to hook each vector
 * to possibly different handlers.
 */
static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
{
	int i = 0;
	struct intr_context *intr_context = &qdev->intr_context[0];

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Each rx_ring has it's
		 * own intr_context since we have separate
		 * vectors for each queue.
		 */
		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
			qdev->rx_ring[i].irq = i;
			intr_context->intr = i;
			intr_context->qdev = qdev;
			/* Set up this vector's bit-mask that indicates
			 * which queues it services.
			 */
			ql_set_irq_mask(qdev, intr_context);
			/*
			 * We set up each vectors enable/disable/read bits so
			 * there's no bit/mask calculations in the critical path.
			 */
			intr_context->intr_en_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
			    | i;
			intr_context->intr_dis_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
			    INTR_EN_IHD | i;
			intr_context->intr_read_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
			    i;
			if (i == 0) {
				/* The first vector/queue handles
				 * broadcast/multicast, fatal errors,
				 * and firmware events.  This in addition
				 * to normal inbound NAPI processing.
				 */
				intr_context->handler = qlge_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			} else {
				/*
				 * Inbound queues handle unicast frames only.
				 */
				intr_context->handler = qlge_msix_rx_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			}
		}
	} else {
		/*
		 * All rx_rings use the same intr_context since
		 * there is only one vector.
		 */
		intr_context->intr = 0;
		intr_context->qdev = qdev;
		/*
		 * We set up each vectors enable/disable/read bits so
		 * there's no bit/mask calculations in the critical path.
		 */
		intr_context->intr_en_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
		intr_context->intr_dis_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
		    INTR_EN_TYPE_DISABLE;
		intr_context->intr_read_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
		/*
		 * Single interrupt means one handler for all rings.
		 */
		intr_context->handler = qlge_isr;
		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
		/* Set up this vector's bit-mask that indicates
		 * which queues it services. In this case there is
		 * a single vector so it will service all RSS and
		 * TX completion rings.
		 */
		ql_set_irq_mask(qdev, intr_context);
	}
	/* Tell the TX completion rings which MSIx vector
	 * they will be using.
	 */
	ql_set_tx_vect(qdev);
}
static void ql_free_irq(struct ql_adapter *qdev)
{
	int i;
	struct intr_context *intr_context = &qdev->intr_context[0];

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		if (intr_context->hooked) {
			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
				free_irq(qdev->msi_x_entry[i].vector,
					 &qdev->rx_ring[i]);
				netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
					     "freeing msix interrupt %d.\n", i);
			} else {
				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
				netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
					     "freeing msi interrupt %d.\n", i);
			}
		}
	}
	ql_disable_msix(qdev);
}
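
/* Hook an interrupt handler for every vector: one per rx_ring when
 * MSI-X is enabled, otherwise a single shared MSI/legacy handler.
 */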
static int ql_request_irq(struct ql_adapter *qdev)
{
	int i;
	int status = 0;
	struct pci_dev *pdev = qdev->pdev;
	struct intr_context *intr_context = &qdev->intr_context[0];

	ql_resolve_queues_to_irqs(qdev);

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		atomic_set(&intr_context->irq_cnt, 0);
		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
			status = request_irq(qdev->msi_x_entry[i].vector,
					     intr_context->handler,
					     0,
					     intr_context->name,
					     &qdev->rx_ring[i]);
			if (status) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Failed request for MSIX interrupt %d.\n",
					  i);
				goto err_irq;
			}
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "Hooked intr %d, queue type %s, with name %s.\n",
				     i,
				     qdev->rx_ring[i].type == DEFAULT_Q ?
				     "DEFAULT_Q" :
				     qdev->rx_ring[i].type == TX_Q ?
				     "TX_Q" :
				     qdev->rx_ring[i].type == RX_Q ?
				     "RX_Q" : "",
				     intr_context->name);
		} else {
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "trying msi or legacy interrupts.\n");
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: irq = %d.\n", __func__, pdev->irq);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: context->name = %s.\n", __func__,
				     intr_context->name);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: dev_id = 0x%p.\n", __func__,
				     &qdev->rx_ring[0]);
			status =
			    request_irq(pdev->irq, qlge_isr,
					test_bit(QL_MSI_ENABLED,
						 &qdev->
						 flags) ? 0 : IRQF_SHARED,
					intr_context->name, &qdev->rx_ring[0]);
			if (status)
				goto err_irq;

			netif_err(qdev, ifup, qdev->ndev,
				  "Hooked intr %d, queue type %s, with name %s.\n",
				  i,
				  qdev->rx_ring[0].type == DEFAULT_Q ?
				  "DEFAULT_Q" :
				  qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
				  qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
				  intr_context->name);
		}
		intr_context->hooked = 1;
	}
	return status;
err_irq:
	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
	ql_free_irq(qdev);
	return status;
}
static int ql_start_rss(struct ql_adapter *qdev)
{
	static const u8 init_hash_seed[] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
	};
	struct ricb *ricb = &qdev->ricb;
	int status = 0;
	int i;
	u8 *hash_id = (u8 *) ricb->hash_cq_id;

	memset((void *)ricb, 0, sizeof(*ricb));

	ricb->base_cq = RSS_L4K;
	ricb->flags =
		(RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
	ricb->mask = cpu_to_le16((u16)(0x3ff));

	/*
	 * Fill out the Indirection Table.
	 */
	for (i = 0; i < 1024; i++)
		hash_id[i] = (i & (qdev->rss_ring_count - 1));

	memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
	memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);

	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");

	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
		return status;
	}
	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "Successfully loaded RICB.\n");
	return status;
}
static int ql_clear_routing_entries(struct ql_adapter *qdev)
{
	int i, status = 0;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;
	/* Clear all the entries in the routing table. */
	for (i = 0; i < 16; i++) {
		status = ql_set_routing_reg(qdev, i, 0, 0);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for CAM packets.\n");
			break;
		}
	}
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}
/* Initialize the frame-to-queue routing. */
static int ql_route_initialize(struct ql_adapter *qdev)
{
	int status = 0;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status)
		return status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
						RT_IDX_IP_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register "
			  "for IP CSUM error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
						RT_IDX_TU_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register "
			  "for TCP/UDP CSUM error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for broadcast packets.\n");
		goto exit;
	}
	/* If we have more than one inbound queue, then turn on RSS in the
	 * routing block.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
					    RT_IDX_RSS_MATCH, 1);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for MATCH RSS packets.\n");
			goto exit;
		}
	}

	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
				    RT_IDX_CAM_HIT, 1);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for CAM packets.\n");
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}
int ql_cam_route_initialize(struct ql_adapter *qdev)
{
	int status, set;

	/* Check if the link is up and use it to
	 * determine if we are setting or clearing
	 * the MAC address in the CAM.
	 */
	set = ql_read32(qdev, STS);
	set &= qdev->port_link_up;
	status = ql_set_mac_addr(qdev, set);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
		return status;
	}

	status = ql_route_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");

	return status;
}
static int ql_adapter_initialize(struct ql_adapter *qdev)
{
	u32 value, mask;
	int i;
	int status = 0;

	/*
	 * Set up the System register to halt on errors.
	 */
	value = SYS_EFE | SYS_FAE;
	mask = value << 16;
	ql_write32(qdev, SYS, mask | value);

	/* Set the default queue, and VLAN behavior. */
	value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
	mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
	ql_write32(qdev, NIC_RCV_CFG, (mask | value));

	/* Set the MPI interrupt to enabled. */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);

	/* Enable the function, set pagesize, enable error checking. */
	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
	    FSC_EC | FSC_VM_PAGE_4K;
	value |= SPLT_SETTING;

	/* Set/clear header splitting. */
	mask = FSC_VM_PAGESIZE_MASK |
	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
	ql_write32(qdev, FSC, mask | value);

	ql_write32(qdev, SPLT_HDR, SPLT_LEN);

	/* Set RX packet routing to use port/pci function on which the
	 * packet arrived on in addition to usual frame routing.
	 * This is helpful on bonding where both interfaces can have
	 * the same MAC address.
	 */
	ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
	/* Reroute all packets to our Interface.
	 * They may have been routed to MPI firmware
	 * due to WOL.
	 */
	value = ql_read32(qdev, MGMT_RCV_CFG);
	value &= ~MGMT_RCV_CFG_RM;
	mask = (value << 16);

	/* Sticky reg needs clearing due to WOL. */
	ql_write32(qdev, MGMT_RCV_CFG, mask);
	ql_write32(qdev, MGMT_RCV_CFG, mask | value);

	/* Default WOL is enable on Mezz cards */
	if (qdev->pdev->subsystem_device == 0x0068 ||
	    qdev->pdev->subsystem_device == 0x0180)
		qdev->wol = WAKE_MAGIC;

	/* Start up the rx queues. */
	for (i = 0; i < qdev->rx_ring_count; i++) {
		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start rx ring[%d].\n", i);
			return status;
		}
	}

	/* If there is more than one inbound completion queue
	 * then download a RICB to configure RSS.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_start_rss(qdev);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
			return status;
		}
	}

	/* Start up the tx queues. */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start tx ring[%d].\n", i);
			return status;
		}
	}

	/* Initialize the port and set the max framesize. */
	status = qdev->nic_ops->port_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");

	/* Set up the MAC address and frame routing filter. */
	status = ql_cam_route_initialize(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init CAM/Routing tables.\n");
		return status;
	}

	/* Start NAPI for the RSS queues. */
	for (i = 0; i < qdev->rss_ring_count; i++) {
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Enabling NAPI for rx_ring[%d].\n", i);
		napi_enable(&qdev->rx_ring[i].napi);
	}

	return status;
}
/* Issue soft reset to chip. */
static int ql_adapter_reset(struct ql_adapter *qdev)
{
	u32 value;
	int status = 0;
	unsigned long end_jiffies;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
		return status;
	}

	end_jiffies = jiffies +
		max((unsigned long)1, usecs_to_jiffies(30));

	/* Check if bit is set then skip the mailbox command and
	 * clear the bit, else we are in normal reset process.
	 */
	if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
		/* Stop management traffic. */
		ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);

		/* Wait for the NIC and MGMNT FIFOs to empty. */
		ql_wait_fifo_empty(qdev);
	} else
		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);

	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);

	do {
		value = ql_read32(qdev, RST_FO);
		if ((value & RST_FO_FR) == 0)
			break;
	} while (time_before(jiffies, end_jiffies));

	if (value & RST_FO_FR) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "ETIMEDOUT!!! errored out of resetting the chip!\n");
		status = -ETIMEDOUT;
	}

	/* Resume management traffic. */
	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
	return status;
}
static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	netif_info(qdev, probe, qdev->ndev,
		   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
		   "XG Roll = %d, XG Rev = %d.\n",
		   qdev->func,
		   qdev->port,
		   qdev->chip_rev_id & 0x0000000f,
		   qdev->chip_rev_id >> 4 & 0x0000000f,
		   qdev->chip_rev_id >> 8 & 0x0000000f,
		   qdev->chip_rev_id >> 12 & 0x0000000f);
	netif_info(qdev, probe, qdev->ndev,
		   "MAC address %pM\n", ndev->dev_addr);
}
static int ql_wol(struct ql_adapter *qdev)
{
	int status = 0;
	u32 wol = MB_WOL_DISABLE;

	/* The CAM is still intact after a reset, but if we
	 * are doing WOL, then we may need to program the
	 * routing regs. We would also need to issue the mailbox
	 * commands to instruct the MPI what to do per the ethtool
	 * settings.
	 */

	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
			WAKE_MCAST | WAKE_BCAST)) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
			  qdev->wol);
		return -EINVAL;
	}

	if (qdev->wol & WAKE_MAGIC) {
		status = ql_mb_wol_set_magic(qdev, 1);
		if (status) {
			netif_err(qdev, ifdown, qdev->ndev,
				  "Failed to set magic packet on %s.\n",
				  qdev->ndev->name);
			return status;
		} else
			netif_info(qdev, drv, qdev->ndev,
				   "Enabled magic packet successfully on %s.\n",
				   qdev->ndev->name);

		wol |= MB_WOL_MAGIC_PKT;
	}

	if (qdev->wol) {
		wol |= MB_WOL_MODE_ON;
		status = ql_mb_wol_mode(qdev, wol);
		netif_err(qdev, drv, qdev->ndev,
			  "WOL %s (wol code 0x%x) on %s\n",
			  (status == 0) ? "Successfully set" : "Failed",
			  wol, qdev->ndev->name);
	}

	return status;
}
static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
{

	/* Don't kill the reset worker thread if we
	 * are in the process of recovery.
	 */
	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
		cancel_delayed_work_sync(&qdev->asic_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_work);
	cancel_delayed_work_sync(&qdev->mpi_idc_work);
	cancel_delayed_work_sync(&qdev->mpi_core_to_log);
	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
}

static int ql_adapter_down(struct ql_adapter *qdev)
{
	int i, status = 0;

	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		napi_disable(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);

	ql_disable_interrupts(qdev);

	ql_tx_ring_clean(qdev);

	/* Call netif_napi_del() from common point.
	 */
	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	status = ql_adapter_reset(qdev);
	if (status)
		netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
			  qdev->func);
	ql_free_rx_buffers(qdev);

	return status;
}
static int ql_adapter_up(struct ql_adapter *qdev)
{
	int err = 0;

	err = ql_adapter_initialize(qdev);
	if (err) {
		netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
		goto err_init;
	}
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_alloc_rx_buffers(qdev);
	/* If the port is initialized and the
	 * link is up then turn on the carrier.
	 */
	if ((ql_read32(qdev, STS) & qdev->port_init) &&
	    (ql_read32(qdev, STS) & qdev->port_link_up))
		ql_link_on(qdev);
	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	/* Restore vlan setting. */
	qlge_restore_vlan(qdev);

	ql_enable_interrupts(qdev);
	ql_enable_all_completion_interrupts(qdev);
	netif_tx_start_all_queues(qdev->ndev);

	return 0;
err_init:
	ql_adapter_reset(qdev);
	return err;
}

static void ql_release_adapter_resources(struct ql_adapter *qdev)
{
	ql_free_mem_resources(qdev);
	ql_free_irq(qdev);
}

static int ql_get_adapter_resources(struct ql_adapter *qdev)
{
	int status = 0;

	if (ql_alloc_mem_resources(qdev)) {
		netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
		return -ENOMEM;
	}
	status = ql_request_irq(qdev);
	return status;
}
static int qlge_close(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	/* If we hit pci_channel_io_perm_failure
	 * failure condition, then we already
	 * brought the adapter down.
	 */
	if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
		netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
		clear_bit(QL_EEH_FATAL, &qdev->flags);
		return 0;
	}

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(1);
	ql_adapter_down(qdev);
	ql_release_adapter_resources(qdev);
	return 0;
}
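
/* Size the rings based on CPU count and the MSI-X vectors actually
 * granted: one RSS ring per vector, one TX ring (plus its outbound
 * completion ring) per CPU.
 */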
static int ql_configure_rings(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;
	struct tx_ring *tx_ring;
	int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
	unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;

	qdev->lbq_buf_order = get_order(lbq_buf_len);

	/* In a perfect world we have one RSS ring for each CPU
	 * and each has it's own vector.  To do that we ask for
	 * cpu_cnt vectors.  ql_enable_msix() will adjust the
	 * vector count to what we actually get.  We then
	 * allocate an RSS ring for each.
	 * Essentially, we are doing min(cpu_count, msix_vector_count).
	 */
	qdev->intr_count = cpu_cnt;
	ql_enable_msix(qdev);
	/* Adjust the RSS ring count to the actual vector count. */
	qdev->rss_ring_count = qdev->intr_count;
	qdev->tx_ring_count = cpu_cnt;
	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;

	for (i = 0; i < qdev->tx_ring_count; i++) {
		tx_ring = &qdev->tx_ring[i];
		memset((void *)tx_ring, 0, sizeof(*tx_ring));
		tx_ring->qdev = qdev;
		tx_ring->wq_id = i;
		tx_ring->wq_len = qdev->tx_ring_size;
		tx_ring->wq_size =
		    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);

		/*
		 * The completion queue ID for the tx rings start
		 * immediately after the rss rings.
		 */
		tx_ring->cq_id = qdev->rss_ring_count + i;
	}

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		memset((void *)rx_ring, 0, sizeof(*rx_ring));
		rx_ring->qdev = qdev;
		rx_ring->cq_id = i;
		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
		if (i < qdev->rss_ring_count) {
			/*
			 * Inbound (RSS) queues.
			 */
			rx_ring->cq_len = qdev->rx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
			rx_ring->lbq_size =
			    rx_ring->lbq_len * sizeof(__le64);
			rx_ring->lbq_buf_size = (u16)lbq_buf_len;
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "lbq_buf_size %d, order = %d\n",
				     rx_ring->lbq_buf_size,
				     qdev->lbq_buf_order);
			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
			rx_ring->sbq_size =
			    rx_ring->sbq_len * sizeof(__le64);
			rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
			rx_ring->type = RX_Q;
		} else {
			/*
			 * Outbound queue handles outbound completions only.
			 */
			/* outbound cq is same size as tx_ring it services. */
			rx_ring->cq_len = qdev->tx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = 0;
			rx_ring->lbq_size = 0;
			rx_ring->lbq_buf_size = 0;
			rx_ring->sbq_len = 0;
			rx_ring->sbq_size = 0;
			rx_ring->sbq_buf_size = 0;
			rx_ring->type = TX_Q;
		}
	}
	return 0;
}
static int qlge_open(struct net_device *ndev)
{
	int err = 0;
	struct ql_adapter *qdev = netdev_priv(ndev);

	err = ql_adapter_reset(qdev);
	if (err)
		return err;

	err = ql_configure_rings(qdev);
	if (err)
		return err;

	err = ql_get_adapter_resources(qdev);
	if (err)
		goto error_up;

	err = ql_adapter_up(qdev);
	if (err)
		goto error_up;

	return err;

error_up:
	ql_release_adapter_resources(qdev);
	return err;
}
static int ql_change_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i, status;
	unsigned int lbq_buf_len;

	/* Wait for an outstanding reset to complete. */
	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
		int i = 3;
		while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Waiting for adapter UP...\n");
			ssleep(1);
		}

		if (!i) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Timed out waiting for adapter UP\n");
			return -ETIMEDOUT;
		}
	}

	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	/* Get the new rx buffer size. */
	lbq_buf_len = (qdev->ndev->mtu > 1500) ?
		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
	qdev->lbq_buf_order = get_order(lbq_buf_len);

	for (i = 0; i < qdev->rss_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		/* Set the new size. */
		rx_ring->lbq_buf_size = lbq_buf_len;
	}

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	return status;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device.\n");
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	return status;
}
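
/* Only 1500 <-> 9000 MTU transitions are accepted; the change
 * requires a full down/up cycle to resize the receive buffers.
 */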
static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;

	if (ndev->mtu == 1500 && new_mtu == 9000) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
	} else
		return -EINVAL;

	queue_delayed_work(qdev->workqueue,
			&qdev->mpi_port_cfg_work, 3*HZ);

	ndev->mtu = new_mtu;

	if (!netif_running(qdev->ndev))
		return 0;

	status = ql_change_rx_buffers(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Changing MTU failed.\n");
	}

	return status;
}
static struct net_device_stats *qlge_get_stats(struct net_device
					       *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct rx_ring *rx_ring = &qdev->rx_ring[0];
	struct tx_ring *tx_ring = &qdev->tx_ring[0];
	unsigned long pkts, mcast, dropped, errors, bytes;
	int i;

	/* Get RX stats. */
	pkts = mcast = dropped = errors = bytes = 0;
	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
		pkts += rx_ring->rx_packets;
		bytes += rx_ring->rx_bytes;
		dropped += rx_ring->rx_dropped;
		errors += rx_ring->rx_errors;
		mcast += rx_ring->rx_multicast;
	}
	ndev->stats.rx_packets = pkts;
	ndev->stats.rx_bytes = bytes;
	ndev->stats.rx_dropped = dropped;
	ndev->stats.rx_errors = errors;
	ndev->stats.multicast = mcast;

	/* Get TX stats. */
	pkts = errors = bytes = 0;
	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
		pkts += tx_ring->tx_packets;
		bytes += tx_ring->tx_bytes;
		errors += tx_ring->tx_errors;
	}
	ndev->stats.tx_packets = pkts;
	ndev->stats.tx_bytes = bytes;
	ndev->stats.tx_errors = errors;
	return &ndev->stats;
}
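/* ndo_set_rx_mode handler.  Grab the routing-index hardware semaphore,
 * reconcile promiscuous and all-multicast state with the routing
 * registers, and reload the multicast CAM when an explicit address
 * list is in use.
 */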
static void qlge_set_multicast_list(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	int i, status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return;
	/*
	 * Set or clear promiscuous mode if a
	 * transition is taking place.
	 */
	if (ndev->flags & IFF_PROMISC) {
		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set promiscuous mode.\n");
			} else {
				set_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear promiscuous mode.\n");
			} else {
				clear_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	}

	/*
	 * Set or clear all multicast mode if a
	 * transition is taking place.
	 */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set all-multi mode.\n");
			} else {
				set_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear all-multi mode.\n");
			} else {
				clear_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	}

	if (!netdev_mc_empty(ndev)) {
		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
		if (status)
			goto exit;
		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
						MAC_ADDR_TYPE_MULTI_MAC, i)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to load multicast address.\n");
				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
				goto exit;
			}
			i++;
		}
		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
		if (ql_set_routing_reg
		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
			netif_err(qdev, hw, qdev->ndev,
				  "Failed to set multicast match mode.\n");
		} else {
			set_bit(QL_ALLMULTI, &qdev->flags);
		}
	}
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
}
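/* ndo_set_mac_address handler.  Validate the new address, keep a local
 * copy, and program it into the CAM under the MAC-address semaphore.
 */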
static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int status;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	/* Update local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	if (status)
		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}
static void qlge_tx_timeout(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	ql_queue_asic_error(qdev);
}
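/* Worker that recovers from an ASIC error by cycling the adapter down
 * and back up.  The rx mode flags are cleared first so that
 * qlge_set_multicast_list() reprograms promiscuous/all-multi state
 * from scratch.  On failure the device is closed.
 */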
static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
	    container_of(work, struct ql_adapter, asic_reset_work.work);
	int status;

	rtnl_lock();
	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	rtnl_unlock();
	return;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device\n");

	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	rtnl_unlock();
}
static const struct nic_operations qla8012_nic_ops = {
	.get_flash		= ql_get_8012_flash_params,
	.port_initialize	= ql_8012_port_initialize,
};

static const struct nic_operations qla8000_nic_ops = {
	.get_flash		= ql_get_8000_flash_params,
	.port_initialize	= ql_8000_port_initialize,
};
/* Find the pcie function number for the other NIC
 * on this chip.  Since both NIC functions share a
 * common firmware we have the lowest enabled function
 * do any common work.  Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
			&temp);
	if (status)
		return status;

	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
			MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
			MPI_TEST_NIC_FUNC_MASK);

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;

	return status;
}
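/* Derive per-function parameters from the chip: which PCI function we
 * are, which XGMAC semaphore, link/init status bits and mailbox
 * addresses apply to this port, and which nic_operations table matches
 * the device ID (8012 vs 8000).
 */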
static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;

	qdev->func =
	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func > 3)
		return -EIO;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}
static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}
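/* One-time PCI and software initialization shared by probe and EEH
 * recovery: enable the device, map BAR1 (registers) and BAR3
 * (doorbells), read board/function information, allocate the optional
 * MPI coredump buffer, read the flash, and set default ring,
 * coalescing and worker-thread parameters.
 */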
static int __devinit ql_init_device(struct pci_dev *pdev,
				    struct net_device *ndev, int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_out1;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		return err;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
					DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out2;
	}

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
	    ioremap_nocache(pci_resource_start(pdev, 3),
			    pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (qdev->mpi_coredump == NULL) {
			dev_err(&pdev->dev, "Coredump alloc failed.\n");
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);
	mutex_init(&qdev->mpi_mutex);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}
static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_rx_mode	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_fix_features	= qlge_fix_features,
	.ndo_set_features	= qlge_set_features,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};
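/* Periodic (5 second) timer used as an EEH heartbeat: reading the
 * status register forces a PCI access so a dead bus is noticed
 * promptly.  The timer is not rearmed once the channel is offline.
 */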
static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var = 0;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
}
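/* PCI probe entry point: allocate a multi-queue net_device sized to
 * the online CPU count, run the common ql_init_device() setup, set the
 * offload feature flags, register with the net core and start the EEH
 * heartbeat timer.
 */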
static int __devinit qlge_probe(struct pci_dev *pdev,
				const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found = 0;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
			min(MAX_CPUS, (int)num_online_cpus()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
		NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	ql_link_off(qdev);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}
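/* Thin wrappers around the normal send/clean paths; they are
 * non-static so the ethtool self-test code can presumably run its
 * loopback test through the regular data path.
 */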
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}
static void __devexit qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}
/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* Disabling the timer */
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}
/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * it resembles the first half of the qlge_probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
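/* Final stage of EEH recovery: reopen the interface if it was running
 * before the error, restart the heartbeat timer and reattach the
 * device to the stack.
 */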
static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
}
static struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};
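/* Legacy PCI power management hooks.  Suspend brings the adapter down
 * and arms wake-on-LAN via ql_wol(); resume restores PCI state and
 * brings the adapter back up if the interface was running.
 */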
#ifdef CONFIG_PM
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */
static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}
static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};
static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);