/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <net/ip6_checksum.h>

#include "qlge.h"
char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = 0x00007fff;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static int irq_type = MSIX_IRQ;
module_param(irq_type, int, MSIX_IRQ);
MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!.\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;

	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}
void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}
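
/*
 * Typical usage of the semaphore helpers above (illustrative sketch only;
 * the real callers appear later in this file and add their own error
 * handling):
 *
 *	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 *	if (status)
 *		return status;
 *	... touch the MAC address CAM registers ...
 *	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 *
 * ql_sem_trylock() makes a single attempt; ql_sem_spinlock() retries a
 * bounded number of times before giving up.
 */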
/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			QPRINTK(qdev, PROBE, ALERT,
				"register 0x%.08x access error, value = 0x%.08x!.\n",
				reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	QPRINTK(qdev, PROBE, ALERT,
		"Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}
/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}
/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		return status;

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}
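
/*
 * Illustrative call sequence for ql_write_cfg() (a sketch, not lifted from
 * the original source): downloading a completion-queue control block would
 * look roughly like
 *
 *	status = ql_write_cfg(qdev, &rx_ring->cqicb, sizeof(struct cqicb),
 *			      CFG_LCQ, rx_ring->cq_id);
 *
 * The helper maps the block for DMA, takes the ICB semaphore, writes the
 * bus address into ICB_L/ICB_H, kicks CFG, and waits for the load bit to
 * clear before unmapping again.
 */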
244 /* Get a specific MAC address from the CAM. Used for debug and reg dump. */
245 int ql_get_mac_addr_reg(struct ql_adapter
*qdev
, u32 type
, u16 index
,
252 case MAC_ADDR_TYPE_MULTI_MAC
:
253 case MAC_ADDR_TYPE_CAM_MAC
:
256 ql_wait_reg_rdy(qdev
,
257 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
260 ql_write32(qdev
, MAC_ADDR_IDX
, (offset
++) | /* offset */
261 (index
<< MAC_ADDR_IDX_SHIFT
) | /* index */
262 MAC_ADDR_ADR
| MAC_ADDR_RS
| type
); /* type */
264 ql_wait_reg_rdy(qdev
,
265 MAC_ADDR_IDX
, MAC_ADDR_MR
, 0);
268 *value
++ = ql_read32(qdev
, MAC_ADDR_DATA
);
270 ql_wait_reg_rdy(qdev
,
271 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
274 ql_write32(qdev
, MAC_ADDR_IDX
, (offset
++) | /* offset */
275 (index
<< MAC_ADDR_IDX_SHIFT
) | /* index */
276 MAC_ADDR_ADR
| MAC_ADDR_RS
| type
); /* type */
278 ql_wait_reg_rdy(qdev
,
279 MAC_ADDR_IDX
, MAC_ADDR_MR
, 0);
282 *value
++ = ql_read32(qdev
, MAC_ADDR_DATA
);
283 if (type
== MAC_ADDR_TYPE_CAM_MAC
) {
285 ql_wait_reg_rdy(qdev
,
286 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
289 ql_write32(qdev
, MAC_ADDR_IDX
, (offset
++) | /* offset */
290 (index
<< MAC_ADDR_IDX_SHIFT
) | /* index */
291 MAC_ADDR_ADR
| MAC_ADDR_RS
| type
); /* type */
293 ql_wait_reg_rdy(qdev
, MAC_ADDR_IDX
,
297 *value
++ = ql_read32(qdev
, MAC_ADDR_DATA
);
301 case MAC_ADDR_TYPE_VLAN
:
302 case MAC_ADDR_TYPE_MULTI_FLTR
:
304 QPRINTK(qdev
, IFUP
, CRIT
,
305 "Address type %d not yet supported.\n", type
);
312 /* Set up a MAC, multicast or VLAN address for the
313 * inbound frame matching.
315 static int ql_set_mac_addr_reg(struct ql_adapter
*qdev
, u8
*addr
, u32 type
,
322 case MAC_ADDR_TYPE_MULTI_MAC
:
324 u32 upper
= (addr
[0] << 8) | addr
[1];
325 u32 lower
= (addr
[2] << 24) | (addr
[3] << 16) |
326 (addr
[4] << 8) | (addr
[5]);
329 ql_wait_reg_rdy(qdev
,
330 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
333 ql_write32(qdev
, MAC_ADDR_IDX
, (offset
++) |
334 (index
<< MAC_ADDR_IDX_SHIFT
) |
336 ql_write32(qdev
, MAC_ADDR_DATA
, lower
);
338 ql_wait_reg_rdy(qdev
,
339 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
342 ql_write32(qdev
, MAC_ADDR_IDX
, (offset
++) |
343 (index
<< MAC_ADDR_IDX_SHIFT
) |
346 ql_write32(qdev
, MAC_ADDR_DATA
, upper
);
348 ql_wait_reg_rdy(qdev
,
349 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
354 case MAC_ADDR_TYPE_CAM_MAC
:
357 u32 upper
= (addr
[0] << 8) | addr
[1];
359 (addr
[2] << 24) | (addr
[3] << 16) | (addr
[4] << 8) |
362 QPRINTK(qdev
, IFUP
, DEBUG
,
363 "Adding %s address %pM"
364 " at index %d in the CAM.\n",
366 MAC_ADDR_TYPE_MULTI_MAC
) ? "MULTICAST" :
367 "UNICAST"), addr
, index
);
370 ql_wait_reg_rdy(qdev
,
371 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
374 ql_write32(qdev
, MAC_ADDR_IDX
, (offset
++) | /* offset */
375 (index
<< MAC_ADDR_IDX_SHIFT
) | /* index */
377 ql_write32(qdev
, MAC_ADDR_DATA
, lower
);
379 ql_wait_reg_rdy(qdev
,
380 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
383 ql_write32(qdev
, MAC_ADDR_IDX
, (offset
++) | /* offset */
384 (index
<< MAC_ADDR_IDX_SHIFT
) | /* index */
386 ql_write32(qdev
, MAC_ADDR_DATA
, upper
);
388 ql_wait_reg_rdy(qdev
,
389 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
392 ql_write32(qdev
, MAC_ADDR_IDX
, (offset
) | /* offset */
393 (index
<< MAC_ADDR_IDX_SHIFT
) | /* index */
395 /* This field should also include the queue id
396 and possibly the function id. Right now we hardcode
397 the route field to NIC core.
399 cam_output
= (CAM_OUT_ROUTE_NIC
|
401 func
<< CAM_OUT_FUNC_SHIFT
) |
402 (0 << CAM_OUT_CQ_ID_SHIFT
));
404 cam_output
|= CAM_OUT_RV
;
405 /* route to NIC core */
406 ql_write32(qdev
, MAC_ADDR_DATA
, cam_output
);
409 case MAC_ADDR_TYPE_VLAN
:
411 u32 enable_bit
= *((u32
*) &addr
[0]);
412 /* For VLAN, the addr actually holds a bit that
413 * either enables or disables the vlan id we are
414 * addressing. It's either MAC_ADDR_E on or off.
415 * That's bit-27 we're talking about.
417 QPRINTK(qdev
, IFUP
, INFO
, "%s VLAN ID %d %s the CAM.\n",
418 (enable_bit
? "Adding" : "Removing"),
419 index
, (enable_bit
? "to" : "from"));
422 ql_wait_reg_rdy(qdev
,
423 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
426 ql_write32(qdev
, MAC_ADDR_IDX
, offset
| /* offset */
427 (index
<< MAC_ADDR_IDX_SHIFT
) | /* index */
429 enable_bit
); /* enable/disable */
432 case MAC_ADDR_TYPE_MULTI_FLTR
:
434 QPRINTK(qdev
, IFUP
, CRIT
,
435 "Address type %d not yet supported.\n", type
);
442 /* Set or clear MAC address in hardware. We sometimes
443 * have to clear it to prevent wrong frame routing
444 * especially in a bonding environment.
446 static int ql_set_mac_addr(struct ql_adapter
*qdev
, int set
)
449 char zero_mac_addr
[ETH_ALEN
];
453 addr
= &qdev
->ndev
->dev_addr
[0];
454 QPRINTK(qdev
, IFUP
, DEBUG
,
455 "Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n",
456 addr
[0], addr
[1], addr
[2], addr
[3],
459 memset(zero_mac_addr
, 0, ETH_ALEN
);
460 addr
= &zero_mac_addr
[0];
461 QPRINTK(qdev
, IFUP
, DEBUG
,
462 "Clearing MAC address on %s\n",
465 status
= ql_sem_spinlock(qdev
, SEM_MAC_ADDR_MASK
);
468 status
= ql_set_mac_addr_reg(qdev
, (u8
*) addr
,
469 MAC_ADDR_TYPE_CAM_MAC
, qdev
->func
* MAX_CQ
);
470 ql_sem_unlock(qdev
, SEM_MAC_ADDR_MASK
);
	if (status)
		QPRINTK(qdev, IFUP, ERR, "Failed to init mac "
			"address.\n");
	return status;
}

void ql_link_on(struct ql_adapter *qdev)
{
	QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
		 qdev->ndev->name);
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
		 qdev->ndev->name);
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}
493 /* Get a specific frame routing value from the CAM.
494 * Used for debug and reg dump.
496 int ql_get_routing_reg(struct ql_adapter
*qdev
, u32 index
, u32
*value
)
500 status
= ql_wait_reg_rdy(qdev
, RT_IDX
, RT_IDX_MW
, 0);
504 ql_write32(qdev
, RT_IDX
,
505 RT_IDX_TYPE_NICQ
| RT_IDX_RS
| (index
<< RT_IDX_IDX_SHIFT
));
506 status
= ql_wait_reg_rdy(qdev
, RT_IDX
, RT_IDX_MR
, 0);
509 *value
= ql_read32(qdev
, RT_DATA
);
514 /* The NIC function for this chip has 16 routing indexes. Each one can be used
515 * to route different frame types to various inbound queues. We send broadcast/
516 * multicast/error frames to the default queue for slow handling,
517 * and CAM hit/RSS frames to the fast handling queues.
519 static int ql_set_routing_reg(struct ql_adapter
*qdev
, u32 index
, u32 mask
,
522 int status
= -EINVAL
; /* Return error if no mask match. */
525 QPRINTK(qdev
, IFUP
, DEBUG
,
526 "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
527 (enable
? "Adding" : "Removing"),
528 ((index
== RT_IDX_ALL_ERR_SLOT
) ? "MAC ERROR/ALL ERROR" : ""),
529 ((index
== RT_IDX_IP_CSUM_ERR_SLOT
) ? "IP CSUM ERROR" : ""),
531 RT_IDX_TCP_UDP_CSUM_ERR_SLOT
) ? "TCP/UDP CSUM ERROR" : ""),
532 ((index
== RT_IDX_BCAST_SLOT
) ? "BROADCAST" : ""),
533 ((index
== RT_IDX_MCAST_MATCH_SLOT
) ? "MULTICAST MATCH" : ""),
534 ((index
== RT_IDX_ALLMULTI_SLOT
) ? "ALL MULTICAST MATCH" : ""),
535 ((index
== RT_IDX_UNUSED6_SLOT
) ? "UNUSED6" : ""),
536 ((index
== RT_IDX_UNUSED7_SLOT
) ? "UNUSED7" : ""),
537 ((index
== RT_IDX_RSS_MATCH_SLOT
) ? "RSS ALL/IPV4 MATCH" : ""),
538 ((index
== RT_IDX_RSS_IPV6_SLOT
) ? "RSS IPV6" : ""),
539 ((index
== RT_IDX_RSS_TCP4_SLOT
) ? "RSS TCP4" : ""),
540 ((index
== RT_IDX_RSS_TCP6_SLOT
) ? "RSS TCP6" : ""),
541 ((index
== RT_IDX_CAM_HIT_SLOT
) ? "CAM HIT" : ""),
542 ((index
== RT_IDX_UNUSED013
) ? "UNUSED13" : ""),
543 ((index
== RT_IDX_UNUSED014
) ? "UNUSED14" : ""),
544 ((index
== RT_IDX_PROMISCUOUS_SLOT
) ? "PROMISCUOUS" : ""),
545 (enable
? "to" : "from"));
550 value
= RT_IDX_DST_CAM_Q
| /* dest */
551 RT_IDX_TYPE_NICQ
| /* type */
552 (RT_IDX_CAM_HIT_SLOT
<< RT_IDX_IDX_SHIFT
);/* index */
555 case RT_IDX_VALID
: /* Promiscuous Mode frames. */
557 value
= RT_IDX_DST_DFLT_Q
| /* dest */
558 RT_IDX_TYPE_NICQ
| /* type */
559 (RT_IDX_PROMISCUOUS_SLOT
<< RT_IDX_IDX_SHIFT
);/* index */
562 case RT_IDX_ERR
: /* Pass up MAC,IP,TCP/UDP error frames. */
564 value
= RT_IDX_DST_DFLT_Q
| /* dest */
565 RT_IDX_TYPE_NICQ
| /* type */
566 (RT_IDX_ALL_ERR_SLOT
<< RT_IDX_IDX_SHIFT
);/* index */
569 case RT_IDX_BCAST
: /* Pass up Broadcast frames to default Q. */
571 value
= RT_IDX_DST_DFLT_Q
| /* dest */
572 RT_IDX_TYPE_NICQ
| /* type */
573 (RT_IDX_BCAST_SLOT
<< RT_IDX_IDX_SHIFT
);/* index */
576 case RT_IDX_MCAST
: /* Pass up All Multicast frames. */
578 value
= RT_IDX_DST_DFLT_Q
| /* dest */
579 RT_IDX_TYPE_NICQ
| /* type */
580 (RT_IDX_ALLMULTI_SLOT
<< RT_IDX_IDX_SHIFT
);/* index */
583 case RT_IDX_MCAST_MATCH
: /* Pass up matched Multicast frames. */
585 value
= RT_IDX_DST_DFLT_Q
| /* dest */
586 RT_IDX_TYPE_NICQ
| /* type */
587 (RT_IDX_MCAST_MATCH_SLOT
<< RT_IDX_IDX_SHIFT
);/* index */
590 case RT_IDX_RSS_MATCH
: /* Pass up matched RSS frames. */
592 value
= RT_IDX_DST_RSS
| /* dest */
593 RT_IDX_TYPE_NICQ
| /* type */
594 (RT_IDX_RSS_MATCH_SLOT
<< RT_IDX_IDX_SHIFT
);/* index */
597 case 0: /* Clear the E-bit on an entry. */
599 value
= RT_IDX_DST_DFLT_Q
| /* dest */
600 RT_IDX_TYPE_NICQ
| /* type */
601 (index
<< RT_IDX_IDX_SHIFT
);/* index */
605 QPRINTK(qdev
, IFUP
, ERR
, "Mask type %d not yet supported.\n",
612 status
= ql_wait_reg_rdy(qdev
, RT_IDX
, RT_IDX_MW
, 0);
615 value
|= (enable
? RT_IDX_E
: 0);
616 ql_write32(qdev
, RT_IDX
, value
);
617 ql_write32(qdev
, RT_DATA
, enable
? mask
: 0);
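
/*
 * Sketch of how a routing slot is normally programmed (illustrative; the
 * typical callers live in the route-initialization code further down in
 * the driver):
 *
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT,
 *				    RT_IDX_BCAST, 1);
 *
 * This steers broadcast frames to the default (slow path) completion
 * queue, while the CAM-hit and RSS slots steer unicast and hashed traffic
 * to the per-vector queues.
 */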
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
639 u32
ql_enable_completion_interrupt(struct ql_adapter
*qdev
, u32 intr
)
642 unsigned long hw_flags
= 0;
643 struct intr_context
*ctx
= qdev
->intr_context
+ intr
;
645 if (likely(test_bit(QL_MSIX_ENABLED
, &qdev
->flags
) && intr
)) {
646 /* Always enable if we're MSIX multi interrupts and
647 * it's not the default (zeroeth) interrupt.
649 ql_write32(qdev
, INTR_EN
,
651 var
= ql_read32(qdev
, STS
);
655 spin_lock_irqsave(&qdev
->hw_lock
, hw_flags
);
656 if (atomic_dec_and_test(&ctx
->irq_cnt
)) {
657 ql_write32(qdev
, INTR_EN
,
659 var
= ql_read32(qdev
, STS
);
661 spin_unlock_irqrestore(&qdev
->hw_lock
, hw_flags
);
665 static u32
ql_disable_completion_interrupt(struct ql_adapter
*qdev
, u32 intr
)
668 struct intr_context
*ctx
;
670 /* HW disables for us if we're MSIX multi interrupts and
671 * it's not the default (zeroeth) interrupt.
673 if (likely(test_bit(QL_MSIX_ENABLED
, &qdev
->flags
) && intr
))
676 ctx
= qdev
->intr_context
+ intr
;
677 spin_lock(&qdev
->hw_lock
);
678 if (!atomic_read(&ctx
->irq_cnt
)) {
679 ql_write32(qdev
, INTR_EN
,
681 var
= ql_read32(qdev
, STS
);
683 atomic_inc(&ctx
->irq_cnt
);
684 spin_unlock(&qdev
->hw_lock
);
688 static void ql_enable_all_completion_interrupts(struct ql_adapter
*qdev
)
691 for (i
= 0; i
< qdev
->intr_count
; i
++) {
692 /* The enable call does a atomic_dec_and_test
693 * and enables only if the result is zero.
694 * So we precharge it here.
696 if (unlikely(!test_bit(QL_MSIX_ENABLED
, &qdev
->flags
) ||
698 atomic_set(&qdev
->intr_context
[i
].irq_cnt
, 1);
699 ql_enable_completion_interrupt(qdev
, i
);
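
/*
 * Interrupt accounting sketch (assumes the irq_cnt semantics described in
 * the comment above ql_enable_completion_interrupt()): outside MSI-X
 * multi-vector mode each vector behaves like a small disable count.
 *
 *	ql_disable_completion_interrupt(qdev, intr);  // irq_cnt: 0 -> 1
 *	... run the handler / napi poll ...
 *	ql_enable_completion_interrupt(qdev, intr);   // irq_cnt: 1 -> 0,
 *						      // hardware re-armed
 *
 * The loop above pre-charges irq_cnt to 1 so the following enable call
 * actually reaches zero and writes INTR_EN.
 */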
704 static int ql_validate_flash(struct ql_adapter
*qdev
, u32 size
, const char *str
)
708 __le16
*flash
= (__le16
*)&qdev
->flash
;
710 status
= strncmp((char *)&qdev
->flash
, str
, 4);
712 QPRINTK(qdev
, IFUP
, ERR
, "Invalid flash signature.\n");
716 for (i
= 0; i
< size
; i
++)
717 csum
+= le16_to_cpu(*flash
++);
720 QPRINTK(qdev
, IFUP
, ERR
,
721 "Invalid flash checksum, csum = 0x%.04x.\n", csum
);
726 static int ql_read_flash_word(struct ql_adapter
*qdev
, int offset
, __le32
*data
)
729 /* wait for reg to come ready */
730 status
= ql_wait_reg_rdy(qdev
,
731 FLASH_ADDR
, FLASH_ADDR_RDY
, FLASH_ADDR_ERR
);
734 /* set up for reg read */
735 ql_write32(qdev
, FLASH_ADDR
, FLASH_ADDR_R
| offset
);
736 /* wait for reg to come ready */
737 status
= ql_wait_reg_rdy(qdev
,
738 FLASH_ADDR
, FLASH_ADDR_RDY
, FLASH_ADDR_ERR
);
741 /* This data is stored on flash as an array of
742 * __le32. Since ql_read32() returns cpu endian
743 * we need to swap it back.
745 *data
= cpu_to_le32(ql_read32(qdev
, FLASH_DATA
));
750 static int ql_get_8000_flash_params(struct ql_adapter
*qdev
)
754 __le32
*p
= (__le32
*)&qdev
->flash
;
758 /* Get flash offset for function and adjust
762 offset
= FUNC0_FLASH_OFFSET
/ sizeof(u32
);
764 offset
= FUNC1_FLASH_OFFSET
/ sizeof(u32
);
766 if (ql_sem_spinlock(qdev
, SEM_FLASH_MASK
))
769 size
= sizeof(struct flash_params_8000
) / sizeof(u32
);
770 for (i
= 0; i
< size
; i
++, p
++) {
771 status
= ql_read_flash_word(qdev
, i
+offset
, p
);
773 QPRINTK(qdev
, IFUP
, ERR
, "Error reading flash.\n");
778 status
= ql_validate_flash(qdev
,
779 sizeof(struct flash_params_8000
) / sizeof(u16
),
782 QPRINTK(qdev
, IFUP
, ERR
, "Invalid flash.\n");
787 /* Extract either manufacturer or BOFM modified
790 if (qdev
->flash
.flash_params_8000
.data_type1
== 2)
792 qdev
->flash
.flash_params_8000
.mac_addr1
,
793 qdev
->ndev
->addr_len
);
796 qdev
->flash
.flash_params_8000
.mac_addr
,
797 qdev
->ndev
->addr_len
);
799 if (!is_valid_ether_addr(mac_addr
)) {
800 QPRINTK(qdev
, IFUP
, ERR
, "Invalid MAC address.\n");
805 memcpy(qdev
->ndev
->dev_addr
,
807 qdev
->ndev
->addr_len
);
810 ql_sem_unlock(qdev
, SEM_FLASH_MASK
);
814 static int ql_get_8012_flash_params(struct ql_adapter
*qdev
)
818 __le32
*p
= (__le32
*)&qdev
->flash
;
820 u32 size
= sizeof(struct flash_params_8012
) / sizeof(u32
);
822 /* Second function's parameters follow the first
828 if (ql_sem_spinlock(qdev
, SEM_FLASH_MASK
))
831 for (i
= 0; i
< size
; i
++, p
++) {
832 status
= ql_read_flash_word(qdev
, i
+offset
, p
);
834 QPRINTK(qdev
, IFUP
, ERR
, "Error reading flash.\n");
840 status
= ql_validate_flash(qdev
,
841 sizeof(struct flash_params_8012
) / sizeof(u16
),
844 QPRINTK(qdev
, IFUP
, ERR
, "Invalid flash.\n");
849 if (!is_valid_ether_addr(qdev
->flash
.flash_params_8012
.mac_addr
)) {
854 memcpy(qdev
->ndev
->dev_addr
,
855 qdev
->flash
.flash_params_8012
.mac_addr
,
856 qdev
->ndev
->addr_len
);
859 ql_sem_unlock(qdev
, SEM_FLASH_MASK
);
863 /* xgmac register are located behind the xgmac_addr and xgmac_data
864 * register pair. Each read/write requires us to wait for the ready
865 * bit before reading/writing the data.
867 static int ql_write_xgmac_reg(struct ql_adapter
*qdev
, u32 reg
, u32 data
)
870 /* wait for reg to come ready */
871 status
= ql_wait_reg_rdy(qdev
,
872 XGMAC_ADDR
, XGMAC_ADDR_RDY
, XGMAC_ADDR_XME
);
875 /* write the data to the data reg */
876 ql_write32(qdev
, XGMAC_DATA
, data
);
877 /* trigger the write */
878 ql_write32(qdev
, XGMAC_ADDR
, reg
);
882 /* xgmac register are located behind the xgmac_addr and xgmac_data
883 * register pair. Each read/write requires us to wait for the ready
884 * bit before reading/writing the data.
886 int ql_read_xgmac_reg(struct ql_adapter
*qdev
, u32 reg
, u32
*data
)
889 /* wait for reg to come ready */
890 status
= ql_wait_reg_rdy(qdev
,
891 XGMAC_ADDR
, XGMAC_ADDR_RDY
, XGMAC_ADDR_XME
);
894 /* set up for reg read */
895 ql_write32(qdev
, XGMAC_ADDR
, reg
| XGMAC_ADDR_R
);
896 /* wait for reg to come ready */
897 status
= ql_wait_reg_rdy(qdev
,
898 XGMAC_ADDR
, XGMAC_ADDR_RDY
, XGMAC_ADDR_XME
);
902 *data
= ql_read32(qdev
, XGMAC_DATA
);
907 /* This is used for reading the 64-bit statistics regs. */
908 int ql_read_xgmac_reg64(struct ql_adapter
*qdev
, u32 reg
, u64
*data
)
914 status
= ql_read_xgmac_reg(qdev
, reg
, &lo
);
918 status
= ql_read_xgmac_reg(qdev
, reg
+ 4, &hi
);
922 *data
= (u64
) lo
| ((u64
) hi
<< 32);
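
/*
 * Illustrative use of ql_read_xgmac_reg64() (a sketch; the offset below is
 * hypothetical, real callers pass XGMAC statistics register offsets):
 *
 *	u64 val;
 *	if (ql_read_xgmac_reg64(qdev, 0x200, &val) == 0)
 *		... consume the 64-bit counter ...
 *
 * The helper performs two 32-bit indirect reads (low word, then high word
 * at reg + 4) and merges them into one value.
 */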
928 static int ql_8000_port_initialize(struct ql_adapter
*qdev
)
932 * Get MPI firmware version for driver banner
935 status
= ql_mb_about_fw(qdev
);
938 status
= ql_mb_get_fw_state(qdev
);
941 /* Wake up a worker to get/set the TX/RX frame sizes. */
942 queue_delayed_work(qdev
->workqueue
, &qdev
->mpi_port_cfg_work
, 0);
947 /* Take the MAC Core out of reset.
948 * Enable statistics counting.
949 * Take the transmitter/receiver out of reset.
950 * This functionality may be done in the MPI firmware at a
953 static int ql_8012_port_initialize(struct ql_adapter
*qdev
)
958 if (ql_sem_trylock(qdev
, qdev
->xg_sem_mask
)) {
959 /* Another function has the semaphore, so
960 * wait for the port init bit to come ready.
962 QPRINTK(qdev
, LINK
, INFO
,
963 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
964 status
= ql_wait_reg_rdy(qdev
, STS
, qdev
->port_init
, 0);
966 QPRINTK(qdev
, LINK
, CRIT
,
967 "Port initialize timed out.\n");
972 QPRINTK(qdev
, LINK
, INFO
, "Got xgmac semaphore!.\n");
973 /* Set the core reset. */
974 status
= ql_read_xgmac_reg(qdev
, GLOBAL_CFG
, &data
);
977 data
|= GLOBAL_CFG_RESET
;
978 status
= ql_write_xgmac_reg(qdev
, GLOBAL_CFG
, data
);
982 /* Clear the core reset and turn on jumbo for receiver. */
983 data
&= ~GLOBAL_CFG_RESET
; /* Clear core reset. */
984 data
|= GLOBAL_CFG_JUMBO
; /* Turn on jumbo. */
985 data
|= GLOBAL_CFG_TX_STAT_EN
;
986 data
|= GLOBAL_CFG_RX_STAT_EN
;
987 status
= ql_write_xgmac_reg(qdev
, GLOBAL_CFG
, data
);
	/* Enable transmitter, and clear its reset. */
992 status
= ql_read_xgmac_reg(qdev
, TX_CFG
, &data
);
995 data
&= ~TX_CFG_RESET
; /* Clear the TX MAC reset. */
996 data
|= TX_CFG_EN
; /* Enable the transmitter. */
997 status
= ql_write_xgmac_reg(qdev
, TX_CFG
, data
);
	/* Enable receiver and clear its reset. */
1002 status
= ql_read_xgmac_reg(qdev
, RX_CFG
, &data
);
1005 data
&= ~RX_CFG_RESET
; /* Clear the RX MAC reset. */
1006 data
|= RX_CFG_EN
; /* Enable the receiver. */
1007 status
= ql_write_xgmac_reg(qdev
, RX_CFG
, data
);
1011 /* Turn on jumbo. */
1013 ql_write_xgmac_reg(qdev
, MAC_TX_PARAMS
, MAC_TX_PARAMS_JUMBO
| (0x2580 << 16));
1017 ql_write_xgmac_reg(qdev
, MAC_RX_PARAMS
, 0x2580);
1021 /* Signal to the world that the port is enabled. */
1022 ql_write32(qdev
, STS
, ((qdev
->port_init
<< 16) | qdev
->port_init
));
1024 ql_sem_unlock(qdev
, qdev
->xg_sem_mask
);
/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}
1066 /* Process (refill) a large buffer queue. */
1067 static void ql_update_lbq(struct ql_adapter
*qdev
, struct rx_ring
*rx_ring
)
1069 u32 clean_idx
= rx_ring
->lbq_clean_idx
;
1070 u32 start_idx
= clean_idx
;
1071 struct bq_desc
*lbq_desc
;
1075 while (rx_ring
->lbq_free_cnt
> 16) {
1076 for (i
= 0; i
< 16; i
++) {
1077 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1078 "lbq: try cleaning clean_idx = %d.\n",
1080 lbq_desc
= &rx_ring
->lbq
[clean_idx
];
1081 if (lbq_desc
->p
.lbq_page
== NULL
) {
1082 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1083 "lbq: getting new page for index %d.\n",
1085 lbq_desc
->p
.lbq_page
= alloc_page(GFP_ATOMIC
);
1086 if (lbq_desc
->p
.lbq_page
== NULL
) {
1087 rx_ring
->lbq_clean_idx
= clean_idx
;
1088 QPRINTK(qdev
, RX_STATUS
, ERR
,
1089 "Couldn't get a page.\n");
1092 map
= pci_map_page(qdev
->pdev
,
1093 lbq_desc
->p
.lbq_page
,
1095 PCI_DMA_FROMDEVICE
);
1096 if (pci_dma_mapping_error(qdev
->pdev
, map
)) {
1097 rx_ring
->lbq_clean_idx
= clean_idx
;
1098 put_page(lbq_desc
->p
.lbq_page
);
1099 lbq_desc
->p
.lbq_page
= NULL
;
1100 QPRINTK(qdev
, RX_STATUS
, ERR
,
1101 "PCI mapping failed.\n");
1104 pci_unmap_addr_set(lbq_desc
, mapaddr
, map
);
1105 pci_unmap_len_set(lbq_desc
, maplen
, PAGE_SIZE
);
1106 *lbq_desc
->addr
= cpu_to_le64(map
);
1109 if (clean_idx
== rx_ring
->lbq_len
)
1113 rx_ring
->lbq_clean_idx
= clean_idx
;
1114 rx_ring
->lbq_prod_idx
+= 16;
1115 if (rx_ring
->lbq_prod_idx
== rx_ring
->lbq_len
)
1116 rx_ring
->lbq_prod_idx
= 0;
1117 rx_ring
->lbq_free_cnt
-= 16;
1120 if (start_idx
!= clean_idx
) {
1121 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1122 "lbq: updating prod idx = %d.\n",
1123 rx_ring
->lbq_prod_idx
);
1124 ql_write_db_reg(rx_ring
->lbq_prod_idx
,
1125 rx_ring
->lbq_prod_idx_db_reg
);
1129 /* Process (refill) a small buffer queue. */
1130 static void ql_update_sbq(struct ql_adapter
*qdev
, struct rx_ring
*rx_ring
)
1132 u32 clean_idx
= rx_ring
->sbq_clean_idx
;
1133 u32 start_idx
= clean_idx
;
1134 struct bq_desc
*sbq_desc
;
1138 while (rx_ring
->sbq_free_cnt
> 16) {
1139 for (i
= 0; i
< 16; i
++) {
1140 sbq_desc
= &rx_ring
->sbq
[clean_idx
];
1141 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1142 "sbq: try cleaning clean_idx = %d.\n",
1144 if (sbq_desc
->p
.skb
== NULL
) {
1145 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1146 "sbq: getting new skb for index %d.\n",
1149 netdev_alloc_skb(qdev
->ndev
,
1150 rx_ring
->sbq_buf_size
);
1151 if (sbq_desc
->p
.skb
== NULL
) {
1152 QPRINTK(qdev
, PROBE
, ERR
,
1153 "Couldn't get an skb.\n");
1154 rx_ring
->sbq_clean_idx
= clean_idx
;
1157 skb_reserve(sbq_desc
->p
.skb
, QLGE_SB_PAD
);
1158 map
= pci_map_single(qdev
->pdev
,
1159 sbq_desc
->p
.skb
->data
,
1160 rx_ring
->sbq_buf_size
/
1161 2, PCI_DMA_FROMDEVICE
);
1162 if (pci_dma_mapping_error(qdev
->pdev
, map
)) {
1163 QPRINTK(qdev
, IFUP
, ERR
, "PCI mapping failed.\n");
1164 rx_ring
->sbq_clean_idx
= clean_idx
;
1165 dev_kfree_skb_any(sbq_desc
->p
.skb
);
1166 sbq_desc
->p
.skb
= NULL
;
1169 pci_unmap_addr_set(sbq_desc
, mapaddr
, map
);
1170 pci_unmap_len_set(sbq_desc
, maplen
,
1171 rx_ring
->sbq_buf_size
/ 2);
1172 *sbq_desc
->addr
= cpu_to_le64(map
);
1176 if (clean_idx
== rx_ring
->sbq_len
)
1179 rx_ring
->sbq_clean_idx
= clean_idx
;
1180 rx_ring
->sbq_prod_idx
+= 16;
1181 if (rx_ring
->sbq_prod_idx
== rx_ring
->sbq_len
)
1182 rx_ring
->sbq_prod_idx
= 0;
1183 rx_ring
->sbq_free_cnt
-= 16;
1186 if (start_idx
!= clean_idx
) {
1187 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1188 "sbq: updating prod idx = %d.\n",
1189 rx_ring
->sbq_prod_idx
);
1190 ql_write_db_reg(rx_ring
->sbq_prod_idx
,
1191 rx_ring
->sbq_prod_idx_db_reg
);
static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}
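
/*
 * Refill model (sketch of the code above): both buffer queues are
 * replenished in bursts of 16 descriptors,
 *
 *	while (rx_ring->sbq_free_cnt > 16)
 *		... alloc skb, pci_map_single(), store the 64-bit bus
 *		    address in the descriptor, bump sbq_prod_idx ...
 *
 * and the producer index is only pushed to the doorbell register once at
 * least one full burst has been posted, keeping uncached MMIO writes off
 * the per-packet path.
 */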
1202 /* Unmaps tx buffers. Can be called from send() if a pci mapping
1203 * fails at some stage, or from the interrupt when a tx completes.
1205 static void ql_unmap_send(struct ql_adapter
*qdev
,
1206 struct tx_ring_desc
*tx_ring_desc
, int mapped
)
1209 for (i
= 0; i
< mapped
; i
++) {
1210 if (i
== 0 || (i
== 7 && mapped
> 7)) {
1212 * Unmap the skb->data area, or the
1213 * external sglist (AKA the Outbound
1214 * Address List (OAL)).
			 * If it's the zeroeth element, then it's
1216 * the skb->data area. If it's the 7th
1217 * element and there is more than 6 frags,
1221 QPRINTK(qdev
, TX_DONE
, DEBUG
,
1222 "unmapping OAL area.\n");
1224 pci_unmap_single(qdev
->pdev
,
1225 pci_unmap_addr(&tx_ring_desc
->map
[i
],
1227 pci_unmap_len(&tx_ring_desc
->map
[i
],
1231 QPRINTK(qdev
, TX_DONE
, DEBUG
, "unmapping frag %d.\n",
1233 pci_unmap_page(qdev
->pdev
,
1234 pci_unmap_addr(&tx_ring_desc
->map
[i
],
1236 pci_unmap_len(&tx_ring_desc
->map
[i
],
1237 maplen
), PCI_DMA_TODEVICE
);
1243 /* Map the buffers for this transmit. This will return
1244 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1246 static int ql_map_send(struct ql_adapter
*qdev
,
1247 struct ob_mac_iocb_req
*mac_iocb_ptr
,
1248 struct sk_buff
*skb
, struct tx_ring_desc
*tx_ring_desc
)
1250 int len
= skb_headlen(skb
);
1252 int frag_idx
, err
, map_idx
= 0;
1253 struct tx_buf_desc
*tbd
= mac_iocb_ptr
->tbd
;
1254 int frag_cnt
= skb_shinfo(skb
)->nr_frags
;
1257 QPRINTK(qdev
, TX_QUEUED
, DEBUG
, "frag_cnt = %d.\n", frag_cnt
);
1260 * Map the skb buffer first.
1262 map
= pci_map_single(qdev
->pdev
, skb
->data
, len
, PCI_DMA_TODEVICE
);
1264 err
= pci_dma_mapping_error(qdev
->pdev
, map
);
1266 QPRINTK(qdev
, TX_QUEUED
, ERR
,
1267 "PCI mapping failed with error: %d\n", err
);
1269 return NETDEV_TX_BUSY
;
1272 tbd
->len
= cpu_to_le32(len
);
1273 tbd
->addr
= cpu_to_le64(map
);
1274 pci_unmap_addr_set(&tx_ring_desc
->map
[map_idx
], mapaddr
, map
);
1275 pci_unmap_len_set(&tx_ring_desc
->map
[map_idx
], maplen
, len
);
1279 * This loop fills the remainder of the 8 address descriptors
1280 * in the IOCB. If there are more than 7 fragments, then the
1281 * eighth address desc will point to an external list (OAL).
1282 * When this happens, the remainder of the frags will be stored
1285 for (frag_idx
= 0; frag_idx
< frag_cnt
; frag_idx
++, map_idx
++) {
1286 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[frag_idx
];
1288 if (frag_idx
== 6 && frag_cnt
> 7) {
1289 /* Let's tack on an sglist.
1290 * Our control block will now
1292 * iocb->seg[0] = skb->data
1293 * iocb->seg[1] = frag[0]
1294 * iocb->seg[2] = frag[1]
1295 * iocb->seg[3] = frag[2]
1296 * iocb->seg[4] = frag[3]
1297 * iocb->seg[5] = frag[4]
1298 * iocb->seg[6] = frag[5]
1299 * iocb->seg[7] = ptr to OAL (external sglist)
1300 * oal->seg[0] = frag[6]
1301 * oal->seg[1] = frag[7]
1302 * oal->seg[2] = frag[8]
1303 * oal->seg[3] = frag[9]
1304 * oal->seg[4] = frag[10]
1307 /* Tack on the OAL in the eighth segment of IOCB. */
1308 map
= pci_map_single(qdev
->pdev
, &tx_ring_desc
->oal
,
1311 err
= pci_dma_mapping_error(qdev
->pdev
, map
);
1313 QPRINTK(qdev
, TX_QUEUED
, ERR
,
1314 "PCI mapping outbound address list with error: %d\n",
1319 tbd
->addr
= cpu_to_le64(map
);
1321 * The length is the number of fragments
1322 * that remain to be mapped times the length
1323 * of our sglist (OAL).
1326 cpu_to_le32((sizeof(struct tx_buf_desc
) *
1327 (frag_cnt
- frag_idx
)) | TX_DESC_C
);
1328 pci_unmap_addr_set(&tx_ring_desc
->map
[map_idx
], mapaddr
,
1330 pci_unmap_len_set(&tx_ring_desc
->map
[map_idx
], maplen
,
1331 sizeof(struct oal
));
1332 tbd
= (struct tx_buf_desc
*)&tx_ring_desc
->oal
;
1337 pci_map_page(qdev
->pdev
, frag
->page
,
1338 frag
->page_offset
, frag
->size
,
1341 err
= pci_dma_mapping_error(qdev
->pdev
, map
);
1343 QPRINTK(qdev
, TX_QUEUED
, ERR
,
1344 "PCI mapping frags failed with error: %d.\n",
1349 tbd
->addr
= cpu_to_le64(map
);
1350 tbd
->len
= cpu_to_le32(frag
->size
);
1351 pci_unmap_addr_set(&tx_ring_desc
->map
[map_idx
], mapaddr
, map
);
1352 pci_unmap_len_set(&tx_ring_desc
->map
[map_idx
], maplen
,
1356 /* Save the number of segments we've mapped. */
1357 tx_ring_desc
->map_cnt
= map_idx
;
1358 /* Terminate the last segment. */
1359 tbd
->len
= cpu_to_le32(le32_to_cpu(tbd
->len
) | TX_DESC_E
);
1360 return NETDEV_TX_OK
;
1364 * If the first frag mapping failed, then i will be zero.
1365 * This causes the unmap of the skb->data area. Otherwise
1366 * we pass in the number of frags that mapped successfully
1367 * so they can be umapped.
1369 ql_unmap_send(qdev
, tx_ring_desc
, map_idx
);
1370 return NETDEV_TX_BUSY
;
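
/*
 * Descriptor layout recap (illustrative): an outbound IOCB carries eight
 * tx_buf_desc entries.  Slot 0 maps skb->data, slots 1..6 map the first
 * six page fragments, and slot 7 either maps the seventh fragment or, for
 * skbs with more than seven fragments, points at the per-descriptor OAL
 * (tx_ring_desc->oal) holding the remaining fragments.  The continuation
 * entry is flagged with TX_DESC_C and the last entry used is terminated
 * with TX_DESC_E.
 */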
static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb_copy_to_linear_data(skb, temp_addr,
		(unsigned int)len);
}

/*
 * This function builds an skb for the given inbound
 * completion.  It will be rewritten for readability in the near
 * future, but for now it works well.
 */
1392 static struct sk_buff
*ql_build_rx_skb(struct ql_adapter
*qdev
,
1393 struct rx_ring
*rx_ring
,
1394 struct ib_mac_iocb_rsp
*ib_mac_rsp
)
1396 struct bq_desc
*lbq_desc
;
1397 struct bq_desc
*sbq_desc
;
1398 struct sk_buff
*skb
= NULL
;
1399 u32 length
= le32_to_cpu(ib_mac_rsp
->data_len
);
1400 u32 hdr_len
= le32_to_cpu(ib_mac_rsp
->hdr_len
);
1403 * Handle the header buffer if present.
1405 if (ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HV
&&
1406 ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HS
) {
1407 QPRINTK(qdev
, RX_STATUS
, DEBUG
, "Header of %d bytes in small buffer.\n", hdr_len
);
1409 * Headers fit nicely into a small buffer.
1411 sbq_desc
= ql_get_curr_sbuf(rx_ring
);
1412 pci_unmap_single(qdev
->pdev
,
1413 pci_unmap_addr(sbq_desc
, mapaddr
),
1414 pci_unmap_len(sbq_desc
, maplen
),
1415 PCI_DMA_FROMDEVICE
);
1416 skb
= sbq_desc
->p
.skb
;
1417 ql_realign_skb(skb
, hdr_len
);
1418 skb_put(skb
, hdr_len
);
1419 sbq_desc
->p
.skb
= NULL
;
1423 * Handle the data buffer(s).
1425 if (unlikely(!length
)) { /* Is there data too? */
1426 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1427 "No Data buffer in this packet.\n");
1431 if (ib_mac_rsp
->flags3
& IB_MAC_IOCB_RSP_DS
) {
1432 if (ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HS
) {
1433 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1434 "Headers in small, data of %d bytes in small, combine them.\n", length
);
1436 * Data is less than small buffer size so it's
1437 * stuffed in a small buffer.
1438 * For this case we append the data
1439 * from the "data" small buffer to the "header" small
1442 sbq_desc
= ql_get_curr_sbuf(rx_ring
);
1443 pci_dma_sync_single_for_cpu(qdev
->pdev
,
1445 (sbq_desc
, mapaddr
),
1448 PCI_DMA_FROMDEVICE
);
1449 memcpy(skb_put(skb
, length
),
1450 sbq_desc
->p
.skb
->data
, length
);
1451 pci_dma_sync_single_for_device(qdev
->pdev
,
1458 PCI_DMA_FROMDEVICE
);
1460 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1461 "%d bytes in a single small buffer.\n", length
);
1462 sbq_desc
= ql_get_curr_sbuf(rx_ring
);
1463 skb
= sbq_desc
->p
.skb
;
1464 ql_realign_skb(skb
, length
);
1465 skb_put(skb
, length
);
1466 pci_unmap_single(qdev
->pdev
,
1467 pci_unmap_addr(sbq_desc
,
1469 pci_unmap_len(sbq_desc
,
1471 PCI_DMA_FROMDEVICE
);
1472 sbq_desc
->p
.skb
= NULL
;
1474 } else if (ib_mac_rsp
->flags3
& IB_MAC_IOCB_RSP_DL
) {
1475 if (ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HS
) {
1476 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1477 "Header in small, %d bytes in large. Chain large to small!\n", length
);
1479 * The data is in a single large buffer. We
1480 * chain it to the header buffer's skb and let
1483 lbq_desc
= ql_get_curr_lbuf(rx_ring
);
1484 pci_unmap_page(qdev
->pdev
,
1485 pci_unmap_addr(lbq_desc
,
1487 pci_unmap_len(lbq_desc
, maplen
),
1488 PCI_DMA_FROMDEVICE
);
1489 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1490 "Chaining page to skb.\n");
1491 skb_fill_page_desc(skb
, 0, lbq_desc
->p
.lbq_page
,
1494 skb
->data_len
+= length
;
1495 skb
->truesize
+= length
;
1496 lbq_desc
->p
.lbq_page
= NULL
;
1499 * The headers and data are in a single large buffer. We
1500 * copy it to a new skb and let it go. This can happen with
1501 * jumbo mtu on a non-TCP/UDP frame.
1503 lbq_desc
= ql_get_curr_lbuf(rx_ring
);
1504 skb
= netdev_alloc_skb(qdev
->ndev
, length
);
1506 QPRINTK(qdev
, PROBE
, DEBUG
,
1507 "No skb available, drop the packet.\n");
1510 pci_unmap_page(qdev
->pdev
,
1511 pci_unmap_addr(lbq_desc
,
1513 pci_unmap_len(lbq_desc
, maplen
),
1514 PCI_DMA_FROMDEVICE
);
1515 skb_reserve(skb
, NET_IP_ALIGN
);
1516 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1517 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length
);
1518 skb_fill_page_desc(skb
, 0, lbq_desc
->p
.lbq_page
,
1521 skb
->data_len
+= length
;
1522 skb
->truesize
+= length
;
1524 lbq_desc
->p
.lbq_page
= NULL
;
1525 __pskb_pull_tail(skb
,
1526 (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_V
) ?
1527 VLAN_ETH_HLEN
: ETH_HLEN
);
1531 * The data is in a chain of large buffers
1532 * pointed to by a small buffer. We loop
		 * through and chain them to our small header
1535 * frags: There are 18 max frags and our small
1536 * buffer will hold 32 of them. The thing is,
1537 * we'll use 3 max for our 9000 byte jumbo
1538 * frames. If the MTU goes up we could
1539 * eventually be in trouble.
1541 int size
, offset
, i
= 0;
1542 __le64
*bq
, bq_array
[8];
1543 sbq_desc
= ql_get_curr_sbuf(rx_ring
);
1544 pci_unmap_single(qdev
->pdev
,
1545 pci_unmap_addr(sbq_desc
, mapaddr
),
1546 pci_unmap_len(sbq_desc
, maplen
),
1547 PCI_DMA_FROMDEVICE
);
1548 if (!(ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HS
)) {
			 * This is a non-TCP/UDP IP frame, so
1551 * the headers aren't split into a small
1552 * buffer. We have to use the small buffer
1553 * that contains our sg list as our skb to
1554 * send upstairs. Copy the sg list here to
1555 * a local buffer and use it to find the
1558 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1559 "%d bytes of headers & data in chain of large.\n", length
);
1560 skb
= sbq_desc
->p
.skb
;
1562 memcpy(bq
, skb
->data
, sizeof(bq_array
));
1563 sbq_desc
->p
.skb
= NULL
;
1564 skb_reserve(skb
, NET_IP_ALIGN
);
1566 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1567 "Headers in small, %d bytes of data in chain of large.\n", length
);
1568 bq
= (__le64
*)sbq_desc
->p
.skb
->data
;
1570 while (length
> 0) {
1571 lbq_desc
= ql_get_curr_lbuf(rx_ring
);
1572 pci_unmap_page(qdev
->pdev
,
1573 pci_unmap_addr(lbq_desc
,
1575 pci_unmap_len(lbq_desc
,
1577 PCI_DMA_FROMDEVICE
);
1578 size
= (length
< PAGE_SIZE
) ? length
: PAGE_SIZE
;
1581 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1582 "Adding page %d to skb for %d bytes.\n",
1584 skb_fill_page_desc(skb
, i
, lbq_desc
->p
.lbq_page
,
1587 skb
->data_len
+= size
;
1588 skb
->truesize
+= size
;
1590 lbq_desc
->p
.lbq_page
= NULL
;
1594 __pskb_pull_tail(skb
, (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_V
) ?
1595 VLAN_ETH_HLEN
: ETH_HLEN
);
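
/*
 * Summary of the receive-buffer cases handled above (sketch): a header
 * split into a small buffer with (a) no data, (b) data in a second small
 * buffer, (c) data in one large buffer chained on as a page fragment, or
 * (d) data spread over a chain of large buffers whose addresses are listed
 * in a small buffer; plus the non-split case where headers and data share
 * a single large buffer and are attached to a fresh skb and pulled.
 */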
1600 /* Process an inbound completion from an rx ring. */
1601 static void ql_process_mac_rx_intr(struct ql_adapter
*qdev
,
1602 struct rx_ring
*rx_ring
,
1603 struct ib_mac_iocb_rsp
*ib_mac_rsp
)
1605 struct net_device
*ndev
= qdev
->ndev
;
1606 struct sk_buff
*skb
= NULL
;
1607 u16 vlan_id
= (le16_to_cpu(ib_mac_rsp
->vlan_id
) &
1608 IB_MAC_IOCB_RSP_VLAN_MASK
)
1610 QL_DUMP_IB_MAC_RSP(ib_mac_rsp
);
1612 skb
= ql_build_rx_skb(qdev
, rx_ring
, ib_mac_rsp
);
1613 if (unlikely(!skb
)) {
1614 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1615 "No skb available, drop packet.\n");
1619 /* Frame error, so drop the packet. */
1620 if (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_ERR_MASK
) {
1621 QPRINTK(qdev
, DRV
, ERR
, "Receive error, flags2 = 0x%x\n",
1622 ib_mac_rsp
->flags2
);
1623 dev_kfree_skb_any(skb
);
1627 /* The max framesize filter on this chip is set higher than
1628 * MTU since FCoE uses 2k frames.
1630 if (skb
->len
> ndev
->mtu
+ ETH_HLEN
) {
1631 dev_kfree_skb_any(skb
);
1635 prefetch(skb
->data
);
1637 if (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) {
1638 QPRINTK(qdev
, RX_STATUS
, DEBUG
, "%s%s%s Multicast.\n",
1639 (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) ==
1640 IB_MAC_IOCB_RSP_M_HASH
? "Hash" : "",
1641 (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) ==
1642 IB_MAC_IOCB_RSP_M_REG
? "Registered" : "",
1643 (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) ==
1644 IB_MAC_IOCB_RSP_M_PROM
? "Promiscuous" : "");
1646 if (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_P
) {
1647 QPRINTK(qdev
, RX_STATUS
, DEBUG
, "Promiscuous Packet.\n");
1650 skb
->protocol
= eth_type_trans(skb
, ndev
);
1651 skb
->ip_summed
= CHECKSUM_NONE
;
1653 /* If rx checksum is on, and there are no
1654 * csum or frame errors.
1656 if (qdev
->rx_csum
&&
1657 !(ib_mac_rsp
->flags1
& IB_MAC_CSUM_ERR_MASK
)) {
1659 if (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_T
) {
1660 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1661 "TCP checksum done!\n");
1662 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1663 } else if ((ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_U
) &&
1664 (ib_mac_rsp
->flags3
& IB_MAC_IOCB_RSP_V4
)) {
1665 /* Unfragmented ipv4 UDP frame. */
1666 struct iphdr
*iph
= (struct iphdr
*) skb
->data
;
1667 if (!(iph
->frag_off
&
1668 cpu_to_be16(IP_MF
|IP_OFFSET
))) {
1669 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1670 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1671 "TCP checksum done!\n");
1676 qdev
->stats
.rx_packets
++;
1677 qdev
->stats
.rx_bytes
+= skb
->len
;
1678 skb_record_rx_queue(skb
, rx_ring
->cq_id
);
1679 if (skb
->ip_summed
== CHECKSUM_UNNECESSARY
) {
1681 (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_V
) &&
1683 vlan_gro_receive(&rx_ring
->napi
, qdev
->vlgrp
,
1686 napi_gro_receive(&rx_ring
->napi
, skb
);
1689 (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_V
) &&
1691 vlan_hwaccel_receive_skb(skb
, qdev
->vlgrp
, vlan_id
);
1693 netif_receive_skb(skb
);
1697 /* Process an outbound completion from an rx ring. */
1698 static void ql_process_mac_tx_intr(struct ql_adapter
*qdev
,
1699 struct ob_mac_iocb_rsp
*mac_rsp
)
1701 struct tx_ring
*tx_ring
;
1702 struct tx_ring_desc
*tx_ring_desc
;
1704 QL_DUMP_OB_MAC_RSP(mac_rsp
);
1705 tx_ring
= &qdev
->tx_ring
[mac_rsp
->txq_idx
];
1706 tx_ring_desc
= &tx_ring
->q
[mac_rsp
->tid
];
1707 ql_unmap_send(qdev
, tx_ring_desc
, tx_ring_desc
->map_cnt
);
1708 qdev
->stats
.tx_bytes
+= (tx_ring_desc
->skb
)->len
;
1709 qdev
->stats
.tx_packets
++;
1710 dev_kfree_skb(tx_ring_desc
->skb
);
1711 tx_ring_desc
->skb
= NULL
;
1713 if (unlikely(mac_rsp
->flags1
& (OB_MAC_IOCB_RSP_E
|
1716 OB_MAC_IOCB_RSP_P
| OB_MAC_IOCB_RSP_B
))) {
1717 if (mac_rsp
->flags1
& OB_MAC_IOCB_RSP_E
) {
1718 QPRINTK(qdev
, TX_DONE
, WARNING
,
1719 "Total descriptor length did not match transfer length.\n");
1721 if (mac_rsp
->flags1
& OB_MAC_IOCB_RSP_S
) {
1722 QPRINTK(qdev
, TX_DONE
, WARNING
,
1723 "Frame too short to be legal, not sent.\n");
1725 if (mac_rsp
->flags1
& OB_MAC_IOCB_RSP_L
) {
1726 QPRINTK(qdev
, TX_DONE
, WARNING
,
1727 "Frame too long, but sent anyway.\n");
1729 if (mac_rsp
->flags1
& OB_MAC_IOCB_RSP_B
) {
1730 QPRINTK(qdev
, TX_DONE
, WARNING
,
1731 "PCI backplane error. Frame not sent.\n");
1734 atomic_inc(&tx_ring
->tx_count
);
1737 /* Fire up a handler to reset the MPI processor. */
1738 void ql_queue_fw_error(struct ql_adapter
*qdev
)
1741 queue_delayed_work(qdev
->workqueue
, &qdev
->mpi_reset_work
, 0);
1744 void ql_queue_asic_error(struct ql_adapter
*qdev
)
1747 ql_disable_interrupts(qdev
);
1748 /* Clear adapter up bit to signal the recovery
1749 * process that it shouldn't kill the reset worker
1752 clear_bit(QL_ADAPTER_UP
, &qdev
->flags
);
1753 queue_delayed_work(qdev
->workqueue
, &qdev
->asic_reset_work
, 0);
1756 static void ql_process_chip_ae_intr(struct ql_adapter
*qdev
,
1757 struct ib_ae_iocb_rsp
*ib_ae_rsp
)
1759 switch (ib_ae_rsp
->event
) {
1760 case MGMT_ERR_EVENT
:
1761 QPRINTK(qdev
, RX_ERR
, ERR
,
1762 "Management Processor Fatal Error.\n");
1763 ql_queue_fw_error(qdev
);
1766 case CAM_LOOKUP_ERR_EVENT
:
1767 QPRINTK(qdev
, LINK
, ERR
,
1768 "Multiple CAM hits lookup occurred.\n");
1769 QPRINTK(qdev
, DRV
, ERR
, "This event shouldn't occur.\n");
1770 ql_queue_asic_error(qdev
);
1773 case SOFT_ECC_ERROR_EVENT
:
1774 QPRINTK(qdev
, RX_ERR
, ERR
, "Soft ECC error detected.\n");
1775 ql_queue_asic_error(qdev
);
1778 case PCI_ERR_ANON_BUF_RD
:
1779 QPRINTK(qdev
, RX_ERR
, ERR
,
1780 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
1782 ql_queue_asic_error(qdev
);
1786 QPRINTK(qdev
, DRV
, ERR
, "Unexpected event %d.\n",
1788 ql_queue_asic_error(qdev
);
1793 static int ql_clean_outbound_rx_ring(struct rx_ring
*rx_ring
)
1795 struct ql_adapter
*qdev
= rx_ring
->qdev
;
1796 u32 prod
= ql_read_sh_reg(rx_ring
->prod_idx_sh_reg
);
1797 struct ob_mac_iocb_rsp
*net_rsp
= NULL
;
1800 struct tx_ring
*tx_ring
;
1801 /* While there are entries in the completion queue. */
1802 while (prod
!= rx_ring
->cnsmr_idx
) {
1804 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1805 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring
->cq_id
,
1806 prod
, rx_ring
->cnsmr_idx
);
1808 net_rsp
= (struct ob_mac_iocb_rsp
*)rx_ring
->curr_entry
;
1810 switch (net_rsp
->opcode
) {
1812 case OPCODE_OB_MAC_TSO_IOCB
:
1813 case OPCODE_OB_MAC_IOCB
:
1814 ql_process_mac_tx_intr(qdev
, net_rsp
);
1817 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1818 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1822 ql_update_cq(rx_ring
);
1823 prod
= ql_read_sh_reg(rx_ring
->prod_idx_sh_reg
);
1825 ql_write_cq_idx(rx_ring
);
1826 tx_ring
= &qdev
->tx_ring
[net_rsp
->txq_idx
];
1827 if (__netif_subqueue_stopped(qdev
->ndev
, tx_ring
->wq_id
) &&
1829 if (atomic_read(&tx_ring
->queue_stopped
) &&
1830 (atomic_read(&tx_ring
->tx_count
) > (tx_ring
->wq_len
/ 4)))
1832 * The queue got stopped because the tx_ring was full.
1833 * Wake it up, because it's now at least 25% empty.
1835 netif_wake_subqueue(qdev
->ndev
, tx_ring
->wq_id
);
1841 static int ql_clean_inbound_rx_ring(struct rx_ring
*rx_ring
, int budget
)
1843 struct ql_adapter
*qdev
= rx_ring
->qdev
;
1844 u32 prod
= ql_read_sh_reg(rx_ring
->prod_idx_sh_reg
);
1845 struct ql_net_rsp_iocb
*net_rsp
;
1848 /* While there are entries in the completion queue. */
1849 while (prod
!= rx_ring
->cnsmr_idx
) {
1851 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1852 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring
->cq_id
,
1853 prod
, rx_ring
->cnsmr_idx
);
1855 net_rsp
= rx_ring
->curr_entry
;
1857 switch (net_rsp
->opcode
) {
1858 case OPCODE_IB_MAC_IOCB
:
1859 ql_process_mac_rx_intr(qdev
, rx_ring
,
1860 (struct ib_mac_iocb_rsp
*)
1864 case OPCODE_IB_AE_IOCB
:
1865 ql_process_chip_ae_intr(qdev
, (struct ib_ae_iocb_rsp
*)
1870 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1871 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1876 ql_update_cq(rx_ring
);
1877 prod
= ql_read_sh_reg(rx_ring
->prod_idx_sh_reg
);
1878 if (count
== budget
)
1881 ql_update_buffer_queues(qdev
, rx_ring
);
1882 ql_write_cq_idx(rx_ring
);
1886 static int ql_napi_poll_msix(struct napi_struct
*napi
, int budget
)
1888 struct rx_ring
*rx_ring
= container_of(napi
, struct rx_ring
, napi
);
1889 struct ql_adapter
*qdev
= rx_ring
->qdev
;
1890 struct rx_ring
*trx_ring
;
1891 int i
, work_done
= 0;
1892 struct intr_context
*ctx
= &qdev
->intr_context
[rx_ring
->cq_id
];
1894 QPRINTK(qdev
, RX_STATUS
, DEBUG
, "Enter, NAPI POLL cq_id = %d.\n",
1897 /* Service the TX rings first. They start
1898 * right after the RSS rings. */
1899 for (i
= qdev
->rss_ring_count
; i
< qdev
->rx_ring_count
; i
++) {
1900 trx_ring
= &qdev
->rx_ring
[i
];
1901 /* If this TX completion ring belongs to this vector and
1902 * it's not empty then service it.
1904 if ((ctx
->irq_mask
& (1 << trx_ring
->cq_id
)) &&
1905 (ql_read_sh_reg(trx_ring
->prod_idx_sh_reg
) !=
1906 trx_ring
->cnsmr_idx
)) {
1907 QPRINTK(qdev
, INTR
, DEBUG
,
1908 "%s: Servicing TX completion ring %d.\n",
1909 __func__
, trx_ring
->cq_id
);
1910 ql_clean_outbound_rx_ring(trx_ring
);
1915 * Now service the RSS ring if it's active.
1917 if (ql_read_sh_reg(rx_ring
->prod_idx_sh_reg
) !=
1918 rx_ring
->cnsmr_idx
) {
1919 QPRINTK(qdev
, INTR
, DEBUG
,
1920 "%s: Servicing RX completion ring %d.\n",
1921 __func__
, rx_ring
->cq_id
);
1922 work_done
= ql_clean_inbound_rx_ring(rx_ring
, budget
);
1925 if (work_done
< budget
) {
1926 napi_complete(napi
);
1927 ql_enable_completion_interrupt(qdev
, rx_ring
->irq
);
1932 static void ql_vlan_rx_register(struct net_device
*ndev
, struct vlan_group
*grp
)
1934 struct ql_adapter
*qdev
= netdev_priv(ndev
);
1938 QPRINTK(qdev
, IFUP
, DEBUG
, "Turning on VLAN in NIC_RCV_CFG.\n");
1939 ql_write32(qdev
, NIC_RCV_CFG
, NIC_RCV_CFG_VLAN_MASK
|
1940 NIC_RCV_CFG_VLAN_MATCH_AND_NON
);
1942 QPRINTK(qdev
, IFUP
, DEBUG
,
1943 "Turning off VLAN in NIC_RCV_CFG.\n");
1944 ql_write32(qdev
, NIC_RCV_CFG
, NIC_RCV_CFG_VLAN_MASK
);
1948 static void ql_vlan_rx_add_vid(struct net_device
*ndev
, u16 vid
)
1950 struct ql_adapter
*qdev
= netdev_priv(ndev
);
1951 u32 enable_bit
= MAC_ADDR_E
;
1954 status
= ql_sem_spinlock(qdev
, SEM_MAC_ADDR_MASK
);
1957 if (ql_set_mac_addr_reg
1958 (qdev
, (u8
*) &enable_bit
, MAC_ADDR_TYPE_VLAN
, vid
)) {
1959 QPRINTK(qdev
, IFUP
, ERR
, "Failed to init vlan address.\n");
1961 ql_sem_unlock(qdev
, SEM_MAC_ADDR_MASK
);
1964 static void ql_vlan_rx_kill_vid(struct net_device
*ndev
, u16 vid
)
1966 struct ql_adapter
*qdev
= netdev_priv(ndev
);
1970 status
= ql_sem_spinlock(qdev
, SEM_MAC_ADDR_MASK
);
1974 if (ql_set_mac_addr_reg
1975 (qdev
, (u8
*) &enable_bit
, MAC_ADDR_TYPE_VLAN
, vid
)) {
1976 QPRINTK(qdev
, IFUP
, ERR
, "Failed to clear vlan address.\n");
1978 ql_sem_unlock(qdev
, SEM_MAC_ADDR_MASK
);
1982 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
1983 static irqreturn_t
qlge_msix_rx_isr(int irq
, void *dev_id
)
1985 struct rx_ring
*rx_ring
= dev_id
;
1986 napi_schedule(&rx_ring
->napi
);
1990 /* This handles a fatal error, MPI activity, and the default
1991 * rx_ring in an MSI-X multiple vector environment.
1992 * In MSI/Legacy environment it also process the rest of
1995 static irqreturn_t
qlge_isr(int irq
, void *dev_id
)
1997 struct rx_ring
*rx_ring
= dev_id
;
1998 struct ql_adapter
*qdev
= rx_ring
->qdev
;
1999 struct intr_context
*intr_context
= &qdev
->intr_context
[0];
2003 spin_lock(&qdev
->hw_lock
);
2004 if (atomic_read(&qdev
->intr_context
[0].irq_cnt
)) {
2005 QPRINTK(qdev
, INTR
, DEBUG
, "Shared Interrupt, Not ours!\n");
2006 spin_unlock(&qdev
->hw_lock
);
2009 spin_unlock(&qdev
->hw_lock
);
2011 var
= ql_disable_completion_interrupt(qdev
, intr_context
->intr
);
2014 * Check for fatal error.
2017 ql_queue_asic_error(qdev
);
2018 QPRINTK(qdev
, INTR
, ERR
, "Got fatal error, STS = %x.\n", var
);
2019 var
= ql_read32(qdev
, ERR_STS
);
2020 QPRINTK(qdev
, INTR
, ERR
,
2021 "Resetting chip. Error Status Register = 0x%x\n", var
);
2026 * Check MPI processor activity.
2028 if ((var
& STS_PI
) &&
2029 (ql_read32(qdev
, INTR_MASK
) & INTR_MASK_PI
)) {
2031 * We've got an async event or mailbox completion.
2032 * Handle it and clear the source of the interrupt.
2034 QPRINTK(qdev
, INTR
, ERR
, "Got MPI processor interrupt.\n");
2035 ql_disable_completion_interrupt(qdev
, intr_context
->intr
);
2036 ql_write32(qdev
, INTR_MASK
, (INTR_MASK_PI
<< 16));
2037 queue_delayed_work_on(smp_processor_id(),
2038 qdev
->workqueue
, &qdev
->mpi_work
, 0);
2043 * Get the bit-mask that shows the active queues for this
2044 * pass. Compare it to the queues that this irq services
2045 * and call napi if there's a match.
2047 var
= ql_read32(qdev
, ISR1
);
2048 if (var
& intr_context
->irq_mask
) {
2049 QPRINTK(qdev
, INTR
, INFO
,
2050 "Waking handler for rx_ring[0].\n");
2051 ql_disable_completion_interrupt(qdev
, intr_context
->intr
);
2052 napi_schedule(&rx_ring
->napi
);
2055 ql_enable_completion_interrupt(qdev
, intr_context
->intr
);
2056 return work_done
? IRQ_HANDLED
: IRQ_NONE
;
2059 static int ql_tso(struct sk_buff
*skb
, struct ob_mac_tso_iocb_req
*mac_iocb_ptr
)
2062 if (skb_is_gso(skb
)) {
2064 if (skb_header_cloned(skb
)) {
2065 err
= pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
);
2070 mac_iocb_ptr
->opcode
= OPCODE_OB_MAC_TSO_IOCB
;
2071 mac_iocb_ptr
->flags3
|= OB_MAC_TSO_IOCB_IC
;
2072 mac_iocb_ptr
->frame_len
= cpu_to_le32((u32
) skb
->len
);
2073 mac_iocb_ptr
->total_hdrs_len
=
2074 cpu_to_le16(skb_transport_offset(skb
) + tcp_hdrlen(skb
));
2075 mac_iocb_ptr
->net_trans_offset
=
2076 cpu_to_le16(skb_network_offset(skb
) |
2077 skb_transport_offset(skb
)
2078 << OB_MAC_TRANSPORT_HDR_SHIFT
);
2079 mac_iocb_ptr
->mss
= cpu_to_le16(skb_shinfo(skb
)->gso_size
);
2080 mac_iocb_ptr
->flags2
|= OB_MAC_TSO_IOCB_LSO
;
2081 if (likely(skb
->protocol
== htons(ETH_P_IP
))) {
2082 struct iphdr
*iph
= ip_hdr(skb
);
2084 mac_iocb_ptr
->flags1
|= OB_MAC_TSO_IOCB_IP4
;
2085 tcp_hdr(skb
)->check
= ~csum_tcpudp_magic(iph
->saddr
,
2089 } else if (skb
->protocol
== htons(ETH_P_IPV6
)) {
2090 mac_iocb_ptr
->flags1
|= OB_MAC_TSO_IOCB_IP6
;
2091 tcp_hdr(skb
)->check
=
2092 ~csum_ipv6_magic(&ipv6_hdr(skb
)->saddr
,
2093 &ipv6_hdr(skb
)->daddr
,
2101 static void ql_hw_csum_setup(struct sk_buff
*skb
,
2102 struct ob_mac_tso_iocb_req
*mac_iocb_ptr
)
2105 struct iphdr
*iph
= ip_hdr(skb
);
2107 mac_iocb_ptr
->opcode
= OPCODE_OB_MAC_TSO_IOCB
;
2108 mac_iocb_ptr
->frame_len
= cpu_to_le32((u32
) skb
->len
);
2109 mac_iocb_ptr
->net_trans_offset
=
2110 cpu_to_le16(skb_network_offset(skb
) |
2111 skb_transport_offset(skb
) << OB_MAC_TRANSPORT_HDR_SHIFT
);
2113 mac_iocb_ptr
->flags1
|= OB_MAC_TSO_IOCB_IP4
;
2114 len
= (ntohs(iph
->tot_len
) - (iph
->ihl
<< 2));
2115 if (likely(iph
->protocol
== IPPROTO_TCP
)) {
2116 check
= &(tcp_hdr(skb
)->check
);
2117 mac_iocb_ptr
->flags2
|= OB_MAC_TSO_IOCB_TC
;
2118 mac_iocb_ptr
->total_hdrs_len
=
2119 cpu_to_le16(skb_transport_offset(skb
) +
2120 (tcp_hdr(skb
)->doff
<< 2));
2122 check
= &(udp_hdr(skb
)->check
);
2123 mac_iocb_ptr
->flags2
|= OB_MAC_TSO_IOCB_UC
;
2124 mac_iocb_ptr
->total_hdrs_len
=
2125 cpu_to_le16(skb_transport_offset(skb
) +
2126 sizeof(struct udphdr
));
2128 *check
= ~csum_tcpudp_magic(iph
->saddr
,
2129 iph
->daddr
, len
, iph
->protocol
, 0);
2132 static netdev_tx_t
qlge_send(struct sk_buff
*skb
, struct net_device
*ndev
)
2134 struct tx_ring_desc
*tx_ring_desc
;
2135 struct ob_mac_iocb_req
*mac_iocb_ptr
;
2136 struct ql_adapter
*qdev
= netdev_priv(ndev
);
2138 struct tx_ring
*tx_ring
;
2139 u32 tx_ring_idx
= (u32
) skb
->queue_mapping
;
2141 tx_ring
= &qdev
->tx_ring
[tx_ring_idx
];
2143 if (skb_padto(skb
, ETH_ZLEN
))
2144 return NETDEV_TX_OK
;
2146 if (unlikely(atomic_read(&tx_ring
->tx_count
) < 2)) {
2147 QPRINTK(qdev
, TX_QUEUED
, INFO
,
			"%s: shutting down tx queue %d due to lack of resources.\n",
2149 __func__
, tx_ring_idx
);
2150 netif_stop_subqueue(ndev
, tx_ring
->wq_id
);
2151 atomic_inc(&tx_ring
->queue_stopped
);
2152 return NETDEV_TX_BUSY
;
2154 tx_ring_desc
= &tx_ring
->q
[tx_ring
->prod_idx
];
2155 mac_iocb_ptr
= tx_ring_desc
->queue_entry
;
2156 memset((void *)mac_iocb_ptr
, 0, sizeof(*mac_iocb_ptr
));
2158 mac_iocb_ptr
->opcode
= OPCODE_OB_MAC_IOCB
;
2159 mac_iocb_ptr
->tid
= tx_ring_desc
->index
;
2160 /* We use the upper 32-bits to store the tx queue for this IO.
2161 * When we get the completion we can use it to establish the context.
2163 mac_iocb_ptr
->txq_idx
= tx_ring_idx
;
2164 tx_ring_desc
->skb
= skb
;
2166 mac_iocb_ptr
->frame_len
= cpu_to_le16((u16
) skb
->len
);
2168 if (qdev
->vlgrp
&& vlan_tx_tag_present(skb
)) {
2169 QPRINTK(qdev
, TX_QUEUED
, DEBUG
, "Adding a vlan tag %d.\n",
2170 vlan_tx_tag_get(skb
));
2171 mac_iocb_ptr
->flags3
|= OB_MAC_IOCB_V
;
2172 mac_iocb_ptr
->vlan_tci
= cpu_to_le16(vlan_tx_tag_get(skb
));
2174 tso
= ql_tso(skb
, (struct ob_mac_tso_iocb_req
*)mac_iocb_ptr
);
2176 dev_kfree_skb_any(skb
);
2177 return NETDEV_TX_OK
;
2178 } else if (unlikely(!tso
) && (skb
->ip_summed
== CHECKSUM_PARTIAL
)) {
2179 ql_hw_csum_setup(skb
,
2180 (struct ob_mac_tso_iocb_req
*)mac_iocb_ptr
);
2182 if (ql_map_send(qdev
, mac_iocb_ptr
, skb
, tx_ring_desc
) !=
2184 QPRINTK(qdev
, TX_QUEUED
, ERR
,
2185 "Could not map the segments.\n");
2186 return NETDEV_TX_BUSY
;
2188 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr
);
2189 tx_ring
->prod_idx
++;
2190 if (tx_ring
->prod_idx
== tx_ring
->wq_len
)
2191 tx_ring
->prod_idx
= 0;
2194 ql_write_db_reg(tx_ring
->prod_idx
, tx_ring
->prod_idx_db_reg
);
2195 QPRINTK(qdev
, TX_QUEUED
, DEBUG
, "tx queued, slot %d, len %d\n",
2196 tx_ring
->prod_idx
, skb
->len
);
2198 atomic_dec(&tx_ring
->tx_count
);
2199 return NETDEV_TX_OK
;
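/* The shadow register areas hold the producer/consumer indices and the
 * buffer queue base-address lists that the chip DMAs to and from host
 * memory.  One page each is used for the rx and tx sides.
 */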
static void ql_free_shadow_space(struct ql_adapter *qdev)
{
	if (qdev->rx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->rx_ring_shadow_reg_area,
				    qdev->rx_ring_shadow_reg_dma);
		qdev->rx_ring_shadow_reg_area = NULL;
	}
	if (qdev->tx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->tx_ring_shadow_reg_area,
				    qdev->tx_ring_shadow_reg_dma);
		qdev->tx_ring_shadow_reg_area = NULL;
	}
}
static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
	qdev->rx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev,
				 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
	if (qdev->rx_ring_shadow_reg_area == NULL) {
		QPRINTK(qdev, IFUP, ERR,
			"Allocation of RX shadow space failed.\n");
		return -ENOMEM;
	}
	memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
	qdev->tx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
				 &qdev->tx_ring_shadow_reg_dma);
	if (qdev->tx_ring_shadow_reg_area == NULL) {
		QPRINTK(qdev, IFUP, ERR,
			"Allocation of TX shadow space failed.\n");
		goto err_wqp_sh_area;
	}
	memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
	return 0;

err_wqp_sh_area:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->rx_ring_shadow_reg_area,
			    qdev->rx_ring_shadow_reg_dma);
	return -ENOMEM;
}
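/* (Re)initialize the software descriptor array for a tx ring and reset
 * its free-slot count.
 */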
static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct tx_ring_desc *tx_ring_desc;
	int i;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	mac_iocb_ptr = tx_ring->wq_base;
	tx_ring_desc = tx_ring->q;
	for (i = 0; i < tx_ring->wq_len; i++) {
		tx_ring_desc->index = i;
		tx_ring_desc->skb = NULL;
		tx_ring_desc->queue_entry = mac_iocb_ptr;
		mac_iocb_ptr++;
		tx_ring_desc++;
	}
	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
	atomic_set(&tx_ring->queue_stopped, 0);
}
static void ql_free_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	if (tx_ring->wq_base) {
		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
				    tx_ring->wq_base, tx_ring->wq_base_dma);
		tx_ring->wq_base = NULL;
	}
	kfree(tx_ring->q);
	tx_ring->q = NULL;
}
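/* Allocate the DMA work queue and the software descriptor array for one
 * tx ring.
 */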
static int ql_alloc_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	tx_ring->wq_base =
	    pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
				 &tx_ring->wq_base_dma);

	if ((tx_ring->wq_base == NULL)
		|| tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
		QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
		return -ENOMEM;
	}
	tx_ring->q =
	    kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
	if (tx_ring->q == NULL)
		goto err;

	return 0;
err:
	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
			    tx_ring->wq_base, tx_ring->wq_base_dma);
	return -ENOMEM;
}
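/* Unmap and release any pages still posted to the large buffer queue of
 * an rx ring.
 */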
static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *lbq_desc;

	for (i = 0; i < rx_ring->lbq_len; i++) {
		lbq_desc = &rx_ring->lbq[i];
		if (lbq_desc->p.lbq_page) {
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc, mapaddr),
				       pci_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);

			put_page(lbq_desc->p.lbq_page);
			lbq_desc->p.lbq_page = NULL;
		}
	}
}
static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;

	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		if (sbq_desc == NULL) {
			QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
			return;
		}
		if (sbq_desc->p.skb) {
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(sbq_desc, mapaddr),
					 pci_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(sbq_desc->p.skb);
			sbq_desc->p.skb = NULL;
		}
	}
}
/* Free all large and small rx buffers associated
 * with the completion queues for this device.
 */
static void ql_free_rx_buffers(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->lbq)
			ql_free_lbq_buffers(qdev, rx_ring);
		if (rx_ring->sbq)
			ql_free_sbq_buffers(qdev, rx_ring);
	}
}
static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->type != TX_Q)
			ql_update_buffer_queues(qdev, rx_ring);
	}
}
static void ql_init_lbq_ring(struct ql_adapter *qdev,
				struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *lbq_desc;
	__le64 *bq = rx_ring->lbq_base;

	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->lbq_len; i++) {
		lbq_desc = &rx_ring->lbq[i];
		memset(lbq_desc, 0, sizeof(*lbq_desc));
		lbq_desc->index = i;
		lbq_desc->addr = bq;
		bq++;
	}
}
static void ql_init_sbq_ring(struct ql_adapter *qdev,
				struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;
	__le64 *bq = rx_ring->sbq_base;

	memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		memset(sbq_desc, 0, sizeof(*sbq_desc));
		sbq_desc->index = i;
		sbq_desc->addr = bq;
		bq++;
	}
}
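/* Free the completion queue and the small/large buffer queues for one
 * rx ring, along with their control blocks.
 */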
static void ql_free_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/* Free the small buffer queue. */
	if (rx_ring->sbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->sbq_size,
				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
		rx_ring->sbq_base = NULL;
	}

	/* Free the small buffer queue control blocks. */
	kfree(rx_ring->sbq);
	rx_ring->sbq = NULL;

	/* Free the large buffer queue. */
	if (rx_ring->lbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->lbq_size,
				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
		rx_ring->lbq_base = NULL;
	}

	/* Free the large buffer queue control blocks. */
	kfree(rx_ring->lbq);
	rx_ring->lbq = NULL;

	/* Free the rx queue. */
	if (rx_ring->cq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->cq_size,
				    rx_ring->cq_base, rx_ring->cq_base_dma);
		rx_ring->cq_base = NULL;
	}
}
/* Allocate queues and buffers for this completion queue based
 * on the values in the parameter structure. */
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/*
	 * Allocate the completion queue for this rx_ring.
	 */
	rx_ring->cq_base =
	    pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
				 &rx_ring->cq_base_dma);

	if (rx_ring->cq_base == NULL) {
		QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
		return -ENOMEM;
	}

	if (rx_ring->sbq_len) {
		/*
		 * Allocate small buffer queue.
		 */
		rx_ring->sbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
					 &rx_ring->sbq_base_dma);

		if (rx_ring->sbq_base == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Small buffer queue allocation failed.\n");
			goto err_mem;
		}

		/*
		 * Allocate small buffer queue control blocks.
		 */
		rx_ring->sbq =
		    kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->sbq == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Small buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		ql_init_sbq_ring(qdev, rx_ring);
	}

	if (rx_ring->lbq_len) {
		/*
		 * Allocate large buffer queue.
		 */
		rx_ring->lbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
					 &rx_ring->lbq_base_dma);

		if (rx_ring->lbq_base == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Large buffer queue allocation failed.\n");
			goto err_mem;
		}
		/*
		 * Allocate large buffer queue control blocks.
		 */
		rx_ring->lbq =
		    kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->lbq == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Large buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		ql_init_lbq_ring(qdev, rx_ring);
	}

	return 0;

err_mem:
	ql_free_rx_resources(qdev, rx_ring);
	return -ENOMEM;
}
static void ql_tx_ring_clean(struct ql_adapter *qdev)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;
	int i, j;

	/*
	 * Loop through all queues and free
	 * any lingering skbs.
	 */
	for (j = 0; j < qdev->tx_ring_count; j++) {
		tx_ring = &qdev->tx_ring[j];
		for (i = 0; i < tx_ring->wq_len; i++) {
			tx_ring_desc = &tx_ring->q[i];
			if (tx_ring_desc && tx_ring_desc->skb) {
				QPRINTK(qdev, IFDOWN, ERR,
					"Freeing lost SKB %p, from queue %d, index %d.\n",
					tx_ring_desc->skb, j,
					tx_ring_desc->index);
				ql_unmap_send(qdev, tx_ring_desc,
					      tx_ring_desc->map_cnt);
				dev_kfree_skb(tx_ring_desc->skb);
				tx_ring_desc->skb = NULL;
			}
		}
	}
}
static void ql_free_mem_resources(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->tx_ring_count; i++)
		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
	for (i = 0; i < qdev->rx_ring_count; i++)
		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
	ql_free_shadow_space(qdev);
}
static int ql_alloc_mem_resources(struct ql_adapter *qdev)
{
	int i;

	/* Allocate space for our shadow registers and such. */
	if (ql_alloc_shadow_space(qdev))
		return -ENOMEM;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
			QPRINTK(qdev, IFUP, ERR,
				"RX resource allocation failed.\n");
			goto err_mem;
		}
	}
	/* Allocate tx queue resources */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
			QPRINTK(qdev, IFUP, ERR,
				"TX resource allocation failed.\n");
			goto err_mem;
		}
	}
	return 0;

err_mem:
	ql_free_mem_resources(qdev);
	return -ENOMEM;
}
/* Set up the rx ring control block and pass it to the chip.
 * The control block is defined as
 * "Completion Queue Initialization Control Block", or cqicb.
 */
static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct cqicb *cqicb = &rx_ring->cqicb;
	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
	int err = 0;
	u16 bq_len;
	u64 tmp;
	__le64 *base_indirect_ptr;
	int page_entries;

	/* Set up the shadow registers for this ring. */
	rx_ring->prod_idx_sh_reg = shadow_reg;
	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->lbq_base_indirect = shadow_reg;
	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	rx_ring->sbq_base_indirect = shadow_reg;
	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;

	/* PCI doorbell mem area + 0x00 for consumer index register */
	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
	rx_ring->cnsmr_idx = 0;
	rx_ring->curr_entry = rx_ring->cq_base;

	/* PCI doorbell mem area + 0x04 for valid register */
	rx_ring->valid_db_reg = doorbell_area + 0x04;

	/* PCI doorbell mem area + 0x18 for large buffer consumer */
	rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);

	/* PCI doorbell mem area + 0x1c */
	rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);

	memset((void *)cqicb, 0, sizeof(struct cqicb));
	cqicb->msix_vect = rx_ring->irq;

	bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
	cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);

	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);

	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);

	/*
	 * Set up the control block load flags.
	 */
	cqicb->flags = FLAGS_LC |	/* Load queue base address */
	    FLAGS_LV |			/* Load MSI-X vector */
	    FLAGS_LI;			/* Load irq delay values */
	if (rx_ring->lbq_len) {
		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
		tmp = (u64)rx_ring->lbq_base_dma;
		base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
		cqicb->lbq_addr =
		    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
		bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
			(u16) rx_ring->lbq_buf_size;
		cqicb->lbq_buf_size = cpu_to_le16(bq_len);
		bq_len = (rx_ring->lbq_len == 65536) ? 0 :
			(u16) rx_ring->lbq_len;
		cqicb->lbq_len = cpu_to_le16(bq_len);
		rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_curr_idx = 0;
		rx_ring->lbq_clean_idx = 0;
		rx_ring->lbq_free_cnt = rx_ring->lbq_len;
	}
	if (rx_ring->sbq_len) {
		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
		tmp = (u64)rx_ring->sbq_base_dma;
		base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
		cqicb->sbq_addr =
		    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
		cqicb->sbq_buf_size =
		    cpu_to_le16((u16)(rx_ring->sbq_buf_size/2));
		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
			(u16) rx_ring->sbq_len;
		cqicb->sbq_len = cpu_to_le16(bq_len);
		rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_curr_idx = 0;
		rx_ring->sbq_clean_idx = 0;
		rx_ring->sbq_free_cnt = rx_ring->sbq_len;
	}
	switch (rx_ring->type) {
	case TX_Q:
		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
		break;
	case RX_Q:
		/* Inbound completion handling rx_rings run in
		 * separate NAPI contexts.
		 */
		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
			       64);
		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
		break;
	default:
		QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
			rx_ring->type);
	}
	QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n");
	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
			   CFG_LCQ, rx_ring->cq_id);
	if (err) {
		QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
		return err;
	}
	return err;
}
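/* Set up the work queue control block (wqicb) for a tx ring and
 * download it to the chip.
 */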
static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct wqicb *wqicb = (struct wqicb *)tx_ring;
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
	    (tx_ring->wq_id * sizeof(u64));
	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
	    (tx_ring->wq_id * sizeof(u64));
	int err = 0;

	/*
	 * Assign doorbell registers for this tx_ring.
	 */
	/* TX PCI doorbell mem area for tx producer index */
	tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
	tx_ring->prod_idx = 0;
	/* TX PCI doorbell mem area + 0x04 */
	tx_ring->valid_db_reg = doorbell_area + 0x04;

	/*
	 * Assign shadow registers for this tx_ring.
	 */
	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;

	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);

	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);

	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);

	ql_init_tx_ring(qdev, tx_ring);

	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
			   (u16) tx_ring->wq_id);
	if (err) {
		QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
		return err;
	}
	QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
	return err;
}
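/* Release whichever interrupt mode (MSI-X or MSI) was enabled. */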
static void ql_disable_msix(struct ql_adapter *qdev)
{
	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		pci_disable_msix(qdev->pdev);
		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
		kfree(qdev->msi_x_entry);
		qdev->msi_x_entry = NULL;
	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		pci_disable_msi(qdev->pdev);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
	}
}
/* We start by trying to get the number of vectors
 * stored in qdev->intr_count. If we don't get that
 * many then we reduce the count and try again.
 */
static void ql_enable_msix(struct ql_adapter *qdev)
{
	int i, err;

	/* Get the MSIX vectors. */
	if (irq_type == MSIX_IRQ) {
		/* Try to alloc space for the msix struct,
		 * if it fails then go to MSI/legacy.
		 */
		qdev->msi_x_entry = kcalloc(qdev->intr_count,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!qdev->msi_x_entry) {
			irq_type = MSI_IRQ;
			goto msi;
		}

		for (i = 0; i < qdev->intr_count; i++)
			qdev->msi_x_entry[i].entry = i;

		/* Loop to get our vectors.  We start with
		 * what we want and settle for what we get.
		 */
		do {
			err = pci_enable_msix(qdev->pdev,
				qdev->msi_x_entry, qdev->intr_count);
			if (err > 0)
				qdev->intr_count = err;
		} while (err > 0);

		if (err < 0) {
			kfree(qdev->msi_x_entry);
			qdev->msi_x_entry = NULL;
			QPRINTK(qdev, IFUP, WARNING,
				"MSI-X Enable failed, trying MSI.\n");
			qdev->intr_count = 1;
			irq_type = MSI_IRQ;
		} else if (err == 0) {
			set_bit(QL_MSIX_ENABLED, &qdev->flags);
			QPRINTK(qdev, IFUP, INFO,
				"MSI-X Enabled, got %d vectors.\n",
				qdev->intr_count);
			return;
		}
	}
msi:
	qdev->intr_count = 1;
	if (irq_type == MSI_IRQ) {
		if (!pci_enable_msi(qdev->pdev)) {
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			QPRINTK(qdev, IFUP, INFO,
				"Running with MSI interrupts.\n");
			return;
		}
	}
	irq_type = LEG_IRQ;
	QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
}
/* Each vector services 1 RSS ring and 1 or more
 * TX completion rings.  This function loops through
 * the TX completion rings and assigns the vector that
 * will service it.  An example would be if there are
 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
 * This would mean that vector 0 would service RSS ring 0
 * and TX completion rings 0,1,2 and 3.  Vector 1 would
 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
 */
static void ql_set_tx_vect(struct ql_adapter *qdev)
{
	int i, j, vect;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Assign irq vectors to TX rx_rings.*/
		for (vect = 0, j = 0, i = qdev->rss_ring_count;
					 i < qdev->rx_ring_count; i++) {
			if (j == tx_rings_per_vector) {
				vect++;
				j = 0;
			}
			qdev->rx_ring[i].irq = vect;
			j++;
		}
	} else {
		/* For single vector all rings have an irq
		 * of zero.
		 */
		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->rx_ring[i].irq = 0;
	}
}
/* Set the interrupt mask for this vector.  Each vector
 * will service 1 RSS ring and 1 or more TX completion
 * rings.  This function sets up a bit mask per vector
 * that indicates which rings it services.
 */
static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
{
	int j, vect = ctx->intr;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Add the RSS ring serviced by this vector
		 * to the mask.
		 */
		ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
		/* Add the TX ring(s) serviced by this vector
		 * to the mask.
		 */
		for (j = 0; j < tx_rings_per_vector; j++) {
			ctx->irq_mask |=
			(1 << qdev->rx_ring[qdev->rss_ring_count +
			(vect * tx_rings_per_vector) + j].cq_id);
		}
	} else {
		/* For single vector we just shift each queue's
		 * ID into the mask.
		 */
		for (j = 0; j < qdev->rx_ring_count; j++)
			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
	}
}
/*
 * Here we build the intr_context structures based on
 * our rx_ring count and intr vector count.
 * The intr_context structure is used to hook each vector
 * to possibly different handlers.
 */
static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
{
	int i = 0;
	struct intr_context *intr_context = &qdev->intr_context[0];

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Each rx_ring has its
		 * own intr_context since we have separate
		 * vectors for each queue.
		 */
		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
			qdev->rx_ring[i].irq = i;
			intr_context->intr = i;
			intr_context->qdev = qdev;
			/* Set up this vector's bit-mask that indicates
			 * which queues it services.
			 */
			ql_set_irq_mask(qdev, intr_context);
			/*
			 * We set up each vector's enable/disable/read bits so
			 * there's no bit/mask calculations in the critical path.
			 */
			intr_context->intr_en_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
			    | i;
			intr_context->intr_dis_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
			    INTR_EN_IHD | i;
			intr_context->intr_read_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
			    i;
			if (i == 0) {
				/* The first vector/queue handles
				 * broadcast/multicast, fatal errors,
				 * and firmware events.  This in addition
				 * to normal inbound NAPI processing.
				 */
				intr_context->handler = qlge_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			} else {
				/*
				 * Inbound queues handle unicast frames only.
				 */
				intr_context->handler = qlge_msix_rx_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			}
		}
	} else {
		/*
		 * All rx_rings use the same intr_context since
		 * there is only one vector.
		 */
		intr_context->intr = 0;
		intr_context->qdev = qdev;
		/*
		 * We set up each vector's enable/disable/read bits so
		 * there's no bit/mask calculations in the critical path.
		 */
		intr_context->intr_en_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
		intr_context->intr_dis_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
		    INTR_EN_TYPE_DISABLE;
		intr_context->intr_read_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
		/*
		 * Single interrupt means one handler for all rings.
		 */
		intr_context->handler = qlge_isr;
		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
		/* Set up this vector's bit-mask that indicates
		 * which queues it services. In this case there is
		 * a single vector so it will service all RSS and
		 * TX completion rings.
		 */
		ql_set_irq_mask(qdev, intr_context);
	}
	/* Tell the TX completion rings which MSIx vector
	 * they will be using.
	 */
	ql_set_tx_vect(qdev);
}
static void ql_free_irq(struct ql_adapter *qdev)
{
	int i;
	struct intr_context *intr_context = &qdev->intr_context[0];

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		if (intr_context->hooked) {
			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
				free_irq(qdev->msi_x_entry[i].vector,
					 &qdev->rx_ring[i]);
				QPRINTK(qdev, IFDOWN, DEBUG,
					"freeing msix interrupt %d.\n", i);
			} else {
				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
				QPRINTK(qdev, IFDOWN, DEBUG,
					"freeing msi interrupt %d.\n", i);
			}
		}
	}
	ql_disable_msix(qdev);
}
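/* Hook an interrupt handler for each vector.  With MSI-X each rx_ring
 * gets its own vector; otherwise a single handler services everything.
 */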
static int ql_request_irq(struct ql_adapter *qdev)
{
	int i;
	int status = 0;
	struct pci_dev *pdev = qdev->pdev;
	struct intr_context *intr_context = &qdev->intr_context[0];

	ql_resolve_queues_to_irqs(qdev);

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		atomic_set(&intr_context->irq_cnt, 0);
		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
			status = request_irq(qdev->msi_x_entry[i].vector,
					     intr_context->handler,
					     0,
					     intr_context->name,
					     &qdev->rx_ring[i]);
			if (status) {
				QPRINTK(qdev, IFUP, ERR,
					"Failed request for MSIX interrupt %d.\n",
					i);
				goto err_irq;
			} else {
				QPRINTK(qdev, IFUP, DEBUG,
					"Hooked intr %d, queue type %s%s%s, with name %s.\n",
					i,
					qdev->rx_ring[i].type ==
					DEFAULT_Q ? "DEFAULT_Q" : "",
					qdev->rx_ring[i].type ==
					TX_Q ? "TX_Q" : "",
					qdev->rx_ring[i].type ==
					RX_Q ? "RX_Q" : "", intr_context->name);
			}
		} else {
			QPRINTK(qdev, IFUP, DEBUG,
				"trying msi or legacy interrupts.\n");
			QPRINTK(qdev, IFUP, DEBUG,
				"%s: irq = %d.\n", __func__, pdev->irq);
			QPRINTK(qdev, IFUP, DEBUG,
				"%s: context->name = %s.\n", __func__,
				intr_context->name);
			QPRINTK(qdev, IFUP, DEBUG,
				"%s: dev_id = 0x%p.\n", __func__,
				&qdev->rx_ring[0]);
			status =
			    request_irq(pdev->irq, qlge_isr,
					test_bit(QL_MSI_ENABLED,
						 &qdev->
						 flags) ? 0 : IRQF_SHARED,
					intr_context->name, &qdev->rx_ring[0]);
			if (status)
				goto err_irq;

			QPRINTK(qdev, IFUP, ERR,
				"Hooked intr %d, queue type %s%s%s, with name %s.\n",
				i,
				qdev->rx_ring[0].type ==
				DEFAULT_Q ? "DEFAULT_Q" : "",
				qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
				qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
				intr_context->name);
		}
		intr_context->hooked = 1;
	}
	return status;
err_irq:
	QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!\n");
	ql_free_irq(qdev);
	return status;
}
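/* Build the RSS initialization control block (ricb), including the hash
 * key and the 1024-entry indirection table, and download it to the chip.
 */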
static int ql_start_rss(struct ql_adapter *qdev)
{
	u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
				0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
				0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
				0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
				0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
				0xbe, 0xac, 0x01, 0xfa};
	struct ricb *ricb = &qdev->ricb;
	int status = 0;
	int i;
	u8 *hash_id = (u8 *) ricb->hash_cq_id;

	memset((void *)ricb, 0, sizeof(*ricb));

	ricb->base_cq = RSS_L4K;
	ricb->flags =
		(RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
	ricb->mask = cpu_to_le16((u16)(0x3ff));

	/*
	 * Fill out the Indirection Table.
	 */
	for (i = 0; i < 1024; i++)
		hash_id[i] = (i & (qdev->rss_ring_count - 1));

	memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
	memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);

	QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");

	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
		return status;
	}
	QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
	return status;
}
static int ql_clear_routing_entries(struct ql_adapter *qdev)
{
	int i, status = 0;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;
	/* Clear all the entries in the routing table. */
	for (i = 0; i < 16; i++) {
		status = ql_set_routing_reg(qdev, i, 0, 0);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
				"Failed to init routing register for CAM "
				"packets.\n");
			break;
		}
	}
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}
/* Initialize the frame-to-queue routing. */
static int ql_route_initialize(struct ql_adapter *qdev)
{
	int status = 0;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status)
		return status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Failed to init routing register for error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Failed to init routing register for broadcast packets.\n");
		goto exit;
	}
	/* If we have more than one inbound queue, then turn on RSS in the
	 * routing block.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
					RT_IDX_RSS_MATCH, 1);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
				"Failed to init routing register for MATCH RSS packets.\n");
			goto exit;
		}
	}

	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
				    RT_IDX_CAM_HIT, 1);
	if (status)
		QPRINTK(qdev, IFUP, ERR,
			"Failed to init routing register for CAM packets.\n");
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}
int ql_cam_route_initialize(struct ql_adapter *qdev)
{
	int status, set;

	/* Check if the link is up and use that to
	 * determine if we are setting or clearing
	 * the MAC address in the CAM.
	 */
	set = ql_read32(qdev, STS);
	set &= qdev->port_link_up;
	status = ql_set_mac_addr(qdev, set);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
		return status;
	}

	status = ql_route_initialize(qdev);
	if (status)
		QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");

	return status;
}
static int ql_adapter_initialize(struct ql_adapter *qdev)
{
	u32 value, mask;
	int i;
	int status = 0;

	/*
	 * Set up the System register to halt on errors.
	 */
	value = SYS_EFE | SYS_FAE;
	mask = value << 16;
	ql_write32(qdev, SYS, mask | value);

	/* Set the default queue, and VLAN behavior. */
	value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
	mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
	ql_write32(qdev, NIC_RCV_CFG, (mask | value));

	/* Set the MPI interrupt to enabled. */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);

	/* Enable the function, set pagesize, enable error checking. */
	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
	    FSC_EC | FSC_VM_PAGE_4K | FSC_SH;

	/* Set/clear header splitting. */
	mask = FSC_VM_PAGESIZE_MASK |
	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
	ql_write32(qdev, FSC, mask | value);

	ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
		min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));

	/* Set RX packet routing to use port/pci function on which the
	 * packet arrived on in addition to usual frame routing.
	 * This is helpful on bonding where both interfaces can have
	 * the same MAC address.
	 */
	ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);

	/* Start up the rx queues. */
	for (i = 0; i < qdev->rx_ring_count; i++) {
		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
				"Failed to start rx ring[%d].\n", i);
			return status;
		}
	}

	/* If there is more than one inbound completion queue
	 * then download a RICB to configure RSS.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_start_rss(qdev);
		if (status) {
			QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
			return status;
		}
	}

	/* Start up the tx queues. */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
				"Failed to start tx ring[%d].\n", i);
			return status;
		}
	}

	/* Initialize the port and set the max framesize. */
	status = qdev->nic_ops->port_initialize(qdev);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
		return status;
	}

	/* Set up the MAC address and frame routing filter. */
	status = ql_cam_route_initialize(qdev);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Failed to init CAM/Routing tables.\n");
		return status;
	}

	/* Start NAPI for the RSS queues. */
	for (i = 0; i < qdev->rss_ring_count; i++) {
		QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
			i);
		napi_enable(&qdev->rx_ring[i].napi);
	}

	return status;
}
/* Issue soft reset to chip. */
static int ql_adapter_reset(struct ql_adapter *qdev)
{
	u32 value;
	int status = 0;
	unsigned long end_jiffies;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
		return status;
	}

	end_jiffies = jiffies +
		max((unsigned long)1, usecs_to_jiffies(30));

	/* Stop management traffic. */
	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);

	/* Wait for the NIC and MGMNT FIFOs to empty. */
	ql_wait_fifo_empty(qdev);

	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);

	do {
		value = ql_read32(qdev, RST_FO);
		if ((value & RST_FO_FR) == 0)
			break;
		cpu_relax();
	} while (time_before(jiffies, end_jiffies));

	if (value & RST_FO_FR) {
		QPRINTK(qdev, IFDOWN, ERR,
			"ETIMEDOUT!!! errored out of resetting the chip!\n");
		status = -ETIMEDOUT;
	}

	/* Resume management traffic. */
	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
	return status;
}
static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);

	QPRINTK(qdev, PROBE, INFO,
		"Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
		"XG Roll = %d, XG Rev = %d.\n",
		qdev->func,
		qdev->port,
		qdev->chip_rev_id & 0x0000000f,
		qdev->chip_rev_id >> 4 & 0x0000000f,
		qdev->chip_rev_id >> 8 & 0x0000000f,
		qdev->chip_rev_id >> 12 & 0x0000000f);
	QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
}
static int ql_adapter_down(struct ql_adapter *qdev)
{
	int i, status = 0;

	ql_link_off(qdev);

	/* Don't kill the reset worker thread if we
	 * are in the process of recovery.
	 */
	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
		cancel_delayed_work_sync(&qdev->asic_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_work);
	cancel_delayed_work_sync(&qdev->mpi_idc_work);
	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);

	for (i = 0; i < qdev->rss_ring_count; i++)
		napi_disable(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);

	ql_disable_interrupts(qdev);

	ql_tx_ring_clean(qdev);

	/* Call netif_napi_del() from common point.
	 */
	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	ql_free_rx_buffers(qdev);

	status = ql_adapter_reset(qdev);
	if (status)
		QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
			qdev->func);
	return status;
}
static int ql_adapter_up(struct ql_adapter *qdev)
{
	int err = 0;

	err = ql_adapter_initialize(qdev);
	if (err) {
		QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
		goto err_init;
	}
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_alloc_rx_buffers(qdev);
	/* If the port is initialized and the
	 * link is up then turn on the carrier.
	 */
	if ((ql_read32(qdev, STS) & qdev->port_init) &&
	    (ql_read32(qdev, STS) & qdev->port_link_up))
		ql_link_on(qdev);
	ql_enable_interrupts(qdev);
	ql_enable_all_completion_interrupts(qdev);
	netif_tx_start_all_queues(qdev->ndev);

	return 0;
err_init:
	ql_adapter_reset(qdev);
	return err;
}
static void ql_release_adapter_resources(struct ql_adapter *qdev)
{
	ql_free_mem_resources(qdev);
	ql_free_irq(qdev);
}
static int ql_get_adapter_resources(struct ql_adapter *qdev)
{
	int status = 0;

	if (ql_alloc_mem_resources(qdev)) {
		QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
		return -ENOMEM;
	}
	status = ql_request_irq(qdev);
	return status;
}
static int qlge_close(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(1);
	ql_adapter_down(qdev);
	ql_release_adapter_resources(qdev);
	return 0;
}
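/* Size the tx/rx ring arrays based on the CPU count and the number of
 * MSI-X vectors we were able to get.
 */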
static int ql_configure_rings(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;
	struct tx_ring *tx_ring;
	int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());

	/* In a perfect world we have one RSS ring for each CPU
	 * and each has its own vector.  To do that we ask for
	 * cpu_cnt vectors.  ql_enable_msix() will adjust the
	 * vector count to what we actually get.  We then
	 * allocate an RSS ring for each.
	 * Essentially, we are doing min(cpu_count, msix_vector_count).
	 */
	qdev->intr_count = cpu_cnt;
	ql_enable_msix(qdev);
	/* Adjust the RSS ring count to the actual vector count. */
	qdev->rss_ring_count = qdev->intr_count;
	qdev->tx_ring_count = cpu_cnt;
	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;

	for (i = 0; i < qdev->tx_ring_count; i++) {
		tx_ring = &qdev->tx_ring[i];
		memset((void *)tx_ring, 0, sizeof(*tx_ring));
		tx_ring->qdev = qdev;
		tx_ring->wq_id = i;
		tx_ring->wq_len = qdev->tx_ring_size;
		tx_ring->wq_size =
		    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);

		/*
		 * The completion queue ID for the tx rings start
		 * immediately after the rss rings.
		 */
		tx_ring->cq_id = qdev->rss_ring_count + i;
	}

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		memset((void *)rx_ring, 0, sizeof(*rx_ring));
		rx_ring->qdev = qdev;
		rx_ring->cq_id = i;
		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
		if (i < qdev->rss_ring_count) {
			/*
			 * Inbound (RSS) queues.
			 */
			rx_ring->cq_len = qdev->rx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
			rx_ring->lbq_size =
			    rx_ring->lbq_len * sizeof(__le64);
			rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
			rx_ring->sbq_size =
			    rx_ring->sbq_len * sizeof(__le64);
			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
			rx_ring->type = RX_Q;
		} else {
			/*
			 * Outbound queue handles outbound completions only.
			 */
			/* outbound cq is same size as tx_ring it services. */
			rx_ring->cq_len = qdev->tx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = 0;
			rx_ring->lbq_size = 0;
			rx_ring->lbq_buf_size = 0;
			rx_ring->sbq_len = 0;
			rx_ring->sbq_size = 0;
			rx_ring->sbq_buf_size = 0;
			rx_ring->type = TX_Q;
		}
	}
	return 0;
}
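/* .ndo_open entry point: configure the rings, allocate resources and
 * bring the adapter up.
 */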
static int qlge_open(struct net_device *ndev)
{
	int err = 0;
	struct ql_adapter *qdev = netdev_priv(ndev);

	err = ql_configure_rings(qdev);
	if (err)
		return err;

	err = ql_get_adapter_resources(qdev);
	if (err)
		goto error_up;

	err = ql_adapter_up(qdev);
	if (err)
		goto error_up;

	return err;

error_up:
	ql_release_adapter_resources(qdev);
	return err;
}
static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (ndev->mtu == 1500 && new_mtu == 9000) {
		QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
		queue_delayed_work(qdev->workqueue,
				&qdev->mpi_port_cfg_work, 0);
	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
		QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
	} else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
		   (ndev->mtu == 9000 && new_mtu == 9000)) {
		return 0;
	} else
		return -EINVAL;
	ndev->mtu = new_mtu;
	return 0;
}
static struct net_device_stats *qlge_get_stats(struct net_device
					       *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	return &qdev->stats;
}
static void qlge_set_multicast_list(struct net_device *ndev)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
	struct dev_mc_list *mc_ptr;
	int i, status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return;
	/*
	 * Set or clear promiscuous mode if a
	 * transition is taking place.
	 */
	if (ndev->flags & IFF_PROMISC) {
		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to set promiscuous mode.\n");
			} else {
				set_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to clear promiscuous mode.\n");
			} else {
				clear_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	}

	/*
	 * Set or clear all multicast mode if a
	 * transition is taking place.
	 */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to set all-multi mode.\n");
			} else {
				set_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to clear all-multi mode.\n");
			} else {
				clear_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	}

	if (ndev->mc_count) {
		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
		if (status)
			goto exit;
		for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
		     i++, mc_ptr = mc_ptr->next)
			if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
						MAC_ADDR_TYPE_MULTI_MAC, i)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to load multicast address.\n");
				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
				goto exit;
			}
		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
		if (ql_set_routing_reg
		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
			QPRINTK(qdev, HW, ERR,
				"Failed to set multicast match mode.\n");
		} else {
			set_bit(QL_ALLMULTI, &qdev->flags);
		}
	}
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
}
static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
	struct sockaddr *addr = p;
	int status;

	if (netif_running(ndev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	if (status)
		QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}
static void qlge_tx_timeout(struct net_device *ndev)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
	ql_queue_asic_error(qdev);
}
static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
	    container_of(work, struct ql_adapter, asic_reset_work.work);
	int status;
	rtnl_lock();

	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	rtnl_unlock();
	return;
error:
	QPRINTK(qdev, IFUP, ALERT,
		"Driver up/down cycle failed, closing device\n");

	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	rtnl_unlock();
}
static struct nic_operations qla8012_nic_ops = {
	.get_flash		= ql_get_8012_flash_params,
	.port_initialize	= ql_8012_port_initialize,
};

static struct nic_operations qla8000_nic_ops = {
	.get_flash		= ql_get_8000_flash_params,
	.port_initialize	= ql_8000_port_initialize,
};
/* Find the pcie function number for the other NIC
 * on this chip.  Since both NIC functions share a
 * common firmware we have the lowest enabled function
 * do any common work.  Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
			&temp);
	if (status)
		return status;

	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
			MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
			MPI_TEST_NIC_FUNC_MASK);

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;

	return status;
}
static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;

	qdev->func =
	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func > 3)
		return -EIO;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}
static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}
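/* One-time PCI and per-adapter setup done at probe time: enable the
 * device, map the register and doorbell BARs, read the board info and
 * flash, and initialize the delayed work items.
 */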
static int __devinit ql_init_device(struct pci_dev *pdev,
				    struct net_device *ndev, int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int pos, err = 0;
	u16 val16;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);
	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (pos <= 0) {
		dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
			"aborting.\n");
		return pos;
	} else {
		pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
		val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
		val16 |= (PCI_EXP_DEVCTL_CERE |
			  PCI_EXP_DEVCTL_NFERE |
			  PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
		pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		goto err_out;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out;
	}

	qdev->reg_base =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
	    ioremap_nocache(pci_resource_start(pdev, 3),
			    pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out;
	}

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->rx_csum = 1;
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	init_completion(&qdev->ide_completion);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out:
	ql_release_all(pdev);
	pci_disable_device(pdev);
	return err;
}
static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_multicast_list	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_vlan_rx_register	= ql_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= ql_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ql_vlan_rx_kill_vid,
};
static int __devinit qlge_probe(struct pci_dev *pdev,
				const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found = 0;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
			min(MAX_CPUS, (int)num_online_cpus()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->features = (0
			  | NETIF_F_IP_CSUM
			  | NETIF_F_SG
			  | NETIF_F_TSO
			  | NETIF_F_TSO6
			  | NETIF_F_TSO_ECN
			  | NETIF_F_HW_VLAN_TX
			  | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
	ndev->features |= NETIF_F_GRO;

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		return err;
	}
	ql_display_dev_info(ndev);
	cards_found++;
	return 0;
}
static void __devexit qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}
/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	netif_device_detach(ndev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(ndev))
		ql_adapter_down(qdev);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * it resembles the first-half of the () routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (pci_enable_device(pdev)) {
		QPRINTK(qdev, IFUP, ERR,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	netif_carrier_off(ndev);
	ql_adapter_reset(qdev);

	/* Make sure the EEPROM is good */
	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	if (!is_valid_ether_addr(ndev->perm_addr)) {
		QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pci_set_master(pdev);

	if (netif_running(ndev)) {
		if (ql_adapter_up(qdev)) {
			QPRINTK(qdev, IFUP, ERR,
				"Device initialization failed after reset.\n");
			return;
		}
	}

	netif_device_attach(ndev);
}

static struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};
#ifdef CONFIG_PM
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */
static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = qlge_driver_name,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);