/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/prefetch.h>
#include <linux/module.h>

#include "bnad.h"
#include "bna.h"
#include "cna.h"
static DEFINE_MUTEX(bnad_fwimg_mutex);

static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");

u32 bnad_rxqs_per_cq = 2;

static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
#define BNAD_TX_UNMAPQ_DEPTH	(bnad->txq_depth * 2)

#define BNAD_RX_UNMAPQ_DEPTH	(bnad->rxq_depth)

#define BNAD_GET_MBOX_IRQ(_bnad)				\
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
	 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
	 ((_bnad)->pcidev->irq))

#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth)	\
do {								\
	(_res_info)->res_type = BNA_RES_T_MEM;			\
	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
	(_res_info)->res_u.mem_info.num = (_num);		\
	(_res_info)->res_u.mem_info.len =			\
	sizeof(struct bnad_unmap_q) +				\
	(sizeof(struct bnad_skb_unmap) * ((_depth) - 1));	\
} while (0)
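/*
 * Note: the length above sizes one bnad_unmap_q header followed by
 * (_depth) bnad_skb_unmap entries; the "(_depth) - 1" term suggests
 * struct bnad_unmap_q already embeds the first unmap_array entry inline.
 */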
#define BNAD_TXRX_SYNC_MDELAY	250	/* 250 msecs */
/*
 * Reinitialize completions in CQ, once Rx is taken down
 */
static void
bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bna_cq_entry *cmpl, *next_cmpl;
	unsigned int wi_range, wis = 0, ccb_prod = 0;
	int i;

	BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
			    wi_range);

	for (i = 0; i < ccb->q_depth; i++) {
		wis++;
		if (likely(--wi_range))
			next_cmpl = cmpl + 1;
		else {
			BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
			wis = 0;
			BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
					    next_cmpl, wi_range);
		}
		cmpl->valid = 0;
		cmpl = next_cmpl;
	}
}
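/*
 * Un-map the skb at 'index': the linear head was mapped with
 * dma_map_single() and each page fragment with dma_map_page(), so the
 * unmap calls below mirror that pairing while 'index' wraps through the
 * circular unmap array. Returns the next index to consume.
 */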
static u32
bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
		   u32 index, u32 depth, struct sk_buff *skb, u32 frag)
{
	int j;

	array[index].skb = NULL;

	dma_unmap_single(pdev, dma_unmap_addr(&array[index], dma_addr),
			 skb_headlen(skb), DMA_TO_DEVICE);
	dma_unmap_addr_set(&array[index], dma_addr, 0);
	BNA_QE_INDX_ADD(index, 1, depth);

	for (j = 0; j < frag; j++) {
		dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
			       skb_frag_size(&skb_shinfo(skb)->frags[j]),
			       DMA_TO_DEVICE);
		dma_unmap_addr_set(&array[index], dma_addr, 0);
		BNA_QE_INDX_ADD(index, 1, depth);
	}

	return index;
}
/*
 * Frees all pending Tx Bufs
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
static void
bnad_free_all_txbufs(struct bnad *bnad,
		     struct bna_tcb *tcb)
{
	u32 unmap_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb = NULL;
	int q;

	unmap_array = unmap_q->unmap_array;

	for (q = 0; q < unmap_q->q_depth; q++) {
		skb = unmap_array[q].skb;
		if (!skb)
			continue;

		unmap_cons = q;
		unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
				unmap_cons, unmap_q->q_depth, skb,
				skb_shinfo(skb)->nr_frags);

		dev_kfree_skb_any(skb);
	}
}
/* Data Path Handlers */

/*
 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 *		    b) Sending context
 *		    c) Tasklet context
 */
static u32
bnad_free_txbufs(struct bnad *bnad,
		 struct bna_tcb *tcb)
{
	u32 unmap_cons, sent_packets = 0, sent_bytes = 0;
	u16 wis, updated_hw_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;

	/*
	 * Just return if TX is stopped. This check is useful
	 * when bnad_free_txbufs() runs from a tasklet scheduled
	 * before bnad_cb_tx_cleanup() cleared BNAD_TXQ_TX_STARTED bit
	 * but this routine actually runs after the cleanup has been
	 * executed.
	 */
	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return 0;

	updated_hw_cons = *(tcb->hw_consumer_index);

	wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
				 updated_hw_cons, tcb->q_depth);

	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));

	unmap_array = unmap_q->unmap_array;
	unmap_cons = unmap_q->consumer_index;

	prefetch(&unmap_array[unmap_cons + 1]);
	while (wis) {
		skb = unmap_array[unmap_cons].skb;

		sent_packets++;
		sent_bytes += skb->len;
		wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);

		unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
				unmap_cons, unmap_q->q_depth, skb,
				skb_shinfo(skb)->nr_frags);

		dev_kfree_skb_any(skb);
	}

	/* Update consumer pointers. */
	tcb->consumer_index = updated_hw_cons;
	unmap_q->consumer_index = unmap_cons;

	tcb->txq->tx_packets += sent_packets;
	tcb->txq->tx_bytes += sent_bytes;

	return sent_packets;
}
/* Tx Free Tasklet function */
/* Frees for all the tcb's in all the Tx's */
/*
 * Scheduled from sending context, so that
 * the fat Tx lock is not held for too long
 * in the sending context.
 */
static void
bnad_tx_free_tasklet(unsigned long bnad_ptr)
{
	struct bnad *bnad = (struct bnad *)bnad_ptr;
	struct bna_tcb *tcb;
	u32 acked = 0;
	int i, j;

	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (!tcb)
				continue;
			if (((u16) (*tcb->hw_consumer_index) !=
				tcb->consumer_index) &&
				(!test_and_set_bit(BNAD_TXQ_FREE_SENT,
						   &tcb->flags))) {
				acked = bnad_free_txbufs(bnad, tcb);
				if (likely(test_bit(BNAD_TXQ_TX_STARTED,
					&tcb->flags)))
					bna_ib_ack(tcb->i_dbell, acked);
				smp_mb__before_clear_bit();
				clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
			}
			if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
						&tcb->flags)))
				continue;
			if (netif_queue_stopped(bnad->netdev)) {
				if (acked && netif_carrier_ok(bnad->netdev) &&
					BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
						BNAD_NETIF_WAKE_THRESHOLD) {
					netif_wake_queue(bnad->netdev);
					/* Counters for individual TxQs? */
					BNAD_UPDATE_CTR(bnad,
						netif_queue_wakeup);
				}
			}
		}
	}
}
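/*
 * BNAD_TXQ_FREE_SENT serializes the Tx-free paths (tasklet, completion
 * interrupt and xmit); whichever context wins test_and_set_bit() becomes
 * responsible for freeing buffers and acking the IB doorbell.
 */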
static u32
bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct net_device *netdev = bnad->netdev;
	u32 sent = 0;

	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		return 0;

	sent = bnad_free_txbufs(bnad, tcb);
	if (sent) {
		if (netif_queue_stopped(netdev) &&
		    netif_carrier_ok(netdev) &&
		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
				    BNAD_NETIF_WAKE_THRESHOLD) {
			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
				netif_wake_queue(netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
			}
		}
	}

	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		bna_ib_ack(tcb->i_dbell, sent);

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	return sent;
}
/* MSIX Tx Completion Handler */
static irqreturn_t
bnad_msix_tx(int irq, void *data)
{
	struct bna_tcb *tcb = (struct bna_tcb *)data;
	struct bnad *bnad = tcb->bnad;

	bnad_tx(bnad, tcb);

	return IRQ_HANDLED;
}

static void
bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	rcb->producer_index = 0;
	rcb->consumer_index = 0;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
}
static void
bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;
	int unmap_cons;

	unmap_q = rcb->unmap_q;
	unmap_array = unmap_q->unmap_array;
	for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
		skb = unmap_array[unmap_cons].skb;
		if (!skb)
			continue;
		unmap_array[unmap_cons].skb = NULL;
		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr),
				 rcb->rxq->buffer_size,
				 DMA_FROM_DEVICE);
		dev_kfree_skb(skb);
	}
	bnad_reset_rcb(bnad, rcb);
}
static void
bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	u16 to_alloc, alloced, unmap_prod, wi_range;
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct bna_rxq_entry *rxent;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	alloced = 0;
	to_alloc =
		BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);

	unmap_array = unmap_q->unmap_array;
	unmap_prod = unmap_q->producer_index;

	BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);

	while (to_alloc--) {
		if (!wi_range)
			BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
					     wi_range);
		skb = netdev_alloc_skb_ip_align(bnad->netdev,
						rcb->rxq->buffer_size);
		if (unlikely(!skb)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			rcb->rxq->rxbuf_alloc_failed++;
			goto finishing;
		}
		unmap_array[unmap_prod].skb = skb;
		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
					  rcb->rxq->buffer_size,
					  DMA_FROM_DEVICE);
		dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
				   dma_addr);
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);

		rxent++;
		wi_range--;
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		unmap_q->producer_index = unmap_prod;
		rcb->producer_index = unmap_prod;
		smp_mb();
		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}
}
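/*
 * Refill is attempted only when BNAD_RXQ_REFILL is not already held by
 * another context; the free-count >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT test
 * defers posting until enough entries are free to be worth a doorbell.
 */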
static inline void
bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
			 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
			bnad_alloc_n_post_rxbufs(bnad, rcb);
		smp_mb__before_clear_bit();
		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
	}
}
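/*
 * bnad_poll_cq() consumes up to 'budget' completions from the CQ,
 * routing each to the large (rcb[0]) or small (rcb[1]) RxQ that produced
 * it, and returns the number of packets processed.
 */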
static u32
bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
	struct bna_cq_entry *cmpl, *next_cmpl;
	struct bna_rcb *rcb = NULL;
	unsigned int wi_range, packets = 0, wis = 0;
	struct bnad_unmap_q *unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;
	u32 flags, unmap_cons;
	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);

	set_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);

	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) {
		clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
		return 0;
	}

	prefetch(bnad->netdev);
	BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
			    wi_range);
	BUG_ON(!(wi_range <= ccb->q_depth));
	while (cmpl->valid && packets < budget) {
		packets++;
		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));

		if (bna_is_small_rxq(cmpl->rxq_id))
			rcb = ccb->rcb[1];
		else
			rcb = ccb->rcb[0];

		unmap_q = rcb->unmap_q;
		unmap_array = unmap_q->unmap_array;
		unmap_cons = unmap_q->consumer_index;

		skb = unmap_array[unmap_cons].skb;
		unmap_array[unmap_cons].skb = NULL;
		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr),
				 rcb->rxq->buffer_size,
				 DMA_FROM_DEVICE);
		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);

		/* Should be more efficient ? Performance ? */
		BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);

		wis++;
		if (likely(--wi_range))
			next_cmpl = cmpl + 1;
		else {
			BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
			wis = 0;
			BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
					    next_cmpl, wi_range);
			BUG_ON(!(wi_range <= ccb->q_depth));
		}

		flags = ntohl(cmpl->flags);
		if (unlikely
		    (flags &
		     (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
		      BNA_CQ_EF_TOO_LONG))) {
			dev_kfree_skb_any(skb);
			rcb->rxq->rx_packets_with_error++;
			goto next;
		}

		skb_put(skb, ntohs(cmpl->length));
		if (likely
		    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
		     (((flags & BNA_CQ_EF_IPV4) &&
		       (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
		      (flags & BNA_CQ_EF_IPV6)) &&
		     (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
		     (flags & BNA_CQ_EF_L4_CKSUM_OK)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		rcb->rxq->rx_packets++;
		rcb->rxq->rx_bytes += skb->len;
		skb->protocol = eth_type_trans(skb, bnad->netdev);

		if (flags & BNA_CQ_EF_VLAN)
			__vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));

		if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			napi_gro_receive(&rx_ctrl->napi, skb);
		else
			netif_receive_skb(skb);

next:
		cmpl->valid = 0;
		cmpl = next_cmpl;
	}

	BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);

	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
		bna_ib_ack_disable_irq(ccb->i_dbell, packets);

	bnad_refill_rxq(bnad, ccb->rcb[0]);
	if (ccb->rcb[1])
		bnad_refill_rxq(bnad, ccb->rcb[1]);

	clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);

	return packets;
}
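/*
 * napi_schedule_prep() fails if this NAPI context is already scheduled,
 * so a poll is queued at most once per completion burst.
 */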
static void
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
	struct napi_struct *napi = &rx_ctrl->napi;

	if (likely(napi_schedule_prep(napi))) {
		__napi_schedule(napi);
		rx_ctrl->rx_schedule++;
	}
}

/* MSIX Rx Path Handler */
static irqreturn_t
bnad_msix_rx(int irq, void *data)
{
	struct bna_ccb *ccb = (struct bna_ccb *)data;

	if (ccb) {
		((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
		bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
	}

	return IRQ_HANDLED;
}
/* Interrupt handlers */

/* Mbox Interrupt Handlers */
static irqreturn_t
bnad_msix_mbox_handler(int irq, void *data)
{
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_HANDLED;
	}

	bna_intr_status_get(&bnad->bna, intr_status);

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t
bnad_isr(int irq, void *data)
{
	int i, j;
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	struct bna_tcb *tcb = NULL;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_NONE;
	}

	bna_intr_status_get(&bnad->bna, intr_status);

	if (unlikely(!intr_status)) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_NONE;
	}

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (!BNA_IS_INTX_DATA_INTR(intr_status))
		return IRQ_HANDLED;

	/* Process data interrupts */
	/* Tx processing */
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
				bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
		}
	}
	/* Rx processing */
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (rx_ctrl->ccb)
				bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
		}
	}
	return IRQ_HANDLED;
}
/*
 * Called in interrupt / callback context
 * with bna_lock held, so cfg_flags access is OK
 */
static void
bnad_enable_mbox_irq(struct bnad *bnad)
{
	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
}

/*
 * Called with bnad->bna_lock held b'cos of
 * bnad->cfg_flags access.
 */
static void
bnad_disable_mbox_irq(struct bnad *bnad)
{
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
}

static void
bnad_set_netdev_perm_addr(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;

	memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
	if (is_zero_ether_addr(netdev->dev_addr))
		memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
}
/* Control Path Handlers */

/* Callbacks */
void
bnad_cb_mbox_intr_enable(struct bnad *bnad)
{
	bnad_enable_mbox_irq(bnad);
}

void
bnad_cb_mbox_intr_disable(struct bnad *bnad)
{
	bnad_disable_mbox_irq(bnad);
}

void
bnad_cb_ioceth_ready(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_failed(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
	complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_disabled(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}
static void
bnad_cb_enet_disabled(void *arg)
{
	struct bnad *bnad = (struct bnad *)arg;

	netif_carrier_off(bnad->netdev);
	complete(&bnad->bnad_completions.enet_comp);
}

void
bnad_cb_ethport_link_status(struct bnad *bnad,
			    enum bna_link_status link_status)
{
	bool link_up = false;

	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);

	if (link_status == BNA_CEE_UP) {
		if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	} else {
		if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	}

	if (link_up) {
		if (!netif_carrier_ok(bnad->netdev)) {
			uint tx_id, tcb_id;
			printk(KERN_WARNING "bna: %s link up\n",
				bnad->netdev->name);
			netif_carrier_on(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
			for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
				for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
				      tcb_id++) {
					struct bna_tcb *tcb =
					bnad->tx_info[tx_id].tcb[tcb_id];
					u32 txq_id;
					if (!tcb)
						continue;

					txq_id = tcb->id;

					if (test_bit(BNAD_TXQ_TX_STARTED,
						     &tcb->flags)) {
						/*
						 * Force an immediate
						 * Transmit Schedule */
						printk(KERN_INFO "bna: %s %d "
						      "TXQ_STARTED\n",
						       bnad->netdev->name,
						       txq_id);
						netif_wake_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_wakeup);
					} else {
						netif_stop_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_stop);
					}
				}
			}
		}
	} else {
		if (netif_carrier_ok(bnad->netdev)) {
			printk(KERN_WARNING "bna: %s link down\n",
				bnad->netdev->name);
			netif_carrier_off(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
		}
	}
}
static void
bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.tx_comp);
}

static void
bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	tx_info->tcb[tcb->id] = tcb;
	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
	unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
}

static void
bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		cpu_relax();

	bnad_free_all_txbufs(bnad, tcb);

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	tx_info->tcb[tcb->id] = NULL;
}
static void
bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
	unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
}

static void
bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
{
	bnad_free_all_rxbufs(bnad, rcb);
}

static void
bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = ccb;
	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
}

static void
bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = NULL;
}
static void
bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	u32 txq_id;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
		txq_id = tcb->id;
		clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
		netif_stop_subqueue(bnad->netdev, txq_id);
		printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
			bnad->netdev->name, txq_id);
	}
}

static void
bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	struct bnad_unmap_q *unmap_q;
	u32 txq_id;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
		txq_id = tcb->id;

		unmap_q = tcb->unmap_q;

		if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
			continue;

		while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
			cpu_relax();

		bnad_free_all_txbufs(bnad, tcb);

		unmap_q->producer_index = 0;
		unmap_q->consumer_index = 0;

		smp_mb__before_clear_bit();
		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

		set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);

		if (netif_carrier_ok(bnad->netdev)) {
			printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
				bnad->netdev->name, txq_id);
			netif_wake_subqueue(bnad->netdev, txq_id);
			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
		}
	}

	/*
	 * Workaround for first ioceth enable failure & we
	 * get a 0 MAC address. We try to get the MAC address
	 * latter.
	 */
	if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
		bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
		bnad_set_netdev_perm_addr(bnad);
	}
}
static void
bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
	}

	mdelay(BNAD_TXRX_SYNC_MDELAY);
	bna_tx_cleanup_complete(tx);
}

static void
bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);

		if (ccb->rcb[1])
			clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
	}
}

static void
bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	mdelay(BNAD_TXRX_SYNC_MDELAY);

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);

		if (ccb->rcb[1])
			clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);

		while (test_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags))
			cpu_relax();
	}

	bna_rx_cleanup_complete(rx);
}

static void
bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bna_rcb *rcb;
	struct bnad_rx_ctrl *rx_ctrl;
	struct bnad_unmap_q *unmap_q;
	int i;
	int j;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		bnad_cq_cmpl_init(bnad, ccb);

		for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
			rcb = ccb->rcb[j];
			if (!rcb)
				continue;
			bnad_free_all_rxbufs(bnad, rcb);

			set_bit(BNAD_RXQ_STARTED, &rcb->flags);
			set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
			unmap_q = rcb->unmap_q;

			/* Now allocate & post buffers for this RCB */
			/* !!Allocation in callback context */
			if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
				if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
					>> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
					bnad_alloc_n_post_rxbufs(bnad, rcb);
				smp_mb__before_clear_bit();
				clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
			}
		}
	}
}
static void
bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.rx_comp);
}

void
bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
{
	bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.mcast_comp);
}

void
bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
		  struct bna_stats *stats)
{
	if (status == BNA_CB_SUCCESS)
		BNAD_UPDATE_CTR(bnad, hw_stats_updates);

	if (!netif_running(bnad->netdev) ||
		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	mod_timer(&bnad->stats_timer,
		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
}

void
bnad_cb_enet_mtu_set(struct bnad *bnad)
{
	bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.mtu_comp);
}
/* Resource allocation, free functions */

static void
bnad_mem_free(struct bnad *bnad,
	      struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if (mem_info->mdl == NULL)
		return;

	for (i = 0; i < mem_info->num; i++) {
		if (mem_info->mdl[i].kva != NULL) {
			if (mem_info->mem_type == BNA_MEM_T_DMA) {
				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
						dma_pa);
				dma_free_coherent(&bnad->pcidev->dev,
						  mem_info->mdl[i].len,
						  mem_info->mdl[i].kva, dma_pa);
			} else
				kfree(mem_info->mdl[i].kva);
		}
	}
	kfree(mem_info->mdl);
	mem_info->mdl = NULL;
}
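/*
 * Counterpart allocator: BNA_MEM_T_DMA descriptors get coherent DMA
 * memory, everything else plain kernel memory; on any failure all
 * previously allocated descriptors are released via bnad_mem_free().
 */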
static int
bnad_mem_alloc(struct bnad *bnad,
	       struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if ((mem_info->num == 0) || (mem_info->len == 0)) {
		mem_info->mdl = NULL;
		return 0;
	}

	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
				GFP_KERNEL);
	if (mem_info->mdl == NULL)
		return -ENOMEM;

	if (mem_info->mem_type == BNA_MEM_T_DMA) {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva =
				dma_alloc_coherent(&bnad->pcidev->dev,
						   mem_info->len, &dma_pa,
						   GFP_KERNEL);

			if (mem_info->mdl[i].kva == NULL)
				goto err_return;

			BNA_SET_DMA_ADDR(dma_pa,
					 &(mem_info->mdl[i].dma));
		}
	} else {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva = kzalloc(mem_info->len,
						       GFP_KERNEL);
			if (mem_info->mdl[i].kva == NULL)
				goto err_return;
		}
	}

	return 0;

err_return:
	bnad_mem_free(bnad, mem_info);
	return -ENOMEM;
}
/* Free IRQ for Mailbox */
static void
bnad_mbox_irq_free(struct bnad *bnad)
{
	int irq;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_disable_mbox_irq(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	irq = BNAD_GET_MBOX_IRQ(bnad);
	free_irq(irq, bnad);
}

/*
 * Allocates IRQ for Mailbox, but keep it disabled
 * This will be enabled once we get the mbox enable callback
 * from bna
 */
static int
bnad_mbox_irq_alloc(struct bnad *bnad)
{
	int err = 0;
	unsigned long irq_flags, flags;
	u32 irq;
	irq_handler_t irq_handler;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX) {
		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
		irq_flags = 0;
	} else {
		irq_handler = (irq_handler_t)bnad_isr;
		irq = bnad->pcidev->irq;
		irq_flags = IRQF_SHARED;
	}

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);

	/*
	 * Set the Mbox IRQ disable flag, so that the IRQ handler
	 * called from request_irq() for SHARED IRQs do not execute
	 */
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);

	err = request_irq(irq, irq_handler, irq_flags,
			  bnad->mbox_irq_name, bnad);

	return err;
}

static void
bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
{
	kfree(intr_info->idl);
	intr_info->idl = NULL;
}
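/*
 * MSI-X vector layout assumed below: mailbox vector(s) first, then one
 * vector per TxQ across all Tx objects, then one per Rx path; 'txrx_id'
 * offsets into the Tx or Rx region respectively.
 */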
/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
static int
bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
		    u32 txrx_id, struct bna_intr_info *intr_info)
{
	int i, vector_start = 0;
	u32 cfg_flags;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		intr_info->intr_type = BNA_INTR_T_MSIX;
		intr_info->idl = kcalloc(intr_info->num,
					 sizeof(struct bna_intr_descr),
					 GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
			break;

		case BNAD_INTR_RX:
			vector_start = BNAD_MAILBOX_MSIX_VECTORS +
					(bnad->num_tx * bnad->num_txq_per_tx) +
					txrx_id;
			break;

		default:
			BUG();
		}

		for (i = 0; i < intr_info->num; i++)
			intr_info->idl[i].vector = vector_start + i;
	} else {
		intr_info->intr_type = BNA_INTR_T_INTX;
		intr_info->num = 1;
		intr_info->idl = kcalloc(intr_info->num,
					 sizeof(struct bna_intr_descr),
					 GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
			break;

		case BNAD_INTR_RX:
			intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
			break;
		}
	}
	return 0;
}
/*
 * NOTE: Should be called for MSIX only
 * Unregisters Tx MSIX vector(s) from the kernel
 */
static void
bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
			int num_txqs)
{
	int i;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		if (tx_info->tcb[i] == NULL)
			continue;

		vector_num = tx_info->tcb[i]->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
	}
}

/*
 * NOTE: Should be called for MSIX only
 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
		      u32 tx_id, int num_txqs)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		vector_num = tx_info->tcb[i]->intr_vector;
		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
				tx_id + tx_info->tcb[i]->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_tx, 0,
				  tx_info->tcb[i]->name,
				  tx_info->tcb[i]);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
	return -1;
}

/*
 * NOTE: Should be called for MSIX only
 * Unregisters Rx MSIX vector(s) from the kernel
 */
static void
bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
			int num_rxps)
{
	int i;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		if (rx_info->rx_ctrl[i].ccb == NULL)
			continue;

		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector,
			 rx_info->rx_ctrl[i].ccb);
	}
}

/*
 * NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
		      u32 rx_id, int num_rxps)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
			bnad->netdev->name,
			rx_id + rx_info->rx_ctrl[i].ccb->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_rx, 0,
				  rx_info->rx_ctrl[i].ccb->name,
				  rx_info->rx_ctrl[i].ccb);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
	return -1;
}
/* Free Tx object Resources */
static void
bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Tx object */
static int
bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  u32 tx_id)
{
	int i, err = 0;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					&res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
					&res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}

/* Free Rx object Resources */
static void
bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Rx object */
static int
bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  uint rx_id)
{
	int i, err = 0;

	/* All memory needs to be allocated before setup_ccbs */
	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					&res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
					&res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_rx_res_free(bnad, res_info);
	return err;
}
/* Timer callbacks */
static void
bnad_ioc_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_ioc_hb_check(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_sem_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
/*
 * All timer routines use bnad->bna_lock to protect against
 * the following race, which may occur in case of no locking.
 */

/* b) Dynamic Interrupt Moderation Timer */
static void
bnad_dim_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	int i, j;
	unsigned long flags;

	if (!netif_carrier_ok(bnad->netdev))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (!rx_ctrl->ccb)
				continue;
			bna_rx_dim_update(rx_ctrl->ccb);
		}
	}

	/* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/* c) Statistics Timer */
static void
bnad_stats_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	if (!netif_running(bnad->netdev) ||
		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_hw_stats_get(&bnad->bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
/*
 * Set up timer for DIM
 * Called with bnad->bna_lock held
 */
void
bnad_dim_timer_start(struct bnad *bnad)
{
	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->dim_timer, bnad_dim_timeout,
			    (unsigned long)bnad);
		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	}
}

/*
 * Set up timer for statistics
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_start(struct bnad *bnad)
{
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->stats_timer, bnad_stats_timeout,
			    (unsigned long)bnad);
		mod_timer(&bnad->stats_timer,
			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Stops the stats timer
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_stop(struct bnad *bnad)
{
	int to_del = 0;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		to_del = 1;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (to_del)
		del_timer_sync(&bnad->stats_timer);
}
/* Utilities */

static void
bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
{
	int i = 1; /* Index 0 has broadcast address */
	struct netdev_hw_addr *mc_addr;

	netdev_for_each_mc_addr(mc_addr, netdev) {
		memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
			ETH_ALEN);
		i++;
	}
}
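/*
 * NAPI contract: return how many packets were consumed; only when fewer
 * than 'budget' were processed may the poll call napi_complete() and
 * re-enable the Rx interrupt.
 */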
static int
bnad_napi_poll_rx(struct napi_struct *napi, int budget)
{
	struct bnad_rx_ctrl *rx_ctrl =
		container_of(napi, struct bnad_rx_ctrl, napi);
	struct bnad *bnad = rx_ctrl->bnad;
	int rcvd = 0;

	rx_ctrl->rx_poll_ctr++;

	if (!netif_carrier_ok(bnad->netdev))
		goto poll_exit;

	rcvd = bnad_poll_cq(bnad, rx_ctrl->ccb, budget);
	if (rcvd >= budget)
		return rcvd;

poll_exit:
	napi_complete(napi);

	rx_ctrl->rx_complete++;

	if (rx_ctrl->ccb)
		bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);

	return rcvd;
}

#define BNAD_NAPI_POLL_QUOTA		64
static void
bnad_napi_init(struct bnad *bnad, u32 rx_id)
{
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	/* Initialize & enable NAPI */
	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
			       bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
	}
}
static void
bnad_napi_enable(struct bnad *bnad, u32 rx_id)
{
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	/* Initialize & enable NAPI */
	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];

		napi_enable(&rx_ctrl->napi);
	}
}

static void
bnad_napi_disable(struct bnad *bnad, u32 rx_id)
{
	int i;

	/* First disable and then clean up */
	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
		napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
		netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
	}
}
/* Should be called with conf_lock held */
static void
bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
{
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	unsigned long flags;

	if (!tx_info->tx)
		return;

	init_completion(&bnad->bnad_completions.tx_comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&bnad->bnad_completions.tx_comp);

	if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
		bnad_tx_msix_unregister(bnad, tx_info,
			bnad->num_txq_per_tx);

	if (0 == tx_id)
		tasklet_kill(&bnad->tx_free_tasklet);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_destroy(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	tx_info->tx = NULL;
	tx_info->tx_id = 0;

	bnad_tx_res_free(bnad, res_info);
}
/* Should be called with conf_lock held */
static int
bnad_setup_tx(struct bnad *bnad, u32 tx_id)
{
	int err;
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	struct bna_intr_info *intr_info =
			&res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
	static const struct bna_tx_event_cbfn tx_cbfn = {
		.tcb_setup_cbfn = bnad_cb_tcb_setup,
		.tcb_destroy_cbfn = bnad_cb_tcb_destroy,
		.tx_stall_cbfn = bnad_cb_tx_stall,
		.tx_resume_cbfn = bnad_cb_tx_resume,
		.tx_cleanup_cbfn = bnad_cb_tx_cleanup,
	};
	struct bna_tx *tx;
	unsigned long flags;

	tx_info->tx_id = tx_id;

	/* Initialize the Tx object configuration */
	tx_config->num_txq = bnad->num_txq_per_tx;
	tx_config->txq_depth = bnad->txq_depth;
	tx_config->tx_type = BNA_TX_T_REGULAR;
	tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;

	/* Get BNA's resource requirement for one tx object */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_res_req(bnad->num_txq_per_tx,
		bnad->txq_depth, res_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Fill Unmap Q memory requirements */
	BNAD_FILL_UNMAPQ_MEM_REQ(
			&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
			bnad->num_txq_per_tx,
			BNAD_TX_UNMAPQ_DEPTH);

	/* Allocate resources */
	err = bnad_tx_res_alloc(bnad, res_info, tx_id);
	if (err)
		return err;

	/* Ask BNA to create one Tx object, supplying required resources */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
			tx_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (!tx) {
		err = -ENOMEM;
		goto err_return;
	}
	tx_info->tx = tx;

	/* Register ISR for the Tx object */
	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
		err = bnad_tx_msix_register(bnad, tx_info,
			tx_id, bnad->num_txq_per_tx);
		if (err)
			goto err_return;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_enable(tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return 0;

err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}
/* Setup the rx config for bna_rx_create */
/* bnad decides the configuration */
static void
bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
{
	rx_config->rx_type = BNA_RX_T_REGULAR;
	rx_config->num_paths = bnad->num_rxp_per_rx;
	rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;

	if (bnad->num_rxp_per_rx > 1) {
		rx_config->rss_status = BNA_STATUS_T_ENABLED;
		rx_config->rss_config.hash_type =
				(BFI_ENET_RSS_IPV6 |
				 BFI_ENET_RSS_IPV6_TCP |
				 BFI_ENET_RSS_IPV4 |
				 BFI_ENET_RSS_IPV4_TCP);
		rx_config->rss_config.hash_mask =
				bnad->num_rxp_per_rx - 1;
		get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
			sizeof(rx_config->rss_config.toeplitz_hash_key));
	} else {
		rx_config->rss_status = BNA_STATUS_T_DISABLED;
		memset(&rx_config->rss_config, 0,
		       sizeof(rx_config->rss_config));
	}
	rx_config->rxp_type = BNA_RXP_SLR;
	rx_config->q_depth = bnad->rxq_depth;

	rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;

	rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
}

static void
bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
{
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	int i;

	for (i = 0; i < bnad->num_rxp_per_rx; i++)
		rx_info->rx_ctrl[i].bnad = bnad;
}
/* Called with mutex_lock(&bnad->conf_mutex) held */
static void
bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
{
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
	unsigned long flags;
	int to_del = 0;

	if (!rx_info->rx)
		return;

	if (0 == rx_id) {
		spin_lock_irqsave(&bnad->bna_lock, flags);
		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
		    test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
			clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
			to_del = 1;
		}
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		if (to_del)
			del_timer_sync(&bnad->dim_timer);
	}

	init_completion(&bnad->bnad_completions.rx_comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&bnad->bnad_completions.rx_comp);

	if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
		bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);

	bnad_napi_disable(bnad, rx_id);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_destroy(rx_info->rx);

	rx_info->rx = NULL;
	rx_info->rx_id = 0;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_rx_res_free(bnad, res_info);
}
/* Called with mutex_lock(&bnad->conf_mutex) held */
static int
bnad_setup_rx(struct bnad *bnad, u32 rx_id)
{
	int err;
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
	struct bna_intr_info *intr_info =
			&res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
	static const struct bna_rx_event_cbfn rx_cbfn = {
		.rcb_setup_cbfn = bnad_cb_rcb_setup,
		.rcb_destroy_cbfn = bnad_cb_rcb_destroy,
		.ccb_setup_cbfn = bnad_cb_ccb_setup,
		.ccb_destroy_cbfn = bnad_cb_ccb_destroy,
		.rx_stall_cbfn = bnad_cb_rx_stall,
		.rx_cleanup_cbfn = bnad_cb_rx_cleanup,
		.rx_post_cbfn = bnad_cb_rx_post,
	};
	struct bna_rx *rx;
	unsigned long flags;

	rx_info->rx_id = rx_id;

	/* Initialize the Rx object configuration */
	bnad_init_rx_config(bnad, rx_config);

	/* Get BNA's resource requirement for one Rx object */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_res_req(rx_config, res_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Fill Unmap Q memory requirements */
	BNAD_FILL_UNMAPQ_MEM_REQ(
			&res_info[BNA_RX_RES_MEM_T_UNMAPQ],
			rx_config->num_paths +
			((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
				rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);

	/* Allocate resource */
	err = bnad_rx_res_alloc(bnad, res_info, rx_id);
	if (err)
		return err;

	bnad_rx_ctrl_init(bnad, rx_id);

	/* Ask BNA to create one Rx object, supplying required resources */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
			rx_info);
	if (!rx) {
		err = -ENOMEM;
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		goto err_return;
	}
	rx_info->rx = rx;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/*
	 * Init NAPI, so that state is set to NAPI_STATE_SCHED,
	 * so that IRQ handler cannot schedule NAPI at this point.
	 */
	bnad_napi_init(bnad, rx_id);

	/* Register ISR for the Rx object */
	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
		err = bnad_rx_msix_register(bnad, rx_info, rx_id,
					    rx_config->num_paths);
		if (err)
			goto err_return;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (0 == rx_id) {
		/* Set up Dynamic Interrupt Moderation Vector */
		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
			bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);

		/* Enable VLAN filtering only on the default Rx */
		bna_rx_vlanfilter_enable(rx);

		/* Start the DIM timer */
		bnad_dim_timer_start(bnad);
	}

	bna_rx_enable(rx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Enable scheduling of NAPI */
	bnad_napi_enable(bnad, rx_id);

	return 0;

err_return:
	bnad_cleanup_rx(bnad, rx_id);
	return err;
}
/* Called with conf_lock & bnad->bna_lock held */
void
bnad_tx_coalescing_timeo_set(struct bnad *bnad)
{
	struct bnad_tx_info *tx_info;

	tx_info = &bnad->tx_info[0];
	if (!tx_info->tx)
		return;

	bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
}

/* Called with conf_lock & bnad->bna_lock held */
void
bnad_rx_coalescing_timeo_set(struct bnad *bnad)
{
	struct bnad_rx_info *rx_info;
	int i;

	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		bna_rx_coalescing_timeo_set(rx_info->rx,
				bnad->rx_coalescing_timeo);
	}
}

/*
 * Called with bnad->bna_lock held
 */
int
bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
{
	int ret;

	if (!is_valid_ether_addr(mac_addr))
		return -EADDRNOTAVAIL;

	/* If datapath is down, pretend everything went through */
	if (!bnad->rx_info[0].rx)
		return 0;

	ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
	if (ret != BNA_CB_SUCCESS)
		return -EADDRNOTAVAIL;

	return 0;
}
/* Should be called with conf_lock held */
static int
bnad_enable_default_bcast(struct bnad *bnad)
{
	struct bnad_rx_info *rx_info = &bnad->rx_info[0];
	int ret;
	unsigned long flags;

	init_completion(&bnad->bnad_completions.mcast_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
				bnad_cb_rx_mcast_add);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (ret == BNA_CB_SUCCESS)
		wait_for_completion(&bnad->bnad_completions.mcast_comp);
	else
		return -ENODEV;

	if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
		return -ENODEV;

	return 0;
}

/* Called with mutex_lock(&bnad->conf_mutex) held */
static void
bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
{
	u16 vid;
	unsigned long flags;

	for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
		spin_lock_irqsave(&bnad->bna_lock, flags);
		bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
	}
}
/* Statistics utilities */
void
bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
{
	int i, j;

	for (i = 0; i < bnad->num_rx; i++) {
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			if (bnad->rx_info[i].rx_ctrl[j].ccb) {
				stats->rx_packets += bnad->rx_info[i].
				rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
				stats->rx_bytes += bnad->rx_info[i].
					rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
					bnad->rx_info[i].rx_ctrl[j].ccb->
					rcb[1]->rxq) {
					stats->rx_packets +=
						bnad->rx_info[i].rx_ctrl[j].
						ccb->rcb[1]->rxq->rx_packets;
					stats->rx_bytes +=
						bnad->rx_info[i].rx_ctrl[j].
						ccb->rcb[1]->rxq->rx_bytes;
				}
			}
		}
	}
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			if (bnad->tx_info[i].tcb[j]) {
				stats->tx_packets +=
				bnad->tx_info[i].tcb[j]->txq->tx_packets;
				stats->tx_bytes +=
					bnad->tx_info[i].tcb[j]->txq->tx_bytes;
			}
		}
	}
}
/*
 * Must be called with the bna_lock held.
 */
void
bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
{
	struct bfi_enet_stats_mac *mac_stats;
	u32 bmap;
	int i;

	mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
	stats->rx_errors =
		mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
		mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
		mac_stats->rx_undersize;
	stats->tx_errors = mac_stats->tx_fcs_error +
					mac_stats->tx_undersize;
	stats->rx_dropped = mac_stats->rx_drop;
	stats->tx_dropped = mac_stats->tx_drop;
	stats->multicast = mac_stats->rx_multicast;
	stats->collisions = mac_stats->tx_total_collision;

	stats->rx_length_errors = mac_stats->rx_frame_length_error;

	/* receive ring buffer overflow  ?? */

	stats->rx_crc_errors = mac_stats->rx_fcs_error;
	stats->rx_frame_errors = mac_stats->rx_alignment_error;
	/* recv'r fifo overrun */
	bmap = bna_rx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1) {
			stats->rx_fifo_errors +=
				bnad->stats.bna_stats->
					hw_stats.rxf_stats[i].frame_drops;
			break;
		}
		bmap >>= 1;
	}
}
static void
bnad_mbox_irq_sync(struct bnad *bnad)
{
	u32 irq;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX)
		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
	else
		irq = bnad->pcidev->irq;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	synchronize_irq(irq);
}

/* Utility used by bnad_start_xmit, for doing TSO */
static int
bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
{
	int err;

	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err) {
			BNAD_UPDATE_CTR(bnad, tso_err);
			return err;
		}
	}

	/*
	 * For TSO, the TCP checksum field is seeded with pseudo-header sum
	 * excluding the length field.
	 */
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		/* Do we really need these? */
		iph->tot_len = 0;
		iph->check = 0;

		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
					   IPPROTO_TCP, 0);
		BNAD_UPDATE_CTR(bnad, tso4);
	} else {
		struct ipv6hdr *ipv6h = ipv6_hdr(skb);

		ipv6h->payload_len = 0;
		tcp_hdr(skb)->check =
			~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
					 IPPROTO_TCP, 0);
		BNAD_UPDATE_CTR(bnad, tso6);
	}

	return 0;
}
/*
 * Initialize Q numbers depending on Rx Paths
 * Called with bnad->bna_lock held, because of cfg_flags
 * access.
 */
static void
bnad_q_num_init(struct bnad *bnad)
{
	int rxps;

	rxps = min((uint)num_online_cpus(),
			(uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));

	if (!(bnad->cfg_flags & BNAD_CF_MSIX))
		rxps = 1;	/* INTx */

	bnad->num_rx = 1;
	bnad->num_tx = 1;
	bnad->num_rxp_per_rx = rxps;
	bnad->num_txq_per_tx = BNAD_TXQ_NUM;
}

/*
 * Adjusts the Q numbers, given a number of msix vectors
 * Give preference to RSS as opposed to Tx priority Queues,
 * in such a case, just use 1 Tx Q
 * Called with bnad->bna_lock held b'cos of cfg_flags access
 */
static void
bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
{
	bnad->num_txq_per_tx = 1;
	if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx)  +
	     bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
	    (bnad->cfg_flags & BNAD_CF_MSIX)) {
		bnad->num_rxp_per_rx = msix_vectors -
			(bnad->num_tx * bnad->num_txq_per_tx) -
			BNAD_MAILBOX_MSIX_VECTORS;
	} else
		bnad->num_rxp_per_rx = 1;
}
/* Enable / disable ioceth */
static int
bnad_ioceth_disable(struct bnad *bnad)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	init_completion(&bnad->bnad_completions.ioc_comp);
	bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
		msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));

	err = bnad->bnad_completions.ioc_comp_status;
	return err;
}

static int
bnad_ioceth_enable(struct bnad *bnad)
{
	int err = 0;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	init_completion(&bnad->bnad_completions.ioc_comp);
	bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
	bna_ioceth_enable(&bnad->bna.ioceth);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
		msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));

	err = bnad->bnad_completions.ioc_comp_status;

	return err;
}
/* Free BNA resources */
static void
bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
		u32 res_val_max)
{
	int i;

	for (i = 0; i < res_val_max; i++)
		bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
}

/* Allocates memory and interrupt resources for BNA */
static int
bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		u32 res_val_max)
{
	int i, err;

	for (i = 0; i < res_val_max; i++) {
		err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_res_free(bnad, res_info, res_val_max);
	return err;
}
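/*
 * MSI-X setup below tries the full vector count first; on a partial
 * allocation it shrinks the queue configuration and retries once, and on
 * any further failure falls back to INTx mode.
 */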
/* Interrupt enable / disable */
static void
bnad_enable_msix(struct bnad *bnad)
{
	int i, ret;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (bnad->msix_table)
		return;

	bnad->msix_table =
		kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);

	if (!bnad->msix_table)
		goto intx_mode;

	for (i = 0; i < bnad->msix_num; i++)
		bnad->msix_table[i].entry = i;

	ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
	if (ret > 0) {
		/* Not enough MSI-X vectors. */
		pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
			ret, bnad->msix_num);

		spin_lock_irqsave(&bnad->bna_lock, flags);
		/* ret = #of vectors that we got */
		bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
			(ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);

		bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
			 BNAD_MAILBOX_MSIX_VECTORS;

		if (bnad->msix_num > ret)
			goto intx_mode;

		/* Try once more with adjusted numbers */
		/* If this fails, fall back to INTx */
		ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
				      bnad->msix_num);
		if (ret)
			goto intx_mode;

	} else if (ret < 0)
		goto intx_mode;

	pci_intx(bnad->pcidev, 0);

	return;

intx_mode:
	pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");

	kfree(bnad->msix_table);
	bnad->msix_table = NULL;
	bnad->msix_num = 0;
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad->cfg_flags &= ~BNAD_CF_MSIX;
	bnad_q_num_init(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_disable_msix(struct bnad *bnad)
{
	u32 cfg_flags;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	if (bnad->cfg_flags & BNAD_CF_MSIX)
		bnad->cfg_flags &= ~BNAD_CF_MSIX;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		pci_disable_msix(bnad->pcidev);
		kfree(bnad->msix_table);
		bnad->msix_table = NULL;
	}
}
/* Netdev entry points */
static int
bnad_open(struct net_device *netdev)
{
	int err;
	struct bnad *bnad = netdev_priv(netdev);
	struct bna_pause_config pause_config;
	int mtu;
	unsigned long flags;

	mutex_lock(&bnad->conf_mutex);

	/* Tx */
	err = bnad_setup_tx(bnad, 0);
	if (err)
		goto err_return;

	/* Rx */
	err = bnad_setup_rx(bnad, 0);
	if (err)
		goto cleanup_tx;

	/* Port */
	pause_config.tx_pause = 0;
	pause_config.rx_pause = 0;

	mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
	bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
	bna_enet_enable(&bnad->bna.enet);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Enable broadcast */
	bnad_enable_default_bcast(bnad);

	/* Restore VLANs, if any */
	bnad_restore_vlans(bnad, 0);

	/* Set the UCAST address */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Start the stats timer */
	bnad_stats_timer_start(bnad);

	mutex_unlock(&bnad->conf_mutex);

	return 0;

cleanup_tx:
	bnad_cleanup_tx(bnad, 0);

err_return:
	mutex_unlock(&bnad->conf_mutex);
	return err;
}
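/*
 * Bring-up order in bnad_open(): Tx then Rx data paths, enet
 * MTU/pause/enable under bna_lock, broadcast/VLAN/unicast address
 * restore, and finally the stats timer. bnad_stop() below tears things
 * down in roughly the reverse order.
 */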
static int
bnad_stop(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	mutex_lock(&bnad->conf_mutex);

	/* Stop the stats timer */
	bnad_stats_timer_stop(bnad);

	init_completion(&bnad->bnad_completions.enet_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
			bnad_cb_enet_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion(&bnad->bnad_completions.enet_comp);

	bnad_cleanup_tx(bnad, 0);
	bnad_cleanup_rx(bnad, 0);

	/* Synchronize mailbox IRQ */
	bnad_mbox_irq_sync(bnad);

	mutex_unlock(&bnad->conf_mutex);

	return 0;
}
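/*
 * bna_enet_disable() completes asynchronously through
 * bnad_cb_enet_disabled(), hence the init_completion() /
 * wait_for_completion() pair: Tx/Rx cleanup must not start until the
 * enet is known to be down.
 */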
/*
 * bnad_start_xmit : Netdev entry point for Transmit
 *		     Called under lock held by net_device
 */
static netdev_tx_t
bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	u32 txq_id = 0;
	struct bna_tcb *tcb = bnad->tx_info[0].tcb[txq_id];

	u16 txq_prod, vlan_tag = 0;
	u32 unmap_prod, wis, wis_used, wi_range;
	u32 vectors, vect_id, i, acked;
	int err;
	unsigned int len;
	u32 gso_size;

	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	dma_addr_t dma_addr;
	struct bna_txq_entry *txqent;
	u16 flags;

	if (unlikely(skb->len <= ETH_HLEN)) {
		dev_kfree_skb(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
		return NETDEV_TX_OK;
	}
	if (unlikely(skb_headlen(skb) > BFI_TX_MAX_DATA_PER_VECTOR)) {
		dev_kfree_skb(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_too_long);
		return NETDEV_TX_OK;
	}
	if (unlikely(skb_headlen(skb) == 0)) {
		dev_kfree_skb(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
		return NETDEV_TX_OK;
	}

	/*
	 * Takes care of the Tx that is scheduled between clearing the flag
	 * and the netif_tx_stop_all_queues() call.
	 */
	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
		dev_kfree_skb(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
		return NETDEV_TX_OK;
	}

	vectors = 1 + skb_shinfo(skb)->nr_frags;
	if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
		dev_kfree_skb(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
		return NETDEV_TX_OK;
	}
	wis = BNA_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */
	acked = 0;
	if (unlikely(wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
		     vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
		if ((u16) (*tcb->hw_consumer_index) !=
		    tcb->consumer_index &&
		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
			acked = bnad_free_txbufs(bnad, tcb);
			if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
				bna_ib_ack(tcb->i_dbell, acked);
			smp_mb__before_clear_bit();
			clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
		} else {
			netif_stop_queue(netdev);
			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
		}

		smp_mb();
		/*
		 * Check again to deal with race condition between
		 * netif_stop_queue here, and netif_wake_queue in
		 * interrupt handler which is not inside netif tx lock.
		 */
		if (likely
		    (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
		     vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
			return NETDEV_TX_BUSY;
		} else {
			netif_wake_queue(netdev);
			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
		}
	}

	unmap_prod = unmap_q->producer_index;
	flags = 0;

	txq_prod = tcb->producer_index;
	BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
	txqent->hdr.wi.reserved = 0;
	txqent->hdr.wi.num_vectors = vectors;

	if (vlan_tx_tag_present(skb)) {
		vlan_tag = (u16) vlan_tx_tag_get(skb);
		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
	}
	if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
		vlan_tag =
			(tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
	}

	txqent->hdr.wi.vlan_tag = htons(vlan_tag);

	if (skb_is_gso(skb)) {
		gso_size = skb_shinfo(skb)->gso_size;

		if (unlikely(gso_size > netdev->mtu)) {
			dev_kfree_skb(skb);
			BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
			return NETDEV_TX_OK;
		}
		if (unlikely((gso_size + skb_transport_offset(skb) +
			      tcp_hdrlen(skb)) >= skb->len)) {
			txqent->hdr.wi.opcode =
				__constant_htons(BNA_TXQ_WI_SEND);
			txqent->hdr.wi.lso_mss = 0;
			BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
		} else {
			txqent->hdr.wi.opcode =
				__constant_htons(BNA_TXQ_WI_SEND_LSO);
			txqent->hdr.wi.lso_mss = htons(gso_size);
		}

		err = bnad_tso_prepare(bnad, skb);
		if (unlikely(err)) {
			dev_kfree_skb(skb);
			BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
			return NETDEV_TX_OK;
		}
		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
		txqent->hdr.wi.l4_hdr_size_n_offset =
			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
			      (tcp_hdrlen(skb) >> 2,
			       skb_transport_offset(skb)));
	} else {
		txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
		txqent->hdr.wi.lso_mss = 0;

		if (unlikely(skb->len > (netdev->mtu + ETH_HLEN))) {
			dev_kfree_skb(skb);
			BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
			return NETDEV_TX_OK;
		}

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			u8 proto = 0;

			if (skb->protocol == __constant_htons(ETH_P_IP))
				proto = ip_hdr(skb)->protocol;
			else if (skb->protocol ==
				 __constant_htons(ETH_P_IPV6)) {
				/* nexthdr may not be TCP immediately. */
				proto = ipv6_hdr(skb)->nexthdr;
			}
			if (proto == IPPROTO_TCP) {
				flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
				txqent->hdr.wi.l4_hdr_size_n_offset =
					htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
					      (0, skb_transport_offset(skb)));

				BNAD_UPDATE_CTR(bnad, tcpcsum_offload);

				if (unlikely(skb_headlen(skb) <
				    skb_transport_offset(skb) +
				    tcp_hdrlen(skb))) {
					dev_kfree_skb(skb);
					BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
					return NETDEV_TX_OK;
				}

			} else if (proto == IPPROTO_UDP) {
				flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
				txqent->hdr.wi.l4_hdr_size_n_offset =
					htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
					      (0, skb_transport_offset(skb)));

				BNAD_UPDATE_CTR(bnad, udpcsum_offload);
				if (unlikely(skb_headlen(skb) <
					     skb_transport_offset(skb) +
					     sizeof(struct udphdr))) {
					dev_kfree_skb(skb);
					BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
					return NETDEV_TX_OK;
				}
			} else {
				dev_kfree_skb(skb);
				BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
				return NETDEV_TX_OK;
			}
		} else {
			txqent->hdr.wi.l4_hdr_size_n_offset = 0;
		}
	}

	txqent->hdr.wi.flags = htons(flags);

	txqent->hdr.wi.frame_length = htonl(skb->len);

	unmap_q->unmap_array[unmap_prod].skb = skb;
	len = skb_headlen(skb);
	txqent->vector[0].length = htons(len);
	dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
				  skb_headlen(skb), DMA_TO_DEVICE);
	dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
			   dma_addr);

	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
	BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);

	vect_id = 0;
	wis_used = 1;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		u16 size = skb_frag_size(frag);

		if (unlikely(size == 0)) {
			unmap_prod = unmap_q->producer_index;

			unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
						unmap_q->unmap_array,
						unmap_prod, unmap_q->q_depth,
						skb, i);
			dev_kfree_skb(skb);
			BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
			return NETDEV_TX_OK;
		}

		len += size;

		if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
			vect_id = 0;
			if (--wi_range)
				txqent++;
			else {
				BNA_QE_INDX_ADD(txq_prod, wis_used,
						tcb->q_depth);
				wis_used = 0;
				BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
						     txqent, wi_range);
			}
			wis_used++;
			txqent->hdr.wi_ext.opcode =
				__constant_htons(BNA_TXQ_WI_EXTENSION);
		}

		BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
		txqent->vector[vect_id].length = htons(size);
		dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
					    0, size, DMA_TO_DEVICE);
		dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
				   dma_addr);
		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
	}

	if (unlikely(len != skb->len)) {
		unmap_prod = unmap_q->producer_index;

		unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
					unmap_q->unmap_array, unmap_prod,
					unmap_q->q_depth, skb,
					skb_shinfo(skb)->nr_frags);
		dev_kfree_skb(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
		return NETDEV_TX_OK;
	}

	unmap_q->producer_index = unmap_prod;
	BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
	tcb->producer_index = txq_prod;

	smp_mb();

	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		return NETDEV_TX_OK;

	bna_txq_prod_indx_doorbell(tcb);
	smp_mb();

	if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
		tasklet_schedule(&bnad->tx_free_tasklet);

	return NETDEV_TX_OK;
}
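/*
 * Work-item accounting in the transmit path above, as a concrete
 * example: a work item carries up to BFI_TX_MAX_VECTORS_PER_WI (4)
 * vectors, so an skb with a linear area plus 6 fragments (7 vectors)
 * needs BNA_TXQ_WI_NEEDED(7) = 2 work items, the second one tagged
 * BNA_TXQ_WI_EXTENSION.
 */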
/*
 * Used spin_lock to synchronize reading of stats structures, which
 * are written by BNA under the same lock.
 */
static struct rtnl_link_stats64 *
bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	bnad_netdev_qstats_fill(bnad, stats);
	bnad_netdev_hwstats_fill(bnad, stats);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return stats;
}
void
bnad_set_rx_mode(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	u32 new_mask, valid_mask;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	new_mask = valid_mask = 0;

	if (netdev->flags & IFF_PROMISC) {
		if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
			new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
			valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
			bnad->cfg_flags |= BNAD_CF_PROMISC;
		}
	} else {
		if (bnad->cfg_flags & BNAD_CF_PROMISC) {
			new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
			valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
			bnad->cfg_flags &= ~BNAD_CF_PROMISC;
		}
	}

	if (netdev->flags & IFF_ALLMULTI) {
		if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
			new_mask |= BNA_RXMODE_ALLMULTI;
			valid_mask |= BNA_RXMODE_ALLMULTI;
			bnad->cfg_flags |= BNAD_CF_ALLMULTI;
		}
	} else {
		if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
			new_mask &= ~BNA_RXMODE_ALLMULTI;
			valid_mask |= BNA_RXMODE_ALLMULTI;
			bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
		}
	}

	if (bnad->rx_info[0].rx == NULL)
		goto unlock;

	bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);

	if (!netdev_mc_empty(netdev)) {
		u8 *mcaddr_list;
		int mc_count = netdev_mc_count(netdev);

		/* Index 0 holds the broadcast address */
		mcaddr_list =
			kzalloc((mc_count + 1) * ETH_ALEN,
				GFP_ATOMIC);
		if (!mcaddr_list)
			goto unlock;

		memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);

		/* Copy rest of the MC addresses */
		bnad_netdev_mc_list_get(netdev, mcaddr_list);

		bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
					mcaddr_list, NULL);

		/* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
		kfree(mcaddr_list);
	}
unlock:
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
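/*
 * The multicast list above is allocated with GFP_ATOMIC because
 * bna_lock is held with IRQs disabled across the whole function; slot 0
 * is reserved for the broadcast address, so the adapter is programmed
 * with mc_count + 1 entries.
 */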
/*
 * bna_lock is used to sync writes to netdev->addr.
 * conf_lock cannot be used since this call may be made
 * in a non-blocking context.
 */
static int
bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
{
	int err;
	struct bnad *bnad = netdev_priv(netdev);
	struct sockaddr *sa = (struct sockaddr *)mac_addr;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	err = bnad_mac_addr_set_locked(bnad, sa->sa_data);

	if (!err)
		memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return err;
}
static int
bnad_mtu_set(struct bnad *bnad, int mtu)
{
	unsigned long flags;

	init_completion(&bnad->bnad_completions.mtu_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion(&bnad->bnad_completions.mtu_comp);

	return bnad->bnad_completions.mtu_comp_status;
}
static int
bnad_change_mtu(struct net_device *netdev, int new_mtu)
{
	int err, mtu = netdev->mtu;
	struct bnad *bnad = netdev_priv(netdev);

	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
		return -EINVAL;

	mutex_lock(&bnad->conf_mutex);

	netdev->mtu = new_mtu;

	mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
	err = bnad_mtu_set(bnad, mtu);
	if (err)
		err = -EBUSY;

	mutex_unlock(&bnad->conf_mutex);
	return err;
}
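/*
 * Example of the on-wire MTU computed above: for the standard new_mtu
 * of 1500, the enet is programmed with 14 (Ethernet header) + 4 (VLAN
 * tag) + 1500 + 4 (FCS) = 1522 bytes.
 */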
static int
bnad_vlan_rx_add_vid(struct net_device *netdev,
		     unsigned short vid)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	if (!bnad->rx_info[0].rx)
		return 0;

	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
	set_bit(vid, bnad->active_vlans);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);

	return 0;
}

static int
bnad_vlan_rx_kill_vid(struct net_device *netdev,
		      unsigned short vid)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	if (!bnad->rx_info[0].rx)
		return 0;

	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	clear_bit(vid, bnad->active_vlans);
	bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);

	return 0;
}
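/*
 * active_vlans mirrors the VLAN filter state programmed into the
 * hardware; bnad_restore_vlans(), called from bnad_open(), replays this
 * bitmap after the Rx path is rebuilt.
 */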
#ifdef CONFIG_NET_POLL_CONTROLLER
static void
bnad_netpoll(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	u32 curr_mask;
	int i, j;

	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
		bna_intx_disable(&bnad->bna, curr_mask);
		bnad_isr(bnad->pcidev->irq, netdev);
		bna_intx_enable(&bnad->bna, curr_mask);
	} else {
		/*
		 * Tx processing may happen in sending context, so no need
		 * to explicitly process completions here
		 */

		/* Rx processing */
		for (i = 0; i < bnad->num_rx; i++) {
			rx_info = &bnad->rx_info[i];
			if (!rx_info->rx)
				continue;
			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
				rx_ctrl = &rx_info->rx_ctrl[j];
				if (rx_ctrl->ccb)
					bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
			}
		}
	}
}
#endif
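/*
 * In INTx mode netpoll masks interrupts and runs the ISR inline; in
 * MSI-X mode only the Rx paths are polled, since Tx completions are
 * also reaped from the transmit path itself (see bnad_start_xmit()).
 */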
static const struct net_device_ops bnad_netdev_ops = {
	.ndo_open		= bnad_open,
	.ndo_stop		= bnad_stop,
	.ndo_start_xmit		= bnad_start_xmit,
	.ndo_get_stats64	= bnad_get_stats64,
	.ndo_set_rx_mode	= bnad_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnad_set_mac_address,
	.ndo_change_mtu		= bnad_change_mtu,
	.ndo_vlan_rx_add_vid	= bnad_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= bnad_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bnad_netpoll
#endif
};
static void
bnad_netdev_init(struct bnad *bnad, bool using_dac)
{
	struct net_device *netdev = bnad->netdev;

	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;

	netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->mem_start = bnad->mmio_start;
	netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;

	netdev->netdev_ops = &bnad_netdev_ops;
	bnad_set_ethtool_ops(netdev);
}
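/*
 * hw_features lists the offloads a user may toggle through ethtool;
 * NETIF_F_HW_VLAN_RX and NETIF_F_HW_VLAN_FILTER are added to features
 * only, so VLAN stripping and filtering remain permanently enabled.
 */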
/*
 * 1. Initialize the bnad structure
 * 2. Setup netdev pointer in pci_dev
 * 3. Initialize Tx free tasklet
 * 4. Initialize no. of TxQ & CQs & MSIX vectors
 */
static int
bnad_init(struct bnad *bnad,
	  struct pci_dev *pdev, struct net_device *netdev)
{
	unsigned long flags;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	bnad->netdev = netdev;
	bnad->pcidev = pdev;
	bnad->mmio_start = pci_resource_start(pdev, 0);
	bnad->mmio_len = pci_resource_len(pdev, 0);
	bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
	if (!bnad->bar0) {
		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
		pci_set_drvdata(pdev, NULL);
		return -ENOMEM;
	}
	pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
		(unsigned long long) bnad->mmio_len);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!bnad_msix_disable)
		bnad->cfg_flags = BNAD_CF_MSIX;

	bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;

	bnad_q_num_init(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
		(bnad->num_rx * bnad->num_rxp_per_rx) +
		BNAD_MAILBOX_MSIX_VECTORS;

	bnad->txq_depth = BNAD_TXQ_DEPTH;
	bnad->rxq_depth = BNAD_RXQ_DEPTH;

	bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
	bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;

	tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
		     (unsigned long)bnad);

	return 0;
}
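/*
 * In the default single Tx queue / single Rx path configuration the
 * msix_num computed above works out to 1 + 1 +
 * BNAD_MAILBOX_MSIX_VECTORS; bnad_enable_msix() may later shrink the
 * queue counts if the platform grants fewer vectors.
 */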
/*
 * Must be called after bnad_pci_uninit()
 * so that iounmap() and pci_set_drvdata(NULL)
 * happen only after PCI uninitialization.
 */
static void
bnad_uninit(struct bnad *bnad)
{
	if (bnad->bar0)
		iounmap(bnad->bar0);
	pci_set_drvdata(bnad->pcidev, NULL);
}
/*
 * Initialize locks
 *	a) Per-ioceth mutex used for serializing configuration
 *	   changes from the OS interface
 *	b) Spin lock used to protect the bna state machine
 */
static void
bnad_lock_init(struct bnad *bnad)
{
	spin_lock_init(&bnad->bna_lock);
	mutex_init(&bnad->conf_mutex);
}

static void
bnad_lock_uninit(struct bnad *bnad)
{
	mutex_destroy(&bnad->conf_mutex);
}
/* PCI Initialization */
static int
bnad_pci_init(struct bnad *bnad,
	      struct pci_dev *pdev, bool *using_dac)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;
	err = pci_request_regions(pdev, BNAD_NAME);
	if (err)
		goto disable_device;
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		*using_dac = true;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err)
				goto release_regions;
		}
		*using_dac = false;
	}
	pci_set_master(pdev);
	return 0;

release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);

	return err;
}
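/*
 * DMA mask negotiation above: try 64-bit streaming and coherent masks
 * first (*using_dac = true); if the platform refuses, fall back to
 * 32-bit masks and clear *using_dac so bnad_netdev_init() skips
 * NETIF_F_HIGHDMA.
 */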
static void
bnad_pci_uninit(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
static int __devinit
bnad_pci_probe(struct pci_dev *pdev,
		const struct pci_device_id *pcidev_id)
{
	bool using_dac;
	int err;
	struct bnad *bnad;
	struct bna *bna;
	struct net_device *netdev;
	struct bfa_pcidev pcidev_info;
	unsigned long flags;

	pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
		pdev, pcidev_id, PCI_FUNC(pdev->devfn));

	mutex_lock(&bnad_fwimg_mutex);
	if (!cna_get_firmware_buf(pdev)) {
		mutex_unlock(&bnad_fwimg_mutex);
		pr_warn("Failed to load Firmware Image!\n");
		return -ENODEV;
	}
	mutex_unlock(&bnad_fwimg_mutex);

	/*
	 * Allocates sizeof(struct net_device + struct bnad)
	 * bnad = netdev->priv
	 */
	netdev = alloc_etherdev(sizeof(struct bnad));
	if (!netdev) {
		dev_err(&pdev->dev, "netdev allocation failed\n");
		err = -ENOMEM;
		return err;
	}
	bnad = netdev_priv(netdev);

	bnad_lock_init(bnad);

	mutex_lock(&bnad->conf_mutex);
	/*
	 * PCI initialization
	 *	Output : using_dac = 1 for 64 bit DMA
	 *			   = 0 for 32 bit DMA
	 */
	err = bnad_pci_init(bnad, pdev, &using_dac);
	if (err)
		goto unlock_mutex;

	/*
	 * Initialize bnad structure
	 * Setup relation between pci_dev & netdev
	 * Init Tx free tasklet
	 */
	err = bnad_init(bnad, pdev, netdev);
	if (err)
		goto pci_uninit;

	/* Initialize netdev structure, set up ethtool ops */
	bnad_netdev_init(bnad, using_dac);

	/* Set link to down state */
	netif_carrier_off(netdev);

	/* Get resource requirement from bna */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_res_req(&bnad->res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Allocate resources from bna */
	err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
	if (err)
		goto drv_uninit;

	bna = &bnad->bna;

	/* Setup pcidev_info for bna_init() */
	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
	pcidev_info.device_id = bnad->pcidev->device;
	pcidev_info.pci_bar_kva = bnad->bar0;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad->stats.bna_stats = &bna->stats;

	bnad_enable_msix(bnad);
	err = bnad_mbox_irq_alloc(bnad);
	if (err)
		goto res_free;

	/* Set up timers */
	setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
		    ((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
		    ((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
		    ((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
		    ((unsigned long)bnad));

	/* Now start the timer before calling IOC */
	mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
		  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));

	/*
	 * Start the chip
	 * If the call back comes with error, we bail out.
	 * This is a catastrophic error.
	 */
	err = bnad_ioceth_enable(bnad);
	if (err) {
		pr_err("BNA: Initialization failed err=%d\n",
		       err);
		goto probe_success;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
	    bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
		bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
			bna_attr(bna)->num_rxp - 1);
		if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
		    bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
			err = -EIO;
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (err)
		goto disable_ioceth;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
	if (err) {
		err = -EIO;
		goto disable_ioceth;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Get the burnt-in mac */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
	bnad_set_netdev_perm_addr(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);

	/* Finally, register with net_device layer */
	err = register_netdev(netdev);
	if (err) {
		pr_err("BNA : Registering with netdev failed\n");
		goto probe_uninit;
	}
	set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);

	return 0;

probe_success:
	mutex_unlock(&bnad->conf_mutex);
	return 0;

probe_uninit:
	mutex_lock(&bnad->conf_mutex);
	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
disable_ioceth:
	bnad_ioceth_disable(bnad);
	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	bnad_mbox_irq_free(bnad);
	bnad_disable_msix(bnad);
res_free:
	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
drv_uninit:
	bnad_uninit(bnad);
pci_uninit:
	bnad_pci_uninit(pdev);
unlock_mutex:
	mutex_unlock(&bnad->conf_mutex);
	bnad_lock_uninit(bnad);
	free_netdev(netdev);
	return err;
}
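/*
 * The error labels in bnad_pci_probe() unwind strictly in reverse order
 * of setup: module resources, ioceth and its timers, BNA resources,
 * bnad/PCI state, and finally the locks and the netdev allocation.
 */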
static void __devexit
bnad_pci_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnad *bnad;
	struct bna *bna;
	unsigned long flags;

	if (!netdev)
		return;

	pr_info("%s bnad_pci_remove\n", netdev->name);
	bnad = netdev_priv(netdev);
	bna = &bnad->bna;

	if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
		unregister_netdev(netdev);

	mutex_lock(&bnad->conf_mutex);
	bnad_ioceth_disable(bnad);
	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
	bnad_mbox_irq_free(bnad);
	bnad_disable_msix(bnad);
	bnad_pci_uninit(pdev);
	mutex_unlock(&bnad->conf_mutex);
	bnad_lock_uninit(bnad);
	bnad_uninit(bnad);
	free_netdev(netdev);
}
static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
			PCI_DEVICE_ID_BROCADE_CT),
		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
		.class_mask = 0xffff00
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
			BFA_PCI_DEVICE_ID_CT2),
		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
		.class_mask = 0xffff00
	},
	{0, },
};

MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);

static struct pci_driver bnad_pci_driver = {
	.name = BNAD_NAME,
	.id_table = bnad_pci_id_table,
	.probe = bnad_pci_probe,
	.remove = __devexit_p(bnad_pci_remove),
};
static int __init
bnad_module_init(void)
{
	int err;

	pr_info("Brocade 10G Ethernet driver - version: %s\n",
		BNAD_VERSION);

	bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);

	err = pci_register_driver(&bnad_pci_driver);
	if (err < 0) {
		pr_err("bna : PCI registration failed in module init "
		       "(%d)\n", err);
		return err;
	}

	return 0;
}

static void __exit
bnad_module_exit(void)
{
	pci_unregister_driver(&bnad_pci_driver);

	if (bfi_fw)
		release_firmware(bfi_fw);
}

module_init(bnad_module_init);
module_exit(bnad_module_exit);

MODULE_AUTHOR("Brocade");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
MODULE_VERSION(BNAD_VERSION);
MODULE_FIRMWARE(CNA_FW_FILE_CT);
MODULE_FIRMWARE(CNA_FW_FILE_CT2);