/* drivers/net/ethernet/brocade/bna/bnad.c */

/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */

#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/prefetch.h>
#include <linux/module.h>

#include "bnad.h"
#include "bna.h"
#include "cna.h"

static DEFINE_MUTEX(bnad_fwimg_mutex);

/*
 * Module params
 */
static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");

static uint bna_debugfs_enable = 1;
module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
                 " Range[false:0|true:1]");

/*
 * Global variables
 */
u32 bnad_rxqs_per_cq = 2;
static u32 bna_id;
static struct mutex bnad_list_mutex;
static LIST_HEAD(bnad_list);
static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/*
 * Local MACROS
 */
#define BNAD_TX_UNMAPQ_DEPTH    (bnad->txq_depth * 2)

#define BNAD_RX_UNMAPQ_DEPTH    (bnad->rxq_depth)

#define BNAD_GET_MBOX_IRQ(_bnad)                                 \
        (((_bnad)->cfg_flags & BNAD_CF_MSIX) ?                   \
         ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
         ((_bnad)->pcidev->irq))

#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth)       \
do {                                                            \
        (_res_info)->res_type = BNA_RES_T_MEM;                  \
        (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;   \
        (_res_info)->res_u.mem_info.num = (_num);               \
        (_res_info)->res_u.mem_info.len =                       \
        sizeof(struct bnad_unmap_q) +                           \
        (sizeof(struct bnad_skb_unmap) * ((_depth) - 1));       \
} while (0)

#define BNAD_TXRX_SYNC_MDELAY   250     /* 250 msecs */

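/*
 * bnad_list bookkeeping: each adapter is added to the global list under
 * bnad_list_mutex and assigned a monotonically increasing id (bna_id).
 */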
static void
bnad_add_to_list(struct bnad *bnad)
{
        mutex_lock(&bnad_list_mutex);
        list_add_tail(&bnad->list_entry, &bnad_list);
        bnad->id = bna_id++;
        mutex_unlock(&bnad_list_mutex);
}

static void
bnad_remove_from_list(struct bnad *bnad)
{
        mutex_lock(&bnad_list_mutex);
        list_del(&bnad->list_entry);
        mutex_unlock(&bnad_list_mutex);
}

/*
 * Reinitialize completions in CQ, once Rx is taken down
 */
static void
bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
{
        struct bna_cq_entry *cmpl, *next_cmpl;
        unsigned int wi_range, wis = 0, ccb_prod = 0;
        int i;

        BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
                            wi_range);

        for (i = 0; i < ccb->q_depth; i++) {
                wis++;
                if (likely(--wi_range))
                        next_cmpl = cmpl + 1;
                else {
                        BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
                        wis = 0;
                        BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
                                            next_cmpl, wi_range);
                }
                cmpl->valid = 0;
                cmpl = next_cmpl;
        }
}

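/*
 * Unmap the skb header and all of its page fragments from the unmap
 * array starting at 'index'; returns the index that follows the last
 * entry unmapped.
 */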
static u32
bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
                   u32 index, u32 depth, struct sk_buff *skb, u32 frag)
{
        int j;
        array[index].skb = NULL;

        dma_unmap_single(pdev, dma_unmap_addr(&array[index], dma_addr),
                         skb_headlen(skb), DMA_TO_DEVICE);
        dma_unmap_addr_set(&array[index], dma_addr, 0);
        BNA_QE_INDX_ADD(index, 1, depth);

        for (j = 0; j < frag; j++) {
                dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
                               skb_frag_size(&skb_shinfo(skb)->frags[j]),
                               DMA_TO_DEVICE);
                dma_unmap_addr_set(&array[index], dma_addr, 0);
                BNA_QE_INDX_ADD(index, 1, depth);
        }

        return index;
}

/*
 * Frees all pending Tx Bufs
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
static void
bnad_free_all_txbufs(struct bnad *bnad,
                     struct bna_tcb *tcb)
{
        u32 unmap_cons;
        struct bnad_unmap_q *unmap_q = tcb->unmap_q;
        struct bnad_skb_unmap *unmap_array;
        struct sk_buff *skb = NULL;
        int q;

        unmap_array = unmap_q->unmap_array;

        for (q = 0; q < unmap_q->q_depth; q++) {
                skb = unmap_array[q].skb;
                if (!skb)
                        continue;

                unmap_cons = q;
                unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
                                unmap_cons, unmap_q->q_depth, skb,
                                skb_shinfo(skb)->nr_frags);

                dev_kfree_skb_any(skb);
        }
}

/* Data Path Handlers */

/*
 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 *                  b) Sending context
 *                  c) Tasklet context
 */
static u32
bnad_free_txbufs(struct bnad *bnad,
                 struct bna_tcb *tcb)
{
        u32 unmap_cons, sent_packets = 0, sent_bytes = 0;
        u16 wis, updated_hw_cons;
        struct bnad_unmap_q *unmap_q = tcb->unmap_q;
        struct bnad_skb_unmap *unmap_array;
        struct sk_buff *skb;

        /*
         * Just return if TX is stopped. This check is useful
         * when bnad_free_txbufs() runs out of a tasklet scheduled
         * before bnad_cb_tx_cleanup() cleared BNAD_TXQ_TX_STARTED bit
         * but this routine runs actually after the cleanup has been
         * executed.
         */
        if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
                return 0;

        updated_hw_cons = *(tcb->hw_consumer_index);

        wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
                                 updated_hw_cons, tcb->q_depth);

        BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));

        unmap_array = unmap_q->unmap_array;
        unmap_cons = unmap_q->consumer_index;

        prefetch(&unmap_array[unmap_cons + 1]);
        while (wis) {
                skb = unmap_array[unmap_cons].skb;

                sent_packets++;
                sent_bytes += skb->len;
                wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);

                unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
                                unmap_cons, unmap_q->q_depth, skb,
                                skb_shinfo(skb)->nr_frags);

                dev_kfree_skb_any(skb);
        }

        /* Update consumer pointers. */
        tcb->consumer_index = updated_hw_cons;
        unmap_q->consumer_index = unmap_cons;

        tcb->txq->tx_packets += sent_packets;
        tcb->txq->tx_bytes += sent_bytes;

        return sent_packets;
}

/* Tx Free Tasklet function */
/* Frees for all the tcb's in all the Tx's */
/*
 * Scheduled from sending context, so that
 * the fat Tx lock is not held for too long
 * in the sending context.
 */
static void
bnad_tx_free_tasklet(unsigned long bnad_ptr)
{
        struct bnad *bnad = (struct bnad *)bnad_ptr;
        struct bna_tcb *tcb;
        u32 acked = 0;
        int i, j;

        for (i = 0; i < bnad->num_tx; i++) {
                for (j = 0; j < bnad->num_txq_per_tx; j++) {
                        tcb = bnad->tx_info[i].tcb[j];
                        if (!tcb)
                                continue;
                        if (((u16) (*tcb->hw_consumer_index) !=
                                tcb->consumer_index) &&
                                (!test_and_set_bit(BNAD_TXQ_FREE_SENT,
                                                   &tcb->flags))) {
                                acked = bnad_free_txbufs(bnad, tcb);
                                if (likely(test_bit(BNAD_TXQ_TX_STARTED,
                                        &tcb->flags)))
                                        bna_ib_ack(tcb->i_dbell, acked);
                                smp_mb__before_clear_bit();
                                clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
                        }
                        if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
                                               &tcb->flags)))
                                continue;
                        if (netif_queue_stopped(bnad->netdev)) {
                                if (acked && netif_carrier_ok(bnad->netdev) &&
                                        BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
                                                BNAD_NETIF_WAKE_THRESHOLD) {
                                        netif_wake_queue(bnad->netdev);
                                        /* TODO */
                                        /* Counters for individual TxQs? */
                                        BNAD_UPDATE_CTR(bnad,
                                                netif_queue_wakeup);
                                }
                        }
                }
        }
}

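/*
 * Reclaim completed Tx buffers for one TxQ, wake the netdev queue if
 * enough descriptors have been freed, and ack the interrupt block.
 * Called from the MSIX Tx completion handler and the INTx ISR.
 */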
static u32
bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
{
        struct net_device *netdev = bnad->netdev;
        u32 sent = 0;

        if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
                return 0;

        sent = bnad_free_txbufs(bnad, tcb);
        if (sent) {
                if (netif_queue_stopped(netdev) &&
                    netif_carrier_ok(netdev) &&
                    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
                                    BNAD_NETIF_WAKE_THRESHOLD) {
                        if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
                                netif_wake_queue(netdev);
                                BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
                        }
                }
        }

        if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
                bna_ib_ack(tcb->i_dbell, sent);

        smp_mb__before_clear_bit();
        clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

        return sent;
}

/* MSIX Tx Completion Handler */
static irqreturn_t
bnad_msix_tx(int irq, void *data)
{
        struct bna_tcb *tcb = (struct bna_tcb *)data;
        struct bnad *bnad = tcb->bnad;

        bnad_tx(bnad, tcb);

        return IRQ_HANDLED;
}

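/* Reset the RCB and its unmap queue producer/consumer indices */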
static void
bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
{
        struct bnad_unmap_q *unmap_q = rcb->unmap_q;

        rcb->producer_index = 0;
        rcb->consumer_index = 0;

        unmap_q->producer_index = 0;
        unmap_q->consumer_index = 0;
}

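/*
 * Free every posted Rx buffer on the RCB and reset its indices.
 * Expects no concurrent Rx activity on this queue.
 */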
static void
bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
        struct bnad_unmap_q *unmap_q;
        struct bnad_skb_unmap *unmap_array;
        struct sk_buff *skb;
        int unmap_cons;

        unmap_q = rcb->unmap_q;
        unmap_array = unmap_q->unmap_array;
        for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
                skb = unmap_array[unmap_cons].skb;
                if (!skb)
                        continue;
                unmap_array[unmap_cons].skb = NULL;
                dma_unmap_single(&bnad->pcidev->dev,
                                 dma_unmap_addr(&unmap_array[unmap_cons],
                                                dma_addr),
                                 rcb->rxq->buffer_size,
                                 DMA_FROM_DEVICE);
                dev_kfree_skb(skb);
        }
        bnad_reset_rcb(bnad, rcb);
}

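/*
 * Allocate skbs for the free slots of the unmap queue, DMA-map them,
 * fill the Rx descriptors and ring the doorbell if anything was posted.
 */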
static void
bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
        u16 to_alloc, alloced, unmap_prod, wi_range;
        struct bnad_unmap_q *unmap_q = rcb->unmap_q;
        struct bnad_skb_unmap *unmap_array;
        struct bna_rxq_entry *rxent;
        struct sk_buff *skb;
        dma_addr_t dma_addr;

        alloced = 0;
        to_alloc =
                BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);

        unmap_array = unmap_q->unmap_array;
        unmap_prod = unmap_q->producer_index;

        BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);

        while (to_alloc--) {
                if (!wi_range)
                        BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
                                             wi_range);
                skb = netdev_alloc_skb_ip_align(bnad->netdev,
                                                rcb->rxq->buffer_size);
                if (unlikely(!skb)) {
                        BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
                        rcb->rxq->rxbuf_alloc_failed++;
                        goto finishing;
                }
                unmap_array[unmap_prod].skb = skb;
                dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
                                          rcb->rxq->buffer_size,
                                          DMA_FROM_DEVICE);
                dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
                                   dma_addr);
                BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
                BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);

                rxent++;
                wi_range--;
                alloced++;
        }

finishing:
        if (likely(alloced)) {
                unmap_q->producer_index = unmap_prod;
                rcb->producer_index = unmap_prod;
                smp_mb();
                if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
                        bna_rxq_prod_indx_doorbell(rcb);
        }
}

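/*
 * Refill the RxQ once the number of free entries crosses the refill
 * threshold; the BNAD_RXQ_REFILL bit serializes concurrent refills.
 */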
static inline void
bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
{
        struct bnad_unmap_q *unmap_q = rcb->unmap_q;

        if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
                if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
                         >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
                        bnad_alloc_n_post_rxbufs(bnad, rcb);
                smp_mb__before_clear_bit();
                clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
        }
}

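/*
 * Rx completion processing (NAPI poll body): consume up to 'budget'
 * completions, hand good packets to the stack (GRO when the checksum
 * was verified by hardware), then ack the CQ and refill the RxQ(s).
 */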
static u32
bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
        struct bna_cq_entry *cmpl, *next_cmpl;
        struct bna_rcb *rcb = NULL;
        unsigned int wi_range, packets = 0, wis = 0;
        struct bnad_unmap_q *unmap_q;
        struct bnad_skb_unmap *unmap_array;
        struct sk_buff *skb;
        u32 flags, unmap_cons;
        struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
        struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);

        set_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);

        if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) {
                clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
                return 0;
        }

        prefetch(bnad->netdev);
        BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
                            wi_range);
        BUG_ON(!(wi_range <= ccb->q_depth));
        while (cmpl->valid && packets < budget) {
                packets++;
                BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));

                if (bna_is_small_rxq(cmpl->rxq_id))
                        rcb = ccb->rcb[1];
                else
                        rcb = ccb->rcb[0];

                unmap_q = rcb->unmap_q;
                unmap_array = unmap_q->unmap_array;
                unmap_cons = unmap_q->consumer_index;

                skb = unmap_array[unmap_cons].skb;
                BUG_ON(!(skb));
                unmap_array[unmap_cons].skb = NULL;
                dma_unmap_single(&bnad->pcidev->dev,
                                 dma_unmap_addr(&unmap_array[unmap_cons],
                                                dma_addr),
                                 rcb->rxq->buffer_size,
                                 DMA_FROM_DEVICE);
                BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);

                /* Should be more efficient ? Performance ? */
                BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);

                wis++;
                if (likely(--wi_range))
                        next_cmpl = cmpl + 1;
                else {
                        BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
                        wis = 0;
                        BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
                                            next_cmpl, wi_range);
                        BUG_ON(!(wi_range <= ccb->q_depth));
                }
                prefetch(next_cmpl);

                flags = ntohl(cmpl->flags);
                if (unlikely
                    (flags &
                     (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
                      BNA_CQ_EF_TOO_LONG))) {
                        dev_kfree_skb_any(skb);
                        rcb->rxq->rx_packets_with_error++;
                        goto next;
                }

                skb_put(skb, ntohs(cmpl->length));
                if (likely
                    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
                     (((flags & BNA_CQ_EF_IPV4) &&
                       (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
                      (flags & BNA_CQ_EF_IPV6)) &&
                     (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
                     (flags & BNA_CQ_EF_L4_CKSUM_OK)))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb_checksum_none_assert(skb);

                rcb->rxq->rx_packets++;
                rcb->rxq->rx_bytes += skb->len;
                skb->protocol = eth_type_trans(skb, bnad->netdev);

                if (flags & BNA_CQ_EF_VLAN)
                        __vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));

                if (skb->ip_summed == CHECKSUM_UNNECESSARY)
                        napi_gro_receive(&rx_ctrl->napi, skb);
                else {
                        netif_receive_skb(skb);
                }

next:
                cmpl->valid = 0;
                cmpl = next_cmpl;
        }

        BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);

        if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
                bna_ib_ack_disable_irq(ccb->i_dbell, packets);

        bnad_refill_rxq(bnad, ccb->rcb[0]);
        if (ccb->rcb[1])
                bnad_refill_rxq(bnad, ccb->rcb[1]);

        clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);

        return packets;
}

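/* Schedule the NAPI poll routine attached to this CCB's rx_ctrl */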
static void
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
{
        struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
        struct napi_struct *napi = &rx_ctrl->napi;

        if (likely(napi_schedule_prep(napi))) {
                __napi_schedule(napi);
                rx_ctrl->rx_schedule++;
        }
}

/* MSIX Rx Path Handler */
static irqreturn_t
bnad_msix_rx(int irq, void *data)
{
        struct bna_ccb *ccb = (struct bna_ccb *)data;

        if (ccb) {
                ((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
                bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
        }

        return IRQ_HANDLED;
}

/* Interrupt handlers */

/* Mbox Interrupt Handlers */
static irqreturn_t
bnad_msix_mbox_handler(int irq, void *data)
{
        u32 intr_status;
        unsigned long flags;
        struct bnad *bnad = (struct bnad *)data;

        spin_lock_irqsave(&bnad->bna_lock, flags);
        if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
                spin_unlock_irqrestore(&bnad->bna_lock, flags);
                return IRQ_HANDLED;
        }

        bna_intr_status_get(&bnad->bna, intr_status);

        if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
                bna_mbox_handler(&bnad->bna, intr_status);

        spin_unlock_irqrestore(&bnad->bna_lock, flags);

        return IRQ_HANDLED;
}

static irqreturn_t
bnad_isr(int irq, void *data)
{
        int i, j;
        u32 intr_status;
        unsigned long flags;
        struct bnad *bnad = (struct bnad *)data;
        struct bnad_rx_info *rx_info;
        struct bnad_rx_ctrl *rx_ctrl;
        struct bna_tcb *tcb = NULL;

        spin_lock_irqsave(&bnad->bna_lock, flags);
        if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
                spin_unlock_irqrestore(&bnad->bna_lock, flags);
                return IRQ_NONE;
        }

        bna_intr_status_get(&bnad->bna, intr_status);

        if (unlikely(!intr_status)) {
                spin_unlock_irqrestore(&bnad->bna_lock, flags);
                return IRQ_NONE;
        }

        if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
                bna_mbox_handler(&bnad->bna, intr_status);

        spin_unlock_irqrestore(&bnad->bna_lock, flags);

        if (!BNA_IS_INTX_DATA_INTR(intr_status))
                return IRQ_HANDLED;

        /* Process data interrupts */
        /* Tx processing */
        for (i = 0; i < bnad->num_tx; i++) {
                for (j = 0; j < bnad->num_txq_per_tx; j++) {
                        tcb = bnad->tx_info[i].tcb[j];
                        if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
                                bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
                }
        }
        /* Rx processing */
        for (i = 0; i < bnad->num_rx; i++) {
                rx_info = &bnad->rx_info[i];
                if (!rx_info->rx)
                        continue;
                for (j = 0; j < bnad->num_rxp_per_rx; j++) {
                        rx_ctrl = &rx_info->rx_ctrl[j];
                        if (rx_ctrl->ccb)
                                bnad_netif_rx_schedule_poll(bnad,
                                                            rx_ctrl->ccb);
                }
        }
        return IRQ_HANDLED;
}

/*
 * Called in interrupt / callback context
 * with bna_lock held, so cfg_flags access is OK
 */
static void
bnad_enable_mbox_irq(struct bnad *bnad)
{
        clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

        BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
}

/*
 * Called with bnad->bna_lock held b'cos of
 * bnad->cfg_flags access.
 */
static void
bnad_disable_mbox_irq(struct bnad *bnad)
{
        set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

        BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
}

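/*
 * Publish the adapter's permanent MAC address on the netdev; also use
 * it as dev_addr if no address has been set yet.
 */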
static void
bnad_set_netdev_perm_addr(struct bnad *bnad)
{
        struct net_device *netdev = bnad->netdev;

        memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
        if (is_zero_ether_addr(netdev->dev_addr))
                memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
}

701 /* Control Path Handlers */
703 /* Callbacks */
704 void
705 bnad_cb_mbox_intr_enable(struct bnad *bnad)
707 bnad_enable_mbox_irq(bnad);
710 void
711 bnad_cb_mbox_intr_disable(struct bnad *bnad)
713 bnad_disable_mbox_irq(bnad);
716 void
717 bnad_cb_ioceth_ready(struct bnad *bnad)
719 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
720 complete(&bnad->bnad_completions.ioc_comp);
723 void
724 bnad_cb_ioceth_failed(struct bnad *bnad)
726 bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
727 complete(&bnad->bnad_completions.ioc_comp);
730 void
731 bnad_cb_ioceth_disabled(struct bnad *bnad)
733 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
734 complete(&bnad->bnad_completions.ioc_comp);
737 static void
738 bnad_cb_enet_disabled(void *arg)
740 struct bnad *bnad = (struct bnad *)arg;
742 netif_carrier_off(bnad->netdev);
743 complete(&bnad->bnad_completions.enet_comp);
746 void
747 bnad_cb_ethport_link_status(struct bnad *bnad,
748 enum bna_link_status link_status)
750 bool link_up = false;
752 link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
754 if (link_status == BNA_CEE_UP) {
755 if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
756 BNAD_UPDATE_CTR(bnad, cee_toggle);
757 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
758 } else {
759 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
760 BNAD_UPDATE_CTR(bnad, cee_toggle);
761 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
764 if (link_up) {
765 if (!netif_carrier_ok(bnad->netdev)) {
766 uint tx_id, tcb_id;
767 printk(KERN_WARNING "bna: %s link up\n",
768 bnad->netdev->name);
769 netif_carrier_on(bnad->netdev);
770 BNAD_UPDATE_CTR(bnad, link_toggle);
771 for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
772 for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
773 tcb_id++) {
774 struct bna_tcb *tcb =
775 bnad->tx_info[tx_id].tcb[tcb_id];
776 u32 txq_id;
777 if (!tcb)
778 continue;
780 txq_id = tcb->id;
782 if (test_bit(BNAD_TXQ_TX_STARTED,
783 &tcb->flags)) {
785 * Force an immediate
786 * Transmit Schedule */
787 printk(KERN_INFO "bna: %s %d "
788 "TXQ_STARTED\n",
789 bnad->netdev->name,
790 txq_id);
791 netif_wake_subqueue(
792 bnad->netdev,
793 txq_id);
794 BNAD_UPDATE_CTR(bnad,
795 netif_queue_wakeup);
796 } else {
797 netif_stop_subqueue(
798 bnad->netdev,
799 txq_id);
800 BNAD_UPDATE_CTR(bnad,
801 netif_queue_stop);
806 } else {
807 if (netif_carrier_ok(bnad->netdev)) {
808 printk(KERN_WARNING "bna: %s link down\n",
809 bnad->netdev->name);
810 netif_carrier_off(bnad->netdev);
811 BNAD_UPDATE_CTR(bnad, link_toggle);
816 static void
817 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
819 struct bnad *bnad = (struct bnad *)arg;
821 complete(&bnad->bnad_completions.tx_comp);
824 static void
825 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
827 struct bnad_tx_info *tx_info =
828 (struct bnad_tx_info *)tcb->txq->tx->priv;
829 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
831 tx_info->tcb[tcb->id] = tcb;
832 unmap_q->producer_index = 0;
833 unmap_q->consumer_index = 0;
834 unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
837 static void
838 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
840 struct bnad_tx_info *tx_info =
841 (struct bnad_tx_info *)tcb->txq->tx->priv;
842 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
844 while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
845 cpu_relax();
847 bnad_free_all_txbufs(bnad, tcb);
849 unmap_q->producer_index = 0;
850 unmap_q->consumer_index = 0;
852 smp_mb__before_clear_bit();
853 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
855 tx_info->tcb[tcb->id] = NULL;
858 static void
859 bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
861 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
863 unmap_q->producer_index = 0;
864 unmap_q->consumer_index = 0;
865 unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
868 static void
869 bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
871 bnad_free_all_rxbufs(bnad, rcb);
874 static void
875 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
877 struct bnad_rx_info *rx_info =
878 (struct bnad_rx_info *)ccb->cq->rx->priv;
880 rx_info->rx_ctrl[ccb->id].ccb = ccb;
881 ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
884 static void
885 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
887 struct bnad_rx_info *rx_info =
888 (struct bnad_rx_info *)ccb->cq->rx->priv;
890 rx_info->rx_ctrl[ccb->id].ccb = NULL;
893 static void
894 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
896 struct bnad_tx_info *tx_info =
897 (struct bnad_tx_info *)tx->priv;
898 struct bna_tcb *tcb;
899 u32 txq_id;
900 int i;
902 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
903 tcb = tx_info->tcb[i];
904 if (!tcb)
905 continue;
906 txq_id = tcb->id;
907 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
908 netif_stop_subqueue(bnad->netdev, txq_id);
909 printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
910 bnad->netdev->name, txq_id);
914 static void
915 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
917 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
918 struct bna_tcb *tcb;
919 struct bnad_unmap_q *unmap_q;
920 u32 txq_id;
921 int i;
923 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
924 tcb = tx_info->tcb[i];
925 if (!tcb)
926 continue;
927 txq_id = tcb->id;
929 unmap_q = tcb->unmap_q;
931 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
932 continue;
934 while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
935 cpu_relax();
937 bnad_free_all_txbufs(bnad, tcb);
939 unmap_q->producer_index = 0;
940 unmap_q->consumer_index = 0;
942 smp_mb__before_clear_bit();
943 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
945 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
947 if (netif_carrier_ok(bnad->netdev)) {
948 printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
949 bnad->netdev->name, txq_id);
950 netif_wake_subqueue(bnad->netdev, txq_id);
951 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
956 * Workaround for first ioceth enable failure & we
957 * get a 0 MAC address. We try to get the MAC address
958 * again here.
960 if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
961 bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
962 bnad_set_netdev_perm_addr(bnad);
966 static void
967 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
969 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
970 struct bna_tcb *tcb;
971 int i;
973 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
974 tcb = tx_info->tcb[i];
975 if (!tcb)
976 continue;
979 mdelay(BNAD_TXRX_SYNC_MDELAY);
980 bna_tx_cleanup_complete(tx);
983 static void
984 bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
986 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
987 struct bna_ccb *ccb;
988 struct bnad_rx_ctrl *rx_ctrl;
989 int i;
991 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
992 rx_ctrl = &rx_info->rx_ctrl[i];
993 ccb = rx_ctrl->ccb;
994 if (!ccb)
995 continue;
997 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
999 if (ccb->rcb[1])
1000 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
1004 static void
1005 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1007 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1008 struct bna_ccb *ccb;
1009 struct bnad_rx_ctrl *rx_ctrl;
1010 int i;
1012 mdelay(BNAD_TXRX_SYNC_MDELAY);
1014 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1015 rx_ctrl = &rx_info->rx_ctrl[i];
1016 ccb = rx_ctrl->ccb;
1017 if (!ccb)
1018 continue;
1020 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
1022 if (ccb->rcb[1])
1023 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
1025 while (test_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags))
1026 cpu_relax();
1029 bna_rx_cleanup_complete(rx);
1032 static void
1033 bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1035 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1036 struct bna_ccb *ccb;
1037 struct bna_rcb *rcb;
1038 struct bnad_rx_ctrl *rx_ctrl;
1039 struct bnad_unmap_q *unmap_q;
1040 int i;
1041 int j;
1043 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1044 rx_ctrl = &rx_info->rx_ctrl[i];
1045 ccb = rx_ctrl->ccb;
1046 if (!ccb)
1047 continue;
1049 bnad_cq_cmpl_init(bnad, ccb);
1051 for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1052 rcb = ccb->rcb[j];
1053 if (!rcb)
1054 continue;
1055 bnad_free_all_rxbufs(bnad, rcb);
1057 set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1058 set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
1059 unmap_q = rcb->unmap_q;
1061 /* Now allocate & post buffers for this RCB */
1062 /* !!Allocation in callback context */
1063 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
1064 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
1065 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
1066 bnad_alloc_n_post_rxbufs(bnad, rcb);
1067 smp_mb__before_clear_bit();
1068 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
1074 static void
1075 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1077 struct bnad *bnad = (struct bnad *)arg;
1079 complete(&bnad->bnad_completions.rx_comp);
1082 static void
1083 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1085 bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1086 complete(&bnad->bnad_completions.mcast_comp);
1089 void
1090 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1091 struct bna_stats *stats)
1093 if (status == BNA_CB_SUCCESS)
1094 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1096 if (!netif_running(bnad->netdev) ||
1097 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1098 return;
1100 mod_timer(&bnad->stats_timer,
1101 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1104 static void
1105 bnad_cb_enet_mtu_set(struct bnad *bnad)
1107 bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1108 complete(&bnad->bnad_completions.mtu_comp);
1111 void
1112 bnad_cb_completion(void *arg, enum bfa_status status)
1114 struct bnad_iocmd_comp *iocmd_comp =
1115 (struct bnad_iocmd_comp *)arg;
1117 iocmd_comp->comp_status = (u32) status;
1118 complete(&iocmd_comp->comp);
1121 /* Resource allocation, free functions */
1123 static void
1124 bnad_mem_free(struct bnad *bnad,
1125 struct bna_mem_info *mem_info)
1127 int i;
1128 dma_addr_t dma_pa;
1130 if (mem_info->mdl == NULL)
1131 return;
1133 for (i = 0; i < mem_info->num; i++) {
1134 if (mem_info->mdl[i].kva != NULL) {
1135 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1136 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1137 dma_pa);
1138 dma_free_coherent(&bnad->pcidev->dev,
1139 mem_info->mdl[i].len,
1140 mem_info->mdl[i].kva, dma_pa);
1141 } else
1142 kfree(mem_info->mdl[i].kva);
1145 kfree(mem_info->mdl);
1146 mem_info->mdl = NULL;
1149 static int
1150 bnad_mem_alloc(struct bnad *bnad,
1151 struct bna_mem_info *mem_info)
1153 int i;
1154 dma_addr_t dma_pa;
1156 if ((mem_info->num == 0) || (mem_info->len == 0)) {
1157 mem_info->mdl = NULL;
1158 return 0;
1161 mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1162 GFP_KERNEL);
1163 if (mem_info->mdl == NULL)
1164 return -ENOMEM;
1166 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1167 for (i = 0; i < mem_info->num; i++) {
1168 mem_info->mdl[i].len = mem_info->len;
1169 mem_info->mdl[i].kva =
1170 dma_alloc_coherent(&bnad->pcidev->dev,
1171 mem_info->len, &dma_pa,
1172 GFP_KERNEL);
1174 if (mem_info->mdl[i].kva == NULL)
1175 goto err_return;
1177 BNA_SET_DMA_ADDR(dma_pa,
1178 &(mem_info->mdl[i].dma));
1180 } else {
1181 for (i = 0; i < mem_info->num; i++) {
1182 mem_info->mdl[i].len = mem_info->len;
1183 mem_info->mdl[i].kva = kzalloc(mem_info->len,
1184 GFP_KERNEL);
1185 if (mem_info->mdl[i].kva == NULL)
1186 goto err_return;
1190 return 0;
1192 err_return:
1193 bnad_mem_free(bnad, mem_info);
1194 return -ENOMEM;
1197 /* Free IRQ for Mailbox */
1198 static void
1199 bnad_mbox_irq_free(struct bnad *bnad)
1201 int irq;
1202 unsigned long flags;
1204 spin_lock_irqsave(&bnad->bna_lock, flags);
1205 bnad_disable_mbox_irq(bnad);
1206 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1208 irq = BNAD_GET_MBOX_IRQ(bnad);
1209 free_irq(irq, bnad);
1213 * Allocates IRQ for Mailbox, but keep it disabled
1214 * This will be enabled once we get the mbox enable callback
1215 * from bna
1217 static int
1218 bnad_mbox_irq_alloc(struct bnad *bnad)
1220 int err = 0;
1221 unsigned long irq_flags, flags;
1222 u32 irq;
1223 irq_handler_t irq_handler;
1225 spin_lock_irqsave(&bnad->bna_lock, flags);
1226 if (bnad->cfg_flags & BNAD_CF_MSIX) {
1227 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1228 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1229 irq_flags = 0;
1230 } else {
1231 irq_handler = (irq_handler_t)bnad_isr;
1232 irq = bnad->pcidev->irq;
1233 irq_flags = IRQF_SHARED;
1236 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1237 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1240 * Set the Mbox IRQ disable flag, so that the IRQ handler
1241 * called from request_irq() for SHARED IRQs do not execute
1243 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1245 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1247 err = request_irq(irq, irq_handler, irq_flags,
1248 bnad->mbox_irq_name, bnad);
1250 return err;
1253 static void
1254 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1256 kfree(intr_info->idl);
1257 intr_info->idl = NULL;
1260 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1261 static int
1262 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1263 u32 txrx_id, struct bna_intr_info *intr_info)
1265 int i, vector_start = 0;
1266 u32 cfg_flags;
1267 unsigned long flags;
1269 spin_lock_irqsave(&bnad->bna_lock, flags);
1270 cfg_flags = bnad->cfg_flags;
1271 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1273 if (cfg_flags & BNAD_CF_MSIX) {
1274 intr_info->intr_type = BNA_INTR_T_MSIX;
1275 intr_info->idl = kcalloc(intr_info->num,
1276 sizeof(struct bna_intr_descr),
1277 GFP_KERNEL);
1278 if (!intr_info->idl)
1279 return -ENOMEM;
1281 switch (src) {
1282 case BNAD_INTR_TX:
1283 vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1284 break;
1286 case BNAD_INTR_RX:
1287 vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1288 (bnad->num_tx * bnad->num_txq_per_tx) +
1289 txrx_id;
1290 break;
1292 default:
1293 BUG();
1296 for (i = 0; i < intr_info->num; i++)
1297 intr_info->idl[i].vector = vector_start + i;
1298 } else {
1299 intr_info->intr_type = BNA_INTR_T_INTX;
1300 intr_info->num = 1;
1301 intr_info->idl = kcalloc(intr_info->num,
1302 sizeof(struct bna_intr_descr),
1303 GFP_KERNEL);
1304 if (!intr_info->idl)
1305 return -ENOMEM;
1307 switch (src) {
1308 case BNAD_INTR_TX:
1309 intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1310 break;
1312 case BNAD_INTR_RX:
1313 intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1314 break;
1317 return 0;
1321 * NOTE: Should be called for MSIX only
1322 * Unregisters Tx MSIX vector(s) from the kernel
1324 static void
1325 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1326 int num_txqs)
1328 int i;
1329 int vector_num;
1331 for (i = 0; i < num_txqs; i++) {
1332 if (tx_info->tcb[i] == NULL)
1333 continue;
1335 vector_num = tx_info->tcb[i]->intr_vector;
1336 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1341 * NOTE: Should be called for MSIX only
1342 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1344 static int
1345 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1346 u32 tx_id, int num_txqs)
1348 int i;
1349 int err;
1350 int vector_num;
1352 for (i = 0; i < num_txqs; i++) {
1353 vector_num = tx_info->tcb[i]->intr_vector;
1354 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1355 tx_id + tx_info->tcb[i]->id);
1356 err = request_irq(bnad->msix_table[vector_num].vector,
1357 (irq_handler_t)bnad_msix_tx, 0,
1358 tx_info->tcb[i]->name,
1359 tx_info->tcb[i]);
1360 if (err)
1361 goto err_return;
1364 return 0;
1366 err_return:
1367 if (i > 0)
1368 bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1369 return -1;
1373 * NOTE: Should be called for MSIX only
1374 * Unregisters Rx MSIX vector(s) from the kernel
1376 static void
1377 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1378 int num_rxps)
1380 int i;
1381 int vector_num;
1383 for (i = 0; i < num_rxps; i++) {
1384 if (rx_info->rx_ctrl[i].ccb == NULL)
1385 continue;
1387 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1388 free_irq(bnad->msix_table[vector_num].vector,
1389 rx_info->rx_ctrl[i].ccb);
1394 * NOTE: Should be called for MSIX only
1395 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1397 static int
1398 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1399 u32 rx_id, int num_rxps)
1401 int i;
1402 int err;
1403 int vector_num;
1405 for (i = 0; i < num_rxps; i++) {
1406 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1407 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1408 bnad->netdev->name,
1409 rx_id + rx_info->rx_ctrl[i].ccb->id);
1410 err = request_irq(bnad->msix_table[vector_num].vector,
1411 (irq_handler_t)bnad_msix_rx, 0,
1412 rx_info->rx_ctrl[i].ccb->name,
1413 rx_info->rx_ctrl[i].ccb);
1414 if (err)
1415 goto err_return;
1418 return 0;
1420 err_return:
1421 if (i > 0)
1422 bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1423 return -1;
1426 /* Free Tx object Resources */
1427 static void
1428 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1430 int i;
1432 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1433 if (res_info[i].res_type == BNA_RES_T_MEM)
1434 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1435 else if (res_info[i].res_type == BNA_RES_T_INTR)
1436 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1440 /* Allocates memory and interrupt resources for Tx object */
1441 static int
1442 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1443 u32 tx_id)
1445 int i, err = 0;
1447 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1448 if (res_info[i].res_type == BNA_RES_T_MEM)
1449 err = bnad_mem_alloc(bnad,
1450 &res_info[i].res_u.mem_info);
1451 else if (res_info[i].res_type == BNA_RES_T_INTR)
1452 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1453 &res_info[i].res_u.intr_info);
1454 if (err)
1455 goto err_return;
1457 return 0;
1459 err_return:
1460 bnad_tx_res_free(bnad, res_info);
1461 return err;
1464 /* Free Rx object Resources */
1465 static void
1466 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1468 int i;
1470 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1471 if (res_info[i].res_type == BNA_RES_T_MEM)
1472 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1473 else if (res_info[i].res_type == BNA_RES_T_INTR)
1474 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1478 /* Allocates memory and interrupt resources for Rx object */
1479 static int
1480 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1481 uint rx_id)
1483 int i, err = 0;
1485 /* All memory needs to be allocated before setup_ccbs */
1486 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1487 if (res_info[i].res_type == BNA_RES_T_MEM)
1488 err = bnad_mem_alloc(bnad,
1489 &res_info[i].res_u.mem_info);
1490 else if (res_info[i].res_type == BNA_RES_T_INTR)
1491 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1492 &res_info[i].res_u.intr_info);
1493 if (err)
1494 goto err_return;
1496 return 0;
1498 err_return:
1499 bnad_rx_res_free(bnad, res_info);
1500 return err;
1503 /* Timer callbacks */
1504 /* a) IOC timer */
1505 static void
1506 bnad_ioc_timeout(unsigned long data)
1508 struct bnad *bnad = (struct bnad *)data;
1509 unsigned long flags;
1511 spin_lock_irqsave(&bnad->bna_lock, flags);
1512 bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
1513 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1516 static void
1517 bnad_ioc_hb_check(unsigned long data)
1519 struct bnad *bnad = (struct bnad *)data;
1520 unsigned long flags;
1522 spin_lock_irqsave(&bnad->bna_lock, flags);
1523 bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
1524 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1527 static void
1528 bnad_iocpf_timeout(unsigned long data)
1530 struct bnad *bnad = (struct bnad *)data;
1531 unsigned long flags;
1533 spin_lock_irqsave(&bnad->bna_lock, flags);
1534 bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
1535 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1538 static void
1539 bnad_iocpf_sem_timeout(unsigned long data)
1541 struct bnad *bnad = (struct bnad *)data;
1542 unsigned long flags;
1544 spin_lock_irqsave(&bnad->bna_lock, flags);
1545 bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
1546 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1550 * All timer routines use bnad->bna_lock to protect against
1551 * the following race, which may occur in case of no locking:
1552 * Time CPU m CPU n
1553 * 0 1 = test_bit
1554 * 1 clear_bit
1555 * 2 del_timer_sync
1556 * 3 mod_timer
1559 /* b) Dynamic Interrupt Moderation Timer */
1560 static void
1561 bnad_dim_timeout(unsigned long data)
1563 struct bnad *bnad = (struct bnad *)data;
1564 struct bnad_rx_info *rx_info;
1565 struct bnad_rx_ctrl *rx_ctrl;
1566 int i, j;
1567 unsigned long flags;
1569 if (!netif_carrier_ok(bnad->netdev))
1570 return;
1572 spin_lock_irqsave(&bnad->bna_lock, flags);
1573 for (i = 0; i < bnad->num_rx; i++) {
1574 rx_info = &bnad->rx_info[i];
1575 if (!rx_info->rx)
1576 continue;
1577 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1578 rx_ctrl = &rx_info->rx_ctrl[j];
1579 if (!rx_ctrl->ccb)
1580 continue;
1581 bna_rx_dim_update(rx_ctrl->ccb);
1585 /* Check for BNAD_CF_DIM_ENABLED, does not eleminate a race */
1586 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1587 mod_timer(&bnad->dim_timer,
1588 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1589 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1592 /* c) Statistics Timer */
1593 static void
1594 bnad_stats_timeout(unsigned long data)
1596 struct bnad *bnad = (struct bnad *)data;
1597 unsigned long flags;
1599 if (!netif_running(bnad->netdev) ||
1600 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1601 return;
1603 spin_lock_irqsave(&bnad->bna_lock, flags);
1604 bna_hw_stats_get(&bnad->bna);
1605 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1609 * Set up timer for DIM
1610 * Called with bnad->bna_lock held
1612 void
1613 bnad_dim_timer_start(struct bnad *bnad)
1615 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1616 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1617 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1618 (unsigned long)bnad);
1619 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1620 mod_timer(&bnad->dim_timer,
1621 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1626 * Set up timer for statistics
1627 * Called with mutex_lock(&bnad->conf_mutex) held
1629 static void
1630 bnad_stats_timer_start(struct bnad *bnad)
1632 unsigned long flags;
1634 spin_lock_irqsave(&bnad->bna_lock, flags);
1635 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1636 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1637 (unsigned long)bnad);
1638 mod_timer(&bnad->stats_timer,
1639 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1641 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1645 * Stops the stats timer
1646 * Called with mutex_lock(&bnad->conf_mutex) held
1648 static void
1649 bnad_stats_timer_stop(struct bnad *bnad)
1651 int to_del = 0;
1652 unsigned long flags;
1654 spin_lock_irqsave(&bnad->bna_lock, flags);
1655 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1656 to_del = 1;
1657 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1658 if (to_del)
1659 del_timer_sync(&bnad->stats_timer);
1662 /* Utilities */
1664 static void
1665 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1667 int i = 1; /* Index 0 has broadcast address */
1668 struct netdev_hw_addr *mc_addr;
1670 netdev_for_each_mc_addr(mc_addr, netdev) {
1671 memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1672 ETH_ALEN);
1673 i++;
1677 static int
1678 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1680 struct bnad_rx_ctrl *rx_ctrl =
1681 container_of(napi, struct bnad_rx_ctrl, napi);
1682 struct bnad *bnad = rx_ctrl->bnad;
1683 int rcvd = 0;
1685 rx_ctrl->rx_poll_ctr++;
1687 if (!netif_carrier_ok(bnad->netdev))
1688 goto poll_exit;
1690 rcvd = bnad_poll_cq(bnad, rx_ctrl->ccb, budget);
1691 if (rcvd >= budget)
1692 return rcvd;
1694 poll_exit:
1695 napi_complete(napi);
1697 rx_ctrl->rx_complete++;
1699 if (rx_ctrl->ccb)
1700 bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1702 return rcvd;
1705 #define BNAD_NAPI_POLL_QUOTA 64
1706 static void
1707 bnad_napi_init(struct bnad *bnad, u32 rx_id)
1709 struct bnad_rx_ctrl *rx_ctrl;
1710 int i;
1712 /* Initialize & enable NAPI */
1713 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1714 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1715 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1716 bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1720 static void
1721 bnad_napi_enable(struct bnad *bnad, u32 rx_id)
1723 struct bnad_rx_ctrl *rx_ctrl;
1724 int i;
1726 /* Initialize & enable NAPI */
1727 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1728 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1730 napi_enable(&rx_ctrl->napi);
1734 static void
1735 bnad_napi_disable(struct bnad *bnad, u32 rx_id)
1737 int i;
1739 /* First disable and then clean up */
1740 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1741 napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1742 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1746 /* Should be held with conf_lock held */
1747 void
1748 bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
1750 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1751 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1752 unsigned long flags;
1754 if (!tx_info->tx)
1755 return;
1757 init_completion(&bnad->bnad_completions.tx_comp);
1758 spin_lock_irqsave(&bnad->bna_lock, flags);
1759 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1760 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1761 wait_for_completion(&bnad->bnad_completions.tx_comp);
1763 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1764 bnad_tx_msix_unregister(bnad, tx_info,
1765 bnad->num_txq_per_tx);
1767 if (0 == tx_id)
1768 tasklet_kill(&bnad->tx_free_tasklet);
1770 spin_lock_irqsave(&bnad->bna_lock, flags);
1771 bna_tx_destroy(tx_info->tx);
1772 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1774 tx_info->tx = NULL;
1775 tx_info->tx_id = 0;
1777 bnad_tx_res_free(bnad, res_info);
1780 /* Should be held with conf_lock held */
1782 bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1784 int err;
1785 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1786 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1787 struct bna_intr_info *intr_info =
1788 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1789 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1790 static const struct bna_tx_event_cbfn tx_cbfn = {
1791 .tcb_setup_cbfn = bnad_cb_tcb_setup,
1792 .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1793 .tx_stall_cbfn = bnad_cb_tx_stall,
1794 .tx_resume_cbfn = bnad_cb_tx_resume,
1795 .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1798 struct bna_tx *tx;
1799 unsigned long flags;
1801 tx_info->tx_id = tx_id;
1803 /* Initialize the Tx object configuration */
1804 tx_config->num_txq = bnad->num_txq_per_tx;
1805 tx_config->txq_depth = bnad->txq_depth;
1806 tx_config->tx_type = BNA_TX_T_REGULAR;
1807 tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1809 /* Get BNA's resource requirement for one tx object */
1810 spin_lock_irqsave(&bnad->bna_lock, flags);
1811 bna_tx_res_req(bnad->num_txq_per_tx,
1812 bnad->txq_depth, res_info);
1813 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1815 /* Fill Unmap Q memory requirements */
1816 BNAD_FILL_UNMAPQ_MEM_REQ(
1817 &res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1818 bnad->num_txq_per_tx,
1819 BNAD_TX_UNMAPQ_DEPTH);
1821 /* Allocate resources */
1822 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1823 if (err)
1824 return err;
1826 /* Ask BNA to create one Tx object, supplying required resources */
1827 spin_lock_irqsave(&bnad->bna_lock, flags);
1828 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1829 tx_info);
1830 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1831 if (!tx)
1832 goto err_return;
1833 tx_info->tx = tx;
1835 /* Register ISR for the Tx object */
1836 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1837 err = bnad_tx_msix_register(bnad, tx_info,
1838 tx_id, bnad->num_txq_per_tx);
1839 if (err)
1840 goto err_return;
1843 spin_lock_irqsave(&bnad->bna_lock, flags);
1844 bna_tx_enable(tx);
1845 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1847 return 0;
1849 err_return:
1850 bnad_tx_res_free(bnad, res_info);
1851 return err;
1854 /* Setup the rx config for bna_rx_create */
1855 /* bnad decides the configuration */
1856 static void
1857 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
1859 rx_config->rx_type = BNA_RX_T_REGULAR;
1860 rx_config->num_paths = bnad->num_rxp_per_rx;
1861 rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
1863 if (bnad->num_rxp_per_rx > 1) {
1864 rx_config->rss_status = BNA_STATUS_T_ENABLED;
1865 rx_config->rss_config.hash_type =
1866 (BFI_ENET_RSS_IPV6 |
1867 BFI_ENET_RSS_IPV6_TCP |
1868 BFI_ENET_RSS_IPV4 |
1869 BFI_ENET_RSS_IPV4_TCP);
1870 rx_config->rss_config.hash_mask =
1871 bnad->num_rxp_per_rx - 1;
1872 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
1873 sizeof(rx_config->rss_config.toeplitz_hash_key));
1874 } else {
1875 rx_config->rss_status = BNA_STATUS_T_DISABLED;
1876 memset(&rx_config->rss_config, 0,
1877 sizeof(rx_config->rss_config));
1879 rx_config->rxp_type = BNA_RXP_SLR;
1880 rx_config->q_depth = bnad->rxq_depth;
1882 rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
1884 rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
1887 static void
1888 bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
1890 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1891 int i;
1893 for (i = 0; i < bnad->num_rxp_per_rx; i++)
1894 rx_info->rx_ctrl[i].bnad = bnad;
1897 /* Called with mutex_lock(&bnad->conf_mutex) held */
1898 void
1899 bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
1901 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1902 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1903 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1904 unsigned long flags;
1905 int to_del = 0;
1907 if (!rx_info->rx)
1908 return;
1910 if (0 == rx_id) {
1911 spin_lock_irqsave(&bnad->bna_lock, flags);
1912 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1913 test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1914 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1915 to_del = 1;
1917 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1918 if (to_del)
1919 del_timer_sync(&bnad->dim_timer);
1922 init_completion(&bnad->bnad_completions.rx_comp);
1923 spin_lock_irqsave(&bnad->bna_lock, flags);
1924 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
1925 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1926 wait_for_completion(&bnad->bnad_completions.rx_comp);
1928 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
1929 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
1931 bnad_napi_disable(bnad, rx_id);
1933 spin_lock_irqsave(&bnad->bna_lock, flags);
1934 bna_rx_destroy(rx_info->rx);
1936 rx_info->rx = NULL;
1937 rx_info->rx_id = 0;
1938 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1940 bnad_rx_res_free(bnad, res_info);
1943 /* Called with mutex_lock(&bnad->conf_mutex) held */
1945 bnad_setup_rx(struct bnad *bnad, u32 rx_id)
1947 int err;
1948 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1949 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1950 struct bna_intr_info *intr_info =
1951 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
1952 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1953 static const struct bna_rx_event_cbfn rx_cbfn = {
1954 .rcb_setup_cbfn = bnad_cb_rcb_setup,
1955 .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
1956 .ccb_setup_cbfn = bnad_cb_ccb_setup,
1957 .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
1958 .rx_stall_cbfn = bnad_cb_rx_stall,
1959 .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
1960 .rx_post_cbfn = bnad_cb_rx_post,
1962 struct bna_rx *rx;
1963 unsigned long flags;
1965 rx_info->rx_id = rx_id;
1967 /* Initialize the Rx object configuration */
1968 bnad_init_rx_config(bnad, rx_config);
1970 /* Get BNA's resource requirement for one Rx object */
1971 spin_lock_irqsave(&bnad->bna_lock, flags);
1972 bna_rx_res_req(rx_config, res_info);
1973 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1975 /* Fill Unmap Q memory requirements */
1976 BNAD_FILL_UNMAPQ_MEM_REQ(
1977 &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
1978 rx_config->num_paths +
1979 ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
1980 rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
1982 /* Allocate resource */
1983 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
1984 if (err)
1985 return err;
1987 bnad_rx_ctrl_init(bnad, rx_id);
1989 /* Ask BNA to create one Rx object, supplying required resources */
1990 spin_lock_irqsave(&bnad->bna_lock, flags);
1991 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
1992 rx_info);
1993 if (!rx) {
1994 err = -ENOMEM;
1995 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1996 goto err_return;
1998 rx_info->rx = rx;
1999 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2002 * Init NAPI, so that state is set to NAPI_STATE_SCHED,
2003 * so that IRQ handler cannot schedule NAPI at this point.
2005 bnad_napi_init(bnad, rx_id);
2007 /* Register ISR for the Rx object */
2008 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2009 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
2010 rx_config->num_paths);
2011 if (err)
2012 goto err_return;
2015 spin_lock_irqsave(&bnad->bna_lock, flags);
2016 if (0 == rx_id) {
2017 /* Set up Dynamic Interrupt Moderation Vector */
2018 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
2019 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
2021 /* Enable VLAN filtering only on the default Rx */
2022 bna_rx_vlanfilter_enable(rx);
2024 /* Start the DIM timer */
2025 bnad_dim_timer_start(bnad);
2028 bna_rx_enable(rx);
2029 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2031 /* Enable scheduling of NAPI */
2032 bnad_napi_enable(bnad, rx_id);
2034 return 0;
2036 err_return:
2037 bnad_cleanup_rx(bnad, rx_id);
2038 return err;
2041 /* Called with conf_lock & bnad->bna_lock held */
2042 void
2043 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2045 struct bnad_tx_info *tx_info;
2047 tx_info = &bnad->tx_info[0];
2048 if (!tx_info->tx)
2049 return;
2051 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2054 /* Called with conf_lock & bnad->bna_lock held */
2055 void
2056 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2058 struct bnad_rx_info *rx_info;
2059 int i;
2061 for (i = 0; i < bnad->num_rx; i++) {
2062 rx_info = &bnad->rx_info[i];
2063 if (!rx_info->rx)
2064 continue;
2065 bna_rx_coalescing_timeo_set(rx_info->rx,
2066 bnad->rx_coalescing_timeo);
2071 * Called with bnad->bna_lock held
2074 bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
2076 int ret;
2078 if (!is_valid_ether_addr(mac_addr))
2079 return -EADDRNOTAVAIL;
2081 /* If datapath is down, pretend everything went through */
2082 if (!bnad->rx_info[0].rx)
2083 return 0;
2085 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
2086 if (ret != BNA_CB_SUCCESS)
2087 return -EADDRNOTAVAIL;
2089 return 0;
2092 /* Should be called with conf_lock held */
2094 bnad_enable_default_bcast(struct bnad *bnad)
2096 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2097 int ret;
2098 unsigned long flags;
2100 init_completion(&bnad->bnad_completions.mcast_comp);
2102 spin_lock_irqsave(&bnad->bna_lock, flags);
2103 ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
2104 bnad_cb_rx_mcast_add);
2105 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2107 if (ret == BNA_CB_SUCCESS)
2108 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2109 else
2110 return -ENODEV;
2112 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2113 return -ENODEV;
2115 return 0;
2118 /* Called with mutex_lock(&bnad->conf_mutex) held */
2119 void
2120 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2122 u16 vid;
2123 unsigned long flags;
2125 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2126 spin_lock_irqsave(&bnad->bna_lock, flags);
2127 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2128 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2132 /* Statistics utilities */
2133 void
2134 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2136 int i, j;
2138 for (i = 0; i < bnad->num_rx; i++) {
2139 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2140 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2141 stats->rx_packets += bnad->rx_info[i].
2142 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2143 stats->rx_bytes += bnad->rx_info[i].
2144 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2145 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2146 bnad->rx_info[i].rx_ctrl[j].ccb->
2147 rcb[1]->rxq) {
2148 stats->rx_packets +=
2149 bnad->rx_info[i].rx_ctrl[j].
2150 ccb->rcb[1]->rxq->rx_packets;
2151 stats->rx_bytes +=
2152 bnad->rx_info[i].rx_ctrl[j].
2153 ccb->rcb[1]->rxq->rx_bytes;
2158 for (i = 0; i < bnad->num_tx; i++) {
2159 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2160 if (bnad->tx_info[i].tcb[j]) {
2161 stats->tx_packets +=
2162 bnad->tx_info[i].tcb[j]->txq->tx_packets;
2163 stats->tx_bytes +=
2164 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2171 * Must be called with the bna_lock held.
2173 void
2174 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2176 struct bfi_enet_stats_mac *mac_stats;
2177 u32 bmap;
2178 int i;
2180 mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2181 stats->rx_errors =
2182 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2183 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2184 mac_stats->rx_undersize;
2185 stats->tx_errors = mac_stats->tx_fcs_error +
2186 mac_stats->tx_undersize;
2187 stats->rx_dropped = mac_stats->rx_drop;
2188 stats->tx_dropped = mac_stats->tx_drop;
2189 stats->multicast = mac_stats->rx_multicast;
2190 stats->collisions = mac_stats->tx_total_collision;
2192 stats->rx_length_errors = mac_stats->rx_frame_length_error;
2194 /* receive ring buffer overflow ?? */
2196 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2197 stats->rx_frame_errors = mac_stats->rx_alignment_error;
2198 /* recv'r fifo overrun */
2199 bmap = bna_rx_rid_mask(&bnad->bna);
2200 for (i = 0; bmap; i++) {
2201 if (bmap & 1) {
2202 stats->rx_fifo_errors +=
2203 bnad->stats.bna_stats->
2204 hw_stats.rxf_stats[i].frame_drops;
2205 break;
2207 bmap >>= 1;
2211 static void
2212 bnad_mbox_irq_sync(struct bnad *bnad)
2214 u32 irq;
2215 unsigned long flags;
2217 spin_lock_irqsave(&bnad->bna_lock, flags);
2218 if (bnad->cfg_flags & BNAD_CF_MSIX)
2219 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2220 else
2221 irq = bnad->pcidev->irq;
2222 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2224 synchronize_irq(irq);
2227 /* Utility used by bnad_start_xmit, for doing TSO */
2228 static int
2229 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2231 int err;
2233 if (skb_header_cloned(skb)) {
2234 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2235 if (err) {
2236 BNAD_UPDATE_CTR(bnad, tso_err);
2237 return err;
2242 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2243 * excluding the length field.
2245 if (skb->protocol == htons(ETH_P_IP)) {
2246 struct iphdr *iph = ip_hdr(skb);
2248 /* Cleared so the hardware can fill them in for each segment */
2249 iph->tot_len = 0;
2250 iph->check = 0;
2252 tcp_hdr(skb)->check =
2253 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2254 IPPROTO_TCP, 0);
2255 BNAD_UPDATE_CTR(bnad, tso4);
2256 } else {
2257 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2259 ipv6h->payload_len = 0;
2260 tcp_hdr(skb)->check =
2261 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2262 IPPROTO_TCP, 0);
2263 BNAD_UPDATE_CTR(bnad, tso6);
2266 return 0;
2270 * Initialize Q numbers depending on Rx Paths
2271 * Called with bnad->bna_lock held, because of cfg_flags
2272 * access.
2274 static void
2275 bnad_q_num_init(struct bnad *bnad)
2277 int rxps;
2279 rxps = min((uint)num_online_cpus(),
2280 (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
2282 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2283 rxps = 1; /* INTx */
2285 bnad->num_rx = 1;
2286 bnad->num_tx = 1;
2287 bnad->num_rxp_per_rx = rxps;
2288 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2292 * Adjusts the Q numbers, given a number of msix vectors
2293 * Give preference to RSS over Tx priority queues; in that case,
2294 * use just one Tx queue.
2295 * Called with bnad->bna_lock held because of cfg_flags access.
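/*
 * For example, with num_tx = 1, a single mailbox vector and 8 MSI-X
 * vectors granted, one vector stays with the TxQ, one with the mailbox,
 * and the remaining six become Rx paths (num_rxp_per_rx = 6).
 */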
2297 static void
2298 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2300 bnad->num_txq_per_tx = 1;
2301 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2302 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2303 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2304 bnad->num_rxp_per_rx = msix_vectors -
2305 (bnad->num_tx * bnad->num_txq_per_tx) -
2306 BNAD_MAILBOX_MSIX_VECTORS;
2307 } else
2308 bnad->num_rxp_per_rx = 1;
2311 /* Enable / disable ioceth */
2312 static int
2313 bnad_ioceth_disable(struct bnad *bnad)
2315 unsigned long flags;
2316 int err = 0;
2318 spin_lock_irqsave(&bnad->bna_lock, flags);
2319 init_completion(&bnad->bnad_completions.ioc_comp);
2320 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2321 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2323 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2324 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2326 err = bnad->bnad_completions.ioc_comp_status;
2327 return err;
2330 static int
2331 bnad_ioceth_enable(struct bnad *bnad)
2333 int err = 0;
2334 unsigned long flags;
2336 spin_lock_irqsave(&bnad->bna_lock, flags);
2337 init_completion(&bnad->bnad_completions.ioc_comp);
2338 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2339 bna_ioceth_enable(&bnad->bna.ioceth);
2340 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2342 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2343 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2345 err = bnad->bnad_completions.ioc_comp_status;
2347 return err;
2350 /* Free BNA resources */
2351 static void
2352 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2353 u32 res_val_max)
2355 int i;
2357 for (i = 0; i < res_val_max; i++)
2358 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2361 /* Allocates memory and interrupt resources for BNA */
2362 static int
2363 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2364 u32 res_val_max)
2366 int i, err;
2368 for (i = 0; i < res_val_max; i++) {
2369 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2370 if (err)
2371 goto err_return;
2373 return 0;
2375 err_return:
2376 bnad_res_free(bnad, res_info, res_val_max);
2377 return err;
2380 /* Interrupt enable / disable */
2381 static void
2382 bnad_enable_msix(struct bnad *bnad)
2384 int i, ret;
2385 unsigned long flags;
2387 spin_lock_irqsave(&bnad->bna_lock, flags);
2388 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2389 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2390 return;
2392 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2394 if (bnad->msix_table)
2395 return;
2397 bnad->msix_table =
2398 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2400 if (!bnad->msix_table)
2401 goto intx_mode;
2403 for (i = 0; i < bnad->msix_num; i++)
2404 bnad->msix_table[i].entry = i;
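/*
 * pci_enable_msix() returns 0 on success, a negative errno on failure,
 * or a positive count of the vectors that could be allocated when the
 * full request cannot be granted.
 */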
2406 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
2407 if (ret > 0) {
2408 /* Not enough MSI-X vectors. */
2409 pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
2410 ret, bnad->msix_num);
2412 spin_lock_irqsave(&bnad->bna_lock, flags);
2413 /* ret = number of vectors we got */
2414 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2415 (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2416 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2418 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2419 BNAD_MAILBOX_MSIX_VECTORS;
2421 if (bnad->msix_num > ret)
2422 goto intx_mode;
2424 /* Try once more with adjusted numbers */
2425 /* If this fails, fall back to INTx */
2426 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
2427 bnad->msix_num);
2428 if (ret)
2429 goto intx_mode;
2431 } else if (ret < 0)
2432 goto intx_mode;
2434 pci_intx(bnad->pcidev, 0);
2436 return;
2438 intx_mode:
2439 pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
2441 kfree(bnad->msix_table);
2442 bnad->msix_table = NULL;
2443 bnad->msix_num = 0;
2444 spin_lock_irqsave(&bnad->bna_lock, flags);
2445 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2446 bnad_q_num_init(bnad);
2447 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2450 static void
2451 bnad_disable_msix(struct bnad *bnad)
2453 u32 cfg_flags;
2454 unsigned long flags;
2456 spin_lock_irqsave(&bnad->bna_lock, flags);
2457 cfg_flags = bnad->cfg_flags;
2458 if (bnad->cfg_flags & BNAD_CF_MSIX)
2459 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2460 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2462 if (cfg_flags & BNAD_CF_MSIX) {
2463 pci_disable_msix(bnad->pcidev);
2464 kfree(bnad->msix_table);
2465 bnad->msix_table = NULL;
2469 /* Netdev entry points */
2470 static int
2471 bnad_open(struct net_device *netdev)
2473 int err;
2474 struct bnad *bnad = netdev_priv(netdev);
2475 struct bna_pause_config pause_config;
2476 int mtu;
2477 unsigned long flags;
2479 mutex_lock(&bnad->conf_mutex);
2481 /* Tx */
2482 err = bnad_setup_tx(bnad, 0);
2483 if (err)
2484 goto err_return;
2486 /* Rx */
2487 err = bnad_setup_rx(bnad, 0);
2488 if (err)
2489 goto cleanup_tx;
2491 /* Port */
2492 pause_config.tx_pause = 0;
2493 pause_config.rx_pause = 0;
2495 mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
2497 spin_lock_irqsave(&bnad->bna_lock, flags);
2498 bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
2499 bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
2500 bna_enet_enable(&bnad->bna.enet);
2501 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2503 /* Enable broadcast */
2504 bnad_enable_default_bcast(bnad);
2506 /* Restore VLANs, if any */
2507 bnad_restore_vlans(bnad, 0);
2509 /* Set the UCAST address */
2510 spin_lock_irqsave(&bnad->bna_lock, flags);
2511 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2512 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2514 /* Start the stats timer */
2515 bnad_stats_timer_start(bnad);
2517 mutex_unlock(&bnad->conf_mutex);
2519 return 0;
2521 cleanup_tx:
2522 bnad_cleanup_tx(bnad, 0);
2524 err_return:
2525 mutex_unlock(&bnad->conf_mutex);
2526 return err;
2529 static int
2530 bnad_stop(struct net_device *netdev)
2532 struct bnad *bnad = netdev_priv(netdev);
2533 unsigned long flags;
2535 mutex_lock(&bnad->conf_mutex);
2537 /* Stop the stats timer */
2538 bnad_stats_timer_stop(bnad);
2540 init_completion(&bnad->bnad_completions.enet_comp);
2542 spin_lock_irqsave(&bnad->bna_lock, flags);
2543 bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2544 bnad_cb_enet_disabled);
2545 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2547 wait_for_completion(&bnad->bnad_completions.enet_comp);
2549 bnad_cleanup_tx(bnad, 0);
2550 bnad_cleanup_rx(bnad, 0);
2552 /* Synchronize mailbox IRQ */
2553 bnad_mbox_irq_sync(bnad);
2555 mutex_unlock(&bnad->conf_mutex);
2557 return 0;
2560 /* TX */
2562 * bnad_start_xmit : Netdev entry point for Transmit
2563 * Called with the Tx queue lock held by the network stack
2565 static netdev_tx_t
2566 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2568 struct bnad *bnad = netdev_priv(netdev);
2569 u32 txq_id = 0;
2570 struct bna_tcb *tcb = bnad->tx_info[0].tcb[txq_id];
2572 u16 txq_prod, vlan_tag = 0;
2573 u32 unmap_prod, wis, wis_used, wi_range;
2574 u32 vectors, vect_id, i, acked;
2575 int err;
2576 unsigned int len;
2577 u32 gso_size;
2579 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
2580 dma_addr_t dma_addr;
2581 struct bna_txq_entry *txqent;
2582 u16 flags;
2584 if (unlikely(skb->len <= ETH_HLEN)) {
2585 dev_kfree_skb(skb);
2586 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2587 return NETDEV_TX_OK;
2589 if (unlikely(skb_headlen(skb) > BFI_TX_MAX_DATA_PER_VECTOR)) {
2590 dev_kfree_skb(skb);
2591 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_too_long);
2592 return NETDEV_TX_OK;
2594 if (unlikely(skb_headlen(skb) == 0)) {
2595 dev_kfree_skb(skb);
2596 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2597 return NETDEV_TX_OK;
2601 * Takes care of the Tx that is scheduled between clearing the flag
2602 * and the netif_tx_stop_all_queues() call.
2604 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2605 dev_kfree_skb(skb);
2606 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2607 return NETDEV_TX_OK;
2610 vectors = 1 + skb_shinfo(skb)->nr_frags;
2611 if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2612 dev_kfree_skb(skb);
2613 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2614 return NETDEV_TX_OK;
2616 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
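/*
 * For example, an skb with five page fragments needs six Tx vectors
 * and therefore two work items: the first carries four vectors and an
 * extension work item carries the remaining two.
 */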
2617 acked = 0;
2618 if (unlikely(wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2619 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2620 if ((u16) (*tcb->hw_consumer_index) !=
2621 tcb->consumer_index &&
2622 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2623 acked = bnad_free_txbufs(bnad, tcb);
2624 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2625 bna_ib_ack(tcb->i_dbell, acked);
2626 smp_mb__before_clear_bit();
2627 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2628 } else {
2629 netif_stop_queue(netdev);
2630 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2633 smp_mb();
2635 * Check again to deal with the race between netif_stop_queue()
2636 * here and netif_wake_queue() in the interrupt handler, which
2637 * does not run under the netif Tx lock.
2639 if (likely
2640 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2641 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2642 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2643 return NETDEV_TX_BUSY;
2644 } else {
2645 netif_wake_queue(netdev);
2646 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2650 unmap_prod = unmap_q->producer_index;
2651 flags = 0;
2653 txq_prod = tcb->producer_index;
2654 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
2655 txqent->hdr.wi.reserved = 0;
2656 txqent->hdr.wi.num_vectors = vectors;
2658 if (vlan_tx_tag_present(skb)) {
2659 vlan_tag = (u16) vlan_tx_tag_get(skb);
2660 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2662 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2663 vlan_tag =
2664 (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
2665 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2668 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2670 if (skb_is_gso(skb)) {
2671 gso_size = skb_shinfo(skb)->gso_size;
2673 if (unlikely(gso_size > netdev->mtu)) {
2674 dev_kfree_skb(skb);
2675 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2676 return NETDEV_TX_OK;
2678 if (unlikely((gso_size + skb_transport_offset(skb) +
2679 tcp_hdrlen(skb)) >= skb->len)) {
2680 txqent->hdr.wi.opcode =
2681 __constant_htons(BNA_TXQ_WI_SEND);
2682 txqent->hdr.wi.lso_mss = 0;
2683 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2684 } else {
2685 txqent->hdr.wi.opcode =
2686 __constant_htons(BNA_TXQ_WI_SEND_LSO);
2687 txqent->hdr.wi.lso_mss = htons(gso_size);
2690 err = bnad_tso_prepare(bnad, skb);
2691 if (unlikely(err)) {
2692 dev_kfree_skb(skb);
2693 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2694 return NETDEV_TX_OK;
2696 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
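/*
 * BNA_TXQ_WI_L4_HDR_N_OFFSET packs the L4 header size (in 32-bit
 * words, hence tcp_hdrlen() >> 2) together with the transport header
 * offset into the 16-bit l4_hdr_size_n_offset field.
 */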
2697 txqent->hdr.wi.l4_hdr_size_n_offset =
2698 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2699 (tcp_hdrlen(skb) >> 2,
2700 skb_transport_offset(skb)));
2701 } else {
2702 txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
2703 txqent->hdr.wi.lso_mss = 0;
2705 if (unlikely(skb->len > (netdev->mtu + ETH_HLEN))) {
2706 dev_kfree_skb(skb);
2707 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2708 return NETDEV_TX_OK;
2711 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2712 u8 proto = 0;
2714 if (skb->protocol == __constant_htons(ETH_P_IP))
2715 proto = ip_hdr(skb)->protocol;
2716 else if (skb->protocol ==
2717 __constant_htons(ETH_P_IPV6)) {
2718 /* nexthdr may not be TCP immediately. */
2719 proto = ipv6_hdr(skb)->nexthdr;
2721 if (proto == IPPROTO_TCP) {
2722 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2723 txqent->hdr.wi.l4_hdr_size_n_offset =
2724 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2725 (0, skb_transport_offset(skb)));
2727 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2729 if (unlikely(skb_headlen(skb) <
2730 skb_transport_offset(skb) + tcp_hdrlen(skb))) {
2731 dev_kfree_skb(skb);
2732 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2733 return NETDEV_TX_OK;
2736 } else if (proto == IPPROTO_UDP) {
2737 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2738 txqent->hdr.wi.l4_hdr_size_n_offset =
2739 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2740 (0, skb_transport_offset(skb)));
2742 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2743 if (unlikely(skb_headlen(skb) <
2744 skb_transport_offset(skb) +
2745 sizeof(struct udphdr))) {
2746 dev_kfree_skb(skb);
2747 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2748 return NETDEV_TX_OK;
2750 } else {
2751 dev_kfree_skb(skb);
2752 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2753 return NETDEV_TX_OK;
2755 } else {
2756 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2760 txqent->hdr.wi.flags = htons(flags);
2762 txqent->hdr.wi.frame_length = htonl(skb->len);
2764 unmap_q->unmap_array[unmap_prod].skb = skb;
2765 len = skb_headlen(skb);
2766 txqent->vector[0].length = htons(len);
2767 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
2768 skb_headlen(skb), DMA_TO_DEVICE);
2769 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2770 dma_addr);
2772 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
2773 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2775 vect_id = 0;
2776 wis_used = 1;
2778 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2779 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
2780 u16 size = skb_frag_size(frag);
2782 if (unlikely(size == 0)) {
2783 unmap_prod = unmap_q->producer_index;
2785 unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
2786 unmap_q->unmap_array,
2787 unmap_prod, unmap_q->q_depth, skb,
2789 dev_kfree_skb(skb);
2790 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
2791 return NETDEV_TX_OK;
2794 len += size;
2796 if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2797 vect_id = 0;
2798 if (--wi_range)
2799 txqent++;
2800 else {
2801 BNA_QE_INDX_ADD(txq_prod, wis_used,
2802 tcb->q_depth);
2803 wis_used = 0;
2804 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
2805 txqent, wi_range);
2807 wis_used++;
2808 txqent->hdr.wi_ext.opcode =
2809 __constant_htons(BNA_TXQ_WI_EXTENSION);
2812 BUG_ON(size > BFI_TX_MAX_DATA_PER_VECTOR);
2813 txqent->vector[vect_id].length = htons(size);
2814 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
2815 0, size, DMA_TO_DEVICE);
2816 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2817 dma_addr);
2818 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2819 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2822 if (unlikely(len != skb->len)) {
2823 unmap_prod = unmap_q->producer_index;
2825 unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
2826 unmap_q->unmap_array, unmap_prod,
2827 unmap_q->q_depth, skb,
2828 skb_shinfo(skb)->nr_frags);
2829 dev_kfree_skb(skb);
2830 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
2831 return NETDEV_TX_OK;
2834 unmap_q->producer_index = unmap_prod;
2835 BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
2836 tcb->producer_index = txq_prod;
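/*
 * Make the posted work items and the producer index update visible
 * before checking the Tx-started flag and ringing the doorbell.
 */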
2838 smp_mb();
2840 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2841 return NETDEV_TX_OK;
2843 bna_txq_prod_indx_doorbell(tcb);
2844 smp_mb();
2846 if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
2847 tasklet_schedule(&bnad->tx_free_tasklet);
2849 return NETDEV_TX_OK;
2853 * Uses a spin_lock to synchronize reading of the stats structures,
2854 * which are written by BNA under the same lock.
2856 static struct rtnl_link_stats64 *
2857 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
2859 struct bnad *bnad = netdev_priv(netdev);
2860 unsigned long flags;
2862 spin_lock_irqsave(&bnad->bna_lock, flags);
2864 bnad_netdev_qstats_fill(bnad, stats);
2865 bnad_netdev_hwstats_fill(bnad, stats);
2867 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2869 return stats;
2872 void
2873 bnad_set_rx_mode(struct net_device *netdev)
2875 struct bnad *bnad = netdev_priv(netdev);
2876 u32 new_mask, valid_mask;
2877 unsigned long flags;
2879 spin_lock_irqsave(&bnad->bna_lock, flags);
2881 new_mask = valid_mask = 0;
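/*
 * valid_mask selects which rxmode bits bna_rx_mode_set() should act
 * on; new_mask supplies the new value for each selected bit.
 */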
2883 if (netdev->flags & IFF_PROMISC) {
2884 if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
2885 new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2886 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2887 bnad->cfg_flags |= BNAD_CF_PROMISC;
2889 } else {
2890 if (bnad->cfg_flags & BNAD_CF_PROMISC) {
2891 new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
2892 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2893 bnad->cfg_flags &= ~BNAD_CF_PROMISC;
2897 if (netdev->flags & IFF_ALLMULTI) {
2898 if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
2899 new_mask |= BNA_RXMODE_ALLMULTI;
2900 valid_mask |= BNA_RXMODE_ALLMULTI;
2901 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
2903 } else {
2904 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
2905 new_mask &= ~BNA_RXMODE_ALLMULTI;
2906 valid_mask |= BNA_RXMODE_ALLMULTI;
2907 bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
2911 if (bnad->rx_info[0].rx == NULL)
2912 goto unlock;
2914 bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
2916 if (!netdev_mc_empty(netdev)) {
2917 u8 *mcaddr_list;
2918 int mc_count = netdev_mc_count(netdev);
2920 /* Index 0 holds the broadcast address */
2921 mcaddr_list =
2922 kzalloc((mc_count + 1) * ETH_ALEN,
2923 GFP_ATOMIC);
2924 if (!mcaddr_list)
2925 goto unlock;
2927 memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
2929 /* Copy rest of the MC addresses */
2930 bnad_netdev_mc_list_get(netdev, mcaddr_list);
2932 bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
2933 mcaddr_list, NULL);
2935 /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
2936 kfree(mcaddr_list);
2938 unlock:
2939 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2943 * bna_lock is used to sync writes to netdev->addr
2944 * conf_lock cannot be used since this call may be made
2945 * in a non-blocking context.
2947 static int
2948 bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
2950 int err;
2951 struct bnad *bnad = netdev_priv(netdev);
2952 struct sockaddr *sa = (struct sockaddr *)mac_addr;
2953 unsigned long flags;
2955 spin_lock_irqsave(&bnad->bna_lock, flags);
2957 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
2959 if (!err)
2960 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
2962 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2964 return err;
2967 static int
2968 bnad_mtu_set(struct bnad *bnad, int mtu)
2970 unsigned long flags;
2972 init_completion(&bnad->bnad_completions.mtu_comp);
2974 spin_lock_irqsave(&bnad->bna_lock, flags);
2975 bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
2976 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2978 wait_for_completion(&bnad->bnad_completions.mtu_comp);
2980 return bnad->bnad_completions.mtu_comp_status;
2983 static int
2984 bnad_change_mtu(struct net_device *netdev, int new_mtu)
2986 int err, mtu = netdev->mtu;
2987 struct bnad *bnad = netdev_priv(netdev);
2989 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
2990 return -EINVAL;
2992 mutex_lock(&bnad->conf_mutex);
2994 netdev->mtu = new_mtu;
2996 mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
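/* e.g. new_mtu = 1500 programs a 14 + 4 + 1500 + 4 = 1522 byte frame */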
2997 err = bnad_mtu_set(bnad, mtu);
2998 if (err)
2999 err = -EBUSY;
3001 mutex_unlock(&bnad->conf_mutex);
3002 return err;
3005 static int
3006 bnad_vlan_rx_add_vid(struct net_device *netdev,
3007 unsigned short vid)
3009 struct bnad *bnad = netdev_priv(netdev);
3010 unsigned long flags;
3012 if (!bnad->rx_info[0].rx)
3013 return 0;
3015 mutex_lock(&bnad->conf_mutex);
3017 spin_lock_irqsave(&bnad->bna_lock, flags);
3018 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
3019 set_bit(vid, bnad->active_vlans);
3020 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3022 mutex_unlock(&bnad->conf_mutex);
3024 return 0;
3027 static int
3028 bnad_vlan_rx_kill_vid(struct net_device *netdev,
3029 unsigned short vid)
3031 struct bnad *bnad = netdev_priv(netdev);
3032 unsigned long flags;
3034 if (!bnad->rx_info[0].rx)
3035 return 0;
3037 mutex_lock(&bnad->conf_mutex);
3039 spin_lock_irqsave(&bnad->bna_lock, flags);
3040 clear_bit(vid, bnad->active_vlans);
3041 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3042 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3044 mutex_unlock(&bnad->conf_mutex);
3046 return 0;
3049 #ifdef CONFIG_NET_POLL_CONTROLLER
3050 static void
3051 bnad_netpoll(struct net_device *netdev)
3053 struct bnad *bnad = netdev_priv(netdev);
3054 struct bnad_rx_info *rx_info;
3055 struct bnad_rx_ctrl *rx_ctrl;
3056 u32 curr_mask;
3057 int i, j;
3059 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3060 bna_intx_disable(&bnad->bna, curr_mask);
3061 bnad_isr(bnad->pcidev->irq, netdev);
3062 bna_intx_enable(&bnad->bna, curr_mask);
3063 } else {
3065 * Tx processing may happen in sending context, so no need
3066 * to explicitly process completions here
3069 /* Rx processing */
3070 for (i = 0; i < bnad->num_rx; i++) {
3071 rx_info = &bnad->rx_info[i];
3072 if (!rx_info->rx)
3073 continue;
3074 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3075 rx_ctrl = &rx_info->rx_ctrl[j];
3076 if (rx_ctrl->ccb)
3077 bnad_netif_rx_schedule_poll(bnad,
3078 rx_ctrl->ccb);
3083 #endif
3085 static const struct net_device_ops bnad_netdev_ops = {
3086 .ndo_open = bnad_open,
3087 .ndo_stop = bnad_stop,
3088 .ndo_start_xmit = bnad_start_xmit,
3089 .ndo_get_stats64 = bnad_get_stats64,
3090 .ndo_set_rx_mode = bnad_set_rx_mode,
3091 .ndo_validate_addr = eth_validate_addr,
3092 .ndo_set_mac_address = bnad_set_mac_address,
3093 .ndo_change_mtu = bnad_change_mtu,
3094 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
3095 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
3096 #ifdef CONFIG_NET_POLL_CONTROLLER
3097 .ndo_poll_controller = bnad_netpoll
3098 #endif
3101 static void
3102 bnad_netdev_init(struct bnad *bnad, bool using_dac)
3104 struct net_device *netdev = bnad->netdev;
3106 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3107 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3108 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;
3110 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3111 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3112 NETIF_F_TSO | NETIF_F_TSO6;
3114 netdev->features |= netdev->hw_features |
3115 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3117 if (using_dac)
3118 netdev->features |= NETIF_F_HIGHDMA;
3120 netdev->mem_start = bnad->mmio_start;
3121 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3123 netdev->netdev_ops = &bnad_netdev_ops;
3124 bnad_set_ethtool_ops(netdev);
3128 * 1. Initialize the bnad structure
3129 * 2. Set up the netdev pointer in pci_dev
3130 * 3. Initialize the Tx free tasklet
3131 * 4. Initialize the number of TxQs, CQs and MSI-X vectors
3133 static int
3134 bnad_init(struct bnad *bnad,
3135 struct pci_dev *pdev, struct net_device *netdev)
3137 unsigned long flags;
3139 SET_NETDEV_DEV(netdev, &pdev->dev);
3140 pci_set_drvdata(pdev, netdev);
3142 bnad->netdev = netdev;
3143 bnad->pcidev = pdev;
3144 bnad->mmio_start = pci_resource_start(pdev, 0);
3145 bnad->mmio_len = pci_resource_len(pdev, 0);
3146 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3147 if (!bnad->bar0) {
3148 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3149 pci_set_drvdata(pdev, NULL);
3150 return -ENOMEM;
3152 pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
3153 (unsigned long long) bnad->mmio_len);
3155 spin_lock_irqsave(&bnad->bna_lock, flags);
3156 if (!bnad_msix_disable)
3157 bnad->cfg_flags = BNAD_CF_MSIX;
3159 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3161 bnad_q_num_init(bnad);
3162 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3164 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3165 (bnad->num_rx * bnad->num_rxp_per_rx) +
3166 BNAD_MAILBOX_MSIX_VECTORS;
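/*
 * For example, on an 8-CPU system with a single Tx queue and one
 * mailbox vector this requests 1 + 8 + 1 = 10 MSI-X vectors.
 */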
3168 bnad->txq_depth = BNAD_TXQ_DEPTH;
3169 bnad->rxq_depth = BNAD_RXQ_DEPTH;
3171 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3172 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3174 tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
3175 (unsigned long)bnad);
3177 return 0;
3181 * Must be called after bnad_pci_uninit()
3182 * so that iounmap() and pci_set_drvdata(NULL)
3183 * happen only after PCI uninitialization.
3185 static void
3186 bnad_uninit(struct bnad *bnad)
3188 if (bnad->bar0)
3189 iounmap(bnad->bar0);
3190 pci_set_drvdata(bnad->pcidev, NULL);
3194 * Initialize locks
3195 a) Per-ioceth mutex used for serializing configuration
3196 changes from OS interface
3197 b) spin lock used to protect bna state machine
3199 static void
3200 bnad_lock_init(struct bnad *bnad)
3202 spin_lock_init(&bnad->bna_lock);
3203 mutex_init(&bnad->conf_mutex);
3204 mutex_init(&bnad_list_mutex);
3207 static void
3208 bnad_lock_uninit(struct bnad *bnad)
3210 mutex_destroy(&bnad->conf_mutex);
3211 mutex_destroy(&bnad_list_mutex);
3214 /* PCI Initialization */
3215 static int
3216 bnad_pci_init(struct bnad *bnad,
3217 struct pci_dev *pdev, bool *using_dac)
3219 int err;
3221 err = pci_enable_device(pdev);
3222 if (err)
3223 return err;
3224 err = pci_request_regions(pdev, BNAD_NAME);
3225 if (err)
3226 goto disable_device;
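/*
 * Prefer 64-bit DMA; if either the streaming or the coherent 64-bit
 * mask cannot be set, fall back to 32-bit DMA and clear *using_dac.
 */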
3227 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3228 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3229 *using_dac = true;
3230 } else {
3231 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3232 if (err) {
3233 err = dma_set_coherent_mask(&pdev->dev,
3234 DMA_BIT_MASK(32));
3235 if (err)
3236 goto release_regions;
3238 *using_dac = false;
3240 pci_set_master(pdev);
3241 return 0;
3243 release_regions:
3244 pci_release_regions(pdev);
3245 disable_device:
3246 pci_disable_device(pdev);
3248 return err;
3251 static void
3252 bnad_pci_uninit(struct pci_dev *pdev)
3254 pci_release_regions(pdev);
3255 pci_disable_device(pdev);
3258 static int __devinit
3259 bnad_pci_probe(struct pci_dev *pdev,
3260 const struct pci_device_id *pcidev_id)
3262 bool using_dac;
3263 int err;
3264 struct bnad *bnad;
3265 struct bna *bna;
3266 struct net_device *netdev;
3267 struct bfa_pcidev pcidev_info;
3268 unsigned long flags;
3270 pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3271 pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3273 mutex_lock(&bnad_fwimg_mutex);
3274 if (!cna_get_firmware_buf(pdev)) {
3275 mutex_unlock(&bnad_fwimg_mutex);
3276 pr_warn("Failed to load Firmware Image!\n");
3277 return -ENODEV;
3279 mutex_unlock(&bnad_fwimg_mutex);
3282 * Allocates sizeof(struct net_device) + sizeof(struct bnad)
3283 * bnad = netdev->priv
3285 netdev = alloc_etherdev(sizeof(struct bnad));
3286 if (!netdev) {
3287 dev_err(&pdev->dev, "netdev allocation failed\n");
3288 err = -ENOMEM;
3289 return err;
3291 bnad = netdev_priv(netdev);
3292 bnad_lock_init(bnad);
3293 bnad_add_to_list(bnad);
3295 mutex_lock(&bnad->conf_mutex);
3297 * PCI initialization
3298 * Output : using_dac = 1 for 64 bit DMA
3299 * = 0 for 32 bit DMA
3301 err = bnad_pci_init(bnad, pdev, &using_dac);
3302 if (err)
3303 goto unlock_mutex;
3306 * Initialize bnad structure
3307 * Setup relation between pci_dev & netdev
3308 * Init Tx free tasklet
3310 err = bnad_init(bnad, pdev, netdev);
3311 if (err)
3312 goto pci_uninit;
3314 /* Initialize netdev structure, set up ethtool ops */
3315 bnad_netdev_init(bnad, using_dac);
3317 /* Set link to down state */
3318 netif_carrier_off(netdev);
3320 /* Set up the debugfs node for this bnad */
3321 if (bna_debugfs_enable)
3322 bnad_debugfs_init(bnad);
3324 /* Get resource requirements from bna */
3325 spin_lock_irqsave(&bnad->bna_lock, flags);
3326 bna_res_req(&bnad->res_info[0]);
3327 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3329 /* Allocate resources from bna */
3330 err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3331 if (err)
3332 goto drv_uninit;
3334 bna = &bnad->bna;
3336 /* Setup pcidev_info for bna_init() */
3337 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3338 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3339 pcidev_info.device_id = bnad->pcidev->device;
3340 pcidev_info.pci_bar_kva = bnad->bar0;
3342 spin_lock_irqsave(&bnad->bna_lock, flags);
3343 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3344 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3346 bnad->stats.bna_stats = &bna->stats;
3348 bnad_enable_msix(bnad);
3349 err = bnad_mbox_irq_alloc(bnad);
3350 if (err)
3351 goto res_free;
3354 /* Set up timers */
3355 setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
3356 ((unsigned long)bnad));
3357 setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
3358 ((unsigned long)bnad));
3359 setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
3360 ((unsigned long)bnad));
3361 setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3362 ((unsigned long)bnad));
3364 /* Now start the timer before calling IOC */
3365 mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
3366 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3369 * Start the chip
3370 * If the callback comes back with an error, we bail out.
3371 * This is a catastrophic error.
3373 err = bnad_ioceth_enable(bnad);
3374 if (err) {
3375 pr_err("BNA: Initialization failed err=%d\n",
3376 err);
3377 goto probe_success;
3380 spin_lock_irqsave(&bnad->bna_lock, flags);
3381 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3382 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3383 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3384 bna_attr(bna)->num_rxp - 1);
3385 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3386 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3387 err = -EIO;
3389 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3390 if (err)
3391 goto disable_ioceth;
3393 spin_lock_irqsave(&bnad->bna_lock, flags);
3394 bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3395 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3397 err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3398 if (err) {
3399 err = -EIO;
3400 goto disable_ioceth;
3403 spin_lock_irqsave(&bnad->bna_lock, flags);
3404 bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3405 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3407 /* Get the burnt-in mac */
3408 spin_lock_irqsave(&bnad->bna_lock, flags);
3409 bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
3410 bnad_set_netdev_perm_addr(bnad);
3411 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3413 mutex_unlock(&bnad->conf_mutex);
3415 /* Finally, register with the net_device layer */
3416 err = register_netdev(netdev);
3417 if (err) {
3418 pr_err("BNA : Registering with netdev failed\n");
3419 goto probe_uninit;
3421 set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3423 return 0;
3425 probe_success:
3426 mutex_unlock(&bnad->conf_mutex);
3427 return 0;
3429 probe_uninit:
3430 mutex_lock(&bnad->conf_mutex);
3431 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3432 disable_ioceth:
3433 bnad_ioceth_disable(bnad);
3434 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3435 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3436 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3437 spin_lock_irqsave(&bnad->bna_lock, flags);
3438 bna_uninit(bna);
3439 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3440 bnad_mbox_irq_free(bnad);
3441 bnad_disable_msix(bnad);
3442 res_free:
3443 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3444 drv_uninit:
3445 /* Remove the debugfs node for this bnad */
3446 kfree(bnad->regdata);
3447 bnad_debugfs_uninit(bnad);
3448 bnad_uninit(bnad);
3449 pci_uninit:
3450 bnad_pci_uninit(pdev);
3451 unlock_mutex:
3452 mutex_unlock(&bnad->conf_mutex);
3453 bnad_remove_from_list(bnad);
3454 bnad_lock_uninit(bnad);
3455 free_netdev(netdev);
3456 return err;
3459 static void __devexit
3460 bnad_pci_remove(struct pci_dev *pdev)
3462 struct net_device *netdev = pci_get_drvdata(pdev);
3463 struct bnad *bnad;
3464 struct bna *bna;
3465 unsigned long flags;
3467 if (!netdev)
3468 return;
3470 pr_info("%s bnad_pci_remove\n", netdev->name);
3471 bnad = netdev_priv(netdev);
3472 bna = &bnad->bna;
3474 if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3475 unregister_netdev(netdev);
3477 mutex_lock(&bnad->conf_mutex);
3478 bnad_ioceth_disable(bnad);
3479 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3480 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3481 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3482 spin_lock_irqsave(&bnad->bna_lock, flags);
3483 bna_uninit(bna);
3484 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3486 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3487 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3488 bnad_mbox_irq_free(bnad);
3489 bnad_disable_msix(bnad);
3490 bnad_pci_uninit(pdev);
3491 mutex_unlock(&bnad->conf_mutex);
3492 bnad_remove_from_list(bnad);
3493 bnad_lock_uninit(bnad);
3494 /* Remove the debugfs node for this bnad */
3495 kfree(bnad->regdata);
3496 bnad_debugfs_uninit(bnad);
3497 bnad_uninit(bnad);
3498 free_netdev(netdev);
3501 static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
3503 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3504 PCI_DEVICE_ID_BROCADE_CT),
3505 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3506 .class_mask = 0xffff00
3509 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3510 BFA_PCI_DEVICE_ID_CT2),
3511 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3512 .class_mask = 0xffff00
3514 {0, },
3517 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3519 static struct pci_driver bnad_pci_driver = {
3520 .name = BNAD_NAME,
3521 .id_table = bnad_pci_id_table,
3522 .probe = bnad_pci_probe,
3523 .remove = __devexit_p(bnad_pci_remove),
3526 static int __init
3527 bnad_module_init(void)
3529 int err;
3531 pr_info("Brocade 10G Ethernet driver - version: %s\n",
3532 BNAD_VERSION);
3534 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3536 err = pci_register_driver(&bnad_pci_driver);
3537 if (err < 0) {
3538 pr_err("bna : PCI registration failed in module init "
3539 "(%d)\n", err);
3540 return err;
3543 return 0;
3546 static void __exit
3547 bnad_module_exit(void)
3549 pci_unregister_driver(&bnad_pci_driver);
3551 if (bfi_fw)
3552 release_firmware(bfi_fw);
3555 module_init(bnad_module_init);
3556 module_exit(bnad_module_exit);
3558 MODULE_AUTHOR("Brocade");
3559 MODULE_LICENSE("GPL");
3560 MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3561 MODULE_VERSION(BNAD_VERSION);
3562 MODULE_FIRMWARE(CNA_FW_FILE_CT);
3563 MODULE_FIRMWARE(CNA_FW_FILE_CT2);