drivers/net/bna/bnad.c
1 /*
2 * Linux network driver for Brocade Converged Network Adapter.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
18 #include <linux/netdevice.h>
19 #include <linux/skbuff.h>
20 #include <linux/etherdevice.h>
21 #include <linux/in.h>
22 #include <linux/ethtool.h>
23 #include <linux/if_vlan.h>
24 #include <linux/if_ether.h>
25 #include <linux/ip.h>
27 #include "bnad.h"
28 #include "bna.h"
29 #include "cna.h"
31 static DEFINE_MUTEX(bnad_fwimg_mutex);
34 * Module params
36 static uint bnad_msix_disable;
37 module_param(bnad_msix_disable, uint, 0444);
38 MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
40 static uint bnad_ioc_auto_recover = 1;
41 module_param(bnad_ioc_auto_recover, uint, 0444);
42 MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
45 * Global variables
47 u32 bnad_rxqs_per_cq = 2;
49 static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
52 * Local MACROS
54 #define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)
56 #define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)
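/* Mailbox IRQ: the last MSI-X vector when MSI-X is enabled, else the PCI INTx line */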
58 #define BNAD_GET_MBOX_IRQ(_bnad) \
59 (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
60 ((_bnad)->msix_table[(_bnad)->msix_num - 1].vector) : \
61 ((_bnad)->pcidev->irq))
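/* Each unmap Q is one KVA block: the bnad_unmap_q header plus (depth - 1)
 * extra bnad_skb_unmap entries; the header presumably embeds the first entry. */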
63 #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth) \
64 do { \
65 (_res_info)->res_type = BNA_RES_T_MEM; \
66 (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
67 (_res_info)->res_u.mem_info.num = (_num); \
68 (_res_info)->res_u.mem_info.len = \
69 sizeof(struct bnad_unmap_q) + \
70 (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
71 } while (0)
73 #define BNAD_TXRX_SYNC_MDELAY 250 /* 250 msecs */
76 * Reinitialize completions in CQ, once Rx is taken down
78 static void
79 bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
81 struct bna_cq_entry *cmpl, *next_cmpl;
82 unsigned int wi_range, wis = 0, ccb_prod = 0;
83 int i;
85 BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
86 wi_range);
88 for (i = 0; i < ccb->q_depth; i++) {
89 wis++;
90 if (likely(--wi_range))
91 next_cmpl = cmpl + 1;
92 else {
93 BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
94 wis = 0;
95 BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
96 next_cmpl, wi_range);
98 cmpl->valid = 0;
99 cmpl = next_cmpl;
104 * Frees all pending Tx Bufs
105 * At this point no activity is expected on the Q,
106 * so DMA unmap & freeing is fine.
108 static void
109 bnad_free_all_txbufs(struct bnad *bnad,
110 struct bna_tcb *tcb)
112 u32 unmap_cons;
113 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
114 struct bnad_skb_unmap *unmap_array;
115 struct sk_buff *skb = NULL;
116 int i;
118 unmap_array = unmap_q->unmap_array;
120 unmap_cons = 0;
121 while (unmap_cons < unmap_q->q_depth) {
122 skb = unmap_array[unmap_cons].skb;
123 if (!skb) {
124 unmap_cons++;
125 continue;
127 unmap_array[unmap_cons].skb = NULL;
129 dma_unmap_single(&bnad->pcidev->dev,
130 dma_unmap_addr(&unmap_array[unmap_cons],
131 dma_addr), skb_headlen(skb),
132 DMA_TO_DEVICE);
134 dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
135 if (++unmap_cons >= unmap_q->q_depth)
136 break;
138 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
139 dma_unmap_page(&bnad->pcidev->dev,
140 dma_unmap_addr(&unmap_array[unmap_cons],
141 dma_addr),
142 skb_shinfo(skb)->frags[i].size,
143 DMA_TO_DEVICE);
144 dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
145 0);
146 if (++unmap_cons >= unmap_q->q_depth)
147 break;
149 dev_kfree_skb_any(skb);
153 /* Data Path Handlers */
156 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
157 * Can be called in a) Interrupt context
158 * b) Sending context
159 * c) Tasklet context
161 static u32
162 bnad_free_txbufs(struct bnad *bnad,
163 struct bna_tcb *tcb)
165 u32 sent_packets = 0, sent_bytes = 0;
166 u16 wis, unmap_cons, updated_hw_cons;
167 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
168 struct bnad_skb_unmap *unmap_array;
169 struct sk_buff *skb;
170 int i;
173 * Just return if TX is stopped. This check is useful
174 * when bnad_free_txbufs() runs from a tasklet that was
175 * scheduled before bnad_cb_tx_cleanup() cleared the
176 * BNAD_TXQ_TX_STARTED bit, but actually executes after
177 * the cleanup has completed.
179 if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
180 return 0;
182 updated_hw_cons = *(tcb->hw_consumer_index);
184 wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
185 updated_hw_cons, tcb->q_depth);
187 BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
189 unmap_array = unmap_q->unmap_array;
190 unmap_cons = unmap_q->consumer_index;
192 prefetch(&unmap_array[unmap_cons + 1]);
193 while (wis) {
194 skb = unmap_array[unmap_cons].skb;
196 unmap_array[unmap_cons].skb = NULL;
198 sent_packets++;
199 sent_bytes += skb->len;
200 wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
202 dma_unmap_single(&bnad->pcidev->dev,
203 dma_unmap_addr(&unmap_array[unmap_cons],
204 dma_addr), skb_headlen(skb),
205 DMA_TO_DEVICE);
206 dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
207 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
209 prefetch(&unmap_array[unmap_cons + 1]);
210 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
211 prefetch(&unmap_array[unmap_cons + 1]);
213 dma_unmap_page(&bnad->pcidev->dev,
214 dma_unmap_addr(&unmap_array[unmap_cons],
215 dma_addr),
216 skb_shinfo(skb)->frags[i].size,
217 DMA_TO_DEVICE);
218 dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
219 0);
220 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
222 dev_kfree_skb_any(skb);
225 /* Update consumer pointers. */
226 tcb->consumer_index = updated_hw_cons;
227 unmap_q->consumer_index = unmap_cons;
229 tcb->txq->tx_packets += sent_packets;
230 tcb->txq->tx_bytes += sent_bytes;
232 return sent_packets;
235 /* Tx Free Tasklet function */
236 /* Frees for all the tcb's in all the Tx's */
238 * Scheduled from sending context, so that
239 * the fat Tx lock is not held for too long
240 * in the sending context.
242 static void
243 bnad_tx_free_tasklet(unsigned long bnad_ptr)
245 struct bnad *bnad = (struct bnad *)bnad_ptr;
246 struct bna_tcb *tcb;
247 u32 acked = 0;
248 int i, j;
250 for (i = 0; i < bnad->num_tx; i++) {
251 for (j = 0; j < bnad->num_txq_per_tx; j++) {
252 tcb = bnad->tx_info[i].tcb[j];
253 if (!tcb)
254 continue;
255 if (((u16) (*tcb->hw_consumer_index) !=
256 tcb->consumer_index) &&
257 (!test_and_set_bit(BNAD_TXQ_FREE_SENT,
258 &tcb->flags))) {
259 acked = bnad_free_txbufs(bnad, tcb);
260 if (likely(test_bit(BNAD_TXQ_TX_STARTED,
261 &tcb->flags)))
262 bna_ib_ack(tcb->i_dbell, acked);
263 smp_mb__before_clear_bit();
264 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
266 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
267 &tcb->flags)))
268 continue;
269 if (netif_queue_stopped(bnad->netdev)) {
270 if (acked && netif_carrier_ok(bnad->netdev) &&
271 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
272 BNAD_NETIF_WAKE_THRESHOLD) {
273 netif_wake_queue(bnad->netdev);
274 /* TODO */
275 /* Counters for individual TxQs? */
276 BNAD_UPDATE_CTR(bnad,
277 netif_queue_wakeup);
284 static u32
285 bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
287 struct net_device *netdev = bnad->netdev;
288 u32 sent = 0;
290 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
291 return 0;
293 sent = bnad_free_txbufs(bnad, tcb);
294 if (sent) {
295 if (netif_queue_stopped(netdev) &&
296 netif_carrier_ok(netdev) &&
297 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
298 BNAD_NETIF_WAKE_THRESHOLD) {
299 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
300 netif_wake_queue(netdev);
301 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
306 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
307 bna_ib_ack(tcb->i_dbell, sent);
309 smp_mb__before_clear_bit();
310 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
312 return sent;
315 /* MSIX Tx Completion Handler */
316 static irqreturn_t
317 bnad_msix_tx(int irq, void *data)
319 struct bna_tcb *tcb = (struct bna_tcb *)data;
320 struct bnad *bnad = tcb->bnad;
322 bnad_tx(bnad, tcb);
324 return IRQ_HANDLED;
327 static void
328 bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
330 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
332 rcb->producer_index = 0;
333 rcb->consumer_index = 0;
335 unmap_q->producer_index = 0;
336 unmap_q->consumer_index = 0;
339 static void
340 bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
342 struct bnad_unmap_q *unmap_q;
343 struct bnad_skb_unmap *unmap_array;
344 struct sk_buff *skb;
345 int unmap_cons;
347 unmap_q = rcb->unmap_q;
348 unmap_array = unmap_q->unmap_array;
349 for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
350 skb = unmap_array[unmap_cons].skb;
351 if (!skb)
352 continue;
353 unmap_array[unmap_cons].skb = NULL;
354 dma_unmap_single(&bnad->pcidev->dev,
355 dma_unmap_addr(&unmap_array[unmap_cons],
356 dma_addr),
357 rcb->rxq->buffer_size,
358 DMA_FROM_DEVICE);
359 dev_kfree_skb(skb);
361 bnad_reset_rcb(bnad, rcb);
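/* Allocate skbs for the free RxQ slots, DMA-map them into the Rx work
 * items, and ring the doorbell if the queue has been started. */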
364 static void
365 bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
367 u16 to_alloc, alloced, unmap_prod, wi_range;
368 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
369 struct bnad_skb_unmap *unmap_array;
370 struct bna_rxq_entry *rxent;
371 struct sk_buff *skb;
372 dma_addr_t dma_addr;
374 alloced = 0;
375 to_alloc =
376 BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);
378 unmap_array = unmap_q->unmap_array;
379 unmap_prod = unmap_q->producer_index;
381 BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
383 while (to_alloc--) {
384 if (!wi_range) {
385 BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
386 wi_range);
388 skb = alloc_skb(rcb->rxq->buffer_size + NET_IP_ALIGN,
389 GFP_ATOMIC);
390 if (unlikely(!skb)) {
391 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
392 goto finishing;
394 skb->dev = bnad->netdev;
395 skb_reserve(skb, NET_IP_ALIGN);
396 unmap_array[unmap_prod].skb = skb;
397 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
398 rcb->rxq->buffer_size,
399 DMA_FROM_DEVICE);
400 dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
401 dma_addr);
402 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
403 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
405 rxent++;
406 wi_range--;
407 alloced++;
410 finishing:
411 if (likely(alloced)) {
412 unmap_q->producer_index = unmap_prod;
413 rcb->producer_index = unmap_prod;
414 smp_mb();
415 if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags)))
416 bna_rxq_prod_indx_doorbell(rcb);
420 static inline void
421 bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
423 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
425 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
426 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
427 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
428 bnad_alloc_n_post_rxbufs(bnad, rcb);
429 smp_mb__before_clear_bit();
430 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
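/* Consume up to 'budget' completions: unmap each Rx buffer, pass the skb up
 * the stack (GRO / VLAN-accelerated paths as appropriate), then ack the IB
 * and refill the RxQ(s). */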
434 static u32
435 bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
437 struct bna_cq_entry *cmpl, *next_cmpl;
438 struct bna_rcb *rcb = NULL;
439 unsigned int wi_range, packets = 0, wis = 0;
440 struct bnad_unmap_q *unmap_q;
441 struct bnad_skb_unmap *unmap_array;
442 struct sk_buff *skb;
443 u32 flags, unmap_cons;
444 u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
445 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
447 if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
448 return 0;
450 prefetch(bnad->netdev);
451 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
452 wi_range);
453 BUG_ON(!(wi_range <= ccb->q_depth));
454 while (cmpl->valid && packets < budget) {
455 packets++;
456 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
458 if (qid0 == cmpl->rxq_id)
459 rcb = ccb->rcb[0];
460 else
461 rcb = ccb->rcb[1];
463 unmap_q = rcb->unmap_q;
464 unmap_array = unmap_q->unmap_array;
465 unmap_cons = unmap_q->consumer_index;
467 skb = unmap_array[unmap_cons].skb;
468 BUG_ON(!(skb));
469 unmap_array[unmap_cons].skb = NULL;
470 dma_unmap_single(&bnad->pcidev->dev,
471 dma_unmap_addr(&unmap_array[unmap_cons],
472 dma_addr),
473 rcb->rxq->buffer_size,
474 DMA_FROM_DEVICE);
475 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
477 /* Should be more efficient ? Performance ? */
478 BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
480 wis++;
481 if (likely(--wi_range))
482 next_cmpl = cmpl + 1;
483 else {
484 BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
485 wis = 0;
486 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
487 next_cmpl, wi_range);
488 BUG_ON(!(wi_range <= ccb->q_depth));
490 prefetch(next_cmpl);
492 flags = ntohl(cmpl->flags);
493 if (unlikely
494 (flags &
495 (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
496 BNA_CQ_EF_TOO_LONG))) {
497 dev_kfree_skb_any(skb);
498 rcb->rxq->rx_packets_with_error++;
499 goto next;
502 skb_put(skb, ntohs(cmpl->length));
503 if (likely
504 ((bnad->netdev->features & NETIF_F_RXCSUM) &&
505 (((flags & BNA_CQ_EF_IPV4) &&
506 (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
507 (flags & BNA_CQ_EF_IPV6)) &&
508 (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
509 (flags & BNA_CQ_EF_L4_CKSUM_OK)))
510 skb->ip_summed = CHECKSUM_UNNECESSARY;
511 else
512 skb_checksum_none_assert(skb);
514 rcb->rxq->rx_packets++;
515 rcb->rxq->rx_bytes += skb->len;
516 skb->protocol = eth_type_trans(skb, bnad->netdev);
518 if (bnad->vlan_grp && (flags & BNA_CQ_EF_VLAN)) {
519 struct bnad_rx_ctrl *rx_ctrl =
520 (struct bnad_rx_ctrl *)ccb->ctrl;
521 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
522 vlan_gro_receive(&rx_ctrl->napi, bnad->vlan_grp,
523 ntohs(cmpl->vlan_tag), skb);
524 else
525 vlan_hwaccel_receive_skb(skb,
526 bnad->vlan_grp,
527 ntohs(cmpl->vlan_tag));
529 } else { /* Not VLAN tagged/stripped */
530 struct bnad_rx_ctrl *rx_ctrl =
531 (struct bnad_rx_ctrl *)ccb->ctrl;
532 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
533 napi_gro_receive(&rx_ctrl->napi, skb);
534 else
535 netif_receive_skb(skb);
538 next:
539 cmpl->valid = 0;
540 cmpl = next_cmpl;
543 BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
545 if (likely(ccb)) {
546 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
547 bna_ib_ack(ccb->i_dbell, packets);
548 bnad_refill_rxq(bnad, ccb->rcb[0]);
549 if (ccb->rcb[1])
550 bnad_refill_rxq(bnad, ccb->rcb[1]);
551 } else {
552 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
553 bna_ib_ack(ccb->i_dbell, 0);
556 return packets;
559 static void
560 bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
562 if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
563 return;
565 bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
566 bna_ib_ack(ccb->i_dbell, 0);
569 static void
570 bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
572 unsigned long flags;
574 /* Because of polling context */
575 spin_lock_irqsave(&bnad->bna_lock, flags);
576 bnad_enable_rx_irq_unsafe(ccb);
577 spin_unlock_irqrestore(&bnad->bna_lock, flags);
580 static void
581 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
583 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
584 struct napi_struct *napi = &rx_ctrl->napi;
586 if (likely(napi_schedule_prep(napi))) {
587 bnad_disable_rx_irq(bnad, ccb);
588 __napi_schedule(napi);
590 BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
593 /* MSIX Rx Path Handler */
594 static irqreturn_t
595 bnad_msix_rx(int irq, void *data)
597 struct bna_ccb *ccb = (struct bna_ccb *)data;
598 struct bnad *bnad = ccb->bnad;
600 bnad_netif_rx_schedule_poll(bnad, ccb);
602 return IRQ_HANDLED;
605 /* Interrupt handlers */
607 /* Mbox Interrupt Handlers */
608 static irqreturn_t
609 bnad_msix_mbox_handler(int irq, void *data)
611 u32 intr_status;
612 unsigned long flags;
613 struct bnad *bnad = (struct bnad *)data;
615 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
616 return IRQ_HANDLED;
618 spin_lock_irqsave(&bnad->bna_lock, flags);
620 bna_intr_status_get(&bnad->bna, intr_status);
622 if (BNA_IS_MBOX_ERR_INTR(intr_status))
623 bna_mbox_handler(&bnad->bna, intr_status);
625 spin_unlock_irqrestore(&bnad->bna_lock, flags);
627 return IRQ_HANDLED;
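/* INTx / shared interrupt handler: service the mailbox first; if a data
 * interrupt is pending, clean all Tx queues and schedule NAPI polling for
 * every Rx path. */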
630 static irqreturn_t
631 bnad_isr(int irq, void *data)
633 int i, j;
634 u32 intr_status;
635 unsigned long flags;
636 struct bnad *bnad = (struct bnad *)data;
637 struct bnad_rx_info *rx_info;
638 struct bnad_rx_ctrl *rx_ctrl;
640 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
641 return IRQ_NONE;
643 bna_intr_status_get(&bnad->bna, intr_status);
645 if (unlikely(!intr_status))
646 return IRQ_NONE;
648 spin_lock_irqsave(&bnad->bna_lock, flags);
650 if (BNA_IS_MBOX_ERR_INTR(intr_status))
651 bna_mbox_handler(&bnad->bna, intr_status);
653 spin_unlock_irqrestore(&bnad->bna_lock, flags);
655 if (!BNA_IS_INTX_DATA_INTR(intr_status))
656 return IRQ_HANDLED;
658 /* Process data interrupts */
659 /* Tx processing */
660 for (i = 0; i < bnad->num_tx; i++) {
661 for (j = 0; j < bnad->num_txq_per_tx; j++)
662 bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
664 /* Rx processing */
665 for (i = 0; i < bnad->num_rx; i++) {
666 rx_info = &bnad->rx_info[i];
667 if (!rx_info->rx)
668 continue;
669 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
670 rx_ctrl = &rx_info->rx_ctrl[j];
671 if (rx_ctrl->ccb)
672 bnad_netif_rx_schedule_poll(bnad,
673 rx_ctrl->ccb);
676 return IRQ_HANDLED;
680 * Called in interrupt / callback context
681 * with bna_lock held, so cfg_flags access is OK
683 static void
684 bnad_enable_mbox_irq(struct bnad *bnad)
686 clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
688 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
692 * Called with bnad->bna_lock held because of
693 * bnad->cfg_flags access.
695 static void
696 bnad_disable_mbox_irq(struct bnad *bnad)
698 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
700 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
703 static void
704 bnad_set_netdev_perm_addr(struct bnad *bnad)
706 struct net_device *netdev = bnad->netdev;
708 memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
709 if (is_zero_ether_addr(netdev->dev_addr))
710 memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
713 /* Control Path Handlers */
715 /* Callbacks */
716 void
717 bnad_cb_device_enable_mbox_intr(struct bnad *bnad)
719 bnad_enable_mbox_irq(bnad);
722 void
723 bnad_cb_device_disable_mbox_intr(struct bnad *bnad)
725 bnad_disable_mbox_irq(bnad);
728 void
729 bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status)
731 complete(&bnad->bnad_completions.ioc_comp);
732 bnad->bnad_completions.ioc_comp_status = status;
735 void
736 bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status)
738 complete(&bnad->bnad_completions.ioc_comp);
739 bnad->bnad_completions.ioc_comp_status = status;
742 static void
743 bnad_cb_port_disabled(void *arg, enum bna_cb_status status)
745 struct bnad *bnad = (struct bnad *)arg;
747 complete(&bnad->bnad_completions.port_comp);
749 netif_carrier_off(bnad->netdev);
752 void
753 bnad_cb_port_link_status(struct bnad *bnad,
754 enum bna_link_status link_status)
756 bool link_up = 0;
758 link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
760 if (link_status == BNA_CEE_UP) {
761 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
762 BNAD_UPDATE_CTR(bnad, cee_up);
763 } else
764 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
766 if (link_up) {
767 if (!netif_carrier_ok(bnad->netdev)) {
768 struct bna_tcb *tcb = bnad->tx_info[0].tcb[0];
769 if (!tcb)
770 return;
771 pr_warn("bna: %s link up\n",
772 bnad->netdev->name);
773 netif_carrier_on(bnad->netdev);
774 BNAD_UPDATE_CTR(bnad, link_toggle);
775 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
776 /* Force an immediate Transmit Schedule */
777 pr_info("bna: %s TX_STARTED\n",
778 bnad->netdev->name);
779 netif_wake_queue(bnad->netdev);
780 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
781 } else {
782 netif_stop_queue(bnad->netdev);
783 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
786 } else {
787 if (netif_carrier_ok(bnad->netdev)) {
788 pr_warn("bna: %s link down\n",
789 bnad->netdev->name);
790 netif_carrier_off(bnad->netdev);
791 BNAD_UPDATE_CTR(bnad, link_toggle);
796 static void
797 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx,
798 enum bna_cb_status status)
800 struct bnad *bnad = (struct bnad *)arg;
802 complete(&bnad->bnad_completions.tx_comp);
805 static void
806 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
808 struct bnad_tx_info *tx_info =
809 (struct bnad_tx_info *)tcb->txq->tx->priv;
810 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
812 tx_info->tcb[tcb->id] = tcb;
813 unmap_q->producer_index = 0;
814 unmap_q->consumer_index = 0;
815 unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
818 static void
819 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
821 struct bnad_tx_info *tx_info =
822 (struct bnad_tx_info *)tcb->txq->tx->priv;
823 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
825 while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
826 cpu_relax();
828 bnad_free_all_txbufs(bnad, tcb);
830 unmap_q->producer_index = 0;
831 unmap_q->consumer_index = 0;
833 smp_mb__before_clear_bit();
834 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
836 tx_info->tcb[tcb->id] = NULL;
839 static void
840 bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
842 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
844 unmap_q->producer_index = 0;
845 unmap_q->consumer_index = 0;
846 unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
849 static void
850 bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
852 bnad_free_all_rxbufs(bnad, rcb);
855 static void
856 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
858 struct bnad_rx_info *rx_info =
859 (struct bnad_rx_info *)ccb->cq->rx->priv;
861 rx_info->rx_ctrl[ccb->id].ccb = ccb;
862 ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
865 static void
866 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
868 struct bnad_rx_info *rx_info =
869 (struct bnad_rx_info *)ccb->cq->rx->priv;
871 rx_info->rx_ctrl[ccb->id].ccb = NULL;
874 static void
875 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
877 struct bnad_tx_info *tx_info =
878 (struct bnad_tx_info *)tcb->txq->tx->priv;
880 if (tx_info != &bnad->tx_info[0])
881 return;
883 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
884 netif_stop_queue(bnad->netdev);
885 pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
888 static void
889 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
891 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
893 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
894 return;
896 clear_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags);
898 while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
899 cpu_relax();
901 bnad_free_all_txbufs(bnad, tcb);
903 unmap_q->producer_index = 0;
904 unmap_q->consumer_index = 0;
906 smp_mb__before_clear_bit();
907 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
910 * Workaround for the first device enable failure, where we
911 * get a zero MAC address: try to fetch the MAC address
912 * again here.
914 if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
915 bna_port_mac_get(&bnad->bna.port, &bnad->perm_addr);
916 bnad_set_netdev_perm_addr(bnad);
919 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
921 if (netif_carrier_ok(bnad->netdev)) {
922 pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
923 netif_wake_queue(bnad->netdev);
924 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
928 static void
929 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
931 /* Delay only once for the whole Tx Path Shutdown */
932 if (!test_and_set_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags))
933 mdelay(BNAD_TXRX_SYNC_MDELAY);
936 static void
937 bnad_cb_rx_cleanup(struct bnad *bnad,
938 struct bna_ccb *ccb)
940 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
942 if (ccb->rcb[1])
943 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
945 if (!test_and_set_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags))
946 mdelay(BNAD_TXRX_SYNC_MDELAY);
949 static void
950 bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
952 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
954 clear_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags);
956 if (rcb == rcb->cq->ccb->rcb[0])
957 bnad_cq_cmpl_init(bnad, rcb->cq->ccb);
959 bnad_free_all_rxbufs(bnad, rcb);
961 set_bit(BNAD_RXQ_STARTED, &rcb->flags);
963 /* Now allocate & post buffers for this RCB */
964 /* !!Allocation in callback context */
965 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
966 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
967 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
968 bnad_alloc_n_post_rxbufs(bnad, rcb);
969 smp_mb__before_clear_bit();
970 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
974 static void
975 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx,
976 enum bna_cb_status status)
978 struct bnad *bnad = (struct bnad *)arg;
980 complete(&bnad->bnad_completions.rx_comp);
983 static void
984 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx,
985 enum bna_cb_status status)
987 bnad->bnad_completions.mcast_comp_status = status;
988 complete(&bnad->bnad_completions.mcast_comp);
991 void
992 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
993 struct bna_stats *stats)
995 if (status == BNA_CB_SUCCESS)
996 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
998 if (!netif_running(bnad->netdev) ||
999 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1000 return;
1002 mod_timer(&bnad->stats_timer,
1003 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1006 /* Resource allocation, free functions */
1008 static void
1009 bnad_mem_free(struct bnad *bnad,
1010 struct bna_mem_info *mem_info)
1012 int i;
1013 dma_addr_t dma_pa;
1015 if (mem_info->mdl == NULL)
1016 return;
1018 for (i = 0; i < mem_info->num; i++) {
1019 if (mem_info->mdl[i].kva != NULL) {
1020 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1021 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1022 dma_pa);
1023 dma_free_coherent(&bnad->pcidev->dev,
1024 mem_info->mdl[i].len,
1025 mem_info->mdl[i].kva, dma_pa);
1026 } else
1027 kfree(mem_info->mdl[i].kva);
1030 kfree(mem_info->mdl);
1031 mem_info->mdl = NULL;
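/* Allocate mem_info->num descriptors of mem_info->len bytes each, backed by
 * DMA-coherent memory or plain kernel memory depending on mem_type. */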
1034 static int
1035 bnad_mem_alloc(struct bnad *bnad,
1036 struct bna_mem_info *mem_info)
1038 int i;
1039 dma_addr_t dma_pa;
1041 if ((mem_info->num == 0) || (mem_info->len == 0)) {
1042 mem_info->mdl = NULL;
1043 return 0;
1046 mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1047 GFP_KERNEL);
1048 if (mem_info->mdl == NULL)
1049 return -ENOMEM;
1051 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1052 for (i = 0; i < mem_info->num; i++) {
1053 mem_info->mdl[i].len = mem_info->len;
1054 mem_info->mdl[i].kva =
1055 dma_alloc_coherent(&bnad->pcidev->dev,
1056 mem_info->len, &dma_pa,
1057 GFP_KERNEL);
1059 if (mem_info->mdl[i].kva == NULL)
1060 goto err_return;
1062 BNA_SET_DMA_ADDR(dma_pa,
1063 &(mem_info->mdl[i].dma));
1065 } else {
1066 for (i = 0; i < mem_info->num; i++) {
1067 mem_info->mdl[i].len = mem_info->len;
1068 mem_info->mdl[i].kva = kzalloc(mem_info->len,
1069 GFP_KERNEL);
1070 if (mem_info->mdl[i].kva == NULL)
1071 goto err_return;
1075 return 0;
1077 err_return:
1078 bnad_mem_free(bnad, mem_info);
1079 return -ENOMEM;
1082 /* Free IRQ for Mailbox */
1083 static void
1084 bnad_mbox_irq_free(struct bnad *bnad,
1085 struct bna_intr_info *intr_info)
1087 int irq;
1088 unsigned long flags;
1090 if (intr_info->idl == NULL)
1091 return;
1093 spin_lock_irqsave(&bnad->bna_lock, flags);
1094 bnad_disable_mbox_irq(bnad);
1095 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1097 irq = BNAD_GET_MBOX_IRQ(bnad);
1098 free_irq(irq, bnad);
1100 kfree(intr_info->idl);
1104 * Allocates the IRQ for the mailbox, but keeps it disabled.
1105 * It will be enabled once we get the mbox enable callback
1106 * from bna.
1108 static int
1109 bnad_mbox_irq_alloc(struct bnad *bnad,
1110 struct bna_intr_info *intr_info)
1112 int err = 0;
1113 unsigned long flags;
1114 u32 irq;
1115 irq_handler_t irq_handler;
1117 /* Mbox should use only 1 vector */
1119 intr_info->idl = kzalloc(sizeof(*(intr_info->idl)), GFP_KERNEL);
1120 if (!intr_info->idl)
1121 return -ENOMEM;
1123 spin_lock_irqsave(&bnad->bna_lock, flags);
1124 if (bnad->cfg_flags & BNAD_CF_MSIX) {
1125 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1126 irq = bnad->msix_table[bnad->msix_num - 1].vector;
1127 flags = 0;
1128 intr_info->intr_type = BNA_INTR_T_MSIX;
1129 intr_info->idl[0].vector = bnad->msix_num - 1;
1130 } else {
1131 irq_handler = (irq_handler_t)bnad_isr;
1132 irq = bnad->pcidev->irq;
1133 flags = IRQF_SHARED;
1134 intr_info->intr_type = BNA_INTR_T_INTX;
1135 /* intr_info->idl.vector = 0 ? */
1137 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1139 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1142 * Set the Mbox IRQ disable flag, so that the IRQ handler
1143 * called from request_irq() for SHARED IRQs does not execute
1145 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1147 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1149 err = request_irq(irq, irq_handler, flags,
1150 bnad->mbox_irq_name, bnad);
1152 if (err) {
1153 kfree(intr_info->idl);
1154 intr_info->idl = NULL;
1157 return err;
1160 static void
1161 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1163 kfree(intr_info->idl);
1164 intr_info->idl = NULL;
1167 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
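/* MSI-X: Tx vectors start at txrx_id and Rx vectors are offset past all Tx
 * vectors; INTx: a single descriptor whose 'vector' is a bit mask
 * (0x1 = Tx IB, 0x2 = Rx IB). */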
1168 static int
1169 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1170 uint txrx_id, struct bna_intr_info *intr_info)
1172 int i, vector_start = 0;
1173 u32 cfg_flags;
1174 unsigned long flags;
1176 spin_lock_irqsave(&bnad->bna_lock, flags);
1177 cfg_flags = bnad->cfg_flags;
1178 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1180 if (cfg_flags & BNAD_CF_MSIX) {
1181 intr_info->intr_type = BNA_INTR_T_MSIX;
1182 intr_info->idl = kcalloc(intr_info->num,
1183 sizeof(struct bna_intr_descr),
1184 GFP_KERNEL);
1185 if (!intr_info->idl)
1186 return -ENOMEM;
1188 switch (src) {
1189 case BNAD_INTR_TX:
1190 vector_start = txrx_id;
1191 break;
1193 case BNAD_INTR_RX:
1194 vector_start = bnad->num_tx * bnad->num_txq_per_tx +
1195 txrx_id;
1196 break;
1198 default:
1199 BUG();
1202 for (i = 0; i < intr_info->num; i++)
1203 intr_info->idl[i].vector = vector_start + i;
1204 } else {
1205 intr_info->intr_type = BNA_INTR_T_INTX;
1206 intr_info->num = 1;
1207 intr_info->idl = kcalloc(intr_info->num,
1208 sizeof(struct bna_intr_descr),
1209 GFP_KERNEL);
1210 if (!intr_info->idl)
1211 return -ENOMEM;
1213 switch (src) {
1214 case BNAD_INTR_TX:
1215 intr_info->idl[0].vector = 0x1; /* Bit mask : Tx IB */
1216 break;
1218 case BNAD_INTR_RX:
1219 intr_info->idl[0].vector = 0x2; /* Bit mask : Rx IB */
1220 break;
1223 return 0;
1227 * NOTE: Should be called for MSIX only
1228 * Unregisters Tx MSIX vector(s) from the kernel
1230 static void
1231 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1232 int num_txqs)
1234 int i;
1235 int vector_num;
1237 for (i = 0; i < num_txqs; i++) {
1238 if (tx_info->tcb[i] == NULL)
1239 continue;
1241 vector_num = tx_info->tcb[i]->intr_vector;
1242 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1247 * NOTE: Should be called for MSIX only
1248 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1250 static int
1251 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1252 uint tx_id, int num_txqs)
1254 int i;
1255 int err;
1256 int vector_num;
1258 for (i = 0; i < num_txqs; i++) {
1259 vector_num = tx_info->tcb[i]->intr_vector;
1260 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1261 tx_id + tx_info->tcb[i]->id);
1262 err = request_irq(bnad->msix_table[vector_num].vector,
1263 (irq_handler_t)bnad_msix_tx, 0,
1264 tx_info->tcb[i]->name,
1265 tx_info->tcb[i]);
1266 if (err)
1267 goto err_return;
1270 return 0;
1272 err_return:
1273 if (i > 0)
1274 bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1275 return -1;
1279 * NOTE: Should be called for MSIX only
1280 * Unregisters Rx MSIX vector(s) from the kernel
1282 static void
1283 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1284 int num_rxps)
1286 int i;
1287 int vector_num;
1289 for (i = 0; i < num_rxps; i++) {
1290 if (rx_info->rx_ctrl[i].ccb == NULL)
1291 continue;
1293 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1294 free_irq(bnad->msix_table[vector_num].vector,
1295 rx_info->rx_ctrl[i].ccb);
1300 * NOTE: Should be called for MSIX only
1301 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1303 static int
1304 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1305 uint rx_id, int num_rxps)
1307 int i;
1308 int err;
1309 int vector_num;
1311 for (i = 0; i < num_rxps; i++) {
1312 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1313 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1314 bnad->netdev->name,
1315 rx_id + rx_info->rx_ctrl[i].ccb->id);
1316 err = request_irq(bnad->msix_table[vector_num].vector,
1317 (irq_handler_t)bnad_msix_rx, 0,
1318 rx_info->rx_ctrl[i].ccb->name,
1319 rx_info->rx_ctrl[i].ccb);
1320 if (err)
1321 goto err_return;
1324 return 0;
1326 err_return:
1327 if (i > 0)
1328 bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1329 return -1;
1332 /* Free Tx object Resources */
1333 static void
1334 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1336 int i;
1338 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1339 if (res_info[i].res_type == BNA_RES_T_MEM)
1340 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1341 else if (res_info[i].res_type == BNA_RES_T_INTR)
1342 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1346 /* Allocates memory and interrupt resources for Tx object */
1347 static int
1348 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1349 uint tx_id)
1351 int i, err = 0;
1353 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1354 if (res_info[i].res_type == BNA_RES_T_MEM)
1355 err = bnad_mem_alloc(bnad,
1356 &res_info[i].res_u.mem_info);
1357 else if (res_info[i].res_type == BNA_RES_T_INTR)
1358 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1359 &res_info[i].res_u.intr_info);
1360 if (err)
1361 goto err_return;
1363 return 0;
1365 err_return:
1366 bnad_tx_res_free(bnad, res_info);
1367 return err;
1370 /* Free Rx object Resources */
1371 static void
1372 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1374 int i;
1376 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1377 if (res_info[i].res_type == BNA_RES_T_MEM)
1378 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1379 else if (res_info[i].res_type == BNA_RES_T_INTR)
1380 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1384 /* Allocates memory and interrupt resources for Rx object */
1385 static int
1386 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1387 uint rx_id)
1389 int i, err = 0;
1391 /* All memory needs to be allocated before setup_ccbs */
1392 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1393 if (res_info[i].res_type == BNA_RES_T_MEM)
1394 err = bnad_mem_alloc(bnad,
1395 &res_info[i].res_u.mem_info);
1396 else if (res_info[i].res_type == BNA_RES_T_INTR)
1397 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1398 &res_info[i].res_u.intr_info);
1399 if (err)
1400 goto err_return;
1402 return 0;
1404 err_return:
1405 bnad_rx_res_free(bnad, res_info);
1406 return err;
1409 /* Timer callbacks */
1410 /* a) IOC timer */
1411 static void
1412 bnad_ioc_timeout(unsigned long data)
1414 struct bnad *bnad = (struct bnad *)data;
1415 unsigned long flags;
1417 spin_lock_irqsave(&bnad->bna_lock, flags);
1418 bfa_nw_ioc_timeout((void *) &bnad->bna.device.ioc);
1419 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1422 static void
1423 bnad_ioc_hb_check(unsigned long data)
1425 struct bnad *bnad = (struct bnad *)data;
1426 unsigned long flags;
1428 spin_lock_irqsave(&bnad->bna_lock, flags);
1429 bfa_nw_ioc_hb_check((void *) &bnad->bna.device.ioc);
1430 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1433 static void
1434 bnad_iocpf_timeout(unsigned long data)
1436 struct bnad *bnad = (struct bnad *)data;
1437 unsigned long flags;
1439 spin_lock_irqsave(&bnad->bna_lock, flags);
1440 bfa_nw_iocpf_timeout((void *) &bnad->bna.device.ioc);
1441 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1444 static void
1445 bnad_iocpf_sem_timeout(unsigned long data)
1447 struct bnad *bnad = (struct bnad *)data;
1448 unsigned long flags;
1450 spin_lock_irqsave(&bnad->bna_lock, flags);
1451 bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.device.ioc);
1452 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1456 * All timer routines use bnad->bna_lock to protect against
1457 * the following race, which may occur in case of no locking:
1458 * Time CPU m CPU n
1459 * 0 1 = test_bit
1460 * 1 clear_bit
1461 * 2 del_timer_sync
1462 * 3 mod_timer
1465 /* b) Dynamic Interrupt Moderation Timer */
1466 static void
1467 bnad_dim_timeout(unsigned long data)
1469 struct bnad *bnad = (struct bnad *)data;
1470 struct bnad_rx_info *rx_info;
1471 struct bnad_rx_ctrl *rx_ctrl;
1472 int i, j;
1473 unsigned long flags;
1475 if (!netif_carrier_ok(bnad->netdev))
1476 return;
1478 spin_lock_irqsave(&bnad->bna_lock, flags);
1479 for (i = 0; i < bnad->num_rx; i++) {
1480 rx_info = &bnad->rx_info[i];
1481 if (!rx_info->rx)
1482 continue;
1483 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1484 rx_ctrl = &rx_info->rx_ctrl[j];
1485 if (!rx_ctrl->ccb)
1486 continue;
1487 bna_rx_dim_update(rx_ctrl->ccb);
1491 /* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
1492 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1493 mod_timer(&bnad->dim_timer,
1494 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1495 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1498 /* c) Statistics Timer */
1499 static void
1500 bnad_stats_timeout(unsigned long data)
1502 struct bnad *bnad = (struct bnad *)data;
1503 unsigned long flags;
1505 if (!netif_running(bnad->netdev) ||
1506 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1507 return;
1509 spin_lock_irqsave(&bnad->bna_lock, flags);
1510 bna_stats_get(&bnad->bna);
1511 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1515 * Set up timer for DIM
1516 * Called with bnad->bna_lock held
1518 void
1519 bnad_dim_timer_start(struct bnad *bnad)
1521 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1522 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1523 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1524 (unsigned long)bnad);
1525 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1526 mod_timer(&bnad->dim_timer,
1527 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1532 * Set up timer for statistics
1533 * Called with mutex_lock(&bnad->conf_mutex) held
1535 static void
1536 bnad_stats_timer_start(struct bnad *bnad)
1538 unsigned long flags;
1540 spin_lock_irqsave(&bnad->bna_lock, flags);
1541 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1542 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1543 (unsigned long)bnad);
1544 mod_timer(&bnad->stats_timer,
1545 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1547 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1551 * Stops the stats timer
1552 * Called with mutex_lock(&bnad->conf_mutex) held
1554 static void
1555 bnad_stats_timer_stop(struct bnad *bnad)
1557 int to_del = 0;
1558 unsigned long flags;
1560 spin_lock_irqsave(&bnad->bna_lock, flags);
1561 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1562 to_del = 1;
1563 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1564 if (to_del)
1565 del_timer_sync(&bnad->stats_timer);
1568 /* Utilities */
1570 static void
1571 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1573 int i = 1; /* Index 0 has broadcast address */
1574 struct netdev_hw_addr *mc_addr;
1576 netdev_for_each_mc_addr(mc_addr, netdev) {
1577 memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1578 ETH_ALEN);
1579 i++;
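/* NAPI poll: process up to 'budget' completions; if the budget was not
 * exhausted, complete NAPI and re-enable the Rx interrupt. */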
1583 static int
1584 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1586 struct bnad_rx_ctrl *rx_ctrl =
1587 container_of(napi, struct bnad_rx_ctrl, napi);
1588 struct bna_ccb *ccb;
1589 struct bnad *bnad;
1590 int rcvd = 0;
1592 ccb = rx_ctrl->ccb;
1594 bnad = ccb->bnad;
1596 if (!netif_carrier_ok(bnad->netdev))
1597 goto poll_exit;
1599 rcvd = bnad_poll_cq(bnad, ccb, budget);
1600 if (rcvd == budget)
1601 return rcvd;
1603 poll_exit:
1604 napi_complete((napi));
1606 BNAD_UPDATE_CTR(bnad, netif_rx_complete);
1608 bnad_enable_rx_irq(bnad, ccb);
1609 return rcvd;
1612 static void
1613 bnad_napi_enable(struct bnad *bnad, u32 rx_id)
1615 struct bnad_rx_ctrl *rx_ctrl;
1616 int i;
1618 /* Initialize & enable NAPI */
1619 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1620 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1622 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1623 bnad_napi_poll_rx, 64);
1625 napi_enable(&rx_ctrl->napi);
1629 static void
1630 bnad_napi_disable(struct bnad *bnad, u32 rx_id)
1632 int i;
1634 /* First disable and then clean up */
1635 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1636 napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1637 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1641 /* Should be called with conf_lock held */
1642 void
1643 bnad_cleanup_tx(struct bnad *bnad, uint tx_id)
1645 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1646 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1647 unsigned long flags;
1649 if (!tx_info->tx)
1650 return;
1652 init_completion(&bnad->bnad_completions.tx_comp);
1653 spin_lock_irqsave(&bnad->bna_lock, flags);
1654 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1655 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1656 wait_for_completion(&bnad->bnad_completions.tx_comp);
1658 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1659 bnad_tx_msix_unregister(bnad, tx_info,
1660 bnad->num_txq_per_tx);
1662 spin_lock_irqsave(&bnad->bna_lock, flags);
1663 bna_tx_destroy(tx_info->tx);
1664 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1666 tx_info->tx = NULL;
1668 if (0 == tx_id)
1669 tasklet_kill(&bnad->tx_free_tasklet);
1671 bnad_tx_res_free(bnad, res_info);
1674 /* Should be called with conf_lock held */
1675 int
1676 bnad_setup_tx(struct bnad *bnad, uint tx_id)
1678 int err;
1679 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1680 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1681 struct bna_intr_info *intr_info =
1682 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1683 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1684 struct bna_tx_event_cbfn tx_cbfn;
1685 struct bna_tx *tx;
1686 unsigned long flags;
1688 /* Initialize the Tx object configuration */
1689 tx_config->num_txq = bnad->num_txq_per_tx;
1690 tx_config->txq_depth = bnad->txq_depth;
1691 tx_config->tx_type = BNA_TX_T_REGULAR;
1693 /* Initialize the tx event handlers */
1694 tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
1695 tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
1696 tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
1697 tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
1698 tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
1700 /* Get BNA's resource requirement for one tx object */
1701 spin_lock_irqsave(&bnad->bna_lock, flags);
1702 bna_tx_res_req(bnad->num_txq_per_tx,
1703 bnad->txq_depth, res_info);
1704 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1706 /* Fill Unmap Q memory requirements */
1707 BNAD_FILL_UNMAPQ_MEM_REQ(
1708 &res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1709 bnad->num_txq_per_tx,
1710 BNAD_TX_UNMAPQ_DEPTH);
1712 /* Allocate resources */
1713 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1714 if (err)
1715 return err;
1717 /* Ask BNA to create one Tx object, supplying required resources */
1718 spin_lock_irqsave(&bnad->bna_lock, flags);
1719 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1720 tx_info);
1721 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1722 if (!tx)
1723 goto err_return;
1724 tx_info->tx = tx;
1726 /* Register ISR for the Tx object */
1727 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1728 err = bnad_tx_msix_register(bnad, tx_info,
1729 tx_id, bnad->num_txq_per_tx);
1730 if (err)
1731 goto err_return;
1734 spin_lock_irqsave(&bnad->bna_lock, flags);
1735 bna_tx_enable(tx);
1736 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1738 return 0;
1740 err_return:
1741 bnad_tx_res_free(bnad, res_info);
1742 return err;
1745 /* Setup the rx config for bna_rx_create */
1746 /* bnad decides the configuration */
1747 static void
1748 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
1750 rx_config->rx_type = BNA_RX_T_REGULAR;
1751 rx_config->num_paths = bnad->num_rxp_per_rx;
1753 if (bnad->num_rxp_per_rx > 1) {
1754 rx_config->rss_status = BNA_STATUS_T_ENABLED;
1755 rx_config->rss_config.hash_type =
1756 (BFI_RSS_T_V4_TCP |
1757 BFI_RSS_T_V6_TCP |
1758 BFI_RSS_T_V4_IP |
1759 BFI_RSS_T_V6_IP);
1760 rx_config->rss_config.hash_mask =
1761 bnad->num_rxp_per_rx - 1;
1762 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
1763 sizeof(rx_config->rss_config.toeplitz_hash_key));
1764 } else {
1765 rx_config->rss_status = BNA_STATUS_T_DISABLED;
1766 memset(&rx_config->rss_config, 0,
1767 sizeof(rx_config->rss_config));
1769 rx_config->rxp_type = BNA_RXP_SLR;
1770 rx_config->q_depth = bnad->rxq_depth;
1772 rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
1774 rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
1777 /* Called with mutex_lock(&bnad->conf_mutex) held */
1778 void
1779 bnad_cleanup_rx(struct bnad *bnad, uint rx_id)
1781 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1782 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1783 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1784 unsigned long flags;
1785 int dim_timer_del = 0;
1787 if (!rx_info->rx)
1788 return;
1790 if (0 == rx_id) {
1791 spin_lock_irqsave(&bnad->bna_lock, flags);
1792 dim_timer_del = bnad_dim_timer_running(bnad);
1793 if (dim_timer_del)
1794 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1795 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1796 if (dim_timer_del)
1797 del_timer_sync(&bnad->dim_timer);
1800 bnad_napi_disable(bnad, rx_id);
1802 init_completion(&bnad->bnad_completions.rx_comp);
1803 spin_lock_irqsave(&bnad->bna_lock, flags);
1804 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
1805 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1806 wait_for_completion(&bnad->bnad_completions.rx_comp);
1808 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
1809 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
1811 spin_lock_irqsave(&bnad->bna_lock, flags);
1812 bna_rx_destroy(rx_info->rx);
1813 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1815 rx_info->rx = NULL;
1817 bnad_rx_res_free(bnad, res_info);
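/* Rx bring-up: build the rx_config, query BNA for resource requirements,
 * allocate them, create the Rx object, register MSI-X handlers, enable NAPI
 * and, for the default Rx only, DIM and VLAN filtering, then enable the Rx. */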
1820 /* Called with mutex_lock(&bnad->conf_mutex) held */
1821 int
1822 bnad_setup_rx(struct bnad *bnad, uint rx_id)
1824 int err;
1825 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1826 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1827 struct bna_intr_info *intr_info =
1828 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
1829 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1830 struct bna_rx_event_cbfn rx_cbfn;
1831 struct bna_rx *rx;
1832 unsigned long flags;
1834 /* Initialize the Rx object configuration */
1835 bnad_init_rx_config(bnad, rx_config);
1837 /* Initialize the Rx event handlers */
1838 rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
1839 rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
1840 rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
1841 rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
1842 rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
1843 rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
1845 /* Get BNA's resource requirement for one Rx object */
1846 spin_lock_irqsave(&bnad->bna_lock, flags);
1847 bna_rx_res_req(rx_config, res_info);
1848 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1850 /* Fill Unmap Q memory requirements */
1851 BNAD_FILL_UNMAPQ_MEM_REQ(
1852 &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
1853 rx_config->num_paths +
1854 ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
1855 rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
1857 /* Allocate resource */
1858 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
1859 if (err)
1860 return err;
1862 /* Ask BNA to create one Rx object, supplying required resources */
1863 spin_lock_irqsave(&bnad->bna_lock, flags);
1864 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
1865 rx_info);
1866 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1867 if (!rx)
1868 goto err_return;
1869 rx_info->rx = rx;
1871 /* Register ISR for the Rx object */
1872 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1873 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
1874 rx_config->num_paths);
1875 if (err)
1876 goto err_return;
1879 /* Enable NAPI */
1880 bnad_napi_enable(bnad, rx_id);
1882 spin_lock_irqsave(&bnad->bna_lock, flags);
1883 if (0 == rx_id) {
1884 /* Set up Dynamic Interrupt Moderation Vector */
1885 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
1886 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
1888 /* Enable VLAN filtering only on the default Rx */
1889 bna_rx_vlanfilter_enable(rx);
1891 /* Start the DIM timer */
1892 bnad_dim_timer_start(bnad);
1895 bna_rx_enable(rx);
1896 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1898 return 0;
1900 err_return:
1901 bnad_cleanup_rx(bnad, rx_id);
1902 return err;
1905 /* Called with conf_lock & bnad->bna_lock held */
1906 void
1907 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
1909 struct bnad_tx_info *tx_info;
1911 tx_info = &bnad->tx_info[0];
1912 if (!tx_info->tx)
1913 return;
1915 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
1918 /* Called with conf_lock & bnad->bna_lock held */
1919 void
1920 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
1922 struct bnad_rx_info *rx_info;
1923 int i;
1925 for (i = 0; i < bnad->num_rx; i++) {
1926 rx_info = &bnad->rx_info[i];
1927 if (!rx_info->rx)
1928 continue;
1929 bna_rx_coalescing_timeo_set(rx_info->rx,
1930 bnad->rx_coalescing_timeo);
1935 * Called with bnad->bna_lock held
1937 static int
1938 bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
1940 int ret;
1942 if (!is_valid_ether_addr(mac_addr))
1943 return -EADDRNOTAVAIL;
1945 /* If datapath is down, pretend everything went through */
1946 if (!bnad->rx_info[0].rx)
1947 return 0;
1949 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
1950 if (ret != BNA_CB_SUCCESS)
1951 return -EADDRNOTAVAIL;
1953 return 0;
1956 /* Should be called with conf_lock held */
1957 static int
1958 bnad_enable_default_bcast(struct bnad *bnad)
1960 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
1961 int ret;
1962 unsigned long flags;
1964 init_completion(&bnad->bnad_completions.mcast_comp);
1966 spin_lock_irqsave(&bnad->bna_lock, flags);
1967 ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
1968 bnad_cb_rx_mcast_add);
1969 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1971 if (ret == BNA_CB_SUCCESS)
1972 wait_for_completion(&bnad->bnad_completions.mcast_comp);
1973 else
1974 return -ENODEV;
1976 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
1977 return -ENODEV;
1979 return 0;
1982 /* Called with bnad_conf_lock() held */
1983 static void
1984 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
1986 u16 vlan_id;
1987 unsigned long flags;
1989 if (!bnad->vlan_grp)
1990 return;
1992 BUG_ON(!(VLAN_N_VID == (BFI_MAX_VLAN + 1)));
1994 for (vlan_id = 0; vlan_id < VLAN_N_VID; vlan_id++) {
1995 if (!vlan_group_get_device(bnad->vlan_grp, vlan_id))
1996 continue;
1997 spin_lock_irqsave(&bnad->bna_lock, flags);
1998 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vlan_id);
1999 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2003 /* Statistics utilities */
2004 void
2005 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2007 int i, j;
2009 for (i = 0; i < bnad->num_rx; i++) {
2010 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2011 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2012 stats->rx_packets += bnad->rx_info[i].
2013 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2014 stats->rx_bytes += bnad->rx_info[i].
2015 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2016 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2017 bnad->rx_info[i].rx_ctrl[j].ccb->
2018 rcb[1]->rxq) {
2019 stats->rx_packets +=
2020 bnad->rx_info[i].rx_ctrl[j].
2021 ccb->rcb[1]->rxq->rx_packets;
2022 stats->rx_bytes +=
2023 bnad->rx_info[i].rx_ctrl[j].
2024 ccb->rcb[1]->rxq->rx_bytes;
2029 for (i = 0; i < bnad->num_tx; i++) {
2030 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2031 if (bnad->tx_info[i].tcb[j]) {
2032 stats->tx_packets +=
2033 bnad->tx_info[i].tcb[j]->txq->tx_packets;
2034 stats->tx_bytes +=
2035 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2042 * Must be called with the bna_lock held.
2044 void
2045 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2047 struct bfi_ll_stats_mac *mac_stats;
2048 u64 bmap;
2049 int i;
2051 mac_stats = &bnad->stats.bna_stats->hw_stats->mac_stats;
2052 stats->rx_errors =
2053 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2054 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2055 mac_stats->rx_undersize;
2056 stats->tx_errors = mac_stats->tx_fcs_error +
2057 mac_stats->tx_undersize;
2058 stats->rx_dropped = mac_stats->rx_drop;
2059 stats->tx_dropped = mac_stats->tx_drop;
2060 stats->multicast = mac_stats->rx_multicast;
2061 stats->collisions = mac_stats->tx_total_collision;
2063 stats->rx_length_errors = mac_stats->rx_frame_length_error;
2065 /* receive ring buffer overflow ?? */
2067 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2068 stats->rx_frame_errors = mac_stats->rx_alignment_error;
2069 /* recv'r fifo overrun */
2070 bmap = (u64)bnad->stats.bna_stats->rxf_bmap[0] |
2071 ((u64)bnad->stats.bna_stats->rxf_bmap[1] << 32);
2072 for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
2073 if (bmap & 1) {
2074 stats->rx_fifo_errors +=
2075 bnad->stats.bna_stats->
2076 hw_stats->rxf_stats[i].frame_drops;
2077 break;
2079 bmap >>= 1;
2083 static void
2084 bnad_mbox_irq_sync(struct bnad *bnad)
2086 u32 irq;
2087 unsigned long flags;
2089 spin_lock_irqsave(&bnad->bna_lock, flags);
2090 if (bnad->cfg_flags & BNAD_CF_MSIX)
2091 irq = bnad->msix_table[bnad->msix_num - 1].vector;
2092 else
2093 irq = bnad->pcidev->irq;
2094 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2096 synchronize_irq(irq);
2099 /* Utility used by bnad_start_xmit, for doing TSO */
2100 static int
2101 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2103 int err;
2105 /* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 is defined since 2.6.18. */
2106 BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
2107 skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
2108 if (skb_header_cloned(skb)) {
2109 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2110 if (err) {
2111 BNAD_UPDATE_CTR(bnad, tso_err);
2112 return err;
2117 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2118 * excluding the length field.
2120 if (skb->protocol == htons(ETH_P_IP)) {
2121 struct iphdr *iph = ip_hdr(skb);
2123 /* Do we really need these? */
2124 iph->tot_len = 0;
2125 iph->check = 0;
2127 tcp_hdr(skb)->check =
2128 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2129 IPPROTO_TCP, 0);
2130 BNAD_UPDATE_CTR(bnad, tso4);
2131 } else {
2132 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2134 BUG_ON(!(skb->protocol == htons(ETH_P_IPV6)));
2135 ipv6h->payload_len = 0;
2136 tcp_hdr(skb)->check =
2137 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2138 IPPROTO_TCP, 0);
2139 BNAD_UPDATE_CTR(bnad, tso6);
2142 return 0;
2146 * Initialize Q numbers depending on Rx Paths
2147 * Called with bnad->bna_lock held, because of cfg_flags
2148 * access.
2150 static void
2151 bnad_q_num_init(struct bnad *bnad)
2153 int rxps;
2155 rxps = min((uint)num_online_cpus(),
2156 (uint)(BNAD_MAX_RXS * BNAD_MAX_RXPS_PER_RX));
2158 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2159 rxps = 1; /* INTx */
2161 bnad->num_rx = 1;
2162 bnad->num_tx = 1;
2163 bnad->num_rxp_per_rx = rxps;
2164 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2168 * Adjusts the Q numbers, given the number of MSI-X vectors.
2169 * Preference is given to RSS over Tx priority queues; in that
2170 * case, just use 1 Tx Q.
2171 * Called with bnad->bna_lock held because of cfg_flags access.
2173 static void
2174 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
2176 bnad->num_txq_per_tx = 1;
2177 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2178 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2179 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2180 bnad->num_rxp_per_rx = msix_vectors -
2181 (bnad->num_tx * bnad->num_txq_per_tx) -
2182 BNAD_MAILBOX_MSIX_VECTORS;
2183 } else
2184 bnad->num_rxp_per_rx = 1;
2187 /* Enable / disable device */
2188 static void
2189 bnad_device_disable(struct bnad *bnad)
2191 unsigned long flags;
2193 init_completion(&bnad->bnad_completions.ioc_comp);
2195 spin_lock_irqsave(&bnad->bna_lock, flags);
2196 bna_device_disable(&bnad->bna.device, BNA_HARD_CLEANUP);
2197 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2199 wait_for_completion(&bnad->bnad_completions.ioc_comp);
2202 static int
2203 bnad_device_enable(struct bnad *bnad)
2205 int err = 0;
2206 unsigned long flags;
2208 init_completion(&bnad->bnad_completions.ioc_comp);
2210 spin_lock_irqsave(&bnad->bna_lock, flags);
2211 bna_device_enable(&bnad->bna.device);
2212 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2214 wait_for_completion(&bnad->bnad_completions.ioc_comp);
2216 if (bnad->bnad_completions.ioc_comp_status)
2217 err = bnad->bnad_completions.ioc_comp_status;
2219 return err;
2222 /* Free BNA resources */
2223 static void
2224 bnad_res_free(struct bnad *bnad)
2226 int i;
2227 struct bna_res_info *res_info = &bnad->res_info[0];
2229 for (i = 0; i < BNA_RES_T_MAX; i++) {
2230 if (res_info[i].res_type == BNA_RES_T_MEM)
2231 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2232 else
2233 bnad_mbox_irq_free(bnad, &res_info[i].res_u.intr_info);
2237 /* Allocates memory and interrupt resources for BNA */
2238 static int
2239 bnad_res_alloc(struct bnad *bnad)
2241 int i, err;
2242 struct bna_res_info *res_info = &bnad->res_info[0];
2244 for (i = 0; i < BNA_RES_T_MAX; i++) {
2245 if (res_info[i].res_type == BNA_RES_T_MEM)
2246 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2247 else
2248 err = bnad_mbox_irq_alloc(bnad,
2249 &res_info[i].res_u.intr_info);
2250 if (err)
2251 goto err_return;
2253 return 0;
2255 err_return:
2256 bnad_res_free(bnad);
2257 return err;
2260 /* Interrupt enable / disable */
2261 static void
2262 bnad_enable_msix(struct bnad *bnad)
2264 int i, ret;
2265 unsigned long flags;
2267 spin_lock_irqsave(&bnad->bna_lock, flags);
2268 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2269 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2270 return;
2272 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2274 if (bnad->msix_table)
2275 return;
2277 bnad->msix_table =
2278 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2280 if (!bnad->msix_table)
2281 goto intx_mode;
2283 for (i = 0; i < bnad->msix_num; i++)
2284 bnad->msix_table[i].entry = i;
2286 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
2287 if (ret > 0) {
2288 /* Not enough MSI-X vectors. */
2290 spin_lock_irqsave(&bnad->bna_lock, flags);
2291 /* ret = #of vectors that we got */
2292 bnad_q_num_adjust(bnad, ret);
2293 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2295 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx)
2296 + (bnad->num_rx
2297 * bnad->num_rxp_per_rx) +
2298 BNAD_MAILBOX_MSIX_VECTORS;
2300 /* Try once more with adjusted numbers */
2301 /* If this fails, fall back to INTx */
2302 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
2303 bnad->msix_num);
2304 if (ret)
2305 goto intx_mode;
2307 } else if (ret < 0)
2308 goto intx_mode;
2309 return;
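/*
 * INTx fallback: drop the MSI-X table, clear BNAD_CF_MSIX and
 * re-derive the single-Rx-path queue configuration via bnad_q_num_init().
 */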
2311 intx_mode:
2313 kfree(bnad->msix_table);
2314 bnad->msix_table = NULL;
2315 bnad->msix_num = 0;
2316 spin_lock_irqsave(&bnad->bna_lock, flags);
2317 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2318 bnad_q_num_init(bnad);
2319 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2322 static void
2323 bnad_disable_msix(struct bnad *bnad)
2325 u32 cfg_flags;
2326 unsigned long flags;
2328 spin_lock_irqsave(&bnad->bna_lock, flags);
2329 cfg_flags = bnad->cfg_flags;
2330 if (bnad->cfg_flags & BNAD_CF_MSIX)
2331 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2332 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2334 if (cfg_flags & BNAD_CF_MSIX) {
2335 pci_disable_msix(bnad->pcidev);
2336 kfree(bnad->msix_table);
2337 bnad->msix_table = NULL;
2341 /* Netdev entry points */
2342 static int
2343 bnad_open(struct net_device *netdev)
2345 int err;
2346 struct bnad *bnad = netdev_priv(netdev);
2347 struct bna_pause_config pause_config;
2348 int mtu;
2349 unsigned long flags;
2351 mutex_lock(&bnad->conf_mutex);
2353 /* Tx */
2354 err = bnad_setup_tx(bnad, 0);
2355 if (err)
2356 goto err_return;
2358 /* Rx */
2359 err = bnad_setup_rx(bnad, 0);
2360 if (err)
2361 goto cleanup_tx;
2363 /* Port */
2364 pause_config.tx_pause = 0;
2365 pause_config.rx_pause = 0;
2367 mtu = ETH_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
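/*
 * Program the port with the on-wire frame size:
 * 14-byte Ethernet header + MTU + 4-byte FCS.
 */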
2369 spin_lock_irqsave(&bnad->bna_lock, flags);
2370 bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
2371 bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
2372 bna_port_enable(&bnad->bna.port);
2373 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2375 /* Enable broadcast */
2376 bnad_enable_default_bcast(bnad);
2378 /* Restore VLANs, if any */
2379 bnad_restore_vlans(bnad, 0);
2381 /* Set the UCAST address */
2382 spin_lock_irqsave(&bnad->bna_lock, flags);
2383 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2384 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2386 /* Start the stats timer */
2387 bnad_stats_timer_start(bnad);
2389 mutex_unlock(&bnad->conf_mutex);
2391 return 0;
2393 cleanup_tx:
2394 bnad_cleanup_tx(bnad, 0);
2396 err_return:
2397 mutex_unlock(&bnad->conf_mutex);
2398 return err;
2401 static int
2402 bnad_stop(struct net_device *netdev)
2404 struct bnad *bnad = netdev_priv(netdev);
2405 unsigned long flags;
2407 mutex_lock(&bnad->conf_mutex);
2409 /* Stop the stats timer */
2410 bnad_stats_timer_stop(bnad);
2412 init_completion(&bnad->bnad_completions.port_comp);
2414 spin_lock_irqsave(&bnad->bna_lock, flags);
2415 bna_port_disable(&bnad->bna.port, BNA_HARD_CLEANUP,
2416 bnad_cb_port_disabled);
2417 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2419 wait_for_completion(&bnad->bnad_completions.port_comp);
2421 bnad_cleanup_tx(bnad, 0);
2422 bnad_cleanup_rx(bnad, 0);
2424 /* Synchronize mailbox IRQ */
2425 bnad_mbox_irq_sync(bnad);
2427 mutex_unlock(&bnad->conf_mutex);
2429 return 0;
2432 /* TX */
2434 * bnad_start_xmit : Netdev entry point for Transmit
2435 * Called under lock held by net_device
2437 static netdev_tx_t
2438 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2440 struct bnad *bnad = netdev_priv(netdev);
2442 u16 txq_prod, vlan_tag = 0;
2443 u32 unmap_prod, wis, wis_used, wi_range;
2444 u32 vectors, vect_id, i, acked;
2445 u32 tx_id;
2446 int err;
2448 struct bnad_tx_info *tx_info;
2449 struct bna_tcb *tcb;
2450 struct bnad_unmap_q *unmap_q;
2451 dma_addr_t dma_addr;
2452 struct bna_txq_entry *txqent;
2453 bna_txq_wi_ctrl_flag_t flags;
2455 if (unlikely
2456 (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
2457 dev_kfree_skb(skb);
2458 return NETDEV_TX_OK;
2461 tx_id = 0;
2463 tx_info = &bnad->tx_info[tx_id];
2464 tcb = tx_info->tcb[tx_id];
2465 unmap_q = tcb->unmap_q;
2468 * Takes care of the Tx that is scheduled between clearing the flag
2469 * and the netif_stop_queue() call.
2471 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2472 dev_kfree_skb(skb);
2473 return NETDEV_TX_OK;
2476 vectors = 1 + skb_shinfo(skb)->nr_frags;
2477 if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
2478 dev_kfree_skb(skb);
2479 return NETDEV_TX_OK;
2481 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2482 acked = 0;
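/*
 * A work item carries up to 4 Tx vectors, so this skb needs
 * BNA_TXQ_WI_NEEDED(vectors) WIs; WIs after the first are marked
 * BNA_TXQ_WI_EXTENSION in the frag loop below. If the TxQ or the
 * unmap queue is short of space, first try to reclaim completed
 * Tx buffers inline (BNAD_TXQ_FREE_SENT keeps a single reclaimer);
 * otherwise stop the queue and re-check after the memory barrier.
 */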
2483 if (unlikely
2484 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2485 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2486 if ((u16) (*tcb->hw_consumer_index) !=
2487 tcb->consumer_index &&
2488 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2489 acked = bnad_free_txbufs(bnad, tcb);
2490 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2491 bna_ib_ack(tcb->i_dbell, acked);
2492 smp_mb__before_clear_bit();
2493 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2494 } else {
2495 netif_stop_queue(netdev);
2496 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2499 smp_mb();
2501 * Check again to deal with the race between netif_stop_queue()
2502 * here and netif_wake_queue() in the interrupt handler, which
2503 * does not run under the netif tx lock.
2505 if (likely
2506 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2507 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2508 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2509 return NETDEV_TX_BUSY;
2510 } else {
2511 netif_wake_queue(netdev);
2512 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2516 unmap_prod = unmap_q->producer_index;
2517 wis_used = 1;
2518 vect_id = 0;
2519 flags = 0;
2521 txq_prod = tcb->producer_index;
2522 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
2523 BUG_ON(!(wi_range <= tcb->q_depth));
2524 txqent->hdr.wi.reserved = 0;
2525 txqent->hdr.wi.num_vectors = vectors;
2526 txqent->hdr.wi.opcode =
2527 htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
2528 BNA_TXQ_WI_SEND));
2530 if (vlan_tx_tag_present(skb)) {
2531 vlan_tag = (u16) vlan_tx_tag_get(skb);
2532 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
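/*
 * With CEE running, the TCB priority overrides bits 15:13 of the
 * VLAN TCI (the 802.1p PCP field); bits 12:0 (DEI + VID) of any
 * existing tag are preserved.
 */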
2534 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2535 vlan_tag =
2536 (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
2537 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2540 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2542 if (skb_is_gso(skb)) {
2543 err = bnad_tso_prepare(bnad, skb);
2544 if (err) {
2545 dev_kfree_skb(skb);
2546 return NETDEV_TX_OK;
2548 txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb));
2549 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2550 txqent->hdr.wi.l4_hdr_size_n_offset =
2551 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2552 (tcp_hdrlen(skb) >> 2,
2553 skb_transport_offset(skb)));
2554 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2555 u8 proto = 0;
2557 txqent->hdr.wi.lso_mss = 0;
2559 if (skb->protocol == htons(ETH_P_IP))
2560 proto = ip_hdr(skb)->protocol;
2561 else if (skb->protocol == htons(ETH_P_IPV6)) {
2562 /* nexthdr may not be TCP immediately. */
2563 proto = ipv6_hdr(skb)->nexthdr;
2565 if (proto == IPPROTO_TCP) {
2566 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2567 txqent->hdr.wi.l4_hdr_size_n_offset =
2568 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2569 (0, skb_transport_offset(skb)));
2571 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2573 BUG_ON(!(skb_headlen(skb) >=
2574 skb_transport_offset(skb) + tcp_hdrlen(skb)));
2576 } else if (proto == IPPROTO_UDP) {
2577 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2578 txqent->hdr.wi.l4_hdr_size_n_offset =
2579 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2580 (0, skb_transport_offset(skb)));
2582 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2584 BUG_ON(!(skb_headlen(skb) >=
2585 skb_transport_offset(skb) +
2586 sizeof(struct udphdr)));
2587 } else {
2588 err = skb_checksum_help(skb);
2589 BNAD_UPDATE_CTR(bnad, csum_help);
2590 if (err) {
2591 dev_kfree_skb(skb);
2592 BNAD_UPDATE_CTR(bnad, csum_help_err);
2593 return NETDEV_TX_OK;
2596 } else {
2597 txqent->hdr.wi.lso_mss = 0;
2598 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2601 txqent->hdr.wi.flags = htons(flags);
2603 txqent->hdr.wi.frame_length = htonl(skb->len);
2605 unmap_q->unmap_array[unmap_prod].skb = skb;
2606 BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
2607 txqent->vector[vect_id].length = htons(skb_headlen(skb));
2608 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
2609 skb_headlen(skb), DMA_TO_DEVICE);
2610 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2611 dma_addr);
2613 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2614 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2616 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2617 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
2618 u32 size = frag->size;
2620 if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2621 vect_id = 0;
2622 if (--wi_range)
2623 txqent++;
2624 else {
2625 BNA_QE_INDX_ADD(txq_prod, wis_used,
2626 tcb->q_depth);
2627 wis_used = 0;
2628 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
2629 txqent, wi_range);
2630 BUG_ON(!(wi_range <= tcb->q_depth));
2632 wis_used++;
2633 txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
2636 BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
2637 txqent->vector[vect_id].length = htons(size);
2638 dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
2639 frag->page_offset, size, DMA_TO_DEVICE);
2640 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2641 dma_addr);
2642 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2643 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2646 unmap_q->producer_index = unmap_prod;
2647 BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
2648 tcb->producer_index = txq_prod;
2650 smp_mb();
2652 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2653 return NETDEV_TX_OK;
2655 bna_txq_prod_indx_doorbell(tcb);
2657 if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
2658 tasklet_schedule(&bnad->tx_free_tasklet);
2660 return NETDEV_TX_OK;
2664 * Use the spin lock to synchronize reading of the stats structures,
2665 * which are written by BNA under the same lock.
2667 static struct rtnl_link_stats64 *
2668 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
2670 struct bnad *bnad = netdev_priv(netdev);
2671 unsigned long flags;
2673 spin_lock_irqsave(&bnad->bna_lock, flags);
2675 bnad_netdev_qstats_fill(bnad, stats);
2676 bnad_netdev_hwstats_fill(bnad, stats);
2678 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2680 return stats;
2683 static void
2684 bnad_set_rx_mode(struct net_device *netdev)
2686 struct bnad *bnad = netdev_priv(netdev);
2687 u32 new_mask, valid_mask;
2688 unsigned long flags;
2690 spin_lock_irqsave(&bnad->bna_lock, flags);
2692 new_mask = valid_mask = 0;
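/*
 * valid_mask selects which Rx-mode bits this call is changing;
 * new_mask carries their requested values. Only transitions (a
 * flag newly set or newly cleared) are pushed down to bna.
 */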
2694 if (netdev->flags & IFF_PROMISC) {
2695 if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
2696 new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2697 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2698 bnad->cfg_flags |= BNAD_CF_PROMISC;
2700 } else {
2701 if (bnad->cfg_flags & BNAD_CF_PROMISC) {
2702 new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
2703 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2704 bnad->cfg_flags &= ~BNAD_CF_PROMISC;
2708 if (netdev->flags & IFF_ALLMULTI) {
2709 if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
2710 new_mask |= BNA_RXMODE_ALLMULTI;
2711 valid_mask |= BNA_RXMODE_ALLMULTI;
2712 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
2714 } else {
2715 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
2716 new_mask &= ~BNA_RXMODE_ALLMULTI;
2717 valid_mask |= BNA_RXMODE_ALLMULTI;
2718 bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
2722 bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
2724 if (!netdev_mc_empty(netdev)) {
2725 u8 *mcaddr_list;
2726 int mc_count = netdev_mc_count(netdev);
2728 /* Index 0 holds the broadcast address */
2729 mcaddr_list =
2730 kzalloc((mc_count + 1) * ETH_ALEN,
2731 GFP_ATOMIC);
2732 if (!mcaddr_list)
2733 goto unlock;
2735 memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
2737 /* Copy rest of the MC addresses */
2738 bnad_netdev_mc_list_get(netdev, mcaddr_list);
2740 bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
2741 mcaddr_list, NULL);
2743 /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
2744 kfree(mcaddr_list);
2746 unlock:
2747 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2751 * bna_lock is used to sync writes to netdev->dev_addr.
2752 * conf_mutex cannot be used since this call may be made
2753 * in a non-blocking context.
2755 static int
2756 bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
2758 int err;
2759 struct bnad *bnad = netdev_priv(netdev);
2760 struct sockaddr *sa = (struct sockaddr *)mac_addr;
2761 unsigned long flags;
2763 spin_lock_irqsave(&bnad->bna_lock, flags);
2765 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
2767 if (!err)
2768 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
2770 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2772 return err;
2775 static int
2776 bnad_change_mtu(struct net_device *netdev, int new_mtu)
2778 int mtu, err = 0;
2779 unsigned long flags;
2781 struct bnad *bnad = netdev_priv(netdev);
2783 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
2784 return -EINVAL;
2786 mutex_lock(&bnad->conf_mutex);
2788 netdev->mtu = new_mtu;
2790 mtu = ETH_HLEN + new_mtu + ETH_FCS_LEN;
2792 spin_lock_irqsave(&bnad->bna_lock, flags);
2793 bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
2794 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2796 mutex_unlock(&bnad->conf_mutex);
2797 return err;
2800 static void
2801 bnad_vlan_rx_register(struct net_device *netdev,
2802 struct vlan_group *vlan_grp)
2804 struct bnad *bnad = netdev_priv(netdev);
2806 mutex_lock(&bnad->conf_mutex);
2807 bnad->vlan_grp = vlan_grp;
2808 mutex_unlock(&bnad->conf_mutex);
2811 static void
2812 bnad_vlan_rx_add_vid(struct net_device *netdev,
2813 unsigned short vid)
2815 struct bnad *bnad = netdev_priv(netdev);
2816 unsigned long flags;
2818 if (!bnad->rx_info[0].rx)
2819 return;
2821 mutex_lock(&bnad->conf_mutex);
2823 spin_lock_irqsave(&bnad->bna_lock, flags);
2824 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
2825 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2827 mutex_unlock(&bnad->conf_mutex);
2830 static void
2831 bnad_vlan_rx_kill_vid(struct net_device *netdev,
2832 unsigned short vid)
2834 struct bnad *bnad = netdev_priv(netdev);
2835 unsigned long flags;
2837 if (!bnad->rx_info[0].rx)
2838 return;
2840 mutex_lock(&bnad->conf_mutex);
2842 spin_lock_irqsave(&bnad->bna_lock, flags);
2843 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
2844 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2846 mutex_unlock(&bnad->conf_mutex);
2849 #ifdef CONFIG_NET_POLL_CONTROLLER
2850 static void
2851 bnad_netpoll(struct net_device *netdev)
2853 struct bnad *bnad = netdev_priv(netdev);
2854 struct bnad_rx_info *rx_info;
2855 struct bnad_rx_ctrl *rx_ctrl;
2856 u32 curr_mask;
2857 int i, j;
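/*
 * In INTx mode, mask the device interrupt and run the ISR directly;
 * in MSI-X mode, mask each CCB's Rx interrupt and schedule its NAPI
 * poll instead.
 */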
2859 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2860 bna_intx_disable(&bnad->bna, curr_mask);
2861 bnad_isr(bnad->pcidev->irq, netdev);
2862 bna_intx_enable(&bnad->bna, curr_mask);
2863 } else {
2864 for (i = 0; i < bnad->num_rx; i++) {
2865 rx_info = &bnad->rx_info[i];
2866 if (!rx_info->rx)
2867 continue;
2868 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2869 rx_ctrl = &rx_info->rx_ctrl[j];
2870 if (rx_ctrl->ccb) {
2871 bnad_disable_rx_irq(bnad,
2872 rx_ctrl->ccb);
2873 bnad_netif_rx_schedule_poll(bnad,
2874 rx_ctrl->ccb);
2880 #endif
2882 static const struct net_device_ops bnad_netdev_ops = {
2883 .ndo_open = bnad_open,
2884 .ndo_stop = bnad_stop,
2885 .ndo_start_xmit = bnad_start_xmit,
2886 .ndo_get_stats64 = bnad_get_stats64,
2887 .ndo_set_rx_mode = bnad_set_rx_mode,
2888 .ndo_set_multicast_list = bnad_set_rx_mode,
2889 .ndo_validate_addr = eth_validate_addr,
2890 .ndo_set_mac_address = bnad_set_mac_address,
2891 .ndo_change_mtu = bnad_change_mtu,
2892 .ndo_vlan_rx_register = bnad_vlan_rx_register,
2893 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
2894 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
2895 #ifdef CONFIG_NET_POLL_CONTROLLER
2896 .ndo_poll_controller = bnad_netpoll
2897 #endif
2900 static void
2901 bnad_netdev_init(struct bnad *bnad, bool using_dac)
2903 struct net_device *netdev = bnad->netdev;
2905 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
2906 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2907 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;
2909 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
2910 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2911 NETIF_F_TSO | NETIF_F_TSO6;
2913 netdev->features |= netdev->hw_features |
2914 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
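/*
 * hw_features holds the offloads a user can toggle via ethtool,
 * vlan_features is inherited by stacked VLAN devices, and features
 * is the currently enabled set (VLAN RX/filter are always on here).
 */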
2916 if (using_dac)
2917 netdev->features |= NETIF_F_HIGHDMA;
2919 netdev->mem_start = bnad->mmio_start;
2920 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
2922 netdev->netdev_ops = &bnad_netdev_ops;
2923 bnad_set_ethtool_ops(netdev);
2927 * 1. Initialize the bnad structure
2928 * 2. Set up the netdev pointer in pci_dev
2929 * 3. Initialize the Tx free tasklet
2930 * 4. Initialize the number of TxQs & CQs & MSI-X vectors
2932 static int
2933 bnad_init(struct bnad *bnad,
2934 struct pci_dev *pdev, struct net_device *netdev)
2936 unsigned long flags;
2938 SET_NETDEV_DEV(netdev, &pdev->dev);
2939 pci_set_drvdata(pdev, netdev);
2941 bnad->netdev = netdev;
2942 bnad->pcidev = pdev;
2943 bnad->mmio_start = pci_resource_start(pdev, 0);
2944 bnad->mmio_len = pci_resource_len(pdev, 0);
2945 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
2946 if (!bnad->bar0) {
2947 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
2948 pci_set_drvdata(pdev, NULL);
2949 return -ENOMEM;
2951 pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
2952 (unsigned long long) bnad->mmio_len);
2954 spin_lock_irqsave(&bnad->bna_lock, flags);
2955 if (!bnad_msix_disable)
2956 bnad->cfg_flags = BNAD_CF_MSIX;
2958 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
2960 bnad_q_num_init(bnad);
2961 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2963 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
2964 (bnad->num_rx * bnad->num_rxp_per_rx) +
2965 BNAD_MAILBOX_MSIX_VECTORS;
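/*
 * Illustrative example (assumes BNAD_TXQ_NUM == 1 and
 * BNAD_MAILBOX_MSIX_VECTORS == 1): 1 Tx * 1 TxQ + 1 Rx * 4 Rx paths
 * + 1 mailbox vector = 6 MSI-X vectors requested; bnad_enable_msix()
 * may shrink this later if fewer vectors are granted.
 */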
2967 bnad->txq_depth = BNAD_TXQ_DEPTH;
2968 bnad->rxq_depth = BNAD_RXQ_DEPTH;
2970 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
2971 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
2973 tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
2974 (unsigned long)bnad);
2976 return 0;
2980 * Must be called after bnad_pci_uninit()
2981 * so that iounmap() and pci_set_drvdata(NULL)
2982 * happen only after PCI uninitialization.
2984 static void
2985 bnad_uninit(struct bnad *bnad)
2987 if (bnad->bar0)
2988 iounmap(bnad->bar0);
2989 pci_set_drvdata(bnad->pcidev, NULL);
2993 * Initialize locks
2994 a) Per-device mutex used for serializing configuration
2995 changes from the OS interface
2996 b) Spin lock used to protect the bna state machine
2998 static void
2999 bnad_lock_init(struct bnad *bnad)
3001 spin_lock_init(&bnad->bna_lock);
3002 mutex_init(&bnad->conf_mutex);
3005 static void
3006 bnad_lock_uninit(struct bnad *bnad)
3008 mutex_destroy(&bnad->conf_mutex);
3011 /* PCI Initialization */
3012 static int
3013 bnad_pci_init(struct bnad *bnad,
3014 struct pci_dev *pdev, bool *using_dac)
3016 int err;
3018 err = pci_enable_device(pdev);
3019 if (err)
3020 return err;
3021 err = pci_request_regions(pdev, BNAD_NAME);
3022 if (err)
3023 goto disable_device;
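/*
 * Prefer a 64-bit DMA mask (streaming and coherent); if the platform
 * cannot honour it, fall back to 32-bit and clear *using_dac so that
 * NETIF_F_HIGHDMA is not advertised later.
 */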
3024 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3025 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3026 *using_dac = 1;
3027 } else {
3028 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3029 if (err) {
3030 err = dma_set_coherent_mask(&pdev->dev,
3031 DMA_BIT_MASK(32));
3032 if (err)
3033 goto release_regions;
3035 *using_dac = 0;
3037 pci_set_master(pdev);
3038 return 0;
3040 release_regions:
3041 pci_release_regions(pdev);
3042 disable_device:
3043 pci_disable_device(pdev);
3045 return err;
3048 static void
3049 bnad_pci_uninit(struct pci_dev *pdev)
3051 pci_release_regions(pdev);
3052 pci_disable_device(pdev);
3055 static int __devinit
3056 bnad_pci_probe(struct pci_dev *pdev,
3057 const struct pci_device_id *pcidev_id)
3059 bool using_dac = false;
3060 int err;
3061 struct bnad *bnad;
3062 struct bna *bna;
3063 struct net_device *netdev;
3064 struct bfa_pcidev pcidev_info;
3065 unsigned long flags;
3067 pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3068 pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3070 mutex_lock(&bnad_fwimg_mutex);
3071 if (!cna_get_firmware_buf(pdev)) {
3072 mutex_unlock(&bnad_fwimg_mutex);
3073 pr_warn("Failed to load Firmware Image!\n");
3074 return -ENODEV;
3076 mutex_unlock(&bnad_fwimg_mutex);
3079 * Allocates sizeof(struct net_device) + sizeof(struct bnad);
3080 * bnad is accessed via netdev_priv(netdev)
3082 netdev = alloc_etherdev(sizeof(struct bnad));
3083 if (!netdev) {
3084 dev_err(&pdev->dev, "alloc_etherdev failed\n");
3085 err = -ENOMEM;
3086 return err;
3088 bnad = netdev_priv(netdev);
3091 * PCI initialization
3092 * Output : using_dac = 1 for 64 bit DMA
3093 * = 0 for 32 bit DMA
3095 err = bnad_pci_init(bnad, pdev, &using_dac);
3096 if (err)
3097 goto free_netdev;
3099 bnad_lock_init(bnad);
3101 * Initialize bnad structure
3102 * Setup relation between pci_dev & netdev
3103 * Init Tx free tasklet
3105 err = bnad_init(bnad, pdev, netdev);
3106 if (err)
3107 goto pci_uninit;
3108 /* Initialize netdev structure, set up ethtool ops */
3109 bnad_netdev_init(bnad, using_dac);
3111 /* Set link to down state */
3112 netif_carrier_off(netdev);
3114 bnad_enable_msix(bnad);
3116 /* Get resource requirements from bna */
3117 bna_res_req(&bnad->res_info[0]);
3119 /* Allocate resources from bna */
3120 err = bnad_res_alloc(bnad);
3121 if (err)
3122 goto free_netdev;
3124 bna = &bnad->bna;
3126 /* Setup pcidev_info for bna_init() */
3127 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3128 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3129 pcidev_info.device_id = bnad->pcidev->device;
3130 pcidev_info.pci_bar_kva = bnad->bar0;
3132 mutex_lock(&bnad->conf_mutex);
3134 spin_lock_irqsave(&bnad->bna_lock, flags);
3135 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3136 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3138 bnad->stats.bna_stats = &bna->stats;
3140 /* Set up timers */
3141 setup_timer(&bnad->bna.device.ioc.ioc_timer, bnad_ioc_timeout,
3142 ((unsigned long)bnad));
3143 setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
3144 ((unsigned long)bnad));
3145 setup_timer(&bnad->bna.device.ioc.iocpf_timer, bnad_iocpf_timeout,
3146 ((unsigned long)bnad));
3147 setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_iocpf_sem_timeout,
3148 ((unsigned long)bnad));
3150 /* Now start the timer before calling IOC */
3151 mod_timer(&bnad->bna.device.ioc.iocpf_timer,
3152 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3155 * Start the chip.
3156 * Even if err != 0 we don't care; the bna state machine
3157 * will deal with it.
3159 err = bnad_device_enable(bnad);
3161 /* Get the burned-in MAC address */
3162 spin_lock_irqsave(&bnad->bna_lock, flags);
3163 bna_port_mac_get(&bna->port, &bnad->perm_addr);
3164 bnad_set_netdev_perm_addr(bnad);
3165 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3167 mutex_unlock(&bnad->conf_mutex);
3169 /* Finally, register with the net_device layer */
3170 err = register_netdev(netdev);
3171 if (err) {
3172 pr_err("BNA : Registering with netdev failed\n");
3173 goto disable_device;
3176 return 0;
3178 disable_device:
3179 mutex_lock(&bnad->conf_mutex);
3180 bnad_device_disable(bnad);
3181 del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
3182 del_timer_sync(&bnad->bna.device.ioc.sem_timer);
3183 del_timer_sync(&bnad->bna.device.ioc.hb_timer);
3184 spin_lock_irqsave(&bnad->bna_lock, flags);
3185 bna_uninit(bna);
3186 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3187 mutex_unlock(&bnad->conf_mutex);
3189 bnad_res_free(bnad);
3190 bnad_disable_msix(bnad);
3191 pci_uninit:
3192 bnad_pci_uninit(pdev);
3193 bnad_lock_uninit(bnad);
3194 bnad_uninit(bnad);
3195 free_netdev:
3196 free_netdev(netdev);
3197 return err;
3200 static void __devexit
3201 bnad_pci_remove(struct pci_dev *pdev)
3203 struct net_device *netdev = pci_get_drvdata(pdev);
3204 struct bnad *bnad;
3205 struct bna *bna;
3206 unsigned long flags;
3208 if (!netdev)
3209 return;
3211 pr_info("%s bnad_pci_remove\n", netdev->name);
3212 bnad = netdev_priv(netdev);
3213 bna = &bnad->bna;
3215 unregister_netdev(netdev);
3217 mutex_lock(&bnad->conf_mutex);
3218 bnad_device_disable(bnad);
3219 del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
3220 del_timer_sync(&bnad->bna.device.ioc.sem_timer);
3221 del_timer_sync(&bnad->bna.device.ioc.hb_timer);
3222 spin_lock_irqsave(&bnad->bna_lock, flags);
3223 bna_uninit(bna);
3224 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3225 mutex_unlock(&bnad->conf_mutex);
3227 bnad_res_free(bnad);
3228 bnad_disable_msix(bnad);
3229 bnad_pci_uninit(pdev);
3230 bnad_lock_uninit(bnad);
3231 bnad_uninit(bnad);
3232 free_netdev(netdev);
3235 static const struct pci_device_id bnad_pci_id_table[] = {
3237 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3238 PCI_DEVICE_ID_BROCADE_CT),
3239 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3240 .class_mask = 0xffff00
3241 }, {0, }
3244 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3246 static struct pci_driver bnad_pci_driver = {
3247 .name = BNAD_NAME,
3248 .id_table = bnad_pci_id_table,
3249 .probe = bnad_pci_probe,
3250 .remove = __devexit_p(bnad_pci_remove),
3253 static int __init
3254 bnad_module_init(void)
3256 int err;
3258 pr_info("Brocade 10G Ethernet driver\n");
3260 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3262 err = pci_register_driver(&bnad_pci_driver);
3263 if (err < 0) {
3264 pr_err("bna : PCI registration failed in module init "
3265 "(%d)\n", err);
3266 return err;
3269 return 0;
3272 static void __exit
3273 bnad_module_exit(void)
3275 pci_unregister_driver(&bnad_pci_driver);
3277 if (bfi_fw)
3278 release_firmware(bfi_fw);
3281 module_init(bnad_module_init);
3282 module_exit(bnad_module_exit);
3284 MODULE_AUTHOR("Brocade");
3285 MODULE_LICENSE("GPL");
3286 MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3287 MODULE_VERSION(BNAD_VERSION);
3288 MODULE_FIRMWARE(CNA_FW_FILE_CT);