drivers/net/ethernet/brocade/bna/bnad.c
1 /*
2 * Linux network driver for Brocade Converged Network Adapter.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
18 #include <linux/bitops.h>
19 #include <linux/netdevice.h>
20 #include <linux/skbuff.h>
21 #include <linux/etherdevice.h>
22 #include <linux/in.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_vlan.h>
25 #include <linux/if_ether.h>
26 #include <linux/ip.h>
27 #include <linux/prefetch.h>
28 #include <linux/module.h>
30 #include "bnad.h"
31 #include "bna.h"
32 #include "cna.h"
34 static DEFINE_MUTEX(bnad_fwimg_mutex);
37 * Module params
39 static uint bnad_msix_disable;
40 module_param(bnad_msix_disable, uint, 0444);
41 MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
43 static uint bnad_ioc_auto_recover = 1;
44 module_param(bnad_ioc_auto_recover, uint, 0444);
45 MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
47 static uint bna_debugfs_enable = 1;
48 module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
49 MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
50 " Range[false:0|true:1]");
53 * Global variables
55 static u32 bnad_rxqs_per_cq = 2;
56 static u32 bna_id;
57 static struct mutex bnad_list_mutex;
58 static LIST_HEAD(bnad_list);
59 static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
62 * Local MACROS
64 #define BNAD_GET_MBOX_IRQ(_bnad) \
65 (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
66 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
67 ((_bnad)->pcidev->irq))
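/*
 * In MSI-X mode the mailbox has its own vector at index
 * BNAD_MAILBOX_MSIX_INDEX; in INTx mode it falls back to the PCI
 * device's legacy interrupt line.
 */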
69 #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size) \
70 do { \
71 (_res_info)->res_type = BNA_RES_T_MEM; \
72 (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
73 (_res_info)->res_u.mem_info.num = (_num); \
74 (_res_info)->res_u.mem_info.len = (_size); \
75 } while (0)
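/*
 * A sketch of how the macro above is typically invoked (the actual
 * calls appear in bnad_setup_tx()/bnad_setup_rx() below): one KVA
 * block per queue, each block large enough for that queue's unmap
 * array, e.g.
 *
 *	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
 *		bnad->num_txq_per_tx,
 *		bnad->txq_depth * sizeof(struct bnad_tx_unmap));
 */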
77 static void
78 bnad_add_to_list(struct bnad *bnad)
80 mutex_lock(&bnad_list_mutex);
81 list_add_tail(&bnad->list_entry, &bnad_list);
82 bnad->id = bna_id++;
83 mutex_unlock(&bnad_list_mutex);
86 static void
87 bnad_remove_from_list(struct bnad *bnad)
89 mutex_lock(&bnad_list_mutex);
90 list_del(&bnad->list_entry);
91 mutex_unlock(&bnad_list_mutex);
95 * Reinitialize completions in CQ, once Rx is taken down
97 static void
98 bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
100 struct bna_cq_entry *cmpl;
101 int i;
103 for (i = 0; i < ccb->q_depth; i++) {
104 cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
105 cmpl->valid = 0;
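/*
 * Clearing 'valid' on every entry ensures stale completions are not
 * mistaken for new ones once this CQ is reused after an Rx restart.
 */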
109 /* Tx Datapath functions */
112 /* Caller should ensure that the entry at unmap_q[index] is valid */
113 static u32
114 bnad_tx_buff_unmap(struct bnad *bnad,
115 struct bnad_tx_unmap *unmap_q,
116 u32 q_depth, u32 index)
118 struct bnad_tx_unmap *unmap;
119 struct sk_buff *skb;
120 int vector, nvecs;
122 unmap = &unmap_q[index];
123 nvecs = unmap->nvecs;
125 skb = unmap->skb;
126 unmap->skb = NULL;
127 unmap->nvecs = 0;
128 dma_unmap_single(&bnad->pcidev->dev,
129 dma_unmap_addr(&unmap->vectors[0], dma_addr),
130 skb_headlen(skb), DMA_TO_DEVICE);
131 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
132 nvecs--;
134 vector = 0;
135 while (nvecs) {
136 vector++;
137 if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
138 vector = 0;
139 BNA_QE_INDX_INC(index, q_depth);
140 unmap = &unmap_q[index];
143 dma_unmap_page(&bnad->pcidev->dev,
144 dma_unmap_addr(&unmap->vectors[vector], dma_addr),
145 dma_unmap_len(&unmap->vectors[vector], dma_len),
146 DMA_TO_DEVICE);
147 dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
148 nvecs--;
151 BNA_QE_INDX_INC(index, q_depth);
153 return index;
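/*
 * Note the asymmetry above: vector 0 is the skb linear data and was
 * mapped with dma_map_single(), while the remaining vectors are page
 * fragments mapped with dma_map_page(). The returned index points at
 * the entry following the last one consumed by this skb.
 */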
157 * Frees all pending Tx Bufs
158 * At this point no activity is expected on the Q,
159 * so DMA unmap & freeing is fine.
161 static void
162 bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
164 struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
165 struct sk_buff *skb;
166 int i;
168 for (i = 0; i < tcb->q_depth; i++) {
169 skb = unmap_q[i].skb;
170 if (!skb)
171 continue;
172 bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
174 dev_kfree_skb_any(skb);
179 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
180 * Can be called in a) Interrupt context
181 * b) Sending context
183 static u32
184 bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
186 u32 sent_packets = 0, sent_bytes = 0;
187 u32 wis, unmap_wis, hw_cons, cons, q_depth;
188 struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
189 struct bnad_tx_unmap *unmap;
190 struct sk_buff *skb;
192 /* Just return if TX is stopped */
193 if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
194 return 0;
196 hw_cons = *(tcb->hw_consumer_index);
197 cons = tcb->consumer_index;
198 q_depth = tcb->q_depth;
200 wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
201 BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
203 while (wis) {
204 unmap = &unmap_q[cons];
206 skb = unmap->skb;
208 sent_packets++;
209 sent_bytes += skb->len;
211 unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
212 wis -= unmap_wis;
214 cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
215 dev_kfree_skb_any(skb);
218 /* Update consumer pointers. */
219 tcb->consumer_index = hw_cons;
221 tcb->txq->tx_packets += sent_packets;
222 tcb->txq->tx_bytes += sent_bytes;
224 return sent_packets;
227 static u32
228 bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
230 struct net_device *netdev = bnad->netdev;
231 u32 sent = 0;
233 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
234 return 0;
236 sent = bnad_txcmpl_process(bnad, tcb);
237 if (sent) {
238 if (netif_queue_stopped(netdev) &&
239 netif_carrier_ok(netdev) &&
240 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
241 BNAD_NETIF_WAKE_THRESHOLD) {
242 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
243 netif_wake_queue(netdev);
244 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
249 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
250 bna_ib_ack(tcb->i_dbell, sent);
252 smp_mb__before_clear_bit();
253 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
255 return sent;
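/*
 * BNAD_TXQ_FREE_SENT acts as a one-bit lock: the test_and_set_bit()
 * above keeps the interrupt completion path and the deferred cleanup
 * worker (bnad_tx_cleanup()) from freeing the same buffers twice.
 */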
258 /* MSIX Tx Completion Handler */
259 static irqreturn_t
260 bnad_msix_tx(int irq, void *data)
262 struct bna_tcb *tcb = (struct bna_tcb *)data;
263 struct bnad *bnad = tcb->bnad;
265 bnad_tx_complete(bnad, tcb);
267 return IRQ_HANDLED;
270 static inline void
271 bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
273 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
275 unmap_q->reuse_pi = -1;
276 unmap_q->alloc_order = -1;
277 unmap_q->map_size = 0;
278 unmap_q->type = BNAD_RXBUF_NONE;
281 /* Default is page-based allocation. Multi-buffer support - TBD */
282 static int
283 bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
285 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
286 int order;
288 bnad_rxq_alloc_uninit(bnad, rcb);
290 order = get_order(rcb->rxq->buffer_size);
292 unmap_q->type = BNAD_RXBUF_PAGE;
294 if (bna_is_small_rxq(rcb->id)) {
295 unmap_q->alloc_order = 0;
296 unmap_q->map_size = rcb->rxq->buffer_size;
297 } else {
298 if (rcb->rxq->multi_buffer) {
299 unmap_q->alloc_order = 0;
300 unmap_q->map_size = rcb->rxq->buffer_size;
301 unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
302 } else {
303 unmap_q->alloc_order = order;
304 unmap_q->map_size =
305 (rcb->rxq->buffer_size > 2048) ?
306 PAGE_SIZE << order : 2048;
310 BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size));
312 return 0;
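/*
 * Summary of the strategy chosen above: the small RxQ and
 * multi-buffer RxQs get one buffer_size mapping per descriptor, while
 * the large single-buffer RxQ allocates a higher-order page and carves
 * it into map_size slices (2048 bytes, or the whole PAGE_SIZE << order
 * allocation for buffers larger than 2K). The BUG_ON() checks that the
 * allocation divides evenly into slices.
 */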
315 static inline void
316 bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
318 if (!unmap->page)
319 return;
321 dma_unmap_page(&bnad->pcidev->dev,
322 dma_unmap_addr(&unmap->vector, dma_addr),
323 unmap->vector.len, DMA_FROM_DEVICE);
324 put_page(unmap->page);
325 unmap->page = NULL;
326 dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
327 unmap->vector.len = 0;
330 static inline void
331 bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
333 if (!unmap->skb)
334 return;
336 dma_unmap_single(&bnad->pcidev->dev,
337 dma_unmap_addr(&unmap->vector, dma_addr),
338 unmap->vector.len, DMA_FROM_DEVICE);
339 dev_kfree_skb_any(unmap->skb);
340 unmap->skb = NULL;
341 dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
342 unmap->vector.len = 0;
345 static void
346 bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
348 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
349 int i;
351 for (i = 0; i < rcb->q_depth; i++) {
352 struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
354 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
355 bnad_rxq_cleanup_skb(bnad, unmap);
356 else
357 bnad_rxq_cleanup_page(bnad, unmap);
359 bnad_rxq_alloc_uninit(bnad, rcb);
362 static u32
363 bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
365 u32 alloced, prod, q_depth;
366 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
367 struct bnad_rx_unmap *unmap, *prev;
368 struct bna_rxq_entry *rxent;
369 struct page *page;
370 u32 page_offset, alloc_size;
371 dma_addr_t dma_addr;
373 prod = rcb->producer_index;
374 q_depth = rcb->q_depth;
376 alloc_size = PAGE_SIZE << unmap_q->alloc_order;
377 alloced = 0;
379 while (nalloc--) {
380 unmap = &unmap_q->unmap[prod];
382 if (unmap_q->reuse_pi < 0) {
383 page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
384 unmap_q->alloc_order);
385 page_offset = 0;
386 } else {
387 prev = &unmap_q->unmap[unmap_q->reuse_pi];
388 page = prev->page;
389 page_offset = prev->page_offset + unmap_q->map_size;
390 get_page(page);
393 if (unlikely(!page)) {
394 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
395 rcb->rxq->rxbuf_alloc_failed++;
396 goto finishing;
399 dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
400 unmap_q->map_size, DMA_FROM_DEVICE);
402 unmap->page = page;
403 unmap->page_offset = page_offset;
404 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
405 unmap->vector.len = unmap_q->map_size;
406 page_offset += unmap_q->map_size;
408 if (page_offset < alloc_size)
409 unmap_q->reuse_pi = prod;
410 else
411 unmap_q->reuse_pi = -1;
413 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
414 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
415 BNA_QE_INDX_INC(prod, q_depth);
416 alloced++;
419 finishing:
420 if (likely(alloced)) {
421 rcb->producer_index = prod;
422 smp_mb();
423 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
424 bna_rxq_prod_indx_doorbell(rcb);
427 return alloced;
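/*
 * Page reuse above: reuse_pi remembers the last descriptor whose page
 * still has unused map_size slices; subsequent descriptors take an
 * extra reference on that page (get_page()) and advance page_offset
 * instead of allocating a fresh page.
 */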
430 static u32
431 bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
433 u32 alloced, prod, q_depth, buff_sz;
434 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
435 struct bnad_rx_unmap *unmap;
436 struct bna_rxq_entry *rxent;
437 struct sk_buff *skb;
438 dma_addr_t dma_addr;
440 buff_sz = rcb->rxq->buffer_size;
441 prod = rcb->producer_index;
442 q_depth = rcb->q_depth;
444 alloced = 0;
445 while (nalloc--) {
446 unmap = &unmap_q->unmap[prod];
448 skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);
450 if (unlikely(!skb)) {
451 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
452 rcb->rxq->rxbuf_alloc_failed++;
453 goto finishing;
455 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
456 buff_sz, DMA_FROM_DEVICE);
458 unmap->skb = skb;
459 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
460 unmap->vector.len = buff_sz;
462 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
463 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
464 BNA_QE_INDX_INC(prod, q_depth);
465 alloced++;
468 finishing:
469 if (likely(alloced)) {
470 rcb->producer_index = prod;
471 smp_mb();
472 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
473 bna_rxq_prod_indx_doorbell(rcb);
476 return alloced;
479 static inline void
480 bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
482 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
483 u32 to_alloc;
485 to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
486 if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
487 return;
489 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
490 bnad_rxq_refill_skb(bnad, rcb, to_alloc);
491 else
492 bnad_rxq_refill_page(bnad, rcb, to_alloc);
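/*
 * Refill is batched: nothing is posted until at least
 * 2^BNAD_RXQ_REFILL_THRESHOLD_SHIFT descriptors are free, presumably
 * to amortize the producer-index doorbell write.
 */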
495 #define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
496 BNA_CQ_EF_IPV6 | \
497 BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
498 BNA_CQ_EF_L4_CKSUM_OK)
500 #define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
501 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
502 #define flags_tcp6 (BNA_CQ_EF_IPV6 | \
503 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
504 #define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
505 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
506 #define flags_udp6 (BNA_CQ_EF_IPV6 | \
507 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
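/*
 * These masks are compared against (flags & flags_cksum_prot_mask) in
 * bnad_cq_process(): only frames the adapter marks as TCP or UDP over
 * IPv4/IPv6 with the relevant checksum-OK bits set are given
 * CHECKSUM_UNNECESSARY (and only when the netdev has NETIF_F_RXCSUM);
 * everything else is left for software checksumming.
 */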
509 static void
510 bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
511 u32 sop_ci, u32 nvecs)
513 struct bnad_rx_unmap_q *unmap_q;
514 struct bnad_rx_unmap *unmap;
515 u32 ci, vec;
517 unmap_q = rcb->unmap_q;
518 for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
519 unmap = &unmap_q->unmap[ci];
520 BNA_QE_INDX_INC(ci, rcb->q_depth);
522 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
523 bnad_rxq_cleanup_skb(bnad, unmap);
524 else
525 bnad_rxq_cleanup_page(bnad, unmap);
529 static void
530 bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
531 u32 sop_ci, u32 nvecs, u32 last_fraglen)
533 struct bnad *bnad;
534 u32 ci, vec, len, totlen = 0;
535 struct bnad_rx_unmap_q *unmap_q;
536 struct bnad_rx_unmap *unmap;
538 unmap_q = rcb->unmap_q;
539 bnad = rcb->bnad;
541 /* prefetch header */
542 prefetch(page_address(unmap_q->unmap[sop_ci].page) +
543 unmap_q->unmap[sop_ci].page_offset);
545 for (vec = 1, ci = sop_ci; vec <= nvecs; vec++) {
546 unmap = &unmap_q->unmap[ci];
547 BNA_QE_INDX_INC(ci, rcb->q_depth);
549 dma_unmap_page(&bnad->pcidev->dev,
550 dma_unmap_addr(&unmap->vector, dma_addr),
551 unmap->vector.len, DMA_FROM_DEVICE);
553 len = (vec == nvecs) ?
554 last_fraglen : unmap->vector.len;
555 totlen += len;
557 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
558 unmap->page, unmap->page_offset, len);
560 unmap->page = NULL;
561 unmap->vector.len = 0;
564 skb->len += totlen;
565 skb->data_len += totlen;
566 skb->truesize += totlen;
569 static inline void
570 bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
571 struct bnad_rx_unmap *unmap, u32 len)
573 prefetch(skb->data);
575 dma_unmap_single(&bnad->pcidev->dev,
576 dma_unmap_addr(&unmap->vector, dma_addr),
577 unmap->vector.len, DMA_FROM_DEVICE);
579 skb_put(skb, len);
580 skb->protocol = eth_type_trans(skb, bnad->netdev);
582 unmap->skb = NULL;
583 unmap->vector.len = 0;
586 static u32
587 bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
589 struct bna_cq_entry *cq, *cmpl, *next_cmpl;
590 struct bna_rcb *rcb = NULL;
591 struct bnad_rx_unmap_q *unmap_q;
592 struct bnad_rx_unmap *unmap = NULL;
593 struct sk_buff *skb = NULL;
594 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
595 struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
596 u32 packets = 0, len = 0, totlen = 0;
597 u32 pi, vec, sop_ci = 0, nvecs = 0;
598 u32 flags, masked_flags;
600 prefetch(bnad->netdev);
602 cq = ccb->sw_q;
603 cmpl = &cq[ccb->producer_index];
605 while (packets < budget) {
606 if (!cmpl->valid)
607 break;
608 /* The 'valid' field is set by the adapter, only after writing
609 * the other fields of completion entry. Hence, do not load
610 * other fields of completion entry *before* the 'valid' is
611 * loaded. Adding the rmb() here prevents the compiler and/or
612 * CPU from reordering the reads which would potentially result
613 * in reading stale values in completion entry.
615 rmb();
617 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
619 if (bna_is_small_rxq(cmpl->rxq_id))
620 rcb = ccb->rcb[1];
621 else
622 rcb = ccb->rcb[0];
624 unmap_q = rcb->unmap_q;
626 /* start of packet ci */
627 sop_ci = rcb->consumer_index;
629 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
630 unmap = &unmap_q->unmap[sop_ci];
631 skb = unmap->skb;
632 } else {
633 skb = napi_get_frags(&rx_ctrl->napi);
634 if (unlikely(!skb))
635 break;
637 prefetch(skb);
639 flags = ntohl(cmpl->flags);
640 len = ntohs(cmpl->length);
641 totlen = len;
642 nvecs = 1;
644 /* Check all the completions for this frame.
645 * busy-wait doesn't help much, break here.
647 if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
648 (flags & BNA_CQ_EF_EOP) == 0) {
649 pi = ccb->producer_index;
650 do {
651 BNA_QE_INDX_INC(pi, ccb->q_depth);
652 next_cmpl = &cq[pi];
654 if (!next_cmpl->valid)
655 break;
656 /* The 'valid' field is set by the adapter, only
657 * after writing the other fields of completion
658 * entry. Hence, do not load other fields of
659 * completion entry *before* the 'valid' is
660 * loaded. Adding the rmb() here prevents the
661 * compiler and/or CPU from reordering the reads
662 * which would potentially result in reading
663 * stale values in completion entry.
665 rmb();
667 len = ntohs(next_cmpl->length);
668 flags = ntohl(next_cmpl->flags);
670 nvecs++;
671 totlen += len;
672 } while ((flags & BNA_CQ_EF_EOP) == 0);
674 if (!next_cmpl->valid)
675 break;
678 /* TODO: BNA_CQ_EF_LOCAL ? */
679 if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
680 BNA_CQ_EF_FCS_ERROR |
681 BNA_CQ_EF_TOO_LONG))) {
682 bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
683 rcb->rxq->rx_packets_with_error++;
685 goto next;
688 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
689 bnad_cq_setup_skb(bnad, skb, unmap, len);
690 else
691 bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
693 packets++;
694 rcb->rxq->rx_packets++;
695 rcb->rxq->rx_bytes += totlen;
696 ccb->bytes_per_intr += totlen;
698 masked_flags = flags & flags_cksum_prot_mask;
700 if (likely
701 ((bnad->netdev->features & NETIF_F_RXCSUM) &&
702 ((masked_flags == flags_tcp4) ||
703 (masked_flags == flags_udp4) ||
704 (masked_flags == flags_tcp6) ||
705 (masked_flags == flags_udp6))))
706 skb->ip_summed = CHECKSUM_UNNECESSARY;
707 else
708 skb_checksum_none_assert(skb);
710 if (flags & BNA_CQ_EF_VLAN)
711 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
713 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
714 netif_receive_skb(skb);
715 else
716 napi_gro_frags(&rx_ctrl->napi);
718 next:
719 BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
720 for (vec = 0; vec < nvecs; vec++) {
721 cmpl = &cq[ccb->producer_index];
722 cmpl->valid = 0;
723 BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
725 cmpl = &cq[ccb->producer_index];
728 napi_gro_flush(&rx_ctrl->napi, false);
729 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
730 bna_ib_ack_disable_irq(ccb->i_dbell, packets);
732 bnad_rxq_post(bnad, ccb->rcb[0]);
733 if (ccb->rcb[1])
734 bnad_rxq_post(bnad, ccb->rcb[1]);
736 return packets;
739 static void
740 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
742 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
743 struct napi_struct *napi = &rx_ctrl->napi;
745 if (likely(napi_schedule_prep(napi))) {
746 __napi_schedule(napi);
747 rx_ctrl->rx_schedule++;
751 /* MSIX Rx Path Handler */
752 static irqreturn_t
753 bnad_msix_rx(int irq, void *data)
755 struct bna_ccb *ccb = (struct bna_ccb *)data;
757 if (ccb) {
758 ((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
759 bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
762 return IRQ_HANDLED;
765 /* Interrupt handlers */
767 /* Mbox Interrupt Handlers */
768 static irqreturn_t
769 bnad_msix_mbox_handler(int irq, void *data)
771 u32 intr_status;
772 unsigned long flags;
773 struct bnad *bnad = (struct bnad *)data;
775 spin_lock_irqsave(&bnad->bna_lock, flags);
776 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
777 spin_unlock_irqrestore(&bnad->bna_lock, flags);
778 return IRQ_HANDLED;
781 bna_intr_status_get(&bnad->bna, intr_status);
783 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
784 bna_mbox_handler(&bnad->bna, intr_status);
786 spin_unlock_irqrestore(&bnad->bna_lock, flags);
788 return IRQ_HANDLED;
791 static irqreturn_t
792 bnad_isr(int irq, void *data)
794 int i, j;
795 u32 intr_status;
796 unsigned long flags;
797 struct bnad *bnad = (struct bnad *)data;
798 struct bnad_rx_info *rx_info;
799 struct bnad_rx_ctrl *rx_ctrl;
800 struct bna_tcb *tcb = NULL;
802 spin_lock_irqsave(&bnad->bna_lock, flags);
803 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
804 spin_unlock_irqrestore(&bnad->bna_lock, flags);
805 return IRQ_NONE;
808 bna_intr_status_get(&bnad->bna, intr_status);
810 if (unlikely(!intr_status)) {
811 spin_unlock_irqrestore(&bnad->bna_lock, flags);
812 return IRQ_NONE;
815 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
816 bna_mbox_handler(&bnad->bna, intr_status);
818 spin_unlock_irqrestore(&bnad->bna_lock, flags);
820 if (!BNA_IS_INTX_DATA_INTR(intr_status))
821 return IRQ_HANDLED;
823 /* Process data interrupts */
824 /* Tx processing */
825 for (i = 0; i < bnad->num_tx; i++) {
826 for (j = 0; j < bnad->num_txq_per_tx; j++) {
827 tcb = bnad->tx_info[i].tcb[j];
828 if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
829 bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
832 /* Rx processing */
833 for (i = 0; i < bnad->num_rx; i++) {
834 rx_info = &bnad->rx_info[i];
835 if (!rx_info->rx)
836 continue;
837 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
838 rx_ctrl = &rx_info->rx_ctrl[j];
839 if (rx_ctrl->ccb)
840 bnad_netif_rx_schedule_poll(bnad,
841 rx_ctrl->ccb);
844 return IRQ_HANDLED;
848 * Called in interrupt / callback context
849 * with bna_lock held, so cfg_flags access is OK
851 static void
852 bnad_enable_mbox_irq(struct bnad *bnad)
854 clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
856 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
 860  * Called with bnad->bna_lock held because of
861 * bnad->cfg_flags access.
863 static void
864 bnad_disable_mbox_irq(struct bnad *bnad)
866 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
868 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
871 static void
872 bnad_set_netdev_perm_addr(struct bnad *bnad)
874 struct net_device *netdev = bnad->netdev;
876 memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
877 if (is_zero_ether_addr(netdev->dev_addr))
878 memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
881 /* Control Path Handlers */
883 /* Callbacks */
884 void
885 bnad_cb_mbox_intr_enable(struct bnad *bnad)
887 bnad_enable_mbox_irq(bnad);
890 void
891 bnad_cb_mbox_intr_disable(struct bnad *bnad)
893 bnad_disable_mbox_irq(bnad);
896 void
897 bnad_cb_ioceth_ready(struct bnad *bnad)
899 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
900 complete(&bnad->bnad_completions.ioc_comp);
903 void
904 bnad_cb_ioceth_failed(struct bnad *bnad)
906 bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
907 complete(&bnad->bnad_completions.ioc_comp);
910 void
911 bnad_cb_ioceth_disabled(struct bnad *bnad)
913 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
914 complete(&bnad->bnad_completions.ioc_comp);
917 static void
918 bnad_cb_enet_disabled(void *arg)
920 struct bnad *bnad = (struct bnad *)arg;
922 netif_carrier_off(bnad->netdev);
923 complete(&bnad->bnad_completions.enet_comp);
926 void
927 bnad_cb_ethport_link_status(struct bnad *bnad,
928 enum bna_link_status link_status)
930 bool link_up = false;
932 link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
934 if (link_status == BNA_CEE_UP) {
935 if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
936 BNAD_UPDATE_CTR(bnad, cee_toggle);
937 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
938 } else {
939 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
940 BNAD_UPDATE_CTR(bnad, cee_toggle);
941 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
944 if (link_up) {
945 if (!netif_carrier_ok(bnad->netdev)) {
946 uint tx_id, tcb_id;
947 printk(KERN_WARNING "bna: %s link up\n",
948 bnad->netdev->name);
949 netif_carrier_on(bnad->netdev);
950 BNAD_UPDATE_CTR(bnad, link_toggle);
951 for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
952 for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
953 tcb_id++) {
954 struct bna_tcb *tcb =
955 bnad->tx_info[tx_id].tcb[tcb_id];
956 u32 txq_id;
957 if (!tcb)
958 continue;
960 txq_id = tcb->id;
962 if (test_bit(BNAD_TXQ_TX_STARTED,
963 &tcb->flags)) {
965 * Force an immediate
966 * Transmit Schedule */
967 printk(KERN_INFO "bna: %s %d "
968 "TXQ_STARTED\n",
969 bnad->netdev->name,
970 txq_id);
971 netif_wake_subqueue(
972 bnad->netdev,
973 txq_id);
974 BNAD_UPDATE_CTR(bnad,
975 netif_queue_wakeup);
976 } else {
977 netif_stop_subqueue(
978 bnad->netdev,
979 txq_id);
980 BNAD_UPDATE_CTR(bnad,
981 netif_queue_stop);
986 } else {
987 if (netif_carrier_ok(bnad->netdev)) {
988 printk(KERN_WARNING "bna: %s link down\n",
989 bnad->netdev->name);
990 netif_carrier_off(bnad->netdev);
991 BNAD_UPDATE_CTR(bnad, link_toggle);
996 static void
997 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
999 struct bnad *bnad = (struct bnad *)arg;
1001 complete(&bnad->bnad_completions.tx_comp);
1004 static void
1005 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
1007 struct bnad_tx_info *tx_info =
1008 (struct bnad_tx_info *)tcb->txq->tx->priv;
1010 tcb->priv = tcb;
1011 tx_info->tcb[tcb->id] = tcb;
1014 static void
1015 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
1017 struct bnad_tx_info *tx_info =
1018 (struct bnad_tx_info *)tcb->txq->tx->priv;
1020 tx_info->tcb[tcb->id] = NULL;
1021 tcb->priv = NULL;
1024 static void
1025 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
1027 struct bnad_rx_info *rx_info =
1028 (struct bnad_rx_info *)ccb->cq->rx->priv;
1030 rx_info->rx_ctrl[ccb->id].ccb = ccb;
1031 ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
1034 static void
1035 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
1037 struct bnad_rx_info *rx_info =
1038 (struct bnad_rx_info *)ccb->cq->rx->priv;
1040 rx_info->rx_ctrl[ccb->id].ccb = NULL;
1043 static void
1044 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
1046 struct bnad_tx_info *tx_info =
1047 (struct bnad_tx_info *)tx->priv;
1048 struct bna_tcb *tcb;
1049 u32 txq_id;
1050 int i;
1052 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1053 tcb = tx_info->tcb[i];
1054 if (!tcb)
1055 continue;
1056 txq_id = tcb->id;
1057 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1058 netif_stop_subqueue(bnad->netdev, txq_id);
1059 printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
1060 bnad->netdev->name, txq_id);
1064 static void
1065 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
1067 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1068 struct bna_tcb *tcb;
1069 u32 txq_id;
1070 int i;
1072 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1073 tcb = tx_info->tcb[i];
1074 if (!tcb)
1075 continue;
1076 txq_id = tcb->id;
1078 BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
1079 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1080 BUG_ON(*(tcb->hw_consumer_index) != 0);
1082 if (netif_carrier_ok(bnad->netdev)) {
1083 printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
1084 bnad->netdev->name, txq_id);
1085 netif_wake_subqueue(bnad->netdev, txq_id);
1086 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
1091  * Workaround for the first ioceth enable failure, which leaves
1092  * us with a zero MAC address; try to read the MAC address
1093  * again here.
1095 if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
1096 bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
1097 bnad_set_netdev_perm_addr(bnad);
1102 * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
1104 static void
1105 bnad_tx_cleanup(struct delayed_work *work)
1107 struct bnad_tx_info *tx_info =
1108 container_of(work, struct bnad_tx_info, tx_cleanup_work);
1109 struct bnad *bnad = NULL;
1110 struct bna_tcb *tcb;
1111 unsigned long flags;
1112 u32 i, pending = 0;
1114 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1115 tcb = tx_info->tcb[i];
1116 if (!tcb)
1117 continue;
1119 bnad = tcb->bnad;
1121 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
1122 pending++;
1123 continue;
1126 bnad_txq_cleanup(bnad, tcb);
1128 smp_mb__before_clear_bit();
1129 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
1132 if (pending) {
1133 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
1134 msecs_to_jiffies(1));
1135 return;
1138 spin_lock_irqsave(&bnad->bna_lock, flags);
1139 bna_tx_cleanup_complete(tx_info->tx);
1140 spin_unlock_irqrestore(&bnad->bna_lock, flags);
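/*
 * If any TXQ was still being drained by the completion path
 * (BNAD_TXQ_FREE_SENT already set), the work item re-queues itself
 * after ~1 ms rather than blocking; only once every queue has been
 * cleaned is bna_tx_cleanup_complete() reported to the Tx FSM.
 */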
1143 static void
1144 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
1146 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1147 struct bna_tcb *tcb;
1148 int i;
1150 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1151 tcb = tx_info->tcb[i];
1152 if (!tcb)
1153 continue;
1156 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
1159 static void
1160 bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
1162 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1163 struct bna_ccb *ccb;
1164 struct bnad_rx_ctrl *rx_ctrl;
1165 int i;
1167 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1168 rx_ctrl = &rx_info->rx_ctrl[i];
1169 ccb = rx_ctrl->ccb;
1170 if (!ccb)
1171 continue;
1173 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
1175 if (ccb->rcb[1])
1176 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
1181 * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
1183 static void
1184 bnad_rx_cleanup(void *work)
1186 struct bnad_rx_info *rx_info =
1187 container_of(work, struct bnad_rx_info, rx_cleanup_work);
1188 struct bnad_rx_ctrl *rx_ctrl;
1189 struct bnad *bnad = NULL;
1190 unsigned long flags;
1191 u32 i;
1193 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1194 rx_ctrl = &rx_info->rx_ctrl[i];
1196 if (!rx_ctrl->ccb)
1197 continue;
1199 bnad = rx_ctrl->ccb->bnad;
1202 * Wait till the poll handler has exited
1203 * and nothing can be scheduled anymore
1205 napi_disable(&rx_ctrl->napi);
1207 bnad_cq_cleanup(bnad, rx_ctrl->ccb);
1208 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
1209 if (rx_ctrl->ccb->rcb[1])
1210 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
1213 spin_lock_irqsave(&bnad->bna_lock, flags);
1214 bna_rx_cleanup_complete(rx_info->rx);
1215 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1218 static void
1219 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1221 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1222 struct bna_ccb *ccb;
1223 struct bnad_rx_ctrl *rx_ctrl;
1224 int i;
1226 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1227 rx_ctrl = &rx_info->rx_ctrl[i];
1228 ccb = rx_ctrl->ccb;
1229 if (!ccb)
1230 continue;
1232 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
1234 if (ccb->rcb[1])
1235 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
1238 queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
1241 static void
1242 bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1244 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1245 struct bna_ccb *ccb;
1246 struct bna_rcb *rcb;
1247 struct bnad_rx_ctrl *rx_ctrl;
1248 int i, j;
1250 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1251 rx_ctrl = &rx_info->rx_ctrl[i];
1252 ccb = rx_ctrl->ccb;
1253 if (!ccb)
1254 continue;
1256 napi_enable(&rx_ctrl->napi);
1258 for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1259 rcb = ccb->rcb[j];
1260 if (!rcb)
1261 continue;
1263 bnad_rxq_alloc_init(bnad, rcb);
1264 set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1265 set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
1266 bnad_rxq_post(bnad, rcb);
1271 static void
1272 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1274 struct bnad *bnad = (struct bnad *)arg;
1276 complete(&bnad->bnad_completions.rx_comp);
1279 static void
1280 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1282 bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1283 complete(&bnad->bnad_completions.mcast_comp);
1286 void
1287 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1288 struct bna_stats *stats)
1290 if (status == BNA_CB_SUCCESS)
1291 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1293 if (!netif_running(bnad->netdev) ||
1294 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1295 return;
1297 mod_timer(&bnad->stats_timer,
1298 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1301 static void
1302 bnad_cb_enet_mtu_set(struct bnad *bnad)
1304 bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1305 complete(&bnad->bnad_completions.mtu_comp);
1308 void
1309 bnad_cb_completion(void *arg, enum bfa_status status)
1311 struct bnad_iocmd_comp *iocmd_comp =
1312 (struct bnad_iocmd_comp *)arg;
1314 iocmd_comp->comp_status = (u32) status;
1315 complete(&iocmd_comp->comp);
1318 /* Resource allocation, free functions */
1320 static void
1321 bnad_mem_free(struct bnad *bnad,
1322 struct bna_mem_info *mem_info)
1324 int i;
1325 dma_addr_t dma_pa;
1327 if (mem_info->mdl == NULL)
1328 return;
1330 for (i = 0; i < mem_info->num; i++) {
1331 if (mem_info->mdl[i].kva != NULL) {
1332 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1333 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1334 dma_pa);
1335 dma_free_coherent(&bnad->pcidev->dev,
1336 mem_info->mdl[i].len,
1337 mem_info->mdl[i].kva, dma_pa);
1338 } else
1339 kfree(mem_info->mdl[i].kva);
1342 kfree(mem_info->mdl);
1343 mem_info->mdl = NULL;
1346 static int
1347 bnad_mem_alloc(struct bnad *bnad,
1348 struct bna_mem_info *mem_info)
1350 int i;
1351 dma_addr_t dma_pa;
1353 if ((mem_info->num == 0) || (mem_info->len == 0)) {
1354 mem_info->mdl = NULL;
1355 return 0;
1358 mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1359 GFP_KERNEL);
1360 if (mem_info->mdl == NULL)
1361 return -ENOMEM;
1363 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1364 for (i = 0; i < mem_info->num; i++) {
1365 mem_info->mdl[i].len = mem_info->len;
1366 mem_info->mdl[i].kva =
1367 dma_alloc_coherent(&bnad->pcidev->dev,
1368 mem_info->len, &dma_pa,
1369 GFP_KERNEL);
1370 if (mem_info->mdl[i].kva == NULL)
1371 goto err_return;
1373 BNA_SET_DMA_ADDR(dma_pa,
1374 &(mem_info->mdl[i].dma));
1376 } else {
1377 for (i = 0; i < mem_info->num; i++) {
1378 mem_info->mdl[i].len = mem_info->len;
1379 mem_info->mdl[i].kva = kzalloc(mem_info->len,
1380 GFP_KERNEL);
1381 if (mem_info->mdl[i].kva == NULL)
1382 goto err_return;
1386 return 0;
1388 err_return:
1389 bnad_mem_free(bnad, mem_info);
1390 return -ENOMEM;
1393 /* Free IRQ for Mailbox */
1394 static void
1395 bnad_mbox_irq_free(struct bnad *bnad)
1397 int irq;
1398 unsigned long flags;
1400 spin_lock_irqsave(&bnad->bna_lock, flags);
1401 bnad_disable_mbox_irq(bnad);
1402 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1404 irq = BNAD_GET_MBOX_IRQ(bnad);
1405 free_irq(irq, bnad);
1409  * Allocates the IRQ for the mailbox, but keeps it disabled
1410 * This will be enabled once we get the mbox enable callback
1411 * from bna
1413 static int
1414 bnad_mbox_irq_alloc(struct bnad *bnad)
1416 int err = 0;
1417 unsigned long irq_flags, flags;
1418 u32 irq;
1419 irq_handler_t irq_handler;
1421 spin_lock_irqsave(&bnad->bna_lock, flags);
1422 if (bnad->cfg_flags & BNAD_CF_MSIX) {
1423 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1424 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1425 irq_flags = 0;
1426 } else {
1427 irq_handler = (irq_handler_t)bnad_isr;
1428 irq = bnad->pcidev->irq;
1429 irq_flags = IRQF_SHARED;
1432 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1433 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1436 * Set the Mbox IRQ disable flag, so that the IRQ handler
1437  * called from request_irq() for SHARED IRQs does not execute
1439 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1441 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1443 err = request_irq(irq, irq_handler, irq_flags,
1444 bnad->mbox_irq_name, bnad);
1446 return err;
1449 static void
1450 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1452 kfree(intr_info->idl);
1453 intr_info->idl = NULL;
1456 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1457 static int
1458 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1459 u32 txrx_id, struct bna_intr_info *intr_info)
1461 int i, vector_start = 0;
1462 u32 cfg_flags;
1463 unsigned long flags;
1465 spin_lock_irqsave(&bnad->bna_lock, flags);
1466 cfg_flags = bnad->cfg_flags;
1467 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1469 if (cfg_flags & BNAD_CF_MSIX) {
1470 intr_info->intr_type = BNA_INTR_T_MSIX;
1471 intr_info->idl = kcalloc(intr_info->num,
1472 sizeof(struct bna_intr_descr),
1473 GFP_KERNEL);
1474 if (!intr_info->idl)
1475 return -ENOMEM;
1477 switch (src) {
1478 case BNAD_INTR_TX:
1479 vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1480 break;
1482 case BNAD_INTR_RX:
1483 vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1484 (bnad->num_tx * bnad->num_txq_per_tx) +
1485 txrx_id;
1486 break;
1488 default:
1489 BUG();
1492 for (i = 0; i < intr_info->num; i++)
1493 intr_info->idl[i].vector = vector_start + i;
1494 } else {
1495 intr_info->intr_type = BNA_INTR_T_INTX;
1496 intr_info->num = 1;
1497 intr_info->idl = kcalloc(intr_info->num,
1498 sizeof(struct bna_intr_descr),
1499 GFP_KERNEL);
1500 if (!intr_info->idl)
1501 return -ENOMEM;
1503 switch (src) {
1504 case BNAD_INTR_TX:
1505 intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1506 break;
1508 case BNAD_INTR_RX:
1509 intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1510 break;
1513 return 0;
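/*
 * Resulting MSI-X layout (assuming BNAD_MAILBOX_MSIX_VECTORS == 1):
 * the mailbox vector comes first, followed by one vector per TxQ
 * (num_tx * num_txq_per_tx of them) and then one per CQ. In INTx mode
 * the "vector" is only a bitmask distinguishing Tx from Rx IB events.
 */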
1516 /* NOTE: Should be called for MSIX only
1517 * Unregisters Tx MSIX vector(s) from the kernel
1519 static void
1520 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1521 int num_txqs)
1523 int i;
1524 int vector_num;
1526 for (i = 0; i < num_txqs; i++) {
1527 if (tx_info->tcb[i] == NULL)
1528 continue;
1530 vector_num = tx_info->tcb[i]->intr_vector;
1531 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1535 /* NOTE: Should be called for MSIX only
1536 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1538 static int
1539 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1540 u32 tx_id, int num_txqs)
1542 int i;
1543 int err;
1544 int vector_num;
1546 for (i = 0; i < num_txqs; i++) {
1547 vector_num = tx_info->tcb[i]->intr_vector;
1548 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1549 tx_id + tx_info->tcb[i]->id);
1550 err = request_irq(bnad->msix_table[vector_num].vector,
1551 (irq_handler_t)bnad_msix_tx, 0,
1552 tx_info->tcb[i]->name,
1553 tx_info->tcb[i]);
1554 if (err)
1555 goto err_return;
1558 return 0;
1560 err_return:
1561 if (i > 0)
1562 bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1563 return -1;
1566 /* NOTE: Should be called for MSIX only
1567 * Unregisters Rx MSIX vector(s) from the kernel
1569 static void
1570 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1571 int num_rxps)
1573 int i;
1574 int vector_num;
1576 for (i = 0; i < num_rxps; i++) {
1577 if (rx_info->rx_ctrl[i].ccb == NULL)
1578 continue;
1580 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1581 free_irq(bnad->msix_table[vector_num].vector,
1582 rx_info->rx_ctrl[i].ccb);
1586 /* NOTE: Should be called for MSIX only
1587  * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1589 static int
1590 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1591 u32 rx_id, int num_rxps)
1593 int i;
1594 int err;
1595 int vector_num;
1597 for (i = 0; i < num_rxps; i++) {
1598 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1599 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1600 bnad->netdev->name,
1601 rx_id + rx_info->rx_ctrl[i].ccb->id);
1602 err = request_irq(bnad->msix_table[vector_num].vector,
1603 (irq_handler_t)bnad_msix_rx, 0,
1604 rx_info->rx_ctrl[i].ccb->name,
1605 rx_info->rx_ctrl[i].ccb);
1606 if (err)
1607 goto err_return;
1610 return 0;
1612 err_return:
1613 if (i > 0)
1614 bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1615 return -1;
1618 /* Free Tx object Resources */
1619 static void
1620 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1622 int i;
1624 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1625 if (res_info[i].res_type == BNA_RES_T_MEM)
1626 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1627 else if (res_info[i].res_type == BNA_RES_T_INTR)
1628 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1632 /* Allocates memory and interrupt resources for Tx object */
1633 static int
1634 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1635 u32 tx_id)
1637 int i, err = 0;
1639 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1640 if (res_info[i].res_type == BNA_RES_T_MEM)
1641 err = bnad_mem_alloc(bnad,
1642 &res_info[i].res_u.mem_info);
1643 else if (res_info[i].res_type == BNA_RES_T_INTR)
1644 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1645 &res_info[i].res_u.intr_info);
1646 if (err)
1647 goto err_return;
1649 return 0;
1651 err_return:
1652 bnad_tx_res_free(bnad, res_info);
1653 return err;
1656 /* Free Rx object Resources */
1657 static void
1658 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1660 int i;
1662 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1663 if (res_info[i].res_type == BNA_RES_T_MEM)
1664 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1665 else if (res_info[i].res_type == BNA_RES_T_INTR)
1666 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1670 /* Allocates memory and interrupt resources for Rx object */
1671 static int
1672 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1673 uint rx_id)
1675 int i, err = 0;
1677 /* All memory needs to be allocated before setup_ccbs */
1678 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1679 if (res_info[i].res_type == BNA_RES_T_MEM)
1680 err = bnad_mem_alloc(bnad,
1681 &res_info[i].res_u.mem_info);
1682 else if (res_info[i].res_type == BNA_RES_T_INTR)
1683 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1684 &res_info[i].res_u.intr_info);
1685 if (err)
1686 goto err_return;
1688 return 0;
1690 err_return:
1691 bnad_rx_res_free(bnad, res_info);
1692 return err;
1695 /* Timer callbacks */
1696 /* a) IOC timer */
1697 static void
1698 bnad_ioc_timeout(unsigned long data)
1700 struct bnad *bnad = (struct bnad *)data;
1701 unsigned long flags;
1703 spin_lock_irqsave(&bnad->bna_lock, flags);
1704 bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
1705 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1708 static void
1709 bnad_ioc_hb_check(unsigned long data)
1711 struct bnad *bnad = (struct bnad *)data;
1712 unsigned long flags;
1714 spin_lock_irqsave(&bnad->bna_lock, flags);
1715 bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
1716 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1719 static void
1720 bnad_iocpf_timeout(unsigned long data)
1722 struct bnad *bnad = (struct bnad *)data;
1723 unsigned long flags;
1725 spin_lock_irqsave(&bnad->bna_lock, flags);
1726 bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
1727 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1730 static void
1731 bnad_iocpf_sem_timeout(unsigned long data)
1733 struct bnad *bnad = (struct bnad *)data;
1734 unsigned long flags;
1736 spin_lock_irqsave(&bnad->bna_lock, flags);
1737 bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
1738 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1742 * All timer routines use bnad->bna_lock to protect against
1743 * the following race, which may occur in case of no locking:
1744  *	Time	CPU m			CPU n
1745  *	0	1 = test_bit
1746  *	1				clear_bit
1747  *	2				del_timer_sync
1748  *	3	mod_timer
1751 /* b) Dynamic Interrupt Moderation Timer */
1752 static void
1753 bnad_dim_timeout(unsigned long data)
1755 struct bnad *bnad = (struct bnad *)data;
1756 struct bnad_rx_info *rx_info;
1757 struct bnad_rx_ctrl *rx_ctrl;
1758 int i, j;
1759 unsigned long flags;
1761 if (!netif_carrier_ok(bnad->netdev))
1762 return;
1764 spin_lock_irqsave(&bnad->bna_lock, flags);
1765 for (i = 0; i < bnad->num_rx; i++) {
1766 rx_info = &bnad->rx_info[i];
1767 if (!rx_info->rx)
1768 continue;
1769 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1770 rx_ctrl = &rx_info->rx_ctrl[j];
1771 if (!rx_ctrl->ccb)
1772 continue;
1773 bna_rx_dim_update(rx_ctrl->ccb);
1777 	/* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
1778 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1779 mod_timer(&bnad->dim_timer,
1780 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1781 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1784 /* c) Statistics Timer */
1785 static void
1786 bnad_stats_timeout(unsigned long data)
1788 struct bnad *bnad = (struct bnad *)data;
1789 unsigned long flags;
1791 if (!netif_running(bnad->netdev) ||
1792 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1793 return;
1795 spin_lock_irqsave(&bnad->bna_lock, flags);
1796 bna_hw_stats_get(&bnad->bna);
1797 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1801 * Set up timer for DIM
1802 * Called with bnad->bna_lock held
1804 void
1805 bnad_dim_timer_start(struct bnad *bnad)
1807 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1808 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1809 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1810 (unsigned long)bnad);
1811 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1812 mod_timer(&bnad->dim_timer,
1813 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1818 * Set up timer for statistics
1819 * Called with mutex_lock(&bnad->conf_mutex) held
1821 static void
1822 bnad_stats_timer_start(struct bnad *bnad)
1824 unsigned long flags;
1826 spin_lock_irqsave(&bnad->bna_lock, flags);
1827 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1828 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1829 (unsigned long)bnad);
1830 mod_timer(&bnad->stats_timer,
1831 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1833 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1837 * Stops the stats timer
1838 * Called with mutex_lock(&bnad->conf_mutex) held
1840 static void
1841 bnad_stats_timer_stop(struct bnad *bnad)
1843 int to_del = 0;
1844 unsigned long flags;
1846 spin_lock_irqsave(&bnad->bna_lock, flags);
1847 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1848 to_del = 1;
1849 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1850 if (to_del)
1851 del_timer_sync(&bnad->stats_timer);
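/*
 * del_timer_sync() is called only after dropping bna_lock: the stats
 * timer callback itself takes bna_lock, so deleting it synchronously
 * while holding the lock could deadlock.
 */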
1854 /* Utilities */
1856 static void
1857 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1859 int i = 1; /* Index 0 has broadcast address */
1860 struct netdev_hw_addr *mc_addr;
1862 netdev_for_each_mc_addr(mc_addr, netdev) {
1863 memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1864 ETH_ALEN);
1865 i++;
1869 static int
1870 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1872 struct bnad_rx_ctrl *rx_ctrl =
1873 container_of(napi, struct bnad_rx_ctrl, napi);
1874 struct bnad *bnad = rx_ctrl->bnad;
1875 int rcvd = 0;
1877 rx_ctrl->rx_poll_ctr++;
1879 if (!netif_carrier_ok(bnad->netdev))
1880 goto poll_exit;
1882 rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
1883 if (rcvd >= budget)
1884 return rcvd;
1886 poll_exit:
1887 napi_complete(napi);
1889 rx_ctrl->rx_complete++;
1891 if (rx_ctrl->ccb)
1892 bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1894 return rcvd;
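/*
 * Standard NAPI contract: if the full budget was consumed we return
 * without completing, so the core polls again; otherwise the poll is
 * completed and the CQ interrupt is re-armed via
 * bnad_enable_rx_irq_unsafe().
 */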
1897 #define BNAD_NAPI_POLL_QUOTA 64
1898 static void
1899 bnad_napi_add(struct bnad *bnad, u32 rx_id)
1901 struct bnad_rx_ctrl *rx_ctrl;
1902 int i;
1904 /* Initialize & enable NAPI */
1905 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1906 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1907 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1908 bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1912 static void
1913 bnad_napi_delete(struct bnad *bnad, u32 rx_id)
1915 int i;
1917 /* First disable and then clean up */
1918 for (i = 0; i < bnad->num_rxp_per_rx; i++)
1919 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1922 /* Should be called with conf_lock held */
1923 void
1924 bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
1926 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1927 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1928 unsigned long flags;
1930 if (!tx_info->tx)
1931 return;
1933 init_completion(&bnad->bnad_completions.tx_comp);
1934 spin_lock_irqsave(&bnad->bna_lock, flags);
1935 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1936 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1937 wait_for_completion(&bnad->bnad_completions.tx_comp);
1939 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1940 bnad_tx_msix_unregister(bnad, tx_info,
1941 bnad->num_txq_per_tx);
1943 spin_lock_irqsave(&bnad->bna_lock, flags);
1944 bna_tx_destroy(tx_info->tx);
1945 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1947 tx_info->tx = NULL;
1948 tx_info->tx_id = 0;
1950 bnad_tx_res_free(bnad, res_info);
1953 /* Should be called with conf_lock held */
1955 bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1957 int err;
1958 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1959 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1960 struct bna_intr_info *intr_info =
1961 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1962 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1963 static const struct bna_tx_event_cbfn tx_cbfn = {
1964 .tcb_setup_cbfn = bnad_cb_tcb_setup,
1965 .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1966 .tx_stall_cbfn = bnad_cb_tx_stall,
1967 .tx_resume_cbfn = bnad_cb_tx_resume,
1968 .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1971 struct bna_tx *tx;
1972 unsigned long flags;
1974 tx_info->tx_id = tx_id;
1976 /* Initialize the Tx object configuration */
1977 tx_config->num_txq = bnad->num_txq_per_tx;
1978 tx_config->txq_depth = bnad->txq_depth;
1979 tx_config->tx_type = BNA_TX_T_REGULAR;
1980 tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1982 /* Get BNA's resource requirement for one tx object */
1983 spin_lock_irqsave(&bnad->bna_lock, flags);
1984 bna_tx_res_req(bnad->num_txq_per_tx,
1985 bnad->txq_depth, res_info);
1986 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1988 /* Fill Unmap Q memory requirements */
1989 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1990 bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
1991 bnad->txq_depth));
1993 /* Allocate resources */
1994 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1995 if (err)
1996 return err;
1998 /* Ask BNA to create one Tx object, supplying required resources */
1999 spin_lock_irqsave(&bnad->bna_lock, flags);
2000 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
2001 tx_info);
2002 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2003 if (!tx) {
2004 err = -ENOMEM;
2005 goto err_return;
2007 tx_info->tx = tx;
2009 INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
2010 (work_func_t)bnad_tx_cleanup);
2012 /* Register ISR for the Tx object */
2013 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2014 err = bnad_tx_msix_register(bnad, tx_info,
2015 tx_id, bnad->num_txq_per_tx);
2016 if (err)
2017 goto cleanup_tx;
2020 spin_lock_irqsave(&bnad->bna_lock, flags);
2021 bna_tx_enable(tx);
2022 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2024 return 0;
2026 cleanup_tx:
2027 spin_lock_irqsave(&bnad->bna_lock, flags);
2028 bna_tx_destroy(tx_info->tx);
2029 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2030 tx_info->tx = NULL;
2031 tx_info->tx_id = 0;
2032 err_return:
2033 bnad_tx_res_free(bnad, res_info);
2034 return err;
2037 /* Setup the rx config for bna_rx_create */
2038 /* bnad decides the configuration */
2039 static void
2040 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
2042 memset(rx_config, 0, sizeof(*rx_config));
2043 rx_config->rx_type = BNA_RX_T_REGULAR;
2044 rx_config->num_paths = bnad->num_rxp_per_rx;
2045 rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
2047 if (bnad->num_rxp_per_rx > 1) {
2048 rx_config->rss_status = BNA_STATUS_T_ENABLED;
2049 rx_config->rss_config.hash_type =
2050 (BFI_ENET_RSS_IPV6 |
2051 BFI_ENET_RSS_IPV6_TCP |
2052 BFI_ENET_RSS_IPV4 |
2053 BFI_ENET_RSS_IPV4_TCP);
2054 rx_config->rss_config.hash_mask =
2055 bnad->num_rxp_per_rx - 1;
2056 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
2057 sizeof(rx_config->rss_config.toeplitz_hash_key));
2058 } else {
2059 rx_config->rss_status = BNA_STATUS_T_DISABLED;
2060 memset(&rx_config->rss_config, 0,
2061 sizeof(rx_config->rss_config));
2064 rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
2065 rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
2067 /* BNA_RXP_SINGLE - one data-buffer queue
2068 * BNA_RXP_SLR - one small-buffer and one large-buffer queues
2069 * BNA_RXP_HDS - one header-buffer and one data-buffer queues
2071 /* TODO: configurable param for queue type */
2072 rx_config->rxp_type = BNA_RXP_SLR;
2074 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
2075 rx_config->frame_size > 4096) {
2076 /* though size_routing_enable is set in SLR,
2077 * small packets may get routed to same rxq.
2078 * set buf_size to 2048 instead of PAGE_SIZE.
2080 rx_config->q0_buf_size = 2048;
2081 /* this should be in multiples of 2 */
2082 rx_config->q0_num_vecs = 4;
2083 rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
2084 rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
2085 } else {
2086 rx_config->q0_buf_size = rx_config->frame_size;
2087 rx_config->q0_num_vecs = 1;
2088 rx_config->q0_depth = bnad->rxq_depth;
2091 /* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
2092 if (rx_config->rxp_type == BNA_RXP_SLR) {
2093 rx_config->q1_depth = bnad->rxq_depth;
2094 rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
2097 rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
2100 static void
2101 bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
2103 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2104 int i;
2106 for (i = 0; i < bnad->num_rxp_per_rx; i++)
2107 rx_info->rx_ctrl[i].bnad = bnad;
2110 /* Called with mutex_lock(&bnad->conf_mutex) held */
2111 static u32
2112 bnad_reinit_rx(struct bnad *bnad)
2114 struct net_device *netdev = bnad->netdev;
2115 u32 err = 0, current_err = 0;
2116 u32 rx_id = 0, count = 0;
2117 unsigned long flags;
2119 /* destroy and create new rx objects */
2120 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2121 if (!bnad->rx_info[rx_id].rx)
2122 continue;
2123 bnad_destroy_rx(bnad, rx_id);
2126 spin_lock_irqsave(&bnad->bna_lock, flags);
2127 bna_enet_mtu_set(&bnad->bna.enet,
2128 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2129 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2131 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2132 count++;
2133 current_err = bnad_setup_rx(bnad, rx_id);
2134 if (current_err && !err) {
2135 err = current_err;
2136 pr_err("RXQ:%u setup failed\n", rx_id);
2140 /* restore rx configuration */
2141 if (bnad->rx_info[0].rx && !err) {
2142 bnad_restore_vlans(bnad, 0);
2143 bnad_enable_default_bcast(bnad);
2144 spin_lock_irqsave(&bnad->bna_lock, flags);
2145 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2146 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2147 bnad_set_rx_mode(netdev);
2150 return count;
2153 /* Called with bnad_conf_lock() held */
2154 void
2155 bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
2157 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2158 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2159 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2160 unsigned long flags;
2161 int to_del = 0;
2163 if (!rx_info->rx)
2164 return;
2166 if (0 == rx_id) {
2167 spin_lock_irqsave(&bnad->bna_lock, flags);
2168 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
2169 test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
2170 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
2171 to_del = 1;
2173 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2174 if (to_del)
2175 del_timer_sync(&bnad->dim_timer);
2178 init_completion(&bnad->bnad_completions.rx_comp);
2179 spin_lock_irqsave(&bnad->bna_lock, flags);
2180 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
2181 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2182 wait_for_completion(&bnad->bnad_completions.rx_comp);
2184 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
2185 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
2187 bnad_napi_delete(bnad, rx_id);
2189 spin_lock_irqsave(&bnad->bna_lock, flags);
2190 bna_rx_destroy(rx_info->rx);
2192 rx_info->rx = NULL;
2193 rx_info->rx_id = 0;
2194 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2196 bnad_rx_res_free(bnad, res_info);
2199 /* Called with mutex_lock(&bnad->conf_mutex) held */
2201 bnad_setup_rx(struct bnad *bnad, u32 rx_id)
2203 int err;
2204 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2205 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2206 struct bna_intr_info *intr_info =
2207 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2208 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2209 static const struct bna_rx_event_cbfn rx_cbfn = {
2210 .rcb_setup_cbfn = NULL,
2211 .rcb_destroy_cbfn = NULL,
2212 .ccb_setup_cbfn = bnad_cb_ccb_setup,
2213 .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
2214 .rx_stall_cbfn = bnad_cb_rx_stall,
2215 .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
2216 .rx_post_cbfn = bnad_cb_rx_post,
2218 struct bna_rx *rx;
2219 unsigned long flags;
2221 rx_info->rx_id = rx_id;
2223 /* Initialize the Rx object configuration */
2224 bnad_init_rx_config(bnad, rx_config);
2226 /* Get BNA's resource requirement for one Rx object */
2227 spin_lock_irqsave(&bnad->bna_lock, flags);
2228 bna_rx_res_req(rx_config, res_info);
2229 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2231 /* Fill Unmap Q memory requirements */
2232 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
2233 rx_config->num_paths,
2234 (rx_config->q0_depth *
2235 sizeof(struct bnad_rx_unmap)) +
2236 sizeof(struct bnad_rx_unmap_q));
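/*
 * Sizing note (descriptive, derived from the arguments above): each Rx
 * path gets one data-queue unmap area of q0_depth bnad_rx_unmap entries
 * plus a bnad_rx_unmap_q header; the header-queue case below is sized
 * the same way from q1_depth.
 */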
2238 if (rx_config->rxp_type != BNA_RXP_SINGLE) {
2239 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
2240 rx_config->num_paths,
2241 (rx_config->q1_depth *
2242 sizeof(struct bnad_rx_unmap) +
2243 sizeof(struct bnad_rx_unmap_q)));
2245 /* Allocate resource */
2246 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
2247 if (err)
2248 return err;
2250 bnad_rx_ctrl_init(bnad, rx_id);
2252 /* Ask BNA to create one Rx object, supplying required resources */
2253 spin_lock_irqsave(&bnad->bna_lock, flags);
2254 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
2255 rx_info);
2256 if (!rx) {
2257 err = -ENOMEM;
2258 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2259 goto err_return;
2261 rx_info->rx = rx;
2262 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2264 INIT_WORK(&rx_info->rx_cleanup_work,
2265 (work_func_t)(bnad_rx_cleanup));
2268 * Init NAPI, so that state is set to NAPI_STATE_SCHED; this way the
2269 * IRQ handler cannot schedule NAPI at this point.
2271 bnad_napi_add(bnad, rx_id);
2273 /* Register ISR for the Rx object */
2274 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2275 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
2276 rx_config->num_paths);
2277 if (err)
2278 goto err_return;
2281 spin_lock_irqsave(&bnad->bna_lock, flags);
2282 if (0 == rx_id) {
2283 /* Set up Dynamic Interrupt Moderation Vector */
2284 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
2285 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
2287 /* Enable VLAN filtering only on the default Rx */
2288 bna_rx_vlanfilter_enable(rx);
2290 /* Start the DIM timer */
2291 bnad_dim_timer_start(bnad);
2294 bna_rx_enable(rx);
2295 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2297 return 0;
2299 err_return:
2300 bnad_destroy_rx(bnad, rx_id);
2301 return err;
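/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * it shows the destroy/setup pairing that bnad_reinit_rx() above applies
 * to every configured rx_id. Assumes conf_mutex is held, as required by
 * the functions it wraps. Kept under #if 0 so it is never compiled.
 */
#if 0
static int bnad_rx_recreate_one(struct bnad *bnad, u32 rx_id)
{
	if (bnad->rx_info[rx_id].rx)
		bnad_destroy_rx(bnad, rx_id);
	return bnad_setup_rx(bnad, rx_id);
}
#endif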
2304 /* Called with conf_lock & bnad->bna_lock held */
2305 void
2306 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2308 struct bnad_tx_info *tx_info;
2310 tx_info = &bnad->tx_info[0];
2311 if (!tx_info->tx)
2312 return;
2314 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2317 /* Called with conf_lock & bnad->bna_lock held */
2318 void
2319 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2321 struct bnad_rx_info *rx_info;
2322 int i;
2324 for (i = 0; i < bnad->num_rx; i++) {
2325 rx_info = &bnad->rx_info[i];
2326 if (!rx_info->rx)
2327 continue;
2328 bna_rx_coalescing_timeo_set(rx_info->rx,
2329 bnad->rx_coalescing_timeo);
2334 * Called with bnad->bna_lock held
2337 bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
2339 int ret;
2341 if (!is_valid_ether_addr(mac_addr))
2342 return -EADDRNOTAVAIL;
2344 /* If datapath is down, pretend everything went through */
2345 if (!bnad->rx_info[0].rx)
2346 return 0;
2348 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
2349 if (ret != BNA_CB_SUCCESS)
2350 return -EADDRNOTAVAIL;
2352 return 0;
2355 /* Should be called with conf_lock held */
2357 bnad_enable_default_bcast(struct bnad *bnad)
2359 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2360 int ret;
2361 unsigned long flags;
2363 init_completion(&bnad->bnad_completions.mcast_comp);
2365 spin_lock_irqsave(&bnad->bna_lock, flags);
2366 ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
2367 bnad_cb_rx_mcast_add);
2368 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2370 if (ret == BNA_CB_SUCCESS)
2371 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2372 else
2373 return -ENODEV;
2375 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2376 return -ENODEV;
2378 return 0;
2381 /* Called with mutex_lock(&bnad->conf_mutex) held */
2382 void
2383 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2385 u16 vid;
2386 unsigned long flags;
2388 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2389 spin_lock_irqsave(&bnad->bna_lock, flags);
2390 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2391 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2395 /* Statistics utilities */
2396 void
2397 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2399 int i, j;
2401 for (i = 0; i < bnad->num_rx; i++) {
2402 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2403 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2404 stats->rx_packets += bnad->rx_info[i].
2405 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2406 stats->rx_bytes += bnad->rx_info[i].
2407 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2408 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2409 bnad->rx_info[i].rx_ctrl[j].ccb->
2410 rcb[1]->rxq) {
2411 stats->rx_packets +=
2412 bnad->rx_info[i].rx_ctrl[j].
2413 ccb->rcb[1]->rxq->rx_packets;
2414 stats->rx_bytes +=
2415 bnad->rx_info[i].rx_ctrl[j].
2416 ccb->rcb[1]->rxq->rx_bytes;
2421 for (i = 0; i < bnad->num_tx; i++) {
2422 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2423 if (bnad->tx_info[i].tcb[j]) {
2424 stats->tx_packets +=
2425 bnad->tx_info[i].tcb[j]->txq->tx_packets;
2426 stats->tx_bytes +=
2427 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2434 * Must be called with the bna_lock held.
2436 void
2437 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2439 struct bfi_enet_stats_mac *mac_stats;
2440 u32 bmap;
2441 int i;
2443 mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2444 stats->rx_errors =
2445 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2446 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2447 mac_stats->rx_undersize;
2448 stats->tx_errors = mac_stats->tx_fcs_error +
2449 mac_stats->tx_undersize;
2450 stats->rx_dropped = mac_stats->rx_drop;
2451 stats->tx_dropped = mac_stats->tx_drop;
2452 stats->multicast = mac_stats->rx_multicast;
2453 stats->collisions = mac_stats->tx_total_collision;
2455 stats->rx_length_errors = mac_stats->rx_frame_length_error;
2457 /* receive ring buffer overflow ?? */
2459 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2460 stats->rx_frame_errors = mac_stats->rx_alignment_error;
2461 /* receiver FIFO overrun */
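/*
 * bna_rx_rid_mask() is assumed to return a bitmask of active Rx function
 * IDs; the loop below walks it and, at the first active RxF, accounts
 * that function's frame_drops as rx_fifo_errors.
 */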
2462 bmap = bna_rx_rid_mask(&bnad->bna);
2463 for (i = 0; bmap; i++) {
2464 if (bmap & 1) {
2465 stats->rx_fifo_errors +=
2466 bnad->stats.bna_stats->
2467 hw_stats.rxf_stats[i].frame_drops;
2468 break;
2470 bmap >>= 1;
2474 static void
2475 bnad_mbox_irq_sync(struct bnad *bnad)
2477 u32 irq;
2478 unsigned long flags;
2480 spin_lock_irqsave(&bnad->bna_lock, flags);
2481 if (bnad->cfg_flags & BNAD_CF_MSIX)
2482 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2483 else
2484 irq = bnad->pcidev->irq;
2485 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2487 synchronize_irq(irq);
2490 /* Utility used by bnad_start_xmit, for doing TSO */
2491 static int
2492 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2494 int err;
2496 if (skb_header_cloned(skb)) {
2497 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2498 if (err) {
2499 BNAD_UPDATE_CTR(bnad, tso_err);
2500 return err;
2505 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2506 * excluding the length field.
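 * (Per standard TSO handling, the hardware then folds in the per-segment
 * length and payload sum when it emits each segment's final checksum.)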
2508 if (skb->protocol == htons(ETH_P_IP)) {
2509 struct iphdr *iph = ip_hdr(skb);
2511 /* Zero tot_len and check before seeding the pseudo-header checksum */
2512 iph->tot_len = 0;
2513 iph->check = 0;
2515 tcp_hdr(skb)->check =
2516 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2517 IPPROTO_TCP, 0);
2518 BNAD_UPDATE_CTR(bnad, tso4);
2519 } else {
2520 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2522 ipv6h->payload_len = 0;
2523 tcp_hdr(skb)->check =
2524 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2525 IPPROTO_TCP, 0);
2526 BNAD_UPDATE_CTR(bnad, tso6);
2529 return 0;
2533 * Initialize Q numbers depending on Rx Paths
2534 * Called with bnad->bna_lock held, because of cfg_flags
2535 * access.
2537 static void
2538 bnad_q_num_init(struct bnad *bnad)
2540 int rxps;
2542 rxps = min((uint)num_online_cpus(),
2543 (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
2545 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2546 rxps = 1; /* INTx */
2548 bnad->num_rx = 1;
2549 bnad->num_tx = 1;
2550 bnad->num_rxp_per_rx = rxps;
2551 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2555 * Adjusts the Q numbers, given a number of msix vectors
2556 * Give preference to RSS as opposed to Tx priority Queues;
2557 * in that case, use just one Tx Q.
2558 * Called with bnad->bna_lock held because of cfg_flags access
2560 static void
2561 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2563 bnad->num_txq_per_tx = 1;
2564 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2565 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2566 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2567 bnad->num_rxp_per_rx = msix_vectors -
2568 (bnad->num_tx * bnad->num_txq_per_tx) -
2569 BNAD_MAILBOX_MSIX_VECTORS;
2570 } else
2571 bnad->num_rxp_per_rx = 1;
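/*
 * Worked example (assuming BNAD_MAILBOX_MSIX_VECTORS == 1 and that the
 * threshold check above passes): with 10 MSI-X vectors, num_tx == 1 and
 * num_txq_per_tx == 1, num_rxp_per_rx becomes 10 - 1 - 1 = 8; otherwise
 * the driver falls back to a single Rx path per Rx object.
 */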
2574 /* Enable / disable ioceth */
2575 static int
2576 bnad_ioceth_disable(struct bnad *bnad)
2578 unsigned long flags;
2579 int err = 0;
2581 spin_lock_irqsave(&bnad->bna_lock, flags);
2582 init_completion(&bnad->bnad_completions.ioc_comp);
2583 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2584 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2586 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2587 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2589 err = bnad->bnad_completions.ioc_comp_status;
2590 return err;
2593 static int
2594 bnad_ioceth_enable(struct bnad *bnad)
2596 int err = 0;
2597 unsigned long flags;
2599 spin_lock_irqsave(&bnad->bna_lock, flags);
2600 init_completion(&bnad->bnad_completions.ioc_comp);
2601 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2602 bna_ioceth_enable(&bnad->bna.ioceth);
2603 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2605 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2606 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2608 err = bnad->bnad_completions.ioc_comp_status;
2610 return err;
2613 /* Free BNA resources */
2614 static void
2615 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2616 u32 res_val_max)
2618 int i;
2620 for (i = 0; i < res_val_max; i++)
2621 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2624 /* Allocates memory and interrupt resources for BNA */
2625 static int
2626 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2627 u32 res_val_max)
2629 int i, err;
2631 for (i = 0; i < res_val_max; i++) {
2632 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2633 if (err)
2634 goto err_return;
2636 return 0;
2638 err_return:
2639 bnad_res_free(bnad, res_info, res_val_max);
2640 return err;
2643 /* Interrupt enable / disable */
2644 static void
2645 bnad_enable_msix(struct bnad *bnad)
2647 int i, ret;
2648 unsigned long flags;
2650 spin_lock_irqsave(&bnad->bna_lock, flags);
2651 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2652 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2653 return;
2655 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2657 if (bnad->msix_table)
2658 return;
2660 bnad->msix_table =
2661 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2663 if (!bnad->msix_table)
2664 goto intx_mode;
2666 for (i = 0; i < bnad->msix_num; i++)
2667 bnad->msix_table[i].entry = i;
2669 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
2670 if (ret > 0) {
2671 /* Not enough MSI-X vectors. */
2672 pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
2673 ret, bnad->msix_num);
2675 spin_lock_irqsave(&bnad->bna_lock, flags);
2676 /* ret = #of vectors that we got */
2677 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2678 (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2679 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2681 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2682 BNAD_MAILBOX_MSIX_VECTORS;
2684 if (bnad->msix_num > ret)
2685 goto intx_mode;
2687 /* Try once more with adjusted numbers */
2688 /* If this fails, fall back to INTx */
2689 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
2690 bnad->msix_num);
2691 if (ret)
2692 goto intx_mode;
2694 } else if (ret < 0)
2695 goto intx_mode;
2697 pci_intx(bnad->pcidev, 0);
2699 return;
2701 intx_mode:
2702 pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
2704 kfree(bnad->msix_table);
2705 bnad->msix_table = NULL;
2706 bnad->msix_num = 0;
2707 spin_lock_irqsave(&bnad->bna_lock, flags);
2708 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2709 bnad_q_num_init(bnad);
2710 spin_unlock_irqrestore(&bnad->bna_lock, flags);
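/*
 * Fallback summary for bnad_enable_msix() above: a partial allocation
 * (ret > 0) shrinks the queue counts via bnad_q_num_adjust() and retries
 * pci_enable_msix() once with the reduced vector count; any further
 * failure, or ret < 0, lands at intx_mode and the driver runs with INTx.
 */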
2713 static void
2714 bnad_disable_msix(struct bnad *bnad)
2716 u32 cfg_flags;
2717 unsigned long flags;
2719 spin_lock_irqsave(&bnad->bna_lock, flags);
2720 cfg_flags = bnad->cfg_flags;
2721 if (bnad->cfg_flags & BNAD_CF_MSIX)
2722 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2723 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2725 if (cfg_flags & BNAD_CF_MSIX) {
2726 pci_disable_msix(bnad->pcidev);
2727 kfree(bnad->msix_table);
2728 bnad->msix_table = NULL;
2732 /* Netdev entry points */
2733 static int
2734 bnad_open(struct net_device *netdev)
2736 int err;
2737 struct bnad *bnad = netdev_priv(netdev);
2738 struct bna_pause_config pause_config;
2739 unsigned long flags;
2741 mutex_lock(&bnad->conf_mutex);
2743 /* Tx */
2744 err = bnad_setup_tx(bnad, 0);
2745 if (err)
2746 goto err_return;
2748 /* Rx */
2749 err = bnad_setup_rx(bnad, 0);
2750 if (err)
2751 goto cleanup_tx;
2753 /* Port */
2754 pause_config.tx_pause = 0;
2755 pause_config.rx_pause = 0;
2757 spin_lock_irqsave(&bnad->bna_lock, flags);
2758 bna_enet_mtu_set(&bnad->bna.enet,
2759 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2760 bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
2761 bna_enet_enable(&bnad->bna.enet);
2762 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2764 /* Enable broadcast */
2765 bnad_enable_default_bcast(bnad);
2767 /* Restore VLANs, if any */
2768 bnad_restore_vlans(bnad, 0);
2770 /* Set the UCAST address */
2771 spin_lock_irqsave(&bnad->bna_lock, flags);
2772 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2773 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2775 /* Start the stats timer */
2776 bnad_stats_timer_start(bnad);
2778 mutex_unlock(&bnad->conf_mutex);
2780 return 0;
2782 cleanup_tx:
2783 bnad_destroy_tx(bnad, 0);
2785 err_return:
2786 mutex_unlock(&bnad->conf_mutex);
2787 return err;
2790 static int
2791 bnad_stop(struct net_device *netdev)
2793 struct bnad *bnad = netdev_priv(netdev);
2794 unsigned long flags;
2796 mutex_lock(&bnad->conf_mutex);
2798 /* Stop the stats timer */
2799 bnad_stats_timer_stop(bnad);
2801 init_completion(&bnad->bnad_completions.enet_comp);
2803 spin_lock_irqsave(&bnad->bna_lock, flags);
2804 bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2805 bnad_cb_enet_disabled);
2806 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2808 wait_for_completion(&bnad->bnad_completions.enet_comp);
2810 bnad_destroy_tx(bnad, 0);
2811 bnad_destroy_rx(bnad, 0);
2813 /* Synchronize mailbox IRQ */
2814 bnad_mbox_irq_sync(bnad);
2816 mutex_unlock(&bnad->conf_mutex);
2818 return 0;
2821 /* TX */
2822 /* Returns 0 for success */
2823 static int
2824 bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2825 struct sk_buff *skb, struct bna_txq_entry *txqent)
2827 u16 flags = 0;
2828 u32 gso_size;
2829 u16 vlan_tag = 0;
2831 if (vlan_tx_tag_present(skb)) {
2832 vlan_tag = (u16)vlan_tx_tag_get(skb);
2833 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2835 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2836 vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
2837 | (vlan_tag & 0x1fff);
2838 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2840 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2842 if (skb_is_gso(skb)) {
2843 gso_size = skb_shinfo(skb)->gso_size;
2844 if (unlikely(gso_size > bnad->netdev->mtu)) {
2845 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2846 return -EINVAL;
2848 if (unlikely((gso_size + skb_transport_offset(skb) +
2849 tcp_hdrlen(skb)) >= skb->len)) {
2850 txqent->hdr.wi.opcode =
2851 __constant_htons(BNA_TXQ_WI_SEND);
2852 txqent->hdr.wi.lso_mss = 0;
2853 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2854 } else {
2855 txqent->hdr.wi.opcode =
2856 __constant_htons(BNA_TXQ_WI_SEND_LSO);
2857 txqent->hdr.wi.lso_mss = htons(gso_size);
2860 if (bnad_tso_prepare(bnad, skb)) {
2861 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2862 return -EINVAL;
2865 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2866 txqent->hdr.wi.l4_hdr_size_n_offset =
2867 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
2868 tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
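/* tcp_hdrlen() >> 2 presumably encodes the TCP header length in 32-bit
 * words, alongside the transport-header offset, as the WI field expects.
 */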
2869 } else {
2870 txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
2871 txqent->hdr.wi.lso_mss = 0;
2873 if (unlikely(skb->len > (bnad->netdev->mtu + ETH_HLEN))) {
2874 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2875 return -EINVAL;
2878 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2879 u8 proto = 0;
2881 if (skb->protocol == __constant_htons(ETH_P_IP))
2882 proto = ip_hdr(skb)->protocol;
2883 #ifdef NETIF_F_IPV6_CSUM
2884 else if (skb->protocol ==
2885 __constant_htons(ETH_P_IPV6)) {
2886 /* nexthdr may not be TCP immediately. */
2887 proto = ipv6_hdr(skb)->nexthdr;
2889 #endif
2890 if (proto == IPPROTO_TCP) {
2891 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2892 txqent->hdr.wi.l4_hdr_size_n_offset =
2893 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2894 (0, skb_transport_offset(skb)));
2896 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2898 if (unlikely(skb_headlen(skb) <
2899 skb_transport_offset(skb) +
2900 tcp_hdrlen(skb))) {
2901 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2902 return -EINVAL;
2904 } else if (proto == IPPROTO_UDP) {
2905 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2906 txqent->hdr.wi.l4_hdr_size_n_offset =
2907 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2908 (0, skb_transport_offset(skb)));
2910 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2911 if (unlikely(skb_headlen(skb) <
2912 skb_transport_offset(skb) +
2913 sizeof(struct udphdr))) {
2914 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2915 return -EINVAL;
2917 } else {
2919 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2920 return -EINVAL;
2922 } else
2923 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2926 txqent->hdr.wi.flags = htons(flags);
2927 txqent->hdr.wi.frame_length = htonl(skb->len);
2929 return 0;
2933 * bnad_start_xmit : Netdev entry point for Transmit
2934 * Called under lock held by net_device
2936 static netdev_tx_t
2937 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2939 struct bnad *bnad = netdev_priv(netdev);
2940 u32 txq_id = 0;
2941 struct bna_tcb *tcb = NULL;
2942 struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
2943 u32 prod, q_depth, vect_id;
2944 u32 wis, vectors, len;
2945 int i;
2946 dma_addr_t dma_addr;
2947 struct bna_txq_entry *txqent;
2949 len = skb_headlen(skb);
2951 /* Sanity checks for the skb */
2953 if (unlikely(skb->len <= ETH_HLEN)) {
2954 dev_kfree_skb(skb);
2955 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2956 return NETDEV_TX_OK;
2958 if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
2959 dev_kfree_skb(skb);
2960 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2961 return NETDEV_TX_OK;
2963 if (unlikely(len == 0)) {
2964 dev_kfree_skb(skb);
2965 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2966 return NETDEV_TX_OK;
2969 tcb = bnad->tx_info[0].tcb[txq_id];
2972 * Takes care of the Tx that is scheduled between clearing the flag
2973 * and the netif_tx_stop_all_queues() call.
2975 if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2976 dev_kfree_skb(skb);
2977 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2978 return NETDEV_TX_OK;
2981 q_depth = tcb->q_depth;
2982 prod = tcb->producer_index;
2983 unmap_q = tcb->unmap_q;
2985 vectors = 1 + skb_shinfo(skb)->nr_frags;
2986 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2988 if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2989 dev_kfree_skb(skb);
2990 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2991 return NETDEV_TX_OK;
2994 /* Check for available TxQ resources */
2995 if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2996 if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
2997 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2998 u32 sent;
2999 sent = bnad_txcmpl_process(bnad, tcb);
3000 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3001 bna_ib_ack(tcb->i_dbell, sent);
3002 smp_mb__before_clear_bit();
3003 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
3004 } else {
3005 netif_stop_queue(netdev);
3006 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
3009 smp_mb();
3011 * Check again to deal with the race between netif_stop_queue()
3012 * here and netif_wake_queue() in the interrupt handler, which
3013 * does not run under the netif tx lock.
3015 if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
3016 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
3017 return NETDEV_TX_BUSY;
3018 } else {
3019 netif_wake_queue(netdev);
3020 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
3024 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3025 head_unmap = &unmap_q[prod];
3027 /* Program the opcode, flags, frame_len, num_vectors in WI */
3028 if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
3029 dev_kfree_skb(skb);
3030 return NETDEV_TX_OK;
3032 txqent->hdr.wi.reserved = 0;
3033 txqent->hdr.wi.num_vectors = vectors;
3035 head_unmap->skb = skb;
3036 head_unmap->nvecs = 0;
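/*
 * head_unmap tracks the skb and the running count of mapped vectors for
 * the whole frame; the per-vector DMA address/length pairs are recorded
 * in the individual unmap entries as they are programmed below.
 */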
3038 /* Program the vectors */
3039 unmap = head_unmap;
3040 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
3041 len, DMA_TO_DEVICE);
3042 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
3043 txqent->vector[0].length = htons(len);
3044 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
3045 head_unmap->nvecs++;
3047 for (i = 0, vect_id = 0; i < vectors - 1; i++) {
3048 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
3049 u32 size = skb_frag_size(frag);
3051 if (unlikely(size == 0)) {
3052 /* Undo the changes starting at tcb->producer_index */
3053 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3054 tcb->producer_index);
3055 dev_kfree_skb(skb);
3056 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
3057 return NETDEV_TX_OK;
3060 len += size;
3062 vect_id++;
3063 if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
3064 vect_id = 0;
3065 BNA_QE_INDX_INC(prod, q_depth);
3066 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3067 txqent->hdr.wi_ext.opcode =
3068 __constant_htons(BNA_TXQ_WI_EXTENSION);
3069 unmap = &unmap_q[prod];
3072 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
3073 0, size, DMA_TO_DEVICE);
3074 dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
3075 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
3076 txqent->vector[vect_id].length = htons(size);
3077 dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
3078 dma_addr);
3079 head_unmap->nvecs++;
3082 if (unlikely(len != skb->len)) {
3083 /* Undo the changes starting at tcb->producer_index */
3084 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
3085 dev_kfree_skb(skb);
3086 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
3087 return NETDEV_TX_OK;
3090 BNA_QE_INDX_INC(prod, q_depth);
3091 tcb->producer_index = prod;
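/*
 * The barrier below is presumably paired with one on the Tx-teardown
 * side: it publishes the producer_index update before re-checking
 * BNAD_TXQ_TX_STARTED, so either the cleanup path sees our work or we
 * see the cleared flag and skip the doorbell.
 */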
3093 smp_mb();
3095 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3096 return NETDEV_TX_OK;
3098 skb_tx_timestamp(skb);
3100 bna_txq_prod_indx_doorbell(tcb);
3101 smp_mb();
3103 return NETDEV_TX_OK;
3107 * A spin_lock is used to synchronize reading of the stats structures,
3108 * which are written by BNA under the same lock.
3110 static struct rtnl_link_stats64 *
3111 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
3113 struct bnad *bnad = netdev_priv(netdev);
3114 unsigned long flags;
3116 spin_lock_irqsave(&bnad->bna_lock, flags);
3118 bnad_netdev_qstats_fill(bnad, stats);
3119 bnad_netdev_hwstats_fill(bnad, stats);
3121 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3123 return stats;
3126 static void
3127 bnad_set_rx_ucast_fltr(struct bnad *bnad)
3129 struct net_device *netdev = bnad->netdev;
3130 int uc_count = netdev_uc_count(netdev);
3131 enum bna_cb_status ret;
3132 u8 *mac_list;
3133 struct netdev_hw_addr *ha;
3134 int entry;
3136 if (netdev_uc_empty(bnad->netdev)) {
3137 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
3138 return;
3141 if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
3142 goto mode_default;
3144 mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC);
3145 if (mac_list == NULL)
3146 goto mode_default;
3148 entry = 0;
3149 netdev_for_each_uc_addr(ha, netdev) {
3150 memcpy(&mac_list[entry * ETH_ALEN],
3151 &ha->addr[0], ETH_ALEN);
3152 entry++;
3155 ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry,
3156 mac_list, NULL);
3157 kfree(mac_list);
3159 if (ret != BNA_CB_SUCCESS)
3160 goto mode_default;
3162 return;
3164 /* ucast packets not in UCAM are routed to default function */
3165 mode_default:
3166 bnad->cfg_flags |= BNAD_CF_DEFAULT;
3167 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
3170 static void
3171 bnad_set_rx_mcast_fltr(struct bnad *bnad)
3173 struct net_device *netdev = bnad->netdev;
3174 int mc_count = netdev_mc_count(netdev);
3175 enum bna_cb_status ret;
3176 u8 *mac_list;
3178 if (netdev->flags & IFF_ALLMULTI)
3179 goto mode_allmulti;
3181 if (netdev_mc_empty(netdev))
3182 return;
3184 if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
3185 goto mode_allmulti;
3187 mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC);
3189 if (mac_list == NULL)
3190 goto mode_allmulti;
3192 memcpy(&mac_list[0], &bnad_bcast_addr[0], ETH_ALEN);
3194 /* copy rest of the MCAST addresses */
3195 bnad_netdev_mc_list_get(netdev, mac_list);
3196 ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
3197 mac_list, NULL);
3198 kfree(mac_list);
3200 if (ret != BNA_CB_SUCCESS)
3201 goto mode_allmulti;
3203 return;
3205 mode_allmulti:
3206 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
3207 bna_rx_mcast_delall(bnad->rx_info[0].rx, NULL);
3210 void
3211 bnad_set_rx_mode(struct net_device *netdev)
3213 struct bnad *bnad = netdev_priv(netdev);
3214 enum bna_rxmode new_mode, mode_mask;
3215 unsigned long flags;
3217 spin_lock_irqsave(&bnad->bna_lock, flags);
3219 if (bnad->rx_info[0].rx == NULL) {
3220 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3221 return;
3224 /* clear bnad flags to update it with new settings */
3225 bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
3226 BNAD_CF_ALLMULTI);
3228 new_mode = 0;
3229 if (netdev->flags & IFF_PROMISC) {
3230 new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
3231 bnad->cfg_flags |= BNAD_CF_PROMISC;
3232 } else {
3233 bnad_set_rx_mcast_fltr(bnad);
3235 if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
3236 new_mode |= BNA_RXMODE_ALLMULTI;
3238 bnad_set_rx_ucast_fltr(bnad);
3240 if (bnad->cfg_flags & BNAD_CF_DEFAULT)
3241 new_mode |= BNA_RXMODE_DEFAULT;
3244 mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
3245 BNA_RXMODE_ALLMULTI;
3246 bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL);
3248 if (bnad->cfg_flags & BNAD_CF_PROMISC)
3249 bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
3250 else
3251 bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
3253 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3257 * bna_lock is used to sync writes to netdev->addr
3258 * conf_lock cannot be used since this call may be made
3259 * in a non-blocking context.
3261 static int
3262 bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
3264 int err;
3265 struct bnad *bnad = netdev_priv(netdev);
3266 struct sockaddr *sa = (struct sockaddr *)mac_addr;
3267 unsigned long flags;
3269 spin_lock_irqsave(&bnad->bna_lock, flags);
3271 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
3273 if (!err)
3274 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
3276 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3278 return err;
3281 static int
3282 bnad_mtu_set(struct bnad *bnad, int frame_size)
3284 unsigned long flags;
3286 init_completion(&bnad->bnad_completions.mtu_comp);
3288 spin_lock_irqsave(&bnad->bna_lock, flags);
3289 bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
3290 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3292 wait_for_completion(&bnad->bnad_completions.mtu_comp);
3294 return bnad->bnad_completions.mtu_comp_status;
3297 static int
3298 bnad_change_mtu(struct net_device *netdev, int new_mtu)
3300 int err, mtu;
3301 struct bnad *bnad = netdev_priv(netdev);
3302 u32 rx_count = 0, frame, new_frame;
3304 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
3305 return -EINVAL;
3307 mutex_lock(&bnad->conf_mutex);
3309 mtu = netdev->mtu;
3310 netdev->mtu = new_mtu;
3312 frame = BNAD_FRAME_SIZE(mtu);
3313 new_frame = BNAD_FRAME_SIZE(new_mtu);
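/*
 * Example (assuming BNAD_FRAME_SIZE() adds the Ethernet framing overhead
 * to the MTU): moving from MTU 3000 to MTU 5000 crosses the 4096-byte
 * frame boundary checked below, so the Rx queues are re-created for
 * multi-buffer operation before the new MTU is programmed into the enet.
 */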
3315 /* check if multi-buffer needs to be enabled */
3316 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
3317 netif_running(bnad->netdev)) {
3318 /* only when transition is over 4K */
3319 if ((frame <= 4096 && new_frame > 4096) ||
3320 (frame > 4096 && new_frame <= 4096))
3321 rx_count = bnad_reinit_rx(bnad);
3324 /* rx_count > 0 - new rx created; Linux sets err = 0 and returns */
3327 err = bnad_mtu_set(bnad, new_frame);
3328 if (err)
3329 err = -EBUSY;
3331 mutex_unlock(&bnad->conf_mutex);
3332 return err;
3335 static int
3336 bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3338 struct bnad *bnad = netdev_priv(netdev);
3339 unsigned long flags;
3341 if (!bnad->rx_info[0].rx)
3342 return 0;
3344 mutex_lock(&bnad->conf_mutex);
3346 spin_lock_irqsave(&bnad->bna_lock, flags);
3347 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
3348 set_bit(vid, bnad->active_vlans);
3349 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3351 mutex_unlock(&bnad->conf_mutex);
3353 return 0;
3356 static int
3357 bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3359 struct bnad *bnad = netdev_priv(netdev);
3360 unsigned long flags;
3362 if (!bnad->rx_info[0].rx)
3363 return 0;
3365 mutex_lock(&bnad->conf_mutex);
3367 spin_lock_irqsave(&bnad->bna_lock, flags);
3368 clear_bit(vid, bnad->active_vlans);
3369 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3370 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3372 mutex_unlock(&bnad->conf_mutex);
3374 return 0;
3377 #ifdef CONFIG_NET_POLL_CONTROLLER
3378 static void
3379 bnad_netpoll(struct net_device *netdev)
3381 struct bnad *bnad = netdev_priv(netdev);
3382 struct bnad_rx_info *rx_info;
3383 struct bnad_rx_ctrl *rx_ctrl;
3384 u32 curr_mask;
3385 int i, j;
3387 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3388 bna_intx_disable(&bnad->bna, curr_mask);
3389 bnad_isr(bnad->pcidev->irq, netdev);
3390 bna_intx_enable(&bnad->bna, curr_mask);
3391 } else {
3393 * Tx processing may happen in sending context, so no need
3394 * to explicitly process completions here
3397 /* Rx processing */
3398 for (i = 0; i < bnad->num_rx; i++) {
3399 rx_info = &bnad->rx_info[i];
3400 if (!rx_info->rx)
3401 continue;
3402 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3403 rx_ctrl = &rx_info->rx_ctrl[j];
3404 if (rx_ctrl->ccb)
3405 bnad_netif_rx_schedule_poll(bnad,
3406 rx_ctrl->ccb);
3411 #endif
3413 static const struct net_device_ops bnad_netdev_ops = {
3414 .ndo_open = bnad_open,
3415 .ndo_stop = bnad_stop,
3416 .ndo_start_xmit = bnad_start_xmit,
3417 .ndo_get_stats64 = bnad_get_stats64,
3418 .ndo_set_rx_mode = bnad_set_rx_mode,
3419 .ndo_validate_addr = eth_validate_addr,
3420 .ndo_set_mac_address = bnad_set_mac_address,
3421 .ndo_change_mtu = bnad_change_mtu,
3422 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
3423 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
3424 #ifdef CONFIG_NET_POLL_CONTROLLER
3425 .ndo_poll_controller = bnad_netpoll
3426 #endif
3429 static void
3430 bnad_netdev_init(struct bnad *bnad, bool using_dac)
3432 struct net_device *netdev = bnad->netdev;
3434 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3435 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3436 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX;
3438 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3439 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3440 NETIF_F_TSO | NETIF_F_TSO6;
3442 netdev->features |= netdev->hw_features |
3443 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3445 if (using_dac)
3446 netdev->features |= NETIF_F_HIGHDMA;
3448 netdev->mem_start = bnad->mmio_start;
3449 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3451 netdev->netdev_ops = &bnad_netdev_ops;
3452 bnad_set_ethtool_ops(netdev);
3456 * 1. Initialize the bnad structure
3457 * 2. Setup netdev pointer in pci_dev
3458 * 3. Initialize no. of TxQ & CQs & MSIX vectors
3459 * 4. Initialize work queue.
3461 static int
3462 bnad_init(struct bnad *bnad,
3463 struct pci_dev *pdev, struct net_device *netdev)
3465 unsigned long flags;
3467 SET_NETDEV_DEV(netdev, &pdev->dev);
3468 pci_set_drvdata(pdev, netdev);
3470 bnad->netdev = netdev;
3471 bnad->pcidev = pdev;
3472 bnad->mmio_start = pci_resource_start(pdev, 0);
3473 bnad->mmio_len = pci_resource_len(pdev, 0);
3474 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3475 if (!bnad->bar0) {
3476 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3477 return -ENOMEM;
3479 pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
3480 (unsigned long long) bnad->mmio_len);
3482 spin_lock_irqsave(&bnad->bna_lock, flags);
3483 if (!bnad_msix_disable)
3484 bnad->cfg_flags = BNAD_CF_MSIX;
3486 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3488 bnad_q_num_init(bnad);
3489 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3491 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3492 (bnad->num_rx * bnad->num_rxp_per_rx) +
3493 BNAD_MAILBOX_MSIX_VECTORS;
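/*
 * Worked example (assuming BNAD_MAILBOX_MSIX_VECTORS == 1): with the
 * defaults of one Tx object with one TxQ and one Rx object with, say,
 * eight Rx paths, msix_num = 1 + 8 + 1 = 10 vectors are requested later
 * in bnad_enable_msix().
 */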
3495 bnad->txq_depth = BNAD_TXQ_DEPTH;
3496 bnad->rxq_depth = BNAD_RXQ_DEPTH;
3498 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3499 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3501 sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3502 bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3503 if (!bnad->work_q) {
3504 iounmap(bnad->bar0);
3505 return -ENOMEM;
3508 return 0;
3512 * Must be called after bnad_pci_uninit()
3513 * so that iounmap() and pci_set_drvdata(NULL)
3514 * happen only after PCI uninitialization.
3516 static void
3517 bnad_uninit(struct bnad *bnad)
3519 if (bnad->work_q) {
3520 flush_workqueue(bnad->work_q);
3521 destroy_workqueue(bnad->work_q);
3522 bnad->work_q = NULL;
3525 if (bnad->bar0)
3526 iounmap(bnad->bar0);
3530 * Initialize locks
3531 a) Per-ioceth mutex used for serializing configuration
3532 changes from OS interface
3533 b) spin lock used to protect bna state machine
3535 static void
3536 bnad_lock_init(struct bnad *bnad)
3538 spin_lock_init(&bnad->bna_lock);
3539 mutex_init(&bnad->conf_mutex);
3540 mutex_init(&bnad_list_mutex);
3543 static void
3544 bnad_lock_uninit(struct bnad *bnad)
3546 mutex_destroy(&bnad->conf_mutex);
3547 mutex_destroy(&bnad_list_mutex);
3550 /* PCI Initialization */
3551 static int
3552 bnad_pci_init(struct bnad *bnad,
3553 struct pci_dev *pdev, bool *using_dac)
3555 int err;
3557 err = pci_enable_device(pdev);
3558 if (err)
3559 return err;
3560 err = pci_request_regions(pdev, BNAD_NAME);
3561 if (err)
3562 goto disable_device;
3563 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3564 *using_dac = true;
3565 } else {
3566 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3567 if (err)
3568 goto release_regions;
3569 *using_dac = false;
3571 pci_set_master(pdev);
3572 return 0;
3574 release_regions:
3575 pci_release_regions(pdev);
3576 disable_device:
3577 pci_disable_device(pdev);
3579 return err;
3582 static void
3583 bnad_pci_uninit(struct pci_dev *pdev)
3585 pci_release_regions(pdev);
3586 pci_disable_device(pdev);
3589 static int
3590 bnad_pci_probe(struct pci_dev *pdev,
3591 const struct pci_device_id *pcidev_id)
3593 bool using_dac;
3594 int err;
3595 struct bnad *bnad;
3596 struct bna *bna;
3597 struct net_device *netdev;
3598 struct bfa_pcidev pcidev_info;
3599 unsigned long flags;
3601 pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3602 pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3604 mutex_lock(&bnad_fwimg_mutex);
3605 if (!cna_get_firmware_buf(pdev)) {
3606 mutex_unlock(&bnad_fwimg_mutex);
3607 pr_warn("Failed to load Firmware Image!\n");
3608 return -ENODEV;
3610 mutex_unlock(&bnad_fwimg_mutex);
3613 * Allocates sizeof(struct net_device) + sizeof(struct bnad);
3614 * bnad = netdev_priv(netdev)
3616 netdev = alloc_etherdev(sizeof(struct bnad));
3617 if (!netdev) {
3618 err = -ENOMEM;
3619 return err;
3621 bnad = netdev_priv(netdev);
3622 bnad_lock_init(bnad);
3623 bnad_add_to_list(bnad);
3625 mutex_lock(&bnad->conf_mutex);
3627 * PCI initialization
3628 * Output : using_dac = 1 for 64 bit DMA
3629 * = 0 for 32 bit DMA
3631 using_dac = false;
3632 err = bnad_pci_init(bnad, pdev, &using_dac);
3633 if (err)
3634 goto unlock_mutex;
3637 * Initialize bnad structure
3638 * Setup relation between pci_dev & netdev
3640 err = bnad_init(bnad, pdev, netdev);
3641 if (err)
3642 goto pci_uninit;
3644 /* Initialize netdev structure, set up ethtool ops */
3645 bnad_netdev_init(bnad, using_dac);
3647 /* Set link to down state */
3648 netif_carrier_off(netdev);
3650 /* Setup the debugfs node for this bnad */
3651 if (bna_debugfs_enable)
3652 bnad_debugfs_init(bnad);
3654 /* Get resource requirement from bna */
3655 spin_lock_irqsave(&bnad->bna_lock, flags);
3656 bna_res_req(&bnad->res_info[0]);
3657 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3659 /* Allocate resources from bna */
3660 err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3661 if (err)
3662 goto drv_uninit;
3664 bna = &bnad->bna;
3666 /* Setup pcidev_info for bna_init() */
3667 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3668 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3669 pcidev_info.device_id = bnad->pcidev->device;
3670 pcidev_info.pci_bar_kva = bnad->bar0;
3672 spin_lock_irqsave(&bnad->bna_lock, flags);
3673 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3674 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3676 bnad->stats.bna_stats = &bna->stats;
3678 bnad_enable_msix(bnad);
3679 err = bnad_mbox_irq_alloc(bnad);
3680 if (err)
3681 goto res_free;
3683 /* Set up timers */
3684 setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
3685 ((unsigned long)bnad));
3686 setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
3687 ((unsigned long)bnad));
3688 setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
3689 ((unsigned long)bnad));
3690 setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3691 ((unsigned long)bnad));
3693 /* Now start the timer before calling IOC */
3694 mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
3695 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3698 * Start the chip
3699 * If the callback comes back with an error, we bail out.
3700 * This is a catastrophic error.
3702 err = bnad_ioceth_enable(bnad);
3703 if (err) {
3704 pr_err("BNA: Initialization failed err=%d\n",
3705 err);
3706 goto probe_success;
3709 spin_lock_irqsave(&bnad->bna_lock, flags);
3710 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3711 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3712 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3713 bna_attr(bna)->num_rxp - 1);
3714 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3715 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3716 err = -EIO;
3718 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3719 if (err)
3720 goto disable_ioceth;
3722 spin_lock_irqsave(&bnad->bna_lock, flags);
3723 bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3724 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3726 err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3727 if (err) {
3728 err = -EIO;
3729 goto disable_ioceth;
3732 spin_lock_irqsave(&bnad->bna_lock, flags);
3733 bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3734 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3736 /* Get the burnt-in mac */
3737 spin_lock_irqsave(&bnad->bna_lock, flags);
3738 bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
3739 bnad_set_netdev_perm_addr(bnad);
3740 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3742 mutex_unlock(&bnad->conf_mutex);
3744 /* Finally, register with the net_device layer */
3745 err = register_netdev(netdev);
3746 if (err) {
3747 pr_err("BNA : Registering with netdev failed\n");
3748 goto probe_uninit;
3750 set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3752 return 0;
3754 probe_success:
3755 mutex_unlock(&bnad->conf_mutex);
3756 return 0;
3758 probe_uninit:
3759 mutex_lock(&bnad->conf_mutex);
3760 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3761 disable_ioceth:
3762 bnad_ioceth_disable(bnad);
3763 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3764 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3765 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3766 spin_lock_irqsave(&bnad->bna_lock, flags);
3767 bna_uninit(bna);
3768 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3769 bnad_mbox_irq_free(bnad);
3770 bnad_disable_msix(bnad);
3771 res_free:
3772 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3773 drv_uninit:
3774 /* Remove the debugfs node for this bnad */
3775 kfree(bnad->regdata);
3776 bnad_debugfs_uninit(bnad);
3777 bnad_uninit(bnad);
3778 pci_uninit:
3779 bnad_pci_uninit(pdev);
3780 unlock_mutex:
3781 mutex_unlock(&bnad->conf_mutex);
3782 bnad_remove_from_list(bnad);
3783 bnad_lock_uninit(bnad);
3784 free_netdev(netdev);
3785 return err;
3788 static void
3789 bnad_pci_remove(struct pci_dev *pdev)
3791 struct net_device *netdev = pci_get_drvdata(pdev);
3792 struct bnad *bnad;
3793 struct bna *bna;
3794 unsigned long flags;
3796 if (!netdev)
3797 return;
3799 pr_info("%s bnad_pci_remove\n", netdev->name);
3800 bnad = netdev_priv(netdev);
3801 bna = &bnad->bna;
3803 if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3804 unregister_netdev(netdev);
3806 mutex_lock(&bnad->conf_mutex);
3807 bnad_ioceth_disable(bnad);
3808 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3809 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3810 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3811 spin_lock_irqsave(&bnad->bna_lock, flags);
3812 bna_uninit(bna);
3813 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3815 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3816 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3817 bnad_mbox_irq_free(bnad);
3818 bnad_disable_msix(bnad);
3819 bnad_pci_uninit(pdev);
3820 mutex_unlock(&bnad->conf_mutex);
3821 bnad_remove_from_list(bnad);
3822 bnad_lock_uninit(bnad);
3823 /* Remove the debugfs node for this bnad */
3824 kfree(bnad->regdata);
3825 bnad_debugfs_uninit(bnad);
3826 bnad_uninit(bnad);
3827 free_netdev(netdev);
3830 static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
3832 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3833 PCI_DEVICE_ID_BROCADE_CT),
3834 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3835 .class_mask = 0xffff00
3838 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3839 BFA_PCI_DEVICE_ID_CT2),
3840 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3841 .class_mask = 0xffff00
3843 {0, },
3846 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3848 static struct pci_driver bnad_pci_driver = {
3849 .name = BNAD_NAME,
3850 .id_table = bnad_pci_id_table,
3851 .probe = bnad_pci_probe,
3852 .remove = bnad_pci_remove,
3855 static int __init
3856 bnad_module_init(void)
3858 int err;
3860 pr_info("Brocade 10G Ethernet driver - version: %s\n",
3861 BNAD_VERSION);
3863 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3865 err = pci_register_driver(&bnad_pci_driver);
3866 if (err < 0) {
3867 pr_err("bna : PCI registration failed in module init "
3868 "(%d)\n", err);
3869 return err;
3872 return 0;
3875 static void __exit
3876 bnad_module_exit(void)
3878 pci_unregister_driver(&bnad_pci_driver);
3879 release_firmware(bfi_fw);
3882 module_init(bnad_module_init);
3883 module_exit(bnad_module_exit);
3885 MODULE_AUTHOR("Brocade");
3886 MODULE_LICENSE("GPL");
3887 MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3888 MODULE_VERSION(BNAD_VERSION);
3889 MODULE_FIRMWARE(CNA_FW_FILE_CT);
3890 MODULE_FIRMWARE(CNA_FW_FILE_CT2);