drivers/net/ethernet/brocade/bna/bnad.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Linux network driver for QLogic BR-series Converged Network Adapter.
4 */
5 /*
6 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
7 * Copyright (c) 2014-2015 QLogic Corporation
8 * All rights reserved
9 * www.qlogic.com
10 */
11 #include <linux/bitops.h>
12 #include <linux/netdevice.h>
13 #include <linux/skbuff.h>
14 #include <linux/etherdevice.h>
15 #include <linux/in.h>
16 #include <linux/ethtool.h>
17 #include <linux/if_vlan.h>
18 #include <linux/if_ether.h>
19 #include <linux/ip.h>
20 #include <linux/prefetch.h>
21 #include <linux/module.h>
23 #include "bnad.h"
24 #include "bna.h"
25 #include "cna.h"
27 static DEFINE_MUTEX(bnad_fwimg_mutex);
29 /*
30 * Module params
31 */
32 static uint bnad_msix_disable;
33 module_param(bnad_msix_disable, uint, 0444);
34 MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
36 static uint bnad_ioc_auto_recover = 1;
37 module_param(bnad_ioc_auto_recover, uint, 0444);
38 MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
40 static uint bna_debugfs_enable = 1;
41 module_param(bna_debugfs_enable, uint, 0644);
42 MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
43 " Range[false:0|true:1]");
45 /*
46 * Global variables
47 */
48 static u32 bnad_rxqs_per_cq = 2;
49 static atomic_t bna_id;
50 static const u8 bnad_bcast_addr[] __aligned(2) =
51 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
53 /*
54 * Local MACROS
55 */
56 #define BNAD_GET_MBOX_IRQ(_bnad) \
57 (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
58 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
59 ((_bnad)->pcidev->irq))
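/*
 * BNAD_GET_MBOX_IRQ resolves to the dedicated mailbox MSI-X vector when
 * MSI-X is enabled in cfg_flags, and to the device's legacy INTx line
 * (pcidev->irq) otherwise.
 */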
61 #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size) \
62 do { \
63 (_res_info)->res_type = BNA_RES_T_MEM; \
64 (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
65 (_res_info)->res_u.mem_info.num = (_num); \
66 (_res_info)->res_u.mem_info.len = (_size); \
67 } while (0)
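/*
 * BNAD_FILL_UNMAPQ_MEM_REQ records a kernel-virtual (KVA) memory
 * requirement of _num blocks of _size bytes each; bnad_setup_tx() and
 * bnad_setup_rx() use it below to size the per-queue unmap arrays.
 */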
69 /*
70 * Reinitialize completions in CQ, once Rx is taken down
71 */
72 static void
73 bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
75 struct bna_cq_entry *cmpl;
76 int i;
78 for (i = 0; i < ccb->q_depth; i++) {
79 cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
80 cmpl->valid = 0;
84 /* Tx Datapath functions */
87 /* Caller should ensure that the entry at unmap_q[index] is valid */
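/*
 * Unmaps the head fragment (mapped with dma_map_single()) and every page
 * fragment of the frame starting at unmap_q[index], clears the unmap
 * entries and returns the index just past the last work item consumed.
 */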
88 static u32
89 bnad_tx_buff_unmap(struct bnad *bnad,
90 struct bnad_tx_unmap *unmap_q,
91 u32 q_depth, u32 index)
93 struct bnad_tx_unmap *unmap;
94 struct sk_buff *skb;
95 int vector, nvecs;
97 unmap = &unmap_q[index];
98 nvecs = unmap->nvecs;
100 skb = unmap->skb;
101 unmap->skb = NULL;
102 unmap->nvecs = 0;
103 dma_unmap_single(&bnad->pcidev->dev,
104 dma_unmap_addr(&unmap->vectors[0], dma_addr),
105 skb_headlen(skb), DMA_TO_DEVICE);
106 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
107 nvecs--;
109 vector = 0;
110 while (nvecs) {
111 vector++;
112 if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
113 vector = 0;
114 BNA_QE_INDX_INC(index, q_depth);
115 unmap = &unmap_q[index];
118 dma_unmap_page(&bnad->pcidev->dev,
119 dma_unmap_addr(&unmap->vectors[vector], dma_addr),
120 dma_unmap_len(&unmap->vectors[vector], dma_len),
121 DMA_TO_DEVICE);
122 dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
123 nvecs--;
126 BNA_QE_INDX_INC(index, q_depth);
128 return index;
131 /*
132 * Frees all pending Tx Bufs
133 * At this point no activity is expected on the Q,
134 * so DMA unmap & freeing is fine.
135 */
136 static void
137 bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
139 struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
140 struct sk_buff *skb;
141 int i;
143 for (i = 0; i < tcb->q_depth; i++) {
144 skb = unmap_q[i].skb;
145 if (!skb)
146 continue;
147 bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
149 dev_kfree_skb_any(skb);
153 /*
154 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
155 * Can be called in a) Interrupt context
156 *                  b) Sending context
157 */
158 static u32
159 bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
161 u32 sent_packets = 0, sent_bytes = 0;
162 u32 wis, unmap_wis, hw_cons, cons, q_depth;
163 struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
164 struct bnad_tx_unmap *unmap;
165 struct sk_buff *skb;
167 /* Just return if TX is stopped */
168 if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
169 return 0;
171 hw_cons = *(tcb->hw_consumer_index);
172 rmb();
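/*
 * The read barrier keeps the loads of the queue entries below from being
 * reordered before the read of the hardware consumer index.
 */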
173 cons = tcb->consumer_index;
174 q_depth = tcb->q_depth;
176 wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
177 BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
179 while (wis) {
180 unmap = &unmap_q[cons];
182 skb = unmap->skb;
184 sent_packets++;
185 sent_bytes += skb->len;
187 unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
188 wis -= unmap_wis;
190 cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
191 dev_kfree_skb_any(skb);
194 /* Update consumer pointers. */
195 tcb->consumer_index = hw_cons;
197 tcb->txq->tx_packets += sent_packets;
198 tcb->txq->tx_bytes += sent_bytes;
200 return sent_packets;
203 static u32
204 bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
206 struct net_device *netdev = bnad->netdev;
207 u32 sent = 0;
209 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
210 return 0;
212 sent = bnad_txcmpl_process(bnad, tcb);
213 if (sent) {
214 if (netif_queue_stopped(netdev) &&
215 netif_carrier_ok(netdev) &&
216 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
217 BNAD_NETIF_WAKE_THRESHOLD) {
218 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
219 netif_wake_queue(netdev);
220 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
225 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
226 bna_ib_ack(tcb->i_dbell, sent);
228 smp_mb__before_atomic();
229 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
231 return sent;
234 /* MSIX Tx Completion Handler */
235 static irqreturn_t
236 bnad_msix_tx(int irq, void *data)
238 struct bna_tcb *tcb = (struct bna_tcb *)data;
239 struct bnad *bnad = tcb->bnad;
241 bnad_tx_complete(bnad, tcb);
243 return IRQ_HANDLED;
246 static inline void
247 bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
249 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
251 unmap_q->reuse_pi = -1;
252 unmap_q->alloc_order = -1;
253 unmap_q->map_size = 0;
254 unmap_q->type = BNAD_RXBUF_NONE;
257 /* Default is page-based allocation. Multi-buffer support - TBD */
258 static int
259 bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
261 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
262 int order;
264 bnad_rxq_alloc_uninit(bnad, rcb);
266 order = get_order(rcb->rxq->buffer_size);
268 unmap_q->type = BNAD_RXBUF_PAGE;
270 if (bna_is_small_rxq(rcb->id)) {
271 unmap_q->alloc_order = 0;
272 unmap_q->map_size = rcb->rxq->buffer_size;
273 } else {
274 if (rcb->rxq->multi_buffer) {
275 unmap_q->alloc_order = 0;
276 unmap_q->map_size = rcb->rxq->buffer_size;
277 unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
278 } else {
279 unmap_q->alloc_order = order;
280 unmap_q->map_size =
281 (rcb->rxq->buffer_size > 2048) ?
282 PAGE_SIZE << order : 2048;
286 BUG_ON((PAGE_SIZE << order) % unmap_q->map_size);
288 return 0;
291 static inline void
292 bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
294 if (!unmap->page)
295 return;
297 dma_unmap_page(&bnad->pcidev->dev,
298 dma_unmap_addr(&unmap->vector, dma_addr),
299 unmap->vector.len, DMA_FROM_DEVICE);
300 put_page(unmap->page);
301 unmap->page = NULL;
302 dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
303 unmap->vector.len = 0;
306 static inline void
307 bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
309 if (!unmap->skb)
310 return;
312 dma_unmap_single(&bnad->pcidev->dev,
313 dma_unmap_addr(&unmap->vector, dma_addr),
314 unmap->vector.len, DMA_FROM_DEVICE);
315 dev_kfree_skb_any(unmap->skb);
316 unmap->skb = NULL;
317 dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
318 unmap->vector.len = 0;
321 static void
322 bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
324 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
325 int i;
327 for (i = 0; i < rcb->q_depth; i++) {
328 struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
330 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
331 bnad_rxq_cleanup_skb(bnad, unmap);
332 else
333 bnad_rxq_cleanup_page(bnad, unmap);
335 bnad_rxq_alloc_uninit(bnad, rcb);
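/*
 * Refills the RxQ with page-backed buffers: a page of order
 * unmap_q->alloc_order is carved into map_size chunks, and reuse_pi
 * remembers a partially consumed page so the next buffer can share it via
 * get_page() instead of allocating a fresh one.
 */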
338 static u32
339 bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
341 u32 alloced, prod, q_depth;
342 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
343 struct bnad_rx_unmap *unmap, *prev;
344 struct bna_rxq_entry *rxent;
345 struct page *page;
346 u32 page_offset, alloc_size;
347 dma_addr_t dma_addr;
349 prod = rcb->producer_index;
350 q_depth = rcb->q_depth;
352 alloc_size = PAGE_SIZE << unmap_q->alloc_order;
353 alloced = 0;
355 while (nalloc--) {
356 unmap = &unmap_q->unmap[prod];
358 if (unmap_q->reuse_pi < 0) {
359 page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
360 unmap_q->alloc_order);
361 page_offset = 0;
362 } else {
363 prev = &unmap_q->unmap[unmap_q->reuse_pi];
364 page = prev->page;
365 page_offset = prev->page_offset + unmap_q->map_size;
366 get_page(page);
369 if (unlikely(!page)) {
370 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
371 rcb->rxq->rxbuf_alloc_failed++;
372 goto finishing;
375 dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
376 unmap_q->map_size, DMA_FROM_DEVICE);
377 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
378 put_page(page);
379 BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
380 rcb->rxq->rxbuf_map_failed++;
381 goto finishing;
384 unmap->page = page;
385 unmap->page_offset = page_offset;
386 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
387 unmap->vector.len = unmap_q->map_size;
388 page_offset += unmap_q->map_size;
390 if (page_offset < alloc_size)
391 unmap_q->reuse_pi = prod;
392 else
393 unmap_q->reuse_pi = -1;
395 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
396 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
397 BNA_QE_INDX_INC(prod, q_depth);
398 alloced++;
401 finishing:
402 if (likely(alloced)) {
403 rcb->producer_index = prod;
404 smp_mb();
405 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
406 bna_rxq_prod_indx_doorbell(rcb);
409 return alloced;
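/* Refills the RxQ with skb-backed buffers of rxq->buffer_size bytes each. */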
412 static u32
413 bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
415 u32 alloced, prod, q_depth, buff_sz;
416 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
417 struct bnad_rx_unmap *unmap;
418 struct bna_rxq_entry *rxent;
419 struct sk_buff *skb;
420 dma_addr_t dma_addr;
422 buff_sz = rcb->rxq->buffer_size;
423 prod = rcb->producer_index;
424 q_depth = rcb->q_depth;
426 alloced = 0;
427 while (nalloc--) {
428 unmap = &unmap_q->unmap[prod];
430 skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);
432 if (unlikely(!skb)) {
433 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
434 rcb->rxq->rxbuf_alloc_failed++;
435 goto finishing;
438 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
439 buff_sz, DMA_FROM_DEVICE);
440 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
441 dev_kfree_skb_any(skb);
442 BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
443 rcb->rxq->rxbuf_map_failed++;
444 goto finishing;
447 unmap->skb = skb;
448 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
449 unmap->vector.len = buff_sz;
451 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
452 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
453 BNA_QE_INDX_INC(prod, q_depth);
454 alloced++;
457 finishing:
458 if (likely(alloced)) {
459 rcb->producer_index = prod;
460 smp_mb();
461 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
462 bna_rxq_prod_indx_doorbell(rcb);
465 return alloced;
468 static inline void
469 bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
471 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
472 u32 to_alloc;
474 to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
475 if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
476 return;
478 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
479 bnad_rxq_refill_skb(bnad, rcb, to_alloc);
480 else
481 bnad_rxq_refill_page(bnad, rcb, to_alloc);
484 #define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
485 BNA_CQ_EF_IPV6 | \
486 BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
487 BNA_CQ_EF_L4_CKSUM_OK)
489 #define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
490 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
491 #define flags_tcp6 (BNA_CQ_EF_IPV6 | \
492 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
493 #define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
494 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
495 #define flags_udp6 (BNA_CQ_EF_IPV6 | \
496 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
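/*
 * The masks above isolate the protocol and checksum bits of a completion,
 * so a single compare against flags_tcp4/tcp6/udp4/udp6 in bnad_cq_process()
 * decides whether CHECKSUM_UNNECESSARY can be reported.
 */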
498 static void
499 bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
500 u32 sop_ci, u32 nvecs)
502 struct bnad_rx_unmap_q *unmap_q;
503 struct bnad_rx_unmap *unmap;
504 u32 ci, vec;
506 unmap_q = rcb->unmap_q;
507 for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
508 unmap = &unmap_q->unmap[ci];
509 BNA_QE_INDX_INC(ci, rcb->q_depth);
511 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
512 bnad_rxq_cleanup_skb(bnad, unmap);
513 else
514 bnad_rxq_cleanup_page(bnad, unmap);
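/*
 * Attaches the nvecs page fragments described by the completion entries to
 * an skb obtained from napi_get_frags(), unmapping each page and updating
 * skb->len, data_len and truesize along the way.
 */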
518 static void
519 bnad_cq_setup_skb_frags(struct bna_ccb *ccb, struct sk_buff *skb, u32 nvecs)
521 struct bna_rcb *rcb;
522 struct bnad *bnad;
523 struct bnad_rx_unmap_q *unmap_q;
524 struct bna_cq_entry *cq, *cmpl;
525 u32 ci, pi, totlen = 0;
527 cq = ccb->sw_q;
528 pi = ccb->producer_index;
529 cmpl = &cq[pi];
531 rcb = bna_is_small_rxq(cmpl->rxq_id) ? ccb->rcb[1] : ccb->rcb[0];
532 unmap_q = rcb->unmap_q;
533 bnad = rcb->bnad;
534 ci = rcb->consumer_index;
536 /* prefetch header */
537 prefetch(page_address(unmap_q->unmap[ci].page) +
538 unmap_q->unmap[ci].page_offset);
540 while (nvecs--) {
541 struct bnad_rx_unmap *unmap;
542 u32 len;
544 unmap = &unmap_q->unmap[ci];
545 BNA_QE_INDX_INC(ci, rcb->q_depth);
547 dma_unmap_page(&bnad->pcidev->dev,
548 dma_unmap_addr(&unmap->vector, dma_addr),
549 unmap->vector.len, DMA_FROM_DEVICE);
551 len = ntohs(cmpl->length);
552 skb->truesize += unmap->vector.len;
553 totlen += len;
555 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
556 unmap->page, unmap->page_offset, len);
558 unmap->page = NULL;
559 unmap->vector.len = 0;
561 BNA_QE_INDX_INC(pi, ccb->q_depth);
562 cmpl = &cq[pi];
565 skb->len += totlen;
566 skb->data_len += totlen;
569 static inline void
570 bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
571 struct bnad_rx_unmap *unmap, u32 len)
573 prefetch(skb->data);
575 dma_unmap_single(&bnad->pcidev->dev,
576 dma_unmap_addr(&unmap->vector, dma_addr),
577 unmap->vector.len, DMA_FROM_DEVICE);
579 skb_put(skb, len);
580 skb->protocol = eth_type_trans(skb, bnad->netdev);
582 unmap->skb = NULL;
583 unmap->vector.len = 0;
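/*
 * NAPI worker for one completion queue: walks valid completions up to the
 * budget, builds skbs (single-buffer or multi-fragment), applies RX
 * checksum and VLAN offload results, hands the packets to the stack and
 * finally replenishes the receive queues.
 */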
586 static u32
587 bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
589 struct bna_cq_entry *cq, *cmpl, *next_cmpl;
590 struct bna_rcb *rcb = NULL;
591 struct bnad_rx_unmap_q *unmap_q;
592 struct bnad_rx_unmap *unmap = NULL;
593 struct sk_buff *skb = NULL;
594 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
595 struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
596 u32 packets = 0, len = 0, totlen = 0;
597 u32 pi, vec, sop_ci = 0, nvecs = 0;
598 u32 flags, masked_flags;
600 prefetch(bnad->netdev);
602 cq = ccb->sw_q;
604 while (packets < budget) {
605 cmpl = &cq[ccb->producer_index];
606 if (!cmpl->valid)
607 break;
608 /* The 'valid' field is set by the adapter, only after writing
609 * the other fields of completion entry. Hence, do not load
610 * other fields of completion entry *before* the 'valid' is
611 * loaded. Adding the rmb() here prevents the compiler and/or
612 * CPU from reordering the reads which would potentially result
613 * in reading stale values in completion entry.
614 */
615 rmb();
617 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
619 if (bna_is_small_rxq(cmpl->rxq_id))
620 rcb = ccb->rcb[1];
621 else
622 rcb = ccb->rcb[0];
624 unmap_q = rcb->unmap_q;
626 /* start of packet ci */
627 sop_ci = rcb->consumer_index;
629 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
630 unmap = &unmap_q->unmap[sop_ci];
631 skb = unmap->skb;
632 } else {
633 skb = napi_get_frags(&rx_ctrl->napi);
634 if (unlikely(!skb))
635 break;
637 prefetch(skb);
639 flags = ntohl(cmpl->flags);
640 len = ntohs(cmpl->length);
641 totlen = len;
642 nvecs = 1;
644 /* Check all the completions for this frame.
645 * busy-wait doesn't help much, break here.
646 */
647 if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
648 (flags & BNA_CQ_EF_EOP) == 0) {
649 pi = ccb->producer_index;
650 do {
651 BNA_QE_INDX_INC(pi, ccb->q_depth);
652 next_cmpl = &cq[pi];
654 if (!next_cmpl->valid)
655 break;
656 /* The 'valid' field is set by the adapter, only
657 * after writing the other fields of completion
658 * entry. Hence, do not load other fields of
659 * completion entry *before* the 'valid' is
660 * loaded. Adding the rmb() here prevents the
661 * compiler and/or CPU from reordering the reads
662 * which would potentially result in reading
663 * stale values in completion entry.
664 */
665 rmb();
667 len = ntohs(next_cmpl->length);
668 flags = ntohl(next_cmpl->flags);
670 nvecs++;
671 totlen += len;
672 } while ((flags & BNA_CQ_EF_EOP) == 0);
674 if (!next_cmpl->valid)
675 break;
677 packets++;
679 /* TODO: BNA_CQ_EF_LOCAL ? */
680 if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
681 BNA_CQ_EF_FCS_ERROR |
682 BNA_CQ_EF_TOO_LONG))) {
683 bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
684 rcb->rxq->rx_packets_with_error++;
686 goto next;
689 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
690 bnad_cq_setup_skb(bnad, skb, unmap, len);
691 else
692 bnad_cq_setup_skb_frags(ccb, skb, nvecs);
694 rcb->rxq->rx_packets++;
695 rcb->rxq->rx_bytes += totlen;
696 ccb->bytes_per_intr += totlen;
698 masked_flags = flags & flags_cksum_prot_mask;
700 if (likely
701 ((bnad->netdev->features & NETIF_F_RXCSUM) &&
702 ((masked_flags == flags_tcp4) ||
703 (masked_flags == flags_udp4) ||
704 (masked_flags == flags_tcp6) ||
705 (masked_flags == flags_udp6))))
706 skb->ip_summed = CHECKSUM_UNNECESSARY;
707 else
708 skb_checksum_none_assert(skb);
710 if ((flags & BNA_CQ_EF_VLAN) &&
711 (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
712 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
714 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
715 netif_receive_skb(skb);
716 else
717 napi_gro_frags(&rx_ctrl->napi);
719 next:
720 BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
721 for (vec = 0; vec < nvecs; vec++) {
722 cmpl = &cq[ccb->producer_index];
723 cmpl->valid = 0;
724 BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
728 napi_gro_flush(&rx_ctrl->napi, false);
729 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
730 bna_ib_ack_disable_irq(ccb->i_dbell, packets);
732 bnad_rxq_post(bnad, ccb->rcb[0]);
733 if (ccb->rcb[1])
734 bnad_rxq_post(bnad, ccb->rcb[1]);
736 return packets;
739 static void
740 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
742 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
743 struct napi_struct *napi = &rx_ctrl->napi;
745 if (likely(napi_schedule_prep(napi))) {
746 __napi_schedule(napi);
747 rx_ctrl->rx_schedule++;
751 /* MSIX Rx Path Handler */
752 static irqreturn_t
753 bnad_msix_rx(int irq, void *data)
755 struct bna_ccb *ccb = (struct bna_ccb *)data;
757 if (ccb) {
758 ((struct bnad_rx_ctrl *)ccb->ctrl)->rx_intr_ctr++;
759 bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
762 return IRQ_HANDLED;
765 /* Interrupt handlers */
767 /* Mbox Interrupt Handlers */
768 static irqreturn_t
769 bnad_msix_mbox_handler(int irq, void *data)
771 u32 intr_status;
772 unsigned long flags;
773 struct bnad *bnad = (struct bnad *)data;
775 spin_lock_irqsave(&bnad->bna_lock, flags);
776 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
777 spin_unlock_irqrestore(&bnad->bna_lock, flags);
778 return IRQ_HANDLED;
781 bna_intr_status_get(&bnad->bna, intr_status);
783 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
784 bna_mbox_handler(&bnad->bna, intr_status);
786 spin_unlock_irqrestore(&bnad->bna_lock, flags);
788 return IRQ_HANDLED;
791 static irqreturn_t
792 bnad_isr(int irq, void *data)
794 int i, j;
795 u32 intr_status;
796 unsigned long flags;
797 struct bnad *bnad = (struct bnad *)data;
798 struct bnad_rx_info *rx_info;
799 struct bnad_rx_ctrl *rx_ctrl;
800 struct bna_tcb *tcb = NULL;
802 spin_lock_irqsave(&bnad->bna_lock, flags);
803 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
804 spin_unlock_irqrestore(&bnad->bna_lock, flags);
805 return IRQ_NONE;
808 bna_intr_status_get(&bnad->bna, intr_status);
810 if (unlikely(!intr_status)) {
811 spin_unlock_irqrestore(&bnad->bna_lock, flags);
812 return IRQ_NONE;
815 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
816 bna_mbox_handler(&bnad->bna, intr_status);
818 spin_unlock_irqrestore(&bnad->bna_lock, flags);
820 if (!BNA_IS_INTX_DATA_INTR(intr_status))
821 return IRQ_HANDLED;
823 /* Process data interrupts */
824 /* Tx processing */
825 for (i = 0; i < bnad->num_tx; i++) {
826 for (j = 0; j < bnad->num_txq_per_tx; j++) {
827 tcb = bnad->tx_info[i].tcb[j];
828 if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
829 bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
832 /* Rx processing */
833 for (i = 0; i < bnad->num_rx; i++) {
834 rx_info = &bnad->rx_info[i];
835 if (!rx_info->rx)
836 continue;
837 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
838 rx_ctrl = &rx_info->rx_ctrl[j];
839 if (rx_ctrl->ccb)
840 bnad_netif_rx_schedule_poll(bnad,
841 rx_ctrl->ccb);
844 return IRQ_HANDLED;
847 /*
848 * Called in interrupt / callback context
849 * with bna_lock held, so cfg_flags access is OK
850 */
851 static void
852 bnad_enable_mbox_irq(struct bnad *bnad)
854 clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
856 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
859 /*
860 * Called with bnad->bna_lock held because of
861 * bnad->cfg_flags access.
862 */
863 static void
864 bnad_disable_mbox_irq(struct bnad *bnad)
866 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
868 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
871 static void
872 bnad_set_netdev_perm_addr(struct bnad *bnad)
874 struct net_device *netdev = bnad->netdev;
876 ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
877 if (is_zero_ether_addr(netdev->dev_addr))
878 ether_addr_copy(netdev->dev_addr, bnad->perm_addr);
881 /* Control Path Handlers */
883 /* Callbacks */
884 void
885 bnad_cb_mbox_intr_enable(struct bnad *bnad)
887 bnad_enable_mbox_irq(bnad);
890 void
891 bnad_cb_mbox_intr_disable(struct bnad *bnad)
893 bnad_disable_mbox_irq(bnad);
896 void
897 bnad_cb_ioceth_ready(struct bnad *bnad)
899 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
900 complete(&bnad->bnad_completions.ioc_comp);
903 void
904 bnad_cb_ioceth_failed(struct bnad *bnad)
906 bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
907 complete(&bnad->bnad_completions.ioc_comp);
910 void
911 bnad_cb_ioceth_disabled(struct bnad *bnad)
913 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
914 complete(&bnad->bnad_completions.ioc_comp);
917 static void
918 bnad_cb_enet_disabled(void *arg)
920 struct bnad *bnad = (struct bnad *)arg;
922 netif_carrier_off(bnad->netdev);
923 complete(&bnad->bnad_completions.enet_comp);
926 void
927 bnad_cb_ethport_link_status(struct bnad *bnad,
928 enum bna_link_status link_status)
930 bool link_up = false;
932 link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
934 if (link_status == BNA_CEE_UP) {
935 if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
936 BNAD_UPDATE_CTR(bnad, cee_toggle);
937 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
938 } else {
939 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
940 BNAD_UPDATE_CTR(bnad, cee_toggle);
941 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
944 if (link_up) {
945 if (!netif_carrier_ok(bnad->netdev)) {
946 uint tx_id, tcb_id;
947 netdev_info(bnad->netdev, "link up\n");
948 netif_carrier_on(bnad->netdev);
949 BNAD_UPDATE_CTR(bnad, link_toggle);
950 for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
951 for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
952 tcb_id++) {
953 struct bna_tcb *tcb =
954 bnad->tx_info[tx_id].tcb[tcb_id];
955 u32 txq_id;
956 if (!tcb)
957 continue;
959 txq_id = tcb->id;
961 if (test_bit(BNAD_TXQ_TX_STARTED,
962 &tcb->flags)) {
963 /*
964 * Force an immediate
965 * Transmit Schedule */
966 netif_wake_subqueue(
967 bnad->netdev,
968 txq_id);
969 BNAD_UPDATE_CTR(bnad,
970 netif_queue_wakeup);
971 } else {
972 netif_stop_subqueue(
973 bnad->netdev,
974 txq_id);
975 BNAD_UPDATE_CTR(bnad,
976 netif_queue_stop);
981 } else {
982 if (netif_carrier_ok(bnad->netdev)) {
983 netdev_info(bnad->netdev, "link down\n");
984 netif_carrier_off(bnad->netdev);
985 BNAD_UPDATE_CTR(bnad, link_toggle);
990 static void
991 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
993 struct bnad *bnad = (struct bnad *)arg;
995 complete(&bnad->bnad_completions.tx_comp);
998 static void
999 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
1001 struct bnad_tx_info *tx_info =
1002 (struct bnad_tx_info *)tcb->txq->tx->priv;
1004 tcb->priv = tcb;
1005 tx_info->tcb[tcb->id] = tcb;
1008 static void
1009 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
1011 struct bnad_tx_info *tx_info =
1012 (struct bnad_tx_info *)tcb->txq->tx->priv;
1014 tx_info->tcb[tcb->id] = NULL;
1015 tcb->priv = NULL;
1018 static void
1019 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
1021 struct bnad_rx_info *rx_info =
1022 (struct bnad_rx_info *)ccb->cq->rx->priv;
1024 rx_info->rx_ctrl[ccb->id].ccb = ccb;
1025 ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
1028 static void
1029 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
1031 struct bnad_rx_info *rx_info =
1032 (struct bnad_rx_info *)ccb->cq->rx->priv;
1034 rx_info->rx_ctrl[ccb->id].ccb = NULL;
1037 static void
1038 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
1040 struct bnad_tx_info *tx_info =
1041 (struct bnad_tx_info *)tx->priv;
1042 struct bna_tcb *tcb;
1043 u32 txq_id;
1044 int i;
1046 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1047 tcb = tx_info->tcb[i];
1048 if (!tcb)
1049 continue;
1050 txq_id = tcb->id;
1051 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1052 netif_stop_subqueue(bnad->netdev, txq_id);
1056 static void
1057 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
1059 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1060 struct bna_tcb *tcb;
1061 u32 txq_id;
1062 int i;
1064 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1065 tcb = tx_info->tcb[i];
1066 if (!tcb)
1067 continue;
1068 txq_id = tcb->id;
1070 BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
1071 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1072 BUG_ON(*(tcb->hw_consumer_index) != 0);
1074 if (netif_carrier_ok(bnad->netdev)) {
1075 netif_wake_subqueue(bnad->netdev, txq_id);
1076 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
1080 /*
1081 * Workaround for first ioceth enable failure & we
1082 * get a 0 MAC address. We try to get the MAC address
1083 * again here.
1084 */
1085 if (is_zero_ether_addr(bnad->perm_addr)) {
1086 bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
1087 bnad_set_netdev_perm_addr(bnad);
1091 /*
1092 * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
1093 */
1094 static void
1095 bnad_tx_cleanup(struct delayed_work *work)
1097 struct bnad_tx_info *tx_info =
1098 container_of(work, struct bnad_tx_info, tx_cleanup_work);
1099 struct bnad *bnad = NULL;
1100 struct bna_tcb *tcb;
1101 unsigned long flags;
1102 u32 i, pending = 0;
1104 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1105 tcb = tx_info->tcb[i];
1106 if (!tcb)
1107 continue;
1109 bnad = tcb->bnad;
1111 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
1112 pending++;
1113 continue;
1116 bnad_txq_cleanup(bnad, tcb);
1118 smp_mb__before_atomic();
1119 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
1122 if (pending) {
1123 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
1124 msecs_to_jiffies(1));
1125 return;
1128 spin_lock_irqsave(&bnad->bna_lock, flags);
1129 bna_tx_cleanup_complete(tx_info->tx);
1130 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1133 static void
1134 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
1136 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1137 struct bna_tcb *tcb;
1138 int i;
1140 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1141 tcb = tx_info->tcb[i];
1142 if (!tcb)
1143 continue;
1146 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
1149 static void
1150 bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
1152 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1153 struct bna_ccb *ccb;
1154 struct bnad_rx_ctrl *rx_ctrl;
1155 int i;
1157 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1158 rx_ctrl = &rx_info->rx_ctrl[i];
1159 ccb = rx_ctrl->ccb;
1160 if (!ccb)
1161 continue;
1163 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
1165 if (ccb->rcb[1])
1166 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
1170 /*
1171 * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
1172 */
1173 static void
1174 bnad_rx_cleanup(void *work)
1176 struct bnad_rx_info *rx_info =
1177 container_of(work, struct bnad_rx_info, rx_cleanup_work);
1178 struct bnad_rx_ctrl *rx_ctrl;
1179 struct bnad *bnad = NULL;
1180 unsigned long flags;
1181 u32 i;
1183 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1184 rx_ctrl = &rx_info->rx_ctrl[i];
1186 if (!rx_ctrl->ccb)
1187 continue;
1189 bnad = rx_ctrl->ccb->bnad;
1191 /*
1192 * Wait till the poll handler has exited
1193 * and nothing can be scheduled anymore
1194 */
1195 napi_disable(&rx_ctrl->napi);
1197 bnad_cq_cleanup(bnad, rx_ctrl->ccb);
1198 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
1199 if (rx_ctrl->ccb->rcb[1])
1200 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
1203 spin_lock_irqsave(&bnad->bna_lock, flags);
1204 bna_rx_cleanup_complete(rx_info->rx);
1205 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1208 static void
1209 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1211 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1212 struct bna_ccb *ccb;
1213 struct bnad_rx_ctrl *rx_ctrl;
1214 int i;
1216 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1217 rx_ctrl = &rx_info->rx_ctrl[i];
1218 ccb = rx_ctrl->ccb;
1219 if (!ccb)
1220 continue;
1222 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
1224 if (ccb->rcb[1])
1225 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
1228 queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
1231 static void
1232 bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1234 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1235 struct bna_ccb *ccb;
1236 struct bna_rcb *rcb;
1237 struct bnad_rx_ctrl *rx_ctrl;
1238 int i, j;
1240 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1241 rx_ctrl = &rx_info->rx_ctrl[i];
1242 ccb = rx_ctrl->ccb;
1243 if (!ccb)
1244 continue;
1246 napi_enable(&rx_ctrl->napi);
1248 for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1249 rcb = ccb->rcb[j];
1250 if (!rcb)
1251 continue;
1253 bnad_rxq_alloc_init(bnad, rcb);
1254 set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1255 set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
1256 bnad_rxq_post(bnad, rcb);
1261 static void
1262 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1264 struct bnad *bnad = (struct bnad *)arg;
1266 complete(&bnad->bnad_completions.rx_comp);
1269 static void
1270 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1272 bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1273 complete(&bnad->bnad_completions.mcast_comp);
1276 void
1277 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1278 struct bna_stats *stats)
1280 if (status == BNA_CB_SUCCESS)
1281 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1283 if (!netif_running(bnad->netdev) ||
1284 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1285 return;
1287 mod_timer(&bnad->stats_timer,
1288 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1291 static void
1292 bnad_cb_enet_mtu_set(struct bnad *bnad)
1294 bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1295 complete(&bnad->bnad_completions.mtu_comp);
1298 void
1299 bnad_cb_completion(void *arg, enum bfa_status status)
1301 struct bnad_iocmd_comp *iocmd_comp =
1302 (struct bnad_iocmd_comp *)arg;
1304 iocmd_comp->comp_status = (u32) status;
1305 complete(&iocmd_comp->comp);
1308 /* Resource allocation, free functions */
1310 static void
1311 bnad_mem_free(struct bnad *bnad,
1312 struct bna_mem_info *mem_info)
1314 int i;
1315 dma_addr_t dma_pa;
1317 if (mem_info->mdl == NULL)
1318 return;
1320 for (i = 0; i < mem_info->num; i++) {
1321 if (mem_info->mdl[i].kva != NULL) {
1322 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1323 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1324 dma_pa);
1325 dma_free_coherent(&bnad->pcidev->dev,
1326 mem_info->mdl[i].len,
1327 mem_info->mdl[i].kva, dma_pa);
1328 } else
1329 kfree(mem_info->mdl[i].kva);
1332 kfree(mem_info->mdl);
1333 mem_info->mdl = NULL;
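/*
 * Allocates mem_info->num blocks of mem_info->len bytes each, either as
 * DMA-coherent memory or plain kzalloc()ed KVA depending on mem_type.
 */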
1336 static int
1337 bnad_mem_alloc(struct bnad *bnad,
1338 struct bna_mem_info *mem_info)
1340 int i;
1341 dma_addr_t dma_pa;
1343 if ((mem_info->num == 0) || (mem_info->len == 0)) {
1344 mem_info->mdl = NULL;
1345 return 0;
1348 mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1349 GFP_KERNEL);
1350 if (mem_info->mdl == NULL)
1351 return -ENOMEM;
1353 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1354 for (i = 0; i < mem_info->num; i++) {
1355 mem_info->mdl[i].len = mem_info->len;
1356 mem_info->mdl[i].kva =
1357 dma_alloc_coherent(&bnad->pcidev->dev,
1358 mem_info->len, &dma_pa,
1359 GFP_KERNEL);
1360 if (mem_info->mdl[i].kva == NULL)
1361 goto err_return;
1363 BNA_SET_DMA_ADDR(dma_pa,
1364 &(mem_info->mdl[i].dma));
1366 } else {
1367 for (i = 0; i < mem_info->num; i++) {
1368 mem_info->mdl[i].len = mem_info->len;
1369 mem_info->mdl[i].kva = kzalloc(mem_info->len,
1370 GFP_KERNEL);
1371 if (mem_info->mdl[i].kva == NULL)
1372 goto err_return;
1376 return 0;
1378 err_return:
1379 bnad_mem_free(bnad, mem_info);
1380 return -ENOMEM;
1383 /* Free IRQ for Mailbox */
1384 static void
1385 bnad_mbox_irq_free(struct bnad *bnad)
1387 int irq;
1388 unsigned long flags;
1390 spin_lock_irqsave(&bnad->bna_lock, flags);
1391 bnad_disable_mbox_irq(bnad);
1392 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1394 irq = BNAD_GET_MBOX_IRQ(bnad);
1395 free_irq(irq, bnad);
1398 /*
1399 * Allocates IRQ for Mailbox, but keeps it disabled.
1400 * This will be enabled once we get the mbox enable callback
1401 * from bna
1402 */
1403 static int
1404 bnad_mbox_irq_alloc(struct bnad *bnad)
1406 int err = 0;
1407 unsigned long irq_flags, flags;
1408 u32 irq;
1409 irq_handler_t irq_handler;
1411 spin_lock_irqsave(&bnad->bna_lock, flags);
1412 if (bnad->cfg_flags & BNAD_CF_MSIX) {
1413 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1414 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1415 irq_flags = 0;
1416 } else {
1417 irq_handler = (irq_handler_t)bnad_isr;
1418 irq = bnad->pcidev->irq;
1419 irq_flags = IRQF_SHARED;
1422 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1423 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1425 /*
1426 * Set the Mbox IRQ disable flag, so that the IRQ handler
1427 * called from request_irq() for SHARED IRQs does not execute
1428 */
1429 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1431 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1433 err = request_irq(irq, irq_handler, irq_flags,
1434 bnad->mbox_irq_name, bnad);
1436 return err;
1439 static void
1440 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1442 kfree(intr_info->idl);
1443 intr_info->idl = NULL;
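/*
 * MSI-X vector layout: the mailbox vector(s) come first, followed by one
 * vector per TxQ and then one per RxP; bnad_txrx_irq_alloc() computes the
 * starting vector for a given Tx or Rx object accordingly.
 */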
1446 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1447 static int
1448 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1449 u32 txrx_id, struct bna_intr_info *intr_info)
1451 int i, vector_start = 0;
1452 u32 cfg_flags;
1453 unsigned long flags;
1455 spin_lock_irqsave(&bnad->bna_lock, flags);
1456 cfg_flags = bnad->cfg_flags;
1457 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1459 if (cfg_flags & BNAD_CF_MSIX) {
1460 intr_info->intr_type = BNA_INTR_T_MSIX;
1461 intr_info->idl = kcalloc(intr_info->num,
1462 sizeof(struct bna_intr_descr),
1463 GFP_KERNEL);
1464 if (!intr_info->idl)
1465 return -ENOMEM;
1467 switch (src) {
1468 case BNAD_INTR_TX:
1469 vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1470 break;
1472 case BNAD_INTR_RX:
1473 vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1474 (bnad->num_tx * bnad->num_txq_per_tx) +
1475 txrx_id;
1476 break;
1478 default:
1479 BUG();
1482 for (i = 0; i < intr_info->num; i++)
1483 intr_info->idl[i].vector = vector_start + i;
1484 } else {
1485 intr_info->intr_type = BNA_INTR_T_INTX;
1486 intr_info->num = 1;
1487 intr_info->idl = kcalloc(intr_info->num,
1488 sizeof(struct bna_intr_descr),
1489 GFP_KERNEL);
1490 if (!intr_info->idl)
1491 return -ENOMEM;
1493 switch (src) {
1494 case BNAD_INTR_TX:
1495 intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1496 break;
1498 case BNAD_INTR_RX:
1499 intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1500 break;
1503 return 0;
1506 /* NOTE: Should be called for MSIX only
1507 * Unregisters Tx MSIX vector(s) from the kernel
1508 */
1509 static void
1510 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1511 int num_txqs)
1513 int i;
1514 int vector_num;
1516 for (i = 0; i < num_txqs; i++) {
1517 if (tx_info->tcb[i] == NULL)
1518 continue;
1520 vector_num = tx_info->tcb[i]->intr_vector;
1521 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1525 /* NOTE: Should be called for MSIX only
1526 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1527 */
1528 static int
1529 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1530 u32 tx_id, int num_txqs)
1532 int i;
1533 int err;
1534 int vector_num;
1536 for (i = 0; i < num_txqs; i++) {
1537 vector_num = tx_info->tcb[i]->intr_vector;
1538 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1539 tx_id + tx_info->tcb[i]->id);
1540 err = request_irq(bnad->msix_table[vector_num].vector,
1541 (irq_handler_t)bnad_msix_tx, 0,
1542 tx_info->tcb[i]->name,
1543 tx_info->tcb[i]);
1544 if (err)
1545 goto err_return;
1548 return 0;
1550 err_return:
1551 if (i > 0)
1552 bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1553 return -1;
1556 /* NOTE: Should be called for MSIX only
1557 * Unregisters Rx MSIX vector(s) from the kernel
1558 */
1559 static void
1560 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1561 int num_rxps)
1563 int i;
1564 int vector_num;
1566 for (i = 0; i < num_rxps; i++) {
1567 if (rx_info->rx_ctrl[i].ccb == NULL)
1568 continue;
1570 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1571 free_irq(bnad->msix_table[vector_num].vector,
1572 rx_info->rx_ctrl[i].ccb);
1576 /* NOTE: Should be called for MSIX only
1577 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1578 */
1579 static int
1580 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1581 u32 rx_id, int num_rxps)
1583 int i;
1584 int err;
1585 int vector_num;
1587 for (i = 0; i < num_rxps; i++) {
1588 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1589 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1590 bnad->netdev->name,
1591 rx_id + rx_info->rx_ctrl[i].ccb->id);
1592 err = request_irq(bnad->msix_table[vector_num].vector,
1593 (irq_handler_t)bnad_msix_rx, 0,
1594 rx_info->rx_ctrl[i].ccb->name,
1595 rx_info->rx_ctrl[i].ccb);
1596 if (err)
1597 goto err_return;
1600 return 0;
1602 err_return:
1603 if (i > 0)
1604 bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1605 return -1;
1608 /* Free Tx object Resources */
1609 static void
1610 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1612 int i;
1614 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1615 if (res_info[i].res_type == BNA_RES_T_MEM)
1616 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1617 else if (res_info[i].res_type == BNA_RES_T_INTR)
1618 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1622 /* Allocates memory and interrupt resources for Tx object */
1623 static int
1624 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1625 u32 tx_id)
1627 int i, err = 0;
1629 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1630 if (res_info[i].res_type == BNA_RES_T_MEM)
1631 err = bnad_mem_alloc(bnad,
1632 &res_info[i].res_u.mem_info);
1633 else if (res_info[i].res_type == BNA_RES_T_INTR)
1634 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1635 &res_info[i].res_u.intr_info);
1636 if (err)
1637 goto err_return;
1639 return 0;
1641 err_return:
1642 bnad_tx_res_free(bnad, res_info);
1643 return err;
1646 /* Free Rx object Resources */
1647 static void
1648 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1650 int i;
1652 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1653 if (res_info[i].res_type == BNA_RES_T_MEM)
1654 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1655 else if (res_info[i].res_type == BNA_RES_T_INTR)
1656 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1660 /* Allocates memory and interrupt resources for Rx object */
1661 static int
1662 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1663 uint rx_id)
1665 int i, err = 0;
1667 /* All memory needs to be allocated before setup_ccbs */
1668 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1669 if (res_info[i].res_type == BNA_RES_T_MEM)
1670 err = bnad_mem_alloc(bnad,
1671 &res_info[i].res_u.mem_info);
1672 else if (res_info[i].res_type == BNA_RES_T_INTR)
1673 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1674 &res_info[i].res_u.intr_info);
1675 if (err)
1676 goto err_return;
1678 return 0;
1680 err_return:
1681 bnad_rx_res_free(bnad, res_info);
1682 return err;
1685 /* Timer callbacks */
1686 /* a) IOC timer */
1687 static void
1688 bnad_ioc_timeout(struct timer_list *t)
1690 struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.ioc_timer);
1691 unsigned long flags;
1693 spin_lock_irqsave(&bnad->bna_lock, flags);
1694 bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
1695 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1698 static void
1699 bnad_ioc_hb_check(struct timer_list *t)
1701 struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.hb_timer);
1702 unsigned long flags;
1704 spin_lock_irqsave(&bnad->bna_lock, flags);
1705 bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc);
1706 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1709 static void
1710 bnad_iocpf_timeout(struct timer_list *t)
1712 struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.iocpf_timer);
1713 unsigned long flags;
1715 spin_lock_irqsave(&bnad->bna_lock, flags);
1716 bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc);
1717 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1720 static void
1721 bnad_iocpf_sem_timeout(struct timer_list *t)
1723 struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.sem_timer);
1724 unsigned long flags;
1726 spin_lock_irqsave(&bnad->bna_lock, flags);
1727 bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc);
1728 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1731 /*
1732 * All timer routines use bnad->bna_lock to protect against
1733 * the following race, which may occur in case of no locking:
1734 *	Time	CPU m		CPU n
1735 *	0	1 = test_bit
1736 *	1			clear_bit
1737 *	2			del_timer_sync
1738 *	3	mod_timer
1739 */
1741 /* b) Dynamic Interrupt Moderation Timer */
1742 static void
1743 bnad_dim_timeout(struct timer_list *t)
1745 struct bnad *bnad = from_timer(bnad, t, dim_timer);
1746 struct bnad_rx_info *rx_info;
1747 struct bnad_rx_ctrl *rx_ctrl;
1748 int i, j;
1749 unsigned long flags;
1751 if (!netif_carrier_ok(bnad->netdev))
1752 return;
1754 spin_lock_irqsave(&bnad->bna_lock, flags);
1755 for (i = 0; i < bnad->num_rx; i++) {
1756 rx_info = &bnad->rx_info[i];
1757 if (!rx_info->rx)
1758 continue;
1759 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1760 rx_ctrl = &rx_info->rx_ctrl[j];
1761 if (!rx_ctrl->ccb)
1762 continue;
1763 bna_rx_dim_update(rx_ctrl->ccb);
1767 /* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
1768 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1769 mod_timer(&bnad->dim_timer,
1770 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1771 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1774 /* c) Statistics Timer */
1775 static void
1776 bnad_stats_timeout(struct timer_list *t)
1778 struct bnad *bnad = from_timer(bnad, t, stats_timer);
1779 unsigned long flags;
1781 if (!netif_running(bnad->netdev) ||
1782 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1783 return;
1785 spin_lock_irqsave(&bnad->bna_lock, flags);
1786 bna_hw_stats_get(&bnad->bna);
1787 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1790 /*
1791 * Set up timer for DIM
1792 * Called with bnad->bna_lock held
1793 */
1794 void
1795 bnad_dim_timer_start(struct bnad *bnad)
1797 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1798 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1799 timer_setup(&bnad->dim_timer, bnad_dim_timeout, 0);
1800 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1801 mod_timer(&bnad->dim_timer,
1802 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1806 /*
1807 * Set up timer for statistics
1808 * Called with mutex_lock(&bnad->conf_mutex) held
1809 */
1810 static void
1811 bnad_stats_timer_start(struct bnad *bnad)
1813 unsigned long flags;
1815 spin_lock_irqsave(&bnad->bna_lock, flags);
1816 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1817 timer_setup(&bnad->stats_timer, bnad_stats_timeout, 0);
1818 mod_timer(&bnad->stats_timer,
1819 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1821 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1824 /*
1825 * Stops the stats timer
1826 * Called with mutex_lock(&bnad->conf_mutex) held
1827 */
1828 static void
1829 bnad_stats_timer_stop(struct bnad *bnad)
1831 int to_del = 0;
1832 unsigned long flags;
1834 spin_lock_irqsave(&bnad->bna_lock, flags);
1835 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1836 to_del = 1;
1837 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1838 if (to_del)
1839 del_timer_sync(&bnad->stats_timer);
1842 /* Utilities */
1844 static void
1845 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1847 int i = 1; /* Index 0 has broadcast address */
1848 struct netdev_hw_addr *mc_addr;
1850 netdev_for_each_mc_addr(mc_addr, netdev) {
1851 ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]);
1852 i++;
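/*
 * NAPI poll callback: processes up to @budget completions on the CQ and
 * re-enables the completion interrupt once the queue has been drained.
 */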
1856 static int
1857 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1859 struct bnad_rx_ctrl *rx_ctrl =
1860 container_of(napi, struct bnad_rx_ctrl, napi);
1861 struct bnad *bnad = rx_ctrl->bnad;
1862 int rcvd = 0;
1864 rx_ctrl->rx_poll_ctr++;
1866 if (!netif_carrier_ok(bnad->netdev))
1867 goto poll_exit;
1869 rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
1870 if (rcvd >= budget)
1871 return rcvd;
1873 poll_exit:
1874 napi_complete_done(napi, rcvd);
1876 rx_ctrl->rx_complete++;
1878 if (rx_ctrl->ccb)
1879 bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1881 return rcvd;
1884 #define BNAD_NAPI_POLL_QUOTA 64
1885 static void
1886 bnad_napi_add(struct bnad *bnad, u32 rx_id)
1888 struct bnad_rx_ctrl *rx_ctrl;
1889 int i;
1891 /* Initialize & enable NAPI */
1892 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1893 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1894 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1895 bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1899 static void
1900 bnad_napi_delete(struct bnad *bnad, u32 rx_id)
1902 int i;
1904 /* First disable and then clean up */
1905 for (i = 0; i < bnad->num_rxp_per_rx; i++)
1906 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1909 /* Should be called with conf_lock held */
1910 void
1911 bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
1913 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1914 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1915 unsigned long flags;
1917 if (!tx_info->tx)
1918 return;
1920 init_completion(&bnad->bnad_completions.tx_comp);
1921 spin_lock_irqsave(&bnad->bna_lock, flags);
1922 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1923 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1924 wait_for_completion(&bnad->bnad_completions.tx_comp);
1926 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1927 bnad_tx_msix_unregister(bnad, tx_info,
1928 bnad->num_txq_per_tx);
1930 spin_lock_irqsave(&bnad->bna_lock, flags);
1931 bna_tx_destroy(tx_info->tx);
1932 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1934 tx_info->tx = NULL;
1935 tx_info->tx_id = 0;
1937 bnad_tx_res_free(bnad, res_info);
1940 /* Should be called with conf_lock held */
1941 int
1942 bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1944 int err;
1945 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1946 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1947 struct bna_intr_info *intr_info =
1948 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1949 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1950 static const struct bna_tx_event_cbfn tx_cbfn = {
1951 .tcb_setup_cbfn = bnad_cb_tcb_setup,
1952 .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1953 .tx_stall_cbfn = bnad_cb_tx_stall,
1954 .tx_resume_cbfn = bnad_cb_tx_resume,
1955 .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1958 struct bna_tx *tx;
1959 unsigned long flags;
1961 tx_info->tx_id = tx_id;
1963 /* Initialize the Tx object configuration */
1964 tx_config->num_txq = bnad->num_txq_per_tx;
1965 tx_config->txq_depth = bnad->txq_depth;
1966 tx_config->tx_type = BNA_TX_T_REGULAR;
1967 tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1969 /* Get BNA's resource requirement for one tx object */
1970 spin_lock_irqsave(&bnad->bna_lock, flags);
1971 bna_tx_res_req(bnad->num_txq_per_tx,
1972 bnad->txq_depth, res_info);
1973 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1975 /* Fill Unmap Q memory requirements */
1976 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1977 bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
1978 bnad->txq_depth));
1980 /* Allocate resources */
1981 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1982 if (err)
1983 return err;
1985 /* Ask BNA to create one Tx object, supplying required resources */
1986 spin_lock_irqsave(&bnad->bna_lock, flags);
1987 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1988 tx_info);
1989 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1990 if (!tx) {
1991 err = -ENOMEM;
1992 goto err_return;
1994 tx_info->tx = tx;
1996 INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
1997 (work_func_t)bnad_tx_cleanup);
1999 /* Register ISR for the Tx object */
2000 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2001 err = bnad_tx_msix_register(bnad, tx_info,
2002 tx_id, bnad->num_txq_per_tx);
2003 if (err)
2004 goto cleanup_tx;
2007 spin_lock_irqsave(&bnad->bna_lock, flags);
2008 bna_tx_enable(tx);
2009 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2011 return 0;
2013 cleanup_tx:
2014 spin_lock_irqsave(&bnad->bna_lock, flags);
2015 bna_tx_destroy(tx_info->tx);
2016 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2017 tx_info->tx = NULL;
2018 tx_info->tx_id = 0;
2019 err_return:
2020 bnad_tx_res_free(bnad, res_info);
2021 return err;
2024 /* Setup the rx config for bna_rx_create */
2025 /* bnad decides the configuration */
2026 static void
2027 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
2029 memset(rx_config, 0, sizeof(*rx_config));
2030 rx_config->rx_type = BNA_RX_T_REGULAR;
2031 rx_config->num_paths = bnad->num_rxp_per_rx;
2032 rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
2034 if (bnad->num_rxp_per_rx > 1) {
2035 rx_config->rss_status = BNA_STATUS_T_ENABLED;
2036 rx_config->rss_config.hash_type =
2037 (BFI_ENET_RSS_IPV6 |
2038 BFI_ENET_RSS_IPV6_TCP |
2039 BFI_ENET_RSS_IPV4 |
2040 BFI_ENET_RSS_IPV4_TCP);
2041 rx_config->rss_config.hash_mask =
2042 bnad->num_rxp_per_rx - 1;
2043 netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key,
2044 sizeof(rx_config->rss_config.toeplitz_hash_key));
2045 } else {
2046 rx_config->rss_status = BNA_STATUS_T_DISABLED;
2047 memset(&rx_config->rss_config, 0,
2048 sizeof(rx_config->rss_config));
2051 rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
2052 rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
2054 /* BNA_RXP_SINGLE - one data-buffer queue
2055 * BNA_RXP_SLR - one small-buffer and one large-buffer queues
2056 * BNA_RXP_HDS - one header-buffer and one data-buffer queues
2057 */
2058 /* TODO: configurable param for queue type */
2059 rx_config->rxp_type = BNA_RXP_SLR;
2061 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
2062 rx_config->frame_size > 4096) {
2063 /* though size_routing_enable is set in SLR,
2064 * small packets may get routed to same rxq.
2065 * set buf_size to 2048 instead of PAGE_SIZE.
2066 */
2067 rx_config->q0_buf_size = 2048;
2068 /* this should be in multiples of 2 */
2069 rx_config->q0_num_vecs = 4;
2070 rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
2071 rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
2072 } else {
2073 rx_config->q0_buf_size = rx_config->frame_size;
2074 rx_config->q0_num_vecs = 1;
2075 rx_config->q0_depth = bnad->rxq_depth;
2078 /* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
2079 if (rx_config->rxp_type == BNA_RXP_SLR) {
2080 rx_config->q1_depth = bnad->rxq_depth;
2081 rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
2084 rx_config->vlan_strip_status =
2085 (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ?
2086 BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
2089 static void
2090 bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
2092 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2093 int i;
2095 for (i = 0; i < bnad->num_rxp_per_rx; i++)
2096 rx_info->rx_ctrl[i].bnad = bnad;
2099 /* Called with mutex_lock(&bnad->conf_mutex) held */
2100 static u32
2101 bnad_reinit_rx(struct bnad *bnad)
2103 struct net_device *netdev = bnad->netdev;
2104 u32 err = 0, current_err = 0;
2105 u32 rx_id = 0, count = 0;
2106 unsigned long flags;
2108 /* destroy and create new rx objects */
2109 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2110 if (!bnad->rx_info[rx_id].rx)
2111 continue;
2112 bnad_destroy_rx(bnad, rx_id);
2115 spin_lock_irqsave(&bnad->bna_lock, flags);
2116 bna_enet_mtu_set(&bnad->bna.enet,
2117 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2118 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2120 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2121 count++;
2122 current_err = bnad_setup_rx(bnad, rx_id);
2123 if (current_err && !err) {
2124 err = current_err;
2125 netdev_err(netdev, "RXQ:%u setup failed\n", rx_id);
2129 /* restore rx configuration */
2130 if (bnad->rx_info[0].rx && !err) {
2131 bnad_restore_vlans(bnad, 0);
2132 bnad_enable_default_bcast(bnad);
2133 spin_lock_irqsave(&bnad->bna_lock, flags);
2134 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2135 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2136 bnad_set_rx_mode(netdev);
2139 return count;
2142 /* Called with bnad_conf_lock() held */
2143 void
2144 bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
2146 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2147 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2148 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2149 unsigned long flags;
2150 int to_del = 0;
2152 if (!rx_info->rx)
2153 return;
2155 if (0 == rx_id) {
2156 spin_lock_irqsave(&bnad->bna_lock, flags);
2157 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
2158 test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
2159 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
2160 to_del = 1;
2162 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2163 if (to_del)
2164 del_timer_sync(&bnad->dim_timer);
2167 init_completion(&bnad->bnad_completions.rx_comp);
2168 spin_lock_irqsave(&bnad->bna_lock, flags);
2169 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
2170 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2171 wait_for_completion(&bnad->bnad_completions.rx_comp);
2173 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
2174 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
2176 bnad_napi_delete(bnad, rx_id);
2178 spin_lock_irqsave(&bnad->bna_lock, flags);
2179 bna_rx_destroy(rx_info->rx);
2181 rx_info->rx = NULL;
2182 rx_info->rx_id = 0;
2183 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2185 bnad_rx_res_free(bnad, res_info);
2188 /* Called with mutex_lock(&bnad->conf_mutex) held */
2189 int
2190 bnad_setup_rx(struct bnad *bnad, u32 rx_id)
2192 int err;
2193 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2194 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2195 struct bna_intr_info *intr_info =
2196 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2197 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2198 static const struct bna_rx_event_cbfn rx_cbfn = {
2199 .rcb_setup_cbfn = NULL,
2200 .rcb_destroy_cbfn = NULL,
2201 .ccb_setup_cbfn = bnad_cb_ccb_setup,
2202 .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
2203 .rx_stall_cbfn = bnad_cb_rx_stall,
2204 .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
2205 .rx_post_cbfn = bnad_cb_rx_post,
2207 struct bna_rx *rx;
2208 unsigned long flags;
2210 rx_info->rx_id = rx_id;
2212 /* Initialize the Rx object configuration */
2213 bnad_init_rx_config(bnad, rx_config);
2215 /* Get BNA's resource requirement for one Rx object */
2216 spin_lock_irqsave(&bnad->bna_lock, flags);
2217 bna_rx_res_req(rx_config, res_info);
2218 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2220 /* Fill Unmap Q memory requirements */
2221 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
2222 rx_config->num_paths,
2223 (rx_config->q0_depth *
2224 sizeof(struct bnad_rx_unmap)) +
2225 sizeof(struct bnad_rx_unmap_q));
2227 if (rx_config->rxp_type != BNA_RXP_SINGLE) {
2228 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
2229 rx_config->num_paths,
2230 (rx_config->q1_depth *
2231 sizeof(struct bnad_rx_unmap) +
2232 sizeof(struct bnad_rx_unmap_q)));
2234 /* Allocate resource */
2235 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
2236 if (err)
2237 return err;
2239 bnad_rx_ctrl_init(bnad, rx_id);
2241 /* Ask BNA to create one Rx object, supplying required resources */
2242 spin_lock_irqsave(&bnad->bna_lock, flags);
2243 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
2244 rx_info);
2245 if (!rx) {
2246 err = -ENOMEM;
2247 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2248 goto err_return;
2250 rx_info->rx = rx;
2251 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2253 INIT_WORK(&rx_info->rx_cleanup_work,
2254 (work_func_t)(bnad_rx_cleanup));
2256 /*
2257 * Init NAPI, so that state is set to NAPI_STATE_SCHED,
2258 * so that IRQ handler cannot schedule NAPI at this point (see the ordering sketch after this function).
2259 */
2260 bnad_napi_add(bnad, rx_id);
2262 /* Register ISR for the Rx object */
2263 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2264 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
2265 rx_config->num_paths);
2266 if (err)
2267 goto err_return;
2270 spin_lock_irqsave(&bnad->bna_lock, flags);
2271 if (0 == rx_id) {
2272 /* Set up Dynamic Interrupt Moderation Vector */
2273 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
2274 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
2276 /* Enable VLAN filtering only on the default Rx */
2277 bna_rx_vlanfilter_enable(rx);
2279 /* Start the DIM timer */
2280 bnad_dim_timer_start(bnad);
2283 bna_rx_enable(rx);
2284 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2286 return 0;
2288 err_return:
2289 bnad_destroy_rx(bnad, rx_id);
2290 return err;
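/*
 * Illustrative sketch (not part of this driver) of the ordering the comment
 * in bnad_setup_rx() refers to: the NAPI context must exist (and be left
 * disabled) before the IRQ is requested, and it is only enabled once a
 * handler is in place, so an early interrupt cannot schedule a half-built
 * poll context.  The "example_*" names are hypothetical.
 */
#if 0
static int example_rx_irq_setup(struct napi_struct *napi, unsigned int irq,
				irq_handler_t handler, void *dev_id)
{
	int err;

	/* NAPI was added earlier and starts out not schedulable */
	err = request_irq(irq, handler, 0, "example-rx", dev_id);
	if (err)
		return err;

	napi_enable(napi);	/* only now may the handler schedule it */
	return 0;
}

static void example_rx_irq_teardown(struct napi_struct *napi, unsigned int irq,
				    void *dev_id)
{
	napi_disable(napi);	/* waits for any in-flight poll to finish */
	free_irq(irq, dev_id);
}
#endif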
2293 /* Called with conf_lock & bnad->bna_lock held */
2294 void
2295 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2297 struct bnad_tx_info *tx_info;
2299 tx_info = &bnad->tx_info[0];
2300 if (!tx_info->tx)
2301 return;
2303 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2306 /* Called with conf_lock & bnad->bna_lock held */
2307 void
2308 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2310 struct bnad_rx_info *rx_info;
2311 int i;
2313 for (i = 0; i < bnad->num_rx; i++) {
2314 rx_info = &bnad->rx_info[i];
2315 if (!rx_info->rx)
2316 continue;
2317 bna_rx_coalescing_timeo_set(rx_info->rx,
2318 bnad->rx_coalescing_timeo);
2323 * Called with bnad->bna_lock held
2326 bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr)
2328 int ret;
2330 if (!is_valid_ether_addr(mac_addr))
2331 return -EADDRNOTAVAIL;
2333 /* If datapath is down, pretend everything went through */
2334 if (!bnad->rx_info[0].rx)
2335 return 0;
2337 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr);
2338 if (ret != BNA_CB_SUCCESS)
2339 return -EADDRNOTAVAIL;
2341 return 0;
2344 /* Should be called with conf_lock held */
2346 bnad_enable_default_bcast(struct bnad *bnad)
2348 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2349 int ret;
2350 unsigned long flags;
2352 init_completion(&bnad->bnad_completions.mcast_comp);
2354 spin_lock_irqsave(&bnad->bna_lock, flags);
2355 ret = bna_rx_mcast_add(rx_info->rx, bnad_bcast_addr,
2356 bnad_cb_rx_mcast_add);
2357 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2359 if (ret == BNA_CB_SUCCESS)
2360 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2361 else
2362 return -ENODEV;
2364 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2365 return -ENODEV;
2367 return 0;
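/*
 * Illustrative sketch (not part of this driver) of the completion handshake
 * used in bnad_enable_default_bcast() above: the caller arms a completion,
 * issues the asynchronous bna request under bna_lock, and the firmware-event
 * callback signals it.  The callback shown here is a hypothetical example;
 * the signature of the real bnad_cb_rx_mcast_add() may differ.
 */
#if 0
static void example_mcast_add_cb(struct bnad *bnad, enum bna_cb_status status)
{
	bnad->bnad_completions.mcast_comp_status = status;
	complete(&bnad->bnad_completions.mcast_comp);
}
#endif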
2370 /* Called with mutex_lock(&bnad->conf_mutex) held */
2371 void
2372 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2374 u16 vid;
2375 unsigned long flags;
2377 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2378 spin_lock_irqsave(&bnad->bna_lock, flags);
2379 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2380 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2384 /* Statistics utilities */
2385 void
2386 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2388 int i, j;
2390 for (i = 0; i < bnad->num_rx; i++) {
2391 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2392 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2393 stats->rx_packets += bnad->rx_info[i].
2394 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2395 stats->rx_bytes += bnad->rx_info[i].
2396 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2397 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2398 bnad->rx_info[i].rx_ctrl[j].ccb->
2399 rcb[1]->rxq) {
2400 stats->rx_packets +=
2401 bnad->rx_info[i].rx_ctrl[j].
2402 ccb->rcb[1]->rxq->rx_packets;
2403 stats->rx_bytes +=
2404 bnad->rx_info[i].rx_ctrl[j].
2405 ccb->rcb[1]->rxq->rx_bytes;
2410 for (i = 0; i < bnad->num_tx; i++) {
2411 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2412 if (bnad->tx_info[i].tcb[j]) {
2413 stats->tx_packets +=
2414 bnad->tx_info[i].tcb[j]->txq->tx_packets;
2415 stats->tx_bytes +=
2416 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2422 /*
2423 * Must be called with the bna_lock held.
2424 */
2425 void
2426 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2428 struct bfi_enet_stats_mac *mac_stats;
2429 u32 bmap;
2430 int i;
2432 mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2433 stats->rx_errors =
2434 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2435 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2436 mac_stats->rx_undersize;
2437 stats->tx_errors = mac_stats->tx_fcs_error +
2438 mac_stats->tx_undersize;
2439 stats->rx_dropped = mac_stats->rx_drop;
2440 stats->tx_dropped = mac_stats->tx_drop;
2441 stats->multicast = mac_stats->rx_multicast;
2442 stats->collisions = mac_stats->tx_total_collision;
2444 stats->rx_length_errors = mac_stats->rx_frame_length_error;
2446 /* receive ring buffer overflow ?? */
2448 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2449 stats->rx_frame_errors = mac_stats->rx_alignment_error;
2450 /* recv'r fifo overrun */
2451 bmap = bna_rx_rid_mask(&bnad->bna);
2452 for (i = 0; bmap; i++) {
2453 if (bmap & 1) {
2454 stats->rx_fifo_errors +=
2455 bnad->stats.bna_stats->
2456 hw_stats.rxf_stats[i].frame_drops;
2457 break;
2459 bmap >>= 1;
2463 static void
2464 bnad_mbox_irq_sync(struct bnad *bnad)
2466 u32 irq;
2467 unsigned long flags;
2469 spin_lock_irqsave(&bnad->bna_lock, flags);
2470 if (bnad->cfg_flags & BNAD_CF_MSIX)
2471 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2472 else
2473 irq = bnad->pcidev->irq;
2474 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2476 synchronize_irq(irq);
2479 /* Utility used by bnad_start_xmit, for doing TSO */
2480 static int
2481 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2483 int err;
2485 err = skb_cow_head(skb, 0);
2486 if (err < 0) {
2487 BNAD_UPDATE_CTR(bnad, tso_err);
2488 return err;
2491 /*
2492 * For TSO, the TCP checksum field is seeded with the pseudo-header sum,
2493 * excluding the length field (the IPv6 variant is sketched after this function).
2494 */
2495 if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
2496 struct iphdr *iph = ip_hdr(skb);
2498 /* Do we really need these? */
2499 iph->tot_len = 0;
2500 iph->check = 0;
2502 tcp_hdr(skb)->check =
2503 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2504 IPPROTO_TCP, 0);
2505 BNAD_UPDATE_CTR(bnad, tso4);
2506 } else {
2507 tcp_v6_gso_csum_prep(skb);
2508 BNAD_UPDATE_CTR(bnad, tso6);
2511 return 0;
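/*
 * Illustrative sketch (not part of this driver): the IPv6 counterpart of the
 * seeding above, roughly what tcp_v6_gso_csum_prep() does -- zero the length
 * field and leave only the pseudo-header sum in the TCP checksum, so the
 * hardware can fold the per-segment length in later.
 * example_seed_tso6_csum() is hypothetical.
 */
#if 0
static void example_seed_tso6_csum(struct sk_buff *skb)
{
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	ip6h->payload_len = 0;
	th->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				     0 /* length excluded */, IPPROTO_TCP, 0);
}
#endif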
2514 /*
2515 * Initialize Q numbers depending on Rx Paths
2516 * Called with bnad->bna_lock held, because of cfg_flags
2517 * access.
2518 */
2519 static void
2520 bnad_q_num_init(struct bnad *bnad)
2522 int rxps;
2524 rxps = min((uint)num_online_cpus(),
2525 (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
2527 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2528 rxps = 1; /* INTx */
2530 bnad->num_rx = 1;
2531 bnad->num_tx = 1;
2532 bnad->num_rxp_per_rx = rxps;
2533 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2536 /*
2537 * Adjusts the Q numbers, given a number of msix vectors
2538 * Give preference to RSS over Tx priority queues;
2539 * in that case, just use 1 Tx Q
2540 * Called with bnad->bna_lock held because of cfg_flags access (a worked example follows this function)
2541 */
2542 static void
2543 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2545 bnad->num_txq_per_tx = 1;
2546 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2547 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2548 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2549 bnad->num_rxp_per_rx = msix_vectors -
2550 (bnad->num_tx * bnad->num_txq_per_tx) -
2551 BNAD_MAILBOX_MSIX_VECTORS;
2552 } else
2553 bnad->num_rxp_per_rx = 1;
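/*
 * Illustrative worked example (not part of this driver) of the vector
 * accounting in bnad_q_num_adjust() above.  With, say, 8 granted MSI-X
 * vectors, one Tx queue and one mailbox vector (assuming
 * BNAD_MAILBOX_MSIX_VECTORS is 1), 8 - 1 - 1 = 6 vectors are left for Rx
 * paths, i.e. RSS gets the remainder.  example_num_rxp() is hypothetical.
 */
#if 0
static int example_num_rxp(int msix_vectors, int num_txq, int mbox_vectors)
{
	int rxp = msix_vectors - num_txq - mbox_vectors;

	return rxp >= 1 ? rxp : 1;	/* never fall below one Rx path */
}
#endif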
2556 /* Enable / disable ioceth */
2557 static int
2558 bnad_ioceth_disable(struct bnad *bnad)
2560 unsigned long flags;
2561 int err = 0;
2563 spin_lock_irqsave(&bnad->bna_lock, flags);
2564 init_completion(&bnad->bnad_completions.ioc_comp);
2565 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2566 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2568 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2569 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2571 err = bnad->bnad_completions.ioc_comp_status;
2572 return err;
2575 static int
2576 bnad_ioceth_enable(struct bnad *bnad)
2578 int err = 0;
2579 unsigned long flags;
2581 spin_lock_irqsave(&bnad->bna_lock, flags);
2582 init_completion(&bnad->bnad_completions.ioc_comp);
2583 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2584 bna_ioceth_enable(&bnad->bna.ioceth);
2585 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2587 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2588 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2590 err = bnad->bnad_completions.ioc_comp_status;
2592 return err;
2595 /* Free BNA resources */
2596 static void
2597 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2598 u32 res_val_max)
2600 int i;
2602 for (i = 0; i < res_val_max; i++)
2603 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2606 /* Allocates memory and interrupt resources for BNA */
2607 static int
2608 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2609 u32 res_val_max)
2611 int i, err;
2613 for (i = 0; i < res_val_max; i++) {
2614 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2615 if (err)
2616 goto err_return;
2618 return 0;
2620 err_return:
2621 bnad_res_free(bnad, res_info, res_val_max);
2622 return err;
2625 /* Interrupt enable / disable */
2626 static void
2627 bnad_enable_msix(struct bnad *bnad)
2629 int i, ret;
2630 unsigned long flags;
2632 spin_lock_irqsave(&bnad->bna_lock, flags);
2633 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2634 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2635 return;
2637 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2639 if (bnad->msix_table)
2640 return;
2642 bnad->msix_table =
2643 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2645 if (!bnad->msix_table)
2646 goto intx_mode;
2648 for (i = 0; i < bnad->msix_num; i++)
2649 bnad->msix_table[i].entry = i;
2651 ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
2652 1, bnad->msix_num);
2653 if (ret < 0) {
2654 goto intx_mode;
2655 } else if (ret < bnad->msix_num) {
2656 dev_warn(&bnad->pcidev->dev,
2657 "%d MSI-X vectors allocated < %d requested\n",
2658 ret, bnad->msix_num);
2660 spin_lock_irqsave(&bnad->bna_lock, flags);
2661 /* ret = #of vectors that we got */
2662 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2663 (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2664 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2666 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2667 BNAD_MAILBOX_MSIX_VECTORS;
2669 if (bnad->msix_num > ret) {
2670 pci_disable_msix(bnad->pcidev);
2671 goto intx_mode;
2675 pci_intx(bnad->pcidev, 0);
2677 return;
2679 intx_mode:
2680 dev_warn(&bnad->pcidev->dev,
2681 "MSI-X enable failed - operating in INTx mode\n");
2683 kfree(bnad->msix_table);
2684 bnad->msix_table = NULL;
2685 bnad->msix_num = 0;
2686 spin_lock_irqsave(&bnad->bna_lock, flags);
2687 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2688 bnad_q_num_init(bnad);
2689 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2692 static void
2693 bnad_disable_msix(struct bnad *bnad)
2695 u32 cfg_flags;
2696 unsigned long flags;
2698 spin_lock_irqsave(&bnad->bna_lock, flags);
2699 cfg_flags = bnad->cfg_flags;
2700 if (bnad->cfg_flags & BNAD_CF_MSIX)
2701 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2702 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2704 if (cfg_flags & BNAD_CF_MSIX) {
2705 pci_disable_msix(bnad->pcidev);
2706 kfree(bnad->msix_table);
2707 bnad->msix_table = NULL;
2711 /* Netdev entry points */
2712 static int
2713 bnad_open(struct net_device *netdev)
2715 int err;
2716 struct bnad *bnad = netdev_priv(netdev);
2717 struct bna_pause_config pause_config;
2718 unsigned long flags;
2720 mutex_lock(&bnad->conf_mutex);
2722 /* Tx */
2723 err = bnad_setup_tx(bnad, 0);
2724 if (err)
2725 goto err_return;
2727 /* Rx */
2728 err = bnad_setup_rx(bnad, 0);
2729 if (err)
2730 goto cleanup_tx;
2732 /* Port */
2733 pause_config.tx_pause = 0;
2734 pause_config.rx_pause = 0;
2736 spin_lock_irqsave(&bnad->bna_lock, flags);
2737 bna_enet_mtu_set(&bnad->bna.enet,
2738 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2739 bna_enet_pause_config(&bnad->bna.enet, &pause_config);
2740 bna_enet_enable(&bnad->bna.enet);
2741 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2743 /* Enable broadcast */
2744 bnad_enable_default_bcast(bnad);
2746 /* Restore VLANs, if any */
2747 bnad_restore_vlans(bnad, 0);
2749 /* Set the UCAST address */
2750 spin_lock_irqsave(&bnad->bna_lock, flags);
2751 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2752 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2754 /* Start the stats timer */
2755 bnad_stats_timer_start(bnad);
2757 mutex_unlock(&bnad->conf_mutex);
2759 return 0;
2761 cleanup_tx:
2762 bnad_destroy_tx(bnad, 0);
2764 err_return:
2765 mutex_unlock(&bnad->conf_mutex);
2766 return err;
2769 static int
2770 bnad_stop(struct net_device *netdev)
2772 struct bnad *bnad = netdev_priv(netdev);
2773 unsigned long flags;
2775 mutex_lock(&bnad->conf_mutex);
2777 /* Stop the stats timer */
2778 bnad_stats_timer_stop(bnad);
2780 init_completion(&bnad->bnad_completions.enet_comp);
2782 spin_lock_irqsave(&bnad->bna_lock, flags);
2783 bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2784 bnad_cb_enet_disabled);
2785 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2787 wait_for_completion(&bnad->bnad_completions.enet_comp);
2789 bnad_destroy_tx(bnad, 0);
2790 bnad_destroy_rx(bnad, 0);
2792 /* Synchronize mailbox IRQ */
2793 bnad_mbox_irq_sync(bnad);
2795 mutex_unlock(&bnad->conf_mutex);
2797 return 0;
2800 /* TX */
2801 /* Returns 0 for success */
2802 static int
2803 bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2804 struct sk_buff *skb, struct bna_txq_entry *txqent)
2806 u16 flags = 0;
2807 u32 gso_size;
2808 u16 vlan_tag = 0;
2810 if (skb_vlan_tag_present(skb)) {
2811 vlan_tag = (u16)skb_vlan_tag_get(skb);
2812 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2814 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2815 vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
2816 | (vlan_tag & 0x1fff);
2817 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2819 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2821 if (skb_is_gso(skb)) {
2822 gso_size = skb_shinfo(skb)->gso_size;
2823 if (unlikely(gso_size > bnad->netdev->mtu)) {
2824 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2825 return -EINVAL;
2827 if (unlikely((gso_size + skb_transport_offset(skb) +
2828 tcp_hdrlen(skb)) >= skb->len)) {
2829 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2830 txqent->hdr.wi.lso_mss = 0;
2831 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2832 } else {
2833 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
2834 txqent->hdr.wi.lso_mss = htons(gso_size);
2837 if (bnad_tso_prepare(bnad, skb)) {
2838 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2839 return -EINVAL;
2842 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2843 txqent->hdr.wi.l4_hdr_size_n_offset =
2844 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
2845 tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
2846 } else {
2847 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2848 txqent->hdr.wi.lso_mss = 0;
2850 if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) {
2851 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2852 return -EINVAL;
2855 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2856 __be16 net_proto = vlan_get_protocol(skb);
2857 u8 proto = 0;
2859 if (net_proto == htons(ETH_P_IP))
2860 proto = ip_hdr(skb)->protocol;
2861 #ifdef NETIF_F_IPV6_CSUM
2862 else if (net_proto == htons(ETH_P_IPV6)) {
2863 /* nexthdr may not be TCP immediately. */
2864 proto = ipv6_hdr(skb)->nexthdr;
2866 #endif
2867 if (proto == IPPROTO_TCP) {
2868 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2869 txqent->hdr.wi.l4_hdr_size_n_offset =
2870 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2871 (0, skb_transport_offset(skb)));
2873 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2875 if (unlikely(skb_headlen(skb) <
2876 skb_transport_offset(skb) +
2877 tcp_hdrlen(skb))) {
2878 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2879 return -EINVAL;
2881 } else if (proto == IPPROTO_UDP) {
2882 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2883 txqent->hdr.wi.l4_hdr_size_n_offset =
2884 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2885 (0, skb_transport_offset(skb)));
2887 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2888 if (unlikely(skb_headlen(skb) <
2889 skb_transport_offset(skb) +
2890 sizeof(struct udphdr))) {
2891 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2892 return -EINVAL;
2894 } else {
2896 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2897 return -EINVAL;
2899 } else
2900 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2903 txqent->hdr.wi.flags = htons(flags);
2904 txqent->hdr.wi.frame_length = htonl(skb->len);
2906 return 0;
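/*
 * Illustrative sketch (not part of this driver): how the 802.1Q tag is
 * composed on the CEE path in bnad_txq_wi_prepare() above -- the TxQ
 * priority replaces the PCP bits while the low 13 bits (VID + DEI) of the
 * skb's tag are kept.  For example, priority 5 and VID 100 give
 * (5 << VLAN_PRIO_SHIFT) | 100 = 0xa064.  example_cee_vlan_tag() is
 * hypothetical.
 */
#if 0
static u16 example_cee_vlan_tag(u8 txq_priority, u16 skb_vlan_tag)
{
	return ((txq_priority & 0x7) << VLAN_PRIO_SHIFT) |
	       (skb_vlan_tag & 0x1fff);
}
#endif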
2909 /*
2910 * bnad_start_xmit : Netdev entry point for Transmit
2911 * Called under lock held by net_device
2912 */
2913 static netdev_tx_t
2914 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2916 struct bnad *bnad = netdev_priv(netdev);
2917 u32 txq_id = 0;
2918 struct bna_tcb *tcb = NULL;
2919 struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
2920 u32 prod, q_depth, vect_id;
2921 u32 wis, vectors, len;
2922 int i;
2923 dma_addr_t dma_addr;
2924 struct bna_txq_entry *txqent;
2926 len = skb_headlen(skb);
2928 /* Sanity checks for the skb */
2930 if (unlikely(skb->len <= ETH_HLEN)) {
2931 dev_kfree_skb_any(skb);
2932 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2933 return NETDEV_TX_OK;
2935 if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
2936 dev_kfree_skb_any(skb);
2937 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2938 return NETDEV_TX_OK;
2940 if (unlikely(len == 0)) {
2941 dev_kfree_skb_any(skb);
2942 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2943 return NETDEV_TX_OK;
2946 tcb = bnad->tx_info[0].tcb[txq_id];
2948 /*
2949 * Takes care of the Tx that is scheduled between clearing the flag
2950 * and the netif_tx_stop_all_queues() call.
2951 */
2952 if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2953 dev_kfree_skb_any(skb);
2954 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2955 return NETDEV_TX_OK;
2958 q_depth = tcb->q_depth;
2959 prod = tcb->producer_index;
2960 unmap_q = tcb->unmap_q;
2962 vectors = 1 + skb_shinfo(skb)->nr_frags;
2963 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2965 if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2966 dev_kfree_skb_any(skb);
2967 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2968 return NETDEV_TX_OK;
2971 /* Check for available TxQ resources */
2972 if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2973 if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
2974 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2975 u32 sent;
2976 sent = bnad_txcmpl_process(bnad, tcb);
2977 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2978 bna_ib_ack(tcb->i_dbell, sent);
2979 smp_mb__before_atomic();
2980 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2981 } else {
2982 netif_stop_queue(netdev);
2983 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2986 smp_mb();
2987 /*
2988 * Check again to deal with the race between netif_stop_queue()
2989 * here and netif_wake_queue() in the interrupt handler, which
2990 * does not run under the netif tx lock (see the sketch after this function).
2991 */
2992 if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2993 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2994 return NETDEV_TX_BUSY;
2995 } else {
2996 netif_wake_queue(netdev);
2997 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
3001 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3002 head_unmap = &unmap_q[prod];
3004 /* Program the opcode, flags, frame_len, num_vectors in WI */
3005 if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
3006 dev_kfree_skb_any(skb);
3007 return NETDEV_TX_OK;
3009 txqent->hdr.wi.reserved = 0;
3010 txqent->hdr.wi.num_vectors = vectors;
3012 head_unmap->skb = skb;
3013 head_unmap->nvecs = 0;
3015 /* Program the vectors */
3016 unmap = head_unmap;
3017 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
3018 len, DMA_TO_DEVICE);
3019 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3020 dev_kfree_skb_any(skb);
3021 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3022 return NETDEV_TX_OK;
3024 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
3025 txqent->vector[0].length = htons(len);
3026 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
3027 head_unmap->nvecs++;
3029 for (i = 0, vect_id = 0; i < vectors - 1; i++) {
3030 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3031 u32 size = skb_frag_size(frag);
3033 if (unlikely(size == 0)) {
3034 /* Undo the changes starting at tcb->producer_index */
3035 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3036 tcb->producer_index);
3037 dev_kfree_skb_any(skb);
3038 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
3039 return NETDEV_TX_OK;
3042 len += size;
3044 vect_id++;
3045 if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
3046 vect_id = 0;
3047 BNA_QE_INDX_INC(prod, q_depth);
3048 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3049 txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
3050 unmap = &unmap_q[prod];
3053 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
3054 0, size, DMA_TO_DEVICE);
3055 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3056 /* Undo the changes starting at tcb->producer_index */
3057 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3058 tcb->producer_index);
3059 dev_kfree_skb_any(skb);
3060 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3061 return NETDEV_TX_OK;
3064 dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
3065 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
3066 txqent->vector[vect_id].length = htons(size);
3067 dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
3068 dma_addr);
3069 head_unmap->nvecs++;
3072 if (unlikely(len != skb->len)) {
3073 /* Undo the changes starting at tcb->producer_index */
3074 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
3075 dev_kfree_skb_any(skb);
3076 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
3077 return NETDEV_TX_OK;
3080 BNA_QE_INDX_INC(prod, q_depth);
3081 tcb->producer_index = prod;
3083 wmb();
3085 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3086 return NETDEV_TX_OK;
3088 skb_tx_timestamp(skb);
3090 bna_txq_prod_indx_doorbell(tcb);
3092 return NETDEV_TX_OK;
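/*
 * Illustrative sketch (not part of this driver) of the stop/wake pattern
 * used in bnad_start_xmit() above: after stopping the queue the producer
 * re-checks the free count behind a memory barrier, because the completion
 * path may have freed entries and called netif_wake_queue() in the meantime
 * without holding the netif tx lock.  "needed" and example_free_entries()
 * are hypothetical stand-ins for the work-item accounting above.
 */
#if 0
	netif_stop_queue(netdev);
	smp_mb();	/* order the stop against re-reading the indices */

	if (example_free_entries(tcb) < needed)
		return NETDEV_TX_BUSY;	/* stay stopped, retry later */

	netif_wake_queue(netdev);	/* room appeared while stopping */
#endif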
3095 /*
3096 * bna_lock is used to synchronize reads of the stats structures,
3097 * which are written by BNA under the same lock.
3098 */
3099 static void
3100 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
3102 struct bnad *bnad = netdev_priv(netdev);
3103 unsigned long flags;
3105 spin_lock_irqsave(&bnad->bna_lock, flags);
3107 bnad_netdev_qstats_fill(bnad, stats);
3108 bnad_netdev_hwstats_fill(bnad, stats);
3110 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3113 static void
3114 bnad_set_rx_ucast_fltr(struct bnad *bnad)
3116 struct net_device *netdev = bnad->netdev;
3117 int uc_count = netdev_uc_count(netdev);
3118 enum bna_cb_status ret;
3119 u8 *mac_list;
3120 struct netdev_hw_addr *ha;
3121 int entry;
3123 if (netdev_uc_empty(bnad->netdev)) {
3124 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3125 return;
3128 if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
3129 goto mode_default;
3131 mac_list = kcalloc(ETH_ALEN, uc_count, GFP_ATOMIC);
3132 if (mac_list == NULL)
3133 goto mode_default;
3135 entry = 0;
3136 netdev_for_each_uc_addr(ha, netdev) {
3137 ether_addr_copy(&mac_list[entry * ETH_ALEN], &ha->addr[0]);
3138 entry++;
3141 ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list);
3142 kfree(mac_list);
3144 if (ret != BNA_CB_SUCCESS)
3145 goto mode_default;
3147 return;
3149 /* ucast packets not in UCAM are routed to default function */
3150 mode_default:
3151 bnad->cfg_flags |= BNAD_CF_DEFAULT;
3152 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3155 static void
3156 bnad_set_rx_mcast_fltr(struct bnad *bnad)
3158 struct net_device *netdev = bnad->netdev;
3159 int mc_count = netdev_mc_count(netdev);
3160 enum bna_cb_status ret;
3161 u8 *mac_list;
3163 if (netdev->flags & IFF_ALLMULTI)
3164 goto mode_allmulti;
3166 if (netdev_mc_empty(netdev))
3167 return;
3169 if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
3170 goto mode_allmulti;
3172 mac_list = kcalloc(mc_count + 1, ETH_ALEN, GFP_ATOMIC);
3174 if (mac_list == NULL)
3175 goto mode_allmulti;
3177 ether_addr_copy(&mac_list[0], &bnad_bcast_addr[0]);
3179 /* copy rest of the MCAST addresses */
3180 bnad_netdev_mc_list_get(netdev, mac_list);
3181 ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list);
3182 kfree(mac_list);
3184 if (ret != BNA_CB_SUCCESS)
3185 goto mode_allmulti;
3187 return;
3189 mode_allmulti:
3190 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
3191 bna_rx_mcast_delall(bnad->rx_info[0].rx);
3194 void
3195 bnad_set_rx_mode(struct net_device *netdev)
3197 struct bnad *bnad = netdev_priv(netdev);
3198 enum bna_rxmode new_mode, mode_mask;
3199 unsigned long flags;
3201 spin_lock_irqsave(&bnad->bna_lock, flags);
3203 if (bnad->rx_info[0].rx == NULL) {
3204 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3205 return;
3208 /* clear bnad flags to update it with new settings */
3209 bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
3210 BNAD_CF_ALLMULTI);
3212 new_mode = 0;
3213 if (netdev->flags & IFF_PROMISC) {
3214 new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
3215 bnad->cfg_flags |= BNAD_CF_PROMISC;
3216 } else {
3217 bnad_set_rx_mcast_fltr(bnad);
3219 if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
3220 new_mode |= BNA_RXMODE_ALLMULTI;
3222 bnad_set_rx_ucast_fltr(bnad);
3224 if (bnad->cfg_flags & BNAD_CF_DEFAULT)
3225 new_mode |= BNA_RXMODE_DEFAULT;
3228 mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
3229 BNA_RXMODE_ALLMULTI;
3230 bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask);
3232 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3236 * bna_lock is used to sync writes to netdev->addr
3237 * conf_lock cannot be used since this call may be made
3238 * in a non-blocking context.
3240 static int
3241 bnad_set_mac_address(struct net_device *netdev, void *addr)
3243 int err;
3244 struct bnad *bnad = netdev_priv(netdev);
3245 struct sockaddr *sa = (struct sockaddr *)addr;
3246 unsigned long flags;
3248 spin_lock_irqsave(&bnad->bna_lock, flags);
3250 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
3251 if (!err)
3252 ether_addr_copy(netdev->dev_addr, sa->sa_data);
3254 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3256 return err;
3259 static int
3260 bnad_mtu_set(struct bnad *bnad, int frame_size)
3262 unsigned long flags;
3264 init_completion(&bnad->bnad_completions.mtu_comp);
3266 spin_lock_irqsave(&bnad->bna_lock, flags);
3267 bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
3268 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3270 wait_for_completion(&bnad->bnad_completions.mtu_comp);
3272 return bnad->bnad_completions.mtu_comp_status;
3275 static int
3276 bnad_change_mtu(struct net_device *netdev, int new_mtu)
3278 int err, mtu;
3279 struct bnad *bnad = netdev_priv(netdev);
3280 u32 frame, new_frame;
3282 mutex_lock(&bnad->conf_mutex);
3284 mtu = netdev->mtu;
3285 netdev->mtu = new_mtu;
3287 frame = BNAD_FRAME_SIZE(mtu);
3288 new_frame = BNAD_FRAME_SIZE(new_mtu);
3290 /* check if multi-buffer needs to be enabled */
3291 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
3292 netif_running(bnad->netdev)) {
3293 /* reinit Rx only when the frame size crosses the 4K single-buffer boundary (see the helper sketched after this function) */
3294 if ((frame <= 4096 && new_frame > 4096) ||
3295 (frame > 4096 && new_frame <= 4096))
3296 bnad_reinit_rx(bnad);
3299 err = bnad_mtu_set(bnad, new_frame);
3300 if (err)
3301 err = -EBUSY;
3303 mutex_unlock(&bnad->conf_mutex);
3304 return err;
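/*
 * Illustrative sketch (not part of this driver): the condition under which
 * bnad_change_mtu() above tears down and rebuilds the Rx path -- only when
 * the frame size crosses the 4096-byte single-buffer limit in either
 * direction.  As the code above also checks, this only applies to CAT2
 * adapters while the interface is running.  example_needs_rx_reinit() is
 * hypothetical.
 */
#if 0
static bool example_needs_rx_reinit(u32 old_frame, u32 new_frame)
{
	return (old_frame <= 4096 && new_frame > 4096) ||
	       (old_frame > 4096 && new_frame <= 4096);
}
#endif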
3307 static int
3308 bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3310 struct bnad *bnad = netdev_priv(netdev);
3311 unsigned long flags;
3313 if (!bnad->rx_info[0].rx)
3314 return 0;
3316 mutex_lock(&bnad->conf_mutex);
3318 spin_lock_irqsave(&bnad->bna_lock, flags);
3319 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
3320 set_bit(vid, bnad->active_vlans);
3321 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3323 mutex_unlock(&bnad->conf_mutex);
3325 return 0;
3328 static int
3329 bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3331 struct bnad *bnad = netdev_priv(netdev);
3332 unsigned long flags;
3334 if (!bnad->rx_info[0].rx)
3335 return 0;
3337 mutex_lock(&bnad->conf_mutex);
3339 spin_lock_irqsave(&bnad->bna_lock, flags);
3340 clear_bit(vid, bnad->active_vlans);
3341 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3342 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3344 mutex_unlock(&bnad->conf_mutex);
3346 return 0;
3349 static int bnad_set_features(struct net_device *dev, netdev_features_t features)
3351 struct bnad *bnad = netdev_priv(dev);
3352 netdev_features_t changed = features ^ dev->features;
3354 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) {
3355 unsigned long flags;
3357 spin_lock_irqsave(&bnad->bna_lock, flags);
3359 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3360 bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
3361 else
3362 bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
3364 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3367 return 0;
3370 #ifdef CONFIG_NET_POLL_CONTROLLER
3371 static void
3372 bnad_netpoll(struct net_device *netdev)
3374 struct bnad *bnad = netdev_priv(netdev);
3375 struct bnad_rx_info *rx_info;
3376 struct bnad_rx_ctrl *rx_ctrl;
3377 u32 curr_mask;
3378 int i, j;
3380 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3381 bna_intx_disable(&bnad->bna, curr_mask);
3382 bnad_isr(bnad->pcidev->irq, netdev);
3383 bna_intx_enable(&bnad->bna, curr_mask);
3384 } else {
3386 * Tx processing may happen in sending context, so no need
3387 * to explicitly process completions here
3390 /* Rx processing */
3391 for (i = 0; i < bnad->num_rx; i++) {
3392 rx_info = &bnad->rx_info[i];
3393 if (!rx_info->rx)
3394 continue;
3395 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3396 rx_ctrl = &rx_info->rx_ctrl[j];
3397 if (rx_ctrl->ccb)
3398 bnad_netif_rx_schedule_poll(bnad,
3399 rx_ctrl->ccb);
3404 #endif
3406 static const struct net_device_ops bnad_netdev_ops = {
3407 .ndo_open = bnad_open,
3408 .ndo_stop = bnad_stop,
3409 .ndo_start_xmit = bnad_start_xmit,
3410 .ndo_get_stats64 = bnad_get_stats64,
3411 .ndo_set_rx_mode = bnad_set_rx_mode,
3412 .ndo_validate_addr = eth_validate_addr,
3413 .ndo_set_mac_address = bnad_set_mac_address,
3414 .ndo_change_mtu = bnad_change_mtu,
3415 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
3416 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
3417 .ndo_set_features = bnad_set_features,
3418 #ifdef CONFIG_NET_POLL_CONTROLLER
3419 .ndo_poll_controller = bnad_netpoll
3420 #endif
3423 static void
3424 bnad_netdev_init(struct bnad *bnad, bool using_dac)
3426 struct net_device *netdev = bnad->netdev;
3428 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3429 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3430 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX |
3431 NETIF_F_HW_VLAN_CTAG_RX;
3433 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3434 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3435 NETIF_F_TSO | NETIF_F_TSO6;
3437 netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
3439 if (using_dac)
3440 netdev->features |= NETIF_F_HIGHDMA;
3442 netdev->mem_start = bnad->mmio_start;
3443 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3445 /* MTU range: 46 - 9000 */
3446 netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
3447 netdev->max_mtu = BNAD_JUMBO_MTU;
3449 netdev->netdev_ops = &bnad_netdev_ops;
3450 bnad_set_ethtool_ops(netdev);
3453 /*
3454 * 1. Initialize the bnad structure
3455 * 2. Setup netdev pointer in pci_dev
3456 * 3. Initialize no. of TxQ & CQs & MSIX vectors
3457 * 4. Initialize work queue.
3458 */
3459 static int
3460 bnad_init(struct bnad *bnad,
3461 struct pci_dev *pdev, struct net_device *netdev)
3463 unsigned long flags;
3465 SET_NETDEV_DEV(netdev, &pdev->dev);
3466 pci_set_drvdata(pdev, netdev);
3468 bnad->netdev = netdev;
3469 bnad->pcidev = pdev;
3470 bnad->mmio_start = pci_resource_start(pdev, 0);
3471 bnad->mmio_len = pci_resource_len(pdev, 0);
3472 bnad->bar0 = ioremap(bnad->mmio_start, bnad->mmio_len);
3473 if (!bnad->bar0) {
3474 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3475 return -ENOMEM;
3477 dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0,
3478 (unsigned long long) bnad->mmio_len);
3480 spin_lock_irqsave(&bnad->bna_lock, flags);
3481 if (!bnad_msix_disable)
3482 bnad->cfg_flags = BNAD_CF_MSIX;
3484 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3486 bnad_q_num_init(bnad);
3487 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3489 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3490 (bnad->num_rx * bnad->num_rxp_per_rx) +
3491 BNAD_MAILBOX_MSIX_VECTORS;
3493 bnad->txq_depth = BNAD_TXQ_DEPTH;
3494 bnad->rxq_depth = BNAD_RXQ_DEPTH;
3496 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3497 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3499 sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3500 bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3501 if (!bnad->work_q) {
3502 iounmap(bnad->bar0);
3503 return -ENOMEM;
3506 return 0;
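/*
 * Illustrative sketch (not part of this driver): the MSI-X vector accounting
 * done in bnad_init() above -- one vector per Tx queue, one per Rx path,
 * plus the mailbox vector(s).  example_msix_num() is hypothetical.
 */
#if 0
static int example_msix_num(int num_tx, int num_txq_per_tx,
			    int num_rx, int num_rxp_per_rx, int mbox_vectors)
{
	return num_tx * num_txq_per_tx +
	       num_rx * num_rxp_per_rx +
	       mbox_vectors;
}
#endif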
3509 /*
3510 * Must be called after bnad_pci_uninit()
3511 * so that iounmap() and pci_set_drvdata(NULL)
3512 * happen only after PCI uninitialization.
3513 */
3514 static void
3515 bnad_uninit(struct bnad *bnad)
3517 if (bnad->work_q) {
3518 flush_workqueue(bnad->work_q);
3519 destroy_workqueue(bnad->work_q);
3520 bnad->work_q = NULL;
3523 if (bnad->bar0)
3524 iounmap(bnad->bar0);
3527 /*
3528 * Initialize locks
3529 * a) Per ioceth mutex used for serializing configuration
3530 * changes from OS interface
3531 * b) spin lock used to protect bna state machine (usage sketched after bnad_lock_uninit() below)
3532 */
3533 static void
3534 bnad_lock_init(struct bnad *bnad)
3536 spin_lock_init(&bnad->bna_lock);
3537 mutex_init(&bnad->conf_mutex);
3540 static void
3541 bnad_lock_uninit(struct bnad *bnad)
3543 mutex_destroy(&bnad->conf_mutex);
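/*
 * Illustrative sketch (not part of this driver) of how the two locks
 * initialized above are used throughout this file: blocking configuration
 * paths take conf_mutex first and then dip into bna_lock (an irq-safe
 * spinlock) only around short, non-sleeping calls into the bna state
 * machine.  bnad and flags are assumed to be in scope, as in the callers
 * above and below.
 */
#if 0
	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	/* ... call into bna ... */
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
#endif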
3546 /* PCI Initialization */
3547 static int
3548 bnad_pci_init(struct bnad *bnad,
3549 struct pci_dev *pdev, bool *using_dac)
3551 int err;
3553 err = pci_enable_device(pdev);
3554 if (err)
3555 return err;
3556 err = pci_request_regions(pdev, BNAD_NAME);
3557 if (err)
3558 goto disable_device;
3559 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3560 *using_dac = true;
3561 } else {
3562 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3563 if (err)
3564 goto release_regions;
3565 *using_dac = false;
3567 pci_set_master(pdev);
3568 return 0;
3570 release_regions:
3571 pci_release_regions(pdev);
3572 disable_device:
3573 pci_disable_device(pdev);
3575 return err;
3578 static void
3579 bnad_pci_uninit(struct pci_dev *pdev)
3581 pci_release_regions(pdev);
3582 pci_disable_device(pdev);
3585 static int
3586 bnad_pci_probe(struct pci_dev *pdev,
3587 const struct pci_device_id *pcidev_id)
3589 bool using_dac;
3590 int err;
3591 struct bnad *bnad;
3592 struct bna *bna;
3593 struct net_device *netdev;
3594 struct bfa_pcidev pcidev_info;
3595 unsigned long flags;
3597 mutex_lock(&bnad_fwimg_mutex);
3598 if (!cna_get_firmware_buf(pdev)) {
3599 mutex_unlock(&bnad_fwimg_mutex);
3600 dev_err(&pdev->dev, "failed to load firmware image!\n");
3601 return -ENODEV;
3603 mutex_unlock(&bnad_fwimg_mutex);
3606 * Allocates sizeof(struct net_device + struct bnad)
3607 * bnad = netdev->priv
3609 netdev = alloc_etherdev(sizeof(struct bnad));
3610 if (!netdev) {
3611 err = -ENOMEM;
3612 return err;
3614 bnad = netdev_priv(netdev);
3615 bnad_lock_init(bnad);
3616 bnad->id = atomic_inc_return(&bna_id) - 1;
3618 mutex_lock(&bnad->conf_mutex);
3620 * PCI initialization
3621 * Output : using_dac = 1 for 64 bit DMA
3622 * = 0 for 32 bit DMA
3624 using_dac = false;
3625 err = bnad_pci_init(bnad, pdev, &using_dac);
3626 if (err)
3627 goto unlock_mutex;
3630 * Initialize bnad structure
3631 * Setup relation between pci_dev & netdev
3633 err = bnad_init(bnad, pdev, netdev);
3634 if (err)
3635 goto pci_uninit;
3637 /* Initialize netdev structure, set up ethtool ops */
3638 bnad_netdev_init(bnad, using_dac);
3640 /* Set link to down state */
3641 netif_carrier_off(netdev);
3643 /* Set up the debugfs node for this bnad */
3644 if (bna_debugfs_enable)
3645 bnad_debugfs_init(bnad);
3647 /* Get resource requirement form bna */
3648 spin_lock_irqsave(&bnad->bna_lock, flags);
3649 bna_res_req(&bnad->res_info[0]);
3650 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3652 /* Allocate resources from bna */
3653 err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3654 if (err)
3655 goto drv_uninit;
3657 bna = &bnad->bna;
3659 /* Setup pcidev_info for bna_init() */
3660 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3661 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3662 pcidev_info.device_id = bnad->pcidev->device;
3663 pcidev_info.pci_bar_kva = bnad->bar0;
3665 spin_lock_irqsave(&bnad->bna_lock, flags);
3666 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3667 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3669 bnad->stats.bna_stats = &bna->stats;
3671 bnad_enable_msix(bnad);
3672 err = bnad_mbox_irq_alloc(bnad);
3673 if (err)
3674 goto res_free;
3676 /* Set up timers */
3677 timer_setup(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout, 0);
3678 timer_setup(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check, 0);
3679 timer_setup(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout, 0);
3680 timer_setup(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3684 * Start the chip
3685 * If the call back comes with error, we bail out.
3686 * This is a catastrophic error.
3688 err = bnad_ioceth_enable(bnad);
3689 if (err) {
3690 dev_err(&pdev->dev, "initialization failed err=%d\n", err);
3691 goto probe_success;
3694 spin_lock_irqsave(&bnad->bna_lock, flags);
3695 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3696 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3697 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3698 bna_attr(bna)->num_rxp - 1);
3699 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3700 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3701 err = -EIO;
3703 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3704 if (err)
3705 goto disable_ioceth;
3707 spin_lock_irqsave(&bnad->bna_lock, flags);
3708 bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3709 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3711 err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3712 if (err) {
3713 err = -EIO;
3714 goto disable_ioceth;
3717 spin_lock_irqsave(&bnad->bna_lock, flags);
3718 bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3719 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3721 /* Get the burnt-in mac */
3722 spin_lock_irqsave(&bnad->bna_lock, flags);
3723 bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr);
3724 bnad_set_netdev_perm_addr(bnad);
3725 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3727 mutex_unlock(&bnad->conf_mutex);
3729 /* Finally, register with the net_device layer */
3730 err = register_netdev(netdev);
3731 if (err) {
3732 dev_err(&pdev->dev, "registering net device failed\n");
3733 goto probe_uninit;
3735 set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3737 return 0;
3739 probe_success:
3740 mutex_unlock(&bnad->conf_mutex);
3741 return 0;
3743 probe_uninit:
3744 mutex_lock(&bnad->conf_mutex);
3745 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3746 disable_ioceth:
3747 bnad_ioceth_disable(bnad);
3748 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3749 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3750 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3751 spin_lock_irqsave(&bnad->bna_lock, flags);
3752 bna_uninit(bna);
3753 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3754 bnad_mbox_irq_free(bnad);
3755 bnad_disable_msix(bnad);
3756 res_free:
3757 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3758 drv_uninit:
3759 /* Remove the debugfs node for this bnad */
3760 kfree(bnad->regdata);
3761 bnad_debugfs_uninit(bnad);
3762 bnad_uninit(bnad);
3763 pci_uninit:
3764 bnad_pci_uninit(pdev);
3765 unlock_mutex:
3766 mutex_unlock(&bnad->conf_mutex);
3767 bnad_lock_uninit(bnad);
3768 free_netdev(netdev);
3769 return err;
3772 static void
3773 bnad_pci_remove(struct pci_dev *pdev)
3775 struct net_device *netdev = pci_get_drvdata(pdev);
3776 struct bnad *bnad;
3777 struct bna *bna;
3778 unsigned long flags;
3780 if (!netdev)
3781 return;
3783 bnad = netdev_priv(netdev);
3784 bna = &bnad->bna;
3786 if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3787 unregister_netdev(netdev);
3789 mutex_lock(&bnad->conf_mutex);
3790 bnad_ioceth_disable(bnad);
3791 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3792 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3793 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3794 spin_lock_irqsave(&bnad->bna_lock, flags);
3795 bna_uninit(bna);
3796 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3798 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3799 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3800 bnad_mbox_irq_free(bnad);
3801 bnad_disable_msix(bnad);
3802 bnad_pci_uninit(pdev);
3803 mutex_unlock(&bnad->conf_mutex);
3804 bnad_lock_uninit(bnad);
3805 /* Remove the debugfs node for this bnad */
3806 kfree(bnad->regdata);
3807 bnad_debugfs_uninit(bnad);
3808 bnad_uninit(bnad);
3809 free_netdev(netdev);
3812 static const struct pci_device_id bnad_pci_id_table[] = {
3814 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3815 PCI_DEVICE_ID_BROCADE_CT),
3816 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3817 .class_mask = 0xffff00
3820 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3821 BFA_PCI_DEVICE_ID_CT2),
3822 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3823 .class_mask = 0xffff00
3825 {0, },
3828 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3830 static struct pci_driver bnad_pci_driver = {
3831 .name = BNAD_NAME,
3832 .id_table = bnad_pci_id_table,
3833 .probe = bnad_pci_probe,
3834 .remove = bnad_pci_remove,
3837 static int __init
3838 bnad_module_init(void)
3840 int err;
3842 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3844 err = pci_register_driver(&bnad_pci_driver);
3845 if (err < 0) {
3846 pr_err("bna: PCI driver registration failed err=%d\n", err);
3847 return err;
3850 return 0;
3853 static void __exit
3854 bnad_module_exit(void)
3856 pci_unregister_driver(&bnad_pci_driver);
3857 release_firmware(bfi_fw);
3860 module_init(bnad_module_init);
3861 module_exit(bnad_module_exit);
3863 MODULE_AUTHOR("Brocade");
3864 MODULE_LICENSE("GPL");
3865 MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver");
3866 MODULE_FIRMWARE(CNA_FW_FILE_CT);
3867 MODULE_FIRMWARE(CNA_FW_FILE_CT2);