/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/prefetch.h>
#include <linux/module.h>

#include "bnad.h"
#include "bna.h"
#include "cna.h"
static DEFINE_MUTEX(bnad_fwimg_mutex);

/*
 * Module params
 */
static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");

static uint bna_debugfs_enable = 1;
module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
		 " Range[false:0|true:1]");
/*
 * Global variables
 */
static u32 bnad_rxqs_per_cq = 2;
static atomic_t bna_id;
static const u8 bnad_bcast_addr[] __aligned(2) =
	{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
/*
 * Local MACROS
 */
#define BNAD_GET_MBOX_IRQ(_bnad)				\
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
	 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
	 ((_bnad)->pcidev->irq))

#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size)	\
do {								\
	(_res_info)->res_type = BNA_RES_T_MEM;			\
	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
	(_res_info)->res_u.mem_info.num = (_num);		\
	(_res_info)->res_u.mem_info.len = (_size);		\
} while (0)
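/*
 * Illustrative usage sketch of the macro above (hypothetical values, not a
 * call site from this file): filling the unmap-queue memory requirement for
 * a Tx object with 4 queues of 2048 entries each would look like
 *
 *	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
 *				 4, 2048 * sizeof(struct bnad_tx_unmap));
 *
 * i.e. the resource is described as 4 chunks of kernel-virtual (KVA)
 * memory, each large enough to hold one queue's unmap array.
 */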
/*
 * Reinitialize completions in CQ, once Rx is taken down
 */
static void
bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bna_cq_entry *cmpl;
	int i;

	for (i = 0; i < ccb->q_depth; i++) {
		cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
		cmpl->valid = 0;
	}
}
/* Tx Datapath functions */

/* Caller should ensure that the entry at unmap_q[index] is valid */
static u32
bnad_tx_buff_unmap(struct bnad *bnad,
		   struct bnad_tx_unmap *unmap_q,
		   u32 q_depth, u32 index)
{
	struct bnad_tx_unmap *unmap;
	struct sk_buff *skb;
	int vector, nvecs;

	unmap = &unmap_q[index];
	nvecs = unmap->nvecs;

	skb = unmap->skb;
	unmap->skb = NULL;
	unmap->nvecs = 0;
	dma_unmap_single(&bnad->pcidev->dev,
		dma_unmap_addr(&unmap->vectors[0], dma_addr),
		skb_headlen(skb), DMA_TO_DEVICE);
	dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
	nvecs--;

	vector = 0;
	while (nvecs) {
		vector++;
		if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
			vector = 0;
			BNA_QE_INDX_INC(index, q_depth);
			unmap = &unmap_q[index];
		}

		dma_unmap_page(&bnad->pcidev->dev,
			dma_unmap_addr(&unmap->vectors[vector], dma_addr),
			dma_unmap_len(&unmap->vectors[vector], dma_len),
			DMA_TO_DEVICE);
		dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
		nvecs--;
	}

	BNA_QE_INDX_INC(index, q_depth);

	return index;
}
/*
 * Frees all pending Tx Bufs
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
static void
bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
	struct sk_buff *skb;
	int i;

	for (i = 0; i < tcb->q_depth; i++) {
		skb = unmap_q[i].skb;
		if (!skb)
			continue;

		bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);

		dev_kfree_skb_any(skb);
	}
}
/*
 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 *		    b) Sending context
 */
static u32
bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
{
	u32 sent_packets = 0, sent_bytes = 0;
	u32 wis, unmap_wis, hw_cons, cons, q_depth;
	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
	struct bnad_tx_unmap *unmap;
	struct sk_buff *skb;

	/* Just return if TX is stopped */
	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return 0;

	hw_cons = *(tcb->hw_consumer_index);
	rmb();
	cons = tcb->consumer_index;
	q_depth = tcb->q_depth;

	wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));

	while (wis) {
		unmap = &unmap_q[cons];

		skb = unmap->skb;

		sent_packets++;
		sent_bytes += skb->len;

		unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
		wis -= unmap_wis;

		cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
		dev_kfree_skb_any(skb);
	}

	/* Update consumer pointers. */
	tcb->consumer_index = hw_cons;

	tcb->txq->tx_packets += sent_packets;
	tcb->txq->tx_bytes += sent_bytes;

	return sent_packets;
}
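/*
 * Worked example for the completion accounting above (illustrative,
 * assuming BNA_Q_INDEX_CHANGE() is the usual masked difference on a
 * power-of-2 ring): with q_depth = 64, consumer_index = 60 and
 * *hw_consumer_index = 4, the number of work items to reclaim is
 * (4 - 60) & 63 = 8, which correctly accounts for the wrap past the end
 * of the ring.
 */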
static u32
bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct net_device *netdev = bnad->netdev;
	u32 sent = 0;

	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		return 0;

	sent = bnad_txcmpl_process(bnad, tcb);
	if (sent) {
		if (netif_queue_stopped(netdev) &&
		    netif_carrier_ok(netdev) &&
		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
				    BNAD_NETIF_WAKE_THRESHOLD) {
			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
				netif_wake_queue(netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
			}
		}
	}

	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		bna_ib_ack(tcb->i_dbell, sent);

	smp_mb__before_atomic();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	return sent;
}

/* MSIX Tx Completion Handler */
static irqreturn_t
bnad_msix_tx(int irq, void *data)
{
	struct bna_tcb *tcb = (struct bna_tcb *)data;
	struct bnad *bnad = tcb->bnad;

	bnad_tx_complete(bnad, tcb);

	return IRQ_HANDLED;
}
static inline void
bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;

	unmap_q->reuse_pi = -1;
	unmap_q->alloc_order = -1;
	unmap_q->map_size = 0;
	unmap_q->type = BNAD_RXBUF_NONE;
}
/* Default is page-based allocation. Multi-buffer support - TBD */
static int
bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	int order;

	bnad_rxq_alloc_uninit(bnad, rcb);

	order = get_order(rcb->rxq->buffer_size);

	unmap_q->type = BNAD_RXBUF_PAGE;

	if (bna_is_small_rxq(rcb->id)) {
		unmap_q->alloc_order = 0;
		unmap_q->map_size = rcb->rxq->buffer_size;
	} else {
		if (rcb->rxq->multi_buffer) {
			unmap_q->alloc_order = 0;
			unmap_q->map_size = rcb->rxq->buffer_size;
			unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
		} else {
			unmap_q->alloc_order = order;
			unmap_q->map_size =
				(rcb->rxq->buffer_size > 2048) ?
				PAGE_SIZE << order : 2048;
		}
	}

	BUG_ON((PAGE_SIZE << order) % unmap_q->map_size);

	return 0;
}
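/*
 * Sizing example for the logic above (illustrative, assuming 4 KB pages):
 * a large RxQ with buffer_size = 2048 gets order = 0 and map_size = 2048,
 * so each page is carved into two receive buffers; a jumbo buffer_size of
 * 9000 gets order = 2 and map_size = PAGE_SIZE << 2 = 16384, i.e. one
 * buffer per 4-page allocation. The BUG_ON() enforces that map_size evenly
 * divides the allocation so a carved buffer never straddles allocations.
 */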
static inline void
bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
{
	if (!unmap->page)
		return;

	dma_unmap_page(&bnad->pcidev->dev,
		       dma_unmap_addr(&unmap->vector, dma_addr),
		       unmap->vector.len, DMA_FROM_DEVICE);
	put_page(unmap->page);
	unmap->page = NULL;
	dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
	unmap->vector.len = 0;
}
static inline void
bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
{
	if (!unmap->skb)
		return;

	dma_unmap_single(&bnad->pcidev->dev,
			 dma_unmap_addr(&unmap->vector, dma_addr),
			 unmap->vector.len, DMA_FROM_DEVICE);
	dev_kfree_skb_any(unmap->skb);
	unmap->skb = NULL;
	dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
	unmap->vector.len = 0;
}
static void
bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	int i;

	for (i = 0; i < rcb->q_depth; i++) {
		struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
			bnad_rxq_cleanup_skb(bnad, unmap);
		else
			bnad_rxq_cleanup_page(bnad, unmap);
	}
	bnad_rxq_alloc_uninit(bnad, rcb);
}
static u32
bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
{
	u32 alloced, prod, q_depth;
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_rx_unmap *unmap, *prev;
	struct bna_rxq_entry *rxent;
	struct page *page;
	u32 page_offset, alloc_size;
	dma_addr_t dma_addr;

	prod = rcb->producer_index;
	q_depth = rcb->q_depth;

	alloc_size = PAGE_SIZE << unmap_q->alloc_order;
	alloced = 0;

	while (nalloc--) {
		unmap = &unmap_q->unmap[prod];

		if (unmap_q->reuse_pi < 0) {
			page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
					   unmap_q->alloc_order);
			page_offset = 0;
		} else {
			prev = &unmap_q->unmap[unmap_q->reuse_pi];
			page = prev->page;
			page_offset = prev->page_offset + unmap_q->map_size;
			get_page(page);
		}

		if (unlikely(!page)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			rcb->rxq->rxbuf_alloc_failed++;
			goto finishing;
		}

		dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
					unmap_q->map_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
			put_page(page);
			BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
			rcb->rxq->rxbuf_map_failed++;
			goto finishing;
		}

		unmap->page = page;
		unmap->page_offset = page_offset;
		dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
		unmap->vector.len = unmap_q->map_size;
		page_offset += unmap_q->map_size;

		if (page_offset < alloc_size)
			unmap_q->reuse_pi = prod;
		else
			unmap_q->reuse_pi = -1;

		rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_INC(prod, q_depth);
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		rcb->producer_index = prod;
		smp_mb();
		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}

	return alloced;
}
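/*
 * Refill walk-through (illustrative): with map_size = 2048 and
 * alloc_order = 0 on a 4 KB page, the first pass allocates a fresh page at
 * offset 0 and records reuse_pi = prod because 2048 < 4096; the next
 * iteration takes the second half of the same page (offset 2048, with
 * get_page() bumping the refcount) and then clears reuse_pi since the page
 * is fully carved up.
 */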
static u32
bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
{
	u32 alloced, prod, q_depth, buff_sz;
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_rx_unmap *unmap;
	struct bna_rxq_entry *rxent;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	buff_sz = rcb->rxq->buffer_size;
	prod = rcb->producer_index;
	q_depth = rcb->q_depth;

	alloced = 0;
	while (nalloc--) {
		unmap = &unmap_q->unmap[prod];

		skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);

		if (unlikely(!skb)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			rcb->rxq->rxbuf_alloc_failed++;
			goto finishing;
		}

		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
					  buff_sz, DMA_FROM_DEVICE);
		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
			dev_kfree_skb_any(skb);
			BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
			rcb->rxq->rxbuf_map_failed++;
			goto finishing;
		}

		unmap->skb = skb;
		dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
		unmap->vector.len = buff_sz;

		rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_INC(prod, q_depth);
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		rcb->producer_index = prod;
		smp_mb();
		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}

	return alloced;
}
static inline void
bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	u32 to_alloc;

	to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
	if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
		return;

	if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
		bnad_rxq_refill_skb(bnad, rcb, to_alloc);
	else
		bnad_rxq_refill_page(bnad, rcb, to_alloc);
}
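/*
 * Note on the refill trigger above: the shift test means a refill is only
 * attempted once at least 2^BNAD_RXQ_REFILL_THRESHOLD_SHIFT entries are
 * free, batching buffer allocation and doorbell writes instead of
 * refilling one entry at a time.
 */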
#define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
					BNA_CQ_EF_IPV6 | \
					BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
					BNA_CQ_EF_L4_CKSUM_OK)

#define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
				BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_tcp6 (BNA_CQ_EF_IPV6 | \
				BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
				BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_udp6 (BNA_CQ_EF_IPV6 | \
				BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
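/*
 * Example of how these masks are used by the Rx completion path below
 * (illustrative): for a clean TCP/IPv4 frame the adapter sets
 * BNA_CQ_EF_IPV4, BNA_CQ_EF_L3_CKSUM_OK, BNA_CQ_EF_TCP and
 * BNA_CQ_EF_L4_CKSUM_OK, so
 *
 *	masked_flags = flags & flags_cksum_prot_mask;
 *	if (masked_flags == flags_tcp4)
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 *
 * Any missing bit (e.g. a failed L4 checksum) makes the equality test fail
 * and the skb falls back to software checksumming.
 */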
static void
bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
		    u32 sop_ci, u32 nvecs)
{
	struct bnad_rx_unmap_q *unmap_q;
	struct bnad_rx_unmap *unmap;
	u32 ci, vec;

	unmap_q = rcb->unmap_q;
	for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
		unmap = &unmap_q->unmap[ci];
		BNA_QE_INDX_INC(ci, rcb->q_depth);

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
			bnad_rxq_cleanup_skb(bnad, unmap);
		else
			bnad_rxq_cleanup_page(bnad, unmap);
	}
}
static void
bnad_cq_setup_skb_frags(struct bna_ccb *ccb, struct sk_buff *skb, u32 nvecs)
{
	struct bna_rcb *rcb;
	struct bnad *bnad;
	struct bnad_rx_unmap_q *unmap_q;
	struct bna_cq_entry *cq, *cmpl;
	u32 ci, pi, totlen = 0;

	cq = ccb->sw_q;
	pi = ccb->producer_index;
	cmpl = &cq[pi];

	rcb = bna_is_small_rxq(cmpl->rxq_id) ? ccb->rcb[1] : ccb->rcb[0];
	unmap_q = rcb->unmap_q;
	bnad = rcb->bnad;
	ci = rcb->consumer_index;

	/* prefetch header */
	prefetch(page_address(unmap_q->unmap[ci].page) +
		 unmap_q->unmap[ci].page_offset);

	while (nvecs--) {
		struct bnad_rx_unmap *unmap;
		u32 len;

		unmap = &unmap_q->unmap[ci];
		BNA_QE_INDX_INC(ci, rcb->q_depth);

		dma_unmap_page(&bnad->pcidev->dev,
			       dma_unmap_addr(&unmap->vector, dma_addr),
			       unmap->vector.len, DMA_FROM_DEVICE);

		len = ntohs(cmpl->length);
		skb->truesize += unmap->vector.len;
		totlen += len;

		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   unmap->page, unmap->page_offset, len);

		unmap->page = NULL;
		unmap->vector.len = 0;

		BNA_QE_INDX_INC(pi, ccb->q_depth);
		cmpl = &cq[pi];
	}

	skb->len += totlen;
	skb->data_len += totlen;
}
static inline void
bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
		  struct bnad_rx_unmap *unmap, u32 len)
{
	prefetch(skb->data);

	dma_unmap_single(&bnad->pcidev->dev,
			 dma_unmap_addr(&unmap->vector, dma_addr),
			 unmap->vector.len, DMA_FROM_DEVICE);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, bnad->netdev);

	unmap->skb = NULL;
	unmap->vector.len = 0;
}
static u32
bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
	struct bna_cq_entry *cq, *cmpl, *next_cmpl;
	struct bna_rcb *rcb = NULL;
	struct bnad_rx_unmap_q *unmap_q;
	struct bnad_rx_unmap *unmap = NULL;
	struct sk_buff *skb = NULL;
	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
	struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
	u32 packets = 0, len = 0, totlen = 0;
	u32 pi, vec, sop_ci = 0, nvecs = 0;
	u32 flags, masked_flags;

	prefetch(bnad->netdev);

	cq = ccb->sw_q;

	while (packets < budget) {
		cmpl = &cq[ccb->producer_index];
		if (!cmpl->valid)
			break;
		/* The 'valid' field is set by the adapter, only after writing
		 * the other fields of completion entry. Hence, do not load
		 * other fields of completion entry *before* the 'valid' is
		 * loaded. Adding the rmb() here prevents the compiler and/or
		 * CPU from reordering the reads which would potentially result
		 * in reading stale values in completion entry.
		 */
		rmb();

		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));

		if (bna_is_small_rxq(cmpl->rxq_id))
			rcb = ccb->rcb[1];
		else
			rcb = ccb->rcb[0];

		unmap_q = rcb->unmap_q;

		/* start of packet ci */
		sop_ci = rcb->consumer_index;

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
			unmap = &unmap_q->unmap[sop_ci];
			skb = unmap->skb;
		} else {
			skb = napi_get_frags(&rx_ctrl->napi);
			if (unlikely(!skb))
				break;
		}
		prefetch(skb);

		flags = ntohl(cmpl->flags);
		len = ntohs(cmpl->length);
		totlen = len;
		nvecs = 1;

		/* Check all the completions for this frame.
		 * busy-wait doesn't help much, break here.
		 */
		if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
		    (flags & BNA_CQ_EF_EOP) == 0) {
			pi = ccb->producer_index;
			do {
				BNA_QE_INDX_INC(pi, ccb->q_depth);
				next_cmpl = &cq[pi];

				if (!next_cmpl->valid)
					break;
				/* The 'valid' field is set by the adapter, only
				 * after writing the other fields of completion
				 * entry. Hence, do not load other fields of
				 * completion entry *before* the 'valid' is
				 * loaded. Adding the rmb() here prevents the
				 * compiler and/or CPU from reordering the reads
				 * which would potentially result in reading
				 * stale values in completion entry.
				 */
				rmb();

				len = ntohs(next_cmpl->length);
				flags = ntohl(next_cmpl->flags);

				nvecs++;
				totlen += len;
			} while ((flags & BNA_CQ_EF_EOP) == 0);

			if (!next_cmpl->valid)
				break;
		}
		packets++;

		/* TODO: BNA_CQ_EF_LOCAL ? */
		if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
				      BNA_CQ_EF_FCS_ERROR |
				      BNA_CQ_EF_TOO_LONG))) {
			bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
			rcb->rxq->rx_packets_with_error++;

			goto next;
		}

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
			bnad_cq_setup_skb(bnad, skb, unmap, len);
		else
			bnad_cq_setup_skb_frags(ccb, skb, nvecs);

		rcb->rxq->rx_packets++;
		rcb->rxq->rx_bytes += totlen;
		ccb->bytes_per_intr += totlen;

		masked_flags = flags & flags_cksum_prot_mask;

		if (likely
		    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
		     ((masked_flags == flags_tcp4) ||
		      (masked_flags == flags_udp4) ||
		      (masked_flags == flags_tcp6) ||
		      (masked_flags == flags_udp6))))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		if ((flags & BNA_CQ_EF_VLAN) &&
		    (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       ntohs(cmpl->vlan_tag));

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
			netif_receive_skb(skb);
		else
			napi_gro_frags(&rx_ctrl->napi);

next:
		BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
		for (vec = 0; vec < nvecs; vec++) {
			cmpl = &cq[ccb->producer_index];
			cmpl->valid = 0;
			BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
		}
	}

	napi_gro_flush(&rx_ctrl->napi, false);
	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
		bna_ib_ack_disable_irq(ccb->i_dbell, packets);

	bnad_rxq_post(bnad, ccb->rcb[0]);
	if (ccb->rcb[1])
		bnad_rxq_post(bnad, ccb->rcb[1]);

	return packets;
}
static void
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
	struct napi_struct *napi = &rx_ctrl->napi;

	if (likely(napi_schedule_prep(napi))) {
		__napi_schedule(napi);
		rx_ctrl->rx_schedule++;
	}
}
/* MSIX Rx Path Handler */
static irqreturn_t
bnad_msix_rx(int irq, void *data)
{
	struct bna_ccb *ccb = (struct bna_ccb *)data;

	if (ccb) {
		((struct bnad_rx_ctrl *)ccb->ctrl)->rx_intr_ctr++;
		bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
	}

	return IRQ_HANDLED;
}
/* Interrupt handlers */

/* Mbox Interrupt Handlers */
static irqreturn_t
bnad_msix_mbox_handler(int irq, void *data)
{
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_HANDLED;
	}

	bna_intr_status_get(&bnad->bna, intr_status);

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return IRQ_HANDLED;
}
static irqreturn_t
bnad_isr(int irq, void *data)
{
	int i, j;
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	struct bna_tcb *tcb = NULL;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_NONE;
	}

	bna_intr_status_get(&bnad->bna, intr_status);

	if (unlikely(!intr_status)) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_NONE;
	}

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (!BNA_IS_INTX_DATA_INTR(intr_status))
		return IRQ_HANDLED;

	/* Process data interrupts */
	/* Tx processing */
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
				bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
		}
	}
	/* Rx processing */
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (rx_ctrl->ccb)
				bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
		}
	}
	return IRQ_HANDLED;
}
/*
 * Called in interrupt / callback context
 * with bna_lock held, so cfg_flags access is OK
 */
static void
bnad_enable_mbox_irq(struct bnad *bnad)
{
	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
}

/*
 * Called with bnad->bna_lock held b'cos of
 * bnad->cfg_flags access.
 */
static void
bnad_disable_mbox_irq(struct bnad *bnad)
{
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
}
static void
bnad_set_netdev_perm_addr(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;

	ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
	if (is_zero_ether_addr(netdev->dev_addr))
		ether_addr_copy(netdev->dev_addr, bnad->perm_addr);
}
/* Control Path Handlers */

/* Callbacks */
void
bnad_cb_mbox_intr_enable(struct bnad *bnad)
{
	bnad_enable_mbox_irq(bnad);
}

void
bnad_cb_mbox_intr_disable(struct bnad *bnad)
{
	bnad_disable_mbox_irq(bnad);
}

void
bnad_cb_ioceth_ready(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_failed(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
	complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_disabled(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}
static void
bnad_cb_enet_disabled(void *arg)
{
	struct bnad *bnad = (struct bnad *)arg;

	netif_carrier_off(bnad->netdev);
	complete(&bnad->bnad_completions.enet_comp);
}
void
bnad_cb_ethport_link_status(struct bnad *bnad,
			    enum bna_link_status link_status)
{
	bool link_up = false;

	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);

	if (link_status == BNA_CEE_UP) {
		if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	} else {
		if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	}

	if (link_up) {
		if (!netif_carrier_ok(bnad->netdev)) {
			uint tx_id, tcb_id;

			netdev_info(bnad->netdev, "link up\n");
			netif_carrier_on(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
			for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
				for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
				      tcb_id++) {
					struct bna_tcb *tcb =
					bnad->tx_info[tx_id].tcb[tcb_id];
					u32 txq_id;
					if (!tcb)
						continue;

					txq_id = tcb->id;

					if (test_bit(BNAD_TXQ_TX_STARTED,
						     &tcb->flags)) {
						/*
						 * Force an immediate
						 * Transmit Schedule */
						netif_wake_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_wakeup);
					} else {
						netif_stop_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_stop);
					}
				}
			}
		}
	} else {
		if (netif_carrier_ok(bnad->netdev)) {
			netdev_info(bnad->netdev, "link down\n");
			netif_carrier_off(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
		}
	}
}
static void
bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.tx_comp);
}
static void
bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;

	tcb->priv = tcb;
	tx_info->tcb[tcb->id] = tcb;
}

static void
bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;

	tx_info->tcb[tcb->id] = NULL;
	tcb->priv = NULL;
}
static void
bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = ccb;
	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
}

static void
bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = NULL;
}
static void
bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	u32 txq_id;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
		txq_id = tcb->id;
		clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
		netif_stop_subqueue(bnad->netdev, txq_id);
	}
}
static void
bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	u32 txq_id;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
		txq_id = tcb->id;

		BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
		set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
		BUG_ON(*(tcb->hw_consumer_index) != 0);

		if (netif_carrier_ok(bnad->netdev)) {
			netif_wake_subqueue(bnad->netdev, txq_id);
			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
		}
	}

	/*
	 * Workaround for first ioceth enable failure & we
	 * get a 0 MAC address. We try to get the MAC address
	 * later.
	 */
	if (is_zero_ether_addr(bnad->perm_addr)) {
		bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
		bnad_set_netdev_perm_addr(bnad);
	}
}
/*
 * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
 */
static void
bnad_tx_cleanup(struct delayed_work *work)
{
	struct bnad_tx_info *tx_info =
		container_of(work, struct bnad_tx_info, tx_cleanup_work);
	struct bnad *bnad = NULL;
	struct bna_tcb *tcb;
	unsigned long flags;
	u32 i, pending = 0;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;

		bnad = tcb->bnad;

		if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
			pending++;
			continue;
		}

		bnad_txq_cleanup(bnad, tcb);

		smp_mb__before_atomic();
		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
	}

	if (pending) {
		queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
			msecs_to_jiffies(1));
		return;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_cleanup_complete(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
static void
bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
	}

	queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
}
static void
bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);

		if (ccb->rcb[1])
			clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
	}
}
/*
 * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
 */
static void
bnad_rx_cleanup(void *work)
{
	struct bnad_rx_info *rx_info =
		container_of(work, struct bnad_rx_info, rx_cleanup_work);
	struct bnad_rx_ctrl *rx_ctrl;
	struct bnad *bnad = NULL;
	unsigned long flags;
	u32 i;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];

		if (!rx_ctrl->ccb)
			continue;

		bnad = rx_ctrl->ccb->bnad;

		/*
		 * Wait till the poll handler has exited
		 * and nothing can be scheduled anymore
		 */
		napi_disable(&rx_ctrl->napi);

		bnad_cq_cleanup(bnad, rx_ctrl->ccb);
		bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
		if (rx_ctrl->ccb->rcb[1])
			bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_cleanup_complete(rx_info->rx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
static void
bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);

		if (ccb->rcb[1])
			clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
	}

	queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
}
static void
bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bna_rcb *rcb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i, j;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		napi_enable(&rx_ctrl->napi);

		for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
			rcb = ccb->rcb[j];
			if (!rcb)
				continue;

			bnad_rxq_alloc_init(bnad, rcb);
			set_bit(BNAD_RXQ_STARTED, &rcb->flags);
			set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
			bnad_rxq_post(bnad, rcb);
		}
	}
}
static void
bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.rx_comp);
}

static void
bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
{
	bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.mcast_comp);
}
void
bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
		  struct bna_stats *stats)
{
	if (status == BNA_CB_SUCCESS)
		BNAD_UPDATE_CTR(bnad, hw_stats_updates);

	if (!netif_running(bnad->netdev) ||
	    !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	mod_timer(&bnad->stats_timer,
		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
}
static void
bnad_cb_enet_mtu_set(struct bnad *bnad)
{
	bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.mtu_comp);
}

void
bnad_cb_completion(void *arg, enum bfa_status status)
{
	struct bnad_iocmd_comp *iocmd_comp =
			(struct bnad_iocmd_comp *)arg;

	iocmd_comp->comp_status = (u32) status;
	complete(&iocmd_comp->comp);
}
/* Resource allocation, free functions */

static void
bnad_mem_free(struct bnad *bnad,
	      struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if (mem_info->mdl == NULL)
		return;

	for (i = 0; i < mem_info->num; i++) {
		if (mem_info->mdl[i].kva != NULL) {
			if (mem_info->mem_type == BNA_MEM_T_DMA) {
				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
						dma_pa);
				dma_free_coherent(&bnad->pcidev->dev,
						  mem_info->mdl[i].len,
						  mem_info->mdl[i].kva, dma_pa);
			} else
				kfree(mem_info->mdl[i].kva);
		}
	}
	kfree(mem_info->mdl);
	mem_info->mdl = NULL;
}
static int
bnad_mem_alloc(struct bnad *bnad,
	       struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if ((mem_info->num == 0) || (mem_info->len == 0)) {
		mem_info->mdl = NULL;
		return 0;
	}

	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
				GFP_KERNEL);
	if (mem_info->mdl == NULL)
		return -ENOMEM;

	if (mem_info->mem_type == BNA_MEM_T_DMA) {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva =
				dma_alloc_coherent(&bnad->pcidev->dev,
						   mem_info->len, &dma_pa,
						   GFP_KERNEL);
			if (mem_info->mdl[i].kva == NULL)
				goto err_return;

			BNA_SET_DMA_ADDR(dma_pa,
					 &(mem_info->mdl[i].dma));
		}
	} else {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva = kzalloc(mem_info->len,
						       GFP_KERNEL);
			if (mem_info->mdl[i].kva == NULL)
				goto err_return;
		}
	}

	return 0;

err_return:
	bnad_mem_free(bnad, mem_info);
	return -ENOMEM;
}
/* Free IRQ for Mailbox */
static void
bnad_mbox_irq_free(struct bnad *bnad)
{
	int irq;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_disable_mbox_irq(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	irq = BNAD_GET_MBOX_IRQ(bnad);
	free_irq(irq, bnad);
}
/*
 * Allocates IRQ for Mailbox, but keep it disabled
 * This will be enabled once we get the mbox enable callback
 * from bna
 */
static int
bnad_mbox_irq_alloc(struct bnad *bnad)
{
	int err = 0;
	unsigned long irq_flags, flags;
	u32 irq;
	irq_handler_t irq_handler;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX) {
		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
		irq_flags = 0;
	} else {
		irq_handler = (irq_handler_t)bnad_isr;
		irq = bnad->pcidev->irq;
		irq_flags = IRQF_SHARED;
	}

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);

	/*
	 * Set the Mbox IRQ disable flag, so that the IRQ handler
	 * called from request_irq() for SHARED IRQs do not execute
	 */
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);

	err = request_irq(irq, irq_handler, irq_flags,
			  bnad->mbox_irq_name, bnad);

	return err;
}
static void
bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
{
	kfree(intr_info->idl);
	intr_info->idl = NULL;
}
/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
static int
bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
		    u32 txrx_id, struct bna_intr_info *intr_info)
{
	int i, vector_start = 0;
	u32 cfg_flags;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		intr_info->intr_type = BNA_INTR_T_MSIX;
		intr_info->idl = kcalloc(intr_info->num,
					 sizeof(struct bna_intr_descr),
					 GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
			break;

		case BNAD_INTR_RX:
			vector_start = BNAD_MAILBOX_MSIX_VECTORS +
				       (bnad->num_tx * bnad->num_txq_per_tx) +
				       txrx_id;
			break;

		default:
			BUG();
		}

		for (i = 0; i < intr_info->num; i++)
			intr_info->idl[i].vector = vector_start + i;
	} else {
		intr_info->intr_type = BNA_INTR_T_INTX;
		intr_info->num = 1;
		intr_info->idl = kcalloc(intr_info->num,
					 sizeof(struct bna_intr_descr),
					 GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
			break;

		case BNAD_INTR_RX:
			intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
			break;
		}
	}
	return 0;
}
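/*
 * MSIX vector layout implied by the computation above (illustrative,
 * assuming BNAD_MAILBOX_MSIX_VECTORS == 1, with 1 Tx object of 1 TxQ and
 * 4 Rx paths): vector 0 is the mailbox, the Tx completion vector is 1,
 * and the Rx path vectors occupy 2..5; txrx_id is the offset of a queue
 * within its own group.
 */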
/* NOTE: Should be called for MSIX only
 * Unregisters Tx MSIX vector(s) from the kernel
 */
static void
bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
			int num_txqs)
{
	int i;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		if (tx_info->tcb[i] == NULL)
			continue;

		vector_num = tx_info->tcb[i]->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
	}
}
/* NOTE: Should be called for MSIX only
 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
			u32 tx_id, int num_txqs)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		vector_num = tx_info->tcb[i]->intr_vector;
		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
				tx_id + tx_info->tcb[i]->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_tx, 0,
				  tx_info->tcb[i]->name,
				  tx_info->tcb[i]);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
	return -1;
}
/* NOTE: Should be called for MSIX only
 * Unregisters Rx MSIX vector(s) from the kernel
 */
static void
bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
			int num_rxps)
{
	int i;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		if (rx_info->rx_ctrl[i].ccb == NULL)
			continue;

		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector,
			 rx_info->rx_ctrl[i].ccb);
	}
}
/* NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
			u32 rx_id, int num_rxps)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
			bnad->netdev->name,
			rx_id + rx_info->rx_ctrl[i].ccb->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_rx, 0,
				  rx_info->rx_ctrl[i].ccb->name,
				  rx_info->rx_ctrl[i].ccb);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
	return -1;
}
/* Free Tx object Resources */
static void
bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}
/* Allocates memory and interrupt resources for Tx object */
static int
bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  u32 tx_id)
{
	int i, err = 0;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					&res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
					&res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}
/* Free Rx object Resources */
static void
bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}
/* Allocates memory and interrupt resources for Rx object */
static int
bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  u32 rx_id)
{
	int i, err = 0;

	/* All memory needs to be allocated before setup_ccbs */
	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					&res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
					&res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_rx_res_free(bnad, res_info);
	return err;
}
/* Timer callbacks */
/* a) IOC timer */
static void
bnad_ioc_timeout(struct timer_list *t)
{
	struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.ioc_timer);
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_ioc_hb_check(struct timer_list *t)
{
	struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.hb_timer);
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_timeout(struct timer_list *t)
{
	struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.iocpf_timer);
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_sem_timeout(struct timer_list *t)
{
	struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.sem_timer);
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
/*
 * All timer routines use bnad->bna_lock to protect against
 * the following race, which may occur in case of no locking:
 *	Time	CPU m	CPU n
 *	0       1 = test_bit
 *	1			clear_bit
 *	2			del_timer_sync
 *	3	mod_timer
 */

/* b) Dynamic Interrupt Moderation Timer */
static void
bnad_dim_timeout(struct timer_list *t)
{
	struct bnad *bnad = from_timer(bnad, t, dim_timer);
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	int i, j;
	unsigned long flags;

	if (!netif_carrier_ok(bnad->netdev))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (!rx_ctrl->ccb)
				continue;
			bna_rx_dim_update(rx_ctrl->ccb);
		}
	}

	/* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
/* c) Statistics Timer */
static void
bnad_stats_timeout(struct timer_list *t)
{
	struct bnad *bnad = from_timer(bnad, t, stats_timer);
	unsigned long flags;

	if (!netif_running(bnad->netdev) ||
	    !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_hw_stats_get(&bnad->bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
/*
 * Set up timer for DIM
 * Called with bnad->bna_lock held
 */
void
bnad_dim_timer_start(struct bnad *bnad)
{
	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
		timer_setup(&bnad->dim_timer, bnad_dim_timeout, 0);
		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	}
}
/*
 * Set up timer for statistics
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_start(struct bnad *bnad)
{
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
		timer_setup(&bnad->stats_timer, bnad_stats_timeout, 0);
		mod_timer(&bnad->stats_timer,
			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
/*
 * Stops the stats timer
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_stop(struct bnad *bnad)
{
	int to_del = 0;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		to_del = 1;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (to_del)
		del_timer_sync(&bnad->stats_timer);
}
/* Utilities */

static void
bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
{
	int i = 1; /* Index 0 has broadcast address */
	struct netdev_hw_addr *mc_addr;

	netdev_for_each_mc_addr(mc_addr, netdev) {
		ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]);
		i++;
	}
}
static int
bnad_napi_poll_rx(struct napi_struct *napi, int budget)
{
	struct bnad_rx_ctrl *rx_ctrl =
		container_of(napi, struct bnad_rx_ctrl, napi);
	struct bnad *bnad = rx_ctrl->bnad;
	int rcvd = 0;

	rx_ctrl->rx_poll_ctr++;

	if (!netif_carrier_ok(bnad->netdev))
		goto poll_exit;

	rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
	if (rcvd >= budget)
		return rcvd;

poll_exit:
	napi_complete_done(napi, rcvd);

	rx_ctrl->rx_complete++;

	if (rx_ctrl->ccb)
		bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);

	return rcvd;
}
#define BNAD_NAPI_POLL_QUOTA		64
static void
bnad_napi_add(struct bnad *bnad, u32 rx_id)
{
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	/* Initialize & enable NAPI */
	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
			       bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
	}
}
static void
bnad_napi_delete(struct bnad *bnad, u32 rx_id)
{
	int i;

	/* First disable and then clean up */
	for (i = 0; i < bnad->num_rxp_per_rx; i++)
		netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
}
/* Should be held with conf_lock held */
void
bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
{
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	unsigned long flags;

	if (!tx_info->tx)
		return;

	init_completion(&bnad->bnad_completions.tx_comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&bnad->bnad_completions.tx_comp);

	if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
		bnad_tx_msix_unregister(bnad, tx_info,
			bnad->num_txq_per_tx);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_destroy(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	tx_info->tx = NULL;
	tx_info->tx_id = 0;

	bnad_tx_res_free(bnad, res_info);
}
/* Should be held with conf_lock held */
int
bnad_setup_tx(struct bnad *bnad, u32 tx_id)
{
	int err;
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	struct bna_intr_info *intr_info =
			&res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
	static const struct bna_tx_event_cbfn tx_cbfn = {
		.tcb_setup_cbfn = bnad_cb_tcb_setup,
		.tcb_destroy_cbfn = bnad_cb_tcb_destroy,
		.tx_stall_cbfn = bnad_cb_tx_stall,
		.tx_resume_cbfn = bnad_cb_tx_resume,
		.tx_cleanup_cbfn = bnad_cb_tx_cleanup,
	};
	struct bna_tx *tx;
	unsigned long flags;

	tx_info->tx_id = tx_id;

	/* Initialize the Tx object configuration */
	tx_config->num_txq = bnad->num_txq_per_tx;
	tx_config->txq_depth = bnad->txq_depth;
	tx_config->tx_type = BNA_TX_T_REGULAR;
	tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;

	/* Get BNA's resource requirement for one tx object */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_res_req(bnad->num_txq_per_tx,
		bnad->txq_depth, res_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Fill Unmap Q memory requirements */
	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
			bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
			bnad->txq_depth));

	/* Allocate resources */
	err = bnad_tx_res_alloc(bnad, res_info, tx_id);
	if (err)
		return err;

	/* Ask BNA to create one Tx object, supplying required resources */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
			tx_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (!tx) {
		err = -ENOMEM;
		goto err_return;
	}
	tx_info->tx = tx;

	INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
			(work_func_t)bnad_tx_cleanup);

	/* Register ISR for the Tx object */
	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
		err = bnad_tx_msix_register(bnad, tx_info,
			tx_id, bnad->num_txq_per_tx);
		if (err)
			goto cleanup_tx;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_enable(tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return 0;

cleanup_tx:
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_destroy(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	tx_info->tx = NULL;
	tx_info->tx_id = 0;
err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}
/* Setup the rx config for bna_rx_create */
/* bnad decides the configuration */
static void
bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
{
	memset(rx_config, 0, sizeof(*rx_config));
	rx_config->rx_type = BNA_RX_T_REGULAR;
	rx_config->num_paths = bnad->num_rxp_per_rx;
	rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;

	if (bnad->num_rxp_per_rx > 1) {
		rx_config->rss_status = BNA_STATUS_T_ENABLED;
		rx_config->rss_config.hash_type =
				(BFI_ENET_RSS_IPV6 |
				 BFI_ENET_RSS_IPV6_TCP |
				 BFI_ENET_RSS_IPV4 |
				 BFI_ENET_RSS_IPV4_TCP);
		rx_config->rss_config.hash_mask =
				bnad->num_rxp_per_rx - 1;
		netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key,
			sizeof(rx_config->rss_config.toeplitz_hash_key));
	} else {
		rx_config->rss_status = BNA_STATUS_T_DISABLED;
		memset(&rx_config->rss_config, 0,
		       sizeof(rx_config->rss_config));
	}

	rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
	rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;

	/* BNA_RXP_SINGLE - one data-buffer queue
	 * BNA_RXP_SLR - one small-buffer and one large-buffer queues
	 * BNA_RXP_HDS - one header-buffer and one data-buffer queues
	 */
	/* TODO: configurable param for queue type */
	rx_config->rxp_type = BNA_RXP_SLR;

	if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
	    rx_config->frame_size > 4096) {
		/* though size_routing_enable is set in SLR,
		 * small packets may get routed to same rxq.
		 * set buf_size to 2048 instead of PAGE_SIZE.
		 */
		rx_config->q0_buf_size = 2048;
		/* this should be in multiples of 2 */
		rx_config->q0_num_vecs = 4;
		rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
		rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
	} else {
		rx_config->q0_buf_size = rx_config->frame_size;
		rx_config->q0_num_vecs = 1;
		rx_config->q0_depth = bnad->rxq_depth;
	}

	/* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
	if (rx_config->rxp_type == BNA_RXP_SLR) {
		rx_config->q1_depth = bnad->rxq_depth;
		rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
	}

	rx_config->vlan_strip_status =
		(bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ?
		BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
}
static void
bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
{
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	int i;

	for (i = 0; i < bnad->num_rxp_per_rx; i++)
		rx_info->rx_ctrl[i].bnad = bnad;
}
/* Called with mutex_lock(&bnad->conf_mutex) held */
static u32
bnad_reinit_rx(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;
	u32 err = 0, current_err = 0;
	u32 rx_id = 0, count = 0;
	unsigned long flags;

	/* destroy and create new rx objects */
	for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
		if (!bnad->rx_info[rx_id].rx)
			continue;
		bnad_destroy_rx(bnad, rx_id);
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_mtu_set(&bnad->bna.enet,
			 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
		count++;
		current_err = bnad_setup_rx(bnad, rx_id);
		if (current_err && !err) {
			err = current_err;
			netdev_err(netdev, "RXQ:%u setup failed\n", rx_id);
		}
	}

	/* restore rx configuration */
	if (bnad->rx_info[0].rx && !err) {
		bnad_restore_vlans(bnad, 0);
		bnad_enable_default_bcast(bnad);
		spin_lock_irqsave(&bnad->bna_lock, flags);
		bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		bnad_set_rx_mode(netdev);
	}

	return count;
}
/* Called with bnad_conf_lock() held */
void
bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
{
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
	unsigned long flags;
	int to_del = 0;

	if (!rx_info->rx)
		return;

	if (0 == rx_id) {
		spin_lock_irqsave(&bnad->bna_lock, flags);
		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
		    test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
			clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
			to_del = 1;
		}
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		if (to_del)
			del_timer_sync(&bnad->dim_timer);
	}

	init_completion(&bnad->bnad_completions.rx_comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&bnad->bnad_completions.rx_comp);

	if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
		bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);

	bnad_napi_delete(bnad, rx_id);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_destroy(rx_info->rx);

	rx_info->rx = NULL;
	rx_info->rx_id = 0;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_rx_res_free(bnad, res_info);
}
/* Called with mutex_lock(&bnad->conf_mutex) held */
int
bnad_setup_rx(struct bnad *bnad, u32 rx_id)
{
	int err;
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
	struct bna_intr_info *intr_info =
			&res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
	static const struct bna_rx_event_cbfn rx_cbfn = {
		.rcb_setup_cbfn = NULL,
		.rcb_destroy_cbfn = NULL,
		.ccb_setup_cbfn = bnad_cb_ccb_setup,
		.ccb_destroy_cbfn = bnad_cb_ccb_destroy,
		.rx_stall_cbfn = bnad_cb_rx_stall,
		.rx_cleanup_cbfn = bnad_cb_rx_cleanup,
		.rx_post_cbfn = bnad_cb_rx_post,
	};
	struct bna_rx *rx;
	unsigned long flags;

	rx_info->rx_id = rx_id;

	/* Initialize the Rx object configuration */
	bnad_init_rx_config(bnad, rx_config);

	/* Get BNA's resource requirement for one Rx object */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_res_req(rx_config, res_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Fill Unmap Q memory requirements */
	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
				 rx_config->num_paths,
			(rx_config->q0_depth *
			 sizeof(struct bnad_rx_unmap)) +
			 sizeof(struct bnad_rx_unmap_q));

	if (rx_config->rxp_type != BNA_RXP_SINGLE) {
		BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
					 rx_config->num_paths,
				(rx_config->q1_depth *
				 sizeof(struct bnad_rx_unmap) +
				 sizeof(struct bnad_rx_unmap_q)));
	}
	/* Allocate resource */
	err = bnad_rx_res_alloc(bnad, res_info, rx_id);
	if (err)
		return err;

	bnad_rx_ctrl_init(bnad, rx_id);

	/* Ask BNA to create one Rx object, supplying required resources */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
			rx_info);
	if (!rx) {
		err = -ENOMEM;
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		goto err_return;
	}
	rx_info->rx = rx;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	INIT_WORK(&rx_info->rx_cleanup_work,
			(work_func_t)(bnad_rx_cleanup));

	/*
	 * Init NAPI, so that state is set to NAPI_STATE_SCHED,
	 * so that IRQ handler cannot schedule NAPI at this point.
	 */
	bnad_napi_add(bnad, rx_id);

	/* Register ISR for the Rx object */
	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
		err = bnad_rx_msix_register(bnad, rx_info, rx_id,
					    rx_config->num_paths);
		if (err)
			goto err_return;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (0 == rx_id) {
		/* Set up Dynamic Interrupt Moderation Vector */
		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
			bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);

		/* Enable VLAN filtering only on the default Rx */
		bna_rx_vlanfilter_enable(rx);

		/* Start the DIM timer */
		bnad_dim_timer_start(bnad);
	}

	bna_rx_enable(rx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return 0;

err_return:
	bnad_destroy_rx(bnad, rx_id);
	return err;
}
/* Called with conf_lock & bnad->bna_lock held */
void
bnad_tx_coalescing_timeo_set(struct bnad *bnad)
{
	struct bnad_tx_info *tx_info;

	tx_info = &bnad->tx_info[0];
	if (!tx_info->tx)
		return;

	bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
}
/* Called with conf_lock & bnad->bna_lock held */
void
bnad_rx_coalescing_timeo_set(struct bnad *bnad)
{
	struct bnad_rx_info *rx_info;
	int	i;

	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		bna_rx_coalescing_timeo_set(rx_info->rx,
				bnad->rx_coalescing_timeo);
	}
}
/*
 * Called with bnad->bna_lock held
 */
int
bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr)
{
	int ret;

	if (!is_valid_ether_addr(mac_addr))
		return -EADDRNOTAVAIL;

	/* If datapath is down, pretend everything went through */
	if (!bnad->rx_info[0].rx)
		return 0;

	ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr);
	if (ret != BNA_CB_SUCCESS)
		return -EADDRNOTAVAIL;

	return 0;
}
/* Should be called with conf_lock held */
static int
bnad_enable_default_bcast(struct bnad *bnad)
{
	struct bnad_rx_info *rx_info = &bnad->rx_info[0];
	int ret;
	unsigned long flags;

	init_completion(&bnad->bnad_completions.mcast_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	ret = bna_rx_mcast_add(rx_info->rx, bnad_bcast_addr,
			       bnad_cb_rx_mcast_add);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (ret == BNA_CB_SUCCESS)
		wait_for_completion(&bnad->bnad_completions.mcast_comp);
	else
		return -ENODEV;

	if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
		return -ENODEV;

	return 0;
}
/* Called with mutex_lock(&bnad->conf_mutex) held */
static void
bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
{
	u16 vid;
	unsigned long flags;

	for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
		spin_lock_irqsave(&bnad->bna_lock, flags);
		bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
	}
}
/* Statistics utilities */
void
bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
{
	int i, j;

	for (i = 0; i < bnad->num_rx; i++) {
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			if (bnad->rx_info[i].rx_ctrl[j].ccb) {
				stats->rx_packets += bnad->rx_info[i].
				rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
				stats->rx_bytes += bnad->rx_info[i].
					rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
					bnad->rx_info[i].rx_ctrl[j].ccb->
					rcb[1]->rxq) {
					stats->rx_packets +=
						bnad->rx_info[i].rx_ctrl[j].
						ccb->rcb[1]->rxq->rx_packets;
					stats->rx_bytes +=
						bnad->rx_info[i].rx_ctrl[j].
						ccb->rcb[1]->rxq->rx_bytes;
				}
			}
		}
	}
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			if (bnad->tx_info[i].tcb[j]) {
				stats->tx_packets +=
				bnad->tx_info[i].tcb[j]->txq->tx_packets;
				stats->tx_bytes +=
					bnad->tx_info[i].tcb[j]->txq->tx_bytes;
			}
		}
	}
}
/*
 * Must be called with the bna_lock held.
 */
void
bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
{
	struct bfi_enet_stats_mac *mac_stats;
	u32 bmap;
	int i;

	mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
	stats->rx_errors =
		mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
		mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
		mac_stats->rx_undersize;
	stats->tx_errors = mac_stats->tx_fcs_error +
					mac_stats->tx_undersize;
	stats->rx_dropped = mac_stats->rx_drop;
	stats->tx_dropped = mac_stats->tx_drop;
	stats->multicast = mac_stats->rx_multicast;
	stats->collisions = mac_stats->tx_total_collision;

	stats->rx_length_errors = mac_stats->rx_frame_length_error;

	/* receive ring buffer overflow  ?? */

	stats->rx_crc_errors = mac_stats->rx_fcs_error;
	stats->rx_frame_errors = mac_stats->rx_alignment_error;
	/* recv'r fifo overrun */
	bmap = bna_rx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1) {
			stats->rx_fifo_errors +=
				bnad->stats.bna_stats->
					hw_stats.rxf_stats[i].frame_drops;
			break;
		}
		bmap >>= 1;
	}
}
static void
bnad_mbox_irq_sync(struct bnad *bnad)
{
	u32 irq;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX)
		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
	else
		irq = bnad->pcidev->irq;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	synchronize_irq(irq);
}
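
/*
 * Background for bnad_tso_prepare() (general LSO behaviour, not
 * driver-specific): the hardware rewrites the TCP length per segment
 * and finishes the checksum itself, so the stack's full checksum must
 * be replaced by a pseudo-header sum computed with a zero length.
 * skb_cow_head() first makes sure the headers being rewritten live in
 * private, writable memory.
 */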
/* Utility used by bnad_start_xmit, for doing TSO */
static int
bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err < 0) {
		BNAD_UPDATE_CTR(bnad, tso_err);
		return err;
	}

	/*
	 * For TSO, the TCP checksum field is seeded with pseudo-header sum
	 * excluding the length field.
	 */
	if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		/* Do we really need these? */
		iph->tot_len = 0;
		iph->check = 0;

		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
					   IPPROTO_TCP, 0);
		BNAD_UPDATE_CTR(bnad, tso4);
	} else {
		struct ipv6hdr *ipv6h = ipv6_hdr(skb);

		ipv6h->payload_len = 0;
		tcp_hdr(skb)->check =
			~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
					 IPPROTO_TCP, 0);
		BNAD_UPDATE_CTR(bnad, tso6);
	}

	return 0;
}
/*
 * Initialize Q numbers depending on Rx Paths
 * Called with bnad->bna_lock held, because of cfg_flags
 * access.
 */
static void
bnad_q_num_init(struct bnad *bnad)
{
	int rxps;

	rxps = min((uint)num_online_cpus(),
		   (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));

	if (!(bnad->cfg_flags & BNAD_CF_MSIX))
		rxps = 1;	/* INTx */

	bnad->num_rx = 1;
	bnad->num_tx = 1;
	bnad->num_rxp_per_rx = rxps;
	bnad->num_txq_per_tx = BNAD_TXQ_NUM;
}
/*
 * Adjusts the Q numbers, given a number of msix vectors.
 * Gives preference to RSS over Tx priority queues; in that case
 * just one TxQ is used.
 * Called with bnad->bna_lock held because of cfg_flags access.
 */
static void
bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
{
	bnad->num_txq_per_tx = 1;
	if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
	     bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
	    (bnad->cfg_flags & BNAD_CF_MSIX)) {
		bnad->num_rxp_per_rx = msix_vectors -
			(bnad->num_tx * bnad->num_txq_per_tx) -
			BNAD_MAILBOX_MSIX_VECTORS;
	} else
		bnad->num_rxp_per_rx = 1;
}
/* Enable / disable ioceth */
static int
bnad_ioceth_disable(struct bnad *bnad)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	init_completion(&bnad->bnad_completions.ioc_comp);
	bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
		msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));

	err = bnad->bnad_completions.ioc_comp_status;
	return err;
}
static int
bnad_ioceth_enable(struct bnad *bnad)
{
	int err = 0;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	init_completion(&bnad->bnad_completions.ioc_comp);
	bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
	bna_ioceth_enable(&bnad->bna.ioceth);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
		msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));

	err = bnad->bnad_completions.ioc_comp_status;

	return err;
}
/* Free BNA resources */
static void
bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
		u32 res_val_max)
{
	int i;

	for (i = 0; i < res_val_max; i++)
		bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
}
/* Allocates memory and interrupt resources for BNA */
static int
bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		u32 res_val_max)
{
	int i, err;

	for (i = 0; i < res_val_max; i++) {
		err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_res_free(bnad, res_info, res_val_max);
	return err;
}
/* Interrupt enable / disable */
static void
bnad_enable_msix(struct bnad *bnad)
{
	int i, ret;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (bnad->msix_table)
		return;

	bnad->msix_table =
		kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);

	if (!bnad->msix_table)
		goto intx_mode;

	for (i = 0; i < bnad->msix_num; i++)
		bnad->msix_table[i].entry = i;

	ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
				    1, bnad->msix_num);
	if (ret < 0) {
		goto intx_mode;
	} else if (ret < bnad->msix_num) {
		dev_warn(&bnad->pcidev->dev,
			 "%d MSI-X vectors allocated < %d requested\n",
			 ret, bnad->msix_num);

		spin_lock_irqsave(&bnad->bna_lock, flags);
		/* ret = #of vectors that we got */
		bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
			(ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);

		bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
			 BNAD_MAILBOX_MSIX_VECTORS;

		if (bnad->msix_num > ret) {
			pci_disable_msix(bnad->pcidev);
			goto intx_mode;
		}
	}

	pci_intx(bnad->pcidev, 0);

	return;

intx_mode:
	dev_warn(&bnad->pcidev->dev,
		 "MSI-X enable failed - operating in INTx mode\n");

	kfree(bnad->msix_table);
	bnad->msix_table = NULL;
	bnad->msix_num = 0;
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad->cfg_flags &= ~BNAD_CF_MSIX;
	bnad_q_num_init(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
static void
bnad_disable_msix(struct bnad *bnad)
{
	u32 cfg_flags;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	if (bnad->cfg_flags & BNAD_CF_MSIX)
		bnad->cfg_flags &= ~BNAD_CF_MSIX;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		pci_disable_msix(bnad->pcidev);
		kfree(bnad->msix_table);
		bnad->msix_table = NULL;
	}
}
/* Netdev entry points */
static int
bnad_open(struct net_device *netdev)
{
	int err;
	struct bnad *bnad = netdev_priv(netdev);
	struct bna_pause_config pause_config;
	unsigned long flags;

	mutex_lock(&bnad->conf_mutex);

	/* Tx */
	err = bnad_setup_tx(bnad, 0);
	if (err)
		goto err_return;

	/* Rx */
	err = bnad_setup_rx(bnad, 0);
	if (err)
		goto cleanup_tx;

	/* Port */
	pause_config.tx_pause = 0;
	pause_config.rx_pause = 0;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_mtu_set(&bnad->bna.enet,
			 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
	bna_enet_pause_config(&bnad->bna.enet, &pause_config);
	bna_enet_enable(&bnad->bna.enet);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Enable broadcast */
	bnad_enable_default_bcast(bnad);

	/* Restore VLANs, if any */
	bnad_restore_vlans(bnad, 0);

	/* Set the UCAST address */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Start the stats timer */
	bnad_stats_timer_start(bnad);

	mutex_unlock(&bnad->conf_mutex);

	return 0;

cleanup_tx:
	bnad_destroy_tx(bnad, 0);

err_return:
	mutex_unlock(&bnad->conf_mutex);
	return err;
}
static int
bnad_stop(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	mutex_lock(&bnad->conf_mutex);

	/* Stop the stats timer */
	bnad_stats_timer_stop(bnad);

	init_completion(&bnad->bnad_completions.enet_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
			bnad_cb_enet_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion(&bnad->bnad_completions.enet_comp);

	bnad_destroy_tx(bnad, 0);
	bnad_destroy_rx(bnad, 0);

	/* Synchronize mailbox IRQ */
	bnad_mbox_irq_sync(bnad);

	mutex_unlock(&bnad->conf_mutex);

	return 0;
}
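
/*
 * Tx work-item (WI) layout: a WI carries the opcode/flags/length header
 * plus up to BFI_TX_MAX_VECTORS_PER_WI (four) scatter vectors; packets
 * with more fragments spill into BNA_TXQ_WI_EXTENSION items, which is
 * what BNA_TXQ_WI_NEEDED() accounts for in bnad_start_xmit().
 */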
/* Returns 0 for success */
static int
bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
		    struct sk_buff *skb, struct bna_txq_entry *txqent)
{
	u16 flags = 0;
	u32 gso_size;
	u16 vlan_tag = 0;

	if (skb_vlan_tag_present(skb)) {
		vlan_tag = (u16)skb_vlan_tag_get(skb);
		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
	}
	if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
		vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
				| (vlan_tag & 0x1fff);
		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
	}
	txqent->hdr.wi.vlan_tag = htons(vlan_tag);

	if (skb_is_gso(skb)) {
		gso_size = skb_shinfo(skb)->gso_size;
		if (unlikely(gso_size > bnad->netdev->mtu)) {
			BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
			return -EINVAL;
		}
		if (unlikely((gso_size + skb_transport_offset(skb) +
			      tcp_hdrlen(skb)) >= skb->len)) {
			txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
			txqent->hdr.wi.lso_mss = 0;
			BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
		} else {
			txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
			txqent->hdr.wi.lso_mss = htons(gso_size);
		}

		if (bnad_tso_prepare(bnad, skb)) {
			BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
			return -EINVAL;
		}

		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
		txqent->hdr.wi.l4_hdr_size_n_offset =
			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
			tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
	} else {
		txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
		txqent->hdr.wi.lso_mss = 0;

		if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) {
			BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
			return -EINVAL;
		}

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			__be16 net_proto = vlan_get_protocol(skb);
			u8 proto = 0;

			if (net_proto == htons(ETH_P_IP))
				proto = ip_hdr(skb)->protocol;
#ifdef NETIF_F_IPV6_CSUM
			else if (net_proto == htons(ETH_P_IPV6)) {
				/* nexthdr may not be TCP immediately. */
				proto = ipv6_hdr(skb)->nexthdr;
			}
#endif
			if (proto == IPPROTO_TCP) {
				flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
				txqent->hdr.wi.l4_hdr_size_n_offset =
					htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
					      (0, skb_transport_offset(skb)));

				BNAD_UPDATE_CTR(bnad, tcpcsum_offload);

				if (unlikely(skb_headlen(skb) <
					    skb_transport_offset(skb) +
					    tcp_hdrlen(skb))) {
					BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
					return -EINVAL;
				}
			} else if (proto == IPPROTO_UDP) {
				flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
				txqent->hdr.wi.l4_hdr_size_n_offset =
					htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
					      (0, skb_transport_offset(skb)));

				BNAD_UPDATE_CTR(bnad, udpcsum_offload);
				if (unlikely(skb_headlen(skb) <
					    skb_transport_offset(skb) +
					    sizeof(struct udphdr))) {
					BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
					return -EINVAL;
				}
			} else {
				BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
				return -EINVAL;
			}
		} else
			txqent->hdr.wi.l4_hdr_size_n_offset = 0;
	}

	txqent->hdr.wi.flags = htons(flags);
	txqent->hdr.wi.frame_length = htonl(skb->len);

	return 0;
}
/*
 * bnad_start_xmit : Netdev entry point for Transmit
 *		     Called under lock held by net_device
 */
static netdev_tx_t
bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	u32 txq_id = 0;
	struct bna_tcb *tcb = NULL;
	struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
	u32 prod, q_depth, vect_id;
	u32 wis, vectors, len;
	int i;
	dma_addr_t dma_addr;
	struct bna_txq_entry *txqent;

	len = skb_headlen(skb);

	/* Sanity checks for the skb */

	if (unlikely(skb->len <= ETH_HLEN)) {
		dev_kfree_skb_any(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
		return NETDEV_TX_OK;
	}
	if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
		dev_kfree_skb_any(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
		return NETDEV_TX_OK;
	}
	if (unlikely(len == 0)) {
		dev_kfree_skb_any(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
		return NETDEV_TX_OK;
	}

	tcb = bnad->tx_info[0].tcb[txq_id];

	/*
	 * Takes care of the Tx that is scheduled between clearing the flag
	 * and the netif_tx_stop_all_queues() call.
	 */
	if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
		dev_kfree_skb_any(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
		return NETDEV_TX_OK;
	}

	q_depth = tcb->q_depth;
	prod = tcb->producer_index;
	unmap_q = tcb->unmap_q;

	vectors = 1 + skb_shinfo(skb)->nr_frags;
	wis = BNA_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */

	if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
		dev_kfree_skb_any(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
		return NETDEV_TX_OK;
	}

	/* Check for available TxQ resources */
	if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
		if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
			u32 sent;

			sent = bnad_txcmpl_process(bnad, tcb);
			if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
				bna_ib_ack(tcb->i_dbell, sent);
			smp_mb__before_atomic();
			clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
		} else {
			netif_stop_queue(netdev);
			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
		}

		smp_mb();
		/*
		 * Check again to deal with race condition between
		 * netif_stop_queue here, and netif_wake_queue in
		 * interrupt handler which is not inside netif tx lock.
		 */
		if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
			return NETDEV_TX_BUSY;
		} else {
			netif_wake_queue(netdev);
			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
		}
	}

	txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
	head_unmap = &unmap_q[prod];

	/* Program the opcode, flags, frame_len, num_vectors in WI */
	if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	txqent->hdr.wi.reserved = 0;
	txqent->hdr.wi.num_vectors = vectors;

	head_unmap->skb = skb;
	head_unmap->nvecs = 0;

	/* Program the vectors */
	unmap = head_unmap;
	dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
				  len, DMA_TO_DEVICE);
	if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
		dev_kfree_skb_any(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
		return NETDEV_TX_OK;
	}
	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
	txqent->vector[0].length = htons(len);
	dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
	head_unmap->nvecs++;

	for (i = 0, vect_id = 0; i < vectors - 1; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		u32 size = skb_frag_size(frag);

		if (unlikely(size == 0)) {
			/* Undo the changes starting at tcb->producer_index */
			bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
					   tcb->producer_index);
			dev_kfree_skb_any(skb);
			BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
			return NETDEV_TX_OK;
		}

		len += size;

		vect_id++;
		if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
			vect_id = 0;
			BNA_QE_INDX_INC(prod, q_depth);
			txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
			txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
			unmap = &unmap_q[prod];
		}

		dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
					    0, size, DMA_TO_DEVICE);
		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
			/* Undo the changes starting at tcb->producer_index */
			bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
					   tcb->producer_index);
			dev_kfree_skb_any(skb);
			BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
			return NETDEV_TX_OK;
		}

		dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
		txqent->vector[vect_id].length = htons(size);
		dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
				   dma_addr);
		head_unmap->nvecs++;
	}

	if (unlikely(len != skb->len)) {
		/* Undo the changes starting at tcb->producer_index */
		bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
		dev_kfree_skb_any(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
		return NETDEV_TX_OK;
	}

	BNA_QE_INDX_INC(prod, q_depth);
	tcb->producer_index = prod;

	/* Ensure the WIs are fully written before ringing the doorbell */
	wmb();

	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		return NETDEV_TX_OK;

	skb_tx_timestamp(skb);

	bna_txq_prod_indx_doorbell(tcb);

	return NETDEV_TX_OK;
}
/*
 * Used spin_lock to synchronize reading of stats structures, which
 * is written by BNA under the same lock.
 */
static void
bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	bnad_netdev_qstats_fill(bnad, stats);
	bnad_netdev_hwstats_fill(bnad, stats);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
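
/*
 * Rx filter fallbacks: when the unicast list exceeds the CAM capacity
 * reported by bna_attr() (num_ucmac), or allocation fails, the port is
 * switched to "default function" mode; the multicast path below falls
 * back to ALLMULTI the same way.
 */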
static void
bnad_set_rx_ucast_fltr(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;
	int uc_count = netdev_uc_count(netdev);
	enum bna_cb_status ret;
	u8 *mac_list;
	struct netdev_hw_addr *ha;
	int entry;

	if (netdev_uc_empty(bnad->netdev)) {
		bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
		return;
	}

	if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
		goto mode_default;

	mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC);
	if (mac_list == NULL)
		goto mode_default;

	entry = 0;
	netdev_for_each_uc_addr(ha, netdev) {
		ether_addr_copy(&mac_list[entry * ETH_ALEN], &ha->addr[0]);
		entry++;
	}

	ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list);
	kfree(mac_list);

	if (ret != BNA_CB_SUCCESS)
		goto mode_default;

	return;

	/* ucast packets not in UCAM are routed to default function */
mode_default:
	bnad->cfg_flags |= BNAD_CF_DEFAULT;
	bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
}
static void
bnad_set_rx_mcast_fltr(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;
	int mc_count = netdev_mc_count(netdev);
	enum bna_cb_status ret;
	u8 *mac_list;

	if (netdev->flags & IFF_ALLMULTI)
		goto mode_allmulti;

	if (netdev_mc_empty(netdev))
		return;

	if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
		goto mode_allmulti;

	mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC);

	if (mac_list == NULL)
		goto mode_allmulti;

	ether_addr_copy(&mac_list[0], &bnad_bcast_addr[0]);

	/* copy rest of the MCAST addresses */
	bnad_netdev_mc_list_get(netdev, mac_list);
	ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list);
	kfree(mac_list);

	if (ret != BNA_CB_SUCCESS)
		goto mode_allmulti;

	return;

mode_allmulti:
	bnad->cfg_flags |= BNAD_CF_ALLMULTI;
	bna_rx_mcast_delall(bnad->rx_info[0].rx);
}
void
bnad_set_rx_mode(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	enum bna_rxmode new_mode, mode_mask;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	if (bnad->rx_info[0].rx == NULL) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return;
	}

	/* clear bnad flags to update it with new settings */
	bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
			BNAD_CF_ALLMULTI);

	new_mode = 0;
	if (netdev->flags & IFF_PROMISC) {
		new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
		bnad->cfg_flags |= BNAD_CF_PROMISC;
	} else {
		bnad_set_rx_mcast_fltr(bnad);

		if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
			new_mode |= BNA_RXMODE_ALLMULTI;

		bnad_set_rx_ucast_fltr(bnad);

		if (bnad->cfg_flags & BNAD_CF_DEFAULT)
			new_mode |= BNA_RXMODE_DEFAULT;
	}

	mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
			BNA_RXMODE_ALLMULTI;
	bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
/*
 * bna_lock is used to sync writes to netdev->addr
 * conf_lock cannot be used since this call may be made
 * in a non-blocking context.
 */
static int
bnad_set_mac_address(struct net_device *netdev, void *addr)
{
	int err;
	struct bnad *bnad = netdev_priv(netdev);
	struct sockaddr *sa = (struct sockaddr *)addr;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
	if (!err)
		ether_addr_copy(netdev->dev_addr, sa->sa_data);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return err;
}
static int
bnad_mtu_set(struct bnad *bnad, int frame_size)
{
	unsigned long flags;

	init_completion(&bnad->bnad_completions.mtu_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion(&bnad->bnad_completions.mtu_comp);

	return bnad->bnad_completions.mtu_comp_status;
}
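
/*
 * MTU handling sketch: BNAD_FRAME_SIZE() converts an MTU into an
 * on-wire frame size (MTU plus Ethernet framing overhead). On CAT2
 * hardware a transition across the 4K frame boundary toggles
 * multi-buffer Rx, so the Rx side is rebuilt before the new size is
 * pushed to firmware.
 */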
static int
bnad_change_mtu(struct net_device *netdev, int new_mtu)
{
	int err, mtu;
	struct bnad *bnad = netdev_priv(netdev);
	u32 rx_count = 0, frame, new_frame;

	mutex_lock(&bnad->conf_mutex);

	mtu = netdev->mtu;
	netdev->mtu = new_mtu;

	frame = BNAD_FRAME_SIZE(mtu);
	new_frame = BNAD_FRAME_SIZE(new_mtu);

	/* check if multi-buffer needs to be enabled */
	if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
	    netif_running(bnad->netdev)) {
		/* only when transition is over 4K */
		if ((frame <= 4096 && new_frame > 4096) ||
		    (frame > 4096 && new_frame <= 4096))
			rx_count = bnad_reinit_rx(bnad);
	}

	/* rx_count > 0 - new rx created
	 *	- Linux set err = 0 and return
	 */
	err = bnad_mtu_set(bnad, new_frame);
	if (err)
		err = -EBUSY;

	mutex_unlock(&bnad->conf_mutex);
	return err;
}
static int
bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	if (!bnad->rx_info[0].rx)
		return 0;

	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
	set_bit(vid, bnad->active_vlans);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);

	return 0;
}
static int
bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	if (!bnad->rx_info[0].rx)
		return 0;

	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	clear_bit(vid, bnad->active_vlans);
	bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);

	return 0;
}
static int bnad_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnad *bnad = netdev_priv(dev);
	netdev_features_t changed = features ^ dev->features;

	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) {
		unsigned long flags;

		spin_lock_irqsave(&bnad->bna_lock, flags);

		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
		else
			bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);

		spin_unlock_irqrestore(&bnad->bna_lock, flags);
	}

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void
bnad_netpoll(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	u32 curr_mask;
	int i, j;

	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
		bna_intx_disable(&bnad->bna, curr_mask);
		bnad_isr(bnad->pcidev->irq, netdev);
		bna_intx_enable(&bnad->bna, curr_mask);
	} else {
		/*
		 * Tx processing may happen in sending context, so no need
		 * to explicitly process completions here
		 */

		/* Rx processing */
		for (i = 0; i < bnad->num_rx; i++) {
			rx_info = &bnad->rx_info[i];
			if (!rx_info->rx)
				continue;
			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
				rx_ctrl = &rx_info->rx_ctrl[j];
				if (rx_ctrl->ccb)
					bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
			}
		}
	}
}
#endif
static const struct net_device_ops bnad_netdev_ops = {
	.ndo_open		= bnad_open,
	.ndo_stop		= bnad_stop,
	.ndo_start_xmit		= bnad_start_xmit,
	.ndo_get_stats64	= bnad_get_stats64,
	.ndo_set_rx_mode	= bnad_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnad_set_mac_address,
	.ndo_change_mtu		= bnad_change_mtu,
	.ndo_vlan_rx_add_vid	= bnad_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= bnad_vlan_rx_kill_vid,
	.ndo_set_features	= bnad_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bnad_netpoll
#endif
};
static void
bnad_netdev_init(struct bnad *bnad, bool using_dac)
{
	struct net_device *netdev = bnad->netdev;

	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX |
		NETIF_F_HW_VLAN_CTAG_RX;

	netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6;

	netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;

	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->mem_start = bnad->mmio_start;
	netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;

	/* MTU range: 46 - 9000 */
	netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
	netdev->max_mtu = BNAD_JUMBO_MTU;

	netdev->netdev_ops = &bnad_netdev_ops;
	bnad_set_ethtool_ops(netdev);
}
/*
 * 1. Initialize the bnad structure
 * 2. Setup netdev pointer in pci_dev
 * 3. Initialize no. of TxQ & CQs & MSIX vectors
 * 4. Initialize work queue.
 */
static int
bnad_init(struct bnad *bnad,
	  struct pci_dev *pdev, struct net_device *netdev)
{
	unsigned long flags;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	bnad->netdev = netdev;
	bnad->pcidev = pdev;
	bnad->mmio_start = pci_resource_start(pdev, 0);
	bnad->mmio_len = pci_resource_len(pdev, 0);
	bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
	if (!bnad->bar0) {
		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
		return -ENOMEM;
	}
	dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0,
		 (unsigned long long) bnad->mmio_len);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!bnad_msix_disable)
		bnad->cfg_flags = BNAD_CF_MSIX;

	bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;

	bnad_q_num_init(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
		(bnad->num_rx * bnad->num_rxp_per_rx) +
			 BNAD_MAILBOX_MSIX_VECTORS;

	bnad->txq_depth = BNAD_TXQ_DEPTH;
	bnad->rxq_depth = BNAD_RXQ_DEPTH;

	bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
	bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;

	sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
	bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
	if (!bnad->work_q) {
		iounmap(bnad->bar0);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Must be called after bnad_pci_uninit()
 * so that iounmap() and pci_set_drvdata(NULL)
 * happens only after PCI uninitialization.
 */
static void
bnad_uninit(struct bnad *bnad)
{
	if (bnad->work_q) {
		flush_workqueue(bnad->work_q);
		destroy_workqueue(bnad->work_q);
		bnad->work_q = NULL;
	}

	if (bnad->bar0)
		iounmap(bnad->bar0);
}
/*
 * Initialize locks
 *	a) Per-ioceth mutex used for serializing configuration
 *	   changes from OS interface
 *	b) spin lock used to protect bna state machine
 */
static void
bnad_lock_init(struct bnad *bnad)
{
	spin_lock_init(&bnad->bna_lock);
	mutex_init(&bnad->conf_mutex);
}

static void
bnad_lock_uninit(struct bnad *bnad)
{
	mutex_destroy(&bnad->conf_mutex);
}
/* PCI Initialization */
static int
bnad_pci_init(struct bnad *bnad,
	      struct pci_dev *pdev, bool *using_dac)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;
	err = pci_request_regions(pdev, BNAD_NAME);
	if (err)
		goto disable_device;
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		*using_dac = true;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			goto release_regions;
		*using_dac = false;
	}
	pci_set_master(pdev);
	return 0;

release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);

	return err;
}

static void
bnad_pci_uninit(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
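
/*
 * Probe order: firmware image -> etherdev alloc -> PCI/BAR setup ->
 * BNA resource allocation -> IOC enable -> queue sizing -> netdev
 * registration. Each error label unwinds only the steps completed
 * before it, in reverse order.
 */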
static int
bnad_pci_probe(struct pci_dev *pdev,
		const struct pci_device_id *pcidev_id)
{
	bool	using_dac;
	int	err;
	struct bnad *bnad;
	struct bna *bna;
	struct net_device *netdev;
	struct bfa_pcidev pcidev_info;
	unsigned long flags;

	mutex_lock(&bnad_fwimg_mutex);
	if (!cna_get_firmware_buf(pdev)) {
		mutex_unlock(&bnad_fwimg_mutex);
		dev_err(&pdev->dev, "failed to load firmware image!\n");
		return -ENODEV;
	}
	mutex_unlock(&bnad_fwimg_mutex);

	/*
	 * Allocates sizeof(struct net_device + struct bnad)
	 * bnad = netdev->priv
	 */
	netdev = alloc_etherdev(sizeof(struct bnad));
	if (!netdev) {
		err = -ENOMEM;
		return err;
	}
	bnad = netdev_priv(netdev);
	bnad_lock_init(bnad);
	bnad->id = atomic_inc_return(&bna_id) - 1;

	mutex_lock(&bnad->conf_mutex);
	/*
	 * PCI initialization
	 *	Output : using_dac = 1 for 64 bit DMA
	 *			   = 0 for 32 bit DMA
	 */
	using_dac = false;
	err = bnad_pci_init(bnad, pdev, &using_dac);
	if (err)
		goto unlock_mutex;

	/*
	 * Initialize bnad structure
	 * Setup relation between pci_dev & netdev
	 */
	err = bnad_init(bnad, pdev, netdev);
	if (err)
		goto pci_uninit;

	/* Initialize netdev structure, set up ethtool ops */
	bnad_netdev_init(bnad, using_dac);

	/* Set link to down state */
	netif_carrier_off(netdev);

	/* Setup the debugfs node for this bfad */
	if (bna_debugfs_enable)
		bnad_debugfs_init(bnad);

	/* Get resource requirement from bna */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_res_req(&bnad->res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Allocate resources from bna */
	err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
	if (err)
		goto drv_uninit;

	bna = &bnad->bna;

	/* Setup pcidev_info for bna_init() */
	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
	pcidev_info.device_id = bnad->pcidev->device;
	pcidev_info.pci_bar_kva = bnad->bar0;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad->stats.bna_stats = &bna->stats;

	bnad_enable_msix(bnad);
	err = bnad_mbox_irq_alloc(bnad);
	if (err)
		goto res_free;

	/* Set up timers */
	timer_setup(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout, 0);
	timer_setup(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check, 0);
	timer_setup(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout, 0);
	timer_setup(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
		    0);

	/*
	 * Start the chip
	 * If the call back comes with error, we bail out.
	 * This is a catastrophic error.
	 */
	err = bnad_ioceth_enable(bnad);
	if (err) {
		dev_err(&pdev->dev, "initialization failed err=%d\n", err);
		goto probe_success;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
		bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
		bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
			bna_attr(bna)->num_rxp - 1);
		if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
			bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
			err = -EIO;
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (err)
		goto disable_ioceth;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
	if (err) {
		err = -EIO;
		goto disable_ioceth;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Get the burnt-in mac */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr);
	bnad_set_netdev_perm_addr(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);

	/* Finally, register with net_device layer */
	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "registering net device failed\n");
		goto probe_uninit;
	}
	set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);

	return 0;

probe_success:
	mutex_unlock(&bnad->conf_mutex);
	return 0;

probe_uninit:
	mutex_lock(&bnad->conf_mutex);
	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
disable_ioceth:
	bnad_ioceth_disable(bnad);
	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	bnad_mbox_irq_free(bnad);
	bnad_disable_msix(bnad);
res_free:
	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
drv_uninit:
	/* Remove the debugfs node for this bnad */
	kfree(bnad->regdata);
	bnad_debugfs_uninit(bnad);
	bnad_uninit(bnad);
pci_uninit:
	bnad_pci_uninit(pdev);
unlock_mutex:
	mutex_unlock(&bnad->conf_mutex);
	bnad_lock_uninit(bnad);
	free_netdev(netdev);
	return err;
}
static void
bnad_pci_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnad *bnad;
	struct bna *bna;
	unsigned long flags;

	if (!netdev)
		return;

	bnad = netdev_priv(netdev);
	bna = &bnad->bna;

	if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
		unregister_netdev(netdev);

	mutex_lock(&bnad->conf_mutex);
	bnad_ioceth_disable(bnad);
	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
	bnad_mbox_irq_free(bnad);
	bnad_disable_msix(bnad);
	bnad_pci_uninit(pdev);
	mutex_unlock(&bnad->conf_mutex);
	bnad_lock_uninit(bnad);
	/* Remove the debugfs node for this bnad */
	kfree(bnad->regdata);
	bnad_debugfs_uninit(bnad);
	bnad_uninit(bnad);
	free_netdev(netdev);
}
static const struct pci_device_id bnad_pci_id_table[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
			   PCI_DEVICE_ID_BROCADE_CT),
		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
		.class_mask =  0xffff00
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
			   BFA_PCI_DEVICE_ID_CT2),
		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
		.class_mask =  0xffff00
	},
	{0,  },
};

MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
static struct pci_driver bnad_pci_driver = {
	.name = BNAD_NAME,
	.id_table = bnad_pci_id_table,
	.probe = bnad_pci_probe,
	.remove = bnad_pci_remove,
};

static int __init
bnad_module_init(void)
{
	int err;

	pr_info("bna: QLogic BR-series 10G Ethernet driver - version: %s\n",
		BNAD_VERSION);

	bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);

	err = pci_register_driver(&bnad_pci_driver);
	if (err < 0) {
		pr_err("bna: PCI driver registration failed err=%d\n", err);
		return err;
	}

	return 0;
}

static void __exit
bnad_module_exit(void)
{
	pci_unregister_driver(&bnad_pci_driver);
	release_firmware(bfi_fw);
}
module_init(bnad_module_init);
module_exit(bnad_module_exit);

MODULE_AUTHOR("Brocade");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver");
MODULE_VERSION(BNAD_VERSION);
MODULE_FIRMWARE(CNA_FW_FILE_CT);
MODULE_FIRMWARE(CNA_FW_FILE_CT2);