/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 */
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/prefetch.h>
#include <linux/module.h>

#include "bnad.h"
#include "bna.h"
#include "cna.h"

static DEFINE_MUTEX(bnad_fwimg_mutex);

/* Module params */
static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");

static uint bna_debugfs_enable = 1;
module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
		 " Range[false:0|true:1]");

/* Global variables */
static u32 bnad_rxqs_per_cq = 2;
static struct mutex bnad_list_mutex;
static LIST_HEAD(bnad_list);
static const u8 bnad_bcast_addr[] __aligned(2) =
	{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
#define BNAD_GET_MBOX_IRQ(_bnad)				\
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
	 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
	 ((_bnad)->pcidev->irq))

#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size)	\
do {								\
	(_res_info)->res_type = BNA_RES_T_MEM;			\
	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
	(_res_info)->res_u.mem_info.num = (_num);		\
	(_res_info)->res_u.mem_info.len = (_size);		\
} while (0)
static void
bnad_add_to_list(struct bnad *bnad)
{
	mutex_lock(&bnad_list_mutex);
	list_add_tail(&bnad->list_entry, &bnad_list);
	mutex_unlock(&bnad_list_mutex);
}

static void
bnad_remove_from_list(struct bnad *bnad)
{
	mutex_lock(&bnad_list_mutex);
	list_del(&bnad->list_entry);
	mutex_unlock(&bnad_list_mutex);
}

/*
 * Reinitialize completions in CQ, once Rx is taken down
 */
static void
bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bna_cq_entry *cmpl;
	int i;

	for (i = 0; i < ccb->q_depth; i++) {
		cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
		cmpl->valid = 0;
	}
}
/* Tx Datapath functions */

/* Caller should ensure that the entry at unmap_q[index] is valid */
static u32
bnad_tx_buff_unmap(struct bnad *bnad,
		   struct bnad_tx_unmap *unmap_q,
		   u32 q_depth, u32 index)
{
	struct bnad_tx_unmap *unmap;
	struct sk_buff *skb;
	int vector, nvecs;

	unmap = &unmap_q[index];
	nvecs = unmap->nvecs;

	skb = unmap->skb;
	unmap->skb = NULL;
	unmap->nvecs = 0;
	dma_unmap_single(&bnad->pcidev->dev,
		dma_unmap_addr(&unmap->vectors[0], dma_addr),
		skb_headlen(skb), DMA_TO_DEVICE);
	dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
	nvecs--;

	vector = 0;
	while (nvecs) {
		vector++;
		if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
			vector = 0;
			BNA_QE_INDX_INC(index, q_depth);
			unmap = &unmap_q[index];
		}

		dma_unmap_page(&bnad->pcidev->dev,
			dma_unmap_addr(&unmap->vectors[vector], dma_addr),
			dma_unmap_len(&unmap->vectors[vector], dma_len),
			DMA_TO_DEVICE);
		dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
		nvecs--;
	}

	BNA_QE_INDX_INC(index, q_depth);

	return index;
}
/*
 * Frees all pending Tx Bufs
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
static void
bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
	struct sk_buff *skb;
	int i;

	for (i = 0; i < tcb->q_depth; i++) {
		skb = unmap_q[i].skb;
		if (!skb)
			continue;

		bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);

		dev_kfree_skb_any(skb);
	}
}
/*
 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 *		    b) Sending context
 */
static u32
bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
{
	u32 sent_packets = 0, sent_bytes = 0;
	u32 wis, unmap_wis, hw_cons, cons, q_depth;
	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
	struct bnad_tx_unmap *unmap;
	struct sk_buff *skb;

	/* Just return if TX is stopped */
	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return 0;

	hw_cons = *(tcb->hw_consumer_index);
	cons = tcb->consumer_index;
	q_depth = tcb->q_depth;

	wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));

	while (wis) {
		unmap = &unmap_q[cons];

		skb = unmap->skb;

		sent_packets++;
		sent_bytes += skb->len;

		unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
		wis -= unmap_wis;

		cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
		dev_kfree_skb_any(skb);
	}

	/* Update consumer pointers. */
	tcb->consumer_index = hw_cons;

	tcb->txq->tx_packets += sent_packets;
	tcb->txq->tx_bytes += sent_bytes;

	return sent_packets;
}
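/*
 * Reap completed Tx buffers for one TxQ and acknowledge its interrupt block;
 * wakes the stopped netdev queue once enough descriptors are free again.
 */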
static u32
bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct net_device *netdev = bnad->netdev;
	u32 sent = 0;

	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		return 0;

	sent = bnad_txcmpl_process(bnad, tcb);
	if (sent) {
		if (netif_queue_stopped(netdev) &&
		    netif_carrier_ok(netdev) &&
		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
				    BNAD_NETIF_WAKE_THRESHOLD) {
			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
				netif_wake_queue(netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
			}
		}
	}

	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		bna_ib_ack(tcb->i_dbell, sent);

	smp_mb__before_atomic();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	return sent;
}
/* MSIX Tx Completion Handler */
static irqreturn_t
bnad_msix_tx(int irq, void *data)
{
	struct bna_tcb *tcb = (struct bna_tcb *)data;
	struct bnad *bnad = tcb->bnad;

	bnad_tx_complete(bnad, tcb);

	return IRQ_HANDLED;
}
static inline void
bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;

	unmap_q->reuse_pi = -1;
	unmap_q->alloc_order = -1;
	unmap_q->map_size = 0;
	unmap_q->type = BNAD_RXBUF_NONE;
}
/* Default is page-based allocation. Multi-buffer support - TBD */
static int
bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	int order;

	bnad_rxq_alloc_uninit(bnad, rcb);

	order = get_order(rcb->rxq->buffer_size);

	unmap_q->type = BNAD_RXBUF_PAGE;

	if (bna_is_small_rxq(rcb->id)) {
		unmap_q->alloc_order = 0;
		unmap_q->map_size = rcb->rxq->buffer_size;
	} else {
		if (rcb->rxq->multi_buffer) {
			unmap_q->alloc_order = 0;
			unmap_q->map_size = rcb->rxq->buffer_size;
			unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
		} else {
			unmap_q->alloc_order = order;
			unmap_q->map_size =
				(rcb->rxq->buffer_size > 2048) ?
				PAGE_SIZE << order : 2048;
		}
	}

	BUG_ON((PAGE_SIZE << order) % unmap_q->map_size);

	return 0;
}
static inline void
bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
{
	if (!unmap->page)
		return;

	dma_unmap_page(&bnad->pcidev->dev,
			dma_unmap_addr(&unmap->vector, dma_addr),
			unmap->vector.len, DMA_FROM_DEVICE);
	put_page(unmap->page);
	unmap->page = NULL;
	dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
	unmap->vector.len = 0;
}

static inline void
bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
{
	if (!unmap->skb)
		return;

	dma_unmap_single(&bnad->pcidev->dev,
			dma_unmap_addr(&unmap->vector, dma_addr),
			unmap->vector.len, DMA_FROM_DEVICE);
	dev_kfree_skb_any(unmap->skb);
	unmap->skb = NULL;
	dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
	unmap->vector.len = 0;
}
static void
bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	int i;

	for (i = 0; i < rcb->q_depth; i++) {
		struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
			bnad_rxq_cleanup_skb(bnad, unmap);
		else
			bnad_rxq_cleanup_page(bnad, unmap);
	}
	bnad_rxq_alloc_uninit(bnad, rcb);
}
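/*
 * Refill a page-based RxQ: buffers of unmap_q->map_size bytes are carved
 * out of (possibly higher-order) pages; unmap_q->reuse_pi remembers the
 * entry whose page still has room for the next buffer.
 */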
static u32
bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
{
	u32 alloced, prod, q_depth;
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_rx_unmap *unmap, *prev;
	struct bna_rxq_entry *rxent;
	struct page *page;
	u32 page_offset, alloc_size;
	dma_addr_t dma_addr;

	prod = rcb->producer_index;
	q_depth = rcb->q_depth;

	alloc_size = PAGE_SIZE << unmap_q->alloc_order;
	alloced = 0;

	while (nalloc--) {
		unmap = &unmap_q->unmap[prod];

		if (unmap_q->reuse_pi < 0) {
			page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
					unmap_q->alloc_order);
			page_offset = 0;
		} else {
			prev = &unmap_q->unmap[unmap_q->reuse_pi];
			page = prev->page;
			page_offset = prev->page_offset + unmap_q->map_size;
			get_page(page);
		}

		if (unlikely(!page)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			rcb->rxq->rxbuf_alloc_failed++;
			goto finishing;
		}

		dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
					unmap_q->map_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
			put_page(page);
			BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
			rcb->rxq->rxbuf_map_failed++;
			goto finishing;
		}

		unmap->page = page;
		unmap->page_offset = page_offset;
		dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
		unmap->vector.len = unmap_q->map_size;
		page_offset += unmap_q->map_size;

		if (page_offset < alloc_size)
			unmap_q->reuse_pi = prod;
		else
			unmap_q->reuse_pi = -1;

		rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_INC(prod, q_depth);
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		rcb->producer_index = prod;
		smp_mb();
		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}

	return alloced;
}
static u32
bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
{
	u32 alloced, prod, q_depth, buff_sz;
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_rx_unmap *unmap;
	struct bna_rxq_entry *rxent;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	buff_sz = rcb->rxq->buffer_size;
	prod = rcb->producer_index;
	q_depth = rcb->q_depth;

	alloced = 0;
	while (nalloc--) {
		unmap = &unmap_q->unmap[prod];

		skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);

		if (unlikely(!skb)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			rcb->rxq->rxbuf_alloc_failed++;
			goto finishing;
		}

		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
					  buff_sz, DMA_FROM_DEVICE);
		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
			dev_kfree_skb_any(skb);
			BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
			rcb->rxq->rxbuf_map_failed++;
			goto finishing;
		}

		unmap->skb = skb;
		dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
		unmap->vector.len = buff_sz;

		rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_INC(prod, q_depth);
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		rcb->producer_index = prod;
		smp_mb();
		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}

	return alloced;
}
static inline void
bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	u32 to_alloc;

	to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
	if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
		return;

	if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
		bnad_rxq_refill_skb(bnad, rcb, to_alloc);
	else
		bnad_rxq_refill_page(bnad, rcb, to_alloc);
}
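/* Completion-entry flag combinations that identify frames whose L3/L4
 * checksums were validated by the adapter (used to set CHECKSUM_UNNECESSARY).
 */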
#define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
					BNA_CQ_EF_IPV6 | \
					BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
					BNA_CQ_EF_L4_CKSUM_OK)

#define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
				BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_tcp6 (BNA_CQ_EF_IPV6 | \
				BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
				BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_udp6 (BNA_CQ_EF_IPV6 | \
				BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
static void
bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
		    u32 sop_ci, u32 nvecs)
{
	struct bnad_rx_unmap_q *unmap_q;
	struct bnad_rx_unmap *unmap;
	u32 ci, vec;

	unmap_q = rcb->unmap_q;
	for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
		unmap = &unmap_q->unmap[ci];
		BNA_QE_INDX_INC(ci, rcb->q_depth);

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
			bnad_rxq_cleanup_skb(bnad, unmap);
		else
			bnad_rxq_cleanup_page(bnad, unmap);
	}
}
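/* Unmap the page buffers of one received frame and attach them as
 * fragments to the skb obtained from napi_get_frags().
 */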
static void
bnad_cq_setup_skb_frags(struct bna_ccb *ccb, struct sk_buff *skb, u32 nvecs)
{
	struct bna_rcb *rcb;
	struct bnad *bnad;
	struct bnad_rx_unmap_q *unmap_q;
	struct bna_cq_entry *cq, *cmpl;
	u32 ci, pi, totlen = 0;

	cq = ccb->sw_q;
	pi = ccb->producer_index;
	cmpl = &cq[pi];

	rcb = bna_is_small_rxq(cmpl->rxq_id) ? ccb->rcb[1] : ccb->rcb[0];
	unmap_q = rcb->unmap_q;
	bnad = rcb->bnad;
	ci = rcb->consumer_index;

	/* prefetch header */
	prefetch(page_address(unmap_q->unmap[ci].page) +
		 unmap_q->unmap[ci].page_offset);

	while (nvecs--) {
		struct bnad_rx_unmap *unmap;
		u32 len;

		unmap = &unmap_q->unmap[ci];
		BNA_QE_INDX_INC(ci, rcb->q_depth);

		dma_unmap_page(&bnad->pcidev->dev,
			       dma_unmap_addr(&unmap->vector, dma_addr),
			       unmap->vector.len, DMA_FROM_DEVICE);

		len = ntohs(cmpl->length);
		skb->truesize += unmap->vector.len;
		totlen += len;

		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   unmap->page, unmap->page_offset, len);

		unmap->page = NULL;
		unmap->vector.len = 0;

		BNA_QE_INDX_INC(pi, ccb->q_depth);
		cmpl = &cq[pi];
	}

	skb->len += totlen;
	skb->data_len += totlen;
}
static inline void
bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
		  struct bnad_rx_unmap *unmap, u32 len)
{
	prefetch(skb->data);

	dma_unmap_single(&bnad->pcidev->dev,
			dma_unmap_addr(&unmap->vector, dma_addr),
			unmap->vector.len, DMA_FROM_DEVICE);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, bnad->netdev);

	unmap->skb = NULL;
	unmap->vector.len = 0;
}
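/*
 * NAPI worker for one CQ: consumes up to 'budget' completions, builds and
 * delivers the skbs, then replenishes the RxQ(s) behind this CQ.
 */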
static u32
bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
	struct bna_cq_entry *cq, *cmpl, *next_cmpl;
	struct bna_rcb *rcb = NULL;
	struct bnad_rx_unmap_q *unmap_q;
	struct bnad_rx_unmap *unmap = NULL;
	struct sk_buff *skb = NULL;
	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
	struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
	u32 packets = 0, len = 0, totlen = 0;
	u32 pi, vec, sop_ci = 0, nvecs = 0;
	u32 flags, masked_flags;

	prefetch(bnad->netdev);

	cq = ccb->sw_q;

	while (packets < budget) {
		cmpl = &cq[ccb->producer_index];
		if (!cmpl->valid)
			break;
		/* The 'valid' field is set by the adapter, only after writing
		 * the other fields of completion entry. Hence, do not load
		 * other fields of completion entry *before* the 'valid' is
		 * loaded. Adding the rmb() here prevents the compiler and/or
		 * CPU from reordering the reads which would potentially result
		 * in reading stale values in completion entry.
		 */
		rmb();

		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));

		if (bna_is_small_rxq(cmpl->rxq_id))
			rcb = ccb->rcb[1];
		else
			rcb = ccb->rcb[0];

		unmap_q = rcb->unmap_q;

		/* start of packet ci */
		sop_ci = rcb->consumer_index;

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
			unmap = &unmap_q->unmap[sop_ci];
			skb = unmap->skb;
		} else {
			skb = napi_get_frags(&rx_ctrl->napi);
			if (unlikely(!skb))
				break;
		}
		prefetch(skb);

		flags = ntohl(cmpl->flags);
		len = ntohs(cmpl->length);
		totlen = len;
		nvecs = 1;

		/* Check all the completions for this frame.
		 * busy-wait doesn't help much, break here.
		 */
		if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
		    (flags & BNA_CQ_EF_EOP) == 0) {
			pi = ccb->producer_index;
			do {
				BNA_QE_INDX_INC(pi, ccb->q_depth);
				next_cmpl = &cq[pi];

				if (!next_cmpl->valid)
					break;
				/* The 'valid' field is set by the adapter, only
				 * after writing the other fields of completion
				 * entry. Hence, do not load other fields of
				 * completion entry *before* the 'valid' is
				 * loaded. Adding the rmb() here prevents the
				 * compiler and/or CPU from reordering the reads
				 * which would potentially result in reading
				 * stale values in completion entry.
				 */
				rmb();

				len = ntohs(next_cmpl->length);
				flags = ntohl(next_cmpl->flags);

				nvecs++;
				totlen += len;
			} while ((flags & BNA_CQ_EF_EOP) == 0);

			if (!next_cmpl->valid)
				break;
		}
		packets++;

		/* TODO: BNA_CQ_EF_LOCAL ? */
		if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
				      BNA_CQ_EF_FCS_ERROR |
				      BNA_CQ_EF_TOO_LONG))) {
			bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
			rcb->rxq->rx_packets_with_error++;

			goto next;
		}

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
			bnad_cq_setup_skb(bnad, skb, unmap, len);
		else
			bnad_cq_setup_skb_frags(ccb, skb, nvecs);

		rcb->rxq->rx_packets++;
		rcb->rxq->rx_bytes += totlen;
		ccb->bytes_per_intr += totlen;

		masked_flags = flags & flags_cksum_prot_mask;

		if (likely
		    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
		     ((masked_flags == flags_tcp4) ||
		      (masked_flags == flags_udp4) ||
		      (masked_flags == flags_tcp6) ||
		      (masked_flags == flags_udp6))))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		if ((flags & BNA_CQ_EF_VLAN) &&
		    (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       ntohs(cmpl->vlan_tag));

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
			netif_receive_skb(skb);
		else
			napi_gro_frags(&rx_ctrl->napi);

next:
		BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
		for (vec = 0; vec < nvecs; vec++) {
			cmpl = &cq[ccb->producer_index];
			cmpl->valid = 0;
			BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
		}
	}

	napi_gro_flush(&rx_ctrl->napi, false);
	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
		bna_ib_ack_disable_irq(ccb->i_dbell, packets);

	bnad_rxq_post(bnad, ccb->rcb[0]);
	if (ccb->rcb[1])
		bnad_rxq_post(bnad, ccb->rcb[1]);

	return packets;
}
static void
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
	struct napi_struct *napi = &rx_ctrl->napi;

	if (likely(napi_schedule_prep(napi))) {
		__napi_schedule(napi);
		rx_ctrl->rx_schedule++;
	}
}

/* MSIX Rx Path Handler */
static irqreturn_t
bnad_msix_rx(int irq, void *data)
{
	struct bna_ccb *ccb = (struct bna_ccb *)data;

	if (ccb) {
		((struct bnad_rx_ctrl *)ccb->ctrl)->rx_intr_ctr++;
		bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
	}

	return IRQ_HANDLED;
}
/* Interrupt handlers */

/* Mbox Interrupt Handlers */
static irqreturn_t
bnad_msix_mbox_handler(int irq, void *data)
{
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_HANDLED;
	}

	bna_intr_status_get(&bnad->bna, intr_status);

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return IRQ_HANDLED;
}
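/* INTx interrupt handler - services the mailbox and all Tx/Rx data IBs */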
static irqreturn_t
bnad_isr(int irq, void *data)
{
	int i, j;
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	struct bna_tcb *tcb = NULL;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_NONE;
	}

	bna_intr_status_get(&bnad->bna, intr_status);

	if (unlikely(!intr_status)) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_NONE;
	}

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (!BNA_IS_INTX_DATA_INTR(intr_status))
		return IRQ_HANDLED;

	/* Process data interrupts */
	/* Tx processing */
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
				bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
		}
	}
	/* Rx processing */
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (rx_ctrl->ccb)
				bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
		}
	}
	return IRQ_HANDLED;
}
/*
 * Called in interrupt / callback context
 * with bna_lock held, so cfg_flags access is OK
 */
static void
bnad_enable_mbox_irq(struct bnad *bnad)
{
	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
}

/*
 * Called with bnad->bna_lock held b'cos of
 * bnad->cfg_flags access.
 */
static void
bnad_disable_mbox_irq(struct bnad *bnad)
{
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
}

static void
bnad_set_netdev_perm_addr(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;

	ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
	if (is_zero_ether_addr(netdev->dev_addr))
		ether_addr_copy(netdev->dev_addr, bnad->perm_addr);
}
/* Control Path Handlers */

/* Callbacks */
void
bnad_cb_mbox_intr_enable(struct bnad *bnad)
{
	bnad_enable_mbox_irq(bnad);
}

void
bnad_cb_mbox_intr_disable(struct bnad *bnad)
{
	bnad_disable_mbox_irq(bnad);
}

void
bnad_cb_ioceth_ready(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_failed(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
	complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_disabled(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}

static void
bnad_cb_enet_disabled(void *arg)
{
	struct bnad *bnad = (struct bnad *)arg;

	netif_carrier_off(bnad->netdev);
	complete(&bnad->bnad_completions.enet_comp);
}
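/* Link state callback from the enet layer; updates carrier state and the
 * per-TxQ subqueues accordingly.
 */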
void
bnad_cb_ethport_link_status(struct bnad *bnad,
			    enum bna_link_status link_status)
{
	bool link_up = false;

	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);

	if (link_status == BNA_CEE_UP) {
		if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	} else {
		if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	}

	if (link_up) {
		if (!netif_carrier_ok(bnad->netdev)) {
			uint tx_id, tcb_id;

			netdev_info(bnad->netdev, "link up\n");
			netif_carrier_on(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
			for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
				for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
				     tcb_id++) {
					struct bna_tcb *tcb =
					bnad->tx_info[tx_id].tcb[tcb_id];
					u32 txq_id;
					if (!tcb)
						continue;

					txq_id = tcb->id;

					if (test_bit(BNAD_TXQ_TX_STARTED,
						     &tcb->flags)) {
						/*
						 * Force an immediate
						 * Transmit Schedule */
						netif_wake_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_wakeup);
					} else {
						netif_stop_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_stop);
					}
				}
			}
		}
	} else {
		if (netif_carrier_ok(bnad->netdev)) {
			netdev_info(bnad->netdev, "link down\n");
			netif_carrier_off(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
		}
	}
}
static void
bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.tx_comp);
}

static void
bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;

	tx_info->tcb[tcb->id] = tcb;
}

static void
bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;

	tx_info->tcb[tcb->id] = NULL;
}

static void
bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = ccb;
	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
}

static void
bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = NULL;
}

static void
bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	u32 txq_id;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
		txq_id = tcb->id;
		clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
		netif_stop_subqueue(bnad->netdev, txq_id);
	}
}

static void
bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	u32 txq_id;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
		txq_id = tcb->id;

		BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
		set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
		BUG_ON(*(tcb->hw_consumer_index) != 0);

		if (netif_carrier_ok(bnad->netdev)) {
			netif_wake_subqueue(bnad->netdev, txq_id);
			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
		}
	}

	/*
	 * Workaround for first ioceth enable failure & we
	 * get a 0 MAC address. We try to get the MAC address
	 * again here.
	 */
	if (is_zero_ether_addr(bnad->perm_addr)) {
		bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
		bnad_set_netdev_perm_addr(bnad);
	}
}
/*
 * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
 */
static void
bnad_tx_cleanup(struct delayed_work *work)
{
	struct bnad_tx_info *tx_info =
		container_of(work, struct bnad_tx_info, tx_cleanup_work);
	struct bnad *bnad = NULL;
	struct bna_tcb *tcb;
	unsigned long flags;
	u32 i, pending = 0;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;

		bnad = tcb->bnad;

		if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
			pending++;
			continue;
		}

		bnad_txq_cleanup(bnad, tcb);

		smp_mb__before_atomic();
		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
	}

	if (pending) {
		queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
			msecs_to_jiffies(1));
		return;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_cleanup_complete(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
static void
bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
	}

	queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
}
static void
bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);

		if (ccb->rcb[1])
			clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
	}
}
/*
 * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
 */
static void
bnad_rx_cleanup(void *work)
{
	struct bnad_rx_info *rx_info =
		container_of(work, struct bnad_rx_info, rx_cleanup_work);
	struct bnad_rx_ctrl *rx_ctrl;
	struct bnad *bnad = NULL;
	unsigned long flags;
	u32 i;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];

		if (!rx_ctrl->ccb)
			continue;

		bnad = rx_ctrl->ccb->bnad;

		/*
		 * Wait till the poll handler has exited
		 * and nothing can be scheduled anymore
		 */
		napi_disable(&rx_ctrl->napi);

		bnad_cq_cleanup(bnad, rx_ctrl->ccb);
		bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
		if (rx_ctrl->ccb->rcb[1])
			bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_cleanup_complete(rx_info->rx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
static void
bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);

		if (ccb->rcb[1])
			clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
	}

	queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
}
static void
bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bna_rcb *rcb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i, j;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		napi_enable(&rx_ctrl->napi);

		for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
			rcb = ccb->rcb[j];
			if (!rcb)
				continue;

			bnad_rxq_alloc_init(bnad, rcb);
			set_bit(BNAD_RXQ_STARTED, &rcb->flags);
			set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
			bnad_rxq_post(bnad, rcb);
		}
	}
}
static void
bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.rx_comp);
}

static void
bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
{
	bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.mcast_comp);
}

void
bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
		       struct bna_stats *stats)
{
	if (status == BNA_CB_SUCCESS)
		BNAD_UPDATE_CTR(bnad, hw_stats_updates);

	if (!netif_running(bnad->netdev) ||
		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	mod_timer(&bnad->stats_timer,
		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
}

static void
bnad_cb_enet_mtu_set(struct bnad *bnad)
{
	bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.mtu_comp);
}

void
bnad_cb_completion(void *arg, enum bfa_status status)
{
	struct bnad_iocmd_comp *iocmd_comp =
			(struct bnad_iocmd_comp *)arg;

	iocmd_comp->comp_status = (u32) status;
	complete(&iocmd_comp->comp);
}
/* Resource allocation, free functions */

static void
bnad_mem_free(struct bnad *bnad,
	      struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if (mem_info->mdl == NULL)
		return;

	for (i = 0; i < mem_info->num; i++) {
		if (mem_info->mdl[i].kva != NULL) {
			if (mem_info->mem_type == BNA_MEM_T_DMA) {
				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
						dma_pa);
				dma_free_coherent(&bnad->pcidev->dev,
						  mem_info->mdl[i].len,
						  mem_info->mdl[i].kva, dma_pa);
			} else
				kfree(mem_info->mdl[i].kva);
		}
	}
	kfree(mem_info->mdl);
	mem_info->mdl = NULL;
}
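/* Allocate the descriptor list and backing memory (DMA-coherent or kernel
 * virtual) described by @mem_info; frees everything again on failure.
 */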
static int
bnad_mem_alloc(struct bnad *bnad,
	       struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if ((mem_info->num == 0) || (mem_info->len == 0)) {
		mem_info->mdl = NULL;
		return 0;
	}

	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
				GFP_KERNEL);
	if (mem_info->mdl == NULL)
		return -ENOMEM;

	if (mem_info->mem_type == BNA_MEM_T_DMA) {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva =
				dma_alloc_coherent(&bnad->pcidev->dev,
						   mem_info->len, &dma_pa,
						   GFP_KERNEL);
			if (mem_info->mdl[i].kva == NULL)
				goto err_return;

			BNA_SET_DMA_ADDR(dma_pa,
					 &(mem_info->mdl[i].dma));
		}
	} else {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva = kzalloc(mem_info->len,
						       GFP_KERNEL);
			if (mem_info->mdl[i].kva == NULL)
				goto err_return;
		}
	}

	return 0;

err_return:
	bnad_mem_free(bnad, mem_info);
	return -ENOMEM;
}
/* Free IRQ for Mailbox */
static void
bnad_mbox_irq_free(struct bnad *bnad)
{
	int irq;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_disable_mbox_irq(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	irq = BNAD_GET_MBOX_IRQ(bnad);
	free_irq(irq, bnad);
}

/*
 * Allocates IRQ for Mailbox, but keep it disabled
 * This will be enabled once we get the mbox enable callback
 * from bna
 */
static int
bnad_mbox_irq_alloc(struct bnad *bnad)
{
	int err = 0;
	unsigned long irq_flags, flags;
	u32 irq;
	irq_handler_t irq_handler;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX) {
		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
		irq_flags = 0;
	} else {
		irq_handler = (irq_handler_t)bnad_isr;
		irq = bnad->pcidev->irq;
		irq_flags = IRQF_SHARED;
	}

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);

	/*
	 * Set the Mbox IRQ disable flag, so that the IRQ handler
	 * called from request_irq() for SHARED IRQs do not execute
	 */
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);

	err = request_irq(irq, irq_handler, irq_flags,
			  bnad->mbox_irq_name, bnad);

	return err;
}
static void
bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
{
	kfree(intr_info->idl);
	intr_info->idl = NULL;
}

/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
static int
bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
		    u32 txrx_id, struct bna_intr_info *intr_info)
{
	int i, vector_start = 0;
	u32 cfg_flags;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		intr_info->intr_type = BNA_INTR_T_MSIX;
		intr_info->idl = kcalloc(intr_info->num,
					sizeof(struct bna_intr_descr),
					GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
			break;

		case BNAD_INTR_RX:
			vector_start = BNAD_MAILBOX_MSIX_VECTORS +
					(bnad->num_tx * bnad->num_txq_per_tx) +
					txrx_id;
			break;

		default:
			BUG();
		}

		for (i = 0; i < intr_info->num; i++)
			intr_info->idl[i].vector = vector_start + i;
	} else {
		intr_info->intr_type = BNA_INTR_T_INTX;
		intr_info->num = 1;
		intr_info->idl = kcalloc(intr_info->num,
					sizeof(struct bna_intr_descr),
					GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
			break;

		case BNAD_INTR_RX:
			intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
			break;
		}
	}
	return 0;
}
/* NOTE: Should be called for MSIX only
 * Unregisters Tx MSIX vector(s) from the kernel
 */
static void
bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
			int num_txqs)
{
	int i;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		if (tx_info->tcb[i] == NULL)
			continue;

		vector_num = tx_info->tcb[i]->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
	}
}

/* NOTE: Should be called for MSIX only
 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
			u32 tx_id, int num_txqs)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		vector_num = tx_info->tcb[i]->intr_vector;
		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
				tx_id + tx_info->tcb[i]->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_tx, 0,
				  tx_info->tcb[i]->name,
				  tx_info->tcb[i]);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
	return -1;
}

/* NOTE: Should be called for MSIX only
 * Unregisters Rx MSIX vector(s) from the kernel
 */
static void
bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
			int num_rxps)
{
	int i;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		if (rx_info->rx_ctrl[i].ccb == NULL)
			continue;

		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector,
			 rx_info->rx_ctrl[i].ccb);
	}
}

/* NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
			u32 rx_id, int num_rxps)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
			bnad->netdev->name,
			rx_id + rx_info->rx_ctrl[i].ccb->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_rx, 0,
				  rx_info->rx_ctrl[i].ccb->name,
				  rx_info->rx_ctrl[i].ccb);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
	return -1;
}
/* Free Tx object Resources */
static void
bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Tx object */
static int
bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  u32 tx_id)
{
	int i, err = 0;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					&res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
					&res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}

/* Free Rx object Resources */
static void
bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Rx object */
static int
bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  uint rx_id)
{
	int i, err = 0;

	/* All memory needs to be allocated before setup_ccbs */
	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					&res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
					&res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_rx_res_free(bnad, res_info);
	return err;
}
/* Timer callbacks */

/* a) IOC timer */
static void
bnad_ioc_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_ioc_hb_check(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_sem_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * All timer routines use bnad->bna_lock to protect against
 * the following race, which may occur in case of no locking:
 * timer routine running on one CPU while another CPU clears the
 * timer-running flag and calls del_timer_sync() / mod_timer().
 */

/* b) Dynamic Interrupt Moderation Timer */
static void
bnad_dim_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	int i, j;
	unsigned long flags;

	if (!netif_carrier_ok(bnad->netdev))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (!rx_ctrl->ccb)
				continue;
			bna_rx_dim_update(rx_ctrl->ccb);
		}
	}

	/* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
/* c) Statistics Timer */
static void
bnad_stats_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	if (!netif_running(bnad->netdev) ||
		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_hw_stats_get(&bnad->bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Set up timer for DIM
 * Called with bnad->bna_lock held
 */
void
bnad_dim_timer_start(struct bnad *bnad)
{
	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->dim_timer, bnad_dim_timeout,
			    (unsigned long)bnad);
		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	}
}

/*
 * Set up timer for statistics
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_start(struct bnad *bnad)
{
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->stats_timer, bnad_stats_timeout,
			    (unsigned long)bnad);
		mod_timer(&bnad->stats_timer,
			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Stops the stats timer
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_stop(struct bnad *bnad)
{
	int to_del = 0;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		to_del = 1;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (to_del)
		del_timer_sync(&bnad->stats_timer);
}
/* Utilities */

static void
bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
{
	int i = 1; /* Index 0 has broadcast address */
	struct netdev_hw_addr *mc_addr;

	netdev_for_each_mc_addr(mc_addr, netdev) {
		ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]);
		i++;
	}
}
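/* NAPI poll callback - processes one CQ and re-arms its interrupt when done */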
static int
bnad_napi_poll_rx(struct napi_struct *napi, int budget)
{
	struct bnad_rx_ctrl *rx_ctrl =
		container_of(napi, struct bnad_rx_ctrl, napi);
	struct bnad *bnad = rx_ctrl->bnad;
	int rcvd = 0;

	rx_ctrl->rx_poll_ctr++;

	if (!netif_carrier_ok(bnad->netdev))
		goto poll_exit;

	rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
	if (rcvd >= budget)
		return rcvd;

poll_exit:
	napi_complete(napi);

	rx_ctrl->rx_complete++;

	if (rx_ctrl->ccb)
		bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);

	return rcvd;
}
#define BNAD_NAPI_POLL_QUOTA		64
static void
bnad_napi_add(struct bnad *bnad, u32 rx_id)
{
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	/* Initialize & enable NAPI */
	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
			       bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
	}
}

static void
bnad_napi_delete(struct bnad *bnad, u32 rx_id)
{
	int i;

	/* First disable and then clean up */
	for (i = 0; i < bnad->num_rxp_per_rx; i++)
		netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
}
/* Should be held with conf_lock held */
static void
bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
{
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	unsigned long flags;

	if (!tx_info->tx)
		return;

	init_completion(&bnad->bnad_completions.tx_comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&bnad->bnad_completions.tx_comp);

	if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
		bnad_tx_msix_unregister(bnad, tx_info,
			bnad->num_txq_per_tx);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_destroy(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	tx_info->tx = NULL;
	tx_info->tx_id = 0;

	bnad_tx_res_free(bnad, res_info);
}
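/*
 * Allocates the required resources, creates one BNA Tx object and registers
 * its completion ISR(s).
 */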
/* Should be held with conf_lock held */
static int
bnad_setup_tx(struct bnad *bnad, u32 tx_id)
{
	int err;
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	struct bna_intr_info *intr_info =
			&res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
	static const struct bna_tx_event_cbfn tx_cbfn = {
		.tcb_setup_cbfn = bnad_cb_tcb_setup,
		.tcb_destroy_cbfn = bnad_cb_tcb_destroy,
		.tx_stall_cbfn = bnad_cb_tx_stall,
		.tx_resume_cbfn = bnad_cb_tx_resume,
		.tx_cleanup_cbfn = bnad_cb_tx_cleanup,
	};
	struct bna_tx *tx;
	unsigned long flags;

	tx_info->tx_id = tx_id;

	/* Initialize the Tx object configuration */
	tx_config->num_txq = bnad->num_txq_per_tx;
	tx_config->txq_depth = bnad->txq_depth;
	tx_config->tx_type = BNA_TX_T_REGULAR;
	tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;

	/* Get BNA's resource requirement for one tx object */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_res_req(bnad->num_txq_per_tx,
		bnad->txq_depth, res_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Fill Unmap Q memory requirements */
	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
			bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
			bnad->txq_depth));

	/* Allocate resources */
	err = bnad_tx_res_alloc(bnad, res_info, tx_id);
	if (err)
		return err;

	/* Ask BNA to create one Tx object, supplying required resources */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
			tx_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (!tx) {
		err = -ENOMEM;
		goto err_return;
	}
	tx_info->tx = tx;

	INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
			(work_func_t)bnad_tx_cleanup);

	/* Register ISR for the Tx object */
	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
		err = bnad_tx_msix_register(bnad, tx_info,
			tx_id, bnad->num_txq_per_tx);
		if (err)
			goto cleanup_tx;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_enable(tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return 0;

cleanup_tx:
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_destroy(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	tx_info->tx = NULL;
	tx_info->tx_id = 0;
err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}
/* Setup the rx config for bna_rx_create */
/* bnad decides the configuration */
static void
bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
{
	memset(rx_config, 0, sizeof(*rx_config));
	rx_config->rx_type = BNA_RX_T_REGULAR;
	rx_config->num_paths = bnad->num_rxp_per_rx;
	rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;

	if (bnad->num_rxp_per_rx > 1) {
		rx_config->rss_status = BNA_STATUS_T_ENABLED;
		rx_config->rss_config.hash_type =
				(BFI_ENET_RSS_IPV6 |
				 BFI_ENET_RSS_IPV6_TCP |
				 BFI_ENET_RSS_IPV4 |
				 BFI_ENET_RSS_IPV4_TCP);
		rx_config->rss_config.hash_mask =
				bnad->num_rxp_per_rx - 1;
		netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key,
			sizeof(rx_config->rss_config.toeplitz_hash_key));
	} else {
		rx_config->rss_status = BNA_STATUS_T_DISABLED;
		memset(&rx_config->rss_config, 0,
		       sizeof(rx_config->rss_config));
	}

	rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
	rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;

	/* BNA_RXP_SINGLE - one data-buffer queue
	 * BNA_RXP_SLR - one small-buffer and one large-buffer queues
	 * BNA_RXP_HDS - one header-buffer and one data-buffer queues
	 */
	/* TODO: configurable param for queue type */
	rx_config->rxp_type = BNA_RXP_SLR;

	if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
	    rx_config->frame_size > 4096) {
		/* though size_routing_enable is set in SLR,
		 * small packets may get routed to same rxq.
		 * set buf_size to 2048 instead of PAGE_SIZE.
		 */
		rx_config->q0_buf_size = 2048;
		/* this should be in multiples of 2 */
		rx_config->q0_num_vecs = 4;
		rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
		rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
	} else {
		rx_config->q0_buf_size = rx_config->frame_size;
		rx_config->q0_num_vecs = 1;
		rx_config->q0_depth = bnad->rxq_depth;
	}

	/* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
	if (rx_config->rxp_type == BNA_RXP_SLR) {
		rx_config->q1_depth = bnad->rxq_depth;
		rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
	}

	rx_config->vlan_strip_status =
		(bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ?
		BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
}

static void
bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
{
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	int i;

	for (i = 0; i < bnad->num_rxp_per_rx; i++)
		rx_info->rx_ctrl[i].bnad = bnad;
}
/* Called with mutex_lock(&bnad->conf_mutex) held */
static u32
bnad_reinit_rx(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;
	u32 err = 0, current_err = 0;
	u32 rx_id = 0, count = 0;
	unsigned long flags;

	/* destroy and create new rx objects */
	for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
		if (!bnad->rx_info[rx_id].rx)
			continue;
		bnad_destroy_rx(bnad, rx_id);
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_mtu_set(&bnad->bna.enet,
			 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
		count++;
		current_err = bnad_setup_rx(bnad, rx_id);
		if (current_err && !err) {
			err = current_err;
			netdev_err(netdev, "RXQ:%u setup failed\n", rx_id);
		}
	}

	/* restore rx configuration */
	if (bnad->rx_info[0].rx && !err) {
		bnad_restore_vlans(bnad, 0);
		bnad_enable_default_bcast(bnad);
		spin_lock_irqsave(&bnad->bna_lock, flags);
		bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		bnad_set_rx_mode(netdev);
	}

	return count;
}
/* Called with bnad_conf_lock() held */
static void
bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
{
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
	unsigned long flags;
	int to_del = 0;

	if (!rx_info->rx)
		return;

	if (0 == rx_id) {
		spin_lock_irqsave(&bnad->bna_lock, flags);
		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
		    test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
			clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
			to_del = 1;
		}
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		if (to_del)
			del_timer_sync(&bnad->dim_timer);
	}

	init_completion(&bnad->bnad_completions.rx_comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&bnad->bnad_completions.rx_comp);

	if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
		bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);

	bnad_napi_delete(bnad, rx_id);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_destroy(rx_info->rx);

	rx_info->rx = NULL;
	rx_info->rx_id = 0;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_rx_res_free(bnad, res_info);
}
/* Called with mutex_lock(&bnad->conf_mutex) held */
int
bnad_setup_rx(struct bnad *bnad, u32 rx_id)
{
	int err;
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
	struct bna_intr_info *intr_info =
			&res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
	static const struct bna_rx_event_cbfn rx_cbfn = {
		.rcb_setup_cbfn = NULL,
		.rcb_destroy_cbfn = NULL,
		.ccb_setup_cbfn = bnad_cb_ccb_setup,
		.ccb_destroy_cbfn = bnad_cb_ccb_destroy,
		.rx_stall_cbfn = bnad_cb_rx_stall,
		.rx_cleanup_cbfn = bnad_cb_rx_cleanup,
		.rx_post_cbfn = bnad_cb_rx_post,
	};
	struct bna_rx *rx;
	unsigned long flags;

	rx_info->rx_id = rx_id;

	/* Initialize the Rx object configuration */
	bnad_init_rx_config(bnad, rx_config);

	/* Get BNA's resource requirement for one Rx object */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_res_req(rx_config, res_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Fill Unmap Q memory requirements */
	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
				 rx_config->num_paths,
			(rx_config->q0_depth *
			 sizeof(struct bnad_rx_unmap)) +
			 sizeof(struct bnad_rx_unmap_q));

	if (rx_config->rxp_type != BNA_RXP_SINGLE) {
		BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
					 rx_config->num_paths,
				(rx_config->q1_depth *
				 sizeof(struct bnad_rx_unmap) +
				 sizeof(struct bnad_rx_unmap_q)));
	}
	/* Allocate resource */
	err = bnad_rx_res_alloc(bnad, res_info, rx_id);
	if (err)
		return err;

	bnad_rx_ctrl_init(bnad, rx_id);

	/* Ask BNA to create one Rx object, supplying required resources */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
			rx_info);
	if (!rx) {
		err = -ENOMEM;
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		goto err_return;
	}
	rx_info->rx = rx;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	INIT_WORK(&rx_info->rx_cleanup_work,
			(work_func_t)(bnad_rx_cleanup));

	/*
	 * Init NAPI, so that state is set to NAPI_STATE_SCHED,
	 * so that IRQ handler cannot schedule NAPI at this point.
	 */
	bnad_napi_add(bnad, rx_id);

	/* Register ISR for the Rx object */
	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
		err = bnad_rx_msix_register(bnad, rx_info, rx_id,
					    rx_config->num_paths);
		if (err)
			goto err_return;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (0 == rx_id) {
		/* Set up Dynamic Interrupt Moderation Vector */
		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
			bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);

		/* Enable VLAN filtering only on the default Rx */
		bna_rx_vlanfilter_enable(rx);

		/* Start the DIM timer */
		bnad_dim_timer_start(bnad);
	}

	bna_rx_enable(rx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return 0;

err_return:
	bnad_destroy_rx(bnad, rx_id);
	return err;
}
/* Called with conf_lock & bnad->bna_lock held */
void
bnad_tx_coalescing_timeo_set(struct bnad *bnad)
{
	struct bnad_tx_info *tx_info;

	tx_info = &bnad->tx_info[0];
	if (!tx_info->tx)
		return;

	bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
}

/* Called with conf_lock & bnad->bna_lock held */
void
bnad_rx_coalescing_timeo_set(struct bnad *bnad)
{
	struct bnad_rx_info *rx_info;
	int i;

	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		bna_rx_coalescing_timeo_set(rx_info->rx,
				bnad->rx_coalescing_timeo);
	}
}
/*
 * Called with bnad->bna_lock held
 */
int
bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr)
{
	int ret;

	if (!is_valid_ether_addr(mac_addr))
		return -EADDRNOTAVAIL;

	/* If datapath is down, pretend everything went through */
	if (!bnad->rx_info[0].rx)
		return 0;

	ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr);
	if (ret != BNA_CB_SUCCESS)
		return -EADDRNOTAVAIL;

	return 0;
}
/* Should be called with conf_lock held */
static int
bnad_enable_default_bcast(struct bnad *bnad)
{
	struct bnad_rx_info *rx_info = &bnad->rx_info[0];
	int ret;
	unsigned long flags;

	init_completion(&bnad->bnad_completions.mcast_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	ret = bna_rx_mcast_add(rx_info->rx, bnad_bcast_addr,
			       bnad_cb_rx_mcast_add);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (ret == BNA_CB_SUCCESS)
		wait_for_completion(&bnad->bnad_completions.mcast_comp);
	else
		return -ENODEV;

	if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
		return -ENODEV;

	return 0;
}
/* Called with mutex_lock(&bnad->conf_mutex) held */
static void
bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
{
	u16 vid;
	unsigned long flags;

	for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
		spin_lock_irqsave(&bnad->bna_lock, flags);
		bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
	}
}
/* Statistics utilities */
void
bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
{
	int i, j;

	for (i = 0; i < bnad->num_rx; i++) {
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			if (bnad->rx_info[i].rx_ctrl[j].ccb) {
				stats->rx_packets += bnad->rx_info[i].
				rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
				stats->rx_bytes += bnad->rx_info[i].
					rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
				    bnad->rx_info[i].rx_ctrl[j].ccb->
				    rcb[1]->rxq) {
					stats->rx_packets +=
						bnad->rx_info[i].rx_ctrl[j].
						ccb->rcb[1]->rxq->rx_packets;
					stats->rx_bytes +=
						bnad->rx_info[i].rx_ctrl[j].
						ccb->rcb[1]->rxq->rx_bytes;
				}
			}
		}
	}
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			if (bnad->tx_info[i].tcb[j]) {
				stats->tx_packets +=
				bnad->tx_info[i].tcb[j]->txq->tx_packets;
				stats->tx_bytes +=
				bnad->tx_info[i].tcb[j]->txq->tx_bytes;
			}
		}
	}
}
/*
 * Must be called with the bna_lock held.
 */
void
bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
{
	struct bfi_enet_stats_mac *mac_stats;
	u32 bmap;
	int i;

	mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
	stats->rx_errors =
		mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
		mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
		mac_stats->rx_undersize;
	stats->tx_errors = mac_stats->tx_fcs_error +
					mac_stats->tx_undersize;
	stats->rx_dropped = mac_stats->rx_drop;
	stats->tx_dropped = mac_stats->tx_drop;
	stats->multicast = mac_stats->rx_multicast;
	stats->collisions = mac_stats->tx_total_collision;

	stats->rx_length_errors = mac_stats->rx_frame_length_error;

	/* receive ring buffer overflow  ?? */

	stats->rx_crc_errors = mac_stats->rx_fcs_error;
	stats->rx_frame_errors = mac_stats->rx_alignment_error;
	/* recv'r fifo overrun */
	bmap = bna_rx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1) {
			stats->rx_fifo_errors +=
				bnad->stats.bna_stats->
					hw_stats.rxf_stats[i].frame_drops;
			break;
		}
		bmap >>= 1;
	}
}
static void
bnad_mbox_irq_sync(struct bnad *bnad)
{
	u32 irq;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX)
		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
	else
		irq = bnad->pcidev->irq;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	synchronize_irq(irq);
}
/* Utility used by bnad_start_xmit, for doing TSO */
static int
bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err < 0) {
		BNAD_UPDATE_CTR(bnad, tso_err);
		return err;
	}

	/*
	 * For TSO, the TCP checksum field is seeded with pseudo-header sum
	 * excluding the length field.
	 */
	if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		/* Do we really need these? */
		iph->tot_len = 0;
		iph->check = 0;

		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
					   IPPROTO_TCP, 0);
		BNAD_UPDATE_CTR(bnad, tso4);
	} else {
		struct ipv6hdr *ipv6h = ipv6_hdr(skb);

		ipv6h->payload_len = 0;
		tcp_hdr(skb)->check =
			~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
					 IPPROTO_TCP, 0);
		BNAD_UPDATE_CTR(bnad, tso6);
	}

	return 0;
}
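
/*
 * Note on the seeding above: the length argument passed to
 * csum_tcpudp_magic()/csum_ipv6_magic() is 0, so only the pseudo-header
 * sum without the length field is written into the TCP checksum field.
 * The hardware then folds the per-segment length and payload checksum
 * into it for every LSO segment it generates.
 */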
/*
 * Initialize Q numbers depending on Rx Paths
 * Called with bnad->bna_lock held, because of cfg_flags access.
 */
static void
bnad_q_num_init(struct bnad *bnad)
{
	int rxps;

	rxps = min((uint)num_online_cpus(),
		   (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));

	if (!(bnad->cfg_flags & BNAD_CF_MSIX))
		rxps = 1;	/* INTx */

	bnad->num_rx = 1;
	bnad->num_tx = 1;
	bnad->num_rxp_per_rx = rxps;
	bnad->num_txq_per_tx = BNAD_TXQ_NUM;
}
/*
 * Adjusts the Q numbers, given a number of msix vectors.
 * Give preference to RSS as opposed to Tx priority Queues;
 * in such a case, just use 1 Tx Q.
 * Called with bnad->bna_lock held b'cos of cfg_flags access.
 */
static void
bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
{
	bnad->num_txq_per_tx = 1;
	if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
	     bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
	    (bnad->cfg_flags & BNAD_CF_MSIX)) {
		bnad->num_rxp_per_rx = msix_vectors -
			(bnad->num_tx * bnad->num_txq_per_tx) -
			BNAD_MAILBOX_MSIX_VECTORS;
	} else
		bnad->num_rxp_per_rx = 1;
}
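
/*
 * Illustrative budget (numbers assumed, not taken from this file): with
 * num_tx = 1, num_txq_per_tx forced to 1 and a single mailbox vector, a
 * grant of 8 MSI-X vectors leaves 8 - 1 - 1 = 6 Rx paths per Rx;
 * anything below the minimum budget falls back to a single Rx path.
 */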
/* Enable / disable ioceth */
static int
bnad_ioceth_disable(struct bnad *bnad)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	init_completion(&bnad->bnad_completions.ioc_comp);
	bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
		msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));

	err = bnad->bnad_completions.ioc_comp_status;
	return err;
}
static int
bnad_ioceth_enable(struct bnad *bnad)
{
	int err = 0;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	init_completion(&bnad->bnad_completions.ioc_comp);
	bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
	bna_ioceth_enable(&bnad->bna.ioceth);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
		msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));

	err = bnad->bnad_completions.ioc_comp_status;

	return err;
}
/* Free BNA resources */
static void
bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
	      u32 res_val_max)
{
	int i;

	for (i = 0; i < res_val_max; i++)
		bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
}
/* Allocates memory and interrupt resources for BNA */
static int
bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
	       u32 res_val_max)
{
	int i, err;

	for (i = 0; i < res_val_max; i++) {
		err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_res_free(bnad, res_info, res_val_max);
	return err;
}
/* Interrupt enable / disable */
static void
bnad_enable_msix(struct bnad *bnad)
{
	int i, ret;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (bnad->msix_table)
		return;

	bnad->msix_table =
		kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);

	if (!bnad->msix_table)
		goto intx_mode;

	for (i = 0; i < bnad->msix_num; i++)
		bnad->msix_table[i].entry = i;

	ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
				    1, bnad->msix_num);
	if (ret < 0) {
		goto intx_mode;
	} else if (ret < bnad->msix_num) {
		dev_warn(&bnad->pcidev->dev,
			 "%d MSI-X vectors allocated < %d requested\n",
			 ret, bnad->msix_num);

		spin_lock_irqsave(&bnad->bna_lock, flags);
		/* ret = #of vectors that we got */
		bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
				  (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);

		bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
				 BNAD_MAILBOX_MSIX_VECTORS;

		if (bnad->msix_num > ret) {
			pci_disable_msix(bnad->pcidev);
			goto intx_mode;
		}
	}

	pci_intx(bnad->pcidev, 0);

	return;

intx_mode:
	dev_warn(&bnad->pcidev->dev,
		 "MSI-X enable failed - operating in INTx mode\n");

	kfree(bnad->msix_table);
	bnad->msix_table = NULL;
	bnad->msix_num = 0;
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad->cfg_flags &= ~BNAD_CF_MSIX;
	bnad_q_num_init(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
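
/*
 * Fallback policy of bnad_enable_msix(): a partial MSI-X grant first
 * shrinks the queue configuration via bnad_q_num_adjust(); if even the
 * reduced vector count cannot be met, MSI-X is abandoned, the vector
 * table is freed, BNAD_CF_MSIX is cleared and the queue numbers are
 * re-derived for INTx operation.
 */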
static void
bnad_disable_msix(struct bnad *bnad)
{
	u32 cfg_flags;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	if (bnad->cfg_flags & BNAD_CF_MSIX)
		bnad->cfg_flags &= ~BNAD_CF_MSIX;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		pci_disable_msix(bnad->pcidev);
		kfree(bnad->msix_table);
		bnad->msix_table = NULL;
	}
}
/* Netdev entry points */
static int
bnad_open(struct net_device *netdev)
{
	int err;
	struct bnad *bnad = netdev_priv(netdev);
	struct bna_pause_config pause_config;
	unsigned long flags;

	mutex_lock(&bnad->conf_mutex);

	/* Tx */
	err = bnad_setup_tx(bnad, 0);
	if (err)
		goto err_return;

	/* Rx */
	err = bnad_setup_rx(bnad, 0);
	if (err)
		goto cleanup_tx;

	/* Port */
	pause_config.tx_pause = 0;
	pause_config.rx_pause = 0;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_mtu_set(&bnad->bna.enet,
			 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
	bna_enet_pause_config(&bnad->bna.enet, &pause_config);
	bna_enet_enable(&bnad->bna.enet);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Enable broadcast */
	bnad_enable_default_bcast(bnad);

	/* Restore VLANs, if any */
	bnad_restore_vlans(bnad, 0);

	/* Set the UCAST address */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Start the stats timer */
	bnad_stats_timer_start(bnad);

	mutex_unlock(&bnad->conf_mutex);

	return 0;

cleanup_tx:
	bnad_destroy_tx(bnad, 0);

err_return:
	mutex_unlock(&bnad->conf_mutex);
	return err;
}
static int
bnad_stop(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	mutex_lock(&bnad->conf_mutex);

	/* Stop the stats timer */
	bnad_stats_timer_stop(bnad);

	init_completion(&bnad->bnad_completions.enet_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
			 bnad_cb_enet_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion(&bnad->bnad_completions.enet_comp);

	bnad_destroy_tx(bnad, 0);
	bnad_destroy_rx(bnad, 0);

	/* Synchronize mailbox IRQ */
	bnad_mbox_irq_sync(bnad);

	mutex_unlock(&bnad->conf_mutex);

	return 0;
}
/* Returns 0 for success */
static int
bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
		    struct sk_buff *skb, struct bna_txq_entry *txqent)
{
	u16 flags = 0;
	u32 gso_size;
	u16 vlan_tag = 0;

	if (skb_vlan_tag_present(skb)) {
		vlan_tag = (u16)skb_vlan_tag_get(skb);
		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
	}
	if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
		vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
				| (vlan_tag & 0x1fff);
		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
	}
	txqent->hdr.wi.vlan_tag = htons(vlan_tag);

	if (skb_is_gso(skb)) {
		gso_size = skb_shinfo(skb)->gso_size;
		if (unlikely(gso_size > bnad->netdev->mtu)) {
			BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
			return -EINVAL;
		}
		if (unlikely((gso_size + skb_transport_offset(skb) +
			      tcp_hdrlen(skb)) >= skb->len)) {
			txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
			txqent->hdr.wi.lso_mss = 0;
			BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
		} else {
			txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
			txqent->hdr.wi.lso_mss = htons(gso_size);
		}

		if (bnad_tso_prepare(bnad, skb)) {
			BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
			return -EINVAL;
		}

		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
		txqent->hdr.wi.l4_hdr_size_n_offset =
			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
			      tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
	} else {
		txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
		txqent->hdr.wi.lso_mss = 0;

		if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) {
			BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
			return -EINVAL;
		}

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			__be16 net_proto = vlan_get_protocol(skb);
			u8 proto = 0;

			if (net_proto == htons(ETH_P_IP))
				proto = ip_hdr(skb)->protocol;
#ifdef NETIF_F_IPV6_CSUM
			else if (net_proto == htons(ETH_P_IPV6)) {
				/* nexthdr may not be TCP immediately. */
				proto = ipv6_hdr(skb)->nexthdr;
			}
#endif
			if (proto == IPPROTO_TCP) {
				flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
				txqent->hdr.wi.l4_hdr_size_n_offset =
					htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
					      (0, skb_transport_offset(skb)));

				BNAD_UPDATE_CTR(bnad, tcpcsum_offload);

				if (unlikely(skb_headlen(skb) <
					     skb_transport_offset(skb) +
					     tcp_hdrlen(skb))) {
					BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
					return -EINVAL;
				}
			} else if (proto == IPPROTO_UDP) {
				flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
				txqent->hdr.wi.l4_hdr_size_n_offset =
					htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
					      (0, skb_transport_offset(skb)));

				BNAD_UPDATE_CTR(bnad, udpcsum_offload);
				if (unlikely(skb_headlen(skb) <
					     skb_transport_offset(skb) +
					     sizeof(struct udphdr))) {
					BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
					return -EINVAL;
				}
			} else {
				BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
				return -EINVAL;
			}
		} else
			txqent->hdr.wi.l4_hdr_size_n_offset = 0;
	}

	txqent->hdr.wi.flags = htons(flags);
	txqent->hdr.wi.frame_length = htonl(skb->len);

	return 0;
}
/*
 * bnad_start_xmit : Netdev entry point for Transmit
 *		     Called under lock held by net_device
 */
static netdev_tx_t
bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	u32 txq_id = 0;
	struct bna_tcb *tcb = NULL;
	struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
	u32 prod, q_depth, vect_id;
	u32 wis, vectors, len;
	int i;
	dma_addr_t dma_addr;
	struct bna_txq_entry *txqent;

	len = skb_headlen(skb);

	/* Sanity checks for the skb */

	if (unlikely(skb->len <= ETH_HLEN)) {
		dev_kfree_skb_any(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
		return NETDEV_TX_OK;
	}
	if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
		dev_kfree_skb_any(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
		return NETDEV_TX_OK;
	}
	if (unlikely(len == 0)) {
		dev_kfree_skb_any(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
		return NETDEV_TX_OK;
	}

	tcb = bnad->tx_info[0].tcb[txq_id];

	/*
	 * Takes care of the Tx that is scheduled between clearing the flag
	 * and the netif_tx_stop_all_queues() call.
	 */
	if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
		dev_kfree_skb_any(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
		return NETDEV_TX_OK;
	}

	q_depth = tcb->q_depth;
	prod = tcb->producer_index;
	unmap_q = tcb->unmap_q;

	vectors = 1 + skb_shinfo(skb)->nr_frags;
	wis = BNA_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */

	if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
		dev_kfree_skb_any(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
		return NETDEV_TX_OK;
	}

	/* Check for available TxQ resources */
	if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
		if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
			u32 sent;

			sent = bnad_txcmpl_process(bnad, tcb);
			if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
				bna_ib_ack(tcb->i_dbell, sent);
			smp_mb__before_atomic();
			clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
		} else {
			netif_stop_queue(netdev);
			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
		}

		smp_mb();
		/*
		 * Check again to deal with race condition between
		 * netif_stop_queue here, and netif_wake_queue in
		 * interrupt handler which is not inside netif tx lock.
		 */
		if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
			return NETDEV_TX_BUSY;
		} else {
			netif_wake_queue(netdev);
			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
		}
	}

	txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
	head_unmap = &unmap_q[prod];

	/* Program the opcode, flags, frame_len, num_vectors in WI */
	if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	txqent->hdr.wi.reserved = 0;
	txqent->hdr.wi.num_vectors = vectors;

	head_unmap->skb = skb;
	head_unmap->nvecs = 0;

	/* Program the vectors */
	unmap = head_unmap;
	dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
				  len, DMA_TO_DEVICE);
	if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
		dev_kfree_skb_any(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
		return NETDEV_TX_OK;
	}
	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
	txqent->vector[0].length = htons(len);
	dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
	head_unmap->nvecs++;

	for (i = 0, vect_id = 0; i < vectors - 1; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		u32 size = skb_frag_size(frag);

		if (unlikely(size == 0)) {
			/* Undo the changes starting at tcb->producer_index */
			bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
					   tcb->producer_index);
			dev_kfree_skb_any(skb);
			BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
			return NETDEV_TX_OK;
		}

		len += size;

		vect_id++;
		if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
			vect_id = 0;
			BNA_QE_INDX_INC(prod, q_depth);
			txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
			txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
			unmap = &unmap_q[prod];
		}

		dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
					    0, size, DMA_TO_DEVICE);
		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
			/* Undo the changes starting at tcb->producer_index */
			bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
					   tcb->producer_index);
			dev_kfree_skb_any(skb);
			BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
			return NETDEV_TX_OK;
		}

		dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
		txqent->vector[vect_id].length = htons(size);
		dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
				   dma_addr);
		head_unmap->nvecs++;
	}

	if (unlikely(len != skb->len)) {
		/* Undo the changes starting at tcb->producer_index */
		bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
		dev_kfree_skb_any(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
		return NETDEV_TX_OK;
	}

	BNA_QE_INDX_INC(prod, q_depth);
	tcb->producer_index = prod;

	smp_mb();

	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		return NETDEV_TX_OK;

	skb_tx_timestamp(skb);

	bna_txq_prod_indx_doorbell(tcb);
	smp_mb();

	return NETDEV_TX_OK;
}
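
/*
 * Error-handling convention in bnad_start_xmit(): once any fragment has
 * been DMA-mapped, every failure path calls bnad_tx_buff_unmap()
 * starting at tcb->producer_index so the unmap queue and the DMA
 * mappings stay consistent, then frees the skb and returns
 * NETDEV_TX_OK; the drop is accounted in a driver counter rather than
 * requeued.
 */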
/*
 * Used spin_lock to synchronize reading of stats structures, which
 * is written by BNA under the same lock.
 */
static struct rtnl_link_stats64 *
bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	bnad_netdev_qstats_fill(bnad, stats);
	bnad_netdev_hwstats_fill(bnad, stats);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return stats;
}
static void
bnad_set_rx_ucast_fltr(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;
	int uc_count = netdev_uc_count(netdev);
	enum bna_cb_status ret;
	u8 *mac_list;
	struct netdev_hw_addr *ha;
	int entry;

	if (netdev_uc_empty(bnad->netdev)) {
		bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
		return;
	}

	if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
		goto mode_default;

	mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC);
	if (mac_list == NULL)
		goto mode_default;

	entry = 0;
	netdev_for_each_uc_addr(ha, netdev) {
		ether_addr_copy(&mac_list[entry * ETH_ALEN], &ha->addr[0]);
		entry++;
	}

	ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list);
	kfree(mac_list);

	if (ret != BNA_CB_SUCCESS)
		goto mode_default;

	return;

	/* ucast packets not in UCAM are routed to default function */
mode_default:
	bnad->cfg_flags |= BNAD_CF_DEFAULT;
	bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
}
static void
bnad_set_rx_mcast_fltr(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;
	int mc_count = netdev_mc_count(netdev);
	enum bna_cb_status ret;
	u8 *mac_list;

	if (netdev->flags & IFF_ALLMULTI)
		goto mode_allmulti;

	if (netdev_mc_empty(netdev))
		return;

	if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
		goto mode_allmulti;

	mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC);

	if (mac_list == NULL)
		goto mode_allmulti;

	ether_addr_copy(&mac_list[0], &bnad_bcast_addr[0]);

	/* copy rest of the MCAST addresses */
	bnad_netdev_mc_list_get(netdev, mac_list);
	ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list);
	kfree(mac_list);

	if (ret != BNA_CB_SUCCESS)
		goto mode_allmulti;

	return;

mode_allmulti:
	bnad->cfg_flags |= BNAD_CF_ALLMULTI;
	bna_rx_mcast_delall(bnad->rx_info[0].rx);
}
void
bnad_set_rx_mode(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	enum bna_rxmode new_mode, mode_mask;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	if (bnad->rx_info[0].rx == NULL) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return;
	}

	/* clear bnad flags to update it with new settings */
	bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
			BNAD_CF_ALLMULTI);

	new_mode = 0;
	if (netdev->flags & IFF_PROMISC) {
		new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
		bnad->cfg_flags |= BNAD_CF_PROMISC;
	} else {
		bnad_set_rx_mcast_fltr(bnad);

		if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
			new_mode |= BNA_RXMODE_ALLMULTI;

		bnad_set_rx_ucast_fltr(bnad);

		if (bnad->cfg_flags & BNAD_CF_DEFAULT)
			new_mode |= BNA_RXMODE_DEFAULT;
	}

	mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
			BNA_RXMODE_ALLMULTI;
	bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
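
/*
 * Mode precedence in bnad_set_rx_mode(): IFF_PROMISC short-circuits the
 * filter programming and requests promiscuous + default Rx; otherwise
 * the multicast and unicast filter setup above may individually fall
 * back to ALLMULTI or DEFAULT mode when the corresponding CAM runs out
 * of entries.
 */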
/*
 * bna_lock is used to sync writes to netdev->addr
 * conf_lock cannot be used since this call may be made
 * in a non-blocking context.
 */
static int
bnad_set_mac_address(struct net_device *netdev, void *addr)
{
	int err;
	struct bnad *bnad = netdev_priv(netdev);
	struct sockaddr *sa = (struct sockaddr *)addr;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
	if (!err)
		ether_addr_copy(netdev->dev_addr, sa->sa_data);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return err;
}
static int
bnad_mtu_set(struct bnad *bnad, int frame_size)
{
	unsigned long flags;

	init_completion(&bnad->bnad_completions.mtu_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion(&bnad->bnad_completions.mtu_comp);

	return bnad->bnad_completions.mtu_comp_status;
}
static int
bnad_change_mtu(struct net_device *netdev, int new_mtu)
{
	int err, mtu;
	struct bnad *bnad = netdev_priv(netdev);
	u32 rx_count = 0, frame, new_frame;

	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
		return -EINVAL;

	mutex_lock(&bnad->conf_mutex);

	mtu = netdev->mtu;
	netdev->mtu = new_mtu;

	frame = BNAD_FRAME_SIZE(mtu);
	new_frame = BNAD_FRAME_SIZE(new_mtu);

	/* check if multi-buffer needs to be enabled */
	if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
	    netif_running(bnad->netdev)) {
		/* only when transition is over 4K */
		if ((frame <= 4096 && new_frame > 4096) ||
		    (frame > 4096 && new_frame <= 4096))
			rx_count = bnad_reinit_rx(bnad);
	}

	/* rx_count > 0 - new rx created
	 *	- Linux set err = 0 and return
	 */
	err = bnad_mtu_set(bnad, new_frame);
	if (err)
		err = -EBUSY;

	mutex_unlock(&bnad->conf_mutex);
	return err;
}
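
/*
 * Illustrative threshold (MTU values assumed): going from an MTU of
 * 1500 to 9000 moves BNAD_FRAME_SIZE() from below 4096 to above it, so
 * on CAT2 devices with the interface running the Rx path is rebuilt via
 * bnad_reinit_rx() to switch multi-buffer mode; a change that stays on
 * one side of 4K leaves the Rx configuration untouched.
 */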
static int
bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	if (!bnad->rx_info[0].rx)
		return 0;

	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
	set_bit(vid, bnad->active_vlans);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);

	return 0;
}
static int
bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	if (!bnad->rx_info[0].rx)
		return 0;

	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	clear_bit(vid, bnad->active_vlans);
	bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);

	return 0;
}
static int bnad_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnad *bnad = netdev_priv(dev);
	netdev_features_t changed = features ^ dev->features;

	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) {
		unsigned long flags;

		spin_lock_irqsave(&bnad->bna_lock, flags);

		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
		else
			bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);

		spin_unlock_irqrestore(&bnad->bna_lock, flags);
	}

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void
bnad_netpoll(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	u32 curr_mask;
	int i, j;

	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
		bna_intx_disable(&bnad->bna, curr_mask);
		bnad_isr(bnad->pcidev->irq, netdev);
		bna_intx_enable(&bnad->bna, curr_mask);
	} else {
		/*
		 * Tx processing may happen in sending context, so no need
		 * to explicitly process completions here
		 */

		/* Rx processing */
		for (i = 0; i < bnad->num_rx; i++) {
			rx_info = &bnad->rx_info[i];
			if (!rx_info->rx)
				continue;
			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
				rx_ctrl = &rx_info->rx_ctrl[j];
				if (rx_ctrl->ccb)
					bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
			}
		}
	}
}
#endif
static const struct net_device_ops bnad_netdev_ops = {
	.ndo_open		= bnad_open,
	.ndo_stop		= bnad_stop,
	.ndo_start_xmit		= bnad_start_xmit,
	.ndo_get_stats64	= bnad_get_stats64,
	.ndo_set_rx_mode	= bnad_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnad_set_mac_address,
	.ndo_change_mtu		= bnad_change_mtu,
	.ndo_vlan_rx_add_vid	= bnad_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= bnad_vlan_rx_kill_vid,
	.ndo_set_features	= bnad_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bnad_netpoll
#endif
};
static void
bnad_netdev_init(struct bnad *bnad, bool using_dac)
{
	struct net_device *netdev = bnad->netdev;

	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX |
		NETIF_F_HW_VLAN_CTAG_RX;

	netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6;

	netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;

	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->mem_start = bnad->mmio_start;
	netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;

	netdev->netdev_ops = &bnad_netdev_ops;
	bnad_set_ethtool_ops(netdev);
}
/*
 * 1. Initialize the bnad structure
 * 2. Setup netdev pointer in pci_dev
 * 3. Initialize no. of TxQ & CQs & MSIX vectors
 * 4. Initialize work queue.
 */
static int
bnad_init(struct bnad *bnad,
	  struct pci_dev *pdev, struct net_device *netdev)
{
	unsigned long flags;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	bnad->netdev = netdev;
	bnad->pcidev = pdev;
	bnad->mmio_start = pci_resource_start(pdev, 0);
	bnad->mmio_len = pci_resource_len(pdev, 0);
	bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
	if (!bnad->bar0) {
		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
		return -ENOMEM;
	}
	dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0,
		 (unsigned long long) bnad->mmio_len);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!bnad_msix_disable)
		bnad->cfg_flags = BNAD_CF_MSIX;

	bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;

	bnad_q_num_init(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
		(bnad->num_rx * bnad->num_rxp_per_rx) +
			 BNAD_MAILBOX_MSIX_VECTORS;

	bnad->txq_depth = BNAD_TXQ_DEPTH;
	bnad->rxq_depth = BNAD_RXQ_DEPTH;

	bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
	bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;

	sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
	bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
	if (!bnad->work_q) {
		iounmap(bnad->bar0);
		return -ENOMEM;
	}

	return 0;
}
/*
 * Must be called after bnad_pci_uninit()
 * so that iounmap() and pci_set_drvdata(NULL)
 * happens only after PCI uninitialization.
 */
static void
bnad_uninit(struct bnad *bnad)
{
	if (bnad->work_q) {
		flush_workqueue(bnad->work_q);
		destroy_workqueue(bnad->work_q);
		bnad->work_q = NULL;
	}

	if (bnad->bar0)
		iounmap(bnad->bar0);
}
/*
 * Initialize locks
 *	a) Per ioceth mutex used for serializing configuration
 *	   changes from OS interface
 *	b) spin lock used to protect bna state machine
 */
static void
bnad_lock_init(struct bnad *bnad)
{
	spin_lock_init(&bnad->bna_lock);
	mutex_init(&bnad->conf_mutex);
	mutex_init(&bnad_list_mutex);
}

static void
bnad_lock_uninit(struct bnad *bnad)
{
	mutex_destroy(&bnad->conf_mutex);
	mutex_destroy(&bnad_list_mutex);
}
/* PCI Initialization */
static int
bnad_pci_init(struct bnad *bnad,
	      struct pci_dev *pdev, bool *using_dac)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;
	err = pci_request_regions(pdev, BNAD_NAME);
	if (err)
		goto disable_device;
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		*using_dac = true;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			goto release_regions;
		*using_dac = false;
	}
	pci_set_master(pdev);
	return 0;

release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);

	return err;
}

static void
bnad_pci_uninit(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
static int
bnad_pci_probe(struct pci_dev *pdev,
	       const struct pci_device_id *pcidev_id)
{
	bool using_dac;
	int err;
	struct bnad *bnad;
	struct bna *bna;
	struct net_device *netdev;
	struct bfa_pcidev pcidev_info;
	unsigned long flags;

	mutex_lock(&bnad_fwimg_mutex);
	if (!cna_get_firmware_buf(pdev)) {
		mutex_unlock(&bnad_fwimg_mutex);
		dev_err(&pdev->dev, "failed to load firmware image!\n");
		return -ENODEV;
	}
	mutex_unlock(&bnad_fwimg_mutex);

	/*
	 * Allocates sizeof(struct net_device + struct bnad)
	 * bnad = netdev->priv
	 */
	netdev = alloc_etherdev(sizeof(struct bnad));
	if (!netdev) {
		err = -ENOMEM;
		return err;
	}
	bnad = netdev_priv(netdev);
	bnad_lock_init(bnad);
	bnad_add_to_list(bnad);

	mutex_lock(&bnad->conf_mutex);
	/*
	 * PCI initialization
	 *	Output : using_dac = 1 for 64 bit DMA
	 *			   = 0 for 32 bit DMA
	 */
	using_dac = false;
	err = bnad_pci_init(bnad, pdev, &using_dac);
	if (err)
		goto unlock_mutex;

	/*
	 * Initialize bnad structure
	 * Setup relation between pci_dev & netdev
	 */
	err = bnad_init(bnad, pdev, netdev);
	if (err)
		goto pci_uninit;

	/* Initialize netdev structure, set up ethtool ops */
	bnad_netdev_init(bnad, using_dac);

	/* Set link to down state */
	netif_carrier_off(netdev);

	/* Setup the debugfs node for this bfad */
	if (bna_debugfs_enable)
		bnad_debugfs_init(bnad);

	/* Get resource requirement form bna */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_res_req(&bnad->res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Allocate resources from bna */
	err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
	if (err)
		goto drv_uninit;

	bna = &bnad->bna;

	/* Setup pcidev_info for bna_init() */
	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
	pcidev_info.device_id = bnad->pcidev->device;
	pcidev_info.pci_bar_kva = bnad->bar0;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad->stats.bna_stats = &bna->stats;

	bnad_enable_msix(bnad);
	err = bnad_mbox_irq_alloc(bnad);
	if (err)
		goto res_free;

	/* Set up timers */
	setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
		    (unsigned long)bnad);
	setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
		    (unsigned long)bnad);
	setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
		    (unsigned long)bnad);
	setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
		    (unsigned long)bnad);

	/*
	 * Start the chip.
	 * If the call back comes with error, we bail out.
	 * This is a catastrophic error.
	 */
	err = bnad_ioceth_enable(bnad);
	if (err) {
		dev_err(&pdev->dev, "initialization failed err=%d\n", err);
		goto probe_success;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
	    bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
		bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
			bna_attr(bna)->num_rxp - 1);
		if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
		    bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
			err = -EIO;
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (err)
		goto disable_ioceth;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
	if (err) {
		err = -EIO;
		goto disable_ioceth;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Get the burnt-in mac */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr);
	bnad_set_netdev_perm_addr(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);

	/* Finally, register with net_device layer */
	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "registering net device failed\n");
		goto probe_uninit;
	}
	set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);

	return 0;

probe_success:
	mutex_unlock(&bnad->conf_mutex);
	return 0;

probe_uninit:
	mutex_lock(&bnad->conf_mutex);
	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
disable_ioceth:
	bnad_ioceth_disable(bnad);
	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	bnad_mbox_irq_free(bnad);
	bnad_disable_msix(bnad);
res_free:
	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
drv_uninit:
	/* Remove the debugfs node for this bnad */
	kfree(bnad->regdata);
	bnad_debugfs_uninit(bnad);
	bnad_uninit(bnad);
pci_uninit:
	bnad_pci_uninit(pdev);
unlock_mutex:
	mutex_unlock(&bnad->conf_mutex);
	bnad_remove_from_list(bnad);
	bnad_lock_uninit(bnad);
	free_netdev(netdev);
	return err;
}
static void
bnad_pci_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnad *bnad;
	struct bna *bna;
	unsigned long flags;

	if (!netdev)
		return;

	bnad = netdev_priv(netdev);
	bna = &bnad->bna;

	if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
		unregister_netdev(netdev);

	mutex_lock(&bnad->conf_mutex);
	bnad_ioceth_disable(bnad);
	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
	bnad_mbox_irq_free(bnad);
	bnad_disable_msix(bnad);
	bnad_pci_uninit(pdev);
	mutex_unlock(&bnad->conf_mutex);
	bnad_remove_from_list(bnad);
	bnad_lock_uninit(bnad);
	/* Remove the debugfs node for this bnad */
	kfree(bnad->regdata);
	bnad_debugfs_uninit(bnad);
	bnad_uninit(bnad);
	free_netdev(netdev);
}
static const struct pci_device_id bnad_pci_id_table[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
			   PCI_DEVICE_ID_BROCADE_CT),
		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
		.class_mask = 0xffff00
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
			   BFA_PCI_DEVICE_ID_CT2),
		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
		.class_mask = 0xffff00
	},
	{0, },
};

MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
static struct pci_driver bnad_pci_driver = {
	.name = BNAD_NAME,
	.id_table = bnad_pci_id_table,
	.probe = bnad_pci_probe,
	.remove = bnad_pci_remove,
};
static int __init
bnad_module_init(void)
{
	int err;

	pr_info("bna: QLogic BR-series 10G Ethernet driver - version: %s\n",
		BNAD_VERSION);

	bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);

	err = pci_register_driver(&bnad_pci_driver);
	if (err < 0) {
		pr_err("bna: PCI driver registration failed err=%d\n", err);
		return err;
	}

	return 0;
}

static void __exit
bnad_module_exit(void)
{
	pci_unregister_driver(&bnad_pci_driver);
	release_firmware(bfi_fw);
}

module_init(bnad_module_init);
module_exit(bnad_module_exit);
MODULE_AUTHOR("Brocade");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver");
MODULE_VERSION(BNAD_VERSION);
MODULE_FIRMWARE(CNA_FW_FILE_CT);
MODULE_FIRMWARE(CNA_FW_FILE_CT2);