/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>
#include <linux/firmware.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"

static int bnx2x_setup_irqs(struct bnx2x *bp);
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {
		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
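/* Note on the walk above: each transmitted packet occupies a start BD, a
 * parsing BD, one data BD per fragment and, when the TSO header was split
 * off, an extra data BD.  Only the start BD and the fragment BDs carry their
 * own DMA mappings, which is why the parsing BD and the split-header BD are
 * skipped without an unmap.
 */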
int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u  sw_cons %u  "
				      "pkt_cons %u\n",
		   fp->index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */
		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}
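/* The smp_mb() above pairs with the memory barrier taken in
 * bnx2x_start_xmit() after netif_tx_stop_queue(): the consumer update must
 * be visible before the queue state is tested, otherwise the queue could
 * remain stopped even though descriptors have been freed.
 */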
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp,
			RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
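/* The sge_mask is a bitmap with one bit per SGE ring entry; a cleared bit
 * marks a page consumed by the current aggregation.  rx_sge_prod is only
 * advanced over mask elements that became fully zero (they are then re-armed
 * to RX_SGE_MASK_ELEM_ONE_MASK), so partially used elements hold the
 * producer back until all of their pages have been returned.
 */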
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
	   fp->tpa_queue_used);
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
	   fp->tpa_queue_used);
#endif
#endif
}
/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12

/**
 * Calculate the approximate value of the MSS for this
 * aggregation using the first packet of it.
 *
 * @param parsing_flags Parsing flags from the START CQE
 * @param len_on_bd Total length of the first packet for the
 *		    aggregation.
 */
static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
				    u16 len_on_bd)
{
	/* A TPA aggregation won't have IP options or TCP options
	 * other than timestamp.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);

	/* Check if there was a TCP timestamp; if there is, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	return len_on_bd - hdrs_len;
}
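/* Worked example for the calculation above: for an IPv4/TCP aggregation with
 * the timestamp option present, hdrs_len = 14 (ETH_HLEN) + 20 (struct iphdr)
 * + 20 (struct tcphdr) + 12 (TPA_TSTAMP_OPT_LEN) = 66, so the reported MSS
 * is len_on_bd - 66.
 */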
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx, u16 parsing_flags)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
							      len_on_bd);

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx =
			RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
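/* Setting skb_shinfo(skb)->gso_size above is what allows the stack to
 * re-segment an aggregated (LRO/TPA) skb if it has to be forwarded, which is
 * why the MSS is reconstructed from the first packet of the aggregation.
 */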
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
		u16 parsing_flags =
			le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);

		prefetch(skb);
		prefetch(((char *)(skb)) + L1_CACHE_BYTES);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx,
					 parsing_flags)) {
			if (parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb,
					le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
			napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
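/* TPA bin handling: bnx2x_tpa_start() parks the half-filled skb in
 * fp->tpa_pool[queue] and hands the pool's empty skb to the receive ring;
 * bnx2x_tpa_stop() completes the parked skb (or drops it on error) and
 * refills the bin with a freshly allocated skb before marking the bin
 * BNX2X_TPA_STOP again.
 */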
/* Set Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
					struct sk_buff *skb)
{
	/* Set Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->fast_path_cqe.status_flags &
	     ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
		skb->rxhash =
			le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
}
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			prefetch(skb);
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* - If CQE is marked both TPA_START and TPA_END it is
			 *   a non-TPA CQE.
			 * - FP CQE will always have either TPA_START or/and
			 *   TPA_STOP flags set.
			 */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);

					/* Set Toeplitz hash for an LRO skb */
					bnx2x_set_skb_rxhash(bp, cqe, skb);

					goto next_rx;
				} else { /* TPA_STOP */
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			dma_sync_single_for_device(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						   pad + RX_COPY_THRESH,
						   DMA_FROM_DEVICE);
			prefetch(((char *)(skb)) + L1_CACHE_BYTES);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			/* Set Toeplitz hash for a non-LRO skb */
			bnx2x_set_skb_rxhash(bp, cqe, skb);

			skb_checksum_none_assert(skb);

			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

		if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		napi_gro_receive(&fp->napi, skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
			 "[fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}
/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;

	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}
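/* The two branches above interpret maxCfg differently: one scales the
 * current line speed by maxCfg/100 (a percentage), the other caps the speed
 * at maxCfg * 100 Mbps; e.g. maxCfg = 25 on a 10G link yields 2500 Mbps in
 * the latter case.
 */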
void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->flags & MF_FUNC_DIS) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	}

	if (bp->link_vars.link_up) {
		u16 line_speed;

		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC Link is Up, ");

		line_speed = bnx2x_get_mf_speed(bp);

		pr_cont("%d Mbps ", line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			pr_cont("full duplex");
		else
			pr_cont("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					pr_cont("& transmit ");
			} else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
	}
}
/* Returns the number of actually allocated BDs */
static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
				     int rx_ring_size)
{
	struct bnx2x *bp = fp->bp;
	u16 ring_prod, cqe_ring_prod;
	int i;

	fp->rx_comp_cons = 0;
	cqe_ring_prod = ring_prod = 0;
	for (i = 0; i < rx_ring_size; i++) {
		if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
			BNX2X_ERR("was only able to allocate "
				  "%d rx skbs on queue[%d]\n", i, fp->index);
			fp->eth_q_stats.rx_skb_alloc_failed++;
			break;
		}
		ring_prod = NEXT_RX_IDX(ring_prod);
		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
		WARN_ON(ring_prod <= i);
	}

	fp->rx_bd_prod = ring_prod;
	/* Limit the CQE producer by the CQE ring size */
	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
				 cqe_ring_prod);
	fp->rx_pkt = fp->rx_calls = 0;

	return i;
}
static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
					      MAX_RX_AVAIL/bp->num_queues;

	rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);

	bnx2x_alloc_rx_bds(fp, rx_ring_size);

	/* Warning!
	 * this will generate an interrupt (to the TSTORM)
	 * must only be done after chip is initialized
	 */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);
}
void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod;
	int i, j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (!fp->disable_tpa) {
			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, fp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
					BNX2X_ERR("was only able to allocate "
						  "%d rx sges\n", i);
					BNX2X_ERR("disabling TPA for"
						  " queue[%d]\n", j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    max_agg_queues);
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		bnx2x_set_next_page_rx_bd(fp);

		/* CQ ring */
		bnx2x_set_next_page_rx_cq(fp);

		/* Allocate BDs and initialize BD ring */
		bnx2x_alloc_rx_bd_ring(fp);

		if (j != 0)
			continue;

		if (!CHIP_IS_E2(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}
static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}
void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}
static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_eth_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG)
		bnx2x_free_msix_irqs(bp);
	else if (bp->flags & USING_MSI_FLAG)
		free_irq(bp->pdev->irq, bp->dev);
	else
		free_irq(bp->pdev->irq, bp->dev);
}
int bnx2x_enable_msix(struct bnx2x *bp)
{
	int msix_vec = 0, i, rc, req_cnt;

	bp->msix_table[msix_vec].entry = msix_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
	   bp->msix_table[0].entry);
	msix_vec++;

	bp->msix_table[msix_vec].entry = msix_vec;
	DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
	   bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
	msix_vec++;

	for_each_eth_queue(bp, i) {
		bp->msix_table[msix_vec].entry = msix_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", msix_vec, msix_vec, i);
		msix_vec++;
	}

	req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1;

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);

	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
		/* how many fewer vectors will we have? */
		int diff = req_cnt - rc;

		DP(NETIF_MSG_IFUP,
		   "Trying to use less MSI-X vectors: %d\n", rc);

		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);

		if (rc) {
			DP(NETIF_MSG_IFUP,
			   "MSI-X is not attainable  rc %d\n", rc);
			return rc;
		}
		/*
		 * decrease number of queues by number of unallocated entries
		 */
		bp->num_queues -= diff;

		DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
		   bp->num_queues);
	} else if (rc) {
		/* fall to INTx if not enough memory */
		if (rc == -ENOMEM)
			bp->flags |= DISABLE_MSI_FLAG;
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
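/* With the old pci_enable_msix() semantics a positive return value reports
 * how many vectors are actually available; the code above retries with that
 * count and shrinks bp->num_queues by the difference, falling back towards
 * INTx only when even the minimum vector count cannot be allocated.
 */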
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		offset++;
		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_ETH_QUEUES(bp);
	offset = 1 + CNIC_CONTEXT_USE;
	netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
		    " ... fp[%d] %d\n",
		    bp->msix_table[0].vector,
		    0, bp->msix_table[offset].vector,
		    i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}
int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}
static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}
static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_napi_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_napi_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}
void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
}
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (NO_FCOE(bp))
		return skb_tx_hash(dev, skb);
	else {
		struct ethhdr *hdr = (struct ethhdr *)skb->data;
		u16 ether_type = ntohs(hdr->h_proto);

		/* Skip VLAN tag if present */
		if (ether_type == ETH_P_8021Q) {
			struct vlan_ethhdr *vhdr =
				(struct vlan_ethhdr *)skb->data;

			ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
		}

		/* If ethertype is FCoE or FIP - use FCoE ring */
		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
			return bnx2x_fcoe(bp, index);
	}

	/* Select a non-FCoE queue:  if FCoE is enabled, exclude FCoE L2 ring
	 */
	return __skb_tx_hash(dev, skb,
			dev->real_num_tx_queues - FCOE_CONTEXT_USE);
}
void bnx2x_set_num_queues(struct bnx2x *bp)
{
	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		bp->num_queues = 1;
		break;
	case ETH_RSS_MODE_REGULAR:
		bp->num_queues = bnx2x_calc_num_queues(bp);
		break;

	default:
		bp->num_queues = 1;
		break;
	}

	/* Add special queues */
	bp->num_queues += NONE_ETH_CONTEXT_USE;
}
static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x *bp)
{
	bnx2x_set_fip_eth_mac_addr(bp, 1);
	bnx2x_set_all_enode_macs(bp, 1);
	bp->flags |= FCOE_MACS_SET;
}
static void bnx2x_release_firmware(struct bnx2x *bp)
{
	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);
}
static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
{
	int rc, num = bp->num_queues;

	if (NO_FCOE(bp))
		num -= FCOE_CONTEXT_USE;

	netif_set_real_num_tx_queues(bp->dev, num);
	rc = netif_set_real_num_rx_queues(bp->dev, num);
	return rc;
}
static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
		if (IS_FCOE_IDX(i))
			/*
			 * Although there are no IP frames expected to arrive to
			 * this ring we still want to add an
			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
			 * overrun
			 */
			fp->rx_buf_size =
				BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
				BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
		else
			fp->rx_buf_size =
				bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
				IP_HEADER_ALIGNMENT_PADDING;
	}
}
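/* rx_buf_size is the full receive buffer: the MTU (or the FCoE mini-jumbo
 * MTU) plus L2 overhead (ETH_OVREHEAD), the alignment reserve
 * (BNX2X_RX_ALIGN) and IP_HEADER_ALIGNMENT_PADDING used to keep the IP
 * header aligned.
 */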
/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp);
	if (rc) {
		BNX2X_ERR("Error loading firmware\n");
		return rc;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* must be called before memory allocation and HW init */
	bnx2x_ilt_set_info(bp);

	/* Set the receive queues buffer size */
	bnx2x_set_rx_buf_size(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	rc = bnx2x_set_real_num_queues(bp);
	if (rc) {
		BNX2X_ERR("Unable to set real_num_queues\n");
		goto load_error0;
	}

	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	/* We don't want TPA on FCoE L2 ring */
	bnx2x_fcoe(bp, disable_tpa) = 1;

	bnx2x_napi_enable(bp);

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error1;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error1;
		}

	} else {
		int path = BP_PATH(bp);
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		load_count[path][0]++;
		load_count[path][1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		if (load_count[path][0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[path][1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		goto load_error2;
	}

	/* Connect to IRQs */
	rc = bnx2x_setup_irqs(bp);
	if (rc) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bnx2x_dcbx_init(bp);

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_func_start(bp);
	if (rc) {
		BNX2X_ERR("Function start failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	if (!CHIP_IS_E1(bp) &&
	    (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
		DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
		bp->flags |= MF_FUNC_DIS;
	}

	/* Enable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);

	for_each_nondefault_queue(bp, i) {
		rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
		if (rc)
			goto load_error4;
	}

	/* Now when Clients are configured we are ready to work */
	bp->state = BNX2X_STATE_OPEN;

	bnx2x_set_fcoe_eth_macs(bp);

	bnx2x_set_eth_mac(bp, 1);

	/* Clear MC configuration */
	if (CHIP_IS_E1(bp))
		bnx2x_invalidate_e1_mc_list(bp);
	else
		bnx2x_invalidate_e1h_mc_list(bp);

	/* Clear UC lists configuration */
	bnx2x_invalidate_uc_list(bp);

	if (bp->pending_max) {
		bnx2x_update_max_mf_config(bp, bp->pending_max);
		bp->pending_max = 0;
	}

	bnx2x_initial_phy_init(bp, load_mode);

	/* Initialize Rx filtering */
	bnx2x_set_rx_mode(bp->dev);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		smp_mb__after_clear_bit();
		break;

	case LOAD_DIAG:
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);

	bnx2x_inc_load_cnt(bp);

	bnx2x_release_firmware(bp);

	return 0;

load_error4:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
load_error3:
	bnx2x_int_disable_sync(bp, 1);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error2:
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
	}

	bp->port.pmf = 0;
load_error1:
	bnx2x_napi_disable(bp);
load_error0:
	bnx2x_free_mem(bp);
	bnx2x_release_firmware(bp);

	return rc;
}
/* must be called with rtnl_lock */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int i;

	if (bp->state == BNX2X_STATE_CLOSED) {
		/* Interface has been removed - nothing to recover */
		bp->recovery_state = BNX2X_RECOVERY_DONE;
		bp->is_leader = 0;
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
		smp_wmb();

		return -EINVAL;
	}

	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	/* Set "drop all" */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* Stop Tx */
	bnx2x_tx_disable(bp);

	del_timer_sync(&bp->timer);

	SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));

	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Cleanup the chip if needed */
	if (unload_mode != UNLOAD_RECOVERY)
		bnx2x_chip_cleanup(bp, unload_mode);
	else {
		/* Disable HW interrupts, NAPI and Tx */
		bnx2x_netif_stop(bp, 1);

		/* Release IRQs */
		bnx2x_free_irq(bp);
	}

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	/* The last driver must disable a "close the gate" if there is no
	 * parity attention or "process kill" pending.
	 */
	if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
	    bnx2x_reset_is_done(bp))
		bnx2x_disable_close_the_gate(bp);

	/* Reset MCP mail box sequence if there is ongoing recovery */
	if (unload_mode == UNLOAD_RECOVERY)
		bp->fw_seq = 0;

	return 0;
}
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	/* If there is no power capability, silently succeed */
	if (!bp->pm_cap) {
		DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
		return 0;
	}

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		/* If there are other clients above don't
		   shut down the power */
		if (atomic_read(&bp->pdev->enable_cnt) != 1)
			return 0;
		/* Don't shut down the power for emulation and FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			return 0;

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
/*
 * net_device service functions
 */
int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif

		if (bnx2x_has_tx_work(fp))
			bnx2x_tx_int(fp);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
			/* No need to update SB for FCoE L2 ring as long as
			 * it's connected to the default SB and the SB
			 * has been updated when NAPI was scheduled.
			 */
			if (IS_FCOE_FP(fp)) {
				napi_complete(napi);
				break;
			}

			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block,
			 * thus we need to ensure that status block indices
			 * have been actually read (bnx2x_update_fpsb_idx)
			 * prior to this check (bnx2x_has_rx_work) so that
			 * we won't write the "newer" value of the status block
			 * to IGU (if there was a DMA right after
			 * bnx2x_has_rx_work and if there is no rmb, the memory
			 * reading (bnx2x_update_fpsb_idx) may be postponed
			 * to right before bnx2x_ack_sb). In this case there
			 * will never be another interrupt until there is
			 * another update of the status block, while there
			 * is still unhandled work.
			 */
			rmb();

			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				DP(NETIF_MSG_HW,
				   "Update index to %d\n", fp->fp_hc_idx);
				bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_hc_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
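/* bnx2x_csum_fix() compensates for a checksum that was computed starting
 * "fix" bytes away from the transport header: the partial checksum of the
 * skipped (or extra) region is subtracted from, or added back into, the
 * running sum, which is then folded and byte-swapped for the parsing BD.
 */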
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_is_gso_v6(skb))
		rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
	else if (skb_is_gso(skb))
		rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;

	return rc;
}
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum  = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
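/* The check above slides a window of (MAX_FETCH_BD - 3) consecutive BDs over
 * the fragment list; if any window carries less data than one LSO MSS the
 * firmware fetch constraint would be violated, so the skb is linearized
 * before transmission instead.
 */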
static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
					u32 xmit_type)
{
	*parsing_data |= (skb_shinfo(skb)->gso_size <<
			  ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
			  ETH_TX_PARSE_BD_E2_LSO_MSS;
	if ((xmit_type & XMIT_GSO_V6) &&
	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}
/**
 * Update PBD in GSO case.
 *
 * @param skb
 * @param tx_start_bd
 * @param pbd
 * @param xmit_type
 */
static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
				     struct eth_tx_parse_bd_e1x *pbd,
				     u32 xmit_type)
{
	pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
	pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
	pbd->tcp_flags = pbd_tcp_flags(skb);

	if (xmit_type & XMIT_GSO_V4) {
		pbd->ip_id = swab16(ip_hdr(skb)->id);
		pbd->tcp_pseudo_csum =
			swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						  ip_hdr(skb)->daddr,
						  0, IPPROTO_TCP, 0));

	} else
		pbd->tcp_pseudo_csum =
			swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						&ipv6_hdr(skb)->daddr,
						0, IPPROTO_TCP, 0));

	pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
}
/**
 * @param skb
 * @param tx_start_bd
 * @param xmit_type
 *
 * @return header len
 */
static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
				       u32 *parsing_data, u32 xmit_type)
{
	*parsing_data |=
		((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
		 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
		 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;

	if (xmit_type & XMIT_CSUM_TCP) {
		*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;

		return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
	} else
		/* We support checksum offload for TCP and UDP only.
		 * No need to pass the UDP header length - it's a constant.
		 */
		return skb_transport_header(skb) +
				sizeof(struct udphdr) - skb->data;
}
/**
 * @param skb
 * @param tx_start_bd
 * @param pbd
 * @param xmit_type
 *
 * @return Header length
 */
static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
				    struct eth_tx_parse_bd_e1x *pbd,
				    u32 xmit_type)
{
	u8 hlen = (skb_network_header(skb) - skb->data) >> 1;

	/* for now NS flag is not used in Linux */
	pbd->global_data =
		(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
			 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));

	pbd->ip_hlen_w = (skb_transport_header(skb) -
			skb_network_header(skb)) >> 1;

	hlen += pbd->ip_hlen_w;

	/* We support checksum offload for TCP and UDP only */
	if (xmit_type & XMIT_CSUM_TCP)
		hlen += tcp_hdrlen(skb) / 2;
	else
		hlen += sizeof(struct udphdr) / 2;

	pbd->total_hlen_w = cpu_to_le16(hlen);
	hlen = hlen*2;

	if (xmit_type & XMIT_CSUM_TCP) {
		pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

	} else {
		s8 fix = SKB_CS_OFF(skb); /* signed! */

		DP(NETIF_MSG_TX_QUEUED,
		   "hlen %d  fix %d  csum before fix %x\n",
		   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));

		/* HW bug: fixup the CSUM */
		pbd->tcp_pseudo_csum =
			bnx2x_csum_fix(skb_transport_header(skb),
				       SKB_CS(skb), fix);

		DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
		   pbd->tcp_pseudo_csum);
	}

	return hlen;
}
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
	u32 pbd_e2_parsing_data = 0;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;
	struct ethhdr *eth;
	u8 mac_type = UNICAST_ADDRESS;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x  protocol %x  "
				"protocol(%x,%x) gso type %x  xmit_type %x\n",
	   fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

	eth = (struct ethhdr *)skb->data;

	/* set flag according to packet type (UNICAST_ADDRESS is default)*/
	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
		if (is_broadcast_ether_addr(eth->h_dest))
			mac_type = BROADCAST_ADDRESS;
		else
			mac_type = MULTICAST_ADDRESS;
	}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pdb sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
		 mac_type);

	/* header nbd */
	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

	if (vlan_tx_tag_present(skb)) {
		tx_start_bd->vlan_or_ethertype =
		    cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |=
		    (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
	} else
		tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	if (xmit_type & XMIT_CSUM) {
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (!(xmit_type & XMIT_CSUM_TCP))
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IS_UDP;
	}

	if (CHIP_IS_E2(bp)) {
		pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
		/* Set PBD in checksum offload case */
		if (xmit_type & XMIT_CSUM)
			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
						     &pbd_e2_parsing_data,
						     xmit_type);
	} else {
		pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
		memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
		/* Set PBD in checksum offload case */
		if (xmit_type & XMIT_CSUM)
			hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
	}

	/* Map skb linear data for DMA */
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);

	/* Setup the data pointer of the first BD of the packet */
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_start_bd->vlan_or_ethertype));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);
		if (CHIP_IS_E2(bp))
			bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
					     xmit_type);
		else
			bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
	}

	/* Set the PBD's parsing_data field if not zero
	 * (for the chips newer than 57711).
	 */
	if (pbd_e2_parsing_data)
		pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);

	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	/* Handle fragmented skb */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = dma_map_page(&bp->pdev->dev, frag->page,
				       frag->page_offset,
				       frag->size, DMA_TO_DEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd_e1x)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
		   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
		   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
		   le16_to_cpu(pbd_e1x->total_hlen_w));
	if (pbd_e2)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
		   pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
		   pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
		   pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
		   pbd_e2->parsing_data);
	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();

	DOORBELL(bp, fp->cid, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->bd_tx_cons */
		smp_mb();

		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}
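/* Ordering in bnx2x_start_xmit(): BD writes -> wmb() -> doorbell producer
 * update -> DOORBELL() -> mmiowb().  This guarantees the firmware never sees
 * a producer value that points at BDs whose contents are not yet visible.
 */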
/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2x_set_eth_mac(bp, 1);

	return 0;
}
static int bnx2x_setup_irqs(struct bnx2x *bp)
{
	int rc = 0;

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc)
			return rc;
	} else {
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			return rc;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI  IRQ %d\n",
				    bp->pdev->irq);
		}
	}

	return 0;
}
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
	kfree(bp->fp);
	kfree(bp->msix_table);
	kfree(bp->ilt);
}

int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;

	/* fp array */
	fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	bp->fp = fp;

	/* msix table */
	tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
		      GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;
alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}
/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->reset_task, 0);
}
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	/* Since the chip was reset, clear the FW sequence number */
	bp->fw_seq = 0;
	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}