1 /* bnx2x_cmn.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2011 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/etherdevice.h>
19 #include <linux/if_vlan.h>
22 #include <net/ip6_checksum.h>
23 #include <linux/firmware.h>
24 #include <linux/prefetch.h>
25 #include "bnx2x_cmn.h"
27 #include "bnx2x_init.h"
29 static int bnx2x_setup_irqs(struct bnx2x
*bp
);
32 * bnx2x_bz_fp - zero content of the fastpath structure.
35 * @index: fastpath index to be zeroed
37 * Makes sure the contents of the bp->fp[index].napi is kept
40 static inline void bnx2x_bz_fp(struct bnx2x
*bp
, int index
)
42 struct bnx2x_fastpath
*fp
= &bp
->fp
[index
];
43 struct napi_struct orig_napi
= fp
->napi
;
44 /* bzero bnx2x_fastpath contents */
45 memset(fp
, 0, sizeof(*fp
));
47 /* Restore the NAPI object as it has been already initialized */
52 * bnx2x_move_fp - move content of the fastpath structure.
55 * @from: source FP index
56 * @to: destination FP index
58 * Makes sure the contents of the bp->fp[to].napi is kept
61 static inline void bnx2x_move_fp(struct bnx2x
*bp
, int from
, int to
)
63 struct bnx2x_fastpath
*from_fp
= &bp
->fp
[from
];
64 struct bnx2x_fastpath
*to_fp
= &bp
->fp
[to
];
65 struct napi_struct orig_napi
= to_fp
->napi
;
66 /* Move bnx2x_fastpath contents */
67 memcpy(to_fp
, from_fp
, sizeof(*to_fp
));
70 /* Restore the NAPI object as it has been already initialized */
71 to_fp
->napi
= orig_napi
;
74 /* free skb in the packet ring at pos idx
75 * return idx of last bd freed
77 static u16
bnx2x_free_tx_pkt(struct bnx2x
*bp
, struct bnx2x_fastpath
*fp
,
80 struct sw_tx_bd
*tx_buf
= &fp
->tx_buf_ring
[idx
];
81 struct eth_tx_start_bd
*tx_start_bd
;
82 struct eth_tx_bd
*tx_data_bd
;
83 struct sk_buff
*skb
= tx_buf
->skb
;
84 u16 bd_idx
= TX_BD(tx_buf
->first_bd
), new_cons
;
87 /* prefetch skb end pointer to speedup dev_kfree_skb() */
90 DP(BNX2X_MSG_OFF
, "pkt_idx %d buff @(%p)->skb %p\n",
94 DP(BNX2X_MSG_OFF
, "free bd_idx %d\n", bd_idx
);
95 tx_start_bd
= &fp
->tx_desc_ring
[bd_idx
].start_bd
;
96 dma_unmap_single(&bp
->pdev
->dev
, BD_UNMAP_ADDR(tx_start_bd
),
97 BD_UNMAP_LEN(tx_start_bd
), DMA_TO_DEVICE
);
99 nbd
= le16_to_cpu(tx_start_bd
->nbd
) - 1;
100 #ifdef BNX2X_STOP_ON_ERROR
101 if ((nbd
- 1) > (MAX_SKB_FRAGS
+ 2)) {
102 BNX2X_ERR("BAD nbd!\n");
106 new_cons
= nbd
+ tx_buf
->first_bd
;
108 /* Get the next bd */
109 bd_idx
= TX_BD(NEXT_TX_IDX(bd_idx
));
111 /* Skip a parse bd... */
113 bd_idx
= TX_BD(NEXT_TX_IDX(bd_idx
));
115 /* ...and the TSO split header bd since they have no mapping */
116 if (tx_buf
->flags
& BNX2X_TSO_SPLIT_BD
) {
118 bd_idx
= TX_BD(NEXT_TX_IDX(bd_idx
));
124 DP(BNX2X_MSG_OFF
, "free frag bd_idx %d\n", bd_idx
);
125 tx_data_bd
= &fp
->tx_desc_ring
[bd_idx
].reg_bd
;
126 dma_unmap_page(&bp
->pdev
->dev
, BD_UNMAP_ADDR(tx_data_bd
),
127 BD_UNMAP_LEN(tx_data_bd
), DMA_TO_DEVICE
);
129 bd_idx
= TX_BD(NEXT_TX_IDX(bd_idx
));
134 dev_kfree_skb_any(skb
);
135 tx_buf
->first_bd
= 0;
141 int bnx2x_tx_int(struct bnx2x_fastpath
*fp
)
143 struct bnx2x
*bp
= fp
->bp
;
144 struct netdev_queue
*txq
;
145 u16 hw_cons
, sw_cons
, bd_cons
= fp
->tx_bd_cons
;
147 #ifdef BNX2X_STOP_ON_ERROR
148 if (unlikely(bp
->panic
))
152 txq
= netdev_get_tx_queue(bp
->dev
, fp
->index
);
153 hw_cons
= le16_to_cpu(*fp
->tx_cons_sb
);
154 sw_cons
= fp
->tx_pkt_cons
;
156 while (sw_cons
!= hw_cons
) {
159 pkt_cons
= TX_BD(sw_cons
);
161 DP(NETIF_MSG_TX_DONE
, "queue[%d]: hw_cons %u sw_cons %u "
163 fp
->index
, hw_cons
, sw_cons
, pkt_cons
);
165 bd_cons
= bnx2x_free_tx_pkt(bp
, fp
, pkt_cons
);
169 fp
->tx_pkt_cons
= sw_cons
;
170 fp
->tx_bd_cons
= bd_cons
;
172 /* Need to make the tx_bd_cons update visible to start_xmit()
173 * before checking for netif_tx_queue_stopped(). Without the
174 * memory barrier, there is a small possibility that
175 * start_xmit() will miss it and cause the queue to be stopped
180 if (unlikely(netif_tx_queue_stopped(txq
))) {
181 /* Taking tx_lock() is needed to prevent reenabling the queue
182 * while it's empty. This could have happen if rx_action() gets
183 * suspended in bnx2x_tx_int() after the condition before
184 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
186 * stops the queue->sees fresh tx_bd_cons->releases the queue->
187 * sends some packets consuming the whole queue again->
191 __netif_tx_lock(txq
, smp_processor_id());
193 if ((netif_tx_queue_stopped(txq
)) &&
194 (bp
->state
== BNX2X_STATE_OPEN
) &&
195 (bnx2x_tx_avail(fp
) >= MAX_SKB_FRAGS
+ 3))
196 netif_tx_wake_queue(txq
);
198 __netif_tx_unlock(txq
);
203 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath
*fp
,
206 u16 last_max
= fp
->last_max_sge
;
208 if (SUB_S16(idx
, last_max
) > 0)
209 fp
->last_max_sge
= idx
;
212 static void bnx2x_update_sge_prod(struct bnx2x_fastpath
*fp
,
213 struct eth_fast_path_rx_cqe
*fp_cqe
)
215 struct bnx2x
*bp
= fp
->bp
;
216 u16 sge_len
= SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe
->pkt_len
) -
217 le16_to_cpu(fp_cqe
->len_on_bd
)) >>
219 u16 last_max
, last_elem
, first_elem
;
226 /* First mark all used pages */
227 for (i
= 0; i
< sge_len
; i
++)
228 SGE_MASK_CLEAR_BIT(fp
,
229 RX_SGE(le16_to_cpu(fp_cqe
->sgl_or_raw_data
.sgl
[i
])));
231 DP(NETIF_MSG_RX_STATUS
, "fp_cqe->sgl[%d] = %d\n",
232 sge_len
- 1, le16_to_cpu(fp_cqe
->sgl_or_raw_data
.sgl
[sge_len
- 1]));
234 /* Here we assume that the last SGE index is the biggest */
235 prefetch((void *)(fp
->sge_mask
));
236 bnx2x_update_last_max_sge(fp
,
237 le16_to_cpu(fp_cqe
->sgl_or_raw_data
.sgl
[sge_len
- 1]));
239 last_max
= RX_SGE(fp
->last_max_sge
);
240 last_elem
= last_max
>> RX_SGE_MASK_ELEM_SHIFT
;
241 first_elem
= RX_SGE(fp
->rx_sge_prod
) >> RX_SGE_MASK_ELEM_SHIFT
;
243 /* If ring is not full */
244 if (last_elem
+ 1 != first_elem
)
247 /* Now update the prod */
248 for (i
= first_elem
; i
!= last_elem
; i
= NEXT_SGE_MASK_ELEM(i
)) {
249 if (likely(fp
->sge_mask
[i
]))
252 fp
->sge_mask
[i
] = RX_SGE_MASK_ELEM_ONE_MASK
;
253 delta
+= RX_SGE_MASK_ELEM_SZ
;
257 fp
->rx_sge_prod
+= delta
;
258 /* clear page-end entries */
259 bnx2x_clear_sge_mask_next_elems(fp
);
262 DP(NETIF_MSG_RX_STATUS
,
263 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
264 fp
->last_max_sge
, fp
->rx_sge_prod
);
267 static void bnx2x_tpa_start(struct bnx2x_fastpath
*fp
, u16 queue
,
268 struct sk_buff
*skb
, u16 cons
, u16 prod
)
270 struct bnx2x
*bp
= fp
->bp
;
271 struct sw_rx_bd
*cons_rx_buf
= &fp
->rx_buf_ring
[cons
];
272 struct sw_rx_bd
*prod_rx_buf
= &fp
->rx_buf_ring
[prod
];
273 struct eth_rx_bd
*prod_bd
= &fp
->rx_desc_ring
[prod
];
276 /* move empty skb from pool to prod and map it */
277 prod_rx_buf
->skb
= fp
->tpa_pool
[queue
].skb
;
278 mapping
= dma_map_single(&bp
->pdev
->dev
, fp
->tpa_pool
[queue
].skb
->data
,
279 fp
->rx_buf_size
, DMA_FROM_DEVICE
);
280 dma_unmap_addr_set(prod_rx_buf
, mapping
, mapping
);
282 /* move partial skb from cons to pool (don't unmap yet) */
283 fp
->tpa_pool
[queue
] = *cons_rx_buf
;
285 /* mark bin state as start - print error if current state != stop */
286 if (fp
->tpa_state
[queue
] != BNX2X_TPA_STOP
)
287 BNX2X_ERR("start of bin not in stop [%d]\n", queue
);
289 fp
->tpa_state
[queue
] = BNX2X_TPA_START
;
291 /* point prod_bd to new skb */
292 prod_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
293 prod_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
295 #ifdef BNX2X_STOP_ON_ERROR
296 fp
->tpa_queue_used
|= (1 << queue
);
297 #ifdef _ASM_GENERIC_INT_L64_H
298 DP(NETIF_MSG_RX_STATUS
, "fp->tpa_queue_used = 0x%lx\n",
300 DP(NETIF_MSG_RX_STATUS
, "fp->tpa_queue_used = 0x%llx\n",
306 /* Timestamp option length allowed for TPA aggregation:
308 * nop nop kind length echo val
310 #define TPA_TSTAMP_OPT_LEN 12
312 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
315 * @parsing_flags: parsing flags from the START CQE
316 * @len_on_bd: total length of the first packet for the
319 * Approximate value of the MSS for this aggregation calculated using
320 * the first packet of it.
322 static inline u16
bnx2x_set_lro_mss(struct bnx2x
*bp
, u16 parsing_flags
,
325 /* TPA arrgregation won't have an IP options and TCP options
326 * other than timestamp.
328 u16 hdrs_len
= ETH_HLEN
+ sizeof(struct iphdr
) + sizeof(struct tcphdr
);
331 /* Check if there was a TCP timestamp, if there is it's will
332 * always be 12 bytes length: nop nop kind length echo val.
334 * Otherwise FW would close the aggregation.
336 if (parsing_flags
& PARSING_FLAGS_TIME_STAMP_EXIST_FLAG
)
337 hdrs_len
+= TPA_TSTAMP_OPT_LEN
;
339 return len_on_bd
- hdrs_len
;
342 static int bnx2x_fill_frag_skb(struct bnx2x
*bp
, struct bnx2x_fastpath
*fp
,
344 struct eth_fast_path_rx_cqe
*fp_cqe
,
345 u16 cqe_idx
, u16 parsing_flags
)
347 struct sw_rx_page
*rx_pg
, old_rx_pg
;
348 u16 len_on_bd
= le16_to_cpu(fp_cqe
->len_on_bd
);
349 u32 i
, frag_len
, frag_size
, pages
;
353 frag_size
= le16_to_cpu(fp_cqe
->pkt_len
) - len_on_bd
;
354 pages
= SGE_PAGE_ALIGN(frag_size
) >> SGE_PAGE_SHIFT
;
356 /* This is needed in order to enable forwarding support */
358 skb_shinfo(skb
)->gso_size
= bnx2x_set_lro_mss(bp
, parsing_flags
,
361 #ifdef BNX2X_STOP_ON_ERROR
362 if (pages
> min_t(u32
, 8, MAX_SKB_FRAGS
)*SGE_PAGE_SIZE
*PAGES_PER_SGE
) {
363 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
365 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
366 fp_cqe
->pkt_len
, len_on_bd
);
372 /* Run through the SGL and compose the fragmented skb */
373 for (i
= 0, j
= 0; i
< pages
; i
+= PAGES_PER_SGE
, j
++) {
375 RX_SGE(le16_to_cpu(fp_cqe
->sgl_or_raw_data
.sgl
[j
]));
377 /* FW gives the indices of the SGE as if the ring is an array
378 (meaning that "next" element will consume 2 indices) */
379 frag_len
= min(frag_size
, (u32
)(SGE_PAGE_SIZE
*PAGES_PER_SGE
));
380 rx_pg
= &fp
->rx_page_ring
[sge_idx
];
383 /* If we fail to allocate a substitute page, we simply stop
384 where we are and drop the whole packet */
385 err
= bnx2x_alloc_rx_sge(bp
, fp
, sge_idx
);
387 fp
->eth_q_stats
.rx_skb_alloc_failed
++;
391 /* Unmap the page as we r going to pass it to the stack */
392 dma_unmap_page(&bp
->pdev
->dev
,
393 dma_unmap_addr(&old_rx_pg
, mapping
),
394 SGE_PAGE_SIZE
*PAGES_PER_SGE
, DMA_FROM_DEVICE
);
396 /* Add one frag and update the appropriate fields in the skb */
397 skb_fill_page_desc(skb
, j
, old_rx_pg
.page
, 0, frag_len
);
399 skb
->data_len
+= frag_len
;
400 skb
->truesize
+= frag_len
;
401 skb
->len
+= frag_len
;
403 frag_size
-= frag_len
;
409 static void bnx2x_tpa_stop(struct bnx2x
*bp
, struct bnx2x_fastpath
*fp
,
410 u16 queue
, int pad
, int len
, union eth_rx_cqe
*cqe
,
413 struct sw_rx_bd
*rx_buf
= &fp
->tpa_pool
[queue
];
414 struct sk_buff
*skb
= rx_buf
->skb
;
416 struct sk_buff
*new_skb
= netdev_alloc_skb(bp
->dev
, fp
->rx_buf_size
);
418 /* Unmap skb in the pool anyway, as we are going to change
419 pool entry status to BNX2X_TPA_STOP even if new skb allocation
421 dma_unmap_single(&bp
->pdev
->dev
, dma_unmap_addr(rx_buf
, mapping
),
422 fp
->rx_buf_size
, DMA_FROM_DEVICE
);
424 if (likely(new_skb
)) {
425 /* fix ip xsum and give it to the stack */
426 /* (no need to map the new skb) */
428 le16_to_cpu(cqe
->fast_path_cqe
.pars_flags
.flags
);
431 prefetch(((char *)(skb
)) + L1_CACHE_BYTES
);
433 #ifdef BNX2X_STOP_ON_ERROR
434 if (pad
+ len
> fp
->rx_buf_size
) {
435 BNX2X_ERR("skb_put is about to fail... "
436 "pad %d len %d rx_buf_size %d\n",
437 pad
, len
, fp
->rx_buf_size
);
443 skb_reserve(skb
, pad
);
446 skb
->protocol
= eth_type_trans(skb
, bp
->dev
);
447 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
452 iph
= (struct iphdr
*)skb
->data
;
454 iph
->check
= ip_fast_csum((u8
*)iph
, iph
->ihl
);
457 if (!bnx2x_fill_frag_skb(bp
, fp
, skb
,
458 &cqe
->fast_path_cqe
, cqe_idx
,
460 if (parsing_flags
& PARSING_FLAGS_VLAN
)
461 __vlan_hwaccel_put_tag(skb
,
462 le16_to_cpu(cqe
->fast_path_cqe
.
464 napi_gro_receive(&fp
->napi
, skb
);
466 DP(NETIF_MSG_RX_STATUS
, "Failed to allocate new pages"
467 " - dropping packet!\n");
468 dev_kfree_skb_any(skb
);
472 /* put new skb in bin */
473 fp
->tpa_pool
[queue
].skb
= new_skb
;
476 /* else drop the packet and keep the buffer in the bin */
477 DP(NETIF_MSG_RX_STATUS
,
478 "Failed to allocate new skb - dropping packet!\n");
479 fp
->eth_q_stats
.rx_skb_alloc_failed
++;
482 fp
->tpa_state
[queue
] = BNX2X_TPA_STOP
;
485 /* Set Toeplitz hash value in the skb using the value from the
486 * CQE (calculated by HW).
488 static inline void bnx2x_set_skb_rxhash(struct bnx2x
*bp
, union eth_rx_cqe
*cqe
,
491 /* Set Toeplitz hash from CQE */
492 if ((bp
->dev
->features
& NETIF_F_RXHASH
) &&
493 (cqe
->fast_path_cqe
.status_flags
&
494 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG
))
496 le32_to_cpu(cqe
->fast_path_cqe
.rss_hash_result
);
499 int bnx2x_rx_int(struct bnx2x_fastpath
*fp
, int budget
)
501 struct bnx2x
*bp
= fp
->bp
;
502 u16 bd_cons
, bd_prod
, bd_prod_fw
, comp_ring_cons
;
503 u16 hw_comp_cons
, sw_comp_cons
, sw_comp_prod
;
506 #ifdef BNX2X_STOP_ON_ERROR
507 if (unlikely(bp
->panic
))
511 /* CQ "next element" is of the size of the regular element,
512 that's why it's ok here */
513 hw_comp_cons
= le16_to_cpu(*fp
->rx_cons_sb
);
514 if ((hw_comp_cons
& MAX_RCQ_DESC_CNT
) == MAX_RCQ_DESC_CNT
)
517 bd_cons
= fp
->rx_bd_cons
;
518 bd_prod
= fp
->rx_bd_prod
;
519 bd_prod_fw
= bd_prod
;
520 sw_comp_cons
= fp
->rx_comp_cons
;
521 sw_comp_prod
= fp
->rx_comp_prod
;
523 /* Memory barrier necessary as speculative reads of the rx
524 * buffer can be ahead of the index in the status block
528 DP(NETIF_MSG_RX_STATUS
,
529 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
530 fp
->index
, hw_comp_cons
, sw_comp_cons
);
532 while (sw_comp_cons
!= hw_comp_cons
) {
533 struct sw_rx_bd
*rx_buf
= NULL
;
535 union eth_rx_cqe
*cqe
;
539 comp_ring_cons
= RCQ_BD(sw_comp_cons
);
540 bd_prod
= RX_BD(bd_prod
);
541 bd_cons
= RX_BD(bd_cons
);
543 /* Prefetch the page containing the BD descriptor
544 at producer's index. It will be needed when new skb is
546 prefetch((void *)(PAGE_ALIGN((unsigned long)
547 (&fp
->rx_desc_ring
[bd_prod
])) -
550 cqe
= &fp
->rx_comp_ring
[comp_ring_cons
];
551 cqe_fp_flags
= cqe
->fast_path_cqe
.type_error_flags
;
553 DP(NETIF_MSG_RX_STATUS
, "CQE type %x err %x status %x"
554 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags
),
555 cqe_fp_flags
, cqe
->fast_path_cqe
.status_flags
,
556 le32_to_cpu(cqe
->fast_path_cqe
.rss_hash_result
),
557 le16_to_cpu(cqe
->fast_path_cqe
.vlan_tag
),
558 le16_to_cpu(cqe
->fast_path_cqe
.pkt_len
));
560 /* is this a slowpath msg? */
561 if (unlikely(CQE_TYPE(cqe_fp_flags
))) {
562 bnx2x_sp_event(fp
, cqe
);
565 /* this is an rx packet */
567 rx_buf
= &fp
->rx_buf_ring
[bd_cons
];
570 len
= le16_to_cpu(cqe
->fast_path_cqe
.pkt_len
);
571 pad
= cqe
->fast_path_cqe
.placement_offset
;
573 /* - If CQE is marked both TPA_START and TPA_END it is
575 * - FP CQE will always have either TPA_START or/and
576 * TPA_STOP flags set.
578 if ((!fp
->disable_tpa
) &&
579 (TPA_TYPE(cqe_fp_flags
) !=
580 (TPA_TYPE_START
| TPA_TYPE_END
))) {
581 u16 queue
= cqe
->fast_path_cqe
.queue_index
;
583 if (TPA_TYPE(cqe_fp_flags
) == TPA_TYPE_START
) {
584 DP(NETIF_MSG_RX_STATUS
,
585 "calling tpa_start on queue %d\n",
588 bnx2x_tpa_start(fp
, queue
, skb
,
591 /* Set Toeplitz hash for an LRO skb */
592 bnx2x_set_skb_rxhash(bp
, cqe
, skb
);
595 } else { /* TPA_STOP */
596 DP(NETIF_MSG_RX_STATUS
,
597 "calling tpa_stop on queue %d\n",
600 if (!BNX2X_RX_SUM_FIX(cqe
))
601 BNX2X_ERR("STOP on none TCP "
604 /* This is a size of the linear data
606 len
= le16_to_cpu(cqe
->fast_path_cqe
.
608 bnx2x_tpa_stop(bp
, fp
, queue
, pad
,
609 len
, cqe
, comp_ring_cons
);
610 #ifdef BNX2X_STOP_ON_ERROR
615 bnx2x_update_sge_prod(fp
,
616 &cqe
->fast_path_cqe
);
621 dma_sync_single_for_device(&bp
->pdev
->dev
,
622 dma_unmap_addr(rx_buf
, mapping
),
623 pad
+ RX_COPY_THRESH
,
625 prefetch(((char *)(skb
)) + L1_CACHE_BYTES
);
627 /* is this an error packet? */
628 if (unlikely(cqe_fp_flags
& ETH_RX_ERROR_FALGS
)) {
630 "ERROR flags %x rx packet %u\n",
631 cqe_fp_flags
, sw_comp_cons
);
632 fp
->eth_q_stats
.rx_err_discard_pkt
++;
636 /* Since we don't have a jumbo ring
637 * copy small packets if mtu > 1500
639 if ((bp
->dev
->mtu
> ETH_MAX_PACKET_SIZE
) &&
640 (len
<= RX_COPY_THRESH
)) {
641 struct sk_buff
*new_skb
;
643 new_skb
= netdev_alloc_skb(bp
->dev
,
645 if (new_skb
== NULL
) {
647 "ERROR packet dropped "
648 "because of alloc failure\n");
649 fp
->eth_q_stats
.rx_skb_alloc_failed
++;
654 skb_copy_from_linear_data_offset(skb
, pad
,
655 new_skb
->data
+ pad
, len
);
656 skb_reserve(new_skb
, pad
);
657 skb_put(new_skb
, len
);
659 bnx2x_reuse_rx_skb(fp
, bd_cons
, bd_prod
);
664 if (likely(bnx2x_alloc_rx_skb(bp
, fp
, bd_prod
) == 0)) {
665 dma_unmap_single(&bp
->pdev
->dev
,
666 dma_unmap_addr(rx_buf
, mapping
),
669 skb_reserve(skb
, pad
);
674 "ERROR packet dropped because "
675 "of alloc failure\n");
676 fp
->eth_q_stats
.rx_skb_alloc_failed
++;
678 bnx2x_reuse_rx_skb(fp
, bd_cons
, bd_prod
);
682 skb
->protocol
= eth_type_trans(skb
, bp
->dev
);
684 /* Set Toeplitz hash for a none-LRO skb */
685 bnx2x_set_skb_rxhash(bp
, cqe
, skb
);
687 skb_checksum_none_assert(skb
);
689 if (bp
->dev
->features
& NETIF_F_RXCSUM
) {
690 if (likely(BNX2X_RX_CSUM_OK(cqe
)))
691 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
693 fp
->eth_q_stats
.hw_csum_err
++;
697 skb_record_rx_queue(skb
, fp
->index
);
699 if (le16_to_cpu(cqe
->fast_path_cqe
.pars_flags
.flags
) &
701 __vlan_hwaccel_put_tag(skb
,
702 le16_to_cpu(cqe
->fast_path_cqe
.vlan_tag
));
703 napi_gro_receive(&fp
->napi
, skb
);
709 bd_cons
= NEXT_RX_IDX(bd_cons
);
710 bd_prod
= NEXT_RX_IDX(bd_prod
);
711 bd_prod_fw
= NEXT_RX_IDX(bd_prod_fw
);
714 sw_comp_prod
= NEXT_RCQ_IDX(sw_comp_prod
);
715 sw_comp_cons
= NEXT_RCQ_IDX(sw_comp_cons
);
717 if (rx_pkt
== budget
)
721 fp
->rx_bd_cons
= bd_cons
;
722 fp
->rx_bd_prod
= bd_prod_fw
;
723 fp
->rx_comp_cons
= sw_comp_cons
;
724 fp
->rx_comp_prod
= sw_comp_prod
;
726 /* Update producers */
727 bnx2x_update_rx_prod(bp
, fp
, bd_prod_fw
, sw_comp_prod
,
730 fp
->rx_pkt
+= rx_pkt
;
736 static irqreturn_t
bnx2x_msix_fp_int(int irq
, void *fp_cookie
)
738 struct bnx2x_fastpath
*fp
= fp_cookie
;
739 struct bnx2x
*bp
= fp
->bp
;
741 /* Return here if interrupt is disabled */
742 if (unlikely(atomic_read(&bp
->intr_sem
) != 0)) {
743 DP(NETIF_MSG_INTR
, "called but intr_sem not 0, returning\n");
747 DP(BNX2X_MSG_FP
, "got an MSI-X interrupt on IDX:SB "
748 "[fp %d fw_sd %d igusb %d]\n",
749 fp
->index
, fp
->fw_sb_id
, fp
->igu_sb_id
);
750 bnx2x_ack_sb(bp
, fp
->igu_sb_id
, USTORM_ID
, 0, IGU_INT_DISABLE
, 0);
752 #ifdef BNX2X_STOP_ON_ERROR
753 if (unlikely(bp
->panic
))
757 /* Handle Rx and Tx according to MSI-X vector */
758 prefetch(fp
->rx_cons_sb
);
759 prefetch(fp
->tx_cons_sb
);
760 prefetch(&fp
->sb_running_index
[SM_RX_ID
]);
761 napi_schedule(&bnx2x_fp(bp
, fp
->index
, napi
));
766 /* HW Lock for shared dual port PHYs */
767 void bnx2x_acquire_phy_lock(struct bnx2x
*bp
)
769 mutex_lock(&bp
->port
.phy_mutex
);
771 if (bp
->port
.need_hw_lock
)
772 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_MDIO
);
775 void bnx2x_release_phy_lock(struct bnx2x
*bp
)
777 if (bp
->port
.need_hw_lock
)
778 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_MDIO
);
780 mutex_unlock(&bp
->port
.phy_mutex
);
783 /* calculates MF speed according to current linespeed and MF configuration */
784 u16
bnx2x_get_mf_speed(struct bnx2x
*bp
)
786 u16 line_speed
= bp
->link_vars
.line_speed
;
788 u16 maxCfg
= bnx2x_extract_max_cfg(bp
,
789 bp
->mf_config
[BP_VN(bp
)]);
791 /* Calculate the current MAX line speed limit for the MF
795 line_speed
= (line_speed
* maxCfg
) / 100;
797 u16 vn_max_rate
= maxCfg
* 100;
799 if (vn_max_rate
< line_speed
)
800 line_speed
= vn_max_rate
;
808 * bnx2x_fill_report_data - fill link report data to report
811 * @data: link state to update
813 * It uses a none-atomic bit operations because is called under the mutex.
815 static inline void bnx2x_fill_report_data(struct bnx2x
*bp
,
816 struct bnx2x_link_report_data
*data
)
818 u16 line_speed
= bnx2x_get_mf_speed(bp
);
820 memset(data
, 0, sizeof(*data
));
822 /* Fill the report data: efective line speed */
823 data
->line_speed
= line_speed
;
826 if (!bp
->link_vars
.link_up
|| (bp
->flags
& MF_FUNC_DIS
))
827 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
828 &data
->link_report_flags
);
831 if (bp
->link_vars
.duplex
== DUPLEX_FULL
)
832 __set_bit(BNX2X_LINK_REPORT_FD
, &data
->link_report_flags
);
834 /* Rx Flow Control is ON */
835 if (bp
->link_vars
.flow_ctrl
& BNX2X_FLOW_CTRL_RX
)
836 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON
, &data
->link_report_flags
);
838 /* Tx Flow Control is ON */
839 if (bp
->link_vars
.flow_ctrl
& BNX2X_FLOW_CTRL_TX
)
840 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON
, &data
->link_report_flags
);
844 * bnx2x_link_report - report link status to OS.
848 * Calls the __bnx2x_link_report() under the same locking scheme
849 * as a link/PHY state managing code to ensure a consistent link
853 void bnx2x_link_report(struct bnx2x
*bp
)
855 bnx2x_acquire_phy_lock(bp
);
856 __bnx2x_link_report(bp
);
857 bnx2x_release_phy_lock(bp
);
861 * __bnx2x_link_report - report link status to OS.
865 * None atomic inmlementation.
866 * Should be called under the phy_lock.
868 void __bnx2x_link_report(struct bnx2x
*bp
)
870 struct bnx2x_link_report_data cur_data
;
874 bnx2x_read_mf_cfg(bp
);
876 /* Read the current link report info */
877 bnx2x_fill_report_data(bp
, &cur_data
);
879 /* Don't report link down or exactly the same link status twice */
880 if (!memcmp(&cur_data
, &bp
->last_reported_link
, sizeof(cur_data
)) ||
881 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
882 &bp
->last_reported_link
.link_report_flags
) &&
883 test_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
884 &cur_data
.link_report_flags
)))
889 /* We are going to report a new link parameters now -
890 * remember the current data for the next time.
892 memcpy(&bp
->last_reported_link
, &cur_data
, sizeof(cur_data
));
894 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
895 &cur_data
.link_report_flags
)) {
896 netif_carrier_off(bp
->dev
);
897 netdev_err(bp
->dev
, "NIC Link is Down\n");
900 netif_carrier_on(bp
->dev
);
901 netdev_info(bp
->dev
, "NIC Link is Up, ");
902 pr_cont("%d Mbps ", cur_data
.line_speed
);
904 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD
,
905 &cur_data
.link_report_flags
))
906 pr_cont("full duplex");
908 pr_cont("half duplex");
910 /* Handle the FC at the end so that only these flags would be
911 * possibly set. This way we may easily check if there is no FC
914 if (cur_data
.link_report_flags
) {
915 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON
,
916 &cur_data
.link_report_flags
)) {
917 pr_cont(", receive ");
918 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON
,
919 &cur_data
.link_report_flags
))
920 pr_cont("& transmit ");
922 pr_cont(", transmit ");
924 pr_cont("flow control ON");
930 void bnx2x_init_rx_rings(struct bnx2x
*bp
)
932 int func
= BP_FUNC(bp
);
933 int max_agg_queues
= CHIP_IS_E1(bp
) ? ETH_MAX_AGGREGATION_QUEUES_E1
:
934 ETH_MAX_AGGREGATION_QUEUES_E1H
;
938 /* Allocate TPA resources */
939 for_each_rx_queue(bp
, j
) {
940 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
943 "mtu %d rx_buf_size %d\n", bp
->dev
->mtu
, fp
->rx_buf_size
);
945 if (!fp
->disable_tpa
) {
946 /* Fill the per-aggregation pool */
947 for (i
= 0; i
< max_agg_queues
; i
++) {
948 fp
->tpa_pool
[i
].skb
=
949 netdev_alloc_skb(bp
->dev
, fp
->rx_buf_size
);
950 if (!fp
->tpa_pool
[i
].skb
) {
951 BNX2X_ERR("Failed to allocate TPA "
952 "skb pool for queue[%d] - "
953 "disabling TPA on this "
955 bnx2x_free_tpa_pool(bp
, fp
, i
);
959 dma_unmap_addr_set((struct sw_rx_bd
*)
960 &bp
->fp
->tpa_pool
[i
],
962 fp
->tpa_state
[i
] = BNX2X_TPA_STOP
;
965 /* "next page" elements initialization */
966 bnx2x_set_next_page_sgl(fp
);
968 /* set SGEs bit mask */
969 bnx2x_init_sge_ring_bit_mask(fp
);
971 /* Allocate SGEs and initialize the ring elements */
972 for (i
= 0, ring_prod
= 0;
973 i
< MAX_RX_SGE_CNT
*NUM_RX_SGE_PAGES
; i
++) {
975 if (bnx2x_alloc_rx_sge(bp
, fp
, ring_prod
) < 0) {
976 BNX2X_ERR("was only able to allocate "
978 BNX2X_ERR("disabling TPA for"
980 /* Cleanup already allocated elements */
981 bnx2x_free_rx_sge_range(bp
,
983 bnx2x_free_tpa_pool(bp
,
989 ring_prod
= NEXT_SGE_IDX(ring_prod
);
992 fp
->rx_sge_prod
= ring_prod
;
996 for_each_rx_queue(bp
, j
) {
997 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
1001 /* Activate BD ring */
1003 * this will generate an interrupt (to the TSTORM)
1004 * must only be done after chip is initialized
1006 bnx2x_update_rx_prod(bp
, fp
, fp
->rx_bd_prod
, fp
->rx_comp_prod
,
1012 if (!CHIP_IS_E2(bp
)) {
1013 REG_WR(bp
, BAR_USTRORM_INTMEM
+
1014 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func
),
1015 U64_LO(fp
->rx_comp_mapping
));
1016 REG_WR(bp
, BAR_USTRORM_INTMEM
+
1017 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func
) + 4,
1018 U64_HI(fp
->rx_comp_mapping
));
1023 static void bnx2x_free_tx_skbs(struct bnx2x
*bp
)
1027 for_each_tx_queue(bp
, i
) {
1028 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
1030 u16 bd_cons
= fp
->tx_bd_cons
;
1031 u16 sw_prod
= fp
->tx_pkt_prod
;
1032 u16 sw_cons
= fp
->tx_pkt_cons
;
1034 while (sw_cons
!= sw_prod
) {
1035 bd_cons
= bnx2x_free_tx_pkt(bp
, fp
, TX_BD(sw_cons
));
1041 static void bnx2x_free_rx_bds(struct bnx2x_fastpath
*fp
)
1043 struct bnx2x
*bp
= fp
->bp
;
1046 /* ring wasn't allocated */
1047 if (fp
->rx_buf_ring
== NULL
)
1050 for (i
= 0; i
< NUM_RX_BD
; i
++) {
1051 struct sw_rx_bd
*rx_buf
= &fp
->rx_buf_ring
[i
];
1052 struct sk_buff
*skb
= rx_buf
->skb
;
1057 dma_unmap_single(&bp
->pdev
->dev
,
1058 dma_unmap_addr(rx_buf
, mapping
),
1059 fp
->rx_buf_size
, DMA_FROM_DEVICE
);
1066 static void bnx2x_free_rx_skbs(struct bnx2x
*bp
)
1070 for_each_rx_queue(bp
, j
) {
1071 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
1073 bnx2x_free_rx_bds(fp
);
1075 if (!fp
->disable_tpa
)
1076 bnx2x_free_tpa_pool(bp
, fp
, CHIP_IS_E1(bp
) ?
1077 ETH_MAX_AGGREGATION_QUEUES_E1
:
1078 ETH_MAX_AGGREGATION_QUEUES_E1H
);
1082 void bnx2x_free_skbs(struct bnx2x
*bp
)
1084 bnx2x_free_tx_skbs(bp
);
1085 bnx2x_free_rx_skbs(bp
);
1088 void bnx2x_update_max_mf_config(struct bnx2x
*bp
, u32 value
)
1090 /* load old values */
1091 u32 mf_cfg
= bp
->mf_config
[BP_VN(bp
)];
1093 if (value
!= bnx2x_extract_max_cfg(bp
, mf_cfg
)) {
1094 /* leave all but MAX value */
1095 mf_cfg
&= ~FUNC_MF_CFG_MAX_BW_MASK
;
1097 /* set new MAX value */
1098 mf_cfg
|= (value
<< FUNC_MF_CFG_MAX_BW_SHIFT
)
1099 & FUNC_MF_CFG_MAX_BW_MASK
;
1101 bnx2x_fw_command(bp
, DRV_MSG_CODE_SET_MF_BW
, mf_cfg
);
1105 static void bnx2x_free_msix_irqs(struct bnx2x
*bp
)
1109 free_irq(bp
->msix_table
[0].vector
, bp
->dev
);
1110 DP(NETIF_MSG_IFDOWN
, "released sp irq (%d)\n",
1111 bp
->msix_table
[0].vector
);
1116 for_each_eth_queue(bp
, i
) {
1117 DP(NETIF_MSG_IFDOWN
, "about to release fp #%d->%d irq "
1118 "state %x\n", i
, bp
->msix_table
[i
+ offset
].vector
,
1119 bnx2x_fp(bp
, i
, state
));
1121 free_irq(bp
->msix_table
[i
+ offset
].vector
, &bp
->fp
[i
]);
1125 void bnx2x_free_irq(struct bnx2x
*bp
)
1127 if (bp
->flags
& USING_MSIX_FLAG
)
1128 bnx2x_free_msix_irqs(bp
);
1129 else if (bp
->flags
& USING_MSI_FLAG
)
1130 free_irq(bp
->pdev
->irq
, bp
->dev
);
1132 free_irq(bp
->pdev
->irq
, bp
->dev
);
1135 int bnx2x_enable_msix(struct bnx2x
*bp
)
1137 int msix_vec
= 0, i
, rc
, req_cnt
;
1139 bp
->msix_table
[msix_vec
].entry
= msix_vec
;
1140 DP(NETIF_MSG_IFUP
, "msix_table[0].entry = %d (slowpath)\n",
1141 bp
->msix_table
[0].entry
);
1145 bp
->msix_table
[msix_vec
].entry
= msix_vec
;
1146 DP(NETIF_MSG_IFUP
, "msix_table[%d].entry = %d (CNIC)\n",
1147 bp
->msix_table
[msix_vec
].entry
, bp
->msix_table
[msix_vec
].entry
);
1150 for_each_eth_queue(bp
, i
) {
1151 bp
->msix_table
[msix_vec
].entry
= msix_vec
;
1152 DP(NETIF_MSG_IFUP
, "msix_table[%d].entry = %d "
1153 "(fastpath #%u)\n", msix_vec
, msix_vec
, i
);
1157 req_cnt
= BNX2X_NUM_ETH_QUEUES(bp
) + CNIC_CONTEXT_USE
+ 1;
1159 rc
= pci_enable_msix(bp
->pdev
, &bp
->msix_table
[0], req_cnt
);
1162 * reconfigure number of tx/rx queues according to available
1165 if (rc
>= BNX2X_MIN_MSIX_VEC_CNT
) {
1166 /* how less vectors we will have? */
1167 int diff
= req_cnt
- rc
;
1170 "Trying to use less MSI-X vectors: %d\n", rc
);
1172 rc
= pci_enable_msix(bp
->pdev
, &bp
->msix_table
[0], rc
);
1176 "MSI-X is not attainable rc %d\n", rc
);
1180 * decrease number of queues by number of unallocated entries
1182 bp
->num_queues
-= diff
;
1184 DP(NETIF_MSG_IFUP
, "New queue configuration set: %d\n",
1187 /* fall to INTx if not enough memory */
1189 bp
->flags
|= DISABLE_MSI_FLAG
;
1190 DP(NETIF_MSG_IFUP
, "MSI-X is not attainable rc %d\n", rc
);
1194 bp
->flags
|= USING_MSIX_FLAG
;
1199 static int bnx2x_req_msix_irqs(struct bnx2x
*bp
)
1201 int i
, rc
, offset
= 1;
1203 rc
= request_irq(bp
->msix_table
[0].vector
, bnx2x_msix_sp_int
, 0,
1204 bp
->dev
->name
, bp
->dev
);
1206 BNX2X_ERR("request sp irq failed\n");
1213 for_each_eth_queue(bp
, i
) {
1214 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
1215 snprintf(fp
->name
, sizeof(fp
->name
), "%s-fp-%d",
1218 rc
= request_irq(bp
->msix_table
[offset
].vector
,
1219 bnx2x_msix_fp_int
, 0, fp
->name
, fp
);
1221 BNX2X_ERR("request fp #%d irq failed rc %d\n", i
, rc
);
1222 bnx2x_free_msix_irqs(bp
);
1227 fp
->state
= BNX2X_FP_STATE_IRQ
;
1230 i
= BNX2X_NUM_ETH_QUEUES(bp
);
1231 offset
= 1 + CNIC_CONTEXT_USE
;
1232 netdev_info(bp
->dev
, "using MSI-X IRQs: sp %d fp[%d] %d"
1234 bp
->msix_table
[0].vector
,
1235 0, bp
->msix_table
[offset
].vector
,
1236 i
- 1, bp
->msix_table
[offset
+ i
- 1].vector
);
1241 int bnx2x_enable_msi(struct bnx2x
*bp
)
1245 rc
= pci_enable_msi(bp
->pdev
);
1247 DP(NETIF_MSG_IFUP
, "MSI is not attainable\n");
1250 bp
->flags
|= USING_MSI_FLAG
;
1255 static int bnx2x_req_irq(struct bnx2x
*bp
)
1257 unsigned long flags
;
1260 if (bp
->flags
& USING_MSI_FLAG
)
1263 flags
= IRQF_SHARED
;
1265 rc
= request_irq(bp
->pdev
->irq
, bnx2x_interrupt
, flags
,
1266 bp
->dev
->name
, bp
->dev
);
1268 bnx2x_fp(bp
, 0, state
) = BNX2X_FP_STATE_IRQ
;
1273 static void bnx2x_napi_enable(struct bnx2x
*bp
)
1277 for_each_napi_queue(bp
, i
)
1278 napi_enable(&bnx2x_fp(bp
, i
, napi
));
1281 static void bnx2x_napi_disable(struct bnx2x
*bp
)
1285 for_each_napi_queue(bp
, i
)
1286 napi_disable(&bnx2x_fp(bp
, i
, napi
));
1289 void bnx2x_netif_start(struct bnx2x
*bp
)
1293 intr_sem
= atomic_dec_and_test(&bp
->intr_sem
);
1294 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1297 if (netif_running(bp
->dev
)) {
1298 bnx2x_napi_enable(bp
);
1299 bnx2x_int_enable(bp
);
1300 if (bp
->state
== BNX2X_STATE_OPEN
)
1301 netif_tx_wake_all_queues(bp
->dev
);
1306 void bnx2x_netif_stop(struct bnx2x
*bp
, int disable_hw
)
1308 bnx2x_int_disable_sync(bp
, disable_hw
);
1309 bnx2x_napi_disable(bp
);
1310 netif_tx_disable(bp
->dev
);
1313 u16
bnx2x_select_queue(struct net_device
*dev
, struct sk_buff
*skb
)
1316 struct bnx2x
*bp
= netdev_priv(dev
);
1318 return skb_tx_hash(dev
, skb
);
1320 struct ethhdr
*hdr
= (struct ethhdr
*)skb
->data
;
1321 u16 ether_type
= ntohs(hdr
->h_proto
);
1323 /* Skip VLAN tag if present */
1324 if (ether_type
== ETH_P_8021Q
) {
1325 struct vlan_ethhdr
*vhdr
=
1326 (struct vlan_ethhdr
*)skb
->data
;
1328 ether_type
= ntohs(vhdr
->h_vlan_encapsulated_proto
);
1331 /* If ethertype is FCoE or FIP - use FCoE ring */
1332 if ((ether_type
== ETH_P_FCOE
) || (ether_type
== ETH_P_FIP
))
1333 return bnx2x_fcoe(bp
, index
);
1336 /* Select a none-FCoE queue: if FCoE is enabled, exclude FCoE L2 ring
1338 return __skb_tx_hash(dev
, skb
,
1339 dev
->real_num_tx_queues
- FCOE_CONTEXT_USE
);
1342 void bnx2x_set_num_queues(struct bnx2x
*bp
)
1344 switch (bp
->multi_mode
) {
1345 case ETH_RSS_MODE_DISABLED
:
1348 case ETH_RSS_MODE_REGULAR
:
1349 bp
->num_queues
= bnx2x_calc_num_queues(bp
);
1357 /* Add special queues */
1358 bp
->num_queues
+= NONE_ETH_CONTEXT_USE
;
1362 static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x
*bp
)
1366 bnx2x_set_fip_eth_mac_addr(bp
, 1);
1367 bnx2x_set_all_enode_macs(bp
, 1);
1368 bp
->flags
|= FCOE_MACS_SET
;
1373 static void bnx2x_release_firmware(struct bnx2x
*bp
)
1375 kfree(bp
->init_ops_offsets
);
1376 kfree(bp
->init_ops
);
1377 kfree(bp
->init_data
);
1378 release_firmware(bp
->firmware
);
1381 static inline int bnx2x_set_real_num_queues(struct bnx2x
*bp
)
1383 int rc
, num
= bp
->num_queues
;
1387 num
-= FCOE_CONTEXT_USE
;
1390 netif_set_real_num_tx_queues(bp
->dev
, num
);
1391 rc
= netif_set_real_num_rx_queues(bp
->dev
, num
);
1395 static inline void bnx2x_set_rx_buf_size(struct bnx2x
*bp
)
1399 for_each_queue(bp
, i
) {
1400 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
1402 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1405 * Although there are no IP frames expected to arrive to
1406 * this ring we still want to add an
1407 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1411 BNX2X_FCOE_MINI_JUMBO_MTU
+ ETH_OVREHEAD
+
1412 BNX2X_RX_ALIGN
+ IP_HEADER_ALIGNMENT_PADDING
;
1415 bp
->dev
->mtu
+ ETH_OVREHEAD
+ BNX2X_RX_ALIGN
+
1416 IP_HEADER_ALIGNMENT_PADDING
;
1420 /* must be called with rtnl_lock */
1421 int bnx2x_nic_load(struct bnx2x
*bp
, int load_mode
)
1426 /* Set init arrays */
1427 rc
= bnx2x_init_firmware(bp
);
1429 BNX2X_ERR("Error loading firmware\n");
1433 #ifdef BNX2X_STOP_ON_ERROR
1434 if (unlikely(bp
->panic
))
1438 bp
->state
= BNX2X_STATE_OPENING_WAIT4_LOAD
;
1440 /* Set the initial link reported state to link down */
1441 bnx2x_acquire_phy_lock(bp
);
1442 memset(&bp
->last_reported_link
, 0, sizeof(bp
->last_reported_link
));
1443 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
1444 &bp
->last_reported_link
.link_report_flags
);
1445 bnx2x_release_phy_lock(bp
);
1447 /* must be called before memory allocation and HW init */
1448 bnx2x_ilt_set_info(bp
);
1450 /* zero fastpath structures preserving invariants like napi which are
1451 * allocated only once
1453 for_each_queue(bp
, i
)
1456 /* Set the receive queues buffer size */
1457 bnx2x_set_rx_buf_size(bp
);
1459 for_each_queue(bp
, i
)
1460 bnx2x_fp(bp
, i
, disable_tpa
) =
1461 ((bp
->flags
& TPA_ENABLE_FLAG
) == 0);
1464 /* We don't want TPA on FCoE L2 ring */
1465 bnx2x_fcoe(bp
, disable_tpa
) = 1;
1468 if (bnx2x_alloc_mem(bp
))
1471 /* As long as bnx2x_alloc_mem() may possibly update
1472 * bp->num_queues, bnx2x_set_real_num_queues() should always
1475 rc
= bnx2x_set_real_num_queues(bp
);
1477 BNX2X_ERR("Unable to set real_num_queues\n");
1481 bnx2x_napi_enable(bp
);
1483 /* Send LOAD_REQUEST command to MCP
1484 Returns the type of LOAD command:
1485 if it is the first port to be initialized
1486 common blocks should be initialized, otherwise - not
1488 if (!BP_NOMCP(bp
)) {
1489 load_code
= bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_REQ
, 0);
1491 BNX2X_ERR("MCP response failure, aborting\n");
1495 if (load_code
== FW_MSG_CODE_DRV_LOAD_REFUSED
) {
1496 rc
= -EBUSY
; /* other port in diagnostic mode */
1501 int path
= BP_PATH(bp
);
1502 int port
= BP_PORT(bp
);
1504 DP(NETIF_MSG_IFUP
, "NO MCP - load counts[%d] %d, %d, %d\n",
1505 path
, load_count
[path
][0], load_count
[path
][1],
1506 load_count
[path
][2]);
1507 load_count
[path
][0]++;
1508 load_count
[path
][1 + port
]++;
1509 DP(NETIF_MSG_IFUP
, "NO MCP - new load counts[%d] %d, %d, %d\n",
1510 path
, load_count
[path
][0], load_count
[path
][1],
1511 load_count
[path
][2]);
1512 if (load_count
[path
][0] == 1)
1513 load_code
= FW_MSG_CODE_DRV_LOAD_COMMON
;
1514 else if (load_count
[path
][1 + port
] == 1)
1515 load_code
= FW_MSG_CODE_DRV_LOAD_PORT
;
1517 load_code
= FW_MSG_CODE_DRV_LOAD_FUNCTION
;
1520 if ((load_code
== FW_MSG_CODE_DRV_LOAD_COMMON
) ||
1521 (load_code
== FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
) ||
1522 (load_code
== FW_MSG_CODE_DRV_LOAD_PORT
))
1526 DP(NETIF_MSG_LINK
, "pmf %d\n", bp
->port
.pmf
);
1529 rc
= bnx2x_init_hw(bp
, load_code
);
1531 BNX2X_ERR("HW init failed, aborting\n");
1532 bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_DONE
, 0);
1536 /* Connect to IRQs */
1537 rc
= bnx2x_setup_irqs(bp
);
1539 bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_DONE
, 0);
1543 /* Setup NIC internals and enable interrupts */
1544 bnx2x_nic_init(bp
, load_code
);
1546 if (((load_code
== FW_MSG_CODE_DRV_LOAD_COMMON
) ||
1547 (load_code
== FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
)) &&
1548 (bp
->common
.shmem2_base
))
1549 SHMEM2_WR(bp
, dcc_support
,
1550 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV
|
1551 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV
));
1553 /* Send LOAD_DONE command to MCP */
1554 if (!BP_NOMCP(bp
)) {
1555 load_code
= bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_DONE
, 0);
1557 BNX2X_ERR("MCP response failure, aborting\n");
1563 bnx2x_dcbx_init(bp
);
1565 bp
->state
= BNX2X_STATE_OPENING_WAIT4_PORT
;
1567 rc
= bnx2x_func_start(bp
);
1569 BNX2X_ERR("Function start failed!\n");
1570 #ifndef BNX2X_STOP_ON_ERROR
1578 rc
= bnx2x_setup_client(bp
, &bp
->fp
[0], 1 /* Leading */);
1580 BNX2X_ERR("Setup leading failed!\n");
1581 #ifndef BNX2X_STOP_ON_ERROR
1589 if (!CHIP_IS_E1(bp
) &&
1590 (bp
->mf_config
[BP_VN(bp
)] & FUNC_MF_CFG_FUNC_DISABLED
)) {
1591 DP(NETIF_MSG_IFUP
, "mf_cfg function disabled\n");
1592 bp
->flags
|= MF_FUNC_DIS
;
1596 /* Enable Timer scan */
1597 REG_WR(bp
, TM_REG_EN_LINEAR0_TIMER
+ BP_PORT(bp
)*4, 1);
1600 for_each_nondefault_queue(bp
, i
) {
1601 rc
= bnx2x_setup_client(bp
, &bp
->fp
[i
], 0);
1610 /* Now when Clients are configured we are ready to work */
1611 bp
->state
= BNX2X_STATE_OPEN
;
1614 bnx2x_set_fcoe_eth_macs(bp
);
1617 bnx2x_set_eth_mac(bp
, 1);
1619 /* Clear MC configuration */
1621 bnx2x_invalidate_e1_mc_list(bp
);
1623 bnx2x_invalidate_e1h_mc_list(bp
);
1625 /* Clear UC lists configuration */
1626 bnx2x_invalidate_uc_list(bp
);
1628 if (bp
->pending_max
) {
1629 bnx2x_update_max_mf_config(bp
, bp
->pending_max
);
1630 bp
->pending_max
= 0;
1634 bnx2x_initial_phy_init(bp
, load_mode
);
1636 /* Initialize Rx filtering */
1637 bnx2x_set_rx_mode(bp
->dev
);
1639 /* Start fast path */
1640 switch (load_mode
) {
1642 /* Tx queue should be only reenabled */
1643 netif_tx_wake_all_queues(bp
->dev
);
1644 /* Initialize the receive filter. */
1648 netif_tx_start_all_queues(bp
->dev
);
1649 smp_mb__after_clear_bit();
1653 bp
->state
= BNX2X_STATE_DIAG
;
1661 bnx2x__link_status_update(bp
);
1663 /* start the timer */
1664 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
1667 bnx2x_setup_cnic_irq_info(bp
);
1668 if (bp
->state
== BNX2X_STATE_OPEN
)
1669 bnx2x_cnic_notify(bp
, CNIC_CTL_START_CMD
);
1671 bnx2x_inc_load_cnt(bp
);
1673 bnx2x_release_firmware(bp
);
1679 /* Disable Timer scan */
1680 REG_WR(bp
, TM_REG_EN_LINEAR0_TIMER
+ BP_PORT(bp
)*4, 0);
1683 bnx2x_int_disable_sync(bp
, 1);
1685 /* Free SKBs, SGEs, TPA pool and driver internals */
1686 bnx2x_free_skbs(bp
);
1687 for_each_rx_queue(bp
, i
)
1688 bnx2x_free_rx_sge_range(bp
, bp
->fp
+ i
, NUM_RX_SGE
);
1693 if (!BP_NOMCP(bp
)) {
1694 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP
, 0);
1695 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
, 0);
1700 bnx2x_napi_disable(bp
);
1704 bnx2x_release_firmware(bp
);
1709 /* must be called with rtnl_lock */
1710 int bnx2x_nic_unload(struct bnx2x
*bp
, int unload_mode
)
1714 if (bp
->state
== BNX2X_STATE_CLOSED
) {
1715 /* Interface has been removed - nothing to recover */
1716 bp
->recovery_state
= BNX2X_RECOVERY_DONE
;
1718 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_RESERVED_08
);
1725 bnx2x_cnic_notify(bp
, CNIC_CTL_STOP_CMD
);
1727 bp
->state
= BNX2X_STATE_CLOSING_WAIT4_HALT
;
1729 /* Set "drop all" */
1730 bp
->rx_mode
= BNX2X_RX_MODE_NONE
;
1731 bnx2x_set_storm_rx_mode(bp
);
1734 bnx2x_tx_disable(bp
);
1736 del_timer_sync(&bp
->timer
);
1738 SHMEM_WR(bp
, func_mb
[BP_FW_MB_IDX(bp
)].drv_pulse_mb
,
1739 (DRV_PULSE_ALWAYS_ALIVE
| bp
->fw_drv_pulse_wr_seq
));
1741 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
1743 /* Cleanup the chip if needed */
1744 if (unload_mode
!= UNLOAD_RECOVERY
)
1745 bnx2x_chip_cleanup(bp
, unload_mode
);
1747 /* Disable HW interrupts, NAPI and Tx */
1748 bnx2x_netif_stop(bp
, 1);
1756 /* Free SKBs, SGEs, TPA pool and driver internals */
1757 bnx2x_free_skbs(bp
);
1758 for_each_rx_queue(bp
, i
)
1759 bnx2x_free_rx_sge_range(bp
, bp
->fp
+ i
, NUM_RX_SGE
);
1763 bp
->state
= BNX2X_STATE_CLOSED
;
1765 /* The last driver must disable a "close the gate" if there is no
1766 * parity attention or "process kill" pending.
1768 if ((!bnx2x_dec_load_cnt(bp
)) && (!bnx2x_chk_parity_attn(bp
)) &&
1769 bnx2x_reset_is_done(bp
))
1770 bnx2x_disable_close_the_gate(bp
);
1772 /* Reset MCP mail box sequence if there is on going recovery */
1773 if (unload_mode
== UNLOAD_RECOVERY
)
1779 int bnx2x_set_power_state(struct bnx2x
*bp
, pci_power_t state
)
1783 /* If there is no power capability, silently succeed */
1785 DP(NETIF_MSG_HW
, "No power capability. Breaking.\n");
1789 pci_read_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
, &pmcsr
);
1793 pci_write_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
,
1794 ((pmcsr
& ~PCI_PM_CTRL_STATE_MASK
) |
1795 PCI_PM_CTRL_PME_STATUS
));
1797 if (pmcsr
& PCI_PM_CTRL_STATE_MASK
)
1798 /* delay required during transition out of D3hot */
1803 /* If there are other clients above don't
1804 shut down the power */
1805 if (atomic_read(&bp
->pdev
->enable_cnt
) != 1)
1807 /* Don't shut down the power for emulation and FPGA */
1808 if (CHIP_REV_IS_SLOW(bp
))
1811 pmcsr
&= ~PCI_PM_CTRL_STATE_MASK
;
1815 pmcsr
|= PCI_PM_CTRL_PME_ENABLE
;
1817 pci_write_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
,
1820 /* No more memory access after this point until
1821 * device is brought back to D0.
1832 * net_device service functions
1834 int bnx2x_poll(struct napi_struct
*napi
, int budget
)
1837 struct bnx2x_fastpath
*fp
= container_of(napi
, struct bnx2x_fastpath
,
1839 struct bnx2x
*bp
= fp
->bp
;
1842 #ifdef BNX2X_STOP_ON_ERROR
1843 if (unlikely(bp
->panic
)) {
1844 napi_complete(napi
);
1849 if (bnx2x_has_tx_work(fp
))
1852 if (bnx2x_has_rx_work(fp
)) {
1853 work_done
+= bnx2x_rx_int(fp
, budget
- work_done
);
1855 /* must not complete if we consumed full budget */
1856 if (work_done
>= budget
)
1860 /* Fall out from the NAPI loop if needed */
1861 if (!(bnx2x_has_rx_work(fp
) || bnx2x_has_tx_work(fp
))) {
1863 /* No need to update SB for FCoE L2 ring as long as
1864 * it's connected to the default SB and the SB
1865 * has been updated when NAPI was scheduled.
1867 if (IS_FCOE_FP(fp
)) {
1868 napi_complete(napi
);
1873 bnx2x_update_fpsb_idx(fp
);
1874 /* bnx2x_has_rx_work() reads the status block,
1875 * thus we need to ensure that status block indices
1876 * have been actually read (bnx2x_update_fpsb_idx)
1877 * prior to this check (bnx2x_has_rx_work) so that
1878 * we won't write the "newer" value of the status block
1879 * to IGU (if there was a DMA right after
1880 * bnx2x_has_rx_work and if there is no rmb, the memory
1881 * reading (bnx2x_update_fpsb_idx) may be postponed
1882 * to right before bnx2x_ack_sb). In this case there
1883 * will never be another interrupt until there is
1884 * another update of the status block, while there
1885 * is still unhandled work.
1889 if (!(bnx2x_has_rx_work(fp
) || bnx2x_has_tx_work(fp
))) {
1890 napi_complete(napi
);
1891 /* Re-enable interrupts */
1893 "Update index to %d\n", fp
->fp_hc_idx
);
1894 bnx2x_ack_sb(bp
, fp
->igu_sb_id
, USTORM_ID
,
1895 le16_to_cpu(fp
->fp_hc_idx
),
1905 /* we split the first BD into headers and data BDs
1906 * to ease the pain of our fellow microcode engineers
1907 * we use one mapping for both BDs
1908 * So far this has only been observed to happen
1909 * in Other Operating Systems(TM)
1911 static noinline u16
bnx2x_tx_split(struct bnx2x
*bp
,
1912 struct bnx2x_fastpath
*fp
,
1913 struct sw_tx_bd
*tx_buf
,
1914 struct eth_tx_start_bd
**tx_bd
, u16 hlen
,
1915 u16 bd_prod
, int nbd
)
1917 struct eth_tx_start_bd
*h_tx_bd
= *tx_bd
;
1918 struct eth_tx_bd
*d_tx_bd
;
1920 int old_len
= le16_to_cpu(h_tx_bd
->nbytes
);
1922 /* first fix first BD */
1923 h_tx_bd
->nbd
= cpu_to_le16(nbd
);
1924 h_tx_bd
->nbytes
= cpu_to_le16(hlen
);
1926 DP(NETIF_MSG_TX_QUEUED
, "TSO split header size is %d "
1927 "(%x:%x) nbd %d\n", h_tx_bd
->nbytes
, h_tx_bd
->addr_hi
,
1928 h_tx_bd
->addr_lo
, h_tx_bd
->nbd
);
1930 /* now get a new data BD
1931 * (after the pbd) and fill it */
1932 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
1933 d_tx_bd
= &fp
->tx_desc_ring
[bd_prod
].reg_bd
;
1935 mapping
= HILO_U64(le32_to_cpu(h_tx_bd
->addr_hi
),
1936 le32_to_cpu(h_tx_bd
->addr_lo
)) + hlen
;
1938 d_tx_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
1939 d_tx_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
1940 d_tx_bd
->nbytes
= cpu_to_le16(old_len
- hlen
);
1942 /* this marks the BD as one that has no individual mapping */
1943 tx_buf
->flags
|= BNX2X_TSO_SPLIT_BD
;
1945 DP(NETIF_MSG_TX_QUEUED
,
1946 "TSO split data size is %d (%x:%x)\n",
1947 d_tx_bd
->nbytes
, d_tx_bd
->addr_hi
, d_tx_bd
->addr_lo
);
1950 *tx_bd
= (struct eth_tx_start_bd
*)d_tx_bd
;
1955 static inline u16
bnx2x_csum_fix(unsigned char *t_header
, u16 csum
, s8 fix
)
1958 csum
= (u16
) ~csum_fold(csum_sub(csum
,
1959 csum_partial(t_header
- fix
, fix
, 0)));
1962 csum
= (u16
) ~csum_fold(csum_add(csum
,
1963 csum_partial(t_header
, -fix
, 0)));
1965 return swab16(csum
);
1968 static inline u32
bnx2x_xmit_type(struct bnx2x
*bp
, struct sk_buff
*skb
)
1972 if (skb
->ip_summed
!= CHECKSUM_PARTIAL
)
1976 if (vlan_get_protocol(skb
) == htons(ETH_P_IPV6
)) {
1978 if (ipv6_hdr(skb
)->nexthdr
== IPPROTO_TCP
)
1979 rc
|= XMIT_CSUM_TCP
;
1983 if (ip_hdr(skb
)->protocol
== IPPROTO_TCP
)
1984 rc
|= XMIT_CSUM_TCP
;
1988 if (skb_is_gso_v6(skb
))
1989 rc
|= XMIT_GSO_V6
| XMIT_CSUM_TCP
| XMIT_CSUM_V6
;
1990 else if (skb_is_gso(skb
))
1991 rc
|= XMIT_GSO_V4
| XMIT_CSUM_V4
| XMIT_CSUM_TCP
;
1996 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1997 /* check if packet requires linearization (packet is too fragmented)
1998 no need to check fragmentation if page size > 8K (there will be no
1999 violation to FW restrictions) */
2000 static int bnx2x_pkt_req_lin(struct bnx2x
*bp
, struct sk_buff
*skb
,
2005 int first_bd_sz
= 0;
2007 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2008 if (skb_shinfo(skb
)->nr_frags
>= (MAX_FETCH_BD
- 3)) {
2010 if (xmit_type
& XMIT_GSO
) {
2011 unsigned short lso_mss
= skb_shinfo(skb
)->gso_size
;
2012 /* Check if LSO packet needs to be copied:
2013 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2014 int wnd_size
= MAX_FETCH_BD
- 3;
2015 /* Number of windows to check */
2016 int num_wnds
= skb_shinfo(skb
)->nr_frags
- wnd_size
;
2021 /* Headers length */
2022 hlen
= (int)(skb_transport_header(skb
) - skb
->data
) +
2025 /* Amount of data (w/o headers) on linear part of SKB*/
2026 first_bd_sz
= skb_headlen(skb
) - hlen
;
2028 wnd_sum
= first_bd_sz
;
2030 /* Calculate the first sum - it's special */
2031 for (frag_idx
= 0; frag_idx
< wnd_size
- 1; frag_idx
++)
2033 skb_shinfo(skb
)->frags
[frag_idx
].size
;
2035 /* If there was data on linear skb data - check it */
2036 if (first_bd_sz
> 0) {
2037 if (unlikely(wnd_sum
< lso_mss
)) {
2042 wnd_sum
-= first_bd_sz
;
2045 /* Others are easier: run through the frag list and
2046 check all windows */
2047 for (wnd_idx
= 0; wnd_idx
<= num_wnds
; wnd_idx
++) {
2049 skb_shinfo(skb
)->frags
[wnd_idx
+ wnd_size
- 1].size
;
2051 if (unlikely(wnd_sum
< lso_mss
)) {
2056 skb_shinfo(skb
)->frags
[wnd_idx
].size
;
2059 /* in non-LSO too fragmented packet should always
2066 if (unlikely(to_copy
))
2067 DP(NETIF_MSG_TX_QUEUED
,
2068 "Linearization IS REQUIRED for %s packet. "
2069 "num_frags %d hlen %d first_bd_sz %d\n",
2070 (xmit_type
& XMIT_GSO
) ? "LSO" : "non-LSO",
2071 skb_shinfo(skb
)->nr_frags
, hlen
, first_bd_sz
);
2077 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff
*skb
, u32
*parsing_data
,
2080 *parsing_data
|= (skb_shinfo(skb
)->gso_size
<<
2081 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT
) &
2082 ETH_TX_PARSE_BD_E2_LSO_MSS
;
2083 if ((xmit_type
& XMIT_GSO_V6
) &&
2084 (ipv6_hdr(skb
)->nexthdr
== NEXTHDR_IPV6
))
2085 *parsing_data
|= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR
;
2089 * bnx2x_set_pbd_gso - update PBD in GSO case.
2093 * @xmit_type: xmit flags
2095 static inline void bnx2x_set_pbd_gso(struct sk_buff
*skb
,
2096 struct eth_tx_parse_bd_e1x
*pbd
,
2099 pbd
->lso_mss
= cpu_to_le16(skb_shinfo(skb
)->gso_size
);
2100 pbd
->tcp_send_seq
= swab32(tcp_hdr(skb
)->seq
);
2101 pbd
->tcp_flags
= pbd_tcp_flags(skb
);
2103 if (xmit_type
& XMIT_GSO_V4
) {
2104 pbd
->ip_id
= swab16(ip_hdr(skb
)->id
);
2105 pbd
->tcp_pseudo_csum
=
2106 swab16(~csum_tcpudp_magic(ip_hdr(skb
)->saddr
,
2108 0, IPPROTO_TCP
, 0));
2111 pbd
->tcp_pseudo_csum
=
2112 swab16(~csum_ipv6_magic(&ipv6_hdr(skb
)->saddr
,
2113 &ipv6_hdr(skb
)->daddr
,
2114 0, IPPROTO_TCP
, 0));
2116 pbd
->global_data
|= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN
;
2120 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
2122 * @bp: driver handle
2124 * @parsing_data: data to be updated
2125 * @xmit_type: xmit flags
2129 static inline u8
bnx2x_set_pbd_csum_e2(struct bnx2x
*bp
, struct sk_buff
*skb
,
2130 u32
*parsing_data
, u32 xmit_type
)
2133 ((((u8
*)skb_transport_header(skb
) - skb
->data
) >> 1) <<
2134 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT
) &
2135 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W
;
2137 if (xmit_type
& XMIT_CSUM_TCP
) {
2138 *parsing_data
|= ((tcp_hdrlen(skb
) / 4) <<
2139 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT
) &
2140 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW
;
2142 return skb_transport_header(skb
) + tcp_hdrlen(skb
) - skb
->data
;
2144 /* We support checksum offload for TCP and UDP only.
2145 * No need to pass the UDP header length - it's a constant.
2147 return skb_transport_header(skb
) +
2148 sizeof(struct udphdr
) - skb
->data
;
2152 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
2154 * @bp: driver handle
2156 * @pbd: parse BD to be updated
2157 * @xmit_type: xmit flags
2159 static inline u8
bnx2x_set_pbd_csum(struct bnx2x
*bp
, struct sk_buff
*skb
,
2160 struct eth_tx_parse_bd_e1x
*pbd
,
2163 u8 hlen
= (skb_network_header(skb
) - skb
->data
) >> 1;
2165 /* for now NS flag is not used in Linux */
2167 (hlen
| ((skb
->protocol
== cpu_to_be16(ETH_P_8021Q
)) <<
2168 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT
));
2170 pbd
->ip_hlen_w
= (skb_transport_header(skb
) -
2171 skb_network_header(skb
)) >> 1;
2173 hlen
+= pbd
->ip_hlen_w
;
2175 /* We support checksum offload for TCP and UDP only */
2176 if (xmit_type
& XMIT_CSUM_TCP
)
2177 hlen
+= tcp_hdrlen(skb
) / 2;
2179 hlen
+= sizeof(struct udphdr
) / 2;
2181 pbd
->total_hlen_w
= cpu_to_le16(hlen
);
2184 if (xmit_type
& XMIT_CSUM_TCP
) {
2185 pbd
->tcp_pseudo_csum
= swab16(tcp_hdr(skb
)->check
);
2188 s8 fix
= SKB_CS_OFF(skb
); /* signed! */
2190 DP(NETIF_MSG_TX_QUEUED
,
2191 "hlen %d fix %d csum before fix %x\n",
2192 le16_to_cpu(pbd
->total_hlen_w
), fix
, SKB_CS(skb
));
2194 /* HW bug: fixup the CSUM */
2195 pbd
->tcp_pseudo_csum
=
2196 bnx2x_csum_fix(skb_transport_header(skb
),
2199 DP(NETIF_MSG_TX_QUEUED
, "csum after fix %x\n",
2200 pbd
->tcp_pseudo_csum
);
2206 /* called with netif_tx_lock
2207 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2208 * netif_wake_queue()
2210 netdev_tx_t
bnx2x_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
2212 struct bnx2x
*bp
= netdev_priv(dev
);
2213 struct bnx2x_fastpath
*fp
;
2214 struct netdev_queue
*txq
;
2215 struct sw_tx_bd
*tx_buf
;
2216 struct eth_tx_start_bd
*tx_start_bd
;
2217 struct eth_tx_bd
*tx_data_bd
, *total_pkt_bd
= NULL
;
2218 struct eth_tx_parse_bd_e1x
*pbd_e1x
= NULL
;
2219 struct eth_tx_parse_bd_e2
*pbd_e2
= NULL
;
2220 u32 pbd_e2_parsing_data
= 0;
2221 u16 pkt_prod
, bd_prod
;
2224 u32 xmit_type
= bnx2x_xmit_type(bp
, skb
);
2227 __le16 pkt_size
= 0;
2229 u8 mac_type
= UNICAST_ADDRESS
;
2231 #ifdef BNX2X_STOP_ON_ERROR
2232 if (unlikely(bp
->panic
))
2233 return NETDEV_TX_BUSY
;
2236 fp_index
= skb_get_queue_mapping(skb
);
2237 txq
= netdev_get_tx_queue(dev
, fp_index
);
2239 fp
= &bp
->fp
[fp_index
];
2241 if (unlikely(bnx2x_tx_avail(fp
) < (skb_shinfo(skb
)->nr_frags
+ 3))) {
2242 fp
->eth_q_stats
.driver_xoff
++;
2243 netif_tx_stop_queue(txq
);
2244 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2245 return NETDEV_TX_BUSY
;
2248 DP(NETIF_MSG_TX_QUEUED
, "queue[%d]: SKB: summed %x protocol %x "
2249 "protocol(%x,%x) gso type %x xmit_type %x\n",
2250 fp_index
, skb
->ip_summed
, skb
->protocol
, ipv6_hdr(skb
)->nexthdr
,
2251 ip_hdr(skb
)->protocol
, skb_shinfo(skb
)->gso_type
, xmit_type
);
2253 eth
= (struct ethhdr
*)skb
->data
;
2255 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2256 if (unlikely(is_multicast_ether_addr(eth
->h_dest
))) {
2257 if (is_broadcast_ether_addr(eth
->h_dest
))
2258 mac_type
= BROADCAST_ADDRESS
;
2260 mac_type
= MULTICAST_ADDRESS
;
2263 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2264 /* First, check if we need to linearize the skb (due to FW
2265 restrictions). No need to check fragmentation if page size > 8K
2266 (there will be no violation to FW restrictions) */
2267 if (bnx2x_pkt_req_lin(bp
, skb
, xmit_type
)) {
2268 /* Statistics of linearization */
2270 if (skb_linearize(skb
) != 0) {
2271 DP(NETIF_MSG_TX_QUEUED
, "SKB linearization failed - "
2272 "silently dropping this SKB\n");
2273 dev_kfree_skb_any(skb
);
2274 return NETDEV_TX_OK
;
2280 Please read carefully. First we use one BD which we mark as start,
2281 then we have a parsing info BD (used for TSO or xsum),
2282 and only then we have the rest of the TSO BDs.
2283 (don't forget to mark the last one as last,
2284 and to unmap only AFTER you write to the BD ...)
2285 And above all, all pdb sizes are in words - NOT DWORDS!
2288 pkt_prod
= fp
->tx_pkt_prod
++;
2289 bd_prod
= TX_BD(fp
->tx_bd_prod
);
2291 /* get a tx_buf and first BD */
2292 tx_buf
= &fp
->tx_buf_ring
[TX_BD(pkt_prod
)];
2293 tx_start_bd
= &fp
->tx_desc_ring
[bd_prod
].start_bd
;
2295 tx_start_bd
->bd_flags
.as_bitfield
= ETH_TX_BD_FLAGS_START_BD
;
2296 SET_FLAG(tx_start_bd
->general_data
, ETH_TX_START_BD_ETH_ADDR_TYPE
,
2300 SET_FLAG(tx_start_bd
->general_data
, ETH_TX_START_BD_HDR_NBDS
, 1);
2302 /* remember the first BD of the packet */
2303 tx_buf
->first_bd
= fp
->tx_bd_prod
;
2307 DP(NETIF_MSG_TX_QUEUED
,
2308 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2309 pkt_prod
, tx_buf
, fp
->tx_pkt_prod
, bd_prod
, tx_start_bd
);
2311 if (vlan_tx_tag_present(skb
)) {
2312 tx_start_bd
->vlan_or_ethertype
=
2313 cpu_to_le16(vlan_tx_tag_get(skb
));
2314 tx_start_bd
->bd_flags
.as_bitfield
|=
2315 (X_ETH_OUTBAND_VLAN
<< ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT
);
2317 tx_start_bd
->vlan_or_ethertype
= cpu_to_le16(pkt_prod
);
2319 /* turn on parsing and get a BD */
2320 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
2322 if (xmit_type
& XMIT_CSUM
) {
2323 tx_start_bd
->bd_flags
.as_bitfield
|= ETH_TX_BD_FLAGS_L4_CSUM
;
2325 if (xmit_type
& XMIT_CSUM_V4
)
2326 tx_start_bd
->bd_flags
.as_bitfield
|=
2327 ETH_TX_BD_FLAGS_IP_CSUM
;
2329 tx_start_bd
->bd_flags
.as_bitfield
|=
2330 ETH_TX_BD_FLAGS_IPV6
;
2332 if (!(xmit_type
& XMIT_CSUM_TCP
))
2333 tx_start_bd
->bd_flags
.as_bitfield
|=
2334 ETH_TX_BD_FLAGS_IS_UDP
;
2337 if (CHIP_IS_E2(bp
)) {
2338 pbd_e2
= &fp
->tx_desc_ring
[bd_prod
].parse_bd_e2
;
2339 memset(pbd_e2
, 0, sizeof(struct eth_tx_parse_bd_e2
));
2340 /* Set PBD in checksum offload case */
2341 if (xmit_type
& XMIT_CSUM
)
2342 hlen
= bnx2x_set_pbd_csum_e2(bp
, skb
,
2343 &pbd_e2_parsing_data
,
2346 pbd_e1x
= &fp
->tx_desc_ring
[bd_prod
].parse_bd_e1x
;
2347 memset(pbd_e1x
, 0, sizeof(struct eth_tx_parse_bd_e1x
));
2348 /* Set PBD in checksum offload case */
2349 if (xmit_type
& XMIT_CSUM
)
2350 hlen
= bnx2x_set_pbd_csum(bp
, skb
, pbd_e1x
, xmit_type
);
	/* Map skb linear data for DMA */
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);

	/* Setup the data pointer of the first BD of the packet */
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_start_bd->vlan_or_ethertype));
	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);
		if (CHIP_IS_E2(bp))
			bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
					     xmit_type);
		else
			bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
	}

	/* Set the PBD's parsing_data field if not zero
	 * (for the chips newer than 57711).
	 */
	if (pbd_e2_parsing_data)
		pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);

	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
	/* Handle fragmented skb */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = dma_map_page(&bp->pdev->dev, frag->page,
				       frag->page_offset,
				       frag->size, DMA_TO_DEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd_e1x)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
		   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
		   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
		   le16_to_cpu(pbd_e1x->total_hlen_w));
	if (pbd_e2)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
		   pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
		   pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
		   pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
		   pbd_e2->parsing_data);
	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assume packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();

	DOORBELL(bp, fp->cid, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * the tx consumer index */
		smp_mb();

		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}
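
/*
 * Editorial sketch (not part of the driver): the tail of the xmit routine
 * above stops the queue when fewer than MAX_SKB_FRAGS + 3 BDs remain, then
 * re-checks availability after a barrier so that a completion running
 * concurrently in bnx2x_tx_int() cannot leave the queue stopped forever.
 * A minimal user-space model of that stop/re-check handshake, with
 * hypothetical ring_avail()/stopped helpers, might look like this:
 *
 *	#include <stdatomic.h>
 *	#include <stdbool.h>
 *	#include <stdio.h>
 *
 *	#define RING_SIZE   64
 *	#define STOP_THRESH 3
 *
 *	static atomic_int prod, cons;		// BDs produced / consumed
 *	static atomic_bool stopped;		// models netif_tx_stop_queue()
 *
 *	static int ring_avail(void)
 *	{
 *		return RING_SIZE - (atomic_load(&prod) - atomic_load(&cons));
 *	}
 *
 *	static void producer_post(int nbd)
 *	{
 *		atomic_fetch_add(&prod, nbd);		// publish new BDs
 *		if (ring_avail() < STOP_THRESH) {
 *			atomic_store(&stopped, true);	// stop the queue
 *			atomic_thread_fence(memory_order_seq_cst);
 *			// re-check: the consumer may have freed BDs meanwhile
 *			if (ring_avail() >= STOP_THRESH)
 *				atomic_store(&stopped, false);
 *		}
 *	}
 *
 *	int main(void)
 *	{
 *		producer_post(RING_SIZE - 2);
 *		printf("avail %d stopped %d\n",
 *		       ring_avail(), (int)atomic_load(&stopped));
 *		return 0;
 *	}
 */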
/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2x_set_eth_mac(bp, 1);

	return 0;
}
static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
{
	union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
	struct bnx2x_fastpath *fp = &bp->fp[fp_index];

	/* Common: status block */
	if (IS_FCOE_IDX(fp_index)) {
		memset(sb, 0, sizeof(union host_hc_status_block));
		fp->status_blk_mapping = 0;
	} else {
		if (CHIP_IS_E2(bp))
			BNX2X_PCI_FREE(sb->e2_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_FREE(sb->e1x_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e1x));
	}

	/* Rx */
	if (!skip_rx_queue(bp, fp_index)) {
		bnx2x_free_rx_bds(fp);

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
			       bnx2x_fp(bp, fp_index, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
			       bnx2x_fp(bp, fp_index, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
			       bnx2x_fp(bp, fp_index, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}

	/* Tx */
	if (!skip_tx_queue(bp, fp_index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, tx_desc_ring),
			       bnx2x_fp(bp, fp_index, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */
}
void bnx2x_free_fp_mem(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}
static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
{
	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);

	if (CHIP_IS_E2(bp)) {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e2_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e2_sb->sb.running_index;
	} else {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e1x_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e1x_sb->sb.running_index;
	}
}
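
/*
 * Editorial sketch (illustration only, not driver code): set_sb_shortcuts()
 * caches pointers into whichever member of the status-block union is active
 * for the chip, so the hot path does not re-select e2 vs e1x on every
 * access. The same pattern with a made-up union:
 *
 *	#include <stdio.h>
 *
 *	struct sb_e1x { short index_values[8]; };
 *	struct sb_e2  { short index_values[16]; };
 *	union  sb     { struct sb_e1x e1x; struct sb_e2 e2; };
 *
 *	int main(void)
 *	{
 *		union sb blk = { 0 };
 *		int is_e2 = 1;				// chip selector
 *		short *shortcut = is_e2 ?		// cached once
 *			blk.e2.index_values : blk.e1x.index_values;
 *
 *		shortcut[0] = 42;			// fast-path access
 *		printf("%d\n", blk.e2.index_values[0]);
 *		return 0;
 *	}
 */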
static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
{
	union host_hc_status_block *sb;
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int ring_size = 0;

	/* if rx_ring_size specified - use it */
	int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
			   MAX_RX_AVAIL/bp->num_queues;

	/* allocate at least number of buffers required by FW */
	rx_ring_size = max_t(int, fp->disable_tpa ? MIN_RX_SIZE_NONTPA :
						    MIN_RX_SIZE_TPA,
				  rx_ring_size);

	bnx2x_fp(bp, index, bp) = bp;
	bnx2x_fp(bp, index, index) = index;

	/* Common: status block */
	sb = &bnx2x_fp(bp, index, status_blk);
	if (!IS_FCOE_IDX(index)) {
		if (CHIP_IS_E2(bp))
			BNX2X_PCI_ALLOC(sb->e2_sb,
				&bnx2x_fp(bp, index, status_blk_mapping),
				sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_ALLOC(sb->e1x_sb,
				&bnx2x_fp(bp, index, status_blk_mapping),
				sizeof(struct host_hc_status_block_e1x));
	}
	set_sb_shortcuts(bp, index);

	/* Tx */
	if (!skip_tx_queue(bp, index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, index, tx_buf_ring),
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, tx_desc_ring),
				&bnx2x_fp(bp, index, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}

	/* Rx */
	if (!skip_rx_queue(bp, index)) {
		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
				&bnx2x_fp(bp, index, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
				&bnx2x_fp(bp, index, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
				&bnx2x_fp(bp, index, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);

		/* RX BD ring */
		bnx2x_set_next_page_rx_bd(fp);

		/* CQ ring */
		bnx2x_set_next_page_rx_cq(fp);

		/* BDs */
		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
		if (ring_size < rx_ring_size)
			goto alloc_mem_err;
	}

	return 0;

/* handles low memory cases */
alloc_mem_err:
	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
		  index, ring_size);
	/* FW will drop all packets if queue is not big enough,
	 * In these cases we disable the queue
	 * Min size is different for TPA and non-TPA queues
	 */
	if (ring_size < (fp->disable_tpa ?
				MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
		/* release memory allocated for this queue */
		bnx2x_free_fp_mem_at(bp, index);
		return -ENOMEM;
	}
	return 0;
}
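
/*
 * Editorial note (illustration, not driver code): the ring sizing above
 * first takes the user-requested size (or MAX_RX_AVAIL divided evenly among
 * the queues) and then raises it to the minimum the FW accepts, which
 * differs for TPA and non-TPA queues. With made-up stand-in constants:
 *
 *	#include <stdio.h>
 *
 *	// hypothetical stand-ins for the driver constants
 *	#define MAX_RX_AVAIL_X		8192
 *	#define MIN_RX_SIZE_TPA_X	72
 *	#define MIN_RX_SIZE_NONTPA_X	10
 *
 *	static int rx_ring_size(int requested, int num_queues, int disable_tpa)
 *	{
 *		int size = requested ? requested : MAX_RX_AVAIL_X / num_queues;
 *		int min = disable_tpa ? MIN_RX_SIZE_NONTPA_X : MIN_RX_SIZE_TPA_X;
 *
 *		return size > min ? size : min;	// same effect as max_t()
 *	}
 *
 *	int main(void)
 *	{
 *		printf("%d\n", rx_ring_size(0, 8, 0));	// 8192/8 = 1024
 *		printf("%d\n", rx_ring_size(16, 8, 0));	// raised to 72
 *		return 0;
 *	}
 */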
int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
	int i;

	/**
	 * 1. Allocate FP for leading - fatal if error
	 * 2. {CNIC} Allocate FCoE FP - fatal if error
	 * 3. Allocate RSS - fix number of queues if error
	 */

	/* leading */
	if (bnx2x_alloc_fp_mem_at(bp, 0))
		return -ENOMEM;

	/* FCoE */
	if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
		return -ENOMEM;

	/* RSS */
	for_each_nondefault_eth_queue(bp, i)
		if (bnx2x_alloc_fp_mem_at(bp, i))
			break;

	/* handle memory failures */
	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;

		/*
		 * move non eth FPs next to last eth FP
		 * must be done in that order
		 * FCOE_IDX < FWD_IDX < OOO_IDX
		 */
		bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);

		bp->num_queues -= delta;
		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
			  bp->num_queues + delta, bp->num_queues);
	}

	return 0;
}
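
/*
 * Editorial sketch (not driver code): when only 'i' of the planned RSS
 * queues could be allocated, the code above shrinks the queue count by
 * delta and slides the FCoE fastpath down by the same amount so it stays
 * adjacent to the last ethernet queue. A toy index calculation:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		int planned_eth = 8;	// eth queues we asked for
 *		int allocated = 5;	// RSS loop stopped early at i == 5
 *		int fcoe_idx = 8;	// hypothetical FCOE_IDX
 *		int delta = planned_eth - allocated;
 *
 *		printf("delta %d: FCoE fp moves %d -> %d\n",
 *		       delta, fcoe_idx, fcoe_idx - delta);
 *		return 0;
 *	}
 */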
static int bnx2x_setup_irqs(struct bnx2x *bp)
{
	int rc = 0;

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc)
			return rc;
	} else {
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			return rc;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI IRQ %d\n",
				    bp->pdev->irq);
		}
	}

	return 0;
}
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
	kfree(bp->fp);
	kfree(bp->msix_table);
	kfree(bp->ilt);
}
int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;

	/* fp array */
	fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	bp->fp = fp;

	/* msix table */
	tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
		      GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;

alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}
static int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}
/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	return bnx2x_reload_if_running(dev);
}
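
/*
 * Editorial sketch (illustration only): the MTU check above rejects values
 * above the jumbo-frame limit and values so small that MTU + ETH_HLEN would
 * fall below the minimum ethernet frame size. With stand-in constants:
 *
 *	#include <stdio.h>
 *
 *	#define ETH_HLEN_X			14	// stand-in for ETH_HLEN
 *	#define ETH_MIN_PACKET_SIZE_X		60	// stand-in value
 *	#define ETH_MAX_JUMBO_PACKET_SIZE_X	9600	// stand-in value
 *
 *	static int mtu_ok(int new_mtu)
 *	{
 *		return !(new_mtu > ETH_MAX_JUMBO_PACKET_SIZE_X ||
 *			 new_mtu + ETH_HLEN_X < ETH_MIN_PACKET_SIZE_X);
 *	}
 *
 *	int main(void)
 *	{
 *		printf("%d %d %d\n", mtu_ok(1500), mtu_ok(20), mtu_ok(10000));
 *		return 0;	// prints "1 0 0"
 *	}
 */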
u32 bnx2x_fix_features(struct net_device *dev, u32 features)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
		features &= ~NETIF_F_LRO;

	return features;
}
int bnx2x_set_features(struct net_device *dev, u32 features)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 flags = bp->flags;
	bool bnx2x_reload = false;

	if (features & NETIF_F_LRO)
		flags |= TPA_ENABLE_FLAG;
	else
		flags &= ~TPA_ENABLE_FLAG;

	if (features & NETIF_F_LOOPBACK) {
		if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
			bp->link_params.loopback_mode = LOOPBACK_BMAC;
			bnx2x_reload = true;
		}
	} else {
		if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
			bp->link_params.loopback_mode = LOOPBACK_NONE;
			bnx2x_reload = true;
		}
	}

	if (flags ^ bp->flags) {
		bp->flags = flags;
		bnx2x_reload = true;
	}

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE)
			return bnx2x_reload_if_running(dev);
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}
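
/*
 * Editorial sketch (not driver code): the "flags ^ bp->flags" test above
 * commits the new flag word and requests a reload only when some bit
 * actually changed. A minimal standalone model with a made-up feature bit:
 *
 *	#include <stdbool.h>
 *	#include <stdio.h>
 *
 *	#define TPA_FLAG 0x1	// hypothetical feature bit
 *
 *	int main(void)
 *	{
 *		unsigned int cur = TPA_FLAG;	// current flags
 *		unsigned int want = 0;		// TPA just turned off
 *		bool reload = false;
 *
 *		if (cur ^ want) {	// any differing bit -> commit + reload
 *			cur = want;
 *			reload = true;
 *		}
 *		printf("reload=%d flags=%u\n", (int)reload, cur);
 *		return 0;
 *	}
 */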
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->reset_task, 0);
}
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	/* Since the chip was reset, clear the FW sequence number */
	bp->fw_seq = 0;
	rc = bnx2x_nic_load(bp, LOAD_OPEN);