/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>
#include <linux/firmware.h>
#include "bnx2x_cmn.h"

#include "bnx2x_init.h"

static int bnx2x_setup_irqs(struct bnx2x *bp);
/**
 * bnx2x_bz_fp - zero content of the fastpath structure.
 *
 * @bp:		driver handle
 * @index:	fastpath index to be zeroed
 *
 * Makes sure the contents of the bp->fp[index].napi is kept
 * intact.
 */
static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	struct napi_struct orig_napi = fp->napi;

	/* bzero bnx2x_fastpath contents */
	memset(fp, 0, sizeof(*fp));

	/* Restore the NAPI object as it has been already initialized */
	fp->napi = orig_napi;
}
/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi is kept
 * intact.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct napi_struct orig_napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));

	/* Restore the NAPI object as it has been already initialized */
	to_fp->napi = orig_napi;
}
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {
		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u  sw_cons %u "
				      " pkt_cons %u\n",
		   fp->index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent reenabling the queue
		 * while it's empty. This could have happen if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}

	return 0;
}
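/*
 * Stop/wake pairing summary (informal): bnx2x_start_xmit() stops the queue
 * when fewer than MAX_SKB_FRAGS + 3 BDs remain and re-checks availability
 * only after its own barrier, while the completion path above first
 * publishes tx_bd_cons, issues smp_mb() and only then tests
 * netif_tx_queue_stopped().  The paired barriers ensure at least one side
 * observes the other's update, so a stopped queue cannot be missed forever.
 */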
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp,
			RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
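/*
 * SGE bookkeeping in a nutshell: each aggregation clears the mask bits of
 * the SGE entries it consumed; the producer is then advanced over every
 * mask element that became fully clear (RX_SGE_MASK_ELEM_SZ entries per
 * element), and those mask words are re-armed to "all ones" for reuse.
 */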
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
	   fp->tpa_queue_used);
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
	   fp->tpa_queue_used);
#endif
#endif
}
/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
 *
 * @bp:			driver handle
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 */
static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
				    u16 len_on_bd)
{
	/* TPA aggregation won't have IP options and TCP options
	 * other than the timestamp.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);

	/* Check if there was a TCP timestamp; if there is, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	return len_on_bd - hdrs_len;
}
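/*
 * Worked example (illustrative numbers): with ETH_HLEN == 14 and no IP or
 * TCP options, hdrs_len is 14 + 20 + 20 = 54, so a first packet with
 * len_on_bd == 1514 yields an approximate MSS of 1460.  If the timestamp
 * option is present, hdrs_len grows by TPA_TSTAMP_OPT_LEN (12) and the
 * same packet yields 1448.
 */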
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx, u16 parsing_flags)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
							      len_on_bd);

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx =
			RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
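/*
 * Sizing note: frag_size is everything beyond the data already sitting on
 * the first BD (pkt_len - len_on_bd), spread over SGE elements of
 * SGE_PAGE_SIZE * PAGES_PER_SGE bytes each.  The loop above therefore adds
 * at most one SGE element worth of bytes per skb fragment until frag_size
 * is exhausted.
 */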
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
		u16 parsing_flags =
			le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
		struct iphdr *iph;

		prefetch(skb);
		prefetch(((char *)(skb)) + L1_CACHE_BYTES);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		iph = (struct iphdr *)skb->data;
		iph->check = 0;
		iph->check = ip_fast_csum((u8 *)iph, iph->ihl);

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx,
					 parsing_flags)) {
			if (parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
/* Set Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
					struct sk_buff *skb)
{
	/* Set Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->fast_path_cqe.status_flags &
	     ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
		skb->rxhash =
			le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
}
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			prefetch(skb);
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* - If CQE is marked both TPA_START and TPA_END it is
			 *   a non-TPA CQE.
			 * - FP CQE will always have either TPA_START or/and
			 *   TPA_STOP flags set.
			 */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);

					/* Set Toeplitz hash for an LRO skb */
					bnx2x_set_skb_rxhash(bp, cqe, skb);

					goto next_rx;

				} else { /* TPA_STOP */
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe,
						       comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			dma_sync_single_for_device(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						   pad + RX_COPY_THRESH,
						   DMA_FROM_DEVICE);
			prefetch(((char *)(skb)) + L1_CACHE_BYTES);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			/* Set Toeplitz hash for a none-LRO skb */
			bnx2x_set_skb_rxhash(bp, cqe, skb);

			skb_checksum_none_assert(skb);

			if (bp->dev->features & NETIF_F_RXCSUM) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

		if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		napi_gro_receive(&fp->napi, skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
			 "[fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}
/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}
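/*
 * Example (illustrative numbers): with line_speed == 10000 and maxCfg == 25,
 * the percentage branch reports 10000 * 25 / 100 == 2500 Mbps, while the SD
 * branch treats maxCfg as hundreds of Mbps (vn_max_rate == 2500) and only
 * caps the reported speed when vn_max_rate is below the physical link speed.
 */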
/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static inline void bnx2x_fill_report_data(struct bnx2x *bp,
					  struct bnx2x_link_report_data *data)
{
	u16 line_speed = bnx2x_get_mf_speed(bp);

	memset(data, 0, sizeof(*data));

	/* Fill the report data: effective line speed */
	data->line_speed = line_speed;

	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			  &data->link_report_flags);

	if (bp->link_vars.duplex == DUPLEX_FULL)
		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);

	/* Rx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);

	/* Tx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
}
/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */
void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}
/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	/* We are going to report a new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC Link is Up, ");
		pr_cont("%d Mbps ", cur_data.line_speed);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			pr_cont("full duplex");
		else
			pr_cont("half duplex");

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				pr_cont(", receive ");
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					pr_cont("& transmit ");
			} else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	}
}
void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (!fp->disable_tpa) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, fp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
					BNX2X_ERR("was only able to allocate "
						  "%d rx sges\n", i);
					BNX2X_ERR("disabling TPA for"
						  " queue[%d]\n", j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp,
								fp, ring_prod);
					bnx2x_free_tpa_pool(bp,
							    fp, max_agg_queues);
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		/* Activate BD ring
		 * Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (!CHIP_IS_E2(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}
static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int i;

	/* ring wasn't allocated */
	if (fp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < NUM_RX_BD; i++) {
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL)
			continue;

		dma_unmap_single(&bp->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 fp->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->skb = NULL;
		dev_kfree_skb(skb);
	}
}
static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_free_rx_bds(fp);

		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}
static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_eth_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}
void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG)
		bnx2x_free_msix_irqs(bp);
	else if (bp->flags & USING_MSI_FLAG) {
		free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
	} else
		free_irq(bp->pdev->irq, bp->dev);
}
int bnx2x_enable_msix(struct bnx2x *bp)
{
	int msix_vec = 0, i, rc, req_cnt;

	bp->msix_table[msix_vec].entry = msix_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
	   bp->msix_table[0].entry);
	msix_vec++;

#ifdef BCM_CNIC
	bp->msix_table[msix_vec].entry = msix_vec;
	DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
	   bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
	msix_vec++;
#endif
	for_each_eth_queue(bp, i) {
		bp->msix_table[msix_vec].entry = msix_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", msix_vec, msix_vec, i);
		msix_vec++;
	}

	req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1;

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);

	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
		/* how less vectors we will have? */
		int diff = req_cnt - rc;

		DP(NETIF_MSG_IFUP,
		   "Trying to use less MSI-X vectors: %d\n", rc);

		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);

		if (rc) {
			DP(NETIF_MSG_IFUP,
			   "MSI-X is not attainable  rc %d\n", rc);
			return rc;
		}
		/*
		 * decrease number of queues by number of unallocated entries
		 */
		bp->num_queues -= diff;

		DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
		   bp->num_queues);
	} else if (rc) {
		/* fall to INTx if not enough memory */
		if (rc == -ENOMEM)
			bp->flags |= DISABLE_MSI_FLAG;
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
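/*
 * Vector layout assumed by the table built above: entry 0 is the slowpath
 * interrupt, an optional CNIC entry follows, and one entry per ethernet
 * queue comes last, which is why req_cnt is
 * BNX2X_NUM_ETH_QUEUES() + CNIC_CONTEXT_USE + 1.  When pci_enable_msix()
 * grants fewer vectors, the driver retries with the granted count and
 * shrinks bp->num_queues by the difference.
 */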
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		offset++;
		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_ETH_QUEUES(bp);
	offset = 1 + CNIC_CONTEXT_USE;
	netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
		    " ... fp[%d] %d\n",
		    bp->msix_table[0].vector,
		    0, bp->msix_table[offset].vector,
		    i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}
int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}
static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}
static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_napi_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_napi_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}
void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
}
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BCM_CNIC
	if (NO_FCOE(bp))
		return skb_tx_hash(dev, skb);
	else {
		struct ethhdr *hdr = (struct ethhdr *)skb->data;
		u16 ether_type = ntohs(hdr->h_proto);

		/* Skip VLAN tag if present */
		if (ether_type == ETH_P_8021Q) {
			struct vlan_ethhdr *vhdr =
				(struct vlan_ethhdr *)skb->data;

			ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
		}

		/* If ethertype is FCoE or FIP - use FCoE ring */
		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
			return bnx2x_fcoe(bp, index);
	}
#endif
	/* Select a none-FCoE queue:  if FCoE is enabled, exclude FCoE L2 ring
	 */
	return __skb_tx_hash(dev, skb,
			     dev->real_num_tx_queues - FCOE_CONTEXT_USE);
}
void bnx2x_set_num_queues(struct bnx2x *bp)
{
	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		bp->num_queues = 1;
		break;
	case ETH_RSS_MODE_REGULAR:
		bp->num_queues = bnx2x_calc_num_queues(bp);
		break;

	default:
		bp->num_queues = 1;
		break;
	}

	/* Add special queues */
	bp->num_queues += NONE_ETH_CONTEXT_USE;
}
static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x *bp)
{
	bnx2x_set_fip_eth_mac_addr(bp, 1);
	bnx2x_set_all_enode_macs(bp, 1);
	bp->flags |= FCOE_MACS_SET;
}
static void bnx2x_release_firmware(struct bnx2x *bp)
{
	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);
}
static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
{
	int rc, num = bp->num_queues;

#ifdef BCM_CNIC
	if (NO_FCOE(bp))
		num -= FCOE_CONTEXT_USE;

#endif
	netif_set_real_num_tx_queues(bp->dev, num);
	rc = netif_set_real_num_rx_queues(bp->dev, num);
	return rc;
}
static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
		if (IS_FCOE_IDX(i))
			/*
			 * Although there are no IP frames expected to arrive to
			 * this ring we still want to add an
			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
			 * overrun.
			 */
			fp->rx_buf_size =
				BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
				BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
		else
			fp->rx_buf_size =
				bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
				IP_HEADER_ALIGNMENT_PADDING;
	}
}
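/*
 * Rough sizing example: a regular ring buffer ends up as
 * mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING, i.e.
 * the netdev MTU plus a small, chip-independent overhead; the FCoE L2 ring
 * substitutes the fixed BNX2X_FCOE_MINI_JUMBO_MTU for the netdev MTU.
 */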
/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp);
	if (rc) {
		BNX2X_ERR("Error loading firmware\n");
		return rc;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* Set the initial link reported state to link down */
	bnx2x_acquire_phy_lock(bp);
	memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
	__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		  &bp->last_reported_link.link_report_flags);
	bnx2x_release_phy_lock(bp);

	/* must be called before memory allocation and HW init */
	bnx2x_ilt_set_info(bp);

	/* zero fastpath structures preserving invariants like napi which are
	 * allocated only once
	 */
	for_each_queue(bp, i)
		bnx2x_bz_fp(bp, i);

	/* Set the receive queues buffer size */
	bnx2x_set_rx_buf_size(bp);

	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	/* We don't want TPA on FCoE L2 ring */
	bnx2x_fcoe(bp, disable_tpa) = 1;

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	/* As long as bnx2x_alloc_mem() may possibly update
	 * bp->num_queues, bnx2x_set_real_num_queues() should always
	 * come after it.
	 */
	rc = bnx2x_set_real_num_queues(bp);
	if (rc) {
		BNX2X_ERR("Unable to set real_num_queues\n");
		goto load_error0;
	}

	bnx2x_napi_enable(bp);
	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error1;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error1;
		}

	} else {
		int path = BP_PATH(bp);
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		load_count[path][0]++;
		load_count[path][1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		if (load_count[path][0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[path][1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;

	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		goto load_error2;
	}

	/* Connect to IRQs */
	rc = bnx2x_setup_irqs(bp);
	if (rc) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bnx2x_dcbx_init(bp);

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_func_start(bp);
	if (rc) {
		BNX2X_ERR("Function start failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	if (!CHIP_IS_E1(bp) &&
	    (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
		DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
		bp->flags |= MF_FUNC_DIS;
	}

#ifdef BCM_CNIC
	/* Enable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
#endif

	for_each_nondefault_queue(bp, i) {
		rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
		if (rc)
#ifdef BCM_CNIC
			goto load_error4;
#else
			goto load_error3;
#endif
	}

	/* Now when Clients are configured we are ready to work */
	bp->state = BNX2X_STATE_OPEN;

	bnx2x_set_fcoe_eth_macs(bp);

	bnx2x_set_eth_mac(bp, 1);

	/* Clear MC configuration */
	if (CHIP_IS_E1(bp))
		bnx2x_invalidate_e1_mc_list(bp);
	else
		bnx2x_invalidate_e1h_mc_list(bp);

	/* Clear UC lists configuration */
	bnx2x_invalidate_uc_list(bp);

	if (bp->pending_max) {
		bnx2x_update_max_mf_config(bp, bp->pending_max);
		bp->pending_max = 0;
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Initialize Rx filtering */
	bnx2x_set_rx_mode(bp->dev);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		smp_mb__after_clear_bit();
		break;

	case LOAD_DIAG:
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
	bnx2x_inc_load_cnt(bp);

	bnx2x_release_firmware(bp);

	return 0;
#ifdef BCM_CNIC
load_error4:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error2:
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
	}
load_error1:
	bnx2x_napi_disable(bp);
load_error0:
	bnx2x_free_mem(bp);
	bnx2x_release_firmware(bp);

	return rc;
}
/* must be called with rtnl_lock */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int i;

	if (bp->state == BNX2X_STATE_CLOSED) {
		/* Interface has been removed - nothing to recover */
		bp->recovery_state = BNX2X_RECOVERY_DONE;
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
		smp_wmb();

		return -EINVAL;
	}

#ifdef BCM_CNIC
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	/* Set "drop all" */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_tx_disable(bp);

	del_timer_sync(&bp->timer);

	SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));

	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Cleanup the chip if needed */
	if (unload_mode != UNLOAD_RECOVERY)
		bnx2x_chip_cleanup(bp, unload_mode);
	else {
		/* Disable HW interrupts, NAPI and Tx */
		bnx2x_netif_stop(bp, 1);

		/* Release IRQs */
		bnx2x_free_irq(bp);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bp->state = BNX2X_STATE_CLOSED;

	/* The last driver must disable a "close the gate" if there is no
	 * parity attention or "process kill" pending.
	 */
	if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
	    bnx2x_reset_is_done(bp))
		bnx2x_disable_close_the_gate(bp);

	/* Reset MCP mail box sequence if there is on going recovery */
	if (unload_mode == UNLOAD_RECOVERY)
		bp->fw_seq = 0;

	return 0;
}
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	/* If there is no power capability, silently succeed */
	if (!bp->pm_cap) {
		DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
		return 0;
	}

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		/* If there are other clients above don't
		   shut down the power */
		if (atomic_read(&bp->pdev->enable_cnt) != 1)
			return 0;
		/* Don't shut down the power for emulation and FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			return 0;

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
/*
 * net_device service functions
 */
int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif

		if (bnx2x_has_tx_work(fp))
			bnx2x_tx_int(fp);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
			/* No need to update SB for FCoE L2 ring as long as
			 * it's connected to the default SB and the SB
			 * has been updated when NAPI was scheduled.
			 */
			if (IS_FCOE_FP(fp)) {
				napi_complete(napi);
				break;
			}

			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block,
			 * thus we need to ensure that status block indices
			 * have been actually read (bnx2x_update_fpsb_idx)
			 * prior to this check (bnx2x_has_rx_work) so that
			 * we won't write the "newer" value of the status block
			 * to IGU (if there was a DMA right after
			 * bnx2x_has_rx_work and if there is no rmb, the memory
			 * reading (bnx2x_update_fpsb_idx) may be postponed
			 * to right before bnx2x_ack_sb). In this case there
			 * will never be another interrupt until there is
			 * another update of the status block, while there
			 * is still unhandled work.
			 */
			rmb();

			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				DP(NETIF_MSG_HW,
				   "Update index to %d\n", fp->fp_hc_idx);
				bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_hc_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
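/*
 * In other words: the start BD is shrunk to carry only the hlen header
 * bytes, and a second data BD is pointed at the same DMA mapping at offset
 * hlen for the remaining old_len - hlen bytes, so no extra mapping is
 * created for the split (hence BNX2X_TSO_SPLIT_BD on the sw_tx_bd).
 */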
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
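/*
 * What the fixup above does: a positive fix means the stack started its
 * partial checksum before the transport header, so those leading bytes are
 * folded back out; a negative fix means it started after, so the missing
 * bytes are folded back in.  The result is byte-swapped for the parse BD.
 */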
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_is_gso_v6(skb))
		rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
	else if (skb_is_gso(skb))
		rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;

	return rc;
}
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum  = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
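/*
 * The sliding-window check above enforces the FW restriction described in
 * the comments: for an LSO packet, any wnd_size (MAX_FETCH_BD - 3)
 * consecutive BDs must together carry at least one MSS worth of payload.
 * The first window includes the linear part of the skb; each later window
 * slides by one fragment, dropping the oldest fragment's size and adding
 * the newest.  A packet that fails any window is linearized before
 * transmit.
 */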
static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
					u32 xmit_type)
{
	*parsing_data |= (skb_shinfo(skb)->gso_size <<
			  ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
			  ETH_TX_PARSE_BD_E2_LSO_MSS;
	if ((xmit_type & XMIT_GSO_V6) &&
	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}
/**
 * bnx2x_set_pbd_gso - update PBD in GSO case.
 *
 * @skb:	packet skb
 * @pbd:	parse BD
 * @xmit_type:	xmit flags
 */
static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
				     struct eth_tx_parse_bd_e1x *pbd,
				     u32 xmit_type)
{
	pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
	pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
	pbd->tcp_flags = pbd_tcp_flags(skb);

	if (xmit_type & XMIT_GSO_V4) {
		pbd->ip_id = swab16(ip_hdr(skb)->id);
		pbd->tcp_pseudo_csum =
			swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						  ip_hdr(skb)->daddr,
						  0, IPPROTO_TCP, 0));

	} else
		pbd->tcp_pseudo_csum =
			swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						&ipv6_hdr(skb)->daddr,
						0, IPPROTO_TCP, 0));

	pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
}
/**
 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
 *
 * @bp:			driver handle
 * @skb:		packet skb
 * @parsing_data:	data to be updated
 * @xmit_type:		xmit flags
 */
static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
				       u32 *parsing_data, u32 xmit_type)
{
	*parsing_data |=
		((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
		 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
		ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;

	if (xmit_type & XMIT_CSUM_TCP) {
		*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;

		return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
	} else
		/* We support checksum offload for TCP and UDP only.
		 * No need to pass the UDP header length - it's a constant.
		 */
		return skb_transport_header(skb) +
			sizeof(struct udphdr) - skb->data;
}
/**
 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
 *
 * @bp:		driver handle
 * @skb:	packet skb
 * @pbd:	parse BD to be updated
 * @xmit_type:	xmit flags
 */
static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
				    struct eth_tx_parse_bd_e1x *pbd,
				    u32 xmit_type)
{
	u8 hlen = (skb_network_header(skb) - skb->data) >> 1;

	/* for now NS flag is not used in Linux */
	pbd->global_data =
		(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
			 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));

	pbd->ip_hlen_w = (skb_transport_header(skb) -
			skb_network_header(skb)) >> 1;

	hlen += pbd->ip_hlen_w;

	/* We support checksum offload for TCP and UDP only */
	if (xmit_type & XMIT_CSUM_TCP)
		hlen += tcp_hdrlen(skb) / 2;
	else
		hlen += sizeof(struct udphdr) / 2;

	pbd->total_hlen_w = cpu_to_le16(hlen);
	hlen = hlen*2;

	if (xmit_type & XMIT_CSUM_TCP) {
		pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

	} else {
		s8 fix = SKB_CS_OFF(skb); /* signed! */

		DP(NETIF_MSG_TX_QUEUED,
		   "hlen %d  fix %d  csum before fix %x\n",
		   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));

		/* HW bug: fixup the CSUM */
		pbd->tcp_pseudo_csum =
			bnx2x_csum_fix(skb_transport_header(skb),
				       SKB_CS(skb), fix);

		DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
		   pbd->tcp_pseudo_csum);
	}

	return hlen;
}
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
	u32 pbd_e2_parsing_data = 0;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;
	struct ethhdr *eth;
	u8 mac_type = UNICAST_ADDRESS;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x  protocol %x  "
				"protocol(%x,%x) gso type %x  xmit_type %x\n",
	   fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

	eth = (struct ethhdr *)skb->data;

	/* set flag according to packet type (UNICAST_ADDRESS is default)*/
	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
		if (is_broadcast_ether_addr(eth->h_dest))
			mac_type = BROADCAST_ADDRESS;
		else
			mac_type = MULTICAST_ADDRESS;
	}
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pdb sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
		 mac_type);

	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

	if (vlan_tx_tag_present(skb)) {
		tx_start_bd->vlan_or_ethertype =
		    cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |=
		    (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
	} else
		tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	if (xmit_type & XMIT_CSUM) {
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (!(xmit_type & XMIT_CSUM_TCP))
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IS_UDP;
	}

	if (CHIP_IS_E2(bp)) {
		pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
		/* Set PBD in checksum offload case */
		if (xmit_type & XMIT_CSUM)
			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
						     &pbd_e2_parsing_data,
						     xmit_type);
	} else {
		pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
		memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
		/* Set PBD in checksum offload case */
		if (xmit_type & XMIT_CSUM)
			hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
	}
2353 /* Map skb linear data for DMA */
2354 mapping
= dma_map_single(&bp
->pdev
->dev
, skb
->data
,
2355 skb_headlen(skb
), DMA_TO_DEVICE
);
2357 /* Setup the data pointer of the first BD of the packet */
2358 tx_start_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
2359 tx_start_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
2360 nbd
= skb_shinfo(skb
)->nr_frags
+ 2; /* start_bd + pbd + frags */
2361 tx_start_bd
->nbd
= cpu_to_le16(nbd
);
2362 tx_start_bd
->nbytes
= cpu_to_le16(skb_headlen(skb
));
2363 pkt_size
= tx_start_bd
->nbytes
;
        DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
           " nbytes %d flags %x vlan %x\n",
           tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
           le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
           tx_start_bd->bd_flags.as_bitfield,
           le16_to_cpu(tx_start_bd->vlan_or_ethertype));
        if (xmit_type & XMIT_GSO) {

                DP(NETIF_MSG_TX_QUEUED,
                   "TSO packet len %d hlen %d total len %d tso size %d\n",
                   skb->len, hlen, skb_headlen(skb),
                   skb_shinfo(skb)->gso_size);

                tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

                if (unlikely(skb_headlen(skb) > hlen))
                        bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
                                                 hlen, bd_prod, ++nbd);
                if (CHIP_IS_E2(bp))
                        bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
                                             xmit_type);
                else
                        bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
        }
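        /*
         * bnx2x_tx_split() is used when the linear part holds more than just
         * the headers: the header bytes get a BD of their own (hence ++nbd),
         * which is what the BNX2X_TSO_SPLIT_BD handling in
         * bnx2x_free_tx_pkt() unwinds at completion time.
         */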
        /* Set the PBD's parsing_data field if not zero
         * (for the chips newer than 57711).
         */
        if (pbd_e2_parsing_data)
                pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);

        tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
        /* Handle fragmented skb */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
                tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
                if (total_pkt_bd == NULL)
                        total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

                mapping = dma_map_page(&bp->pdev->dev, frag->page,
                                       frag->page_offset,
                                       frag->size, DMA_TO_DEVICE);

                tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
                tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
                tx_data_bd->nbytes = cpu_to_le16(frag->size);
                le16_add_cpu(&pkt_size, frag->size);

                DP(NETIF_MSG_TX_QUEUED,
                   "frag %d bd @%p addr (%x:%x) nbytes %d\n",
                   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
                   le16_to_cpu(tx_data_bd->nbytes));
        }

        DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
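        /*
         * Each fragment above got its own data BD; total_pkt_bd points at
         * the first of them so total_pkt_bytes can be filled in below once
         * pkt_size is final.
         */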
        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

        /* now send a tx doorbell, counting the next BD
         * if the packet contains or ends with it
         */
        if (TX_BD_POFF(bd_prod) < nbd)
                nbd++;

        if (total_pkt_bd != NULL)
                total_pkt_bd->total_pkt_bytes = pkt_size;
        if (pbd_e1x)
                DP(NETIF_MSG_TX_QUEUED,
                   "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
                   " tcp_flags %x xsum %x seq %u hlen %u\n",
                   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
                   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
                   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
                   le16_to_cpu(pbd_e1x->total_hlen_w));
        if (pbd_e2)
                DP(NETIF_MSG_TX_QUEUED,
                   "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
                   pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
                   pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
                   pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
                   pbd_e2->parsing_data);
        DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
        /*
         * Make sure that the BD data is updated before updating the producer
         * since FW might read the BD right after the producer is updated.
         * This is only applicable for weak-ordered memory model archs such
         * as IA-64. The following barrier is also mandatory since FW will
         * assume packets must have BDs.
         */
        wmb();

        fp->tx_db.data.prod += nbd;
        barrier();

        DOORBELL(bp, fp->cid, fp->tx_db.raw);

        mmiowb();

        fp->tx_bd_prod += nbd;
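        /*
         * fp->tx_db.data.prod is the producer the chip sees through the
         * doorbell, while fp->tx_bd_prod is the host-side copy used for ring
         * accounting (bnx2x_tx_avail()); both advance by nbd. The check
         * below stops the queue when fewer than MAX_SKB_FRAGS + 3 BDs are
         * left, which is roughly the worst case for one more packet (start
         * BD + parsing BD + one BD per fragment, plus slack for a split
         * header or "next page" BD).
         */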
        if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
                netif_tx_stop_queue(txq);

                /* paired memory barrier is in bnx2x_tx_int(), we have to keep
                 * ordering of set_bit() in netif_tx_stop_queue() and read of
                 * fp->tx_bd_cons */
                smp_mb();

                fp->eth_q_stats.driver_xoff++;
                if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
                        netif_tx_wake_queue(txq);
        }
        fp->tx_pkt++;

        return NETDEV_TX_OK;
}
/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
        struct sockaddr *addr = p;
        struct bnx2x *bp = netdev_priv(dev);

        if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
                return -EINVAL;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        if (netif_running(dev))
                bnx2x_set_eth_mac(bp, 1);

        return 0;
}
static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
{
        union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
        struct bnx2x_fastpath *fp = &bp->fp[fp_index];

        if (IS_FCOE_IDX(fp_index)) {
                memset(sb, 0, sizeof(union host_hc_status_block));
                fp->status_blk_mapping = 0;
        } else {
                if (CHIP_IS_E2(bp))
                        BNX2X_PCI_FREE(sb->e2_sb,
                                       bnx2x_fp(bp, fp_index,
                                                status_blk_mapping),
                                       sizeof(struct host_hc_status_block_e2));
                else
                        BNX2X_PCI_FREE(sb->e1x_sb,
                                       bnx2x_fp(bp, fp_index,
                                                status_blk_mapping),
                                       sizeof(struct host_hc_status_block_e1x));
        }

        /* Rx */
        if (!skip_rx_queue(bp, fp_index)) {
                bnx2x_free_rx_bds(fp);

                /* fastpath rx rings: rx_buf rx_desc rx_comp */
                BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
                BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
                               bnx2x_fp(bp, fp_index, rx_desc_mapping),
                               sizeof(struct eth_rx_bd) * NUM_RX_BD);

                BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
                               bnx2x_fp(bp, fp_index, rx_comp_mapping),
                               sizeof(struct eth_fast_path_rx_cqe) *
                               NUM_RCQ_BD);

                BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
                BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
                               bnx2x_fp(bp, fp_index, rx_sge_mapping),
                               BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
        }

        /* Tx */
        if (!skip_tx_queue(bp, fp_index)) {
                /* fastpath tx rings: tx_buf tx_desc */
                BNX2X_FREE(bnx2x_fp(bp, fp_index, tx_buf_ring));
                BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, tx_desc_ring),
                               bnx2x_fp(bp, fp_index, tx_desc_mapping),
                               sizeof(union eth_tx_bd_types) * NUM_TX_BD);
        }
        /* end of fastpath */
}
void bnx2x_free_fp_mem(struct bnx2x *bp)
{
        int i;

        for_each_queue(bp, i)
                bnx2x_free_fp_mem_at(bp, i);
}
static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
{
        union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);

        if (CHIP_IS_E2(bp)) {
                bnx2x_fp(bp, index, sb_index_values) =
                        (__le16 *)status_blk.e2_sb->sb.index_values;
                bnx2x_fp(bp, index, sb_running_index) =
                        (__le16 *)status_blk.e2_sb->sb.running_index;
        } else {
                bnx2x_fp(bp, index, sb_index_values) =
                        (__le16 *)status_blk.e1x_sb->sb.index_values;
                bnx2x_fp(bp, index, sb_running_index) =
                        (__le16 *)status_blk.e1x_sb->sb.running_index;
        }
}
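/*
 * The shortcuts cached above let the fastpath read the status block's
 * index_values and running_index without caring whether the chip uses the
 * e1x or the e2 status block layout.
 */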
static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
{
        union host_hc_status_block *sb;
        struct bnx2x_fastpath *fp = &bp->fp[index];
        int ring_size = 0;

        /* if rx_ring_size specified - use it */
        int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
                           MAX_RX_AVAIL/bp->num_queues;

        /* allocate at least number of buffers required by FW */
        rx_ring_size = max_t(int, fp->disable_tpa ? MIN_RX_SIZE_NONTPA :
                                                    MIN_RX_SIZE_TPA,
                             rx_ring_size);
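        /*
         * The ring size chosen above honours a user-supplied bp->rx_ring_size
         * when set, otherwise splits MAX_RX_AVAIL across the queues, and is
         * never allowed below the FW minimum (which differs for TPA and
         * non-TPA queues).
         */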
        bnx2x_fp(bp, index, bp) = bp;
        bnx2x_fp(bp, index, index) = index;
        sb = &bnx2x_fp(bp, index, status_blk);

        if (!IS_FCOE_IDX(index)) {
                if (CHIP_IS_E2(bp))
                        BNX2X_PCI_ALLOC(sb->e2_sb,
                                        &bnx2x_fp(bp, index, status_blk_mapping),
                                        sizeof(struct host_hc_status_block_e2));
                else
                        BNX2X_PCI_ALLOC(sb->e1x_sb,
                                        &bnx2x_fp(bp, index, status_blk_mapping),
                                        sizeof(struct host_hc_status_block_e1x));
        }
        set_sb_shortcuts(bp, index);
        /* Tx */
        if (!skip_tx_queue(bp, index)) {
                /* fastpath tx rings: tx_buf tx_desc */
                BNX2X_ALLOC(bnx2x_fp(bp, index, tx_buf_ring),
                            sizeof(struct sw_tx_bd) * NUM_TX_BD);
                BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, tx_desc_ring),
                                &bnx2x_fp(bp, index, tx_desc_mapping),
                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
        }
        /* Rx */
        if (!skip_rx_queue(bp, index)) {
                /* fastpath rx rings: rx_buf rx_desc rx_comp */
                BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
                            sizeof(struct sw_rx_bd) * NUM_RX_BD);
                BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
                                &bnx2x_fp(bp, index, rx_desc_mapping),
                                sizeof(struct eth_rx_bd) * NUM_RX_BD);

                BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
                                &bnx2x_fp(bp, index, rx_comp_mapping),
                                sizeof(struct eth_fast_path_rx_cqe) *
                                NUM_RCQ_BD);

                /* SGE ring */
                BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
                            sizeof(struct sw_rx_page) * NUM_RX_SGE);
                BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
                                &bnx2x_fp(bp, index, rx_sge_mapping),
                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);

                bnx2x_set_next_page_rx_bd(fp);

                bnx2x_set_next_page_rx_cq(fp);

                ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
                if (ring_size < rx_ring_size)
                        goto alloc_mem_err;
        }

        return 0;
/* handles low memory cases */
alloc_mem_err:
        BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
                  index, ring_size);
        /* FW will drop all packets if queue is not big enough,
         * In these cases we disable the queue
         * Min size different for TPA and non-TPA queues
         */
        if (ring_size < (fp->disable_tpa ?
                                MIN_RX_SIZE_TPA : MIN_RX_SIZE_NONTPA)) {
                /* release memory allocated for this queue */
                bnx2x_free_fp_mem_at(bp, index);
                return -ENOMEM;
        }
        return 0;
}
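/*
 * Note that a ring which came up smaller than requested is still kept as
 * long as it meets the FW minimum checked above; only rings below
 * MIN_RX_SIZE_TPA/MIN_RX_SIZE_NONTPA have their memory released and fail
 * with -ENOMEM.
 */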
int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
        int i;

        /*
         * 1. Allocate FP for leading - fatal if error
         * 2. {CNIC} Allocate FCoE FP - fatal if error
         * 3. Allocate RSS - fix number of queues if error
         */

        /* leading */
        if (bnx2x_alloc_fp_mem_at(bp, 0))
                return -ENOMEM;

        /* FCoE */
        if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
                return -ENOMEM;

        /* RSS */
        for_each_nondefault_eth_queue(bp, i)
                if (bnx2x_alloc_fp_mem_at(bp, i))
                        break;

        /* handle memory failures */
        if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
                int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;

                /*
                 * move non eth FPs next to last eth FP
                 * must be done in that order
                 * FCOE_IDX < FWD_IDX < OOO_IDX
                 */
                bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);

                bp->num_queues -= delta;
                BNX2X_ERR("Adjusted num of queues from %d to %d\n",
                          bp->num_queues + delta, bp->num_queues);
        }

        return 0;
}
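/*
 * FCOE_IDX follows the last ethernet queue in bp->fp[] (as the indices here
 * suggest), so when the ethernet queue count shrinks by 'delta' the FCoE
 * fastpath is shifted down by the same amount to stay adjacent.
 */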
static int bnx2x_setup_irqs(struct bnx2x *bp)
{
        int rc = 0;

        if (bp->flags & USING_MSIX_FLAG) {
                rc = bnx2x_req_msix_irqs(bp);
                if (rc)
                        return rc;
        } else {
                rc = bnx2x_req_irq(bp);
                if (rc) {
                        BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
                        return rc;
                }
                if (bp->flags & USING_MSI_FLAG) {
                        bp->dev->irq = bp->pdev->irq;
                        netdev_info(bp->dev, "using MSI IRQ %d\n",
                                    bp->pdev->irq);
                }
        }

        return 0;
}
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
        kfree(bp->fp);
        kfree(bp->msix_table);
        kfree(bp->ilt);
}
int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
        struct bnx2x_fastpath *fp;
        struct msix_entry *tbl;
        struct bnx2x_ilt *ilt;

        /* fp array */
        fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
        if (!fp)
                goto alloc_err;
        bp->fp = fp;

        /* msix table */
        tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
                      GFP_KERNEL);
        if (!tbl)
                goto alloc_err;
        bp->msix_table = tbl;

        /* ilt */
        ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
        if (!ilt)
                goto alloc_err;
        bp->ilt = ilt;

        return 0;

alloc_err:
        bnx2x_free_mem_bp(bp);
        return -ENOMEM;
}
static int bnx2x_reload_if_running(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);

        if (unlikely(!netif_running(dev)))
                return 0;

        bnx2x_nic_unload(bp, UNLOAD_NORMAL);
        return bnx2x_nic_load(bp, LOAD_NORMAL);
}
/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
        struct bnx2x *bp = netdev_priv(dev);

        if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
                printk(KERN_ERR "Handling parity error recovery. Try again later\n");
                return -EAGAIN;
        }

        if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
            ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
                return -EINVAL;

        /* This does not race with packet allocation
         * because the actual alloc size is
         * only updated as part of load
         */
        dev->mtu = new_mtu;

        return bnx2x_reload_if_running(dev);
}
u32 bnx2x_fix_features(struct net_device *dev, u32 features)
{
        struct bnx2x *bp = netdev_priv(dev);

        /* TPA requires Rx CSUM offloading */
        if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
                features &= ~NETIF_F_LRO;

        return features;
}
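/*
 * NETIF_F_LRO is the user-visible knob here: bnx2x_set_features() below maps
 * it onto the driver's TPA_ENABLE_FLAG, so it is cleared whenever RX checksum
 * offload is off or TPA has been administratively disabled.
 */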
int bnx2x_set_features(struct net_device *dev, u32 features)
{
        struct bnx2x *bp = netdev_priv(dev);
        u32 flags = bp->flags;
        bool bnx2x_reload = false;

        if (features & NETIF_F_LRO)
                flags |= TPA_ENABLE_FLAG;
        else
                flags &= ~TPA_ENABLE_FLAG;

        if (features & NETIF_F_LOOPBACK) {
                if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
                        bp->link_params.loopback_mode = LOOPBACK_BMAC;
                        bnx2x_reload = true;
                }
        } else {
                if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
                        bp->link_params.loopback_mode = LOOPBACK_NONE;
                        bnx2x_reload = true;
                }
        }

        if (flags ^ bp->flags) {
                bp->flags = flags;
                bnx2x_reload = true;
        }

        if (bnx2x_reload) {
                if (bp->recovery_state == BNX2X_RECOVERY_DONE)
                        return bnx2x_reload_if_running(dev);
                /* else: bnx2x_nic_load() will be called at end of recovery */
        }

        return 0;
}
void bnx2x_tx_timeout(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
        if (!bp->panic)
                bnx2x_panic();
#endif
        /* This allows the netif to be shutdown gracefully before resetting */
        schedule_delayed_work(&bp->reset_task, 0);
}
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp;

        if (!dev) {
                dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
                return -ENODEV;
        }
        bp = netdev_priv(dev);

        rtnl_lock();

        pci_save_state(pdev);

        if (!netif_running(dev)) {
                rtnl_unlock();
                return 0;
        }

        netif_device_detach(dev);

        bnx2x_nic_unload(bp, UNLOAD_CLOSE);

        bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

        rtnl_unlock();

        return 0;
}
int bnx2x_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp;
        int rc;

        if (!dev) {
                dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
                return -ENODEV;
        }
        bp = netdev_priv(dev);

        if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
                printk(KERN_ERR "Handling parity error recovery. Try again later\n");
                return -EAGAIN;
        }

        rtnl_lock();

        pci_restore_state(pdev);

        if (!netif_running(dev)) {
                rtnl_unlock();
                return 0;
        }

        bnx2x_set_power_state(bp, PCI_D0);
        netif_device_attach(dev);

        /* Since the chip was reset, clear the FW sequence number */
        bp->fw_seq = 0;
        rc = bnx2x_nic_load(bp, LOAD_OPEN);