1 /* bnx2x_cmn.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2011 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
25 #include <net/ip6_checksum.h>
26 #include <linux/firmware.h>
27 #include <linux/prefetch.h>
28 #include "bnx2x_cmn.h"
29 #include "bnx2x_init.h"
35 * bnx2x_bz_fp - zero content of the fastpath structure.
38 * @index: fastpath index to be zeroed
40 * Makes sure the contents of the bp->fp[index].napi is kept
43 static inline void bnx2x_bz_fp(struct bnx2x
*bp
, int index
)
45 struct bnx2x_fastpath
*fp
= &bp
->fp
[index
];
46 struct napi_struct orig_napi
= fp
->napi
;
47 /* bzero bnx2x_fastpath contents */
48 memset(fp
, 0, sizeof(*fp
));
50 /* Restore the NAPI object as it has been already initialized */
56 fp
->max_cos
= bp
->max_cos
;
58 /* Special queues support only one CoS */
62 * set the tpa flag for each queue. The tpa flag determines the queue
63 * minimal size so it must be set prior to queue memory allocation
65 fp
->disable_tpa
= ((bp
->flags
& TPA_ENABLE_FLAG
) == 0);
68 /* We don't want TPA on an FCoE L2 ring */
75 * bnx2x_move_fp - move content of the fastpath structure.
78 * @from: source FP index
79 * @to: destination FP index
81 * Makes sure the contents of the bp->fp[to].napi is kept
82 * intact. This is done by first copying the napi struct from
83 * the target to the source, and then mem copying the entire
84 * source onto the target
86 static inline void bnx2x_move_fp(struct bnx2x
*bp
, int from
, int to
)
88 struct bnx2x_fastpath
*from_fp
= &bp
->fp
[from
];
89 struct bnx2x_fastpath
*to_fp
= &bp
->fp
[to
];
91 /* Copy the NAPI object as it has been already initialized */
92 from_fp
->napi
= to_fp
->napi
;
94 /* Move bnx2x_fastpath contents */
95 memcpy(to_fp
, from_fp
, sizeof(*to_fp
));
99 int load_count
[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
101 /* free skb in the packet ring at pos idx
102 * return idx of last bd freed
104 static u16
bnx2x_free_tx_pkt(struct bnx2x
*bp
, struct bnx2x_fp_txdata
*txdata
,
105 u16 idx
, unsigned int *pkts_compl
,
106 unsigned int *bytes_compl
)
108 struct sw_tx_bd
*tx_buf
= &txdata
->tx_buf_ring
[idx
];
109 struct eth_tx_start_bd
*tx_start_bd
;
110 struct eth_tx_bd
*tx_data_bd
;
111 struct sk_buff
*skb
= tx_buf
->skb
;
112 u16 bd_idx
= TX_BD(tx_buf
->first_bd
), new_cons
;
115 /* prefetch skb end pointer to speedup dev_kfree_skb() */
118 DP(BNX2X_MSG_FP
, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
119 txdata
->txq_index
, idx
, tx_buf
, skb
);
122 DP(BNX2X_MSG_OFF
, "free bd_idx %d\n", bd_idx
);
123 tx_start_bd
= &txdata
->tx_desc_ring
[bd_idx
].start_bd
;
124 dma_unmap_single(&bp
->pdev
->dev
, BD_UNMAP_ADDR(tx_start_bd
),
125 BD_UNMAP_LEN(tx_start_bd
), DMA_TO_DEVICE
);
128 nbd
= le16_to_cpu(tx_start_bd
->nbd
) - 1;
129 #ifdef BNX2X_STOP_ON_ERROR
130 if ((nbd
- 1) > (MAX_SKB_FRAGS
+ 2)) {
131 BNX2X_ERR("BAD nbd!\n");
135 new_cons
= nbd
+ tx_buf
->first_bd
;
137 /* Get the next bd */
138 bd_idx
= TX_BD(NEXT_TX_IDX(bd_idx
));
140 /* Skip a parse bd... */
142 bd_idx
= TX_BD(NEXT_TX_IDX(bd_idx
));
144 /* ...and the TSO split header bd since they have no mapping */
145 if (tx_buf
->flags
& BNX2X_TSO_SPLIT_BD
) {
147 bd_idx
= TX_BD(NEXT_TX_IDX(bd_idx
));
153 DP(BNX2X_MSG_OFF
, "free frag bd_idx %d\n", bd_idx
);
154 tx_data_bd
= &txdata
->tx_desc_ring
[bd_idx
].reg_bd
;
155 dma_unmap_page(&bp
->pdev
->dev
, BD_UNMAP_ADDR(tx_data_bd
),
156 BD_UNMAP_LEN(tx_data_bd
), DMA_TO_DEVICE
);
158 bd_idx
= TX_BD(NEXT_TX_IDX(bd_idx
));
165 (*bytes_compl
) += skb
->len
;
167 dev_kfree_skb_any(skb
);
168 tx_buf
->first_bd
= 0;
174 int bnx2x_tx_int(struct bnx2x
*bp
, struct bnx2x_fp_txdata
*txdata
)
176 struct netdev_queue
*txq
;
177 u16 hw_cons
, sw_cons
, bd_cons
= txdata
->tx_bd_cons
;
178 unsigned int pkts_compl
= 0, bytes_compl
= 0;
180 #ifdef BNX2X_STOP_ON_ERROR
181 if (unlikely(bp
->panic
))
185 txq
= netdev_get_tx_queue(bp
->dev
, txdata
->txq_index
);
186 hw_cons
= le16_to_cpu(*txdata
->tx_cons_sb
);
187 sw_cons
= txdata
->tx_pkt_cons
;
189 while (sw_cons
!= hw_cons
) {
192 pkt_cons
= TX_BD(sw_cons
);
194 DP(NETIF_MSG_TX_DONE
, "queue[%d]: hw_cons %u sw_cons %u "
196 txdata
->txq_index
, hw_cons
, sw_cons
, pkt_cons
);
198 bd_cons
= bnx2x_free_tx_pkt(bp
, txdata
, pkt_cons
,
199 &pkts_compl
, &bytes_compl
);
204 netdev_tx_completed_queue(txq
, pkts_compl
, bytes_compl
);
206 txdata
->tx_pkt_cons
= sw_cons
;
207 txdata
->tx_bd_cons
= bd_cons
;
209 /* Need to make the tx_bd_cons update visible to start_xmit()
210 * before checking for netif_tx_queue_stopped(). Without the
211 * memory barrier, there is a small possibility that
212 * start_xmit() will miss it and cause the queue to be stopped
214 * On the other hand we need an rmb() here to ensure the proper
215 * ordering of bit testing in the following
216 * netif_tx_queue_stopped(txq) call.
220 if (unlikely(netif_tx_queue_stopped(txq
))) {
221 /* Taking tx_lock() is needed to prevent reenabling the queue
222 * while it's empty. This could have happen if rx_action() gets
223 * suspended in bnx2x_tx_int() after the condition before
224 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
226 * stops the queue->sees fresh tx_bd_cons->releases the queue->
227 * sends some packets consuming the whole queue again->
231 __netif_tx_lock(txq
, smp_processor_id());
233 if ((netif_tx_queue_stopped(txq
)) &&
234 (bp
->state
== BNX2X_STATE_OPEN
) &&
235 (bnx2x_tx_avail(bp
, txdata
) >= MAX_SKB_FRAGS
+ 3))
236 netif_tx_wake_queue(txq
);
238 __netif_tx_unlock(txq
);
243 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath
*fp
,
246 u16 last_max
= fp
->last_max_sge
;
248 if (SUB_S16(idx
, last_max
) > 0)
249 fp
->last_max_sge
= idx
;
252 static void bnx2x_update_sge_prod(struct bnx2x_fastpath
*fp
,
253 struct eth_fast_path_rx_cqe
*fp_cqe
)
255 struct bnx2x
*bp
= fp
->bp
;
256 u16 sge_len
= SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe
->pkt_len
) -
257 le16_to_cpu(fp_cqe
->len_on_bd
)) >>
259 u16 last_max
, last_elem
, first_elem
;
266 /* First mark all used pages */
267 for (i
= 0; i
< sge_len
; i
++)
268 BIT_VEC64_CLEAR_BIT(fp
->sge_mask
,
269 RX_SGE(le16_to_cpu(fp_cqe
->sgl_or_raw_data
.sgl
[i
])));
271 DP(NETIF_MSG_RX_STATUS
, "fp_cqe->sgl[%d] = %d\n",
272 sge_len
- 1, le16_to_cpu(fp_cqe
->sgl_or_raw_data
.sgl
[sge_len
- 1]));
274 /* Here we assume that the last SGE index is the biggest */
275 prefetch((void *)(fp
->sge_mask
));
276 bnx2x_update_last_max_sge(fp
,
277 le16_to_cpu(fp_cqe
->sgl_or_raw_data
.sgl
[sge_len
- 1]));
279 last_max
= RX_SGE(fp
->last_max_sge
);
280 last_elem
= last_max
>> BIT_VEC64_ELEM_SHIFT
;
281 first_elem
= RX_SGE(fp
->rx_sge_prod
) >> BIT_VEC64_ELEM_SHIFT
;
283 /* If ring is not full */
284 if (last_elem
+ 1 != first_elem
)
287 /* Now update the prod */
288 for (i
= first_elem
; i
!= last_elem
; i
= NEXT_SGE_MASK_ELEM(i
)) {
289 if (likely(fp
->sge_mask
[i
]))
292 fp
->sge_mask
[i
] = BIT_VEC64_ELEM_ONE_MASK
;
293 delta
+= BIT_VEC64_ELEM_SZ
;
297 fp
->rx_sge_prod
+= delta
;
298 /* clear page-end entries */
299 bnx2x_clear_sge_mask_next_elems(fp
);
302 DP(NETIF_MSG_RX_STATUS
,
303 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
304 fp
->last_max_sge
, fp
->rx_sge_prod
);
307 /* Set Toeplitz hash value in the skb using the value from the
308 * CQE (calculated by HW).
310 static u32
bnx2x_get_rxhash(const struct bnx2x
*bp
,
311 const struct eth_fast_path_rx_cqe
*cqe
)
313 /* Set Toeplitz hash from CQE */
314 if ((bp
->dev
->features
& NETIF_F_RXHASH
) &&
315 (cqe
->status_flags
& ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG
))
316 return le32_to_cpu(cqe
->rss_hash_result
);
320 static void bnx2x_tpa_start(struct bnx2x_fastpath
*fp
, u16 queue
,
322 struct eth_fast_path_rx_cqe
*cqe
)
324 struct bnx2x
*bp
= fp
->bp
;
325 struct sw_rx_bd
*cons_rx_buf
= &fp
->rx_buf_ring
[cons
];
326 struct sw_rx_bd
*prod_rx_buf
= &fp
->rx_buf_ring
[prod
];
327 struct eth_rx_bd
*prod_bd
= &fp
->rx_desc_ring
[prod
];
329 struct bnx2x_agg_info
*tpa_info
= &fp
->tpa_info
[queue
];
330 struct sw_rx_bd
*first_buf
= &tpa_info
->first_buf
;
332 /* print error if current state != stop */
333 if (tpa_info
->tpa_state
!= BNX2X_TPA_STOP
)
334 BNX2X_ERR("start of bin not in stop [%d]\n", queue
);
336 /* Try to map an empty data buffer from the aggregation info */
337 mapping
= dma_map_single(&bp
->pdev
->dev
,
338 first_buf
->data
+ NET_SKB_PAD
,
339 fp
->rx_buf_size
, DMA_FROM_DEVICE
);
341 * ...if it fails - move the skb from the consumer to the producer
342 * and set the current aggregation state as ERROR to drop it
343 * when TPA_STOP arrives.
346 if (unlikely(dma_mapping_error(&bp
->pdev
->dev
, mapping
))) {
347 /* Move the BD from the consumer to the producer */
348 bnx2x_reuse_rx_data(fp
, cons
, prod
);
349 tpa_info
->tpa_state
= BNX2X_TPA_ERROR
;
353 /* move empty data from pool to prod */
354 prod_rx_buf
->data
= first_buf
->data
;
355 dma_unmap_addr_set(prod_rx_buf
, mapping
, mapping
);
356 /* point prod_bd to new data */
357 prod_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
358 prod_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
360 /* move partial skb from cons to pool (don't unmap yet) */
361 *first_buf
= *cons_rx_buf
;
363 /* mark bin state as START */
364 tpa_info
->parsing_flags
=
365 le16_to_cpu(cqe
->pars_flags
.flags
);
366 tpa_info
->vlan_tag
= le16_to_cpu(cqe
->vlan_tag
);
367 tpa_info
->tpa_state
= BNX2X_TPA_START
;
368 tpa_info
->len_on_bd
= le16_to_cpu(cqe
->len_on_bd
);
369 tpa_info
->placement_offset
= cqe
->placement_offset
;
370 tpa_info
->rxhash
= bnx2x_get_rxhash(bp
, cqe
);
372 #ifdef BNX2X_STOP_ON_ERROR
373 fp
->tpa_queue_used
|= (1 << queue
);
374 #ifdef _ASM_GENERIC_INT_L64_H
375 DP(NETIF_MSG_RX_STATUS
, "fp->tpa_queue_used = 0x%lx\n",
377 DP(NETIF_MSG_RX_STATUS
, "fp->tpa_queue_used = 0x%llx\n",
383 /* Timestamp option length allowed for TPA aggregation:
385 * nop nop kind length echo val
387 #define TPA_TSTAMP_OPT_LEN 12
389 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
392 * @parsing_flags: parsing flags from the START CQE
393 * @len_on_bd: total length of the first packet for the
396 * Approximate value of the MSS for this aggregation calculated using
397 * the first packet of it.
399 static inline u16
bnx2x_set_lro_mss(struct bnx2x
*bp
, u16 parsing_flags
,
403 * TPA arrgregation won't have either IP options or TCP options
404 * other than timestamp or IPv6 extension headers.
406 u16 hdrs_len
= ETH_HLEN
+ sizeof(struct tcphdr
);
408 if (GET_FLAG(parsing_flags
, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL
) ==
409 PRS_FLAG_OVERETH_IPV6
)
410 hdrs_len
+= sizeof(struct ipv6hdr
);
412 hdrs_len
+= sizeof(struct iphdr
);
415 /* Check if there was a TCP timestamp, if there is it's will
416 * always be 12 bytes length: nop nop kind length echo val.
418 * Otherwise FW would close the aggregation.
420 if (parsing_flags
& PARSING_FLAGS_TIME_STAMP_EXIST_FLAG
)
421 hdrs_len
+= TPA_TSTAMP_OPT_LEN
;
423 return len_on_bd
- hdrs_len
;
426 static int bnx2x_fill_frag_skb(struct bnx2x
*bp
, struct bnx2x_fastpath
*fp
,
427 u16 queue
, struct sk_buff
*skb
,
428 struct eth_end_agg_rx_cqe
*cqe
,
431 struct sw_rx_page
*rx_pg
, old_rx_pg
;
432 u32 i
, frag_len
, frag_size
, pages
;
435 struct bnx2x_agg_info
*tpa_info
= &fp
->tpa_info
[queue
];
436 u16 len_on_bd
= tpa_info
->len_on_bd
;
438 frag_size
= le16_to_cpu(cqe
->pkt_len
) - len_on_bd
;
439 pages
= SGE_PAGE_ALIGN(frag_size
) >> SGE_PAGE_SHIFT
;
441 /* This is needed in order to enable forwarding support */
443 skb_shinfo(skb
)->gso_size
= bnx2x_set_lro_mss(bp
,
444 tpa_info
->parsing_flags
, len_on_bd
);
446 #ifdef BNX2X_STOP_ON_ERROR
447 if (pages
> min_t(u32
, 8, MAX_SKB_FRAGS
)*SGE_PAGE_SIZE
*PAGES_PER_SGE
) {
448 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
450 BNX2X_ERR("cqe->pkt_len = %d\n", cqe
->pkt_len
);
456 /* Run through the SGL and compose the fragmented skb */
457 for (i
= 0, j
= 0; i
< pages
; i
+= PAGES_PER_SGE
, j
++) {
458 u16 sge_idx
= RX_SGE(le16_to_cpu(cqe
->sgl_or_raw_data
.sgl
[j
]));
460 /* FW gives the indices of the SGE as if the ring is an array
461 (meaning that "next" element will consume 2 indices) */
462 frag_len
= min(frag_size
, (u32
)(SGE_PAGE_SIZE
*PAGES_PER_SGE
));
463 rx_pg
= &fp
->rx_page_ring
[sge_idx
];
466 /* If we fail to allocate a substitute page, we simply stop
467 where we are and drop the whole packet */
468 err
= bnx2x_alloc_rx_sge(bp
, fp
, sge_idx
);
470 fp
->eth_q_stats
.rx_skb_alloc_failed
++;
474 /* Unmap the page as we r going to pass it to the stack */
475 dma_unmap_page(&bp
->pdev
->dev
,
476 dma_unmap_addr(&old_rx_pg
, mapping
),
477 SGE_PAGE_SIZE
*PAGES_PER_SGE
, DMA_FROM_DEVICE
);
479 /* Add one frag and update the appropriate fields in the skb */
480 skb_fill_page_desc(skb
, j
, old_rx_pg
.page
, 0, frag_len
);
482 skb
->data_len
+= frag_len
;
483 skb
->truesize
+= SGE_PAGE_SIZE
* PAGES_PER_SGE
;
484 skb
->len
+= frag_len
;
486 frag_size
-= frag_len
;
492 static void bnx2x_tpa_stop(struct bnx2x
*bp
, struct bnx2x_fastpath
*fp
,
493 u16 queue
, struct eth_end_agg_rx_cqe
*cqe
,
496 struct bnx2x_agg_info
*tpa_info
= &fp
->tpa_info
[queue
];
497 struct sw_rx_bd
*rx_buf
= &tpa_info
->first_buf
;
498 u32 pad
= tpa_info
->placement_offset
;
499 u16 len
= tpa_info
->len_on_bd
;
500 struct sk_buff
*skb
= NULL
;
501 u8
*data
= rx_buf
->data
;
504 u8 old_tpa_state
= tpa_info
->tpa_state
;
506 tpa_info
->tpa_state
= BNX2X_TPA_STOP
;
508 /* If we there was an error during the handling of the TPA_START -
509 * drop this aggregation.
511 if (old_tpa_state
== BNX2X_TPA_ERROR
)
514 /* Try to allocate the new data */
515 new_data
= kmalloc(fp
->rx_buf_size
+ NET_SKB_PAD
, GFP_ATOMIC
);
517 /* Unmap skb in the pool anyway, as we are going to change
518 pool entry status to BNX2X_TPA_STOP even if new skb allocation
520 dma_unmap_single(&bp
->pdev
->dev
, dma_unmap_addr(rx_buf
, mapping
),
521 fp
->rx_buf_size
, DMA_FROM_DEVICE
);
522 if (likely(new_data
))
523 skb
= build_skb(data
);
526 #ifdef BNX2X_STOP_ON_ERROR
527 if (pad
+ len
> fp
->rx_buf_size
) {
528 BNX2X_ERR("skb_put is about to fail... "
529 "pad %d len %d rx_buf_size %d\n",
530 pad
, len
, fp
->rx_buf_size
);
536 skb_reserve(skb
, pad
+ NET_SKB_PAD
);
538 skb
->rxhash
= tpa_info
->rxhash
;
540 skb
->protocol
= eth_type_trans(skb
, bp
->dev
);
541 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
543 if (!bnx2x_fill_frag_skb(bp
, fp
, queue
, skb
, cqe
, cqe_idx
)) {
544 if (tpa_info
->parsing_flags
& PARSING_FLAGS_VLAN
)
545 __vlan_hwaccel_put_tag(skb
, tpa_info
->vlan_tag
);
546 napi_gro_receive(&fp
->napi
, skb
);
548 DP(NETIF_MSG_RX_STATUS
, "Failed to allocate new pages"
549 " - dropping packet!\n");
550 dev_kfree_skb_any(skb
);
554 /* put new data in bin */
555 rx_buf
->data
= new_data
;
561 /* drop the packet and keep the buffer in the bin */
562 DP(NETIF_MSG_RX_STATUS
,
563 "Failed to allocate or map a new skb - dropping packet!\n");
564 fp
->eth_q_stats
.rx_skb_alloc_failed
++;
568 int bnx2x_rx_int(struct bnx2x_fastpath
*fp
, int budget
)
570 struct bnx2x
*bp
= fp
->bp
;
571 u16 bd_cons
, bd_prod
, bd_prod_fw
, comp_ring_cons
;
572 u16 hw_comp_cons
, sw_comp_cons
, sw_comp_prod
;
575 #ifdef BNX2X_STOP_ON_ERROR
576 if (unlikely(bp
->panic
))
580 /* CQ "next element" is of the size of the regular element,
581 that's why it's ok here */
582 hw_comp_cons
= le16_to_cpu(*fp
->rx_cons_sb
);
583 if ((hw_comp_cons
& MAX_RCQ_DESC_CNT
) == MAX_RCQ_DESC_CNT
)
586 bd_cons
= fp
->rx_bd_cons
;
587 bd_prod
= fp
->rx_bd_prod
;
588 bd_prod_fw
= bd_prod
;
589 sw_comp_cons
= fp
->rx_comp_cons
;
590 sw_comp_prod
= fp
->rx_comp_prod
;
592 /* Memory barrier necessary as speculative reads of the rx
593 * buffer can be ahead of the index in the status block
597 DP(NETIF_MSG_RX_STATUS
,
598 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
599 fp
->index
, hw_comp_cons
, sw_comp_cons
);
601 while (sw_comp_cons
!= hw_comp_cons
) {
602 struct sw_rx_bd
*rx_buf
= NULL
;
604 union eth_rx_cqe
*cqe
;
605 struct eth_fast_path_rx_cqe
*cqe_fp
;
607 enum eth_rx_cqe_type cqe_fp_type
;
611 #ifdef BNX2X_STOP_ON_ERROR
612 if (unlikely(bp
->panic
))
616 comp_ring_cons
= RCQ_BD(sw_comp_cons
);
617 bd_prod
= RX_BD(bd_prod
);
618 bd_cons
= RX_BD(bd_cons
);
620 cqe
= &fp
->rx_comp_ring
[comp_ring_cons
];
621 cqe_fp
= &cqe
->fast_path_cqe
;
622 cqe_fp_flags
= cqe_fp
->type_error_flags
;
623 cqe_fp_type
= cqe_fp_flags
& ETH_FAST_PATH_RX_CQE_TYPE
;
625 DP(NETIF_MSG_RX_STATUS
, "CQE type %x err %x status %x"
626 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags
),
627 cqe_fp_flags
, cqe_fp
->status_flags
,
628 le32_to_cpu(cqe_fp
->rss_hash_result
),
629 le16_to_cpu(cqe_fp
->vlan_tag
), le16_to_cpu(cqe_fp
->pkt_len
));
631 /* is this a slowpath msg? */
632 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type
))) {
633 bnx2x_sp_event(fp
, cqe
);
636 rx_buf
= &fp
->rx_buf_ring
[bd_cons
];
639 if (!CQE_TYPE_FAST(cqe_fp_type
)) {
640 #ifdef BNX2X_STOP_ON_ERROR
642 if (fp
->disable_tpa
&&
643 (CQE_TYPE_START(cqe_fp_type
) ||
644 CQE_TYPE_STOP(cqe_fp_type
)))
645 BNX2X_ERR("START/STOP packet while "
646 "disable_tpa type %x\n",
647 CQE_TYPE(cqe_fp_type
));
650 if (CQE_TYPE_START(cqe_fp_type
)) {
651 u16 queue
= cqe_fp
->queue_index
;
652 DP(NETIF_MSG_RX_STATUS
,
653 "calling tpa_start on queue %d\n",
656 bnx2x_tpa_start(fp
, queue
,
662 cqe
->end_agg_cqe
.queue_index
;
663 DP(NETIF_MSG_RX_STATUS
,
664 "calling tpa_stop on queue %d\n",
667 bnx2x_tpa_stop(bp
, fp
, queue
,
670 #ifdef BNX2X_STOP_ON_ERROR
675 bnx2x_update_sge_prod(fp
, cqe_fp
);
680 len
= le16_to_cpu(cqe_fp
->pkt_len
);
681 pad
= cqe_fp
->placement_offset
;
682 dma_sync_single_for_cpu(&bp
->pdev
->dev
,
683 dma_unmap_addr(rx_buf
, mapping
),
684 pad
+ RX_COPY_THRESH
,
687 prefetch(data
+ pad
); /* speedup eth_type_trans() */
688 /* is this an error packet? */
689 if (unlikely(cqe_fp_flags
& ETH_RX_ERROR_FALGS
)) {
691 "ERROR flags %x rx packet %u\n",
692 cqe_fp_flags
, sw_comp_cons
);
693 fp
->eth_q_stats
.rx_err_discard_pkt
++;
697 /* Since we don't have a jumbo ring
698 * copy small packets if mtu > 1500
700 if ((bp
->dev
->mtu
> ETH_MAX_PACKET_SIZE
) &&
701 (len
<= RX_COPY_THRESH
)) {
702 skb
= netdev_alloc_skb_ip_align(bp
->dev
, len
);
705 "ERROR packet dropped because of alloc failure\n");
706 fp
->eth_q_stats
.rx_skb_alloc_failed
++;
709 memcpy(skb
->data
, data
+ pad
, len
);
710 bnx2x_reuse_rx_data(fp
, bd_cons
, bd_prod
);
712 if (likely(bnx2x_alloc_rx_data(bp
, fp
, bd_prod
) == 0)) {
713 dma_unmap_single(&bp
->pdev
->dev
,
714 dma_unmap_addr(rx_buf
, mapping
),
717 skb
= build_skb(data
);
718 if (unlikely(!skb
)) {
720 fp
->eth_q_stats
.rx_skb_alloc_failed
++;
723 skb_reserve(skb
, pad
);
726 "ERROR packet dropped because "
727 "of alloc failure\n");
728 fp
->eth_q_stats
.rx_skb_alloc_failed
++;
730 bnx2x_reuse_rx_data(fp
, bd_cons
, bd_prod
);
736 skb
->protocol
= eth_type_trans(skb
, bp
->dev
);
738 /* Set Toeplitz hash for a none-LRO skb */
739 skb
->rxhash
= bnx2x_get_rxhash(bp
, cqe_fp
);
741 skb_checksum_none_assert(skb
);
743 if (bp
->dev
->features
& NETIF_F_RXCSUM
) {
745 if (likely(BNX2X_RX_CSUM_OK(cqe
)))
746 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
748 fp
->eth_q_stats
.hw_csum_err
++;
751 skb_record_rx_queue(skb
, fp
->rx_queue
);
753 if (le16_to_cpu(cqe_fp
->pars_flags
.flags
) &
755 __vlan_hwaccel_put_tag(skb
,
756 le16_to_cpu(cqe_fp
->vlan_tag
));
757 napi_gro_receive(&fp
->napi
, skb
);
763 bd_cons
= NEXT_RX_IDX(bd_cons
);
764 bd_prod
= NEXT_RX_IDX(bd_prod
);
765 bd_prod_fw
= NEXT_RX_IDX(bd_prod_fw
);
768 sw_comp_prod
= NEXT_RCQ_IDX(sw_comp_prod
);
769 sw_comp_cons
= NEXT_RCQ_IDX(sw_comp_cons
);
771 if (rx_pkt
== budget
)
775 fp
->rx_bd_cons
= bd_cons
;
776 fp
->rx_bd_prod
= bd_prod_fw
;
777 fp
->rx_comp_cons
= sw_comp_cons
;
778 fp
->rx_comp_prod
= sw_comp_prod
;
780 /* Update producers */
781 bnx2x_update_rx_prod(bp
, fp
, bd_prod_fw
, sw_comp_prod
,
784 fp
->rx_pkt
+= rx_pkt
;
790 static irqreturn_t
bnx2x_msix_fp_int(int irq
, void *fp_cookie
)
792 struct bnx2x_fastpath
*fp
= fp_cookie
;
793 struct bnx2x
*bp
= fp
->bp
;
796 DP(BNX2X_MSG_FP
, "got an MSI-X interrupt on IDX:SB "
797 "[fp %d fw_sd %d igusb %d]\n",
798 fp
->index
, fp
->fw_sb_id
, fp
->igu_sb_id
);
799 bnx2x_ack_sb(bp
, fp
->igu_sb_id
, USTORM_ID
, 0, IGU_INT_DISABLE
, 0);
801 #ifdef BNX2X_STOP_ON_ERROR
802 if (unlikely(bp
->panic
))
806 /* Handle Rx and Tx according to MSI-X vector */
807 prefetch(fp
->rx_cons_sb
);
809 for_each_cos_in_tx_queue(fp
, cos
)
810 prefetch(fp
->txdata
[cos
].tx_cons_sb
);
812 prefetch(&fp
->sb_running_index
[SM_RX_ID
]);
813 napi_schedule(&bnx2x_fp(bp
, fp
->index
, napi
));
818 /* HW Lock for shared dual port PHYs */
819 void bnx2x_acquire_phy_lock(struct bnx2x
*bp
)
821 mutex_lock(&bp
->port
.phy_mutex
);
823 if (bp
->port
.need_hw_lock
)
824 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_MDIO
);
827 void bnx2x_release_phy_lock(struct bnx2x
*bp
)
829 if (bp
->port
.need_hw_lock
)
830 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_MDIO
);
832 mutex_unlock(&bp
->port
.phy_mutex
);
835 /* calculates MF speed according to current linespeed and MF configuration */
836 u16
bnx2x_get_mf_speed(struct bnx2x
*bp
)
838 u16 line_speed
= bp
->link_vars
.line_speed
;
840 u16 maxCfg
= bnx2x_extract_max_cfg(bp
,
841 bp
->mf_config
[BP_VN(bp
)]);
843 /* Calculate the current MAX line speed limit for the MF
847 line_speed
= (line_speed
* maxCfg
) / 100;
849 u16 vn_max_rate
= maxCfg
* 100;
851 if (vn_max_rate
< line_speed
)
852 line_speed
= vn_max_rate
;
860 * bnx2x_fill_report_data - fill link report data to report
863 * @data: link state to update
865 * It uses a none-atomic bit operations because is called under the mutex.
867 static inline void bnx2x_fill_report_data(struct bnx2x
*bp
,
868 struct bnx2x_link_report_data
*data
)
870 u16 line_speed
= bnx2x_get_mf_speed(bp
);
872 memset(data
, 0, sizeof(*data
));
874 /* Fill the report data: efective line speed */
875 data
->line_speed
= line_speed
;
878 if (!bp
->link_vars
.link_up
|| (bp
->flags
& MF_FUNC_DIS
))
879 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
880 &data
->link_report_flags
);
883 if (bp
->link_vars
.duplex
== DUPLEX_FULL
)
884 __set_bit(BNX2X_LINK_REPORT_FD
, &data
->link_report_flags
);
886 /* Rx Flow Control is ON */
887 if (bp
->link_vars
.flow_ctrl
& BNX2X_FLOW_CTRL_RX
)
888 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON
, &data
->link_report_flags
);
890 /* Tx Flow Control is ON */
891 if (bp
->link_vars
.flow_ctrl
& BNX2X_FLOW_CTRL_TX
)
892 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON
, &data
->link_report_flags
);
896 * bnx2x_link_report - report link status to OS.
900 * Calls the __bnx2x_link_report() under the same locking scheme
901 * as a link/PHY state managing code to ensure a consistent link
905 void bnx2x_link_report(struct bnx2x
*bp
)
907 bnx2x_acquire_phy_lock(bp
);
908 __bnx2x_link_report(bp
);
909 bnx2x_release_phy_lock(bp
);
913 * __bnx2x_link_report - report link status to OS.
917 * None atomic inmlementation.
918 * Should be called under the phy_lock.
920 void __bnx2x_link_report(struct bnx2x
*bp
)
922 struct bnx2x_link_report_data cur_data
;
926 bnx2x_read_mf_cfg(bp
);
928 /* Read the current link report info */
929 bnx2x_fill_report_data(bp
, &cur_data
);
931 /* Don't report link down or exactly the same link status twice */
932 if (!memcmp(&cur_data
, &bp
->last_reported_link
, sizeof(cur_data
)) ||
933 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
934 &bp
->last_reported_link
.link_report_flags
) &&
935 test_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
936 &cur_data
.link_report_flags
)))
941 /* We are going to report a new link parameters now -
942 * remember the current data for the next time.
944 memcpy(&bp
->last_reported_link
, &cur_data
, sizeof(cur_data
));
946 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
947 &cur_data
.link_report_flags
)) {
948 netif_carrier_off(bp
->dev
);
949 netdev_err(bp
->dev
, "NIC Link is Down\n");
955 netif_carrier_on(bp
->dev
);
957 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD
,
958 &cur_data
.link_report_flags
))
963 /* Handle the FC at the end so that only these flags would be
964 * possibly set. This way we may easily check if there is no FC
967 if (cur_data
.link_report_flags
) {
968 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON
,
969 &cur_data
.link_report_flags
)) {
970 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON
,
971 &cur_data
.link_report_flags
))
972 flow
= "ON - receive & transmit";
974 flow
= "ON - receive";
976 flow
= "ON - transmit";
981 netdev_info(bp
->dev
, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
982 cur_data
.line_speed
, duplex
, flow
);
986 void bnx2x_init_rx_rings(struct bnx2x
*bp
)
988 int func
= BP_FUNC(bp
);
992 /* Allocate TPA resources */
993 for_each_rx_queue(bp
, j
) {
994 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
997 "mtu %d rx_buf_size %d\n", bp
->dev
->mtu
, fp
->rx_buf_size
);
999 if (!fp
->disable_tpa
) {
1000 /* Fill the per-aggregtion pool */
1001 for (i
= 0; i
< MAX_AGG_QS(bp
); i
++) {
1002 struct bnx2x_agg_info
*tpa_info
=
1004 struct sw_rx_bd
*first_buf
=
1005 &tpa_info
->first_buf
;
1007 first_buf
->data
= kmalloc(fp
->rx_buf_size
+ NET_SKB_PAD
,
1009 if (!first_buf
->data
) {
1010 BNX2X_ERR("Failed to allocate TPA "
1011 "skb pool for queue[%d] - "
1012 "disabling TPA on this "
1014 bnx2x_free_tpa_pool(bp
, fp
, i
);
1015 fp
->disable_tpa
= 1;
1018 dma_unmap_addr_set(first_buf
, mapping
, 0);
1019 tpa_info
->tpa_state
= BNX2X_TPA_STOP
;
1022 /* "next page" elements initialization */
1023 bnx2x_set_next_page_sgl(fp
);
1025 /* set SGEs bit mask */
1026 bnx2x_init_sge_ring_bit_mask(fp
);
1028 /* Allocate SGEs and initialize the ring elements */
1029 for (i
= 0, ring_prod
= 0;
1030 i
< MAX_RX_SGE_CNT
*NUM_RX_SGE_PAGES
; i
++) {
1032 if (bnx2x_alloc_rx_sge(bp
, fp
, ring_prod
) < 0) {
1033 BNX2X_ERR("was only able to allocate "
1035 BNX2X_ERR("disabling TPA for "
1037 /* Cleanup already allocated elements */
1038 bnx2x_free_rx_sge_range(bp
, fp
,
1040 bnx2x_free_tpa_pool(bp
, fp
,
1042 fp
->disable_tpa
= 1;
1046 ring_prod
= NEXT_SGE_IDX(ring_prod
);
1049 fp
->rx_sge_prod
= ring_prod
;
1053 for_each_rx_queue(bp
, j
) {
1054 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
1058 /* Activate BD ring */
1060 * this will generate an interrupt (to the TSTORM)
1061 * must only be done after chip is initialized
1063 bnx2x_update_rx_prod(bp
, fp
, fp
->rx_bd_prod
, fp
->rx_comp_prod
,
1069 if (CHIP_IS_E1(bp
)) {
1070 REG_WR(bp
, BAR_USTRORM_INTMEM
+
1071 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func
),
1072 U64_LO(fp
->rx_comp_mapping
));
1073 REG_WR(bp
, BAR_USTRORM_INTMEM
+
1074 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func
) + 4,
1075 U64_HI(fp
->rx_comp_mapping
));
1080 static void bnx2x_free_tx_skbs(struct bnx2x
*bp
)
1085 for_each_tx_queue(bp
, i
) {
1086 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
1087 for_each_cos_in_tx_queue(fp
, cos
) {
1088 struct bnx2x_fp_txdata
*txdata
= &fp
->txdata
[cos
];
1089 unsigned pkts_compl
= 0, bytes_compl
= 0;
1091 u16 sw_prod
= txdata
->tx_pkt_prod
;
1092 u16 sw_cons
= txdata
->tx_pkt_cons
;
1094 while (sw_cons
!= sw_prod
) {
1095 bnx2x_free_tx_pkt(bp
, txdata
, TX_BD(sw_cons
),
1096 &pkts_compl
, &bytes_compl
);
1099 netdev_tx_reset_queue(
1100 netdev_get_tx_queue(bp
->dev
, txdata
->txq_index
));
1105 static void bnx2x_free_rx_bds(struct bnx2x_fastpath
*fp
)
1107 struct bnx2x
*bp
= fp
->bp
;
1110 /* ring wasn't allocated */
1111 if (fp
->rx_buf_ring
== NULL
)
1114 for (i
= 0; i
< NUM_RX_BD
; i
++) {
1115 struct sw_rx_bd
*rx_buf
= &fp
->rx_buf_ring
[i
];
1116 u8
*data
= rx_buf
->data
;
1120 dma_unmap_single(&bp
->pdev
->dev
,
1121 dma_unmap_addr(rx_buf
, mapping
),
1122 fp
->rx_buf_size
, DMA_FROM_DEVICE
);
1124 rx_buf
->data
= NULL
;
1129 static void bnx2x_free_rx_skbs(struct bnx2x
*bp
)
1133 for_each_rx_queue(bp
, j
) {
1134 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
1136 bnx2x_free_rx_bds(fp
);
1138 if (!fp
->disable_tpa
)
1139 bnx2x_free_tpa_pool(bp
, fp
, MAX_AGG_QS(bp
));
1143 void bnx2x_free_skbs(struct bnx2x
*bp
)
1145 bnx2x_free_tx_skbs(bp
);
1146 bnx2x_free_rx_skbs(bp
);
1149 void bnx2x_update_max_mf_config(struct bnx2x
*bp
, u32 value
)
1151 /* load old values */
1152 u32 mf_cfg
= bp
->mf_config
[BP_VN(bp
)];
1154 if (value
!= bnx2x_extract_max_cfg(bp
, mf_cfg
)) {
1155 /* leave all but MAX value */
1156 mf_cfg
&= ~FUNC_MF_CFG_MAX_BW_MASK
;
1158 /* set new MAX value */
1159 mf_cfg
|= (value
<< FUNC_MF_CFG_MAX_BW_SHIFT
)
1160 & FUNC_MF_CFG_MAX_BW_MASK
;
1162 bnx2x_fw_command(bp
, DRV_MSG_CODE_SET_MF_BW
, mf_cfg
);
1167 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1169 * @bp: driver handle
1170 * @nvecs: number of vectors to be released
1172 static void bnx2x_free_msix_irqs(struct bnx2x
*bp
, int nvecs
)
1176 if (nvecs
== offset
)
1178 free_irq(bp
->msix_table
[offset
].vector
, bp
->dev
);
1179 DP(NETIF_MSG_IFDOWN
, "released sp irq (%d)\n",
1180 bp
->msix_table
[offset
].vector
);
1183 if (nvecs
== offset
)
1188 for_each_eth_queue(bp
, i
) {
1189 if (nvecs
== offset
)
1191 DP(NETIF_MSG_IFDOWN
, "about to release fp #%d->%d "
1192 "irq\n", i
, bp
->msix_table
[offset
].vector
);
1194 free_irq(bp
->msix_table
[offset
++].vector
, &bp
->fp
[i
]);
1198 void bnx2x_free_irq(struct bnx2x
*bp
)
1200 if (bp
->flags
& USING_MSIX_FLAG
)
1201 bnx2x_free_msix_irqs(bp
, BNX2X_NUM_ETH_QUEUES(bp
) +
1203 else if (bp
->flags
& USING_MSI_FLAG
)
1204 free_irq(bp
->pdev
->irq
, bp
->dev
);
1206 free_irq(bp
->pdev
->irq
, bp
->dev
);
1209 int bnx2x_enable_msix(struct bnx2x
*bp
)
1211 int msix_vec
= 0, i
, rc
, req_cnt
;
1213 bp
->msix_table
[msix_vec
].entry
= msix_vec
;
1214 DP(NETIF_MSG_IFUP
, "msix_table[0].entry = %d (slowpath)\n",
1215 bp
->msix_table
[0].entry
);
1219 bp
->msix_table
[msix_vec
].entry
= msix_vec
;
1220 DP(NETIF_MSG_IFUP
, "msix_table[%d].entry = %d (CNIC)\n",
1221 bp
->msix_table
[msix_vec
].entry
, bp
->msix_table
[msix_vec
].entry
);
1224 /* We need separate vectors for ETH queues only (not FCoE) */
1225 for_each_eth_queue(bp
, i
) {
1226 bp
->msix_table
[msix_vec
].entry
= msix_vec
;
1227 DP(NETIF_MSG_IFUP
, "msix_table[%d].entry = %d "
1228 "(fastpath #%u)\n", msix_vec
, msix_vec
, i
);
1232 req_cnt
= BNX2X_NUM_ETH_QUEUES(bp
) + CNIC_PRESENT
+ 1;
1234 rc
= pci_enable_msix(bp
->pdev
, &bp
->msix_table
[0], req_cnt
);
1237 * reconfigure number of tx/rx queues according to available
1240 if (rc
>= BNX2X_MIN_MSIX_VEC_CNT
) {
1241 /* how less vectors we will have? */
1242 int diff
= req_cnt
- rc
;
1245 "Trying to use less MSI-X vectors: %d\n", rc
);
1247 rc
= pci_enable_msix(bp
->pdev
, &bp
->msix_table
[0], rc
);
1251 "MSI-X is not attainable rc %d\n", rc
);
1255 * decrease number of queues by number of unallocated entries
1257 bp
->num_queues
-= diff
;
1259 DP(NETIF_MSG_IFUP
, "New queue configuration set: %d\n",
1262 /* fall to INTx if not enough memory */
1264 bp
->flags
|= DISABLE_MSI_FLAG
;
1265 DP(NETIF_MSG_IFUP
, "MSI-X is not attainable rc %d\n", rc
);
1269 bp
->flags
|= USING_MSIX_FLAG
;
1274 static int bnx2x_req_msix_irqs(struct bnx2x
*bp
)
1276 int i
, rc
, offset
= 0;
1278 rc
= request_irq(bp
->msix_table
[offset
++].vector
,
1279 bnx2x_msix_sp_int
, 0,
1280 bp
->dev
->name
, bp
->dev
);
1282 BNX2X_ERR("request sp irq failed\n");
1289 for_each_eth_queue(bp
, i
) {
1290 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
1291 snprintf(fp
->name
, sizeof(fp
->name
), "%s-fp-%d",
1294 rc
= request_irq(bp
->msix_table
[offset
].vector
,
1295 bnx2x_msix_fp_int
, 0, fp
->name
, fp
);
1297 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i
,
1298 bp
->msix_table
[offset
].vector
, rc
);
1299 bnx2x_free_msix_irqs(bp
, offset
);
1306 i
= BNX2X_NUM_ETH_QUEUES(bp
);
1307 offset
= 1 + CNIC_PRESENT
;
1308 netdev_info(bp
->dev
, "using MSI-X IRQs: sp %d fp[%d] %d"
1310 bp
->msix_table
[0].vector
,
1311 0, bp
->msix_table
[offset
].vector
,
1312 i
- 1, bp
->msix_table
[offset
+ i
- 1].vector
);
1317 int bnx2x_enable_msi(struct bnx2x
*bp
)
1321 rc
= pci_enable_msi(bp
->pdev
);
1323 DP(NETIF_MSG_IFUP
, "MSI is not attainable\n");
1326 bp
->flags
|= USING_MSI_FLAG
;
1331 static int bnx2x_req_irq(struct bnx2x
*bp
)
1333 unsigned long flags
;
1336 if (bp
->flags
& USING_MSI_FLAG
)
1339 flags
= IRQF_SHARED
;
1341 rc
= request_irq(bp
->pdev
->irq
, bnx2x_interrupt
, flags
,
1342 bp
->dev
->name
, bp
->dev
);
1346 static inline int bnx2x_setup_irqs(struct bnx2x
*bp
)
1349 if (bp
->flags
& USING_MSIX_FLAG
) {
1350 rc
= bnx2x_req_msix_irqs(bp
);
1355 rc
= bnx2x_req_irq(bp
);
1357 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc
);
1360 if (bp
->flags
& USING_MSI_FLAG
) {
1361 bp
->dev
->irq
= bp
->pdev
->irq
;
1362 netdev_info(bp
->dev
, "using MSI IRQ %d\n",
1370 static inline void bnx2x_napi_enable(struct bnx2x
*bp
)
1374 for_each_rx_queue(bp
, i
)
1375 napi_enable(&bnx2x_fp(bp
, i
, napi
));
1378 static inline void bnx2x_napi_disable(struct bnx2x
*bp
)
1382 for_each_rx_queue(bp
, i
)
1383 napi_disable(&bnx2x_fp(bp
, i
, napi
));
1386 void bnx2x_netif_start(struct bnx2x
*bp
)
1388 if (netif_running(bp
->dev
)) {
1389 bnx2x_napi_enable(bp
);
1390 bnx2x_int_enable(bp
);
1391 if (bp
->state
== BNX2X_STATE_OPEN
)
1392 netif_tx_wake_all_queues(bp
->dev
);
1396 void bnx2x_netif_stop(struct bnx2x
*bp
, int disable_hw
)
1398 bnx2x_int_disable_sync(bp
, disable_hw
);
1399 bnx2x_napi_disable(bp
);
1402 u16
bnx2x_select_queue(struct net_device
*dev
, struct sk_buff
*skb
)
1404 struct bnx2x
*bp
= netdev_priv(dev
);
1408 struct ethhdr
*hdr
= (struct ethhdr
*)skb
->data
;
1409 u16 ether_type
= ntohs(hdr
->h_proto
);
1411 /* Skip VLAN tag if present */
1412 if (ether_type
== ETH_P_8021Q
) {
1413 struct vlan_ethhdr
*vhdr
=
1414 (struct vlan_ethhdr
*)skb
->data
;
1416 ether_type
= ntohs(vhdr
->h_vlan_encapsulated_proto
);
1419 /* If ethertype is FCoE or FIP - use FCoE ring */
1420 if ((ether_type
== ETH_P_FCOE
) || (ether_type
== ETH_P_FIP
))
1421 return bnx2x_fcoe_tx(bp
, txq_index
);
1424 /* select a non-FCoE queue */
1425 return __skb_tx_hash(dev
, skb
, BNX2X_NUM_ETH_QUEUES(bp
));
1428 void bnx2x_set_num_queues(struct bnx2x
*bp
)
1430 switch (bp
->multi_mode
) {
1431 case ETH_RSS_MODE_DISABLED
:
1434 case ETH_RSS_MODE_REGULAR
:
1435 bp
->num_queues
= bnx2x_calc_num_queues(bp
);
1444 /* override in ISCSI SD mod */
1445 if (IS_MF_ISCSI_SD(bp
))
1448 /* Add special queues */
1449 bp
->num_queues
+= NON_ETH_CONTEXT_USE
;
1453 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1455 * @bp: Driver handle
1457 * We currently support for at most 16 Tx queues for each CoS thus we will
1458 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1461 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1462 * index after all ETH L2 indices.
1464 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1465 * will be the holes at the end of each group of 16 ETh L2 indices (0..15,
1466 * 16..31,...) with indicies that are not coupled with any real Tx queue.
1468 * The proper configuration of skb->queue_mapping is handled by
1469 * bnx2x_select_queue() and __skb_tx_hash().
1471 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1472 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1474 static inline int bnx2x_set_real_num_queues(struct bnx2x
*bp
)
1478 tx
= MAX_TXQS_PER_COS
* bp
->max_cos
;
1479 rx
= BNX2X_NUM_ETH_QUEUES(bp
);
1481 /* account for fcoe queue */
1489 rc
= netif_set_real_num_tx_queues(bp
->dev
, tx
);
1491 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc
);
1494 rc
= netif_set_real_num_rx_queues(bp
->dev
, rx
);
1496 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc
);
1500 DP(NETIF_MSG_DRV
, "Setting real num queues to (tx, rx) (%d, %d)\n",
1506 static inline void bnx2x_set_rx_buf_size(struct bnx2x
*bp
)
1510 for_each_queue(bp
, i
) {
1511 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
1514 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1517 * Although there are no IP frames expected to arrive to
1518 * this ring we still want to add an
1519 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1522 mtu
= BNX2X_FCOE_MINI_JUMBO_MTU
;
1525 fp
->rx_buf_size
= BNX2X_FW_RX_ALIGN_START
+
1526 IP_HEADER_ALIGNMENT_PADDING
+
1529 BNX2X_FW_RX_ALIGN_END
;
1530 /* Note : rx_buf_size doesnt take into account NET_SKB_PAD */
1534 static inline int bnx2x_init_rss_pf(struct bnx2x
*bp
)
1537 u8 ind_table
[T_ETH_INDIRECTION_TABLE_SIZE
] = {0};
1538 u8 num_eth_queues
= BNX2X_NUM_ETH_QUEUES(bp
);
1541 * Prepare the inital contents fo the indirection table if RSS is
1544 if (bp
->multi_mode
!= ETH_RSS_MODE_DISABLED
) {
1545 for (i
= 0; i
< sizeof(ind_table
); i
++)
1548 ethtool_rxfh_indir_default(i
, num_eth_queues
);
1552 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1553 * per-port, so if explicit configuration is needed , do it only
1556 * For 57712 and newer on the other hand it's a per-function
1559 return bnx2x_config_rss_pf(bp
, ind_table
,
1560 bp
->port
.pmf
|| !CHIP_IS_E1x(bp
));
1563 int bnx2x_config_rss_pf(struct bnx2x
*bp
, u8
*ind_table
, bool config_hash
)
1565 struct bnx2x_config_rss_params params
= {0};
1568 /* Although RSS is meaningless when there is a single HW queue we
1569 * still need it enabled in order to have HW Rx hash generated.
1571 * if (!is_eth_multi(bp))
1572 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1575 params
.rss_obj
= &bp
->rss_conf_obj
;
1577 __set_bit(RAMROD_COMP_WAIT
, ¶ms
.ramrod_flags
);
1580 switch (bp
->multi_mode
) {
1581 case ETH_RSS_MODE_DISABLED
:
1582 __set_bit(BNX2X_RSS_MODE_DISABLED
, ¶ms
.rss_flags
);
1584 case ETH_RSS_MODE_REGULAR
:
1585 __set_bit(BNX2X_RSS_MODE_REGULAR
, ¶ms
.rss_flags
);
1587 case ETH_RSS_MODE_VLAN_PRI
:
1588 __set_bit(BNX2X_RSS_MODE_VLAN_PRI
, ¶ms
.rss_flags
);
1590 case ETH_RSS_MODE_E1HOV_PRI
:
1591 __set_bit(BNX2X_RSS_MODE_E1HOV_PRI
, ¶ms
.rss_flags
);
1593 case ETH_RSS_MODE_IP_DSCP
:
1594 __set_bit(BNX2X_RSS_MODE_IP_DSCP
, ¶ms
.rss_flags
);
1597 BNX2X_ERR("Unknown multi_mode: %d\n", bp
->multi_mode
);
1601 /* If RSS is enabled */
1602 if (bp
->multi_mode
!= ETH_RSS_MODE_DISABLED
) {
1603 /* RSS configuration */
1604 __set_bit(BNX2X_RSS_IPV4
, ¶ms
.rss_flags
);
1605 __set_bit(BNX2X_RSS_IPV4_TCP
, ¶ms
.rss_flags
);
1606 __set_bit(BNX2X_RSS_IPV6
, ¶ms
.rss_flags
);
1607 __set_bit(BNX2X_RSS_IPV6_TCP
, ¶ms
.rss_flags
);
1610 params
.rss_result_mask
= MULTI_MASK
;
1612 memcpy(params
.ind_table
, ind_table
, sizeof(params
.ind_table
));
1616 for (i
= 0; i
< sizeof(params
.rss_key
) / 4; i
++)
1617 params
.rss_key
[i
] = random32();
1619 __set_bit(BNX2X_RSS_SET_SRCH
, ¶ms
.rss_flags
);
1623 return bnx2x_config_rss(bp
, ¶ms
);
1626 static inline int bnx2x_init_hw(struct bnx2x
*bp
, u32 load_code
)
1628 struct bnx2x_func_state_params func_params
= {0};
1630 /* Prepare parameters for function state transitions */
1631 __set_bit(RAMROD_COMP_WAIT
, &func_params
.ramrod_flags
);
1633 func_params
.f_obj
= &bp
->func_obj
;
1634 func_params
.cmd
= BNX2X_F_CMD_HW_INIT
;
1636 func_params
.params
.hw_init
.load_phase
= load_code
;
1638 return bnx2x_func_state_change(bp
, &func_params
);
1642 * Cleans the object that have internal lists without sending
1643 * ramrods. Should be run when interrutps are disabled.
1645 static void bnx2x_squeeze_objects(struct bnx2x
*bp
)
1648 unsigned long ramrod_flags
= 0, vlan_mac_flags
= 0;
1649 struct bnx2x_mcast_ramrod_params rparam
= {0};
1650 struct bnx2x_vlan_mac_obj
*mac_obj
= &bp
->fp
->mac_obj
;
1652 /***************** Cleanup MACs' object first *************************/
1654 /* Wait for completion of requested */
1655 __set_bit(RAMROD_COMP_WAIT
, &ramrod_flags
);
1656 /* Perform a dry cleanup */
1657 __set_bit(RAMROD_DRV_CLR_ONLY
, &ramrod_flags
);
1659 /* Clean ETH primary MAC */
1660 __set_bit(BNX2X_ETH_MAC
, &vlan_mac_flags
);
1661 rc
= mac_obj
->delete_all(bp
, &bp
->fp
->mac_obj
, &vlan_mac_flags
,
1664 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc
);
1666 /* Cleanup UC list */
1668 __set_bit(BNX2X_UC_LIST_MAC
, &vlan_mac_flags
);
1669 rc
= mac_obj
->delete_all(bp
, mac_obj
, &vlan_mac_flags
,
1672 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc
);
1674 /***************** Now clean mcast object *****************************/
1675 rparam
.mcast_obj
= &bp
->mcast_obj
;
1676 __set_bit(RAMROD_DRV_CLR_ONLY
, &rparam
.ramrod_flags
);
1678 /* Add a DEL command... */
1679 rc
= bnx2x_config_mcast(bp
, &rparam
, BNX2X_MCAST_CMD_DEL
);
1681 BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
1682 "object: %d\n", rc
);
1684 /* ...and wait until all pending commands are cleared */
1685 rc
= bnx2x_config_mcast(bp
, &rparam
, BNX2X_MCAST_CMD_CONT
);
1688 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1693 rc
= bnx2x_config_mcast(bp
, &rparam
, BNX2X_MCAST_CMD_CONT
);
1697 #ifndef BNX2X_STOP_ON_ERROR
1698 #define LOAD_ERROR_EXIT(bp, label) \
1700 (bp)->state = BNX2X_STATE_ERROR; \
1704 #define LOAD_ERROR_EXIT(bp, label) \
1706 (bp)->state = BNX2X_STATE_ERROR; \
1712 /* must be called with rtnl_lock */
1713 int bnx2x_nic_load(struct bnx2x
*bp
, int load_mode
)
1715 int port
= BP_PORT(bp
);
1719 #ifdef BNX2X_STOP_ON_ERROR
1720 if (unlikely(bp
->panic
))
1724 bp
->state
= BNX2X_STATE_OPENING_WAIT4_LOAD
;
1726 /* Set the initial link reported state to link down */
1727 bnx2x_acquire_phy_lock(bp
);
1728 memset(&bp
->last_reported_link
, 0, sizeof(bp
->last_reported_link
));
1729 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
1730 &bp
->last_reported_link
.link_report_flags
);
1731 bnx2x_release_phy_lock(bp
);
1733 /* must be called before memory allocation and HW init */
1734 bnx2x_ilt_set_info(bp
);
1737 * Zero fastpath structures preserving invariants like napi, which are
1738 * allocated only once, fp index, max_cos, bp pointer.
1739 * Also set fp->disable_tpa.
1741 for_each_queue(bp
, i
)
1745 /* Set the receive queues buffer size */
1746 bnx2x_set_rx_buf_size(bp
);
1748 if (bnx2x_alloc_mem(bp
))
1751 /* As long as bnx2x_alloc_mem() may possibly update
1752 * bp->num_queues, bnx2x_set_real_num_queues() should always
1755 rc
= bnx2x_set_real_num_queues(bp
);
1757 BNX2X_ERR("Unable to set real_num_queues\n");
1758 LOAD_ERROR_EXIT(bp
, load_error0
);
1761 /* configure multi cos mappings in kernel.
1762 * this configuration may be overriden by a multi class queue discipline
1763 * or by a dcbx negotiation result.
1765 bnx2x_setup_tc(bp
->dev
, bp
->max_cos
);
1767 bnx2x_napi_enable(bp
);
1769 /* Send LOAD_REQUEST command to MCP
1770 * Returns the type of LOAD command:
1771 * if it is the first port to be initialized
1772 * common blocks should be initialized, otherwise - not
1774 if (!BP_NOMCP(bp
)) {
1775 load_code
= bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_REQ
, 0);
1777 BNX2X_ERR("MCP response failure, aborting\n");
1779 LOAD_ERROR_EXIT(bp
, load_error1
);
1781 if (load_code
== FW_MSG_CODE_DRV_LOAD_REFUSED
) {
1782 rc
= -EBUSY
; /* other port in diagnostic mode */
1783 LOAD_ERROR_EXIT(bp
, load_error1
);
1787 int path
= BP_PATH(bp
);
1789 DP(NETIF_MSG_IFUP
, "NO MCP - load counts[%d] %d, %d, %d\n",
1790 path
, load_count
[path
][0], load_count
[path
][1],
1791 load_count
[path
][2]);
1792 load_count
[path
][0]++;
1793 load_count
[path
][1 + port
]++;
1794 DP(NETIF_MSG_IFUP
, "NO MCP - new load counts[%d] %d, %d, %d\n",
1795 path
, load_count
[path
][0], load_count
[path
][1],
1796 load_count
[path
][2]);
1797 if (load_count
[path
][0] == 1)
1798 load_code
= FW_MSG_CODE_DRV_LOAD_COMMON
;
1799 else if (load_count
[path
][1 + port
] == 1)
1800 load_code
= FW_MSG_CODE_DRV_LOAD_PORT
;
1802 load_code
= FW_MSG_CODE_DRV_LOAD_FUNCTION
;
1805 if ((load_code
== FW_MSG_CODE_DRV_LOAD_COMMON
) ||
1806 (load_code
== FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
) ||
1807 (load_code
== FW_MSG_CODE_DRV_LOAD_PORT
)) {
1810 * We need the barrier to ensure the ordering between the
1811 * writing to bp->port.pmf here and reading it from the
1812 * bnx2x_periodic_task().
1815 queue_delayed_work(bnx2x_wq
, &bp
->period_task
, 0);
1819 DP(NETIF_MSG_LINK
, "pmf %d\n", bp
->port
.pmf
);
1821 /* Init Function state controlling object */
1822 bnx2x__init_func_obj(bp
);
1825 rc
= bnx2x_init_hw(bp
, load_code
);
1827 BNX2X_ERR("HW init failed, aborting\n");
1828 bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_DONE
, 0);
1829 LOAD_ERROR_EXIT(bp
, load_error2
);
1832 /* Connect to IRQs */
1833 rc
= bnx2x_setup_irqs(bp
);
1835 bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_DONE
, 0);
1836 LOAD_ERROR_EXIT(bp
, load_error2
);
1839 /* Setup NIC internals and enable interrupts */
1840 bnx2x_nic_init(bp
, load_code
);
1842 /* Init per-function objects */
1843 bnx2x_init_bp_objs(bp
);
1845 if (((load_code
== FW_MSG_CODE_DRV_LOAD_COMMON
) ||
1846 (load_code
== FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
)) &&
1847 (bp
->common
.shmem2_base
)) {
1848 if (SHMEM2_HAS(bp
, dcc_support
))
1849 SHMEM2_WR(bp
, dcc_support
,
1850 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV
|
1851 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV
));
1854 bp
->state
= BNX2X_STATE_OPENING_WAIT4_PORT
;
1855 rc
= bnx2x_func_start(bp
);
1857 BNX2X_ERR("Function start failed!\n");
1858 bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_DONE
, 0);
1859 LOAD_ERROR_EXIT(bp
, load_error3
);
1862 /* Send LOAD_DONE command to MCP */
1863 if (!BP_NOMCP(bp
)) {
1864 load_code
= bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_DONE
, 0);
1866 BNX2X_ERR("MCP response failure, aborting\n");
1868 LOAD_ERROR_EXIT(bp
, load_error3
);
1872 rc
= bnx2x_setup_leading(bp
);
1874 BNX2X_ERR("Setup leading failed!\n");
1875 LOAD_ERROR_EXIT(bp
, load_error3
);
1879 /* Enable Timer scan */
1880 REG_WR(bp
, TM_REG_EN_LINEAR0_TIMER
+ port
*4, 1);
1883 for_each_nondefault_queue(bp
, i
) {
1884 rc
= bnx2x_setup_queue(bp
, &bp
->fp
[i
], 0);
1886 LOAD_ERROR_EXIT(bp
, load_error4
);
1889 rc
= bnx2x_init_rss_pf(bp
);
1891 LOAD_ERROR_EXIT(bp
, load_error4
);
1893 /* Now when Clients are configured we are ready to work */
1894 bp
->state
= BNX2X_STATE_OPEN
;
1896 /* Configure a ucast MAC */
1897 rc
= bnx2x_set_eth_mac(bp
, true);
1899 LOAD_ERROR_EXIT(bp
, load_error4
);
1901 if (bp
->pending_max
) {
1902 bnx2x_update_max_mf_config(bp
, bp
->pending_max
);
1903 bp
->pending_max
= 0;
1907 bnx2x_initial_phy_init(bp
, load_mode
);
1909 /* Start fast path */
1911 /* Initialize Rx filter. */
1912 netif_addr_lock_bh(bp
->dev
);
1913 bnx2x_set_rx_mode(bp
->dev
);
1914 netif_addr_unlock_bh(bp
->dev
);
1917 switch (load_mode
) {
1919 /* Tx queue should be only reenabled */
1920 netif_tx_wake_all_queues(bp
->dev
);
1924 netif_tx_start_all_queues(bp
->dev
);
1925 smp_mb__after_clear_bit();
1929 bp
->state
= BNX2X_STATE_DIAG
;
1937 bnx2x_update_drv_flags(bp
, 1 << DRV_FLAGS_DCB_CONFIGURED
, 0);
1939 bnx2x__link_status_update(bp
);
1941 /* start the timer */
1942 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
1945 /* re-read iscsi info */
1946 bnx2x_get_iscsi_info(bp
);
1947 bnx2x_setup_cnic_irq_info(bp
);
1948 if (bp
->state
== BNX2X_STATE_OPEN
)
1949 bnx2x_cnic_notify(bp
, CNIC_CTL_START_CMD
);
1951 bnx2x_inc_load_cnt(bp
);
1953 /* Wait for all pending SP commands to complete */
1954 if (!bnx2x_wait_sp_comp(bp
, ~0x0UL
)) {
1955 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
1956 bnx2x_nic_unload(bp
, UNLOAD_CLOSE
);
1960 bnx2x_dcbx_init(bp
);
1963 #ifndef BNX2X_STOP_ON_ERROR
1966 /* Disable Timer scan */
1967 REG_WR(bp
, TM_REG_EN_LINEAR0_TIMER
+ port
*4, 0);
1970 bnx2x_int_disable_sync(bp
, 1);
1972 /* Clean queueable objects */
1973 bnx2x_squeeze_objects(bp
);
1975 /* Free SKBs, SGEs, TPA pool and driver internals */
1976 bnx2x_free_skbs(bp
);
1977 for_each_rx_queue(bp
, i
)
1978 bnx2x_free_rx_sge_range(bp
, bp
->fp
+ i
, NUM_RX_SGE
);
1983 if (!BP_NOMCP(bp
)) {
1984 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP
, 0);
1985 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
, 0);
1990 bnx2x_napi_disable(bp
);
1995 #endif /* ! BNX2X_STOP_ON_ERROR */
1998 /* must be called with rtnl_lock */
1999 int bnx2x_nic_unload(struct bnx2x
*bp
, int unload_mode
)
2002 bool global
= false;
2004 if ((bp
->state
== BNX2X_STATE_CLOSED
) ||
2005 (bp
->state
== BNX2X_STATE_ERROR
)) {
2006 /* We can get here if the driver has been unloaded
2007 * during parity error recovery and is either waiting for a
2008 * leader to complete or for other functions to unload and
2009 * then ifdown has been issued. In this case we want to
2010 * unload and let other functions to complete a recovery
2013 bp
->recovery_state
= BNX2X_RECOVERY_DONE
;
2015 bnx2x_release_leader_lock(bp
);
2018 DP(NETIF_MSG_HW
, "Releasing a leadership...\n");
2024 * It's important to set the bp->state to the value different from
2025 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2026 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2028 bp
->state
= BNX2X_STATE_CLOSING_WAIT4_HALT
;
2032 bnx2x_tx_disable(bp
);
2035 bnx2x_cnic_notify(bp
, CNIC_CTL_STOP_CMD
);
2038 bp
->rx_mode
= BNX2X_RX_MODE_NONE
;
2040 del_timer_sync(&bp
->timer
);
2042 /* Set ALWAYS_ALIVE bit in shmem */
2043 bp
->fw_drv_pulse_wr_seq
|= DRV_PULSE_ALWAYS_ALIVE
;
2045 bnx2x_drv_pulse(bp
);
2047 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
2049 /* Cleanup the chip if needed */
2050 if (unload_mode
!= UNLOAD_RECOVERY
)
2051 bnx2x_chip_cleanup(bp
, unload_mode
);
2053 /* Send the UNLOAD_REQUEST to the MCP */
2054 bnx2x_send_unload_req(bp
, unload_mode
);
2057 * Prevent transactions to host from the functions on the
2058 * engine that doesn't reset global blocks in case of global
2059 * attention once gloabl blocks are reset and gates are opened
2060 * (the engine which leader will perform the recovery
2063 if (!CHIP_IS_E1x(bp
))
2064 bnx2x_pf_disable(bp
);
2066 /* Disable HW interrupts, NAPI */
2067 bnx2x_netif_stop(bp
, 1);
2072 /* Report UNLOAD_DONE to MCP */
2073 bnx2x_send_unload_done(bp
);
2077 * At this stage no more interrupts will arrive so we may safly clean
2078 * the queueable objects here in case they failed to get cleaned so far.
2080 bnx2x_squeeze_objects(bp
);
2082 /* There should be no more pending SP commands at this stage */
2087 /* Free SKBs, SGEs, TPA pool and driver internals */
2088 bnx2x_free_skbs(bp
);
2089 for_each_rx_queue(bp
, i
)
2090 bnx2x_free_rx_sge_range(bp
, bp
->fp
+ i
, NUM_RX_SGE
);
2094 bp
->state
= BNX2X_STATE_CLOSED
;
2096 /* Check if there are pending parity attentions. If there are - set
2097 * RECOVERY_IN_PROGRESS.
2099 if (bnx2x_chk_parity_attn(bp
, &global
, false)) {
2100 bnx2x_set_reset_in_progress(bp
);
2102 /* Set RESET_IS_GLOBAL if needed */
2104 bnx2x_set_reset_global(bp
);
2108 /* The last driver must disable a "close the gate" if there is no
2109 * parity attention or "process kill" pending.
2111 if (!bnx2x_dec_load_cnt(bp
) && bnx2x_reset_is_done(bp
, BP_PATH(bp
)))
2112 bnx2x_disable_close_the_gate(bp
);
2117 int bnx2x_set_power_state(struct bnx2x
*bp
, pci_power_t state
)
2121 /* If there is no power capability, silently succeed */
2123 DP(NETIF_MSG_HW
, "No power capability. Breaking.\n");
2127 pci_read_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
, &pmcsr
);
2131 pci_write_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
,
2132 ((pmcsr
& ~PCI_PM_CTRL_STATE_MASK
) |
2133 PCI_PM_CTRL_PME_STATUS
));
2135 if (pmcsr
& PCI_PM_CTRL_STATE_MASK
)
2136 /* delay required during transition out of D3hot */
2141 /* If there are other clients above don't
2142 shut down the power */
2143 if (atomic_read(&bp
->pdev
->enable_cnt
) != 1)
2145 /* Don't shut down the power for emulation and FPGA */
2146 if (CHIP_REV_IS_SLOW(bp
))
2149 pmcsr
&= ~PCI_PM_CTRL_STATE_MASK
;
2153 pmcsr
|= PCI_PM_CTRL_PME_ENABLE
;
2155 pci_write_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
,
2158 /* No more memory access after this point until
2159 * device is brought back to D0.
2170 * net_device service functions
2172 int bnx2x_poll(struct napi_struct
*napi
, int budget
)
2176 struct bnx2x_fastpath
*fp
= container_of(napi
, struct bnx2x_fastpath
,
2178 struct bnx2x
*bp
= fp
->bp
;
2181 #ifdef BNX2X_STOP_ON_ERROR
2182 if (unlikely(bp
->panic
)) {
2183 napi_complete(napi
);
2188 for_each_cos_in_tx_queue(fp
, cos
)
2189 if (bnx2x_tx_queue_has_work(&fp
->txdata
[cos
]))
2190 bnx2x_tx_int(bp
, &fp
->txdata
[cos
]);
2193 if (bnx2x_has_rx_work(fp
)) {
2194 work_done
+= bnx2x_rx_int(fp
, budget
- work_done
);
2196 /* must not complete if we consumed full budget */
2197 if (work_done
>= budget
)
2201 /* Fall out from the NAPI loop if needed */
2202 if (!(bnx2x_has_rx_work(fp
) || bnx2x_has_tx_work(fp
))) {
2204 /* No need to update SB for FCoE L2 ring as long as
2205 * it's connected to the default SB and the SB
2206 * has been updated when NAPI was scheduled.
2208 if (IS_FCOE_FP(fp
)) {
2209 napi_complete(napi
);
2214 bnx2x_update_fpsb_idx(fp
);
2215 /* bnx2x_has_rx_work() reads the status block,
2216 * thus we need to ensure that status block indices
2217 * have been actually read (bnx2x_update_fpsb_idx)
2218 * prior to this check (bnx2x_has_rx_work) so that
2219 * we won't write the "newer" value of the status block
2220 * to IGU (if there was a DMA right after
2221 * bnx2x_has_rx_work and if there is no rmb, the memory
2222 * reading (bnx2x_update_fpsb_idx) may be postponed
2223 * to right before bnx2x_ack_sb). In this case there
2224 * will never be another interrupt until there is
2225 * another update of the status block, while there
2226 * is still unhandled work.
2230 if (!(bnx2x_has_rx_work(fp
) || bnx2x_has_tx_work(fp
))) {
2231 napi_complete(napi
);
2232 /* Re-enable interrupts */
2234 "Update index to %d\n", fp
->fp_hc_idx
);
2235 bnx2x_ack_sb(bp
, fp
->igu_sb_id
, USTORM_ID
,
2236 le16_to_cpu(fp
->fp_hc_idx
),
2246 /* we split the first BD into headers and data BDs
2247 * to ease the pain of our fellow microcode engineers
2248 * we use one mapping for both BDs
2249 * So far this has only been observed to happen
2250 * in Other Operating Systems(TM)
2252 static noinline u16
bnx2x_tx_split(struct bnx2x
*bp
,
2253 struct bnx2x_fp_txdata
*txdata
,
2254 struct sw_tx_bd
*tx_buf
,
2255 struct eth_tx_start_bd
**tx_bd
, u16 hlen
,
2256 u16 bd_prod
, int nbd
)
2258 struct eth_tx_start_bd
*h_tx_bd
= *tx_bd
;
2259 struct eth_tx_bd
*d_tx_bd
;
2261 int old_len
= le16_to_cpu(h_tx_bd
->nbytes
);
2263 /* first fix first BD */
2264 h_tx_bd
->nbd
= cpu_to_le16(nbd
);
2265 h_tx_bd
->nbytes
= cpu_to_le16(hlen
);
2267 DP(NETIF_MSG_TX_QUEUED
, "TSO split header size is %d "
2268 "(%x:%x) nbd %d\n", h_tx_bd
->nbytes
, h_tx_bd
->addr_hi
,
2269 h_tx_bd
->addr_lo
, h_tx_bd
->nbd
);
2271 /* now get a new data BD
2272 * (after the pbd) and fill it */
2273 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
2274 d_tx_bd
= &txdata
->tx_desc_ring
[bd_prod
].reg_bd
;
2276 mapping
= HILO_U64(le32_to_cpu(h_tx_bd
->addr_hi
),
2277 le32_to_cpu(h_tx_bd
->addr_lo
)) + hlen
;
2279 d_tx_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
2280 d_tx_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
2281 d_tx_bd
->nbytes
= cpu_to_le16(old_len
- hlen
);
2283 /* this marks the BD as one that has no individual mapping */
2284 tx_buf
->flags
|= BNX2X_TSO_SPLIT_BD
;
2286 DP(NETIF_MSG_TX_QUEUED
,
2287 "TSO split data size is %d (%x:%x)\n",
2288 d_tx_bd
->nbytes
, d_tx_bd
->addr_hi
, d_tx_bd
->addr_lo
);
2291 *tx_bd
= (struct eth_tx_start_bd
*)d_tx_bd
;
2296 static inline u16
bnx2x_csum_fix(unsigned char *t_header
, u16 csum
, s8 fix
)
2299 csum
= (u16
) ~csum_fold(csum_sub(csum
,
2300 csum_partial(t_header
- fix
, fix
, 0)));
2303 csum
= (u16
) ~csum_fold(csum_add(csum
,
2304 csum_partial(t_header
, -fix
, 0)));
2306 return swab16(csum
);
2309 static inline u32
bnx2x_xmit_type(struct bnx2x
*bp
, struct sk_buff
*skb
)
2313 if (skb
->ip_summed
!= CHECKSUM_PARTIAL
)
2317 if (vlan_get_protocol(skb
) == htons(ETH_P_IPV6
)) {
2319 if (ipv6_hdr(skb
)->nexthdr
== IPPROTO_TCP
)
2320 rc
|= XMIT_CSUM_TCP
;
2324 if (ip_hdr(skb
)->protocol
== IPPROTO_TCP
)
2325 rc
|= XMIT_CSUM_TCP
;
2329 if (skb_is_gso_v6(skb
))
2330 rc
|= XMIT_GSO_V6
| XMIT_CSUM_TCP
| XMIT_CSUM_V6
;
2331 else if (skb_is_gso(skb
))
2332 rc
|= XMIT_GSO_V4
| XMIT_CSUM_V4
| XMIT_CSUM_TCP
;
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
					skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
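/*
 * Illustrative sketch (added for clarity, not part of the original driver):
 * the sliding-window test performed by bnx2x_pkt_req_lin() above, reduced to
 * plain arrays.  Given the size of the linear part and of each fragment,
 * return true when some window of "wnd_size" consecutive buffers carries
 * less than one MSS, i.e. the firmware could be asked to build a segment out
 * of too many BDs.  Name and free-standing form are ours; the helper is not
 * used by the driver.
 */
static inline bool bnx2x_example_lso_wnd_check(u32 first_bd_sz,
					       const u32 *frag_sz,
					       int nr_frags, int wnd_size,
					       u32 lso_mss)
{
	u32 wnd_sum = first_bd_sz;
	int i;

	/* first window: linear part plus the first (wnd_size - 1) fragments */
	for (i = 0; i < wnd_size - 1 && i < nr_frags; i++)
		wnd_sum += frag_sz[i];
	if (first_bd_sz > 0) {
		if (wnd_sum < lso_mss)
			return true;
		wnd_sum -= first_bd_sz;
	}

	/* slide the window one fragment at a time */
	for (i = 0; i <= nr_frags - wnd_size; i++) {
		wnd_sum += frag_sz[i + wnd_size - 1];
		if (wnd_sum < lso_mss)
			return true;
		wnd_sum -= frag_sz[i];
	}
	return false;
}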
static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
					u32 xmit_type)
{
	*parsing_data |= (skb_shinfo(skb)->gso_size <<
			  ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
			 ETH_TX_PARSE_BD_E2_LSO_MSS;
	if ((xmit_type & XMIT_GSO_V6) &&
	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}
/**
 * bnx2x_set_pbd_gso - update PBD in GSO case.
 *
 * @skb:	packet skb
 * @pbd:	parse BD
 * @xmit_type:	xmit flags
 */
static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
				     struct eth_tx_parse_bd_e1x *pbd,
				     u32 xmit_type)
{
	pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
	pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
	pbd->tcp_flags = pbd_tcp_flags(skb);

	if (xmit_type & XMIT_GSO_V4) {
		pbd->ip_id = swab16(ip_hdr(skb)->id);
		pbd->tcp_pseudo_csum =
			swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						  ip_hdr(skb)->daddr,
						  0, IPPROTO_TCP, 0));

	} else
		pbd->tcp_pseudo_csum =
			swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						&ipv6_hdr(skb)->daddr,
						0, IPPROTO_TCP, 0));

	pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
}
/**
 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
 *
 * @bp:			driver handle
 * @skb:		packet skb
 * @parsing_data:	data to be updated
 * @xmit_type:		xmit flags
 */
static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
				       u32 *parsing_data, u32 xmit_type)
{
	*parsing_data |=
		((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
		 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
		ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;

	if (xmit_type & XMIT_CSUM_TCP) {
		*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;

		return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
	} else
		/* We support checksum offload for TCP and UDP only.
		 * No need to pass the UDP header length - it's a constant.
		 */
		return skb_transport_header(skb) +
				sizeof(struct udphdr) - skb->data;
}
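/*
 * Worked example (added for clarity, not part of the original driver
 * comments): for an untagged TCPv4 frame with a 20-byte IP header and a
 * 20-byte TCP header, skb_transport_header() - skb->data is 34 bytes, so the
 * TCP start offset written to the PBD is 17 words, and the returned header
 * length is 34 + 20 = 54 bytes.
 */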
static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
	struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
{
	tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

	if (xmit_type & XMIT_CSUM_V4)
		tx_start_bd->bd_flags.as_bitfield |=
					ETH_TX_BD_FLAGS_IP_CSUM;
	else
		tx_start_bd->bd_flags.as_bitfield |=
					ETH_TX_BD_FLAGS_IPV6;

	if (!(xmit_type & XMIT_CSUM_TCP))
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
}
/**
 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
 *
 * @bp:		driver handle
 * @skb:	packet skb
 * @pbd:	parse BD to be updated
 * @xmit_type:	xmit flags
 */
static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
	struct eth_tx_parse_bd_e1x *pbd,
	u32 xmit_type)
{
	u8 hlen = (skb_network_header(skb) - skb->data) >> 1;

	/* for now NS flag is not used in Linux */
	pbd->global_data =
		(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
			 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));

	pbd->ip_hlen_w = (skb_transport_header(skb) -
			skb_network_header(skb)) >> 1;

	hlen += pbd->ip_hlen_w;

	/* We support checksum offload for TCP and UDP only */
	if (xmit_type & XMIT_CSUM_TCP)
		hlen += tcp_hdrlen(skb) / 2;
	else
		hlen += sizeof(struct udphdr) / 2;

	pbd->total_hlen_w = cpu_to_le16(hlen);
	hlen = hlen*2;

	if (xmit_type & XMIT_CSUM_TCP) {
		pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

	} else {
		s8 fix = SKB_CS_OFF(skb); /* signed! */

		DP(NETIF_MSG_TX_QUEUED,
		   "hlen %d  fix %d  csum before fix %x\n",
		   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));

		/* HW bug: fixup the CSUM */
		pbd->tcp_pseudo_csum =
			bnx2x_csum_fix(skb_transport_header(skb),
				       SKB_CS(skb), fix);

		DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
		   pbd->tcp_pseudo_csum);
	}

	return hlen;
}
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct bnx2x_fp_txdata *txdata;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd, *first_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
	u32 pbd_e2_parsing_data = 0;
	u16 pkt_prod, bd_prod;
	int nbd, txq_index, fp_index, txdata_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;
	struct ethhdr *eth;
	u8 mac_type = UNICAST_ADDRESS;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	txq_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, txq_index);

	BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);

	/* decode the fastpath index and the cos index from the txq */
	fp_index = TXQ_TO_FP(txq_index);
	txdata_index = TXQ_TO_COS(txq_index);

#ifdef BCM_CNIC
	/*
	 * Override the above for the FCoE queue:
	 *   - FCoE fp entry is right after the ETH entries.
	 *   - FCoE L2 queue uses bp->txdata[0] only.
	 */
	if (unlikely(!NO_FCOE(bp) && (txq_index ==
				      bnx2x_fcoe_tx(bp, txq_index)))) {
		fp_index = FCOE_IDX;
		txdata_index = 0;
	}
#endif

	/* enable this debug print to view the transmission queue being used
	DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d\n",
	   txq_index, fp_index, txdata_index); */

	/* locate the fastpath and the txdata */
	fp = &bp->fp[fp_index];
	txdata = &fp->txdata[txdata_index];

	/* enable this debug print to view the transmission details
	DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d"
			" tx_data ptr %p fp pointer %p\n",
	   txdata->cid, fp_index, txdata_index, txdata, fp); */

	if (unlikely(bnx2x_tx_avail(bp, txdata) <
		     (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x  protocol %x  "
				"protocol(%x,%x) gso type %x  xmit_type %x\n",
	   txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

	eth = (struct ethhdr *)skb->data;

	/* set flag according to packet type (UNICAST_ADDRESS is default)*/
	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
		if (is_broadcast_ether_addr(eth->h_dest))
			mac_type = BROADCAST_ADDRESS;
		else
			mac_type = MULTICAST_ADDRESS;
	}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif
	/* Map skb linear data for DMA */
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
		   "silently dropping this SKB\n");
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pbd sizes are in words - NOT DWORDS!
	*/

	/* get current pkt produced now - advance it just before sending packet
	 * since mapping of pages may fail and cause packet to be dropped
	 */
	pkt_prod = txdata->tx_pkt_prod;
	bd_prod = TX_BD(txdata->tx_bd_prod);

	/* get a tx_buf and first BD
	 * tx_start_bd may be changed during SPLIT,
	 * but first_bd will always stay first
	 */
	tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
	first_bd = tx_start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
		 mac_type);

	/* header nbd */
	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);

	/* remember the first BD of the packet */
	tx_buf->first_bd = txdata->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);

	if (vlan_tx_tag_present(skb)) {
		tx_start_bd->vlan_or_ethertype =
		    cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |=
		    (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
	} else
		tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	if (xmit_type & XMIT_CSUM)
		bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);

	if (!CHIP_IS_E1x(bp)) {
		pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
		/* Set PBD in checksum offload case */
		if (xmit_type & XMIT_CSUM)
			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
						     &pbd_e2_parsing_data,
						     xmit_type);
		if (IS_MF_SI(bp)) {
			/*
			 * fill in the MAC addresses in the PBD - for local
			 * switching
			 */
			bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
					      &pbd_e2->src_mac_addr_mid,
					      &pbd_e2->src_mac_addr_lo,
					      eth->h_source);
			bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
					      &pbd_e2->dst_mac_addr_mid,
					      &pbd_e2->dst_mac_addr_lo,
					      eth->h_dest);
		}
	} else {
		pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
		memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
		/* Set PBD in checksum offload case */
		if (xmit_type & XMIT_CSUM)
			hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);

	}

	/* Setup the data pointer of the first BD of the packet */
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_start_bd->vlan_or_ethertype));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
						 &tx_start_bd, hlen,
						 bd_prod, ++nbd);
		if (!CHIP_IS_E1x(bp))
			bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
					     xmit_type);
		else
			bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
	}

	/* Set the PBD's parsing_data field if not zero
	 * (for the chips newer than 57711).
	 */
	if (pbd_e2_parsing_data)
		pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);

	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	/* Handle fragmented skb */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
			unsigned int pkts_compl = 0, bytes_compl = 0;

			DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
						"dropping packet...\n");

			/* we need to unmap all buffers already mapped
			 * for this SKB;
			 * first_bd->nbd need to be properly updated
			 * before call to bnx2x_free_tx_pkt
			 */
			first_bd->nbd = cpu_to_le16(nbd);
			bnx2x_free_tx_pkt(bp, txdata,
					  TX_BD(txdata->tx_pkt_prod),
					  &pkts_compl, &bytes_compl);
			return NETDEV_TX_OK;
		}

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
		le16_add_cpu(&pkt_size, skb_frag_size(frag));
		nbd++;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	/* update with actual num BDs */
	first_bd->nbd = cpu_to_le16(nbd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	/* total_pkt_bytes should be set on the first data BD if
	 * it's not an LSO packet and there is more than one
	 * data BD. In this case pkt_size is limited by an MTU value.
	 * However we prefer to set it for an LSO packet (while we don't
	 * have to) in order to save some CPU cycles in a non-LSO
	 * case, when we much more care about them.
	 */
	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd_e1x)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
		   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
		   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
		   le16_to_cpu(pbd_e1x->total_hlen_w));
	if (pbd_e2)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
		   pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
		   pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
		   pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
		   pbd_e2->parsing_data);
	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	netdev_tx_sent_queue(txq, skb->len);

	txdata->tx_pkt_prod++;
	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes packets must have BDs.
	 */
	wmb();

	txdata->tx_db.data.prod += nbd;
	barrier();

	DOORBELL(bp, txdata->cid, txdata->tx_db.raw);

	mmiowb();

	txdata->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->bd_tx_cons
		 */
		smp_mb();

		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	txdata->tx_pkt++;

	return NETDEV_TX_OK;
}
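/*
 * Note (added for clarity, not part of the original driver comments): the
 * BD chain built above is always
 *
 *	start BD -> parse BD (e1x or e2) -> [TSO split data BD] -> frag BDs
 *
 * "nbd" starts at 2 (start + parse), grows by one for the optional split BD
 * and by one per mapped fragment, and is written into first_bd->nbd before
 * the doorbell; the doorbell producer additionally counts the next-page BD
 * when the packet wraps a BD page.
 */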
/**
 * bnx2x_setup_tc - routine to configure net_device for multi tc
 *
 * @netdev: net device to configure
 * @tc: number of traffic classes to enable
 *
 * callback connected to the ndo_setup_tc function pointer
 */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
{
	int cos, prio, count, offset;
	struct bnx2x *bp = netdev_priv(dev);

	/* setup tc must be called under rtnl lock */
	ASSERT_RTNL();

	/* no traffic classes requested. aborting */
	if (!num_tc) {
		netdev_reset_tc(dev);
		return 0;
	}

	/* requested to support too many traffic classes */
	if (num_tc > bp->max_cos) {
		DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
				     " requested: %d. max supported is %d\n",
				     num_tc, bp->max_cos);
		return -EINVAL;
	}

	/* declare amount of supported traffic classes */
	if (netdev_set_num_tc(dev, num_tc)) {
		DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes\n",
		   num_tc);
		return -EINVAL;
	}

	/* configure priority to traffic class mapping */
	for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
		netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
		DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n",
		   prio, bp->prio_to_cos[prio]);
	}

	/* Use this configuration to differentiate tc0 from other COSes
	   This can be used for ets or pfc, and save the effort of setting
	   up a multi-class queueing discipline or negotiating DCBX with a switch
	netdev_set_prio_tc_map(dev, 0, 0);
	DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
	for (prio = 1; prio < 16; prio++) {
		netdev_set_prio_tc_map(dev, prio, 1);
		DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
	} */

	/* configure traffic class to transmission queue mapping */
	for (cos = 0; cos < bp->max_cos; cos++) {
		count = BNX2X_NUM_ETH_QUEUES(bp);
		offset = cos * MAX_TXQS_PER_COS;
		netdev_set_tc_queue(dev, cos, count, offset);
		DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d\n",
		   cos, offset, count);
	}

	return 0;
}
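/*
 * Usage note (added for clarity, not part of the original driver comments):
 * this callback is typically reached through the mqprio qdisc, e.g. an
 * illustrative "tc qdisc add dev ethX root mqprio num_tc 3 ..." command,
 * which ends up invoking ndo_setup_tc() with the requested number of
 * traffic classes.
 */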
/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data))
		return -EINVAL;

#ifdef BCM_CNIC
	if (IS_MF_ISCSI_SD(bp) && !is_zero_ether_addr(addr->sa_data))
		return -EINVAL;
#endif

	if (netif_running(dev)) {
		rc = bnx2x_set_eth_mac(bp, false);
		if (rc)
			return rc;
	}

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (netif_running(dev))
		rc = bnx2x_set_eth_mac(bp, true);

	return rc;
}
static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
{
	union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
	struct bnx2x_fastpath *fp = &bp->fp[fp_index];
	u8 cos;

	/* Common */
#ifdef BCM_CNIC
	if (IS_FCOE_IDX(fp_index)) {
		memset(sb, 0, sizeof(union host_hc_status_block));
		fp->status_blk_mapping = 0;

	} else {
#endif
		/* status blocks */
		if (!CHIP_IS_E1x(bp))
			BNX2X_PCI_FREE(sb->e2_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_FREE(sb->e1x_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e1x));
#ifdef BCM_CNIC
	}
#endif

	/* Rx */
	if (!skip_rx_queue(bp, fp_index)) {
		bnx2x_free_rx_bds(fp);

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
			       bnx2x_fp(bp, fp_index, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
			       bnx2x_fp(bp, fp_index, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
			       bnx2x_fp(bp, fp_index, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}

	/* Tx */
	if (!skip_tx_queue(bp, fp_index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];

			DP(BNX2X_MSG_SP,
			   "freeing tx memory of fp %d cos %d cid %d\n",
			   fp_index, cos, txdata->cid);

			BNX2X_FREE(txdata->tx_buf_ring);
			BNX2X_PCI_FREE(txdata->tx_desc_ring,
				       txdata->tx_desc_mapping,
				       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
		}
	}
	/* end of fastpath */
}

void bnx2x_free_fp_mem(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}
static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
{
	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
	if (!CHIP_IS_E1x(bp)) {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e2_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e2_sb->sb.running_index;
	} else {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e1x_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e1x_sb->sb.running_index;
	}
}
static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
{
	union host_hc_status_block *sb;
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int ring_size = 0;
	u8 cos;
	int rx_ring_size = 0;

#ifdef BCM_CNIC
	if (!bp->rx_ring_size && IS_MF_ISCSI_SD(bp)) {
		rx_ring_size = MIN_RX_SIZE_NONTPA;
		bp->rx_ring_size = rx_ring_size;
	} else
#endif
	if (!bp->rx_ring_size) {

		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);

		/* allocate at least number of buffers required by FW */
		rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
				     MIN_RX_SIZE_TPA, rx_ring_size);

		bp->rx_ring_size = rx_ring_size;
	} else /* if rx_ring_size specified - use it */
		rx_ring_size = bp->rx_ring_size;

	/* Common */
	sb = &bnx2x_fp(bp, index, status_blk);
#ifdef BCM_CNIC
	if (!IS_FCOE_IDX(index)) {
#endif
		/* status blocks */
		if (!CHIP_IS_E1x(bp))
			BNX2X_PCI_ALLOC(sb->e2_sb,
				&bnx2x_fp(bp, index, status_blk_mapping),
				sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_ALLOC(sb->e1x_sb,
				&bnx2x_fp(bp, index, status_blk_mapping),
				sizeof(struct host_hc_status_block_e1x));
#ifdef BCM_CNIC
	}
#endif

	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
	 * set shortcuts for it.
	 */
	if (!IS_FCOE_IDX(index))
		set_sb_shortcuts(bp, index);

	/* Tx */
	if (!skip_tx_queue(bp, index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];

			DP(BNX2X_MSG_SP, "allocating tx memory of "
					 "fp %d cos %d\n",
			   index, cos);

			BNX2X_ALLOC(txdata->tx_buf_ring,
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
			BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
				&txdata->tx_desc_mapping,
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
		}
	}

	/* Rx */
	if (!skip_rx_queue(bp, index)) {
		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
				&bnx2x_fp(bp, index, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
				&bnx2x_fp(bp, index, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
				&bnx2x_fp(bp, index, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
		/* RX BD ring */
		bnx2x_set_next_page_rx_bd(fp);

		/* CQ ring */
		bnx2x_set_next_page_rx_cq(fp);

		/* BDs */
		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
		if (ring_size < rx_ring_size)
			goto alloc_mem_err;
	}

	return 0;

/* handles low memory cases */
alloc_mem_err:
	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
		  index, ring_size);
	/* FW will drop all packets if queue is not big enough,
	 * In these cases we disable the queue
	 * Min size is different for OOO, TPA and non-TPA queues
	 */
	if (ring_size < (fp->disable_tpa ?
				MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
		/* release memory allocated for this queue */
		bnx2x_free_fp_mem_at(bp, index);
		return -ENOMEM;
	}
	return 0;
}
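/*
 * Worked example (added for clarity, not part of the original driver
 * comments): with no explicit rx_ring_size configured, the ring is sized as
 * MAX_RX_AVAIL divided by the number of RX queues and then raised, if
 * needed, to the minimum the firmware accepts (MIN_RX_SIZE_TPA, or
 * MIN_RX_SIZE_NONTPA when TPA is disabled for the queue).
 */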
int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
	int i;

	/**
	 * 1. Allocate FP for leading - fatal if error
	 * 2. {CNIC} Allocate FCoE FP - fatal if error
	 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
	 * 4. Allocate RSS - fix number of queues if error
	 */

	/* leading */
	if (bnx2x_alloc_fp_mem_at(bp, 0))
		return -ENOMEM;

#ifdef BCM_CNIC
	if (!NO_FCOE(bp))
		/* FCoE */
		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
			/* we will fail load process instead of mark
			 * NO_FCOE_FLAG
			 */
			return -ENOMEM;
#endif

	/* RSS */
	for_each_nondefault_eth_queue(bp, i)
		if (bnx2x_alloc_fp_mem_at(bp, i))
			break;

	/* handle memory failures */
	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;

#ifdef BCM_CNIC
		/**
		 * move non eth FPs next to last eth FP
		 * must be done in that order
		 * FCOE_IDX < FWD_IDX < OOO_IDX
		 */

		/* move FCoE fp even NO_FCOE_FLAG is on */
		bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
#endif
		bp->num_queues -= delta;
		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
			  bp->num_queues + delta, bp->num_queues);
	}

	return 0;
}
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
	kfree(bp->fp);
	kfree(bp->msix_table);
	kfree(bp->ilt);
}

int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;
	int msix_table_size = 0;

	/*
	 * The biggest MSI-X table we might need is as a maximum number of fast
	 * path IGU SBs plus default SB (for PF).
	 */
	msix_table_size = bp->igu_sb_cnt + 1;

	/* fp array: RSS plus CNIC related L2 queues */
	fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
		     sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	bp->fp = fp;

	/* msix table */
	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;
alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}
int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}
int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;
	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {

		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}

int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
	/*
	 * The selected active PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to reverse
	 * the configuration
	 */

	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}
#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		break;
	case NETDEV_FCOE_WWPN:
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
#endif
/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		pr_err("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	return bnx2x_reload_if_running(dev);
}

netdev_features_t bnx2x_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
		features &= ~NETIF_F_LRO;

	return features;
}
int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 flags = bp->flags;
	bool bnx2x_reload = false;

	if (features & NETIF_F_LRO)
		flags |= TPA_ENABLE_FLAG;
	else
		flags &= ~TPA_ENABLE_FLAG;

	if (features & NETIF_F_LOOPBACK) {
		if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
			bp->link_params.loopback_mode = LOOPBACK_BMAC;
			bnx2x_reload = true;
		}
	} else {
		if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
			bp->link_params.loopback_mode = LOOPBACK_NONE;
			bnx2x_reload = true;
		}
	}

	if (flags ^ bp->flags) {
		bp->flags = flags;
		bnx2x_reload = true;
	}

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE)
			return bnx2x_reload_if_running(dev);
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	smp_mb__before_clear_bit();
	set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
	smp_mb__after_clear_bit();

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}

int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		pr_err("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	/* Since the chip was reset, clear the FW sequence number */
	bp->fw_seq = 0;
	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}

static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
					   u8 fw_sb_id, u8 sb_index,
					   u8 ticks)
{

	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
			  port, fw_sb_id, sb_index, ticks);
}

static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u16 flags = REG_RD16(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
			  port, fw_sb_id, sb_index, disable);
}

void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}
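/*
 * Note (added for clarity, not part of the original driver comments):
 * "usec / BNX2X_BTR" converts the requested coalescing interval into the
 * tick units of the CSTORM timeout field, and the final expression forces
 * "disable" to 1 either when the caller asked for it explicitly or when a
 * zero interval was requested.
 */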