/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/firmware.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
/**
 * bnx2x_bz_fp - zero content of the fastpath structure.
 *
 * @bp:		driver handle
 * @index:	fastpath index to be zeroed
 *
 * Makes sure the contents of the bp->fp[index].napi is kept
 * intact.
 */
static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	struct napi_struct orig_napi = fp->napi;
	/* bzero bnx2x_fastpath contents */
	memset(fp, 0, sizeof(*fp));

	/* Restore the NAPI object as it has been already initialized */
	fp->napi = orig_napi;

	fp->bp = bp;
	fp->index = index;
	if (IS_ETH_FP(fp))
		fp->max_cos = bp->max_cos;
	else
		/* Special queues support only one CoS */
		fp->max_cos = 1;

	/*
	 * set the tpa flag for each queue. The tpa flag determines the queue
	 * minimal size so it must be set prior to queue memory allocation
	 */
	fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);

#ifdef BCM_CNIC
	/* We don't want TPA on an FCoE L2 ring */
	if (IS_FCOE_FP(fp))
		fp->disable_tpa = 1;
#endif
}
/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi is kept
 * intact.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct napi_struct orig_napi = to_fp->napi;
	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* Restore the NAPI object as it has been already initialized */
	to_fp->napi = orig_napi;
}
int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {
		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
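
/* Walk the Tx completion ring from the software consumer up to the
 * hardware consumer, free every completed packet via bnx2x_free_tx_pkt()
 * and re-wake the netdev Tx queue once enough BDs become available.
 */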
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u  sw_cons %u "
				      "pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons);
		sw_cons++;
	}

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent reenabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty skb from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->skb->data,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_skb(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty skb from pool to prod */
	prod_rx_buf->skb = first_buf->skb;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
 *
 * @bp:			driver handle
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 */
static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
				    u16 len_on_bd)
{
	/*
	 * TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6)
		hdrs_len += sizeof(struct ipv6hdr);
	else /* IPv4 */
		hdrs_len += sizeof(struct iphdr);

	/* Check if there was a TCP timestamp; if there is, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	return len_on_bd - hdrs_len;
}
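
/* Attach the SGE pages of a completed aggregation to @skb as page
 * fragments, allocating replacement pages as we go; returns 0 on success
 * or a negative value if a replacement page could not be allocated.
 */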
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       u16 queue, struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	u16 len_on_bd = tpa_info->len_on_bd;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
					tpa_info->parsing_flags, len_on_bd);

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
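
/* Close a TPA aggregation: hand the aggregated skb (header skb plus SGE
 * page frags) to the stack via GRO, or drop it if the aggregation was
 * marked as failed or a replacement skb cannot be allocated.
 */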
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new skb */
	new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);

	if (likely(new_skb)) {
		prefetch(skb);
		prefetch(((char *)(skb)) + L1_CACHE_BYTES);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
			napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new skb in bin */
		rx_buf->skb = new_skb;

		return;
	}

drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	fp->eth_q_stats.rx_skb_alloc_failed++;
}
/* Set Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
					struct sk_buff *skb)
{
	/* Set Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->fast_path_cqe.status_flags &
	     ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
		skb->rxhash =
			le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
}
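
/* Rx fast-path handler: process up to @budget completions from the RCQ,
 * handling slow-path events, TPA start/stop completions and regular
 * packets, then update the BD/CQE producers for the FW.
 */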
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		struct eth_fast_path_rx_cqe *cqe_fp;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   " queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			prefetch(skb);

			if (!CQE_TYPE_FAST(cqe_fp_type)) {
#ifdef BNX2X_STOP_ON_ERROR
				/* sanity check */
				if (fp->disable_tpa &&
				    (CQE_TYPE_START(cqe_fp_type) ||
				     CQE_TYPE_STOP(cqe_fp_type)))
					BNX2X_ERR("START/STOP packet while "
						  "disable_tpa type %x\n",
						  CQE_TYPE(cqe_fp_type));
#endif

				if (CQE_TYPE_START(cqe_fp_type)) {
					u16 queue = cqe_fp->queue_index;
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod,
							cqe_fp);

					/* Set Toeplitz hash for LRO skb */
					bnx2x_set_skb_rxhash(bp, cqe, skb);

					goto next_rx;

				} else {
					u16 queue =
						cqe->end_agg_cqe.queue_index;
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					bnx2x_tpa_stop(bp, fp, queue,
						       &cqe->end_agg_cqe,
						       comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp, cqe_fp);
					goto next_cqe;
				}
			}
			/* non TPA */
			len = le16_to_cpu(cqe_fp->pkt_len);
			pad = cqe_fp->placement_offset;
			dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       DMA_FROM_DEVICE);
			prefetch(((char *)(skb)) + L1_CACHE_BYTES);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev, len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			/* Set Toeplitz hash for a non-LRO skb */
			bnx2x_set_skb_rxhash(bp, cqe, skb);

			skb_checksum_none_assert(skb);

			if (bp->dev->features & NETIF_F_RXCSUM) {

				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb,
					       le16_to_cpu(cqe_fp->vlan_tag));
		napi_gro_receive(&fp->napi, skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
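
/* Fast-path MSI-X interrupt handler: ack the status block with interrupts
 * disabled and let NAPI do the actual Rx/Tx work.
 */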
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
			 "[fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);

	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata[cos].tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}
/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}
/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static inline void bnx2x_fill_report_data(struct bnx2x *bp,
					  struct bnx2x_link_report_data *data)
{
	u16 line_speed = bnx2x_get_mf_speed(bp);

	memset(data, 0, sizeof(*data));

	/* Fill the report data: effective line speed */
	data->line_speed = line_speed;

	/* Link is down */
	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			  &data->link_report_flags);

	/* Full DUPLEX */
	if (bp->link_vars.duplex == DUPLEX_FULL)
		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);

	/* Rx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);

	/* Tx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
}
/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */
void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}
/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (!CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	/* We are going to report a new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC Link is Up, ");
		pr_cont("%d Mbps ", cur_data.line_speed);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			pr_cont("full duplex");
		else
			pr_cont("half duplex");

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				pr_cont(", receive ");
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					pr_cont("& transmit ");
			} else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	}
}
void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (!fp->disable_tpa) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < MAX_AGG_QS(bp); i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->skb = netdev_alloc_skb(bp->dev,
						       fp->rx_buf_size);
				if (!first_buf->skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
					BNX2X_ERR("was only able to allocate "
						  "%d rx sges\n", i);
					BNX2X_ERR("disabling TPA for "
						  "queue[%d]\n", j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    MAX_AGG_QS(bp));
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		if (CHIP_IS_E1(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}
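
/* Free every skb still sitting on the Tx rings of all queues and CoSes;
 * used on unload and on error paths.
 */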
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;
	u8 cos;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];

			u16 bd_cons = txdata->tx_bd_cons;
			u16 sw_prod = txdata->tx_pkt_prod;
			u16 sw_cons = txdata->tx_pkt_cons;

			while (sw_cons != sw_prod) {
				bd_cons = bnx2x_free_tx_pkt(bp, txdata,
							    TX_BD(sw_cons));
				sw_cons++;
			}
		}
	}
}
static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int i;

	/* ring wasn't allocated */
	if (fp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < NUM_RX_BD; i++) {
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL)
			continue;
		dma_unmap_single(&bp->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 fp->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->skb = NULL;
		dev_kfree_skb(skb);
	}
}
static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_free_rx_bds(fp);

		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
	}
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}
/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp:		driver handle
 * @nvecs:	number of vectors to be released
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{
	int i, offset = 0;

	if (nvecs == offset)
		return;
	free_irq(bp->msix_table[offset].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[offset].vector);
	offset++;
#ifdef BCM_CNIC
	if (nvecs == offset)
		return;
	offset++;
#endif

	for_each_eth_queue(bp, i) {
		if (nvecs == offset)
			return;
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
		   "irq\n", i, bp->msix_table[offset].vector);

		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
	}
}
void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG)
		bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
				     CNIC_PRESENT + 1);
	else if (bp->flags & USING_MSI_FLAG)
		free_irq(bp->pdev->irq, bp->dev);
	else
		free_irq(bp->pdev->irq, bp->dev);
}
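
/* Request MSI-X vectors: one for the slow path, one for CNIC when present
 * and one per ETH queue. If fewer vectors are available the number of
 * queues is reduced accordingly, otherwise the driver falls back to
 * MSI/INTx.
 */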
int bnx2x_enable_msix(struct bnx2x *bp)
{
	int msix_vec = 0, i, rc, req_cnt;

	bp->msix_table[msix_vec].entry = msix_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
	   bp->msix_table[0].entry);
	msix_vec++;

#ifdef BCM_CNIC
	bp->msix_table[msix_vec].entry = msix_vec;
	DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
	   bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
	msix_vec++;
#endif
	/* We need separate vectors for ETH queues only (not FCoE) */
	for_each_eth_queue(bp, i) {
		bp->msix_table[msix_vec].entry = msix_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", msix_vec, msix_vec, i);
		msix_vec++;
	}

	req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);

	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
		/* how many fewer vectors will we have? */
		int diff = req_cnt - rc;

		DP(NETIF_MSG_IFUP,
		   "Trying to use less MSI-X vectors: %d\n", rc);

		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);

		if (rc) {
			DP(NETIF_MSG_IFUP,
			   "MSI-X is not attainable rc %d\n", rc);
			return rc;
		}
		/*
		 * decrease number of queues by number of unallocated entries
		 */
		bp->num_queues -= diff;

		DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
				  bp->num_queues);
	} else if (rc) {
		/* fall to INTx if not enough memory */
		if (rc == -ENOMEM)
			bp->flags |= DISABLE_MSI_FLAG;
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 0;

	rc = request_irq(bp->msix_table[offset++].vector,
			 bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
			      bp->msix_table[offset].vector, rc);
			bnx2x_free_msix_irqs(bp, offset);
			return -EBUSY;
		}

		offset++;
	}

	i = BNX2X_NUM_ETH_QUEUES(bp);
	offset = 1 + CNIC_PRESENT;
	netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
	       " ... fp[%d] %d\n",
	       bp->msix_table[0].vector,
	       0, bp->msix_table[offset].vector,
	       i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}
int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}
static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	return rc;
}
static inline int bnx2x_setup_irqs(struct bnx2x *bp)
{
	int rc = 0;
	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc)
			return rc;
	} else {
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			return rc;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI  IRQ %d\n",
			       bp->pdev->irq);
		}
	}

	return 0;
}
static inline void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static inline void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

void bnx2x_netif_start(struct bnx2x *bp)
{
	if (netif_running(bp->dev)) {
		bnx2x_napi_enable(bp);
		bnx2x_int_enable(bp);
		if (bp->state == BNX2X_STATE_OPEN)
			netif_tx_wake_all_queues(bp->dev);
	}
}

void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
}
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BCM_CNIC
	if (!NO_FCOE(bp)) {
		struct ethhdr *hdr = (struct ethhdr *)skb->data;
		u16 ether_type = ntohs(hdr->h_proto);

		/* Skip VLAN tag if present */
		if (ether_type == ETH_P_8021Q) {
			struct vlan_ethhdr *vhdr =
				(struct vlan_ethhdr *)skb->data;

			ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
		}

		/* If ethertype is FCoE or FIP - use FCoE ring */
		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
			return bnx2x_fcoe_tx(bp, txq_index);
	}
#endif
	/* select a non-FCoE queue */
	return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
}
void bnx2x_set_num_queues(struct bnx2x *bp)
{
	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		bp->num_queues = 1;
		break;
	case ETH_RSS_MODE_REGULAR:
		bp->num_queues = bnx2x_calc_num_queues(bp);
		break;

	default:
		bp->num_queues = 1;
		break;
	}

	/* Add special queues */
	bp->num_queues += NON_ETH_CONTEXT_USE;
}
/**
 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
 *
 * @bp:		Driver handle
 *
 * We currently support at most 16 Tx queues for each CoS thus we will
 * allocate a multiple of 16 for ETH L2 rings according to the value of the
 * bp->max_cos.
 *
 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
 * index after all ETH L2 indices.
 *
 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
 * 16..31,...) with indices that are not coupled with any real Tx queue.
 *
 * The proper configuration of skb->queue_mapping is handled by
 * bnx2x_select_queue() and __skb_tx_hash().
 *
 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
 */
static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
{
	int rc, tx, rx;

	tx = MAX_TXQS_PER_COS * bp->max_cos;
	rx = BNX2X_NUM_ETH_QUEUES(bp);

/* account for fcoe queue */
#ifdef BCM_CNIC
	if (!NO_FCOE(bp)) {
		rx += FCOE_PRESENT;
		tx += FCOE_PRESENT;
	}
#endif

	rc = netif_set_real_num_tx_queues(bp->dev, tx);
	if (rc) {
		BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
		return rc;
	}
	rc = netif_set_real_num_rx_queues(bp->dev, rx);
	if (rc) {
		BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
		return rc;
	}

	DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n",
			  tx, rx);

	return rc;
}
static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
		if (IS_FCOE_IDX(i))
			/*
			 * Although there are no IP frames expected to arrive to
			 * this ring we still want to add an
			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
			 * overrun attack.
			 */
			fp->rx_buf_size =
				BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
				BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
		else
			fp->rx_buf_size =
				bp->dev->mtu + ETH_OVREHEAD +
				BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
	}
}
static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
{
	int i;
	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
	u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);

	/*
	 * Prepare the initial contents of the indirection table if RSS is
	 * enabled
	 */
	if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
		for (i = 0; i < sizeof(ind_table); i++)
			ind_table[i] =
				bp->fp->cl_id + (i % num_eth_queues);
	}

	/*
	 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
	 * per-port, so if explicit configuration is needed, do it only
	 * for a PMF.
	 *
	 * For 57712 and newer on the other hand it's a per-function
	 * configuration.
	 */
	return bnx2x_config_rss_pf(bp, ind_table,
				   bp->port.pmf || !CHIP_IS_E1x(bp));
}
int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
{
	struct bnx2x_config_rss_params params = {0};
	int i;

	/* Although RSS is meaningless when there is a single HW queue we
	 * still need it enabled in order to have HW Rx hash generated.
	 *
	 * if (!is_eth_multi(bp))
	 *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
	 */

	params.rss_obj = &bp->rss_conf_obj;

	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	/* RSS mode */
	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
		break;
	case ETH_RSS_MODE_REGULAR:
		__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
		break;
	case ETH_RSS_MODE_VLAN_PRI:
		__set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
		break;
	case ETH_RSS_MODE_E1HOV_PRI:
		__set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
		break;
	case ETH_RSS_MODE_IP_DSCP:
		__set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
		break;
	default:
		BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
		return -EINVAL;
	}

	/* If RSS is enabled */
	if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
		/* RSS configuration */
		__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);

		/* Hash bits */
		params.rss_result_mask = MULTI_MASK;

		memcpy(params.ind_table, ind_table, sizeof(params.ind_table));

		if (config_hash) {
			/* RSS keys */
			for (i = 0; i < sizeof(params.rss_key) / 4; i++)
				params.rss_key[i] = random32();

			__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
		}
	}

	return bnx2x_config_rss(bp, &params);
}
static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	struct bnx2x_func_state_params func_params = {0};

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_HW_INIT;

	func_params.params.hw_init.load_phase = load_code;

	return bnx2x_func_state_change(bp, &func_params);
}
/*
 * Cleans the objects that have internal lists without sending
 * ramrods. Should be run when interrupts are disabled.
 */
static void bnx2x_squeeze_objects(struct bnx2x *bp)
{
	int rc;
	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
	struct bnx2x_mcast_ramrod_params rparam = {0};
	struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;

	/***************** Cleanup MACs' object first *************************/

	/* Wait for completion of requested */
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	/* Perform a dry cleanup */
	__set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);

	/* Clean ETH primary MAC */
	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
	rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
				 &ramrod_flags);
	if (rc != 0)
		BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);

	/* Cleanup UC list */
	vlan_mac_flags = 0;
	__set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
				 &ramrod_flags);
	if (rc != 0)
		BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);

	/***************** Now clean mcast object *****************************/
	rparam.mcast_obj = &bp->mcast_obj;
	__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);

	/* Add a DEL command... */
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
	if (rc < 0)
		BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
			  "object: %d\n", rc);

	/* ...and wait until all pending commands are cleared */
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
	while (rc != 0) {
		if (rc < 0) {
			BNX2X_ERR("Failed to clean multi-cast object: %d\n",
				  rc);
			return;
		}

		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
	}
}
#ifndef BNX2X_STOP_ON_ERROR
#define LOAD_ERROR_EXIT(bp, label) \
	do { \
		(bp)->state = BNX2X_STATE_ERROR; \
		goto label; \
	} while (0)
#else
#define LOAD_ERROR_EXIT(bp, label) \
	do { \
		(bp)->state = BNX2X_STATE_ERROR; \
		(bp)->panic = 1; \
		return -EBUSY; \
	} while (0)
#endif
/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	int port = BP_PORT(bp);
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* Set the initial link reported state to link down */
	bnx2x_acquire_phy_lock(bp);
	memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
	__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		&bp->last_reported_link.link_report_flags);
	bnx2x_release_phy_lock(bp);

	/* must be called before memory allocation and HW init */
	bnx2x_ilt_set_info(bp);

	/*
	 * Zero fastpath structures preserving invariants like napi, which are
	 * allocated only once, fp index, max_cos, bp pointer.
	 * Also set fp->disable_tpa.
	 */
	for_each_queue(bp, i)
		bnx2x_bz_fp(bp, i);


	/* Set the receive queues buffer size */
	bnx2x_set_rx_buf_size(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	/* As long as bnx2x_alloc_mem() may possibly update
	 * bp->num_queues, bnx2x_set_real_num_queues() should always
	 * come after it.
	 */
	rc = bnx2x_set_real_num_queues(bp);
	if (rc) {
		BNX2X_ERR("Unable to set real_num_queues\n");
		LOAD_ERROR_EXIT(bp, load_error0);
	}

	/* configure multi cos mappings in kernel.
	 * this configuration may be overridden by a multi class queue
	 * discipline or by a dcbx negotiation result.
	 */
	bnx2x_setup_tc(bp->dev, bp->max_cos);

	bnx2x_napi_enable(bp);

	/* Send LOAD_REQUEST command to MCP
	 * Returns the type of LOAD command:
	 * if it is the first port to be initialized
	 * common blocks should be initialized, otherwise - not
	 */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			LOAD_ERROR_EXIT(bp, load_error1);
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			LOAD_ERROR_EXIT(bp, load_error1);
		}

	} else {
		int path = BP_PATH(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		load_count[path][0]++;
		load_count[path][1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		if (load_count[path][0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[path][1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
		bp->port.pmf = 1;
		/*
		 * We need the barrier to ensure the ordering between the
		 * writing to bp->port.pmf here and reading it from the
		 * bnx2x_periodic_task().
		 */
		smp_mb();
		queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
	} else
		bp->port.pmf = 0;

	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Init Function state controlling object */
	bnx2x__init_func_obj(bp);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		LOAD_ERROR_EXIT(bp, load_error2);
	}

	/* Connect to IRQs */
	rc = bnx2x_setup_irqs(bp);
	if (rc) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		LOAD_ERROR_EXIT(bp, load_error2);
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Init per-function objects */
	bnx2x_init_bp_objs(bp);

	if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
	    (bp->common.shmem2_base)) {
		if (SHMEM2_HAS(bp, dcc_support))
			SHMEM2_WR(bp, dcc_support,
				  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
				   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
	rc = bnx2x_func_start(bp);
	if (rc) {
		BNX2X_ERR("Function start failed!\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		LOAD_ERROR_EXIT(bp, load_error3);
	}

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			LOAD_ERROR_EXIT(bp, load_error3);
		}
	}

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		LOAD_ERROR_EXIT(bp, load_error3);
	}

#ifdef BCM_CNIC
	/* Enable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
#endif

	for_each_nondefault_queue(bp, i) {
		rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
		if (rc)
			LOAD_ERROR_EXIT(bp, load_error4);
	}

	rc = bnx2x_init_rss_pf(bp);
	if (rc)
		LOAD_ERROR_EXIT(bp, load_error4);

	/* Now when Clients are configured we are ready to work */
	bp->state = BNX2X_STATE_OPEN;

	/* Configure a ucast MAC */
	rc = bnx2x_set_eth_mac(bp, true);
	if (rc)
		LOAD_ERROR_EXIT(bp, load_error4);

	if (bp->pending_max) {
		bnx2x_update_max_mf_config(bp, bp->pending_max);
		bp->pending_max = 0;
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */

	/* Initialize Rx filter. */
	netif_addr_lock_bh(bp->dev);
	bnx2x_set_rx_mode(bp->dev);
	netif_addr_unlock_bh(bp->dev);

	/* Start the Tx */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		smp_mb__after_clear_bit();
		break;

	case LOAD_DIAG:
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
	bnx2x_inc_load_cnt(bp);

	/* Wait for all pending SP commands to complete */
	if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
		BNX2X_ERR("Timeout waiting for SP elements to complete\n");
		bnx2x_nic_unload(bp, UNLOAD_CLOSE);
		return -EBUSY;
	}

	bnx2x_dcbx_init(bp);
	return 0;

#ifndef BNX2X_STOP_ON_ERROR
load_error4:
#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);

	/* Clean queueable objects */
	bnx2x_squeeze_objects(bp);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error2:
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
	}

	bp->port.pmf = 0;
load_error1:
	bnx2x_napi_disable(bp);
load_error0:
	bnx2x_free_mem(bp);

	return rc;
#endif /* ! BNX2X_STOP_ON_ERROR */
}
/* must be called with rtnl_lock */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int i;
	bool global = false;

	if ((bp->state == BNX2X_STATE_CLOSED) ||
	    (bp->state == BNX2X_STATE_ERROR)) {
		/* We can get here if the driver has been unloaded
		 * during parity error recovery and is either waiting for a
		 * leader to complete or for other functions to unload and
		 * then ifdown has been issued. In this case we want to
		 * unload and let other functions to complete a recovery
		 * process.
		 */
		bp->recovery_state = BNX2X_RECOVERY_DONE;
		bp->is_leader = 0;
		bnx2x_release_leader_lock(bp);
		smp_mb();

		DP(NETIF_MSG_HW, "Releasing a leadership...\n");

		return -EINVAL;
	}

	/*
	 * It's important to set the bp->state to the value different from
	 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
	 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
	 */
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
	smp_mb();

	/* Stop Tx */
	bnx2x_tx_disable(bp);

#ifdef BCM_CNIC
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	del_timer_sync(&bp->timer);

	/* Set ALWAYS_ALIVE bit in shmem */
	bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;

	bnx2x_drv_pulse(bp);

	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Cleanup the chip if needed */
	if (unload_mode != UNLOAD_RECOVERY)
		bnx2x_chip_cleanup(bp, unload_mode);
	else {
		/* Send the UNLOAD_REQUEST to the MCP */
		bnx2x_send_unload_req(bp, unload_mode);

		/*
		 * Prevent transactions to host from the functions on the
		 * engine that doesn't reset global blocks in case of global
		 * attention once global blocks are reset and gates are opened
		 * (the engine which leader will perform the recovery
		 * last).
		 */
		if (!CHIP_IS_E1x(bp))
			bnx2x_pf_disable(bp);

		/* Disable HW interrupts, NAPI */
		bnx2x_netif_stop(bp, 1);

		/* Release IRQs */
		bnx2x_free_irq(bp);

		/* Report UNLOAD_DONE to MCP */
		bnx2x_send_unload_done(bp);
	}

	/*
	 * At this stage no more interrupts will arrive so we may safely clean
	 * the queueable objects here in case they failed to get cleaned so far.
	 */
	bnx2x_squeeze_objects(bp);

	/* There should be no more pending SP commands at this stage */
	bp->sp_state = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bp->state = BNX2X_STATE_CLOSED;

	/* Check if there are pending parity attentions. If there are - set
	 * RECOVERY_IN_PROGRESS.
	 */
	if (bnx2x_chk_parity_attn(bp, &global, false)) {
		bnx2x_set_reset_in_progress(bp);

		/* Set RESET_IS_GLOBAL if needed */
		if (global)
			bnx2x_set_reset_global(bp);
	}


	/* The last driver must disable a "close the gate" if there is no
	 * parity attention or "process kill" pending.
	 */
	if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
		bnx2x_disable_close_the_gate(bp);

	return 0;
}
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	/* If there is no power capability, silently succeed */
	if (!bp->pm_cap) {
		DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
		return 0;
	}

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		/* If there are other clients above don't
		   shut down the power */
		if (atomic_read(&bp->pdev->enable_cnt) != 1)
			return 0;
		/* Don't shut down the power for emulation and FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			return 0;

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
/*
 * net_device service functions
 */
int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	u8 cos;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif

		for_each_cos_in_tx_queue(fp, cos)
			if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
				bnx2x_tx_int(bp, &fp->txdata[cos]);


		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
#ifdef BCM_CNIC
			/* No need to update SB for FCoE L2 ring as long as
			 * it's connected to the default SB and the SB
			 * has been updated when NAPI was scheduled.
			 */
			if (IS_FCOE_FP(fp)) {
				napi_complete(napi);
				break;
			}
#endif

			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block,
			 * thus we need to ensure that status block indices
			 * have been actually read (bnx2x_update_fpsb_idx)
			 * prior to this check (bnx2x_has_rx_work) so that
			 * we won't write the "newer" value of the status block
			 * to IGU (if there was a DMA right after
			 * bnx2x_has_rx_work and if there is no rmb, the memory
			 * reading (bnx2x_update_fpsb_idx) may be postponed
			 * to right before bnx2x_ack_sb). In this case there
			 * will never be another interrupt until there is
			 * another update of the status block, while there
			 * is still unhandled work.
			 */
			rmb();

			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				DP(NETIF_MSG_HW,
				   "Update index to %d\n", fp->fp_hc_idx);
				bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_hc_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fp_txdata *txdata,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
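
/* Fold a correction of @fix bytes into a partial checksum so that the
 * pseudo-header checksum matches the data actually covered; returns the
 * byte-swapped result.
 */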
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
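
/* Classify an outgoing skb into a bitmask of XMIT_* flags describing the
 * checksum offload (IPv4/IPv6, TCP) and GSO type it requires.
 */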
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_is_gso_v6(skb))
		rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
	else if (skb_is_gso(skb))
		rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;

	return rc;
}
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB */
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif

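/* The check above slides a window of (MAX_FETCH_BD - 3) consecutive BDs over
 * the fragment list and requires every window to carry at least one MSS worth
 * of payload; otherwise (per the FW restriction noted above) the skb has to
 * be linearized. For illustration only: if MAX_FETCH_BD were 13, every run of
 * 10 data BDs would have to cover at least gso_size bytes.
 */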
static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
					u32 xmit_type)
{
	*parsing_data |= (skb_shinfo(skb)->gso_size <<
			  ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
			 ETH_TX_PARSE_BD_E2_LSO_MSS;
	if ((xmit_type & XMIT_GSO_V6) &&
	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}

/**
 * bnx2x_set_pbd_gso - update PBD in GSO case.
 *
 * @skb:	packet skb
 * @pbd:	parse BD
 * @xmit_type:	xmit flags
 */
static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
				     struct eth_tx_parse_bd_e1x *pbd,
				     u32 xmit_type)
{
	pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
	pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
	pbd->tcp_flags = pbd_tcp_flags(skb);

	if (xmit_type & XMIT_GSO_V4) {
		pbd->ip_id = swab16(ip_hdr(skb)->id);
		pbd->tcp_pseudo_csum =
			swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						  ip_hdr(skb)->daddr,
						  0, IPPROTO_TCP, 0));

	} else
		pbd->tcp_pseudo_csum =
			swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						&ipv6_hdr(skb)->daddr,
						0, IPPROTO_TCP, 0));

	pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
}

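/* Note: the GSO helper above computes the TCP pseudo checksum with a zero
 * length argument and sets ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN so the
 * FW knows the length is not included; the length differs for every segment
 * the chip generates, so it cannot be folded in here.
 */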
/**
 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
 *
 * @bp:			driver handle
 * @skb:		packet skb
 * @parsing_data:	data to be updated
 * @xmit_type:		xmit flags
 */
static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
				       u32 *parsing_data, u32 xmit_type)
{
	*parsing_data |=
		((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
		 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
		ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;

	if (xmit_type & XMIT_CSUM_TCP) {
		*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;

		return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
	} else
		/* We support checksum offload for TCP and UDP only.
		 * No need to pass the UDP header length - it's a constant.
		 */
		return skb_transport_header(skb) +
			sizeof(struct udphdr) - skb->data;
}

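/* bnx2x_set_sbd_csum - set the L3/L4 checksum offload flags in the start BD:
 * L4_CSUM is always set, IP_CSUM vs IPV6 selects the L3 flavour, and IS_UDP
 * is set when the L4 protocol is not TCP.
 */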
static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
				      struct eth_tx_start_bd *tx_start_bd,
				      u32 xmit_type)
{
	tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

	if (xmit_type & XMIT_CSUM_V4)
		tx_start_bd->bd_flags.as_bitfield |=
					ETH_TX_BD_FLAGS_IP_CSUM;
	else
		tx_start_bd->bd_flags.as_bitfield |=
					ETH_TX_BD_FLAGS_IPV6;

	if (!(xmit_type & XMIT_CSUM_TCP))
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
}

/**
 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
 *
 * @bp:		driver handle
 * @skb:	packet skb
 * @pbd:	parse BD to be updated
 * @xmit_type:	xmit flags
 */
static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
				    struct eth_tx_parse_bd_e1x *pbd,
				    u32 xmit_type)
{
	u8 hlen = (skb_network_header(skb) - skb->data) >> 1;

	/* for now NS flag is not used in Linux */
	pbd->global_data =
		(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
			 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));

	pbd->ip_hlen_w = (skb_transport_header(skb) -
			  skb_network_header(skb)) >> 1;

	hlen += pbd->ip_hlen_w;

	/* We support checksum offload for TCP and UDP only */
	if (xmit_type & XMIT_CSUM_TCP)
		hlen += tcp_hdrlen(skb) / 2;
	else
		hlen += sizeof(struct udphdr) / 2;

	pbd->total_hlen_w = cpu_to_le16(hlen);
	hlen = hlen*2;

	if (xmit_type & XMIT_CSUM_TCP) {
		pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

	} else {
		s8 fix = SKB_CS_OFF(skb); /* signed! */

		DP(NETIF_MSG_TX_QUEUED,
		   "hlen %d fix %d csum before fix %x\n",
		   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));

		/* HW bug: fixup the CSUM */
		pbd->tcp_pseudo_csum =
			bnx2x_csum_fix(skb_transport_header(skb),
				       SKB_CS(skb), fix);

		DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
		   pbd->tcp_pseudo_csum);
	}

	return hlen;
}

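/* Per-packet BD layout programmed by bnx2x_start_xmit() below: a start BD, a
 * parse BD (E1x or E2 flavour), an optional TSO split BD that re-uses the
 * linear mapping, and one data BD per page fragment. nbd tracks how many of
 * these the packet consumes and is written back into the first BD before the
 * doorbell is rung.
 */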
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct bnx2x_fp_txdata *txdata;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd, *first_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
	u32 pbd_e2_parsing_data = 0;
	u16 pkt_prod, bd_prod;
	int nbd, txq_index, fp_index, txdata_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;
	struct ethhdr *eth;
	u8 mac_type = UNICAST_ADDRESS;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif
	txq_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, txq_index);

	BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);

	/* decode the fastpath index and the cos index from the txq */
	fp_index = TXQ_TO_FP(txq_index);
	txdata_index = TXQ_TO_COS(txq_index);

#ifdef BCM_CNIC
	/*
	 * Override the above for the FCoE queue:
	 *   - FCoE fp entry is right after the ETH entries.
	 *   - FCoE L2 queue uses bp->txdata[0] only.
	 */
	if (unlikely(!NO_FCOE(bp) && (txq_index ==
				      bnx2x_fcoe_tx(bp, txq_index)))) {
		fp_index = FCOE_IDX;
		txdata_index = 0;
	}
#endif

	/* enable this debug print to view the transmission queue being used
	DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d",
	   txq_index, fp_index, txdata_index); */

	/* locate the fastpath and the txdata */
	fp = &bp->fp[fp_index];
	txdata = &fp->txdata[txdata_index];

	/* enable this debug print to view the transmission details
	DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d"
			" tx_data ptr %p fp pointer %p",
	   txdata->cid, fp_index, txdata_index, txdata, fp); */
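	/* Reserve headroom beyond one BD per fragment: the start BD, the
	 * parse BD and the linear data BD also have to fit (compare the
	 * "3 = ..." comments in bnx2x_pkt_req_lin() above).
	 */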
	if (unlikely(bnx2x_tx_avail(bp, txdata) <
		     (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}
	DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
	   "protocol(%x,%x) gso type %x xmit_type %x\n",
	   txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

	eth = (struct ethhdr *)skb->data;

	/* set flag according to packet type (UNICAST_ADDRESS is default) */
	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
		if (is_broadcast_ether_addr(eth->h_dest))
			mac_type = BROADCAST_ADDRESS;
		else
			mac_type = MULTICAST_ADDRESS;
	}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif
	/* Map skb linear data for DMA */
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
		   "silently dropping this SKB\n");
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pdb sizes are in words - NOT DWORDS!
	*/
	/* get current pkt produced now - advance it just before sending packet
	 * since mapping of pages may fail and cause packet to be dropped
	 */
	pkt_prod = txdata->tx_pkt_prod;
	bd_prod = TX_BD(txdata->tx_bd_prod);

	/* get a tx_buf and first BD
	 * tx_start_bd may be changed during SPLIT,
	 * but first_bd will always stay first
	 */
	tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
	first_bd = tx_start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
		 mac_type);

	/* header nbd */
	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);

	/* remember the first BD of the packet */
	tx_buf->first_bd = txdata->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
	if (vlan_tx_tag_present(skb)) {
		tx_start_bd->vlan_or_ethertype =
			cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |=
			(X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
	} else
		tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	if (xmit_type & XMIT_CSUM)
		bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);

	if (!CHIP_IS_E1x(bp)) {
		pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
		/* Set PBD in checksum offload case */
		if (xmit_type & XMIT_CSUM)
			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
						     &pbd_e2_parsing_data,
						     xmit_type);
		if (IS_MF_SI(bp)) {
			/*
			 * fill in the MAC addresses in the PBD - for local
			 * switching
			 */
			bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
					      &pbd_e2->src_mac_addr_mid,
					      &pbd_e2->src_mac_addr_lo,
					      eth->h_source);
			bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
					      &pbd_e2->dst_mac_addr_mid,
					      &pbd_e2->dst_mac_addr_lo,
					      eth->h_dest);
		}
	} else {
		pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
		memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
		/* Set PBD in checksum offload case */
		if (xmit_type & XMIT_CSUM)
			hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
	}
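	/* Only one parse BD flavour is in use per chip family: pbd_e2 for the
	 * chips newer than 57711 (non-E1x), pbd_e1x otherwise; the unused
	 * pointer stays NULL.
	 */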
	/* Setup the data pointer of the first BD of the packet */
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_start_bd->vlan_or_ethertype));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
						 &tx_start_bd, hlen,
						 bd_prod, ++nbd);
		if (!CHIP_IS_E1x(bp))
			bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
					     xmit_type);
		else
			bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
	}

	/* Set the PBD's parsing_data field if not zero
	 * (for the chips newer than 57711).
	 */
	if (pbd_e2_parsing_data)
		pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);

	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
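	/* total_pkt_bd remembers the first data BD mapped in the loop below
	 * so that total_pkt_bytes can be filled in once the full packet size
	 * is known; if mapping any page fails, everything mapped so far is
	 * undone through bnx2x_free_tx_pkt().
	 */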
	/* Handle fragmented skb */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping = dma_map_page(&bp->pdev->dev, frag->page,
				       frag->page_offset, frag->size,
				       DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {

			DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
			   "dropping packet...\n");

			/* we need to unmap all buffers already mapped
			 * for this SKB;
			 * first_bd->nbd needs to be properly updated
			 * before call to bnx2x_free_tx_pkt
			 */
			first_bd->nbd = cpu_to_le16(nbd);
			bnx2x_free_tx_pkt(bp, txdata,
					  TX_BD(txdata->tx_pkt_prod));
			return NETDEV_TX_OK;
		}

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);
		nbd++;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	/* update with actual num BDs */
	first_bd->nbd = cpu_to_le16(nbd);
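	/* Advance the producer past the packet; if the packet's BDs wrap into
	 * a new BD page, the next-page BD it contains (or ends with) is
	 * counted in nbd as well, which is what the TX_BD_POFF() check below
	 * does.
	 */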
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	/* total_pkt_bytes should be set on the first data BD if
	 * it's not an LSO packet and there is more than one
	 * data BD. In this case pkt_size is limited by an MTU value.
	 * However we prefer to set it for an LSO packet (while we don't
	 * have to) in order to save some CPU cycles in a non-LSO
	 * case, when we care much more about them.
	 */
	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd_e1x)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
		   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
		   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
		   le16_to_cpu(pbd_e1x->total_hlen_w));
	if (pbd_e2)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
		   pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
		   pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
		   pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
		   pbd_e2->parsing_data);
	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	txdata->tx_pkt_prod++;
	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes packets must have BDs.
	 */
	wmb();

	txdata->tx_db.data.prod += nbd;
	barrier();

	DOORBELL(bp, txdata->cid, txdata->tx_db.raw);

	mmiowb();

	txdata->tx_bd_prod += nbd;
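	/* Stop the queue once fewer than MAX_SKB_FRAGS + 3 BDs are left, then
	 * re-check availability: a concurrent bnx2x_tx_int() completion may
	 * have freed BDs between the check and the stop, in which case the
	 * queue is woken right back up.
	 */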
	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->bd_tx_cons
		 */
		smp_mb();

		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	txdata->tx_pkt++;

	return NETDEV_TX_OK;
}

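/* Multi-CoS support: bnx2x_setup_tc() below is wired to ndo_setup_tc and maps
 * traffic class 'cos' to the block of ETH transmit queues starting at
 * offset cos * MAX_TXQS_PER_COS (count = number of ETH queues).
 */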
/**
 * bnx2x_setup_tc - routine to configure net_device for multi tc
 *
 * @netdev: net device to configure
 * @tc: number of traffic classes to enable
 *
 * callback connected to the ndo_setup_tc function pointer
 */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
{
	int cos, prio, count, offset;
	struct bnx2x *bp = netdev_priv(dev);

	/* setup tc must be called under rtnl lock */
	ASSERT_RTNL();

	/* no traffic classes requested. aborting */
	if (!num_tc) {
		netdev_reset_tc(dev);
		return 0;
	}

	/* requested to support too many traffic classes */
	if (num_tc > bp->max_cos) {
		DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
		   " requested: %d. max supported is %d",
		   num_tc, bp->max_cos);
		return -EINVAL;
	}

	/* declare amount of supported traffic classes */
	if (netdev_set_num_tc(dev, num_tc)) {
		DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes",
		   num_tc);
		return -EINVAL;
	}

	/* configure priority to traffic class mapping */
	for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
		netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
		DP(BNX2X_MSG_SP, "mapping priority %d to tc %d",
		   prio, bp->prio_to_cos[prio]);
	}

	/* Use this configuration to differentiate tc0 from other COSes
	   This can be used for ets or pfc, and save the effort of setting
	   up a multi class queue disc or negotiating DCBX with a switch
	netdev_set_prio_tc_map(dev, 0, 0);
	DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", 0, 0);
	for (prio = 1; prio < 16; prio++) {
		netdev_set_prio_tc_map(dev, prio, 1);
		DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", prio, 1);
	} */

	/* configure traffic class to transmission queue mapping */
	for (cos = 0; cos < bp->max_cos; cos++) {
		count = BNX2X_NUM_ETH_QUEUES(bp);
		offset = cos * MAX_TXQS_PER_COS;
		netdev_set_tc_queue(dev, cos, count, offset);
		DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d",
		   cos, offset, count);
	}

	return 0;
}

/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	if (netif_running(dev)) {
		rc = bnx2x_set_eth_mac(bp, false);
		if (rc)
			return rc;
	}

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (netif_running(dev))
		rc = bnx2x_set_eth_mac(bp, true);

	return rc;
}

static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
{
	union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
	struct bnx2x_fastpath *fp = &bp->fp[fp_index];
	u8 cos;

	/* Common */
	if (IS_FCOE_IDX(fp_index)) {
		memset(sb, 0, sizeof(union host_hc_status_block));
		fp->status_blk_mapping = 0;
	} else {
		/* status blocks */
		if (!CHIP_IS_E1x(bp))
			BNX2X_PCI_FREE(sb->e2_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_FREE(sb->e1x_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e1x));
	}

	/* Rx */
	if (!skip_rx_queue(bp, fp_index)) {
		bnx2x_free_rx_bds(fp);

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
			       bnx2x_fp(bp, fp_index, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
			       bnx2x_fp(bp, fp_index, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
			       bnx2x_fp(bp, fp_index, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}

	/* Tx */
	if (!skip_tx_queue(bp, fp_index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];

			DP(BNX2X_MSG_SP,
			   "freeing tx memory of fp %d cos %d cid %d",
			   fp_index, cos, txdata->cid);

			BNX2X_FREE(txdata->tx_buf_ring);
			BNX2X_PCI_FREE(txdata->tx_desc_ring,
				       txdata->tx_desc_mapping,
				       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
		}
	}
	/* end of fastpath */
}

void bnx2x_free_fp_mem(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}

static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
{
	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
	if (!CHIP_IS_E1x(bp)) {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e2_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e2_sb->sb.running_index;
	} else {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e1x_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e1x_sb->sb.running_index;
	}
}

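/* Per-queue memory allocation. The Rx ring size defaults to
 * MAX_RX_AVAIL / BNX2X_NUM_RX_QUEUES(bp) unless the user configured
 * bp->rx_ring_size, and it is never allowed to drop below the FW minimum
 * (MIN_RX_SIZE_TPA or MIN_RX_SIZE_NONTPA, depending on whether TPA is used).
 */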
static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
{
	union host_hc_status_block *sb;
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int ring_size = 0;
	u8 cos;
	int rx_ring_size = 0;

	/* if rx_ring_size specified - use it */
	if (!bp->rx_ring_size) {

		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);

		/* allocate at least number of buffers required by FW */
		rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
				     MIN_RX_SIZE_TPA, rx_ring_size);

		bp->rx_ring_size = rx_ring_size;
	} else
		rx_ring_size = bp->rx_ring_size;

	/* Common */
	sb = &bnx2x_fp(bp, index, status_blk);

	if (!IS_FCOE_IDX(index)) {
		/* status blocks */
		if (!CHIP_IS_E1x(bp))
			BNX2X_PCI_ALLOC(sb->e2_sb,
				&bnx2x_fp(bp, index, status_blk_mapping),
				sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_ALLOC(sb->e1x_sb,
				&bnx2x_fp(bp, index, status_blk_mapping),
				sizeof(struct host_hc_status_block_e1x));
	}

	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
	 * set shortcuts for it.
	 */
	if (!IS_FCOE_IDX(index))
		set_sb_shortcuts(bp, index);

	/* Tx */
	if (!skip_tx_queue(bp, index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];

			DP(BNX2X_MSG_SP, "allocating tx memory of "
					 "fp %d cos %d",
			   index, cos);

			BNX2X_ALLOC(txdata->tx_buf_ring,
				    sizeof(struct sw_tx_bd) * NUM_TX_BD);
			BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
					&txdata->tx_desc_mapping,
					sizeof(union eth_tx_bd_types) * NUM_TX_BD);
		}
	}

	/* Rx */
	if (!skip_rx_queue(bp, index)) {
		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
				&bnx2x_fp(bp, index, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
				&bnx2x_fp(bp, index, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
				&bnx2x_fp(bp, index, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
		/* RX BD ring */
		bnx2x_set_next_page_rx_bd(fp);

		/* CQ ring */
		bnx2x_set_next_page_rx_cq(fp);

		/* BDs */
		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
		if (ring_size < rx_ring_size)
			goto alloc_mem_err;
	}

	return 0;

/* handles low memory cases */
alloc_mem_err:
	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
		  index, ring_size);
	/* FW will drop all packets if queue is not big enough,
	 * In these cases we disable the queue
	 * Min size is different for OOO, TPA and non-TPA queues
	 */
	if (ring_size < (fp->disable_tpa ?
				MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
		/* release memory allocated for this queue */
		bnx2x_free_fp_mem_at(bp, index);
		return -ENOMEM;
	}
	return 0;
}

int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
	int i;

	/**
	 * 1. Allocate FP for leading - fatal if error
	 * 2. {CNIC} Allocate FCoE FP - fatal if error
	 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
	 * 4. Allocate RSS - fix number of queues if error
	 */

	/* leading */
	if (bnx2x_alloc_fp_mem_at(bp, 0))
		return -ENOMEM;

#ifdef BCM_CNIC
	if (!NO_FCOE(bp))
		/* FCoE */
		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
			/* we will fail load process instead of mark
			 * NO_FCOE_FLAG
			 */
			return -ENOMEM;
#endif

	/* RSS */
	for_each_nondefault_eth_queue(bp, i)
		if (bnx2x_alloc_fp_mem_at(bp, i))
			break;

	/* handle memory failures */
	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;

#ifdef BCM_CNIC
		/**
		 * move non eth FPs next to last eth FP
		 * must be done in that order
		 * FCOE_IDX < FWD_IDX < OOO_IDX
		 */

		/* move FCoE fp even NO_FCOE_FLAG is on */
		bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
#endif
		bp->num_queues -= delta;
		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
			  bp->num_queues + delta, bp->num_queues);
	}

	return 0;
}

void bnx2x_free_mem_bp(struct bnx2x *bp)
{
	kfree(bp->fp);
	kfree(bp->msix_table);
	kfree(bp->ilt);
}

int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;
	int msix_table_size = 0;

	/*
	 * The biggest MSI-X table we might need is as a maximum number of fast
	 * path IGU SBs plus default SB (for PF).
	 */
	msix_table_size = bp->igu_sb_cnt + 1;

	/* fp array: RSS plus CNIC related L2 queues */
	fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) *
		     sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	bp->fp = fp;

	/* msix table */
	tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;
alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}

int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}

int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;
	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {

		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}

int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
	/*
	 * The selected active PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to
	 * reverse the configuration.
	 */

	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}

#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		break;
	case NETDEV_FCOE_WWPN:
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
#endif

/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	return bnx2x_reload_if_running(dev);
}

u32 bnx2x_fix_features(struct net_device *dev, u32 features)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
		features &= ~NETIF_F_LRO;

	return features;
}

int bnx2x_set_features(struct net_device *dev, u32 features)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 flags = bp->flags;
	bool bnx2x_reload = false;

	if (features & NETIF_F_LRO)
		flags |= TPA_ENABLE_FLAG;
	else
		flags &= ~TPA_ENABLE_FLAG;

	if (features & NETIF_F_LOOPBACK) {
		if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
			bp->link_params.loopback_mode = LOOPBACK_BMAC;
			bnx2x_reload = true;
		}
	} else {
		if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
			bp->link_params.loopback_mode = LOOPBACK_NONE;
			bnx2x_reload = true;
		}
	}

	if (flags ^ bp->flags) {
		bp->flags = flags;
		bnx2x_reload = true;
	}

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE)
			return bnx2x_reload_if_running(dev);
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}

void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	smp_mb__before_clear_bit();
	set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
	smp_mb__after_clear_bit();

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}

int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	/* Since the chip was reset, clear the FW sequence number */
	bp->fw_seq = 0;
	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}

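/* Host coalescing helpers: the timeout is programmed in units of BNX2X_BTR
 * (ticks = usec / BNX2X_BTR), and a status block index is implicitly disabled
 * when the requested interval is 0 (see bnx2x_update_coalesce_sb_index()
 * below).
 */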
static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
					   u8 fw_sb_id, u8 sb_index,
					   u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}

static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u16 flags = REG_RD16(bp, addr);

	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}

void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}