1 /* bnx2x_cmn.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2011 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
25 #include <net/ip6_checksum.h>
26 #include <linux/firmware.h>
27 #include <linux/prefetch.h>
28 #include "bnx2x_cmn.h"
29 #include "bnx2x_init.h"
35 * bnx2x_bz_fp - zero content of the fastpath structure.
38 * @index: fastpath index to be zeroed
40 * Makes sure the contents of the bp->fp[index].napi is kept
43 static inline void bnx2x_bz_fp(struct bnx2x
*bp
, int index
)
45 struct bnx2x_fastpath
*fp
= &bp
->fp
[index
];
46 struct napi_struct orig_napi
= fp
->napi
;
47 /* bzero bnx2x_fastpath contents */
48 memset(fp
, 0, sizeof(*fp
));
50 /* Restore the NAPI object as it has been already initialized */
56 fp
->max_cos
= bp
->max_cos
;
58 /* Special queues support only one CoS */
62 * set the tpa flag for each queue. The tpa flag determines the queue
63 * minimal size so it must be set prior to queue memory allocation
65 fp
->disable_tpa
= ((bp
->flags
& TPA_ENABLE_FLAG
) == 0);
68 /* We don't want TPA on an FCoE L2 ring */
75 * bnx2x_move_fp - move content of the fastpath structure.
78 * @from: source FP index
79 * @to: destination FP index
81 * Makes sure the contents of the bp->fp[to].napi is kept
84 static inline void bnx2x_move_fp(struct bnx2x
*bp
, int from
, int to
)
86 struct bnx2x_fastpath
*from_fp
= &bp
->fp
[from
];
87 struct bnx2x_fastpath
*to_fp
= &bp
->fp
[to
];
88 struct napi_struct orig_napi
= to_fp
->napi
;
89 /* Move bnx2x_fastpath contents */
90 memcpy(to_fp
, from_fp
, sizeof(*to_fp
));
93 /* Restore the NAPI object as it has been already initialized */
94 to_fp
->napi
= orig_napi
;
97 int load_count
[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
99 /* free skb in the packet ring at pos idx
100 * return idx of last bd freed
102 static u16
bnx2x_free_tx_pkt(struct bnx2x
*bp
, struct bnx2x_fp_txdata
*txdata
,
105 struct sw_tx_bd
*tx_buf
= &txdata
->tx_buf_ring
[idx
];
106 struct eth_tx_start_bd
*tx_start_bd
;
107 struct eth_tx_bd
*tx_data_bd
;
108 struct sk_buff
*skb
= tx_buf
->skb
;
109 u16 bd_idx
= TX_BD(tx_buf
->first_bd
), new_cons
;
112 /* prefetch skb end pointer to speedup dev_kfree_skb() */
115 DP(BNX2X_MSG_FP
, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
116 txdata
->txq_index
, idx
, tx_buf
, skb
);
119 DP(BNX2X_MSG_OFF
, "free bd_idx %d\n", bd_idx
);
120 tx_start_bd
= &txdata
->tx_desc_ring
[bd_idx
].start_bd
;
121 dma_unmap_single(&bp
->pdev
->dev
, BD_UNMAP_ADDR(tx_start_bd
),
122 BD_UNMAP_LEN(tx_start_bd
), DMA_TO_DEVICE
);
125 nbd
= le16_to_cpu(tx_start_bd
->nbd
) - 1;
126 #ifdef BNX2X_STOP_ON_ERROR
127 if ((nbd
- 1) > (MAX_SKB_FRAGS
+ 2)) {
128 BNX2X_ERR("BAD nbd!\n");
132 new_cons
= nbd
+ tx_buf
->first_bd
;
134 /* Get the next bd */
135 bd_idx
= TX_BD(NEXT_TX_IDX(bd_idx
));
137 /* Skip a parse bd... */
139 bd_idx
= TX_BD(NEXT_TX_IDX(bd_idx
));
141 /* ...and the TSO split header bd since they have no mapping */
142 if (tx_buf
->flags
& BNX2X_TSO_SPLIT_BD
) {
144 bd_idx
= TX_BD(NEXT_TX_IDX(bd_idx
));
150 DP(BNX2X_MSG_OFF
, "free frag bd_idx %d\n", bd_idx
);
151 tx_data_bd
= &txdata
->tx_desc_ring
[bd_idx
].reg_bd
;
152 dma_unmap_page(&bp
->pdev
->dev
, BD_UNMAP_ADDR(tx_data_bd
),
153 BD_UNMAP_LEN(tx_data_bd
), DMA_TO_DEVICE
);
155 bd_idx
= TX_BD(NEXT_TX_IDX(bd_idx
));
160 dev_kfree_skb_any(skb
);
161 tx_buf
->first_bd
= 0;
167 int bnx2x_tx_int(struct bnx2x
*bp
, struct bnx2x_fp_txdata
*txdata
)
169 struct netdev_queue
*txq
;
170 u16 hw_cons
, sw_cons
, bd_cons
= txdata
->tx_bd_cons
;
172 #ifdef BNX2X_STOP_ON_ERROR
173 if (unlikely(bp
->panic
))
177 txq
= netdev_get_tx_queue(bp
->dev
, txdata
->txq_index
);
178 hw_cons
= le16_to_cpu(*txdata
->tx_cons_sb
);
179 sw_cons
= txdata
->tx_pkt_cons
;
181 while (sw_cons
!= hw_cons
) {
184 pkt_cons
= TX_BD(sw_cons
);
186 DP(NETIF_MSG_TX_DONE
, "queue[%d]: hw_cons %u sw_cons %u "
188 txdata
->txq_index
, hw_cons
, sw_cons
, pkt_cons
);
190 bd_cons
= bnx2x_free_tx_pkt(bp
, txdata
, pkt_cons
);
194 txdata
->tx_pkt_cons
= sw_cons
;
195 txdata
->tx_bd_cons
= bd_cons
;
197 /* Need to make the tx_bd_cons update visible to start_xmit()
198 * before checking for netif_tx_queue_stopped(). Without the
199 * memory barrier, there is a small possibility that
200 * start_xmit() will miss it and cause the queue to be stopped
202 * On the other hand we need an rmb() here to ensure the proper
203 * ordering of bit testing in the following
204 * netif_tx_queue_stopped(txq) call.
208 if (unlikely(netif_tx_queue_stopped(txq
))) {
209 /* Taking tx_lock() is needed to prevent reenabling the queue
210 * while it's empty. This could have happen if rx_action() gets
211 * suspended in bnx2x_tx_int() after the condition before
212 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
214 * stops the queue->sees fresh tx_bd_cons->releases the queue->
215 * sends some packets consuming the whole queue again->
219 __netif_tx_lock(txq
, smp_processor_id());
221 if ((netif_tx_queue_stopped(txq
)) &&
222 (bp
->state
== BNX2X_STATE_OPEN
) &&
223 (bnx2x_tx_avail(bp
, txdata
) >= MAX_SKB_FRAGS
+ 3))
224 netif_tx_wake_queue(txq
);
226 __netif_tx_unlock(txq
);
231 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath
*fp
,
234 u16 last_max
= fp
->last_max_sge
;
236 if (SUB_S16(idx
, last_max
) > 0)
237 fp
->last_max_sge
= idx
;
240 static void bnx2x_update_sge_prod(struct bnx2x_fastpath
*fp
,
241 struct eth_fast_path_rx_cqe
*fp_cqe
)
243 struct bnx2x
*bp
= fp
->bp
;
244 u16 sge_len
= SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe
->pkt_len
) -
245 le16_to_cpu(fp_cqe
->len_on_bd
)) >>
247 u16 last_max
, last_elem
, first_elem
;
254 /* First mark all used pages */
255 for (i
= 0; i
< sge_len
; i
++)
256 BIT_VEC64_CLEAR_BIT(fp
->sge_mask
,
257 RX_SGE(le16_to_cpu(fp_cqe
->sgl_or_raw_data
.sgl
[i
])));
259 DP(NETIF_MSG_RX_STATUS
, "fp_cqe->sgl[%d] = %d\n",
260 sge_len
- 1, le16_to_cpu(fp_cqe
->sgl_or_raw_data
.sgl
[sge_len
- 1]));
262 /* Here we assume that the last SGE index is the biggest */
263 prefetch((void *)(fp
->sge_mask
));
264 bnx2x_update_last_max_sge(fp
,
265 le16_to_cpu(fp_cqe
->sgl_or_raw_data
.sgl
[sge_len
- 1]));
267 last_max
= RX_SGE(fp
->last_max_sge
);
268 last_elem
= last_max
>> BIT_VEC64_ELEM_SHIFT
;
269 first_elem
= RX_SGE(fp
->rx_sge_prod
) >> BIT_VEC64_ELEM_SHIFT
;
271 /* If ring is not full */
272 if (last_elem
+ 1 != first_elem
)
275 /* Now update the prod */
276 for (i
= first_elem
; i
!= last_elem
; i
= NEXT_SGE_MASK_ELEM(i
)) {
277 if (likely(fp
->sge_mask
[i
]))
280 fp
->sge_mask
[i
] = BIT_VEC64_ELEM_ONE_MASK
;
281 delta
+= BIT_VEC64_ELEM_SZ
;
285 fp
->rx_sge_prod
+= delta
;
286 /* clear page-end entries */
287 bnx2x_clear_sge_mask_next_elems(fp
);
290 DP(NETIF_MSG_RX_STATUS
,
291 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
292 fp
->last_max_sge
, fp
->rx_sge_prod
);
295 static void bnx2x_tpa_start(struct bnx2x_fastpath
*fp
, u16 queue
,
296 struct sk_buff
*skb
, u16 cons
, u16 prod
,
297 struct eth_fast_path_rx_cqe
*cqe
)
299 struct bnx2x
*bp
= fp
->bp
;
300 struct sw_rx_bd
*cons_rx_buf
= &fp
->rx_buf_ring
[cons
];
301 struct sw_rx_bd
*prod_rx_buf
= &fp
->rx_buf_ring
[prod
];
302 struct eth_rx_bd
*prod_bd
= &fp
->rx_desc_ring
[prod
];
304 struct bnx2x_agg_info
*tpa_info
= &fp
->tpa_info
[queue
];
305 struct sw_rx_bd
*first_buf
= &tpa_info
->first_buf
;
307 /* print error if current state != stop */
308 if (tpa_info
->tpa_state
!= BNX2X_TPA_STOP
)
309 BNX2X_ERR("start of bin not in stop [%d]\n", queue
);
311 /* Try to map an empty skb from the aggregation info */
312 mapping
= dma_map_single(&bp
->pdev
->dev
,
313 first_buf
->skb
->data
,
314 fp
->rx_buf_size
, DMA_FROM_DEVICE
);
316 * ...if it fails - move the skb from the consumer to the producer
317 * and set the current aggregation state as ERROR to drop it
318 * when TPA_STOP arrives.
321 if (unlikely(dma_mapping_error(&bp
->pdev
->dev
, mapping
))) {
322 /* Move the BD from the consumer to the producer */
323 bnx2x_reuse_rx_skb(fp
, cons
, prod
);
324 tpa_info
->tpa_state
= BNX2X_TPA_ERROR
;
328 /* move empty skb from pool to prod */
329 prod_rx_buf
->skb
= first_buf
->skb
;
330 dma_unmap_addr_set(prod_rx_buf
, mapping
, mapping
);
331 /* point prod_bd to new skb */
332 prod_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
333 prod_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
335 /* move partial skb from cons to pool (don't unmap yet) */
336 *first_buf
= *cons_rx_buf
;
338 /* mark bin state as START */
339 tpa_info
->parsing_flags
=
340 le16_to_cpu(cqe
->pars_flags
.flags
);
341 tpa_info
->vlan_tag
= le16_to_cpu(cqe
->vlan_tag
);
342 tpa_info
->tpa_state
= BNX2X_TPA_START
;
343 tpa_info
->len_on_bd
= le16_to_cpu(cqe
->len_on_bd
);
344 tpa_info
->placement_offset
= cqe
->placement_offset
;
346 #ifdef BNX2X_STOP_ON_ERROR
347 fp
->tpa_queue_used
|= (1 << queue
);
348 #ifdef _ASM_GENERIC_INT_L64_H
349 DP(NETIF_MSG_RX_STATUS
, "fp->tpa_queue_used = 0x%lx\n",
351 DP(NETIF_MSG_RX_STATUS
, "fp->tpa_queue_used = 0x%llx\n",
357 /* Timestamp option length allowed for TPA aggregation:
359 * nop nop kind length echo val
361 #define TPA_TSTAMP_OPT_LEN 12
363 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
366 * @parsing_flags: parsing flags from the START CQE
367 * @len_on_bd: total length of the first packet for the
370 * Approximate value of the MSS for this aggregation calculated using
371 * the first packet of it.
373 static inline u16
bnx2x_set_lro_mss(struct bnx2x
*bp
, u16 parsing_flags
,
377 * TPA arrgregation won't have either IP options or TCP options
378 * other than timestamp or IPv6 extension headers.
380 u16 hdrs_len
= ETH_HLEN
+ sizeof(struct tcphdr
);
382 if (GET_FLAG(parsing_flags
, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL
) ==
383 PRS_FLAG_OVERETH_IPV6
)
384 hdrs_len
+= sizeof(struct ipv6hdr
);
386 hdrs_len
+= sizeof(struct iphdr
);
389 /* Check if there was a TCP timestamp, if there is it's will
390 * always be 12 bytes length: nop nop kind length echo val.
392 * Otherwise FW would close the aggregation.
394 if (parsing_flags
& PARSING_FLAGS_TIME_STAMP_EXIST_FLAG
)
395 hdrs_len
+= TPA_TSTAMP_OPT_LEN
;
397 return len_on_bd
- hdrs_len
;
400 static int bnx2x_fill_frag_skb(struct bnx2x
*bp
, struct bnx2x_fastpath
*fp
,
401 u16 queue
, struct sk_buff
*skb
,
402 struct eth_end_agg_rx_cqe
*cqe
,
405 struct sw_rx_page
*rx_pg
, old_rx_pg
;
406 u32 i
, frag_len
, frag_size
, pages
;
409 struct bnx2x_agg_info
*tpa_info
= &fp
->tpa_info
[queue
];
410 u16 len_on_bd
= tpa_info
->len_on_bd
;
412 frag_size
= le16_to_cpu(cqe
->pkt_len
) - len_on_bd
;
413 pages
= SGE_PAGE_ALIGN(frag_size
) >> SGE_PAGE_SHIFT
;
415 /* This is needed in order to enable forwarding support */
417 skb_shinfo(skb
)->gso_size
= bnx2x_set_lro_mss(bp
,
418 tpa_info
->parsing_flags
, len_on_bd
);
420 #ifdef BNX2X_STOP_ON_ERROR
421 if (pages
> min_t(u32
, 8, MAX_SKB_FRAGS
)*SGE_PAGE_SIZE
*PAGES_PER_SGE
) {
422 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
424 BNX2X_ERR("cqe->pkt_len = %d\n", cqe
->pkt_len
);
430 /* Run through the SGL and compose the fragmented skb */
431 for (i
= 0, j
= 0; i
< pages
; i
+= PAGES_PER_SGE
, j
++) {
432 u16 sge_idx
= RX_SGE(le16_to_cpu(cqe
->sgl_or_raw_data
.sgl
[j
]));
434 /* FW gives the indices of the SGE as if the ring is an array
435 (meaning that "next" element will consume 2 indices) */
436 frag_len
= min(frag_size
, (u32
)(SGE_PAGE_SIZE
*PAGES_PER_SGE
));
437 rx_pg
= &fp
->rx_page_ring
[sge_idx
];
440 /* If we fail to allocate a substitute page, we simply stop
441 where we are and drop the whole packet */
442 err
= bnx2x_alloc_rx_sge(bp
, fp
, sge_idx
);
444 fp
->eth_q_stats
.rx_skb_alloc_failed
++;
448 /* Unmap the page as we r going to pass it to the stack */
449 dma_unmap_page(&bp
->pdev
->dev
,
450 dma_unmap_addr(&old_rx_pg
, mapping
),
451 SGE_PAGE_SIZE
*PAGES_PER_SGE
, DMA_FROM_DEVICE
);
453 /* Add one frag and update the appropriate fields in the skb */
454 skb_fill_page_desc(skb
, j
, old_rx_pg
.page
, 0, frag_len
);
456 skb
->data_len
+= frag_len
;
457 skb
->truesize
+= SGE_PAGE_SIZE
* PAGES_PER_SGE
;
458 skb
->len
+= frag_len
;
460 frag_size
-= frag_len
;
466 static void bnx2x_tpa_stop(struct bnx2x
*bp
, struct bnx2x_fastpath
*fp
,
467 u16 queue
, struct eth_end_agg_rx_cqe
*cqe
,
470 struct bnx2x_agg_info
*tpa_info
= &fp
->tpa_info
[queue
];
471 struct sw_rx_bd
*rx_buf
= &tpa_info
->first_buf
;
472 u8 pad
= tpa_info
->placement_offset
;
473 u16 len
= tpa_info
->len_on_bd
;
474 struct sk_buff
*skb
= rx_buf
->skb
;
476 struct sk_buff
*new_skb
;
477 u8 old_tpa_state
= tpa_info
->tpa_state
;
479 tpa_info
->tpa_state
= BNX2X_TPA_STOP
;
481 /* If we there was an error during the handling of the TPA_START -
482 * drop this aggregation.
484 if (old_tpa_state
== BNX2X_TPA_ERROR
)
487 /* Try to allocate the new skb */
488 new_skb
= netdev_alloc_skb(bp
->dev
, fp
->rx_buf_size
);
490 /* Unmap skb in the pool anyway, as we are going to change
491 pool entry status to BNX2X_TPA_STOP even if new skb allocation
493 dma_unmap_single(&bp
->pdev
->dev
, dma_unmap_addr(rx_buf
, mapping
),
494 fp
->rx_buf_size
, DMA_FROM_DEVICE
);
496 if (likely(new_skb
)) {
498 prefetch(((char *)(skb
)) + L1_CACHE_BYTES
);
500 #ifdef BNX2X_STOP_ON_ERROR
501 if (pad
+ len
> fp
->rx_buf_size
) {
502 BNX2X_ERR("skb_put is about to fail... "
503 "pad %d len %d rx_buf_size %d\n",
504 pad
, len
, fp
->rx_buf_size
);
510 skb_reserve(skb
, pad
);
513 skb
->protocol
= eth_type_trans(skb
, bp
->dev
);
514 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
516 if (!bnx2x_fill_frag_skb(bp
, fp
, queue
, skb
, cqe
, cqe_idx
)) {
517 if (tpa_info
->parsing_flags
& PARSING_FLAGS_VLAN
)
518 __vlan_hwaccel_put_tag(skb
, tpa_info
->vlan_tag
);
519 napi_gro_receive(&fp
->napi
, skb
);
521 DP(NETIF_MSG_RX_STATUS
, "Failed to allocate new pages"
522 " - dropping packet!\n");
523 dev_kfree_skb_any(skb
);
527 /* put new skb in bin */
528 rx_buf
->skb
= new_skb
;
534 /* drop the packet and keep the buffer in the bin */
535 DP(NETIF_MSG_RX_STATUS
,
536 "Failed to allocate or map a new skb - dropping packet!\n");
537 fp
->eth_q_stats
.rx_skb_alloc_failed
++;
540 /* Set Toeplitz hash value in the skb using the value from the
541 * CQE (calculated by HW).
543 static inline void bnx2x_set_skb_rxhash(struct bnx2x
*bp
, union eth_rx_cqe
*cqe
,
546 /* Set Toeplitz hash from CQE */
547 if ((bp
->dev
->features
& NETIF_F_RXHASH
) &&
548 (cqe
->fast_path_cqe
.status_flags
&
549 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG
))
551 le32_to_cpu(cqe
->fast_path_cqe
.rss_hash_result
);
554 int bnx2x_rx_int(struct bnx2x_fastpath
*fp
, int budget
)
556 struct bnx2x
*bp
= fp
->bp
;
557 u16 bd_cons
, bd_prod
, bd_prod_fw
, comp_ring_cons
;
558 u16 hw_comp_cons
, sw_comp_cons
, sw_comp_prod
;
561 #ifdef BNX2X_STOP_ON_ERROR
562 if (unlikely(bp
->panic
))
566 /* CQ "next element" is of the size of the regular element,
567 that's why it's ok here */
568 hw_comp_cons
= le16_to_cpu(*fp
->rx_cons_sb
);
569 if ((hw_comp_cons
& MAX_RCQ_DESC_CNT
) == MAX_RCQ_DESC_CNT
)
572 bd_cons
= fp
->rx_bd_cons
;
573 bd_prod
= fp
->rx_bd_prod
;
574 bd_prod_fw
= bd_prod
;
575 sw_comp_cons
= fp
->rx_comp_cons
;
576 sw_comp_prod
= fp
->rx_comp_prod
;
578 /* Memory barrier necessary as speculative reads of the rx
579 * buffer can be ahead of the index in the status block
583 DP(NETIF_MSG_RX_STATUS
,
584 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
585 fp
->index
, hw_comp_cons
, sw_comp_cons
);
587 while (sw_comp_cons
!= hw_comp_cons
) {
588 struct sw_rx_bd
*rx_buf
= NULL
;
590 union eth_rx_cqe
*cqe
;
591 struct eth_fast_path_rx_cqe
*cqe_fp
;
593 enum eth_rx_cqe_type cqe_fp_type
;
596 #ifdef BNX2X_STOP_ON_ERROR
597 if (unlikely(bp
->panic
))
601 comp_ring_cons
= RCQ_BD(sw_comp_cons
);
602 bd_prod
= RX_BD(bd_prod
);
603 bd_cons
= RX_BD(bd_cons
);
605 /* Prefetch the page containing the BD descriptor
606 at producer's index. It will be needed when new skb is
608 prefetch((void *)(PAGE_ALIGN((unsigned long)
609 (&fp
->rx_desc_ring
[bd_prod
])) -
612 cqe
= &fp
->rx_comp_ring
[comp_ring_cons
];
613 cqe_fp
= &cqe
->fast_path_cqe
;
614 cqe_fp_flags
= cqe_fp
->type_error_flags
;
615 cqe_fp_type
= cqe_fp_flags
& ETH_FAST_PATH_RX_CQE_TYPE
;
617 DP(NETIF_MSG_RX_STATUS
, "CQE type %x err %x status %x"
618 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags
),
619 cqe_fp_flags
, cqe_fp
->status_flags
,
620 le32_to_cpu(cqe_fp
->rss_hash_result
),
621 le16_to_cpu(cqe_fp
->vlan_tag
), le16_to_cpu(cqe_fp
->pkt_len
));
623 /* is this a slowpath msg? */
624 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type
))) {
625 bnx2x_sp_event(fp
, cqe
);
628 /* this is an rx packet */
630 rx_buf
= &fp
->rx_buf_ring
[bd_cons
];
634 if (!CQE_TYPE_FAST(cqe_fp_type
)) {
635 #ifdef BNX2X_STOP_ON_ERROR
637 if (fp
->disable_tpa
&&
638 (CQE_TYPE_START(cqe_fp_type
) ||
639 CQE_TYPE_STOP(cqe_fp_type
)))
640 BNX2X_ERR("START/STOP packet while "
641 "disable_tpa type %x\n",
642 CQE_TYPE(cqe_fp_type
));
645 if (CQE_TYPE_START(cqe_fp_type
)) {
646 u16 queue
= cqe_fp
->queue_index
;
647 DP(NETIF_MSG_RX_STATUS
,
648 "calling tpa_start on queue %d\n",
651 bnx2x_tpa_start(fp
, queue
, skb
,
655 /* Set Toeplitz hash for LRO skb */
656 bnx2x_set_skb_rxhash(bp
, cqe
, skb
);
662 cqe
->end_agg_cqe
.queue_index
;
663 DP(NETIF_MSG_RX_STATUS
,
664 "calling tpa_stop on queue %d\n",
667 bnx2x_tpa_stop(bp
, fp
, queue
,
670 #ifdef BNX2X_STOP_ON_ERROR
675 bnx2x_update_sge_prod(fp
, cqe_fp
);
680 len
= le16_to_cpu(cqe_fp
->pkt_len
);
681 pad
= cqe_fp
->placement_offset
;
682 dma_sync_single_for_cpu(&bp
->pdev
->dev
,
683 dma_unmap_addr(rx_buf
, mapping
),
684 pad
+ RX_COPY_THRESH
,
686 prefetch(((char *)(skb
)) + L1_CACHE_BYTES
);
688 /* is this an error packet? */
689 if (unlikely(cqe_fp_flags
& ETH_RX_ERROR_FALGS
)) {
691 "ERROR flags %x rx packet %u\n",
692 cqe_fp_flags
, sw_comp_cons
);
693 fp
->eth_q_stats
.rx_err_discard_pkt
++;
697 /* Since we don't have a jumbo ring
698 * copy small packets if mtu > 1500
700 if ((bp
->dev
->mtu
> ETH_MAX_PACKET_SIZE
) &&
701 (len
<= RX_COPY_THRESH
)) {
702 struct sk_buff
*new_skb
;
704 new_skb
= netdev_alloc_skb(bp
->dev
, len
+ pad
);
705 if (new_skb
== NULL
) {
707 "ERROR packet dropped "
708 "because of alloc failure\n");
709 fp
->eth_q_stats
.rx_skb_alloc_failed
++;
714 skb_copy_from_linear_data_offset(skb
, pad
,
715 new_skb
->data
+ pad
, len
);
716 skb_reserve(new_skb
, pad
);
717 skb_put(new_skb
, len
);
719 bnx2x_reuse_rx_skb(fp
, bd_cons
, bd_prod
);
724 if (likely(bnx2x_alloc_rx_skb(bp
, fp
, bd_prod
) == 0)) {
725 dma_unmap_single(&bp
->pdev
->dev
,
726 dma_unmap_addr(rx_buf
, mapping
),
729 skb_reserve(skb
, pad
);
734 "ERROR packet dropped because "
735 "of alloc failure\n");
736 fp
->eth_q_stats
.rx_skb_alloc_failed
++;
738 bnx2x_reuse_rx_skb(fp
, bd_cons
, bd_prod
);
742 skb
->protocol
= eth_type_trans(skb
, bp
->dev
);
744 /* Set Toeplitz hash for a none-LRO skb */
745 bnx2x_set_skb_rxhash(bp
, cqe
, skb
);
747 skb_checksum_none_assert(skb
);
749 if (bp
->dev
->features
& NETIF_F_RXCSUM
) {
751 if (likely(BNX2X_RX_CSUM_OK(cqe
)))
752 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
754 fp
->eth_q_stats
.hw_csum_err
++;
758 skb_record_rx_queue(skb
, fp
->index
);
760 if (le16_to_cpu(cqe_fp
->pars_flags
.flags
) &
762 __vlan_hwaccel_put_tag(skb
,
763 le16_to_cpu(cqe_fp
->vlan_tag
));
764 napi_gro_receive(&fp
->napi
, skb
);
770 bd_cons
= NEXT_RX_IDX(bd_cons
);
771 bd_prod
= NEXT_RX_IDX(bd_prod
);
772 bd_prod_fw
= NEXT_RX_IDX(bd_prod_fw
);
775 sw_comp_prod
= NEXT_RCQ_IDX(sw_comp_prod
);
776 sw_comp_cons
= NEXT_RCQ_IDX(sw_comp_cons
);
778 if (rx_pkt
== budget
)
782 fp
->rx_bd_cons
= bd_cons
;
783 fp
->rx_bd_prod
= bd_prod_fw
;
784 fp
->rx_comp_cons
= sw_comp_cons
;
785 fp
->rx_comp_prod
= sw_comp_prod
;
787 /* Update producers */
788 bnx2x_update_rx_prod(bp
, fp
, bd_prod_fw
, sw_comp_prod
,
791 fp
->rx_pkt
+= rx_pkt
;
797 static irqreturn_t
bnx2x_msix_fp_int(int irq
, void *fp_cookie
)
799 struct bnx2x_fastpath
*fp
= fp_cookie
;
800 struct bnx2x
*bp
= fp
->bp
;
803 DP(BNX2X_MSG_FP
, "got an MSI-X interrupt on IDX:SB "
804 "[fp %d fw_sd %d igusb %d]\n",
805 fp
->index
, fp
->fw_sb_id
, fp
->igu_sb_id
);
806 bnx2x_ack_sb(bp
, fp
->igu_sb_id
, USTORM_ID
, 0, IGU_INT_DISABLE
, 0);
808 #ifdef BNX2X_STOP_ON_ERROR
809 if (unlikely(bp
->panic
))
813 /* Handle Rx and Tx according to MSI-X vector */
814 prefetch(fp
->rx_cons_sb
);
816 for_each_cos_in_tx_queue(fp
, cos
)
817 prefetch(fp
->txdata
[cos
].tx_cons_sb
);
819 prefetch(&fp
->sb_running_index
[SM_RX_ID
]);
820 napi_schedule(&bnx2x_fp(bp
, fp
->index
, napi
));
825 /* HW Lock for shared dual port PHYs */
826 void bnx2x_acquire_phy_lock(struct bnx2x
*bp
)
828 mutex_lock(&bp
->port
.phy_mutex
);
830 if (bp
->port
.need_hw_lock
)
831 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_MDIO
);
834 void bnx2x_release_phy_lock(struct bnx2x
*bp
)
836 if (bp
->port
.need_hw_lock
)
837 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_MDIO
);
839 mutex_unlock(&bp
->port
.phy_mutex
);
842 /* calculates MF speed according to current linespeed and MF configuration */
843 u16
bnx2x_get_mf_speed(struct bnx2x
*bp
)
845 u16 line_speed
= bp
->link_vars
.line_speed
;
847 u16 maxCfg
= bnx2x_extract_max_cfg(bp
,
848 bp
->mf_config
[BP_VN(bp
)]);
850 /* Calculate the current MAX line speed limit for the MF
854 line_speed
= (line_speed
* maxCfg
) / 100;
856 u16 vn_max_rate
= maxCfg
* 100;
858 if (vn_max_rate
< line_speed
)
859 line_speed
= vn_max_rate
;
867 * bnx2x_fill_report_data - fill link report data to report
870 * @data: link state to update
872 * It uses a none-atomic bit operations because is called under the mutex.
874 static inline void bnx2x_fill_report_data(struct bnx2x
*bp
,
875 struct bnx2x_link_report_data
*data
)
877 u16 line_speed
= bnx2x_get_mf_speed(bp
);
879 memset(data
, 0, sizeof(*data
));
881 /* Fill the report data: efective line speed */
882 data
->line_speed
= line_speed
;
885 if (!bp
->link_vars
.link_up
|| (bp
->flags
& MF_FUNC_DIS
))
886 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
887 &data
->link_report_flags
);
890 if (bp
->link_vars
.duplex
== DUPLEX_FULL
)
891 __set_bit(BNX2X_LINK_REPORT_FD
, &data
->link_report_flags
);
893 /* Rx Flow Control is ON */
894 if (bp
->link_vars
.flow_ctrl
& BNX2X_FLOW_CTRL_RX
)
895 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON
, &data
->link_report_flags
);
897 /* Tx Flow Control is ON */
898 if (bp
->link_vars
.flow_ctrl
& BNX2X_FLOW_CTRL_TX
)
899 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON
, &data
->link_report_flags
);
903 * bnx2x_link_report - report link status to OS.
907 * Calls the __bnx2x_link_report() under the same locking scheme
908 * as a link/PHY state managing code to ensure a consistent link
912 void bnx2x_link_report(struct bnx2x
*bp
)
914 bnx2x_acquire_phy_lock(bp
);
915 __bnx2x_link_report(bp
);
916 bnx2x_release_phy_lock(bp
);
920 * __bnx2x_link_report - report link status to OS.
924 * None atomic inmlementation.
925 * Should be called under the phy_lock.
927 void __bnx2x_link_report(struct bnx2x
*bp
)
929 struct bnx2x_link_report_data cur_data
;
933 bnx2x_read_mf_cfg(bp
);
935 /* Read the current link report info */
936 bnx2x_fill_report_data(bp
, &cur_data
);
938 /* Don't report link down or exactly the same link status twice */
939 if (!memcmp(&cur_data
, &bp
->last_reported_link
, sizeof(cur_data
)) ||
940 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
941 &bp
->last_reported_link
.link_report_flags
) &&
942 test_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
943 &cur_data
.link_report_flags
)))
948 /* We are going to report a new link parameters now -
949 * remember the current data for the next time.
951 memcpy(&bp
->last_reported_link
, &cur_data
, sizeof(cur_data
));
953 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
954 &cur_data
.link_report_flags
)) {
955 netif_carrier_off(bp
->dev
);
956 netdev_err(bp
->dev
, "NIC Link is Down\n");
962 netif_carrier_on(bp
->dev
);
964 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD
,
965 &cur_data
.link_report_flags
))
970 /* Handle the FC at the end so that only these flags would be
971 * possibly set. This way we may easily check if there is no FC
974 if (cur_data
.link_report_flags
) {
975 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON
,
976 &cur_data
.link_report_flags
)) {
977 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON
,
978 &cur_data
.link_report_flags
))
979 flow
= "ON - receive & transmit";
981 flow
= "ON - receive";
983 flow
= "ON - transmit";
988 netdev_info(bp
->dev
, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
989 cur_data
.line_speed
, duplex
, flow
);
993 void bnx2x_init_rx_rings(struct bnx2x
*bp
)
995 int func
= BP_FUNC(bp
);
999 /* Allocate TPA resources */
1000 for_each_rx_queue(bp
, j
) {
1001 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
1004 "mtu %d rx_buf_size %d\n", bp
->dev
->mtu
, fp
->rx_buf_size
);
1006 if (!fp
->disable_tpa
) {
1007 /* Fill the per-aggregtion pool */
1008 for (i
= 0; i
< MAX_AGG_QS(bp
); i
++) {
1009 struct bnx2x_agg_info
*tpa_info
=
1011 struct sw_rx_bd
*first_buf
=
1012 &tpa_info
->first_buf
;
1014 first_buf
->skb
= netdev_alloc_skb(bp
->dev
,
1016 if (!first_buf
->skb
) {
1017 BNX2X_ERR("Failed to allocate TPA "
1018 "skb pool for queue[%d] - "
1019 "disabling TPA on this "
1021 bnx2x_free_tpa_pool(bp
, fp
, i
);
1022 fp
->disable_tpa
= 1;
1025 dma_unmap_addr_set(first_buf
, mapping
, 0);
1026 tpa_info
->tpa_state
= BNX2X_TPA_STOP
;
1029 /* "next page" elements initialization */
1030 bnx2x_set_next_page_sgl(fp
);
1032 /* set SGEs bit mask */
1033 bnx2x_init_sge_ring_bit_mask(fp
);
1035 /* Allocate SGEs and initialize the ring elements */
1036 for (i
= 0, ring_prod
= 0;
1037 i
< MAX_RX_SGE_CNT
*NUM_RX_SGE_PAGES
; i
++) {
1039 if (bnx2x_alloc_rx_sge(bp
, fp
, ring_prod
) < 0) {
1040 BNX2X_ERR("was only able to allocate "
1042 BNX2X_ERR("disabling TPA for "
1044 /* Cleanup already allocated elements */
1045 bnx2x_free_rx_sge_range(bp
, fp
,
1047 bnx2x_free_tpa_pool(bp
, fp
,
1049 fp
->disable_tpa
= 1;
1053 ring_prod
= NEXT_SGE_IDX(ring_prod
);
1056 fp
->rx_sge_prod
= ring_prod
;
1060 for_each_rx_queue(bp
, j
) {
1061 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
1065 /* Activate BD ring */
1067 * this will generate an interrupt (to the TSTORM)
1068 * must only be done after chip is initialized
1070 bnx2x_update_rx_prod(bp
, fp
, fp
->rx_bd_prod
, fp
->rx_comp_prod
,
1076 if (CHIP_IS_E1(bp
)) {
1077 REG_WR(bp
, BAR_USTRORM_INTMEM
+
1078 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func
),
1079 U64_LO(fp
->rx_comp_mapping
));
1080 REG_WR(bp
, BAR_USTRORM_INTMEM
+
1081 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func
) + 4,
1082 U64_HI(fp
->rx_comp_mapping
));
1087 static void bnx2x_free_tx_skbs(struct bnx2x
*bp
)
1092 for_each_tx_queue(bp
, i
) {
1093 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
1094 for_each_cos_in_tx_queue(fp
, cos
) {
1095 struct bnx2x_fp_txdata
*txdata
= &fp
->txdata
[cos
];
1097 u16 bd_cons
= txdata
->tx_bd_cons
;
1098 u16 sw_prod
= txdata
->tx_pkt_prod
;
1099 u16 sw_cons
= txdata
->tx_pkt_cons
;
1101 while (sw_cons
!= sw_prod
) {
1102 bd_cons
= bnx2x_free_tx_pkt(bp
, txdata
,
1110 static void bnx2x_free_rx_bds(struct bnx2x_fastpath
*fp
)
1112 struct bnx2x
*bp
= fp
->bp
;
1115 /* ring wasn't allocated */
1116 if (fp
->rx_buf_ring
== NULL
)
1119 for (i
= 0; i
< NUM_RX_BD
; i
++) {
1120 struct sw_rx_bd
*rx_buf
= &fp
->rx_buf_ring
[i
];
1121 struct sk_buff
*skb
= rx_buf
->skb
;
1125 dma_unmap_single(&bp
->pdev
->dev
,
1126 dma_unmap_addr(rx_buf
, mapping
),
1127 fp
->rx_buf_size
, DMA_FROM_DEVICE
);
1134 static void bnx2x_free_rx_skbs(struct bnx2x
*bp
)
1138 for_each_rx_queue(bp
, j
) {
1139 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
1141 bnx2x_free_rx_bds(fp
);
1143 if (!fp
->disable_tpa
)
1144 bnx2x_free_tpa_pool(bp
, fp
, MAX_AGG_QS(bp
));
1148 void bnx2x_free_skbs(struct bnx2x
*bp
)
1150 bnx2x_free_tx_skbs(bp
);
1151 bnx2x_free_rx_skbs(bp
);
1154 void bnx2x_update_max_mf_config(struct bnx2x
*bp
, u32 value
)
1156 /* load old values */
1157 u32 mf_cfg
= bp
->mf_config
[BP_VN(bp
)];
1159 if (value
!= bnx2x_extract_max_cfg(bp
, mf_cfg
)) {
1160 /* leave all but MAX value */
1161 mf_cfg
&= ~FUNC_MF_CFG_MAX_BW_MASK
;
1163 /* set new MAX value */
1164 mf_cfg
|= (value
<< FUNC_MF_CFG_MAX_BW_SHIFT
)
1165 & FUNC_MF_CFG_MAX_BW_MASK
;
1167 bnx2x_fw_command(bp
, DRV_MSG_CODE_SET_MF_BW
, mf_cfg
);
1172 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1174 * @bp: driver handle
1175 * @nvecs: number of vectors to be released
1177 static void bnx2x_free_msix_irqs(struct bnx2x
*bp
, int nvecs
)
1181 if (nvecs
== offset
)
1183 free_irq(bp
->msix_table
[offset
].vector
, bp
->dev
);
1184 DP(NETIF_MSG_IFDOWN
, "released sp irq (%d)\n",
1185 bp
->msix_table
[offset
].vector
);
1188 if (nvecs
== offset
)
1193 for_each_eth_queue(bp
, i
) {
1194 if (nvecs
== offset
)
1196 DP(NETIF_MSG_IFDOWN
, "about to release fp #%d->%d "
1197 "irq\n", i
, bp
->msix_table
[offset
].vector
);
1199 free_irq(bp
->msix_table
[offset
++].vector
, &bp
->fp
[i
]);
1203 void bnx2x_free_irq(struct bnx2x
*bp
)
1205 if (bp
->flags
& USING_MSIX_FLAG
)
1206 bnx2x_free_msix_irqs(bp
, BNX2X_NUM_ETH_QUEUES(bp
) +
1208 else if (bp
->flags
& USING_MSI_FLAG
)
1209 free_irq(bp
->pdev
->irq
, bp
->dev
);
1211 free_irq(bp
->pdev
->irq
, bp
->dev
);
1214 int bnx2x_enable_msix(struct bnx2x
*bp
)
1216 int msix_vec
= 0, i
, rc
, req_cnt
;
1218 bp
->msix_table
[msix_vec
].entry
= msix_vec
;
1219 DP(NETIF_MSG_IFUP
, "msix_table[0].entry = %d (slowpath)\n",
1220 bp
->msix_table
[0].entry
);
1224 bp
->msix_table
[msix_vec
].entry
= msix_vec
;
1225 DP(NETIF_MSG_IFUP
, "msix_table[%d].entry = %d (CNIC)\n",
1226 bp
->msix_table
[msix_vec
].entry
, bp
->msix_table
[msix_vec
].entry
);
1229 /* We need separate vectors for ETH queues only (not FCoE) */
1230 for_each_eth_queue(bp
, i
) {
1231 bp
->msix_table
[msix_vec
].entry
= msix_vec
;
1232 DP(NETIF_MSG_IFUP
, "msix_table[%d].entry = %d "
1233 "(fastpath #%u)\n", msix_vec
, msix_vec
, i
);
1237 req_cnt
= BNX2X_NUM_ETH_QUEUES(bp
) + CNIC_PRESENT
+ 1;
1239 rc
= pci_enable_msix(bp
->pdev
, &bp
->msix_table
[0], req_cnt
);
1242 * reconfigure number of tx/rx queues according to available
1245 if (rc
>= BNX2X_MIN_MSIX_VEC_CNT
) {
1246 /* how less vectors we will have? */
1247 int diff
= req_cnt
- rc
;
1250 "Trying to use less MSI-X vectors: %d\n", rc
);
1252 rc
= pci_enable_msix(bp
->pdev
, &bp
->msix_table
[0], rc
);
1256 "MSI-X is not attainable rc %d\n", rc
);
1260 * decrease number of queues by number of unallocated entries
1262 bp
->num_queues
-= diff
;
1264 DP(NETIF_MSG_IFUP
, "New queue configuration set: %d\n",
1267 /* fall to INTx if not enough memory */
1269 bp
->flags
|= DISABLE_MSI_FLAG
;
1270 DP(NETIF_MSG_IFUP
, "MSI-X is not attainable rc %d\n", rc
);
1274 bp
->flags
|= USING_MSIX_FLAG
;
1279 static int bnx2x_req_msix_irqs(struct bnx2x
*bp
)
1281 int i
, rc
, offset
= 0;
1283 rc
= request_irq(bp
->msix_table
[offset
++].vector
,
1284 bnx2x_msix_sp_int
, 0,
1285 bp
->dev
->name
, bp
->dev
);
1287 BNX2X_ERR("request sp irq failed\n");
1294 for_each_eth_queue(bp
, i
) {
1295 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
1296 snprintf(fp
->name
, sizeof(fp
->name
), "%s-fp-%d",
1299 rc
= request_irq(bp
->msix_table
[offset
].vector
,
1300 bnx2x_msix_fp_int
, 0, fp
->name
, fp
);
1302 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i
,
1303 bp
->msix_table
[offset
].vector
, rc
);
1304 bnx2x_free_msix_irqs(bp
, offset
);
1311 i
= BNX2X_NUM_ETH_QUEUES(bp
);
1312 offset
= 1 + CNIC_PRESENT
;
1313 netdev_info(bp
->dev
, "using MSI-X IRQs: sp %d fp[%d] %d"
1315 bp
->msix_table
[0].vector
,
1316 0, bp
->msix_table
[offset
].vector
,
1317 i
- 1, bp
->msix_table
[offset
+ i
- 1].vector
);
1322 int bnx2x_enable_msi(struct bnx2x
*bp
)
1326 rc
= pci_enable_msi(bp
->pdev
);
1328 DP(NETIF_MSG_IFUP
, "MSI is not attainable\n");
1331 bp
->flags
|= USING_MSI_FLAG
;
1336 static int bnx2x_req_irq(struct bnx2x
*bp
)
1338 unsigned long flags
;
1341 if (bp
->flags
& USING_MSI_FLAG
)
1344 flags
= IRQF_SHARED
;
1346 rc
= request_irq(bp
->pdev
->irq
, bnx2x_interrupt
, flags
,
1347 bp
->dev
->name
, bp
->dev
);
1351 static inline int bnx2x_setup_irqs(struct bnx2x
*bp
)
1354 if (bp
->flags
& USING_MSIX_FLAG
) {
1355 rc
= bnx2x_req_msix_irqs(bp
);
1360 rc
= bnx2x_req_irq(bp
);
1362 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc
);
1365 if (bp
->flags
& USING_MSI_FLAG
) {
1366 bp
->dev
->irq
= bp
->pdev
->irq
;
1367 netdev_info(bp
->dev
, "using MSI IRQ %d\n",
1375 static inline void bnx2x_napi_enable(struct bnx2x
*bp
)
1379 for_each_rx_queue(bp
, i
)
1380 napi_enable(&bnx2x_fp(bp
, i
, napi
));
1383 static inline void bnx2x_napi_disable(struct bnx2x
*bp
)
1387 for_each_rx_queue(bp
, i
)
1388 napi_disable(&bnx2x_fp(bp
, i
, napi
));
1391 void bnx2x_netif_start(struct bnx2x
*bp
)
1393 if (netif_running(bp
->dev
)) {
1394 bnx2x_napi_enable(bp
);
1395 bnx2x_int_enable(bp
);
1396 if (bp
->state
== BNX2X_STATE_OPEN
)
1397 netif_tx_wake_all_queues(bp
->dev
);
1401 void bnx2x_netif_stop(struct bnx2x
*bp
, int disable_hw
)
1403 bnx2x_int_disable_sync(bp
, disable_hw
);
1404 bnx2x_napi_disable(bp
);
1407 u16
bnx2x_select_queue(struct net_device
*dev
, struct sk_buff
*skb
)
1409 struct bnx2x
*bp
= netdev_priv(dev
);
1413 struct ethhdr
*hdr
= (struct ethhdr
*)skb
->data
;
1414 u16 ether_type
= ntohs(hdr
->h_proto
);
1416 /* Skip VLAN tag if present */
1417 if (ether_type
== ETH_P_8021Q
) {
1418 struct vlan_ethhdr
*vhdr
=
1419 (struct vlan_ethhdr
*)skb
->data
;
1421 ether_type
= ntohs(vhdr
->h_vlan_encapsulated_proto
);
1424 /* If ethertype is FCoE or FIP - use FCoE ring */
1425 if ((ether_type
== ETH_P_FCOE
) || (ether_type
== ETH_P_FIP
))
1426 return bnx2x_fcoe_tx(bp
, txq_index
);
1429 /* select a non-FCoE queue */
1430 return __skb_tx_hash(dev
, skb
, BNX2X_NUM_ETH_QUEUES(bp
));
1433 void bnx2x_set_num_queues(struct bnx2x
*bp
)
1435 switch (bp
->multi_mode
) {
1436 case ETH_RSS_MODE_DISABLED
:
1439 case ETH_RSS_MODE_REGULAR
:
1440 bp
->num_queues
= bnx2x_calc_num_queues(bp
);
1448 /* Add special queues */
1449 bp
->num_queues
+= NON_ETH_CONTEXT_USE
;
1453 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1455 * @bp: Driver handle
1457 * We currently support for at most 16 Tx queues for each CoS thus we will
1458 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1461 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1462 * index after all ETH L2 indices.
1464 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1465 * will be the holes at the end of each group of 16 ETh L2 indices (0..15,
1466 * 16..31,...) with indicies that are not coupled with any real Tx queue.
1468 * The proper configuration of skb->queue_mapping is handled by
1469 * bnx2x_select_queue() and __skb_tx_hash().
1471 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1472 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1474 static inline int bnx2x_set_real_num_queues(struct bnx2x
*bp
)
1478 tx
= MAX_TXQS_PER_COS
* bp
->max_cos
;
1479 rx
= BNX2X_NUM_ETH_QUEUES(bp
);
1481 /* account for fcoe queue */
1489 rc
= netif_set_real_num_tx_queues(bp
->dev
, tx
);
1491 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc
);
1494 rc
= netif_set_real_num_rx_queues(bp
->dev
, rx
);
1496 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc
);
1500 DP(NETIF_MSG_DRV
, "Setting real num queues to (tx, rx) (%d, %d)\n",
1506 static inline void bnx2x_set_rx_buf_size(struct bnx2x
*bp
)
1510 for_each_queue(bp
, i
) {
1511 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
1513 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1516 * Although there are no IP frames expected to arrive to
1517 * this ring we still want to add an
1518 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1522 BNX2X_FCOE_MINI_JUMBO_MTU
+ ETH_OVREHEAD
+
1523 BNX2X_FW_RX_ALIGN
+ IP_HEADER_ALIGNMENT_PADDING
;
1526 bp
->dev
->mtu
+ ETH_OVREHEAD
+
1527 BNX2X_FW_RX_ALIGN
+ IP_HEADER_ALIGNMENT_PADDING
;
1531 static inline int bnx2x_init_rss_pf(struct bnx2x
*bp
)
1534 u8 ind_table
[T_ETH_INDIRECTION_TABLE_SIZE
] = {0};
1535 u8 num_eth_queues
= BNX2X_NUM_ETH_QUEUES(bp
);
1538 * Prepare the inital contents fo the indirection table if RSS is
1541 if (bp
->multi_mode
!= ETH_RSS_MODE_DISABLED
) {
1542 for (i
= 0; i
< sizeof(ind_table
); i
++)
1544 bp
->fp
->cl_id
+ (i
% num_eth_queues
);
1548 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1549 * per-port, so if explicit configuration is needed , do it only
1552 * For 57712 and newer on the other hand it's a per-function
1555 return bnx2x_config_rss_pf(bp
, ind_table
,
1556 bp
->port
.pmf
|| !CHIP_IS_E1x(bp
));
1559 int bnx2x_config_rss_pf(struct bnx2x
*bp
, u8
*ind_table
, bool config_hash
)
1561 struct bnx2x_config_rss_params params
= {0};
1564 /* Although RSS is meaningless when there is a single HW queue we
1565 * still need it enabled in order to have HW Rx hash generated.
1567 * if (!is_eth_multi(bp))
1568 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1571 params
.rss_obj
= &bp
->rss_conf_obj
;
1573 __set_bit(RAMROD_COMP_WAIT
, ¶ms
.ramrod_flags
);
1576 switch (bp
->multi_mode
) {
1577 case ETH_RSS_MODE_DISABLED
:
1578 __set_bit(BNX2X_RSS_MODE_DISABLED
, ¶ms
.rss_flags
);
1580 case ETH_RSS_MODE_REGULAR
:
1581 __set_bit(BNX2X_RSS_MODE_REGULAR
, ¶ms
.rss_flags
);
1583 case ETH_RSS_MODE_VLAN_PRI
:
1584 __set_bit(BNX2X_RSS_MODE_VLAN_PRI
, ¶ms
.rss_flags
);
1586 case ETH_RSS_MODE_E1HOV_PRI
:
1587 __set_bit(BNX2X_RSS_MODE_E1HOV_PRI
, ¶ms
.rss_flags
);
1589 case ETH_RSS_MODE_IP_DSCP
:
1590 __set_bit(BNX2X_RSS_MODE_IP_DSCP
, ¶ms
.rss_flags
);
1593 BNX2X_ERR("Unknown multi_mode: %d\n", bp
->multi_mode
);
1597 /* If RSS is enabled */
1598 if (bp
->multi_mode
!= ETH_RSS_MODE_DISABLED
) {
1599 /* RSS configuration */
1600 __set_bit(BNX2X_RSS_IPV4
, ¶ms
.rss_flags
);
1601 __set_bit(BNX2X_RSS_IPV4_TCP
, ¶ms
.rss_flags
);
1602 __set_bit(BNX2X_RSS_IPV6
, ¶ms
.rss_flags
);
1603 __set_bit(BNX2X_RSS_IPV6_TCP
, ¶ms
.rss_flags
);
1606 params
.rss_result_mask
= MULTI_MASK
;
1608 memcpy(params
.ind_table
, ind_table
, sizeof(params
.ind_table
));
1612 for (i
= 0; i
< sizeof(params
.rss_key
) / 4; i
++)
1613 params
.rss_key
[i
] = random32();
1615 __set_bit(BNX2X_RSS_SET_SRCH
, ¶ms
.rss_flags
);
1619 return bnx2x_config_rss(bp
, ¶ms
);
1622 static inline int bnx2x_init_hw(struct bnx2x
*bp
, u32 load_code
)
1624 struct bnx2x_func_state_params func_params
= {0};
1626 /* Prepare parameters for function state transitions */
1627 __set_bit(RAMROD_COMP_WAIT
, &func_params
.ramrod_flags
);
1629 func_params
.f_obj
= &bp
->func_obj
;
1630 func_params
.cmd
= BNX2X_F_CMD_HW_INIT
;
1632 func_params
.params
.hw_init
.load_phase
= load_code
;
1634 return bnx2x_func_state_change(bp
, &func_params
);
1638 * Cleans the object that have internal lists without sending
1639 * ramrods. Should be run when interrutps are disabled.
1641 static void bnx2x_squeeze_objects(struct bnx2x
*bp
)
1644 unsigned long ramrod_flags
= 0, vlan_mac_flags
= 0;
1645 struct bnx2x_mcast_ramrod_params rparam
= {0};
1646 struct bnx2x_vlan_mac_obj
*mac_obj
= &bp
->fp
->mac_obj
;
1648 /***************** Cleanup MACs' object first *************************/
1650 /* Wait for completion of requested */
1651 __set_bit(RAMROD_COMP_WAIT
, &ramrod_flags
);
1652 /* Perform a dry cleanup */
1653 __set_bit(RAMROD_DRV_CLR_ONLY
, &ramrod_flags
);
1655 /* Clean ETH primary MAC */
1656 __set_bit(BNX2X_ETH_MAC
, &vlan_mac_flags
);
1657 rc
= mac_obj
->delete_all(bp
, &bp
->fp
->mac_obj
, &vlan_mac_flags
,
1660 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc
);
1662 /* Cleanup UC list */
1664 __set_bit(BNX2X_UC_LIST_MAC
, &vlan_mac_flags
);
1665 rc
= mac_obj
->delete_all(bp
, mac_obj
, &vlan_mac_flags
,
1668 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc
);
1670 /***************** Now clean mcast object *****************************/
1671 rparam
.mcast_obj
= &bp
->mcast_obj
;
1672 __set_bit(RAMROD_DRV_CLR_ONLY
, &rparam
.ramrod_flags
);
1674 /* Add a DEL command... */
1675 rc
= bnx2x_config_mcast(bp
, &rparam
, BNX2X_MCAST_CMD_DEL
);
1677 BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
1678 "object: %d\n", rc
);
1680 /* ...and wait until all pending commands are cleared */
1681 rc
= bnx2x_config_mcast(bp
, &rparam
, BNX2X_MCAST_CMD_CONT
);
1684 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1689 rc
= bnx2x_config_mcast(bp
, &rparam
, BNX2X_MCAST_CMD_CONT
);
1693 #ifndef BNX2X_STOP_ON_ERROR
1694 #define LOAD_ERROR_EXIT(bp, label) \
1696 (bp)->state = BNX2X_STATE_ERROR; \
1700 #define LOAD_ERROR_EXIT(bp, label) \
1702 (bp)->state = BNX2X_STATE_ERROR; \
1708 /* must be called with rtnl_lock */
1709 int bnx2x_nic_load(struct bnx2x
*bp
, int load_mode
)
1711 int port
= BP_PORT(bp
);
1715 #ifdef BNX2X_STOP_ON_ERROR
1716 if (unlikely(bp
->panic
))
1720 bp
->state
= BNX2X_STATE_OPENING_WAIT4_LOAD
;
1722 /* Set the initial link reported state to link down */
1723 bnx2x_acquire_phy_lock(bp
);
1724 memset(&bp
->last_reported_link
, 0, sizeof(bp
->last_reported_link
));
1725 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
1726 &bp
->last_reported_link
.link_report_flags
);
1727 bnx2x_release_phy_lock(bp
);
1729 /* must be called before memory allocation and HW init */
1730 bnx2x_ilt_set_info(bp
);
1733 * Zero fastpath structures preserving invariants like napi, which are
1734 * allocated only once, fp index, max_cos, bp pointer.
1735 * Also set fp->disable_tpa.
1737 for_each_queue(bp
, i
)
1741 /* Set the receive queues buffer size */
1742 bnx2x_set_rx_buf_size(bp
);
1744 if (bnx2x_alloc_mem(bp
))
1747 /* As long as bnx2x_alloc_mem() may possibly update
1748 * bp->num_queues, bnx2x_set_real_num_queues() should always
1751 rc
= bnx2x_set_real_num_queues(bp
);
1753 BNX2X_ERR("Unable to set real_num_queues\n");
1754 LOAD_ERROR_EXIT(bp
, load_error0
);
1757 /* configure multi cos mappings in kernel.
1758 * this configuration may be overriden by a multi class queue discipline
1759 * or by a dcbx negotiation result.
1761 bnx2x_setup_tc(bp
->dev
, bp
->max_cos
);
1763 bnx2x_napi_enable(bp
);
1765 /* Send LOAD_REQUEST command to MCP
1766 * Returns the type of LOAD command:
1767 * if it is the first port to be initialized
1768 * common blocks should be initialized, otherwise - not
1770 if (!BP_NOMCP(bp
)) {
1771 load_code
= bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_REQ
, 0);
1773 BNX2X_ERR("MCP response failure, aborting\n");
1775 LOAD_ERROR_EXIT(bp
, load_error1
);
1777 if (load_code
== FW_MSG_CODE_DRV_LOAD_REFUSED
) {
1778 rc
= -EBUSY
; /* other port in diagnostic mode */
1779 LOAD_ERROR_EXIT(bp
, load_error1
);
1783 int path
= BP_PATH(bp
);
1785 DP(NETIF_MSG_IFUP
, "NO MCP - load counts[%d] %d, %d, %d\n",
1786 path
, load_count
[path
][0], load_count
[path
][1],
1787 load_count
[path
][2]);
1788 load_count
[path
][0]++;
1789 load_count
[path
][1 + port
]++;
1790 DP(NETIF_MSG_IFUP
, "NO MCP - new load counts[%d] %d, %d, %d\n",
1791 path
, load_count
[path
][0], load_count
[path
][1],
1792 load_count
[path
][2]);
1793 if (load_count
[path
][0] == 1)
1794 load_code
= FW_MSG_CODE_DRV_LOAD_COMMON
;
1795 else if (load_count
[path
][1 + port
] == 1)
1796 load_code
= FW_MSG_CODE_DRV_LOAD_PORT
;
1798 load_code
= FW_MSG_CODE_DRV_LOAD_FUNCTION
;
1801 if ((load_code
== FW_MSG_CODE_DRV_LOAD_COMMON
) ||
1802 (load_code
== FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
) ||
1803 (load_code
== FW_MSG_CODE_DRV_LOAD_PORT
)) {
1806 * We need the barrier to ensure the ordering between the
1807 * writing to bp->port.pmf here and reading it from the
1808 * bnx2x_periodic_task().
1811 queue_delayed_work(bnx2x_wq
, &bp
->period_task
, 0);
1815 DP(NETIF_MSG_LINK
, "pmf %d\n", bp
->port
.pmf
);
1817 /* Init Function state controlling object */
1818 bnx2x__init_func_obj(bp
);
1821 rc
= bnx2x_init_hw(bp
, load_code
);
1823 BNX2X_ERR("HW init failed, aborting\n");
1824 bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_DONE
, 0);
1825 LOAD_ERROR_EXIT(bp
, load_error2
);
1828 /* Connect to IRQs */
1829 rc
= bnx2x_setup_irqs(bp
);
1831 bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_DONE
, 0);
1832 LOAD_ERROR_EXIT(bp
, load_error2
);
1835 /* Setup NIC internals and enable interrupts */
1836 bnx2x_nic_init(bp
, load_code
);
1838 /* Init per-function objects */
1839 bnx2x_init_bp_objs(bp
);
1841 if (((load_code
== FW_MSG_CODE_DRV_LOAD_COMMON
) ||
1842 (load_code
== FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
)) &&
1843 (bp
->common
.shmem2_base
)) {
1844 if (SHMEM2_HAS(bp
, dcc_support
))
1845 SHMEM2_WR(bp
, dcc_support
,
1846 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV
|
1847 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV
));
1850 bp
->state
= BNX2X_STATE_OPENING_WAIT4_PORT
;
1851 rc
= bnx2x_func_start(bp
);
1853 BNX2X_ERR("Function start failed!\n");
1854 bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_DONE
, 0);
1855 LOAD_ERROR_EXIT(bp
, load_error3
);
1858 /* Send LOAD_DONE command to MCP */
1859 if (!BP_NOMCP(bp
)) {
1860 load_code
= bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_DONE
, 0);
1862 BNX2X_ERR("MCP response failure, aborting\n");
1864 LOAD_ERROR_EXIT(bp
, load_error3
);
1868 rc
= bnx2x_setup_leading(bp
);
1870 BNX2X_ERR("Setup leading failed!\n");
1871 LOAD_ERROR_EXIT(bp
, load_error3
);
1875 /* Enable Timer scan */
1876 REG_WR(bp
, TM_REG_EN_LINEAR0_TIMER
+ port
*4, 1);
1879 for_each_nondefault_queue(bp
, i
) {
1880 rc
= bnx2x_setup_queue(bp
, &bp
->fp
[i
], 0);
1882 LOAD_ERROR_EXIT(bp
, load_error4
);
1885 rc
= bnx2x_init_rss_pf(bp
);
1887 LOAD_ERROR_EXIT(bp
, load_error4
);
1889 /* Now when Clients are configured we are ready to work */
1890 bp
->state
= BNX2X_STATE_OPEN
;
1892 /* Configure a ucast MAC */
1893 rc
= bnx2x_set_eth_mac(bp
, true);
1895 LOAD_ERROR_EXIT(bp
, load_error4
);
1897 if (bp
->pending_max
) {
1898 bnx2x_update_max_mf_config(bp
, bp
->pending_max
);
1899 bp
->pending_max
= 0;
1903 bnx2x_initial_phy_init(bp
, load_mode
);
1905 /* Start fast path */
1907 /* Initialize Rx filter. */
1908 netif_addr_lock_bh(bp
->dev
);
1909 bnx2x_set_rx_mode(bp
->dev
);
1910 netif_addr_unlock_bh(bp
->dev
);
1913 switch (load_mode
) {
1915 /* Tx queue should be only reenabled */
1916 netif_tx_wake_all_queues(bp
->dev
);
1920 netif_tx_start_all_queues(bp
->dev
);
1921 smp_mb__after_clear_bit();
1925 bp
->state
= BNX2X_STATE_DIAG
;
1933 bnx2x__link_status_update(bp
);
1935 /* start the timer */
1936 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
1939 bnx2x_setup_cnic_irq_info(bp
);
1940 if (bp
->state
== BNX2X_STATE_OPEN
)
1941 bnx2x_cnic_notify(bp
, CNIC_CTL_START_CMD
);
1943 bnx2x_inc_load_cnt(bp
);
1945 /* Wait for all pending SP commands to complete */
1946 if (!bnx2x_wait_sp_comp(bp
, ~0x0UL
)) {
1947 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
1948 bnx2x_nic_unload(bp
, UNLOAD_CLOSE
);
1952 bnx2x_dcbx_init(bp
);
1955 #ifndef BNX2X_STOP_ON_ERROR
1958 /* Disable Timer scan */
1959 REG_WR(bp
, TM_REG_EN_LINEAR0_TIMER
+ port
*4, 0);
1962 bnx2x_int_disable_sync(bp
, 1);
1964 /* Clean queueable objects */
1965 bnx2x_squeeze_objects(bp
);
1967 /* Free SKBs, SGEs, TPA pool and driver internals */
1968 bnx2x_free_skbs(bp
);
1969 for_each_rx_queue(bp
, i
)
1970 bnx2x_free_rx_sge_range(bp
, bp
->fp
+ i
, NUM_RX_SGE
);
1975 if (!BP_NOMCP(bp
)) {
1976 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP
, 0);
1977 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
, 0);
1982 bnx2x_napi_disable(bp
);
1987 #endif /* ! BNX2X_STOP_ON_ERROR */
1990 /* must be called with rtnl_lock */
1991 int bnx2x_nic_unload(struct bnx2x
*bp
, int unload_mode
)
1994 bool global
= false;
1996 if ((bp
->state
== BNX2X_STATE_CLOSED
) ||
1997 (bp
->state
== BNX2X_STATE_ERROR
)) {
1998 /* We can get here if the driver has been unloaded
1999 * during parity error recovery and is either waiting for a
2000 * leader to complete or for other functions to unload and
2001 * then ifdown has been issued. In this case we want to
2002 * unload and let other functions to complete a recovery
2005 bp
->recovery_state
= BNX2X_RECOVERY_DONE
;
2007 bnx2x_release_leader_lock(bp
);
2010 DP(NETIF_MSG_HW
, "Releasing a leadership...\n");
2016 * It's important to set the bp->state to the value different from
2017 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2018 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2020 bp
->state
= BNX2X_STATE_CLOSING_WAIT4_HALT
;
2024 bnx2x_tx_disable(bp
);
2027 bnx2x_cnic_notify(bp
, CNIC_CTL_STOP_CMD
);
2030 bp
->rx_mode
= BNX2X_RX_MODE_NONE
;
2032 del_timer_sync(&bp
->timer
);
2034 /* Set ALWAYS_ALIVE bit in shmem */
2035 bp
->fw_drv_pulse_wr_seq
|= DRV_PULSE_ALWAYS_ALIVE
;
2037 bnx2x_drv_pulse(bp
);
2039 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
2041 /* Cleanup the chip if needed */
2042 if (unload_mode
!= UNLOAD_RECOVERY
)
2043 bnx2x_chip_cleanup(bp
, unload_mode
);
2045 /* Send the UNLOAD_REQUEST to the MCP */
2046 bnx2x_send_unload_req(bp
, unload_mode
);
2049 * Prevent transactions to host from the functions on the
2050 * engine that doesn't reset global blocks in case of global
2051 * attention once gloabl blocks are reset and gates are opened
2052 * (the engine which leader will perform the recovery
2055 if (!CHIP_IS_E1x(bp
))
2056 bnx2x_pf_disable(bp
);
2058 /* Disable HW interrupts, NAPI */
2059 bnx2x_netif_stop(bp
, 1);
2064 /* Report UNLOAD_DONE to MCP */
2065 bnx2x_send_unload_done(bp
);
2069 * At this stage no more interrupts will arrive so we may safly clean
2070 * the queueable objects here in case they failed to get cleaned so far.
2072 bnx2x_squeeze_objects(bp
);
2074 /* There should be no more pending SP commands at this stage */
2079 /* Free SKBs, SGEs, TPA pool and driver internals */
2080 bnx2x_free_skbs(bp
);
2081 for_each_rx_queue(bp
, i
)
2082 bnx2x_free_rx_sge_range(bp
, bp
->fp
+ i
, NUM_RX_SGE
);
2086 bp
->state
= BNX2X_STATE_CLOSED
;
2088 /* Check if there are pending parity attentions. If there are - set
2089 * RECOVERY_IN_PROGRESS.
2091 if (bnx2x_chk_parity_attn(bp
, &global
, false)) {
2092 bnx2x_set_reset_in_progress(bp
);
2094 /* Set RESET_IS_GLOBAL if needed */
2096 bnx2x_set_reset_global(bp
);
2100 /* The last driver must disable a "close the gate" if there is no
2101 * parity attention or "process kill" pending.
2103 if (!bnx2x_dec_load_cnt(bp
) && bnx2x_reset_is_done(bp
, BP_PATH(bp
)))
2104 bnx2x_disable_close_the_gate(bp
);
2109 int bnx2x_set_power_state(struct bnx2x
*bp
, pci_power_t state
)
2113 /* If there is no power capability, silently succeed */
2115 DP(NETIF_MSG_HW
, "No power capability. Breaking.\n");
2119 pci_read_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
, &pmcsr
);
2123 pci_write_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
,
2124 ((pmcsr
& ~PCI_PM_CTRL_STATE_MASK
) |
2125 PCI_PM_CTRL_PME_STATUS
));
2127 if (pmcsr
& PCI_PM_CTRL_STATE_MASK
)
2128 /* delay required during transition out of D3hot */
2133 /* If there are other clients above don't
2134 shut down the power */
2135 if (atomic_read(&bp
->pdev
->enable_cnt
) != 1)
2137 /* Don't shut down the power for emulation and FPGA */
2138 if (CHIP_REV_IS_SLOW(bp
))
2141 pmcsr
&= ~PCI_PM_CTRL_STATE_MASK
;
2145 pmcsr
|= PCI_PM_CTRL_PME_ENABLE
;
2147 pci_write_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
,
2150 /* No more memory access after this point until
2151 * device is brought back to D0.
2162 * net_device service functions
2164 int bnx2x_poll(struct napi_struct
*napi
, int budget
)
2168 struct bnx2x_fastpath
*fp
= container_of(napi
, struct bnx2x_fastpath
,
2170 struct bnx2x
*bp
= fp
->bp
;
2173 #ifdef BNX2X_STOP_ON_ERROR
2174 if (unlikely(bp
->panic
)) {
2175 napi_complete(napi
);
2180 for_each_cos_in_tx_queue(fp
, cos
)
2181 if (bnx2x_tx_queue_has_work(&fp
->txdata
[cos
]))
2182 bnx2x_tx_int(bp
, &fp
->txdata
[cos
]);
2185 if (bnx2x_has_rx_work(fp
)) {
2186 work_done
+= bnx2x_rx_int(fp
, budget
- work_done
);
2188 /* must not complete if we consumed full budget */
2189 if (work_done
>= budget
)
2193 /* Fall out from the NAPI loop if needed */
2194 if (!(bnx2x_has_rx_work(fp
) || bnx2x_has_tx_work(fp
))) {
2196 /* No need to update SB for FCoE L2 ring as long as
2197 * it's connected to the default SB and the SB
2198 * has been updated when NAPI was scheduled.
2200 if (IS_FCOE_FP(fp
)) {
2201 napi_complete(napi
);
2206 bnx2x_update_fpsb_idx(fp
);
2207 /* bnx2x_has_rx_work() reads the status block,
2208 * thus we need to ensure that status block indices
2209 * have been actually read (bnx2x_update_fpsb_idx)
2210 * prior to this check (bnx2x_has_rx_work) so that
2211 * we won't write the "newer" value of the status block
2212 * to IGU (if there was a DMA right after
2213 * bnx2x_has_rx_work and if there is no rmb, the memory
2214 * reading (bnx2x_update_fpsb_idx) may be postponed
2215 * to right before bnx2x_ack_sb). In this case there
2216 * will never be another interrupt until there is
2217 * another update of the status block, while there
2218 * is still unhandled work.
2222 if (!(bnx2x_has_rx_work(fp
) || bnx2x_has_tx_work(fp
))) {
2223 napi_complete(napi
);
2224 /* Re-enable interrupts */
2226 "Update index to %d\n", fp
->fp_hc_idx
);
2227 bnx2x_ack_sb(bp
, fp
->igu_sb_id
, USTORM_ID
,
2228 le16_to_cpu(fp
->fp_hc_idx
),
2238 /* we split the first BD into headers and data BDs
2239 * to ease the pain of our fellow microcode engineers
2240 * we use one mapping for both BDs
2241 * So far this has only been observed to happen
2242 * in Other Operating Systems(TM)
2244 static noinline u16
bnx2x_tx_split(struct bnx2x
*bp
,
2245 struct bnx2x_fp_txdata
*txdata
,
2246 struct sw_tx_bd
*tx_buf
,
2247 struct eth_tx_start_bd
**tx_bd
, u16 hlen
,
2248 u16 bd_prod
, int nbd
)
2250 struct eth_tx_start_bd
*h_tx_bd
= *tx_bd
;
2251 struct eth_tx_bd
*d_tx_bd
;
2253 int old_len
= le16_to_cpu(h_tx_bd
->nbytes
);
2255 /* first fix first BD */
2256 h_tx_bd
->nbd
= cpu_to_le16(nbd
);
2257 h_tx_bd
->nbytes
= cpu_to_le16(hlen
);
2259 DP(NETIF_MSG_TX_QUEUED
, "TSO split header size is %d "
2260 "(%x:%x) nbd %d\n", h_tx_bd
->nbytes
, h_tx_bd
->addr_hi
,
2261 h_tx_bd
->addr_lo
, h_tx_bd
->nbd
);
2263 /* now get a new data BD
2264 * (after the pbd) and fill it */
2265 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
2266 d_tx_bd
= &txdata
->tx_desc_ring
[bd_prod
].reg_bd
;
2268 mapping
= HILO_U64(le32_to_cpu(h_tx_bd
->addr_hi
),
2269 le32_to_cpu(h_tx_bd
->addr_lo
)) + hlen
;
2271 d_tx_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
2272 d_tx_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
2273 d_tx_bd
->nbytes
= cpu_to_le16(old_len
- hlen
);
2275 /* this marks the BD as one that has no individual mapping */
2276 tx_buf
->flags
|= BNX2X_TSO_SPLIT_BD
;
2278 DP(NETIF_MSG_TX_QUEUED
,
2279 "TSO split data size is %d (%x:%x)\n",
2280 d_tx_bd
->nbytes
, d_tx_bd
->addr_hi
, d_tx_bd
->addr_lo
);
2283 *tx_bd
= (struct eth_tx_start_bd
*)d_tx_bd
;
2288 static inline u16
bnx2x_csum_fix(unsigned char *t_header
, u16 csum
, s8 fix
)
2291 csum
= (u16
) ~csum_fold(csum_sub(csum
,
2292 csum_partial(t_header
- fix
, fix
, 0)));
2295 csum
= (u16
) ~csum_fold(csum_add(csum
,
2296 csum_partial(t_header
, -fix
, 0)));
2298 return swab16(csum
);
2301 static inline u32
bnx2x_xmit_type(struct bnx2x
*bp
, struct sk_buff
*skb
)
2305 if (skb
->ip_summed
!= CHECKSUM_PARTIAL
)
2309 if (vlan_get_protocol(skb
) == htons(ETH_P_IPV6
)) {
2311 if (ipv6_hdr(skb
)->nexthdr
== IPPROTO_TCP
)
2312 rc
|= XMIT_CSUM_TCP
;
2316 if (ip_hdr(skb
)->protocol
== IPPROTO_TCP
)
2317 rc
|= XMIT_CSUM_TCP
;
2321 if (skb_is_gso_v6(skb
))
2322 rc
|= XMIT_GSO_V6
| XMIT_CSUM_TCP
| XMIT_CSUM_V6
;
2323 else if (skb_is_gso(skb
))
2324 rc
|= XMIT_GSO_V4
| XMIT_CSUM_V4
| XMIT_CSUM_TCP
;
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB */
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
					skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
			}
		} else {
			/* in non-LSO a too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
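/* Illustrative sketch (not driver code): the windowing rule checked above.
 * The firmware can fetch only a limited number of descriptors per MSS-sized
 * chunk, so every run of wnd_size consecutive fragments must carry at least
 * one MSS worth of payload; otherwise the skb has to be linearized. This is
 * the same invariant, written naively instead of incrementally; local names.
 */
static inline int sketch_frags_need_linearize(const unsigned int *frag_len,
					      int nr_frags, int wnd_size,
					      unsigned int lso_mss)
{
	int start;

	for (start = 0; start + wnd_size <= nr_frags; start++) {
		unsigned int wnd_sum = 0;
		int i;

		for (i = 0; i < wnd_size; i++)
			wnd_sum += frag_len[start + i];
		if (wnd_sum < lso_mss)
			return 1;	/* a window too small for one MSS: copy needed */
	}
	return 0;
}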
static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
					u32 xmit_type)
{
	*parsing_data |= (skb_shinfo(skb)->gso_size <<
			  ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
			 ETH_TX_PARSE_BD_E2_LSO_MSS;
	if ((xmit_type & XMIT_GSO_V6) &&
	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}
 * bnx2x_set_pbd_gso - update PBD in GSO case.
 *
 * @skb:	packet skb
 * @pbd:	parse BD
 * @xmit_type:	xmit flags
 */
static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
				     struct eth_tx_parse_bd_e1x *pbd,
				     u32 xmit_type)
{
	pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
	pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
	pbd->tcp_flags = pbd_tcp_flags(skb);

	if (xmit_type & XMIT_GSO_V4) {
		pbd->ip_id = swab16(ip_hdr(skb)->id);
		pbd->tcp_pseudo_csum =
			swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						  ip_hdr(skb)->daddr,
						  0, IPPROTO_TCP, 0));
	} else
		pbd->tcp_pseudo_csum =
			swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						&ipv6_hdr(skb)->daddr,
						0, IPPROTO_TCP, 0));

	pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
}
 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
 *
 * @bp:			driver handle
 * @skb:		packet skb
 * @parsing_data:	data to be updated
 * @xmit_type:		xmit flags
 */
static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
				       u32 *parsing_data, u32 xmit_type)
{
	*parsing_data |=
		((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
		 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
		ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;

	if (xmit_type & XMIT_CSUM_TCP) {
		*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;

		return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
	}

	/* We support checksum offload for TCP and UDP only.
	 * No need to pass the UDP header length - it's a constant.
	 */
	return skb_transport_header(skb) +
	       sizeof(struct udphdr) - skb->data;
}
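/* Illustrative sketch (not driver code): the field packing done above.
 * Offsets are expressed in 16-bit words (offset >> 1) and TCP header lengths
 * in 32-bit dwords (len / 4), shifted into place and masked so they cannot
 * spill into neighbouring fields. The shift/mask values below are
 * placeholders, not the driver's real ETH_TX_PARSE_BD_E2_* constants.
 */
#define SKETCH_HDR_OFF_SHIFT	0
#define SKETCH_HDR_OFF_MASK	0x3fff
#define SKETCH_TCP_LEN_SHIFT	14
#define SKETCH_TCP_LEN_MASK	(0xf << SKETCH_TCP_LEN_SHIFT)

static inline unsigned int sketch_pack_parsing_data(unsigned int l4_off_bytes,
						    unsigned int tcp_hdr_bytes)
{
	unsigned int parsing_data = 0;

	parsing_data |= ((l4_off_bytes >> 1) << SKETCH_HDR_OFF_SHIFT) &
			SKETCH_HDR_OFF_MASK;
	parsing_data |= ((tcp_hdr_bytes / 4) << SKETCH_TCP_LEN_SHIFT) &
			SKETCH_TCP_LEN_MASK;
	return parsing_data;
}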
static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
				      struct eth_tx_start_bd *tx_start_bd,
				      u32 xmit_type)
{
	tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

	if (xmit_type & XMIT_CSUM_V4)
		tx_start_bd->bd_flags.as_bitfield |=
					ETH_TX_BD_FLAGS_IP_CSUM;
	else
		tx_start_bd->bd_flags.as_bitfield |=
					ETH_TX_BD_FLAGS_IPV6;

	if (!(xmit_type & XMIT_CSUM_TCP))
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
}
 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
 *
 * @bp:		driver handle
 * @skb:	packet skb
 * @pbd:	parse BD to be updated
 * @xmit_type:	xmit flags
 */
static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
				    struct eth_tx_parse_bd_e1x *pbd,
				    u32 xmit_type)
{
	u8 hlen = (skb_network_header(skb) - skb->data) >> 1;

	/* for now NS flag is not used in Linux */
	pbd->global_data =
		(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
			 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));

	pbd->ip_hlen_w = (skb_transport_header(skb) -
			  skb_network_header(skb)) >> 1;

	hlen += pbd->ip_hlen_w;

	/* We support checksum offload for TCP and UDP only */
	if (xmit_type & XMIT_CSUM_TCP)
		hlen += tcp_hdrlen(skb) / 2;
	else
		hlen += sizeof(struct udphdr) / 2;

	pbd->total_hlen_w = cpu_to_le16(hlen);
	hlen = hlen*2;

	if (xmit_type & XMIT_CSUM_TCP) {
		pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

	} else {
		s8 fix = SKB_CS_OFF(skb); /* signed! */

		DP(NETIF_MSG_TX_QUEUED,
		   "hlen %d fix %d csum before fix %x\n",
		   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));

		/* HW bug: fixup the CSUM */
		pbd->tcp_pseudo_csum =
			bnx2x_csum_fix(skb_transport_header(skb),
				       SKB_CS(skb), fix);

		DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
		   pbd->tcp_pseudo_csum);
	}

	return hlen;
}
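/* Illustrative sketch (not driver code): the header-length bookkeeping used
 * above. The parse BD stores lengths in 16-bit words, so byte counts are
 * halved; the total covers the Ethernet + IP headers plus the L4 header.
 * Local helper, not a driver API.
 */
static inline unsigned short sketch_total_hlen_w(unsigned int eth_ip_hdr_bytes,
						 unsigned int l4_hdr_bytes)
{
	/* both inputs are even for well-formed headers, so nothing is lost */
	return (unsigned short)((eth_ip_hdr_bytes + l4_hdr_bytes) / 2);
}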
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct bnx2x_fp_txdata *txdata;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd, *first_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
	u32 pbd_e2_parsing_data = 0;
	u16 pkt_prod, bd_prod;
	int nbd, txq_index, fp_index, txdata_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;
	struct ethhdr *eth;
	u8 mac_type = UNICAST_ADDRESS;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	txq_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, txq_index);

	BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);

	/* decode the fastpath index and the cos index from the txq */
	fp_index = TXQ_TO_FP(txq_index);
	txdata_index = TXQ_TO_COS(txq_index);

	/*
	 * Override the above for the FCoE queue:
	 *   - FCoE fp entry is right after the ETH entries.
	 *   - FCoE L2 queue uses bp->txdata[0] only.
	 */
	if (unlikely(!NO_FCOE(bp) && (txq_index ==
				      bnx2x_fcoe_tx(bp, txq_index)))) {
		fp_index = FCOE_IDX;
		txdata_index = 0;
	}

	/* enable this debug print to view the transmission queue being used
	DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d\n",
	   txq_index, fp_index, txdata_index); */

	/* locate the fastpath and the txdata */
	fp = &bp->fp[fp_index];
	txdata = &fp->txdata[txdata_index];

	/* enable this debug print to view the transmission details
	DP(BNX2X_MSG_FP, "transmitting packet cid %d fp index %d txdata_index %d"
	   " tx_data ptr %p fp pointer %p\n",
	   txdata->cid, fp_index, txdata_index, txdata, fp); */

	if (unlikely(bnx2x_tx_avail(bp, txdata) <
		     (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
	   "protocol(%x,%x) gso type %x xmit_type %x\n",
	   txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

	eth = (struct ethhdr *)skb->data;

	/* set flag according to packet type (UNICAST_ADDRESS is default) */
	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
		if (is_broadcast_ether_addr(eth->h_dest))
			mac_type = BROADCAST_ADDRESS;
		else
			mac_type = MULTICAST_ADDRESS;
	}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/* Map skb linear data for DMA */
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
		   "silently dropping this SKB\n");
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pdb sizes are in words - NOT DWORDS!
	*/

	/* get current pkt produced now - advance it just before sending packet
	 * since mapping of pages may fail and cause packet to be dropped
	 */
	pkt_prod = txdata->tx_pkt_prod;
	bd_prod = TX_BD(txdata->tx_bd_prod);

	/* get a tx_buf and first BD
	 * tx_start_bd may be changed during SPLIT,
	 * but first_bd will always stay first
	 */
	tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
	first_bd = tx_start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
		 mac_type);

	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);

	/* remember the first BD of the packet */
	tx_buf->first_bd = txdata->tx_bd_prod;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);

	if (vlan_tx_tag_present(skb)) {
		tx_start_bd->vlan_or_ethertype =
			cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |=
			(X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
	} else
		tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	if (xmit_type & XMIT_CSUM)
		bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);

	if (!CHIP_IS_E1x(bp)) {
		pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
		/* Set PBD in checksum offload case */
		if (xmit_type & XMIT_CSUM)
			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
						     &pbd_e2_parsing_data,
						     xmit_type);

		/*
		 * fill in the MAC addresses in the PBD - for local
		 * switching
		 */
		bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
				      &pbd_e2->src_mac_addr_mid,
				      &pbd_e2->src_mac_addr_lo,
				      eth->h_source);
		bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
				      &pbd_e2->dst_mac_addr_mid,
				      &pbd_e2->dst_mac_addr_lo,
				      eth->h_dest);
	} else {
		pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
		memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
		/* Set PBD in checksum offload case */
		if (xmit_type & XMIT_CSUM)
			hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
	}

	/* Setup the data pointer of the first BD of the packet */
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_start_bd->vlan_or_ethertype));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
						 &tx_start_bd, hlen,
						 bd_prod, ++nbd);
		if (!CHIP_IS_E1x(bp))
			bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
					     xmit_type);
		else
			bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
	}

	/* Set the PBD's parsing_data field if not zero
	 * (for the chips newer than 57711).
	 */
	if (pbd_e2_parsing_data)
		pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);

	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	/* Handle fragmented skb */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {

			DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
			   "dropping packet...\n");

			/* we need to unmap all buffers already mapped for
			 * this SKB; first_bd->nbd needs to be properly
			 * updated before the call to bnx2x_free_tx_pkt
			 */
			first_bd->nbd = cpu_to_le16(nbd);
			bnx2x_free_tx_pkt(bp, txdata,
					  TX_BD(txdata->tx_pkt_prod));
			return NETDEV_TX_OK;
		}

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
		le16_add_cpu(&pkt_size, skb_frag_size(frag));
		nbd++;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	/* update with actual num BDs */
	first_bd->nbd = cpu_to_le16(nbd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	/* total_pkt_bytes should be set on the first data BD if
	 * it's not an LSO packet and there is more than one
	 * data BD. In this case pkt_size is limited by an MTU value.
	 * However we prefer to set it for an LSO packet (while we don't
	 * have to) in order to save some CPU cycles in the non-LSO
	 * case, which we care about much more.
	 */
	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd_e1x)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
		   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
		   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
		   le16_to_cpu(pbd_e1x->total_hlen_w));
	if (pbd_e2)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
		   pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
		   pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
		   pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
		   pbd_e2->parsing_data);
	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	txdata->tx_pkt_prod++;
	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes packets must have BDs.
	 */
	wmb();

	txdata->tx_db.data.prod += nbd;

	DOORBELL(bp, txdata->cid, txdata->tx_db.raw);

	txdata->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and the read
		 * of the tx consumer index
		 */
		smp_mb();

		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
}
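/* Illustrative sketch (not driver code): the producer bookkeeping used at the
 * end of bnx2x_start_xmit(). Each packet consumes nbd descriptors; the
 * software producer grows without bound and is masked to a ring index only
 * when the ring is actually addressed, and the doorbell is written only after
 * all BDs are in memory. The ring size below is a placeholder.
 */
#define SKETCH_RING_SIZE	4096	/* must be a power of two */

static inline unsigned short sketch_ring_idx(unsigned int prod)
{
	return (unsigned short)(prod & (SKETCH_RING_SIZE - 1));
}

static inline unsigned int sketch_tx_avail(unsigned int prod, unsigned int cons)
{
	/* descriptors still free between consumer and producer */
	return SKETCH_RING_SIZE - (prod - cons);
}

static inline unsigned int sketch_publish_pkt(unsigned int prod, int nbd)
{
	/* advance the producer by the BDs this packet used; the subsequent
	 * doorbell write tells the chip how far it may fetch
	 */
	return prod + (unsigned int)nbd;
}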
 * bnx2x_setup_tc - routine to configure net_device for multi tc
 *
 * @netdev: net device to configure
 * @tc: number of traffic classes to enable
 *
 * callback connected to the ndo_setup_tc function pointer
 */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
{
	int cos, prio, count, offset;
	struct bnx2x *bp = netdev_priv(dev);

	/* setup tc must be called under rtnl lock */
	ASSERT_RTNL();

	/* no traffic classes requested. aborting */
	if (!num_tc) {
		netdev_reset_tc(dev);
		return 0;
	}

	/* requested to support too many traffic classes */
	if (num_tc > bp->max_cos) {
		DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
		   " requested: %d. max supported is %d\n",
		   num_tc, bp->max_cos);
		return -EINVAL;
	}

	/* declare amount of supported traffic classes */
	if (netdev_set_num_tc(dev, num_tc)) {
		DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes\n",
		   num_tc);
		return -EINVAL;
	}

	/* configure priority to traffic class mapping */
	for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
		netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
		DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n",
		   prio, bp->prio_to_cos[prio]);
	}

	/* Use this configuration to differentiate tc0 from other COSes.
	   This can be used for ets or pfc, and save the effort of setting
	   up a multi-class queue disc or negotiating DCBX with a switch:
	netdev_set_prio_tc_map(dev, 0, 0);
	DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
	for (prio = 1; prio < 16; prio++) {
		netdev_set_prio_tc_map(dev, prio, 1);
		DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
	} */

	/* configure traffic class to transmission queue mapping */
	for (cos = 0; cos < bp->max_cos; cos++) {
		count = BNX2X_NUM_ETH_QUEUES(bp);
		offset = cos * MAX_TXQS_PER_COS;
		netdev_set_tc_queue(dev, cos, count, offset);
		DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d\n",
		   cos, offset, count);
	}

	return 0;
}
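/* Illustrative sketch (not driver code): the traffic-class to queue mapping
 * programmed above. Each class (CoS) owns a contiguous block of transmit
 * queues, so the block for class 'cos' starts at cos * queues_per_cos and
 * spans the number of ethernet queues in use. Names are local to the sketch.
 */
static inline void sketch_tc_queue_range(int cos, int num_eth_queues,
					 int queues_per_cos,
					 int *offset, int *count)
{
	*offset = cos * queues_per_cos;	/* first txq belonging to this class */
	*count = num_eth_queues;	/* how many queues the class spans */
}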
/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	if (netif_running(dev)) {
		rc = bnx2x_set_eth_mac(bp, false);
		if (rc)
			return rc;
	}

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (netif_running(dev))
		rc = bnx2x_set_eth_mac(bp, true);

	return rc;
}
static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
{
	union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
	struct bnx2x_fastpath *fp = &bp->fp[fp_index];
	u8 cos;

	/* Common: the FCoE queue uses the default status block */
	if (IS_FCOE_IDX(fp_index)) {
		memset(sb, 0, sizeof(union host_hc_status_block));
		fp->status_blk_mapping = 0;
	} else {
		if (!CHIP_IS_E1x(bp))
			BNX2X_PCI_FREE(sb->e2_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_FREE(sb->e1x_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e1x));
	}

	/* Rx */
	if (!skip_rx_queue(bp, fp_index)) {
		bnx2x_free_rx_bds(fp);

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
			       bnx2x_fp(bp, fp_index, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
			       bnx2x_fp(bp, fp_index, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
			       bnx2x_fp(bp, fp_index, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}

	/* Tx */
	if (!skip_tx_queue(bp, fp_index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];

			DP(BNX2X_MSG_SP,
			   "freeing tx memory of fp %d cos %d cid %d\n",
			   fp_index, cos, txdata->cid);

			BNX2X_FREE(txdata->tx_buf_ring);
			BNX2X_PCI_FREE(txdata->tx_desc_ring,
				       txdata->tx_desc_mapping,
				       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
		}
	}
	/* end of fastpath */
}
void bnx2x_free_fp_mem(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}
static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
{
	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);

	if (!CHIP_IS_E1x(bp)) {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e2_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e2_sb->sb.running_index;
	} else {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e1x_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e1x_sb->sb.running_index;
	}
}
static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
{
	union host_hc_status_block *sb;
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int ring_size = 0;
	u8 cos;
	int rx_ring_size = 0;

	/* if rx_ring_size specified - use it */
	if (!bp->rx_ring_size) {

		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);

		/* allocate at least number of buffers required by FW */
		rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
				     MIN_RX_SIZE_TPA, rx_ring_size);

		bp->rx_ring_size = rx_ring_size;
	} else
		rx_ring_size = bp->rx_ring_size;

	/* Common */
	sb = &bnx2x_fp(bp, index, status_blk);

	if (!IS_FCOE_IDX(index)) {
		/* status blocks */
		if (!CHIP_IS_E1x(bp))
			BNX2X_PCI_ALLOC(sb->e2_sb,
				&bnx2x_fp(bp, index, status_blk_mapping),
				sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_ALLOC(sb->e1x_sb,
				&bnx2x_fp(bp, index, status_blk_mapping),
				sizeof(struct host_hc_status_block_e1x));
	}

	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
	 * set shortcuts for it.
	 */
	if (!IS_FCOE_IDX(index))
		set_sb_shortcuts(bp, index);

	/* Tx */
	if (!skip_tx_queue(bp, index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];

			DP(BNX2X_MSG_SP, "allocating tx memory of "
					 "fp %d cos %d\n", index, cos);

			BNX2X_ALLOC(txdata->tx_buf_ring,
				    sizeof(struct sw_tx_bd) * NUM_TX_BD);
			BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
					&txdata->tx_desc_mapping,
					sizeof(union eth_tx_bd_types) * NUM_TX_BD);
		}
	}

	/* Rx */
	if (!skip_rx_queue(bp, index)) {
		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
				&bnx2x_fp(bp, index, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
				&bnx2x_fp(bp, index, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
				&bnx2x_fp(bp, index, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);

		bnx2x_set_next_page_rx_bd(fp);

		bnx2x_set_next_page_rx_cq(fp);

		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
		if (ring_size < rx_ring_size)
			goto alloc_mem_err;
	}

	return 0;

/* handles low memory cases */
alloc_mem_err:
	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
		  index, ring_size);
	/* FW will drop all packets if queue is not big enough,
	 * In these cases we disable the queue
	 * Min size is different for OOO, TPA and non-TPA queues
	 */
	if (ring_size < (fp->disable_tpa ?
			 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
		/* release memory allocated for this queue */
		bnx2x_free_fp_mem_at(bp, index);
		return -ENOMEM;
	}
	return 0;
}
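/* Illustrative sketch (not driver code): the ring sizing policy above. When
 * the user did not request a ring size, the available buffer budget is
 * divided evenly between the RX queues and then clamped up to the minimum
 * the firmware accepts (which differs for TPA and non-TPA queues).
 */
static inline int sketch_pick_rx_ring_size(int max_rx_avail, int num_rx_queues,
					   int fw_min_size)
{
	int ring_size = max_rx_avail / num_rx_queues;

	if (ring_size < fw_min_size)
		ring_size = fw_min_size;	/* never go below the FW minimum */
	return ring_size;
}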
int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
	int i;

	/**
	 * 1. Allocate FP for leading - fatal if error
	 * 2. {CNIC} Allocate FCoE FP - fatal if error
	 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
	 * 4. Allocate RSS - fix number of queues if error
	 */

	/* leading */
	if (bnx2x_alloc_fp_mem_at(bp, 0))
		return -ENOMEM;

	/* FCoE */
	if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
		/* we will fail the load process instead of marking
		 * NO_FCOE_FLAG
		 */
		return -ENOMEM;

	/* RSS */
	for_each_nondefault_eth_queue(bp, i)
		if (bnx2x_alloc_fp_mem_at(bp, i))
			break;

	/* handle memory failures */
	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;

		/*
		 * move non eth FPs next to the last eth FP
		 * must be done in that order
		 * FCOE_IDX < FWD_IDX < OOO_IDX
		 */

		/* move the FCoE fp even if NO_FCOE_FLAG is on */
		bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);

		bp->num_queues -= delta;
		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
			  bp->num_queues + delta, bp->num_queues);
	}

	return 0;
}
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
	kfree(bp->fp);
	kfree(bp->msix_table);
	kfree(bp->ilt);
}
int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;
	int msix_table_size = 0;

	/*
	 * The biggest MSI-X table we might need is as a maximum number of fast
	 * path IGU SBs plus default SB (for PF).
	 */
	msix_table_size = bp->igu_sb_cnt + 1;

	/* fp array: RSS plus CNIC related L2 queues */
	fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) *
		     sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	bp->fp = fp;

	/* msix table */
	tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;

alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}
*dev
)
3308 struct bnx2x
*bp
= netdev_priv(dev
);
3310 if (unlikely(!netif_running(dev
)))
3313 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
3314 return bnx2x_nic_load(bp
, LOAD_NORMAL
);
int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;

	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {
		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}
int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
	/*
	 * The selected activated PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to
	 * reverse the configuration.
	 */

	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}
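/* Illustrative sketch (not driver code): the index reversal above. When the
 * board straps swap the two external PHYs, configuration index 1 describes
 * the PHY the link code reports as 2 and vice versa, so the lookup index is
 * mirrored before use. The constants below are placeholders.
 */
#define SKETCH_EXT_PHY1		1
#define SKETCH_EXT_PHY2		2

static inline unsigned int sketch_swap_phy_idx(unsigned int sel_phy_idx,
					       int swapped)
{
	if (!swapped)
		return sel_phy_idx;
	if (sel_phy_idx == SKETCH_EXT_PHY1)
		return SKETCH_EXT_PHY2;
	if (sel_phy_idx == SKETCH_EXT_PHY2)
		return SKETCH_EXT_PHY1;
	return sel_phy_idx;
}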
#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		break;
	case NETDEV_FCOE_WWPN:
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
#endif
/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		pr_err("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	return bnx2x_reload_if_running(dev);
}
u32 bnx2x_fix_features(struct net_device *dev, u32 features)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
		features &= ~NETIF_F_LRO;

	return features;
}
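/* Illustrative sketch (not driver code): the dependency rule enforced above.
 * LRO/TPA aggregation needs hardware RX checksumming, so requesting LRO
 * without RXCSUM (or on a device with TPA disabled) is silently downgraded.
 * The bit values below are placeholders, not the kernel's netdev feature bits.
 */
#define SKETCH_F_RXCSUM		0x1
#define SKETCH_F_LRO		0x2

static inline unsigned int sketch_fix_features(unsigned int features,
					       int tpa_disabled)
{
	if (!(features & SKETCH_F_RXCSUM) || tpa_disabled)
		features &= ~SKETCH_F_LRO;	/* drop LRO, keep the rest */
	return features;
}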
int bnx2x_set_features(struct net_device *dev, u32 features)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 flags = bp->flags;
	bool bnx2x_reload = false;

	if (features & NETIF_F_LRO)
		flags |= TPA_ENABLE_FLAG;
	else
		flags &= ~TPA_ENABLE_FLAG;

	if (features & NETIF_F_LOOPBACK) {
		if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
			bp->link_params.loopback_mode = LOOPBACK_BMAC;
			bnx2x_reload = true;
		}
	} else {
		if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
			bp->link_params.loopback_mode = LOOPBACK_NONE;
			bnx2x_reload = true;
		}
	}

	if (flags ^ bp->flags) {
		bp->flags = flags;
		bnx2x_reload = true;
	}

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE)
			return bnx2x_reload_if_running(dev);
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	smp_mb__before_clear_bit();
	set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
	smp_mb__after_clear_bit();

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		pr_err("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	/* Since the chip was reset, clear the FW sequence number */
	bp->fw_seq = 0;
	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}
static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
					   u8 fw_sb_id, u8 sb_index,
					   u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}
static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u16 flags = REG_RD16(bp, addr);

	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}
void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}
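/* Illustrative sketch (not driver code): the coalescing conversion above.
 * The host supplies an interval in microseconds; the status block stores it
 * in BTR ticks, and a zero interval means coalescing for that index should
 * simply be disabled. The tick length below is a placeholder value.
 */
#define SKETCH_BTR_USEC		4	/* microseconds per coalescing tick */

static inline unsigned char sketch_usec_to_ticks(unsigned short usec)
{
	return (unsigned char)(usec / SKETCH_BTR_USEC);
}

static inline unsigned char sketch_coalesce_disabled(unsigned char disable,
						     unsigned short usec)
{
	/* explicitly disabled, or implicitly disabled by a zero interval */
	return disable ? 1 : (usec ? 0 : 1);
}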