/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.48.113-1"
#define DRV_MODULE_RELDATE	"2009/07/21"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"

#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
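
/*
 * The two helpers above reach device registers through the PCICFG_GRC
 * address/data window in PCI config space instead of the memory-mapped BAR,
 * so they can be used before the DMAE machinery is ready.  A minimal usage
 * sketch (the 0x2000 offset is only an illustrative, made-up GRC address):
 *	bnx2x_reg_wr_ind(bp, 0x2000, val);
 *	val = bnx2x_reg_rd_ind(bp, 0x2000);
 */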
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
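
/*
 * As the code above shows, posting a DMAE command simply means copying the
 * command structure dword-by-dword into the command memory slot of channel
 * 'idx' (at DMAE_REG_CMD_MEM + idx * sizeof(struct dmae_command)) and then
 * writing 1 to that channel's GO register to start the transfer.
 */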
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
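
/*
 * Sketch of a typical bnx2x_write_dmae() call (the values are illustrative
 * only): copy 4 dwords that were prepared in the slowpath wb_data buffer
 * into device memory at GRC byte address 'dst_addr':
 *	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), dst_addr, 4);
 * Note that the destination is programmed in dwords (dst_addr >> 2) while
 * the caller passes a byte address, and completion is detected by polling
 * the wb_comp word that the DMAE engine writes back.
 */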
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
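
/*
 * REG_WR_DMAE/REG_RD_DMAE move the two dwords of a "wide bus" (64-bit)
 * register through the DMAE engine; the helpers above only split or
 * recombine the high/low halves (HILO_U64) around those transfers.
 */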
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_u_idx(%x)  *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
			  "  bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);
	}

	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	BNX2X_ERR("end crash dump -----------------\n");
}
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
		/* enable nig and gpio3 attention */

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
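
/*
 * The ack above is a single 32-bit write: the status block index goes into
 * igu_ack.status_block_index, the sb_id/storm/update/op fields are packed
 * into sb_id_and_flags with the IGU_ACK_REGISTER_* shifts, and the whole
 * structure is then written to the per-port host coalescing command register.
 */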
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}
/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
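
/*
 * Worked example of the arithmetic above (illustrative numbers only): with
 * an empty ring (prod == cons) 'used' evaluates to NUM_TX_RINGS, so the
 * value returned is tx_ring_size - NUM_TX_RINGS; the "next page" BD at the
 * end of each ring page can never carry packet data, which is exactly what
 * the NUM_TX_RINGS bias accounts for.
 */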
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
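
/*
 * In this mask a set bit means the corresponding SGE has not been consumed
 * yet: bnx2x_update_sge_prod() clears bits as the FW reports pages used,
 * advances rx_sge_prod only over 64-bit elements that have become all-zero,
 * and then re-arms those elements to RX_SGE_MASK_ELEM_ONE_MASK.  The two
 * bits that map to each page's "next" entry are pre-cleared here so they
 * never hold the producer back.
 */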
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
						max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we r going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));

		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;

			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);

			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
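
/*
 * Sketch of how this is used at the end of the receive path (see
 * bnx2x_rx_int() below): once the software copies of the BD, CQE and SGE
 * producers have been advanced, all three are pushed to USTORM internal
 * memory in one go:
 *	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
 *			     fp->rx_sge_prod);
 */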
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			     (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;

	return rx_pkt;
}
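
/*
 * bnx2x_rx_int() is written to be driven from the NAPI poll loop: it
 * processes at most 'budget' packets, leaves the software indices in the
 * fastpath structure, pushes the new producers to the chip, and returns how
 * many packets it handled so the caller can decide whether to re-enable
 * interrupts (the poll routine itself is not part of this excerpt).
 */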
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = fp->index;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/*
 * General service functions
 */
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}
static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer in-accuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
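
/* Set up min/max bandwidth (rate shaping and fairness) for one virtual
 * function, based on the multi-function configuration read from shmem,
 * and write the resulting structures into XSTORM internal memory.
 */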
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If fairness is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
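
/* Note: the min/max bandwidth fields in the MF configuration are expressed
 * in units of 100 Mbps, hence the "* 100" conversion above; DEF_MIN_RATE is
 * substituted when fairness is enabled but a VN is configured with a zero
 * minimum, as the algorithm requires.
 */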
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp)) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}
static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}
static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
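
/* The slow path queue (SPQ) carries ramrod commands to the firmware; each
 * entry holds a command id, the connection id and a 64-bit data address.
 * bnx2x_sp_post() below fills the next producer BD and then updates the SPQ
 * producer in XSTORM internal memory.
 */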
/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs port number to be encoded into it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}
/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
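
/* Attention handling: bits that changed from 0 to 1 in the attention status
 * block are "asserted" and bits that changed from 1 to 0 are "deasserted".
 * The asserted path below masks the relevant AEU lines, handles the
 * hard-wired attentions (NIG/link, GPIOs, general attentions) and then
 * acknowledges them towards the HC.
 */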
/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	/* mark the failure */
	bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 bp->link_params.ext_phy_config);

	/* log the failure */
	printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
	       " the driver to shutdown the card to prevent permanent"
	       " damage. Please contact Dell Support for assistance\n",
	       bp->dev->name);
}
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val, swap_val, swap_override;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			/* The PHY reset is controlled by GPIO 1 */
			/* fake the port number to cancel the swap done in
			   set_gpio() */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			port = (swap_val && swap_override) ^ 1;
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		default:
			break;
		}
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
							DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
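
/* Deasserted attentions are dispatched per dynamic attention group: each
 * group mask selects which bits of the four AEU "after invert" registers
 * are fed into the set0..set3 handlers above.
 */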
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}
static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */
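
/* The MAC/NIG hardware counters are kept as {hi, lo} pairs of u32s; the
 * macros below implement 64-bit add/subtract on those pairs and extend the
 * 32-bit firmware counters into the 64-bit driver statistics.
 */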
/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)
/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}
static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}
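
/* DMAE completion handshake: each statistics DMAE chain writes DMAE_COMP_VAL
 * into the stats_comp word when it finishes; bnx2x_stats_comp() below polls
 * that word before the results are consumed.
 */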
static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}
/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
3313 static void bnx2x_port_stats_init(struct bnx2x
*bp
)
3315 struct dmae_command
*dmae
;
3316 int port
= BP_PORT(bp
);
3317 int vn
= BP_E1HVN(bp
);
3319 int loader_idx
= PMF_DMAE_C(bp
);
3321 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3324 if (!bp
->link_vars
.link_up
|| !bp
->port
.pmf
) {
3325 BNX2X_ERR("BUG!\n");
3329 bp
->executer_idx
= 0;
3332 opcode
= (DMAE_CMD_SRC_PCI
| DMAE_CMD_DST_GRC
|
3333 DMAE_CMD_C_DST_GRC
| DMAE_CMD_C_ENABLE
|
3334 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3336 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3338 DMAE_CMD_ENDIANITY_DW_SWAP
|
3340 (port
? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3341 (vn
<< DMAE_CMD_E1HVN_SHIFT
));
3343 if (bp
->port
.port_stx
) {
3345 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3346 dmae
->opcode
= opcode
;
3347 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, port_stats
));
3348 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, port_stats
));
3349 dmae
->dst_addr_lo
= bp
->port
.port_stx
>> 2;
3350 dmae
->dst_addr_hi
= 0;
3351 dmae
->len
= sizeof(struct host_port_stats
) >> 2;
3352 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3353 dmae
->comp_addr_hi
= 0;
3359 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3360 dmae
->opcode
= opcode
;
3361 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, func_stats
));
3362 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, func_stats
));
3363 dmae
->dst_addr_lo
= bp
->func_stx
>> 2;
3364 dmae
->dst_addr_hi
= 0;
3365 dmae
->len
= sizeof(struct host_func_stats
) >> 2;
3366 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3367 dmae
->comp_addr_hi
= 0;
3372 opcode
= (DMAE_CMD_SRC_GRC
| DMAE_CMD_DST_PCI
|
3373 DMAE_CMD_C_DST_GRC
| DMAE_CMD_C_ENABLE
|
3374 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3376 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3378 DMAE_CMD_ENDIANITY_DW_SWAP
|
3380 (port
? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3381 (vn
<< DMAE_CMD_E1HVN_SHIFT
));
3383 if (bp
->link_vars
.mac_type
== MAC_TYPE_BMAC
) {
3385 mac_addr
= (port
? NIG_REG_INGRESS_BMAC1_MEM
:
3386 NIG_REG_INGRESS_BMAC0_MEM
);
3388 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3389 BIGMAC_REGISTER_TX_STAT_GTBYT */
3390 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3391 dmae
->opcode
= opcode
;
3392 dmae
->src_addr_lo
= (mac_addr
+
3393 BIGMAC_REGISTER_TX_STAT_GTPKT
) >> 2;
3394 dmae
->src_addr_hi
= 0;
3395 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
));
3396 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
));
3397 dmae
->len
= (8 + BIGMAC_REGISTER_TX_STAT_GTBYT
-
3398 BIGMAC_REGISTER_TX_STAT_GTPKT
) >> 2;
3399 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3400 dmae
->comp_addr_hi
= 0;
3403 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3404 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3405 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3406 dmae
->opcode
= opcode
;
3407 dmae
->src_addr_lo
= (mac_addr
+
3408 BIGMAC_REGISTER_RX_STAT_GR64
) >> 2;
3409 dmae
->src_addr_hi
= 0;
3410 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
) +
3411 offsetof(struct bmac_stats
, rx_stat_gr64_lo
));
3412 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
) +
3413 offsetof(struct bmac_stats
, rx_stat_gr64_lo
));
3414 dmae
->len
= (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ
-
3415 BIGMAC_REGISTER_RX_STAT_GR64
) >> 2;
3416 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3417 dmae
->comp_addr_hi
= 0;
3420 } else if (bp
->link_vars
.mac_type
== MAC_TYPE_EMAC
) {
3422 mac_addr
= (port
? GRCBASE_EMAC1
: GRCBASE_EMAC0
);
3424 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3425 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3426 dmae
->opcode
= opcode
;
3427 dmae
->src_addr_lo
= (mac_addr
+
3428 EMAC_REG_EMAC_RX_STAT_AC
) >> 2;
3429 dmae
->src_addr_hi
= 0;
3430 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
));
3431 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
));
3432 dmae
->len
= EMAC_REG_EMAC_RX_STAT_AC_COUNT
;
3433 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3434 dmae
->comp_addr_hi
= 0;
3437 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3438 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3439 dmae
->opcode
= opcode
;
3440 dmae
->src_addr_lo
= (mac_addr
+
3441 EMAC_REG_EMAC_RX_STAT_AC_28
) >> 2;
3442 dmae
->src_addr_hi
= 0;
3443 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
) +
3444 offsetof(struct emac_stats
, rx_stat_falsecarriererrors
));
3445 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
) +
3446 offsetof(struct emac_stats
, rx_stat_falsecarriererrors
));
3448 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3449 dmae
->comp_addr_hi
= 0;
3452 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3453 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3454 dmae
->opcode
= opcode
;
3455 dmae
->src_addr_lo
= (mac_addr
+
3456 EMAC_REG_EMAC_TX_STAT_AC
) >> 2;
3457 dmae
->src_addr_hi
= 0;
3458 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
) +
3459 offsetof(struct emac_stats
, tx_stat_ifhcoutoctets
));
3460 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
) +
3461 offsetof(struct emac_stats
, tx_stat_ifhcoutoctets
));
3462 dmae
->len
= EMAC_REG_EMAC_TX_STAT_AC_COUNT
;
3463 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3464 dmae
->comp_addr_hi
= 0;
3469 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3470 dmae
->opcode
= opcode
;
3471 dmae
->src_addr_lo
= (port
? NIG_REG_STAT1_BRB_DISCARD
:
3472 NIG_REG_STAT0_BRB_DISCARD
) >> 2;
3473 dmae
->src_addr_hi
= 0;
3474 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, nig_stats
));
3475 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, nig_stats
));
3476 dmae
->len
= (sizeof(struct nig_stats
) - 4*sizeof(u32
)) >> 2;
3477 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3478 dmae
->comp_addr_hi
= 0;
3481 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3482 dmae
->opcode
= opcode
;
3483 dmae
->src_addr_lo
= (port
? NIG_REG_STAT1_EGRESS_MAC_PKT0
:
3484 NIG_REG_STAT0_EGRESS_MAC_PKT0
) >> 2;
3485 dmae
->src_addr_hi
= 0;
3486 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, nig_stats
) +
3487 offsetof(struct nig_stats
, egress_mac_pkt0_lo
));
3488 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, nig_stats
) +
3489 offsetof(struct nig_stats
, egress_mac_pkt0_lo
));
3490 dmae
->len
= (2*sizeof(u32
)) >> 2;
3491 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3492 dmae
->comp_addr_hi
= 0;
3495 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3496 dmae
->opcode
= (DMAE_CMD_SRC_GRC
| DMAE_CMD_DST_PCI
|
3497 DMAE_CMD_C_DST_PCI
| DMAE_CMD_C_ENABLE
|
3498 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3500 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3502 DMAE_CMD_ENDIANITY_DW_SWAP
|
3504 (port
? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3505 (vn
<< DMAE_CMD_E1HVN_SHIFT
));
3506 dmae
->src_addr_lo
= (port
? NIG_REG_STAT1_EGRESS_MAC_PKT1
:
3507 NIG_REG_STAT0_EGRESS_MAC_PKT1
) >> 2;
3508 dmae
->src_addr_hi
= 0;
3509 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, nig_stats
) +
3510 offsetof(struct nig_stats
, egress_mac_pkt1_lo
));
3511 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, nig_stats
) +
3512 offsetof(struct nig_stats
, egress_mac_pkt1_lo
));
3513 dmae
->len
= (2*sizeof(u32
)) >> 2;
3514 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
3515 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
3516 dmae
->comp_val
= DMAE_COMP_VAL
;
3521 static void bnx2x_func_stats_init(struct bnx2x
*bp
)
3523 struct dmae_command
*dmae
= &bp
->stats_dmae
;
3524 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3527 if (!bp
->func_stx
) {
3528 BNX2X_ERR("BUG!\n");
3532 bp
->executer_idx
= 0;
3533 memset(dmae
, 0, sizeof(struct dmae_command
));
3535 dmae
->opcode
= (DMAE_CMD_SRC_PCI
| DMAE_CMD_DST_GRC
|
3536 DMAE_CMD_C_DST_PCI
| DMAE_CMD_C_ENABLE
|
3537 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3539 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3541 DMAE_CMD_ENDIANITY_DW_SWAP
|
3543 (BP_PORT(bp
) ? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3544 (BP_E1HVN(bp
) << DMAE_CMD_E1HVN_SHIFT
));
3545 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, func_stats
));
3546 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, func_stats
));
3547 dmae
->dst_addr_lo
= bp
->func_stx
>> 2;
3548 dmae
->dst_addr_hi
= 0;
3549 dmae
->len
= sizeof(struct host_func_stats
) >> 2;
3550 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
3551 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
3552 dmae
->comp_val
= DMAE_COMP_VAL
;
3557 static void bnx2x_stats_start(struct bnx2x
*bp
)
3560 bnx2x_port_stats_init(bp
);
3562 else if (bp
->func_stx
)
3563 bnx2x_func_stats_init(bp
);
3565 bnx2x_hw_stats_post(bp
);
3566 bnx2x_storm_stats_post(bp
);
3569 static void bnx2x_stats_pmf_start(struct bnx2x
*bp
)
3571 bnx2x_stats_comp(bp
);
3572 bnx2x_stats_pmf_update(bp
);
3573 bnx2x_stats_start(bp
);
3576 static void bnx2x_stats_restart(struct bnx2x
*bp
)
3578 bnx2x_stats_comp(bp
);
3579 bnx2x_stats_start(bp
);
3582 static void bnx2x_bmac_stats_update(struct bnx2x
*bp
)
3584 struct bmac_stats
*new = bnx2x_sp(bp
, mac_stats
.bmac_stats
);
3585 struct host_port_stats
*pstats
= bnx2x_sp(bp
, port_stats
);
3586 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
3592 UPDATE_STAT64(rx_stat_grerb
, rx_stat_ifhcinbadoctets
);
3593 UPDATE_STAT64(rx_stat_grfcs
, rx_stat_dot3statsfcserrors
);
3594 UPDATE_STAT64(rx_stat_grund
, rx_stat_etherstatsundersizepkts
);
3595 UPDATE_STAT64(rx_stat_grovr
, rx_stat_dot3statsframestoolong
);
3596 UPDATE_STAT64(rx_stat_grfrg
, rx_stat_etherstatsfragments
);
3597 UPDATE_STAT64(rx_stat_grjbr
, rx_stat_etherstatsjabbers
);
3598 UPDATE_STAT64(rx_stat_grxcf
, rx_stat_maccontrolframesreceived
);
3599 UPDATE_STAT64(rx_stat_grxpf
, rx_stat_xoffstateentered
);
3600 UPDATE_STAT64(rx_stat_grxpf
, rx_stat_bmac_xpf
);
3601 UPDATE_STAT64(tx_stat_gtxpf
, tx_stat_outxoffsent
);
3602 UPDATE_STAT64(tx_stat_gtxpf
, tx_stat_flowcontroldone
);
3603 UPDATE_STAT64(tx_stat_gt64
, tx_stat_etherstatspkts64octets
);
3604 UPDATE_STAT64(tx_stat_gt127
,
3605 tx_stat_etherstatspkts65octetsto127octets
);
3606 UPDATE_STAT64(tx_stat_gt255
,
3607 tx_stat_etherstatspkts128octetsto255octets
);
3608 UPDATE_STAT64(tx_stat_gt511
,
3609 tx_stat_etherstatspkts256octetsto511octets
);
3610 UPDATE_STAT64(tx_stat_gt1023
,
3611 tx_stat_etherstatspkts512octetsto1023octets
);
3612 UPDATE_STAT64(tx_stat_gt1518
,
3613 tx_stat_etherstatspkts1024octetsto1522octets
);
3614 UPDATE_STAT64(tx_stat_gt2047
, tx_stat_bmac_2047
);
3615 UPDATE_STAT64(tx_stat_gt4095
, tx_stat_bmac_4095
);
3616 UPDATE_STAT64(tx_stat_gt9216
, tx_stat_bmac_9216
);
3617 UPDATE_STAT64(tx_stat_gt16383
, tx_stat_bmac_16383
);
3618 UPDATE_STAT64(tx_stat_gterr
,
3619 tx_stat_dot3statsinternalmactransmiterrors
);
3620 UPDATE_STAT64(tx_stat_gtufl
, tx_stat_bmac_ufl
);
3622 estats
->pause_frames_received_hi
=
3623 pstats
->mac_stx
[1].rx_stat_bmac_xpf_hi
;
3624 estats
->pause_frames_received_lo
=
3625 pstats
->mac_stx
[1].rx_stat_bmac_xpf_lo
;
3627 estats
->pause_frames_sent_hi
=
3628 pstats
->mac_stx
[1].tx_stat_outxoffsent_hi
;
3629 estats
->pause_frames_sent_lo
=
3630 pstats
->mac_stx
[1].tx_stat_outxoffsent_lo
;
3633 static void bnx2x_emac_stats_update(struct bnx2x
*bp
)
3635 struct emac_stats
*new = bnx2x_sp(bp
, mac_stats
.emac_stats
);
3636 struct host_port_stats
*pstats
= bnx2x_sp(bp
, port_stats
);
3637 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
3639 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets
);
3640 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets
);
3641 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors
);
3642 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors
);
3643 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors
);
3644 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors
);
3645 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts
);
3646 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong
);
3647 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments
);
3648 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers
);
3649 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived
);
3650 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered
);
3651 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived
);
3652 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived
);
3653 UPDATE_EXTEND_STAT(tx_stat_outxonsent
);
3654 UPDATE_EXTEND_STAT(tx_stat_outxoffsent
);
3655 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone
);
3656 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions
);
3657 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes
);
3658 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes
);
3659 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions
);
3660 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions
);
3661 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions
);
3662 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets
);
3663 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets
);
3664 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets
);
3665 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets
);
3666 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets
);
3667 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets
);
3668 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets
);
3669 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors
);
3671 estats
->pause_frames_received_hi
=
3672 pstats
->mac_stx
[1].rx_stat_xonpauseframesreceived_hi
;
3673 estats
->pause_frames_received_lo
=
3674 pstats
->mac_stx
[1].rx_stat_xonpauseframesreceived_lo
;
3675 ADD_64(estats
->pause_frames_received_hi
,
3676 pstats
->mac_stx
[1].rx_stat_xoffpauseframesreceived_hi
,
3677 estats
->pause_frames_received_lo
,
3678 pstats
->mac_stx
[1].rx_stat_xoffpauseframesreceived_lo
);
3680 estats
->pause_frames_sent_hi
=
3681 pstats
->mac_stx
[1].tx_stat_outxonsent_hi
;
3682 estats
->pause_frames_sent_lo
=
3683 pstats
->mac_stx
[1].tx_stat_outxonsent_lo
;
3684 ADD_64(estats
->pause_frames_sent_hi
,
3685 pstats
->mac_stx
[1].tx_stat_outxoffsent_hi
,
3686 estats
->pause_frames_sent_lo
,
3687 pstats
->mac_stx
[1].tx_stat_outxoffsent_lo
);
3690 static int bnx2x_hw_stats_update(struct bnx2x
*bp
)
3692 struct nig_stats
*new = bnx2x_sp(bp
, nig_stats
);
3693 struct nig_stats
*old
= &(bp
->port
.old_nig_stats
);
3694 struct host_port_stats
*pstats
= bnx2x_sp(bp
, port_stats
);
3695 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
3702 if (bp
->link_vars
.mac_type
== MAC_TYPE_BMAC
)
3703 bnx2x_bmac_stats_update(bp
);
3705 else if (bp
->link_vars
.mac_type
== MAC_TYPE_EMAC
)
3706 bnx2x_emac_stats_update(bp
);
3708 else { /* unreached */
3709 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3713 ADD_EXTEND_64(pstats
->brb_drop_hi
, pstats
->brb_drop_lo
,
3714 new->brb_discard
- old
->brb_discard
);
3715 ADD_EXTEND_64(estats
->brb_truncate_hi
, estats
->brb_truncate_lo
,
3716 new->brb_truncate
- old
->brb_truncate
);
3718 UPDATE_STAT64_NIG(egress_mac_pkt0
,
3719 etherstatspkts1024octetsto1522octets
);
3720 UPDATE_STAT64_NIG(egress_mac_pkt1
, etherstatspktsover1522octets
);
3722 memcpy(old
, new, sizeof(struct nig_stats
));
3724 memcpy(&(estats
->rx_stat_ifhcinbadoctets_hi
), &(pstats
->mac_stx
[1]),
3725 sizeof(struct mac_stx
));
3726 estats
->brb_drop_hi
= pstats
->brb_drop_hi
;
3727 estats
->brb_drop_lo
= pstats
->brb_drop_lo
;
3729 pstats
->host_port_stats_start
= ++pstats
->host_port_stats_end
;
3731 nig_timer_max
= SHMEM_RD(bp
, port_mb
[BP_PORT(bp
)].stat_nig_timer
);
3732 if (nig_timer_max
!= estats
->nig_timer_max
) {
3733 estats
->nig_timer_max
= nig_timer_max
;
3734 BNX2X_ERR("NIG timer max (%u)\n", estats
->nig_timer_max
);
3740 static int bnx2x_storm_stats_update(struct bnx2x
*bp
)
3742 struct eth_stats_query
*stats
= bnx2x_sp(bp
, fw_stats
);
3743 struct tstorm_per_port_stats
*tport
=
3744 &stats
->tstorm_common
.port_statistics
;
3745 struct host_func_stats
*fstats
= bnx2x_sp(bp
, func_stats
);
3746 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
3749 memset(&(fstats
->total_bytes_received_hi
), 0,
3750 sizeof(struct host_func_stats
) - 2*sizeof(u32
));
3751 estats
->error_bytes_received_hi
= 0;
3752 estats
->error_bytes_received_lo
= 0;
3753 estats
->etherstatsoverrsizepkts_hi
= 0;
3754 estats
->etherstatsoverrsizepkts_lo
= 0;
3755 estats
->no_buff_discard_hi
= 0;
3756 estats
->no_buff_discard_lo
= 0;
3758 for_each_queue(bp
, i
) {
3759 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
3760 int cl_id
= fp
->cl_id
;
3761 struct tstorm_per_client_stats
*tclient
=
3762 &stats
->tstorm_common
.client_statistics
[cl_id
];
3763 struct tstorm_per_client_stats
*old_tclient
= &fp
->old_tclient
;
3764 struct ustorm_per_client_stats
*uclient
=
3765 &stats
->ustorm_common
.client_statistics
[cl_id
];
3766 struct ustorm_per_client_stats
*old_uclient
= &fp
->old_uclient
;
3767 struct xstorm_per_client_stats
*xclient
=
3768 &stats
->xstorm_common
.client_statistics
[cl_id
];
3769 struct xstorm_per_client_stats
*old_xclient
= &fp
->old_xclient
;
3770 struct bnx2x_eth_q_stats
*qstats
= &fp
->eth_q_stats
;
3773 /* are storm stats valid? */
3774 if ((u16
)(le16_to_cpu(xclient
->stats_counter
) + 1) !=
3775 bp
->stats_counter
) {
3776 DP(BNX2X_MSG_STATS
, "[%d] stats not updated by xstorm"
3777 " xstorm counter (%d) != stats_counter (%d)\n",
3778 i
, xclient
->stats_counter
, bp
->stats_counter
);
3781 if ((u16
)(le16_to_cpu(tclient
->stats_counter
) + 1) !=
3782 bp
->stats_counter
) {
3783 DP(BNX2X_MSG_STATS
, "[%d] stats not updated by tstorm"
3784 " tstorm counter (%d) != stats_counter (%d)\n",
3785 i
, tclient
->stats_counter
, bp
->stats_counter
);
3788 if ((u16
)(le16_to_cpu(uclient
->stats_counter
) + 1) !=
3789 bp
->stats_counter
) {
3790 DP(BNX2X_MSG_STATS
, "[%d] stats not updated by ustorm"
3791 " ustorm counter (%d) != stats_counter (%d)\n",
3792 i
, uclient
->stats_counter
, bp
->stats_counter
);
3796 qstats
->total_bytes_received_hi
=
3797 qstats
->valid_bytes_received_hi
=
3798 le32_to_cpu(tclient
->total_rcv_bytes
.hi
);
3799 qstats
->total_bytes_received_lo
=
3800 qstats
->valid_bytes_received_lo
=
3801 le32_to_cpu(tclient
->total_rcv_bytes
.lo
);
3803 qstats
->error_bytes_received_hi
=
3804 le32_to_cpu(tclient
->rcv_error_bytes
.hi
);
3805 qstats
->error_bytes_received_lo
=
3806 le32_to_cpu(tclient
->rcv_error_bytes
.lo
);
3808 ADD_64(qstats
->total_bytes_received_hi
,
3809 qstats
->error_bytes_received_hi
,
3810 qstats
->total_bytes_received_lo
,
3811 qstats
->error_bytes_received_lo
);
3813 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts
,
3814 total_unicast_packets_received
);
3815 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts
,
3816 total_multicast_packets_received
);
3817 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts
,
3818 total_broadcast_packets_received
);
3819 UPDATE_EXTEND_TSTAT(packets_too_big_discard
,
3820 etherstatsoverrsizepkts
);
3821 UPDATE_EXTEND_TSTAT(no_buff_discard
, no_buff_discard
);
3823 SUB_EXTEND_USTAT(ucast_no_buff_pkts
,
3824 total_unicast_packets_received
);
3825 SUB_EXTEND_USTAT(mcast_no_buff_pkts
,
3826 total_multicast_packets_received
);
3827 SUB_EXTEND_USTAT(bcast_no_buff_pkts
,
3828 total_broadcast_packets_received
);
3829 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts
, no_buff_discard
);
3830 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts
, no_buff_discard
);
3831 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts
, no_buff_discard
);
3833 qstats
->total_bytes_transmitted_hi
=
3834 le32_to_cpu(xclient
->total_sent_bytes
.hi
);
3835 qstats
->total_bytes_transmitted_lo
=
3836 le32_to_cpu(xclient
->total_sent_bytes
.lo
);
3838 UPDATE_EXTEND_XSTAT(unicast_pkts_sent
,
3839 total_unicast_packets_transmitted
);
3840 UPDATE_EXTEND_XSTAT(multicast_pkts_sent
,
3841 total_multicast_packets_transmitted
);
3842 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent
,
3843 total_broadcast_packets_transmitted
);
3845 old_tclient
->checksum_discard
= tclient
->checksum_discard
;
3846 old_tclient
->ttl0_discard
= tclient
->ttl0_discard
;
3848 ADD_64(fstats
->total_bytes_received_hi
,
3849 qstats
->total_bytes_received_hi
,
3850 fstats
->total_bytes_received_lo
,
3851 qstats
->total_bytes_received_lo
);
3852 ADD_64(fstats
->total_bytes_transmitted_hi
,
3853 qstats
->total_bytes_transmitted_hi
,
3854 fstats
->total_bytes_transmitted_lo
,
3855 qstats
->total_bytes_transmitted_lo
);
3856 ADD_64(fstats
->total_unicast_packets_received_hi
,
3857 qstats
->total_unicast_packets_received_hi
,
3858 fstats
->total_unicast_packets_received_lo
,
3859 qstats
->total_unicast_packets_received_lo
);
3860 ADD_64(fstats
->total_multicast_packets_received_hi
,
3861 qstats
->total_multicast_packets_received_hi
,
3862 fstats
->total_multicast_packets_received_lo
,
3863 qstats
->total_multicast_packets_received_lo
);
3864 ADD_64(fstats
->total_broadcast_packets_received_hi
,
3865 qstats
->total_broadcast_packets_received_hi
,
3866 fstats
->total_broadcast_packets_received_lo
,
3867 qstats
->total_broadcast_packets_received_lo
);
3868 ADD_64(fstats
->total_unicast_packets_transmitted_hi
,
3869 qstats
->total_unicast_packets_transmitted_hi
,
3870 fstats
->total_unicast_packets_transmitted_lo
,
3871 qstats
->total_unicast_packets_transmitted_lo
);
3872 ADD_64(fstats
->total_multicast_packets_transmitted_hi
,
3873 qstats
->total_multicast_packets_transmitted_hi
,
3874 fstats
->total_multicast_packets_transmitted_lo
,
3875 qstats
->total_multicast_packets_transmitted_lo
);
3876 ADD_64(fstats
->total_broadcast_packets_transmitted_hi
,
3877 qstats
->total_broadcast_packets_transmitted_hi
,
3878 fstats
->total_broadcast_packets_transmitted_lo
,
3879 qstats
->total_broadcast_packets_transmitted_lo
);
3880 ADD_64(fstats
->valid_bytes_received_hi
,
3881 qstats
->valid_bytes_received_hi
,
3882 fstats
->valid_bytes_received_lo
,
3883 qstats
->valid_bytes_received_lo
);
3885 ADD_64(estats
->error_bytes_received_hi
,
3886 qstats
->error_bytes_received_hi
,
3887 estats
->error_bytes_received_lo
,
3888 qstats
->error_bytes_received_lo
);
3889 ADD_64(estats
->etherstatsoverrsizepkts_hi
,
3890 qstats
->etherstatsoverrsizepkts_hi
,
3891 estats
->etherstatsoverrsizepkts_lo
,
3892 qstats
->etherstatsoverrsizepkts_lo
);
3893 ADD_64(estats
->no_buff_discard_hi
, qstats
->no_buff_discard_hi
,
3894 estats
->no_buff_discard_lo
, qstats
->no_buff_discard_lo
);
3897 ADD_64(fstats
->total_bytes_received_hi
,
3898 estats
->rx_stat_ifhcinbadoctets_hi
,
3899 fstats
->total_bytes_received_lo
,
3900 estats
->rx_stat_ifhcinbadoctets_lo
);
3902 memcpy(estats
, &(fstats
->total_bytes_received_hi
),
3903 sizeof(struct host_func_stats
) - 2*sizeof(u32
));
3905 ADD_64(estats
->etherstatsoverrsizepkts_hi
,
3906 estats
->rx_stat_dot3statsframestoolong_hi
,
3907 estats
->etherstatsoverrsizepkts_lo
,
3908 estats
->rx_stat_dot3statsframestoolong_lo
);
3909 ADD_64(estats
->error_bytes_received_hi
,
3910 estats
->rx_stat_ifhcinbadoctets_hi
,
3911 estats
->error_bytes_received_lo
,
3912 estats
->rx_stat_ifhcinbadoctets_lo
);
3915 estats
->mac_filter_discard
=
3916 le32_to_cpu(tport
->mac_filter_discard
);
3917 estats
->xxoverflow_discard
=
3918 le32_to_cpu(tport
->xxoverflow_discard
);
3919 estats
->brb_truncate_discard
=
3920 le32_to_cpu(tport
->brb_truncate_discard
);
3921 estats
->mac_discard
= le32_to_cpu(tport
->mac_discard
);
3924 fstats
->host_func_stats_start
= ++fstats
->host_func_stats_end
;
3926 bp
->stats_pending
= 0;
3931 static void bnx2x_net_stats_update(struct bnx2x
*bp
)
3933 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
3934 struct net_device_stats
*nstats
= &bp
->dev
->stats
;
3937 nstats
->rx_packets
=
3938 bnx2x_hilo(&estats
->total_unicast_packets_received_hi
) +
3939 bnx2x_hilo(&estats
->total_multicast_packets_received_hi
) +
3940 bnx2x_hilo(&estats
->total_broadcast_packets_received_hi
);
3942 nstats
->tx_packets
=
3943 bnx2x_hilo(&estats
->total_unicast_packets_transmitted_hi
) +
3944 bnx2x_hilo(&estats
->total_multicast_packets_transmitted_hi
) +
3945 bnx2x_hilo(&estats
->total_broadcast_packets_transmitted_hi
);
3947 nstats
->rx_bytes
= bnx2x_hilo(&estats
->total_bytes_received_hi
);
3949 nstats
->tx_bytes
= bnx2x_hilo(&estats
->total_bytes_transmitted_hi
);
3951 nstats
->rx_dropped
= estats
->mac_discard
;
3952 for_each_queue(bp
, i
)
3953 nstats
->rx_dropped
+=
3954 le32_to_cpu(bp
->fp
[i
].old_tclient
.checksum_discard
);
3956 nstats
->tx_dropped
= 0;
3959 bnx2x_hilo(&estats
->total_multicast_packets_received_hi
);
3961 nstats
->collisions
=
3962 bnx2x_hilo(&estats
->tx_stat_etherstatscollisions_hi
);
3964 nstats
->rx_length_errors
=
3965 bnx2x_hilo(&estats
->rx_stat_etherstatsundersizepkts_hi
) +
3966 bnx2x_hilo(&estats
->etherstatsoverrsizepkts_hi
);
3967 nstats
->rx_over_errors
= bnx2x_hilo(&estats
->brb_drop_hi
) +
3968 bnx2x_hilo(&estats
->brb_truncate_hi
);
3969 nstats
->rx_crc_errors
=
3970 bnx2x_hilo(&estats
->rx_stat_dot3statsfcserrors_hi
);
3971 nstats
->rx_frame_errors
=
3972 bnx2x_hilo(&estats
->rx_stat_dot3statsalignmenterrors_hi
);
3973 nstats
->rx_fifo_errors
= bnx2x_hilo(&estats
->no_buff_discard_hi
);
3974 nstats
->rx_missed_errors
= estats
->xxoverflow_discard
;
3976 nstats
->rx_errors
= nstats
->rx_length_errors
+
3977 nstats
->rx_over_errors
+
3978 nstats
->rx_crc_errors
+
3979 nstats
->rx_frame_errors
+
3980 nstats
->rx_fifo_errors
+
3981 nstats
->rx_missed_errors
;
3983 nstats
->tx_aborted_errors
=
3984 bnx2x_hilo(&estats
->tx_stat_dot3statslatecollisions_hi
) +
3985 bnx2x_hilo(&estats
->tx_stat_dot3statsexcessivecollisions_hi
);
3986 nstats
->tx_carrier_errors
=
3987 bnx2x_hilo(&estats
->rx_stat_dot3statscarriersenseerrors_hi
);
3988 nstats
->tx_fifo_errors
= 0;
3989 nstats
->tx_heartbeat_errors
= 0;
3990 nstats
->tx_window_errors
= 0;
3992 nstats
->tx_errors
= nstats
->tx_aborted_errors
+
3993 nstats
->tx_carrier_errors
+
3994 bnx2x_hilo(&estats
->tx_stat_dot3statsinternalmactransmiterrors_hi
);
3997 static void bnx2x_drv_stats_update(struct bnx2x
*bp
)
3999 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
4002 estats
->driver_xoff
= 0;
4003 estats
->rx_err_discard_pkt
= 0;
4004 estats
->rx_skb_alloc_failed
= 0;
4005 estats
->hw_csum_err
= 0;
4006 for_each_queue(bp
, i
) {
4007 struct bnx2x_eth_q_stats
*qstats
= &bp
->fp
[i
].eth_q_stats
;
4009 estats
->driver_xoff
+= qstats
->driver_xoff
;
4010 estats
->rx_err_discard_pkt
+= qstats
->rx_err_discard_pkt
;
4011 estats
->rx_skb_alloc_failed
+= qstats
->rx_skb_alloc_failed
;
4012 estats
->hw_csum_err
+= qstats
->hw_csum_err
;
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
			     bp->fp->rx_comp_cons),
		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %lu  no_buff_discard %lu  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxovrflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard, estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}
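/* The DMAE commands prepared above copy the host-resident port and function
 * statistics buffers out to the port_stx/func_stx locations in chip memory
 * (source = PCI, destination = GRC).  When both are queued, the port copy
 * completes into the loader channel's "go" register so it kicks off the
 * function copy; the final command always completes into the stats_comp word
 * with DMAE_COMP_VAL, which the stats code polls for before proceeding.
 */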
static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
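/* The table above is indexed as bnx2x_stats_stm[current state][event]: each
 * entry names the handler to run and the state to move to.  It is consumed by
 * bnx2x_stats_handle() below, which performs the transition and is the single
 * entry point for all statistics events (PMF change, link up, timer update,
 * stop).
 */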
static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
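/* The periodic timer above doubles as the driver <-> MCP heartbeat: every
 * tick the driver advances its pulse sequence and writes it to the function
 * mailbox, then reads the MCP's pulse back.  The two sequences may differ by
 * at most 1 (driver just ahead of the MCP response); any other delta is
 * reported as a lost heartbeat.
 */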
/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct ustorm_status_block)/4);
	bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct cstorm_status_block)/4);
}
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
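/* Each storm (U/C/T/X) gets the host address of its section of the default
 * status block written into its internal memory, and all of its host
 * coalescing indices start out disabled.  The attention section is wired to
 * the per-port AEU group registers read above, so attention signals are
 * routed through this default SB rather than the per-queue ones.
 */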
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/12) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/12) ? 0 : 1);
	}
}
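/* Coalescing note: the HC timeout for the Rx and Tx CQ indices is programmed
 * as rx_ticks/12 and tx_ticks/12 respectively; when that quotient is zero the
 * corresponding HC_DISABLE entry is set to 1 instead, so a very small tick
 * value effectively turns interrupt coalescing off for that index.
 */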
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
	DP(NETIF_MSG_IFUP,
	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_rx_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}
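/* Ring layout note: the last entries of every BD/SGE/CQ page are "next page"
 * pointers filled in above, so the hardware follows the page chain while the
 * driver advances its indices with the NEXT_*_IDX() macros, which account for
 * those reserved slots.  The producers published via bnx2x_update_rx_prod()
 * therefore never exceed the number of buffers actually allocated.
 */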
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_tx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;
		u8 sb_id = fp->sb_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_rx_queues));
}
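/* RSS note: when multi_mode is enabled the TSTORM indirection table is filled
 * round-robin with the leading client id plus (i % num_rx_queues), so hash
 * buckets are spread evenly across the enabled Rx queues.
 */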
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
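/* The mask built above is the per-function bit (1 << BP_L_ID(bp)); setting it
 * in a drop_all/accept_all field applies that filter class to this function
 * only.  The client configuration is (re)written afterwards except when all
 * Rx is being turned off.
 */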
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (bp->flags & TPA_ENABLE_FLAG) {
		struct tstorm_eth_tpa_exist tpa = {0};

		tpa.tpa_exist = 1;

		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
		       ((u32 *)&tpa)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
		       ((u32 *)&tpa)[1]);
	}

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
     In the latter case the fairness algorithm should be deactivated.
     If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero)
		bp->vn_weight_sum = 0;
}
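/* In other words: if any visible VN has a non-zero configured minimum rate,
 * VNs whose minimum is zero are still given DEF_MIN_RATE before summing, so
 * every VN keeps a small fair share.  A weight sum of 0 (fairness disabled)
 * is reported only when every VN's minimum is zero.
 */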
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}
	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_rx_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}

			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables =
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
		if (bp->vn_weight_sum)
			bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		else
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   " fairness will be disabled\n");
	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode  minmax will be disabled\n");
	}

	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}
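/* Rate shaping is always enabled per port in E1H multi-function mode; the
 * fairness algorithm is enabled on top of it only when
 * bnx2x_calc_vn_weight_sum() found at least one non-zero per-VN minimum rate.
 * In single-function mode the whole min/max block stays disabled.
 */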
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}
static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		DP(NETIF_MSG_IFUP,
		   "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}
/* end of nic init */

/*
 * gzip service functions
 */
static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " un-compression\n", bp->dev->name);
	return -ENOMEM;
}
static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
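/* bnx2x_gunzip() validates the gzip magic and compression method itself,
 * skips the 10-byte header (and the optional zero-terminated FNAME field),
 * then runs zlib_inflate() in raw-deflate mode (windowBits = -MAX_WBITS) into
 * the pre-allocated gunzip_buf.  The output length is kept in 32-bit words
 * for the init code that consumes it.
 */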
/* nic load/unload */

/*
 * General service functions
 */
/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/*  Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}
static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required = 0;
	u32 val;
	int port;

	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}
static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_ISCSI
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do it's magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_ISCSI
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
	if (CHIP_IS_E1(bp)) {
		/* !!! fix pxp client crdit until excel update */
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
	}

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	/* PXPCS COMMON comes here */
	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	/* EMAC0 COMMON comes here */
	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	/* EMAC1 COMMON comes here */
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	/* DBU COMMON comes here */
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	/* DBG COMMON comes here */
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
		break;

	default:
		break;
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}
static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Port PXP comes here */
	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	/* Port PXP2 comes here */
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
#ifdef BCM_ISCSI
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));

	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));

	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif
	/* Port CMs come here */
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

	/* Port QM comes here */
#ifdef BCM_ISCSI
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
#endif
	/* Port DQ comes here */
	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);


	/* Port PRS comes here */
	bnx2x_init_block(bp, PRS_BLOCK, init_stage);
	/* Port TSDM comes here */
	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	/* Port CSDM comes here */
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	/* Port USDM comes here */
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	/* Port XSDM comes here */
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	/* Port UPB comes here */
	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	/* Port XPB comes here */
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_ISCSI
	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

	wb_write[0] = U64_LO(bp->t2_mapping);
	wb_write[1] = U64_HI(bp->t2_mapping);
	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
	/* Port SRCH comes here */
#endif
	/* Port CDU comes here */
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	/* Port CFC comes here */
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	/* Port PXPCS comes here */
	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	/* Port EMAC0 comes here */
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	/* Port EMAC1 comes here */
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	/* Port DBU comes here */
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	/* Port DBG comes here */
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		/* support pause requests from USDM, TSDM and BRB */
		REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);

		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	/* Port MCP comes here */
	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	/* Port DMAE comes here */
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		/* add SPIO 5 to group 0 */
		{
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
		}
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}
6087 #define ILT_PER_FUNC (768/2)
6088 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6089 /* the phys address is shifted right 12 bits and has an added
6090 1=valid bit added to the 53rd bit
6091 then since this is a wide register(TM)
6092 we split it into two 32 bit writes
6094 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6095 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6096 #define PXP_ONE_ILT(x) (((x) << 10) | x)
6097 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6099 #define CNIC_ILT_LINES 0
6101 static void bnx2x_ilt_wr(struct bnx2x
*bp
, u32 index
, dma_addr_t addr
)
6105 if (CHIP_IS_E1H(bp
))
6106 reg
= PXP2_REG_RQ_ONCHIP_AT_B0
+ index
*8;
6108 reg
= PXP2_REG_RQ_ONCHIP_AT
+ index
*8;
6110 bnx2x_wb_wr(bp
, reg
, ONCHIP_ADDR1(addr
), ONCHIP_ADDR2(addr
));
6113 static int bnx2x_init_func(struct bnx2x
*bp
)
6115 int port
= BP_PORT(bp
);
6116 int func
= BP_FUNC(bp
);
6120 DP(BNX2X_MSG_MCP
, "starting func init func %x\n", func
);
6122 /* set MSI reconfigure capability */
6123 addr
= (port
? HC_REG_CONFIG_1
: HC_REG_CONFIG_0
);
6124 val
= REG_RD(bp
, addr
);
6125 val
|= HC_CONFIG_0_REG_MSI_ATTN_EN_0
;
6126 REG_WR(bp
, addr
, val
);
6128 i
= FUNC_ILT_BASE(func
);
6130 bnx2x_ilt_wr(bp
, i
, bnx2x_sp_mapping(bp
, context
));
6131 if (CHIP_IS_E1H(bp
)) {
6132 REG_WR(bp
, PXP2_REG_RQ_CDU_FIRST_ILT
, i
);
6133 REG_WR(bp
, PXP2_REG_RQ_CDU_LAST_ILT
, i
+ CNIC_ILT_LINES
);
6135 REG_WR(bp
, PXP2_REG_PSWRQ_CDU0_L2P
+ func
*4,
6136 PXP_ILT_RANGE(i
, i
+ CNIC_ILT_LINES
));
6139 if (CHIP_IS_E1H(bp
)) {
6140 for (i
= 0; i
< 9; i
++)
6141 bnx2x_init_block(bp
,
6142 cm_blocks
[i
], FUNC0_STAGE
+ func
);
6144 REG_WR(bp
, NIG_REG_LLH0_FUNC_EN
+ port
*8, 1);
6145 REG_WR(bp
, NIG_REG_LLH0_FUNC_VLAN_ID
+ port
*8, bp
->e1hov
);
6148 /* HC init per function */
6149 if (CHIP_IS_E1H(bp
)) {
6150 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_12
+ func
*4, 0);
6152 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, 0);
6153 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, 0);
6155 bnx2x_init_block(bp
, HC_BLOCK
, FUNC0_STAGE
+ func
);
6157 /* Reset PCIE errors for debug */
6158 REG_WR(bp
, 0x2114, 0xffffffff);
6159 REG_WR(bp
, 0x2120, 0xffffffff);
6164 static int bnx2x_init_hw(struct bnx2x
*bp
, u32 load_code
)
6168 DP(BNX2X_MSG_MCP
, "function %d load_code %x\n",
6169 BP_FUNC(bp
), load_code
);
6172 mutex_init(&bp
->dmae_mutex
);
6173 bnx2x_gunzip_init(bp
);
6175 switch (load_code
) {
6176 case FW_MSG_CODE_DRV_LOAD_COMMON
:
6177 rc
= bnx2x_init_common(bp
);
6182 case FW_MSG_CODE_DRV_LOAD_PORT
:
6184 rc
= bnx2x_init_port(bp
);
6189 case FW_MSG_CODE_DRV_LOAD_FUNCTION
:
6191 rc
= bnx2x_init_func(bp
);
6197 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code
);
6201 if (!BP_NOMCP(bp
)) {
6202 int func
= BP_FUNC(bp
);
6204 bp
->fw_drv_pulse_wr_seq
=
6205 (SHMEM_RD(bp
, func_mb
[func
].drv_pulse_mb
) &
6206 DRV_PULSE_SEQ_MASK
);
6207 bp
->func_stx
= SHMEM_RD(bp
, func_mb
[func
].fw_mb_param
);
6208 DP(BNX2X_MSG_MCP
, "drv_pulse 0x%x func_stx 0x%x\n",
6209 bp
->fw_drv_pulse_wr_seq
, bp
->func_stx
);
6213 /* this needs to be done before gunzip end */
6214 bnx2x_zero_def_sb(bp
);
6215 for_each_queue(bp
, i
)
6216 bnx2x_zero_sb(bp
, BP_L_ID(bp
) + i
);
6219 bnx2x_gunzip_end(bp
);
6224 /* send the MCP a request, block until there is a reply */
6225 u32
bnx2x_fw_command(struct bnx2x
*bp
, u32 command
)
6227 int func
= BP_FUNC(bp
);
6228 u32 seq
= ++bp
->fw_seq
;
6231 u8 delay
= CHIP_REV_IS_SLOW(bp
) ? 100 : 10;
6233 SHMEM_WR(bp
, func_mb
[func
].drv_mb_header
, (command
| seq
));
6234 DP(BNX2X_MSG_MCP
, "wrote command (%x) to FW MB\n", (command
| seq
));
6237 /* let the FW do it's magic ... */
6240 rc
= SHMEM_RD(bp
, func_mb
[func
].fw_mb_header
);
6242 /* Give the FW up to 2 second (200*10ms) */
6243 } while ((seq
!= (rc
& FW_MSG_SEQ_NUMBER_MASK
)) && (cnt
++ < 200));
6245 DP(BNX2X_MSG_MCP
, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6246 cnt
*delay
, rc
, seq
);
6248 /* is this a reply to our command? */
6249 if (seq
== (rc
& FW_MSG_SEQ_NUMBER_MASK
)) {
6250 rc
&= FW_MSG_CODE_MASK
;
6254 BNX2X_ERR("FW failed to respond!\n");
6262 static void bnx2x_free_mem(struct bnx2x
*bp
)
6265 #define BNX2X_PCI_FREE(x, y, size) \
6268 pci_free_consistent(bp->pdev, size, x, y); \
6274 #define BNX2X_FREE(x) \
6286 for_each_queue(bp
, i
) {
6289 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, status_blk
),
6290 bnx2x_fp(bp
, i
, status_blk_mapping
),
6291 sizeof(struct host_status_block
) +
6292 sizeof(struct eth_tx_db_data
));
6295 for_each_rx_queue(bp
, i
) {
6297 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6298 BNX2X_FREE(bnx2x_fp(bp
, i
, rx_buf_ring
));
6299 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, rx_desc_ring
),
6300 bnx2x_fp(bp
, i
, rx_desc_mapping
),
6301 sizeof(struct eth_rx_bd
) * NUM_RX_BD
);
6303 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, rx_comp_ring
),
6304 bnx2x_fp(bp
, i
, rx_comp_mapping
),
6305 sizeof(struct eth_fast_path_rx_cqe
) *
6309 BNX2X_FREE(bnx2x_fp(bp
, i
, rx_page_ring
));
6310 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, rx_sge_ring
),
6311 bnx2x_fp(bp
, i
, rx_sge_mapping
),
6312 BCM_PAGE_SIZE
* NUM_RX_SGE_PAGES
);
6315 for_each_tx_queue(bp
, i
) {
6317 /* fastpath tx rings: tx_buf tx_desc */
6318 BNX2X_FREE(bnx2x_fp(bp
, i
, tx_buf_ring
));
6319 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, tx_desc_ring
),
6320 bnx2x_fp(bp
, i
, tx_desc_mapping
),
6321 sizeof(struct eth_tx_bd
) * NUM_TX_BD
);
6323 /* end of fastpath */
6325 BNX2X_PCI_FREE(bp
->def_status_blk
, bp
->def_status_blk_mapping
,
6326 sizeof(struct host_def_status_block
));
6328 BNX2X_PCI_FREE(bp
->slowpath
, bp
->slowpath_mapping
,
6329 sizeof(struct bnx2x_slowpath
));
6332 BNX2X_PCI_FREE(bp
->t1
, bp
->t1_mapping
, 64*1024);
6333 BNX2X_PCI_FREE(bp
->t2
, bp
->t2_mapping
, 16*1024);
6334 BNX2X_PCI_FREE(bp
->timers
, bp
->timers_mapping
, 8*1024);
6335 BNX2X_PCI_FREE(bp
->qm
, bp
->qm_mapping
, 128*1024);
6337 BNX2X_PCI_FREE(bp
->spq
, bp
->spq_mapping
, BCM_PAGE_SIZE
);
6339 #undef BNX2X_PCI_FREE
6343 static int bnx2x_alloc_mem(struct bnx2x
*bp
)
6346 #define BNX2X_PCI_ALLOC(x, y, size) \
6348 x = pci_alloc_consistent(bp->pdev, size, y); \
6350 goto alloc_mem_err; \
6351 memset(x, 0, size); \
6354 #define BNX2X_ALLOC(x, size) \
6356 x = vmalloc(size); \
6358 goto alloc_mem_err; \
6359 memset(x, 0, size); \
6366 for_each_queue(bp
, i
) {
6367 bnx2x_fp(bp
, i
, bp
) = bp
;
6370 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, status_blk
),
6371 &bnx2x_fp(bp
, i
, status_blk_mapping
),
6372 sizeof(struct host_status_block
) +
6373 sizeof(struct eth_tx_db_data
));
6376 for_each_rx_queue(bp
, i
) {
6378 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6379 BNX2X_ALLOC(bnx2x_fp(bp
, i
, rx_buf_ring
),
6380 sizeof(struct sw_rx_bd
) * NUM_RX_BD
);
6381 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, rx_desc_ring
),
6382 &bnx2x_fp(bp
, i
, rx_desc_mapping
),
6383 sizeof(struct eth_rx_bd
) * NUM_RX_BD
);
6385 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, rx_comp_ring
),
6386 &bnx2x_fp(bp
, i
, rx_comp_mapping
),
6387 sizeof(struct eth_fast_path_rx_cqe
) *
6391 BNX2X_ALLOC(bnx2x_fp(bp
, i
, rx_page_ring
),
6392 sizeof(struct sw_rx_page
) * NUM_RX_SGE
);
6393 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, rx_sge_ring
),
6394 &bnx2x_fp(bp
, i
, rx_sge_mapping
),
6395 BCM_PAGE_SIZE
* NUM_RX_SGE_PAGES
);
6398 for_each_tx_queue(bp
, i
) {
6400 bnx2x_fp(bp
, i
, hw_tx_prods
) =
6401 (void *)(bnx2x_fp(bp
, i
, status_blk
) + 1);
6403 bnx2x_fp(bp
, i
, tx_prods_mapping
) =
6404 bnx2x_fp(bp
, i
, status_blk_mapping
) +
6405 sizeof(struct host_status_block
);
6407 /* fastpath tx rings: tx_buf tx_desc */
6408 BNX2X_ALLOC(bnx2x_fp(bp
, i
, tx_buf_ring
),
6409 sizeof(struct sw_tx_bd
) * NUM_TX_BD
);
6410 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, tx_desc_ring
),
6411 &bnx2x_fp(bp
, i
, tx_desc_mapping
),
6412 sizeof(struct eth_tx_bd
) * NUM_TX_BD
);
6414 /* end of fastpath */
6416 BNX2X_PCI_ALLOC(bp
->def_status_blk
, &bp
->def_status_blk_mapping
,
6417 sizeof(struct host_def_status_block
));
6419 BNX2X_PCI_ALLOC(bp
->slowpath
, &bp
->slowpath_mapping
,
6420 sizeof(struct bnx2x_slowpath
));
6423 BNX2X_PCI_ALLOC(bp
->t1
, &bp
->t1_mapping
, 64*1024);
6426 for (i
= 0; i
< 64*1024; i
+= 64) {
6427 *(u64
*)((char *)bp
->t1
+ i
+ 56) = 0x0UL
;
6428 *(u64
*)((char *)bp
->t1
+ i
+ 3) = 0x0UL
;
6431 /* allocate searcher T2 table
6432 we allocate 1/4 of alloc num for T2
6433 (which is not entered into the ILT) */
6434 BNX2X_PCI_ALLOC(bp
->t2
, &bp
->t2_mapping
, 16*1024);
6437 for (i
= 0; i
< 16*1024; i
+= 64)
6438 * (u64
*)((char *)bp
->t2
+ i
+ 56) = bp
->t2_mapping
+ i
+ 64;
6440 /* now fixup the last line in the block to point to the next block */
6441 *(u64
*)((char *)bp
->t2
+ 1024*16-8) = bp
->t2_mapping
;
6443 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6444 BNX2X_PCI_ALLOC(bp
->timers
, &bp
->timers_mapping
, 8*1024);
6446 /* QM queues (128*MAX_CONN) */
6447 BNX2X_PCI_ALLOC(bp
->qm
, &bp
->qm_mapping
, 128*1024);
6450 /* Slow path ring */
6451 BNX2X_PCI_ALLOC(bp
->spq
, &bp
->spq_mapping
, BCM_PAGE_SIZE
);
6459 #undef BNX2X_PCI_ALLOC
6463 static void bnx2x_free_tx_skbs(struct bnx2x
*bp
)
6467 for_each_tx_queue(bp
, i
) {
6468 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
6470 u16 bd_cons
= fp
->tx_bd_cons
;
6471 u16 sw_prod
= fp
->tx_pkt_prod
;
6472 u16 sw_cons
= fp
->tx_pkt_cons
;
6474 while (sw_cons
!= sw_prod
) {
6475 bd_cons
= bnx2x_free_tx_pkt(bp
, fp
, TX_BD(sw_cons
));
6481 static void bnx2x_free_rx_skbs(struct bnx2x
*bp
)
6485 for_each_rx_queue(bp
, j
) {
6486 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
6488 for (i
= 0; i
< NUM_RX_BD
; i
++) {
6489 struct sw_rx_bd
*rx_buf
= &fp
->rx_buf_ring
[i
];
6490 struct sk_buff
*skb
= rx_buf
->skb
;
6495 pci_unmap_single(bp
->pdev
,
6496 pci_unmap_addr(rx_buf
, mapping
),
6497 bp
->rx_buf_size
, PCI_DMA_FROMDEVICE
);
6502 if (!fp
->disable_tpa
)
6503 bnx2x_free_tpa_pool(bp
, fp
, CHIP_IS_E1(bp
) ?
6504 ETH_MAX_AGGREGATION_QUEUES_E1
:
6505 ETH_MAX_AGGREGATION_QUEUES_E1H
);
6509 static void bnx2x_free_skbs(struct bnx2x
*bp
)
6511 bnx2x_free_tx_skbs(bp
);
6512 bnx2x_free_rx_skbs(bp
);
6515 static void bnx2x_free_msix_irqs(struct bnx2x
*bp
)
6519 free_irq(bp
->msix_table
[0].vector
, bp
->dev
);
6520 DP(NETIF_MSG_IFDOWN
, "released sp irq (%d)\n",
6521 bp
->msix_table
[0].vector
);
6523 for_each_queue(bp
, i
) {
6524 DP(NETIF_MSG_IFDOWN
, "about to release fp #%d->%d irq "
6525 "state %x\n", i
, bp
->msix_table
[i
+ offset
].vector
,
6526 bnx2x_fp(bp
, i
, state
));
6528 free_irq(bp
->msix_table
[i
+ offset
].vector
, &bp
->fp
[i
]);
6532 static void bnx2x_free_irq(struct bnx2x
*bp
)
6534 if (bp
->flags
& USING_MSIX_FLAG
) {
6535 bnx2x_free_msix_irqs(bp
);
6536 pci_disable_msix(bp
->pdev
);
6537 bp
->flags
&= ~USING_MSIX_FLAG
;
6539 } else if (bp
->flags
& USING_MSI_FLAG
) {
6540 free_irq(bp
->pdev
->irq
, bp
->dev
);
6541 pci_disable_msi(bp
->pdev
);
6542 bp
->flags
&= ~USING_MSI_FLAG
;
6545 free_irq(bp
->pdev
->irq
, bp
->dev
);
6548 static int bnx2x_enable_msix(struct bnx2x
*bp
)
6550 int i
, rc
, offset
= 1;
6553 bp
->msix_table
[0].entry
= igu_vec
;
6554 DP(NETIF_MSG_IFUP
, "msix_table[0].entry = %d (slowpath)\n", igu_vec
);
6556 for_each_queue(bp
, i
) {
6557 igu_vec
= BP_L_ID(bp
) + offset
+ i
;
6558 bp
->msix_table
[i
+ offset
].entry
= igu_vec
;
6559 DP(NETIF_MSG_IFUP
, "msix_table[%d].entry = %d "
6560 "(fastpath #%u)\n", i
+ offset
, igu_vec
, i
);
6563 rc
= pci_enable_msix(bp
->pdev
, &bp
->msix_table
[0],
6564 BNX2X_NUM_QUEUES(bp
) + offset
);
6566 DP(NETIF_MSG_IFUP
, "MSI-X is not attainable rc %d\n", rc
);
6570 bp
->flags
|= USING_MSIX_FLAG
;
6575 static int bnx2x_req_msix_irqs(struct bnx2x
*bp
)
6577 int i
, rc
, offset
= 1;
6579 rc
= request_irq(bp
->msix_table
[0].vector
, bnx2x_msix_sp_int
, 0,
6580 bp
->dev
->name
, bp
->dev
);
6582 BNX2X_ERR("request sp irq failed\n");
6586 for_each_queue(bp
, i
) {
6587 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
6589 sprintf(fp
->name
, "%s.fp%d", bp
->dev
->name
, i
);
6590 rc
= request_irq(bp
->msix_table
[i
+ offset
].vector
,
6591 bnx2x_msix_fp_int
, 0, fp
->name
, fp
);
6593 BNX2X_ERR("request fp #%d irq failed rc %d\n", i
, rc
);
6594 bnx2x_free_msix_irqs(bp
);
6598 fp
->state
= BNX2X_FP_STATE_IRQ
;
6601 i
= BNX2X_NUM_QUEUES(bp
);
6603 printk(KERN_INFO PFX
6604 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6605 bp
->dev
->name
, bp
->msix_table
[0].vector
,
6606 bp
->msix_table
[offset
].vector
,
6607 bp
->msix_table
[offset
+ i
- 1].vector
);
6609 printk(KERN_INFO PFX
"%s: using MSI-X IRQs: sp %d fp %d\n",
6610 bp
->dev
->name
, bp
->msix_table
[0].vector
,
6611 bp
->msix_table
[offset
+ i
- 1].vector
);
6616 static int bnx2x_enable_msi(struct bnx2x
*bp
)
6620 rc
= pci_enable_msi(bp
->pdev
);
6622 DP(NETIF_MSG_IFUP
, "MSI is not attainable\n");
6625 bp
->flags
|= USING_MSI_FLAG
;
6630 static int bnx2x_req_irq(struct bnx2x
*bp
)
6632 unsigned long flags
;
6635 if (bp
->flags
& USING_MSI_FLAG
)
6638 flags
= IRQF_SHARED
;
6640 rc
= request_irq(bp
->pdev
->irq
, bnx2x_interrupt
, flags
,
6641 bp
->dev
->name
, bp
->dev
);
6643 bnx2x_fp(bp
, 0, state
) = BNX2X_FP_STATE_IRQ
;
6648 static void bnx2x_napi_enable(struct bnx2x
*bp
)
6652 for_each_rx_queue(bp
, i
)
6653 napi_enable(&bnx2x_fp(bp
, i
, napi
));
6656 static void bnx2x_napi_disable(struct bnx2x
*bp
)
6660 for_each_rx_queue(bp
, i
)
6661 napi_disable(&bnx2x_fp(bp
, i
, napi
));
6664 static void bnx2x_netif_start(struct bnx2x
*bp
)
6668 intr_sem
= atomic_dec_and_test(&bp
->intr_sem
);
6669 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6672 if (netif_running(bp
->dev
)) {
6673 bnx2x_napi_enable(bp
);
6674 bnx2x_int_enable(bp
);
6675 if (bp
->state
== BNX2X_STATE_OPEN
)
6676 netif_tx_wake_all_queues(bp
->dev
);
6681 static void bnx2x_netif_stop(struct bnx2x
*bp
, int disable_hw
)
6683 bnx2x_int_disable_sync(bp
, disable_hw
);
6684 bnx2x_napi_disable(bp
);
6685 netif_tx_disable(bp
->dev
);
6686 bp
->dev
->trans_start
= jiffies
; /* prevent tx timeout */
6690 * Init service functions
6693 static void bnx2x_set_mac_addr_e1(struct bnx2x
*bp
, int set
)
6695 struct mac_configuration_cmd
*config
= bnx2x_sp(bp
, mac_config
);
6696 int port
= BP_PORT(bp
);
6699 * unicasts 0-31:port0 32-63:port1
6700 * multicast 64-127:port0 128-191:port1
6702 config
->hdr
.length
= 2;
6703 config
->hdr
.offset
= port
? 32 : 0;
6704 config
->hdr
.client_id
= bp
->fp
->cl_id
;
6705 config
->hdr
.reserved1
= 0;
6708 config
->config_table
[0].cam_entry
.msb_mac_addr
=
6709 swab16(*(u16
*)&bp
->dev
->dev_addr
[0]);
6710 config
->config_table
[0].cam_entry
.middle_mac_addr
=
6711 swab16(*(u16
*)&bp
->dev
->dev_addr
[2]);
6712 config
->config_table
[0].cam_entry
.lsb_mac_addr
=
6713 swab16(*(u16
*)&bp
->dev
->dev_addr
[4]);
6714 config
->config_table
[0].cam_entry
.flags
= cpu_to_le16(port
);
6716 config
->config_table
[0].target_table_entry
.flags
= 0;
6718 CAM_INVALIDATE(config
->config_table
[0]);
6719 config
->config_table
[0].target_table_entry
.client_id
= 0;
6720 config
->config_table
[0].target_table_entry
.vlan_id
= 0;
6722 DP(NETIF_MSG_IFUP
, "%s MAC (%04x:%04x:%04x)\n",
6723 (set
? "setting" : "clearing"),
6724 config
->config_table
[0].cam_entry
.msb_mac_addr
,
6725 config
->config_table
[0].cam_entry
.middle_mac_addr
,
6726 config
->config_table
[0].cam_entry
.lsb_mac_addr
);
6729 config
->config_table
[1].cam_entry
.msb_mac_addr
= cpu_to_le16(0xffff);
6730 config
->config_table
[1].cam_entry
.middle_mac_addr
= cpu_to_le16(0xffff);
6731 config
->config_table
[1].cam_entry
.lsb_mac_addr
= cpu_to_le16(0xffff);
6732 config
->config_table
[1].cam_entry
.flags
= cpu_to_le16(port
);
6734 config
->config_table
[1].target_table_entry
.flags
=
6735 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST
;
6737 CAM_INVALIDATE(config
->config_table
[1]);
6738 config
->config_table
[1].target_table_entry
.client_id
= 0;
6739 config
->config_table
[1].target_table_entry
.vlan_id
= 0;
6741 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_SET_MAC
, 0,
6742 U64_HI(bnx2x_sp_mapping(bp
, mac_config
)),
6743 U64_LO(bnx2x_sp_mapping(bp
, mac_config
)), 0);
6746 static void bnx2x_set_mac_addr_e1h(struct bnx2x
*bp
, int set
)
6748 struct mac_configuration_cmd_e1h
*config
=
6749 (struct mac_configuration_cmd_e1h
*)bnx2x_sp(bp
, mac_config
);
6751 if (set
&& (bp
->state
!= BNX2X_STATE_OPEN
)) {
6752 DP(NETIF_MSG_IFUP
, "state is %x, returning\n", bp
->state
);
6756 /* CAM allocation for E1H
6757 * unicasts: by func number
6758 * multicast: 20+FUNC*20, 20 each
6760 config
->hdr
.length
= 1;
6761 config
->hdr
.offset
= BP_FUNC(bp
);
6762 config
->hdr
.client_id
= bp
->fp
->cl_id
;
6763 config
->hdr
.reserved1
= 0;
6766 config
->config_table
[0].msb_mac_addr
=
6767 swab16(*(u16
*)&bp
->dev
->dev_addr
[0]);
6768 config
->config_table
[0].middle_mac_addr
=
6769 swab16(*(u16
*)&bp
->dev
->dev_addr
[2]);
6770 config
->config_table
[0].lsb_mac_addr
=
6771 swab16(*(u16
*)&bp
->dev
->dev_addr
[4]);
6772 config
->config_table
[0].client_id
= BP_L_ID(bp
);
6773 config
->config_table
[0].vlan_id
= 0;
6774 config
->config_table
[0].e1hov_id
= cpu_to_le16(bp
->e1hov
);
6776 config
->config_table
[0].flags
= BP_PORT(bp
);
6778 config
->config_table
[0].flags
=
6779 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE
;
6781 DP(NETIF_MSG_IFUP
, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6782 (set
? "setting" : "clearing"),
6783 config
->config_table
[0].msb_mac_addr
,
6784 config
->config_table
[0].middle_mac_addr
,
6785 config
->config_table
[0].lsb_mac_addr
, bp
->e1hov
, BP_L_ID(bp
));
6787 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_SET_MAC
, 0,
6788 U64_HI(bnx2x_sp_mapping(bp
, mac_config
)),
6789 U64_LO(bnx2x_sp_mapping(bp
, mac_config
)), 0);
6792 static int bnx2x_wait_ramrod(struct bnx2x
*bp
, int state
, int idx
,
6793 int *state_p
, int poll
)
6795 /* can take a while if any port is running */
6798 DP(NETIF_MSG_IFUP
, "%s for state to become %x on IDX [%d]\n",
6799 poll
? "polling" : "waiting", state
, idx
);
6804 bnx2x_rx_int(bp
->fp
, 10);
6805 /* if index is different from 0
6806 * the reply for some commands will
6807 * be on the non default queue
6810 bnx2x_rx_int(&bp
->fp
[idx
], 10);
6813 mb(); /* state is changed by bnx2x_sp_event() */
6814 if (*state_p
== state
) {
6815 #ifdef BNX2X_STOP_ON_ERROR
6816 DP(NETIF_MSG_IFUP
, "exit (cnt %d)\n", 5000 - cnt
);
6825 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6826 poll
? "polling" : "waiting", state
, idx
);
6827 #ifdef BNX2X_STOP_ON_ERROR
6834 static int bnx2x_setup_leading(struct bnx2x
*bp
)
6838 /* reset IGU state */
6839 bnx2x_ack_sb(bp
, bp
->fp
[0].sb_id
, CSTORM_ID
, 0, IGU_INT_ENABLE
, 0);
6842 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_PORT_SETUP
, 0, 0, 0, 0);
6844 /* Wait for completion */
6845 rc
= bnx2x_wait_ramrod(bp
, BNX2X_STATE_OPEN
, 0, &(bp
->state
), 0);
6850 static int bnx2x_setup_multi(struct bnx2x
*bp
, int index
)
6852 struct bnx2x_fastpath
*fp
= &bp
->fp
[index
];
6854 /* reset IGU state */
6855 bnx2x_ack_sb(bp
, fp
->sb_id
, CSTORM_ID
, 0, IGU_INT_ENABLE
, 0);
6858 fp
->state
= BNX2X_FP_STATE_OPENING
;
6859 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_CLIENT_SETUP
, index
, 0,
6862 /* Wait for completion */
6863 return bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_OPEN
, index
,
6867 static int bnx2x_poll(struct napi_struct
*napi
, int budget
);
6869 static void bnx2x_set_int_mode(struct bnx2x
*bp
)
6877 bp
->num_rx_queues
= num_queues
;
6878 bp
->num_tx_queues
= num_queues
;
6880 "set number of queues to %d\n", num_queues
);
6885 if (bp
->multi_mode
== ETH_RSS_MODE_REGULAR
)
6886 num_queues
= min_t(u32
, num_online_cpus(),
6887 BNX2X_MAX_QUEUES(bp
));
6890 bp
->num_rx_queues
= num_queues
;
6891 bp
->num_tx_queues
= num_queues
;
6892 DP(NETIF_MSG_IFUP
, "set number of rx queues to %d"
6893 " number of tx queues to %d\n",
6894 bp
->num_rx_queues
, bp
->num_tx_queues
);
6895 /* if we can't use MSI-X we only need one fp,
6896 * so try to enable MSI-X with the requested number of fp's
6897 * and fallback to MSI or legacy INTx with one fp
6899 if (bnx2x_enable_msix(bp
)) {
6900 /* failed to enable MSI-X */
6902 bp
->num_rx_queues
= num_queues
;
6903 bp
->num_tx_queues
= num_queues
;
6905 BNX2X_ERR("Multi requested but failed to "
6906 "enable MSI-X set number of "
6907 "queues to %d\n", num_queues
);
6911 bp
->dev
->real_num_tx_queues
= bp
->num_tx_queues
;
6914 static void bnx2x_set_rx_mode(struct net_device
*dev
);
6916 /* must be called with rtnl_lock */
6917 static int bnx2x_nic_load(struct bnx2x
*bp
, int load_mode
)
6921 #ifdef BNX2X_STOP_ON_ERROR
6922 DP(NETIF_MSG_IFUP
, "enter load_mode %d\n", load_mode
);
6923 if (unlikely(bp
->panic
))
6927 bp
->state
= BNX2X_STATE_OPENING_WAIT4_LOAD
;
6929 bnx2x_set_int_mode(bp
);
6931 if (bnx2x_alloc_mem(bp
))
6934 for_each_rx_queue(bp
, i
)
6935 bnx2x_fp(bp
, i
, disable_tpa
) =
6936 ((bp
->flags
& TPA_ENABLE_FLAG
) == 0);
6938 for_each_rx_queue(bp
, i
)
6939 netif_napi_add(bp
->dev
, &bnx2x_fp(bp
, i
, napi
),
6942 #ifdef BNX2X_STOP_ON_ERROR
6943 for_each_rx_queue(bp
, i
) {
6944 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
6946 fp
->poll_no_work
= 0;
6948 fp
->poll_max_calls
= 0;
6949 fp
->poll_complete
= 0;
6953 bnx2x_napi_enable(bp
);
6955 if (bp
->flags
& USING_MSIX_FLAG
) {
6956 rc
= bnx2x_req_msix_irqs(bp
);
6958 pci_disable_msix(bp
->pdev
);
6962 if ((rc
!= -ENOMEM
) && (int_mode
!= INT_MODE_INTx
))
6963 bnx2x_enable_msi(bp
);
6965 rc
= bnx2x_req_irq(bp
);
6967 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc
);
6968 if (bp
->flags
& USING_MSI_FLAG
)
6969 pci_disable_msi(bp
->pdev
);
6972 if (bp
->flags
& USING_MSI_FLAG
) {
6973 bp
->dev
->irq
= bp
->pdev
->irq
;
6974 printk(KERN_INFO PFX
"%s: using MSI IRQ %d\n",
6975 bp
->dev
->name
, bp
->pdev
->irq
);
6979 /* Send LOAD_REQUEST command to MCP
6980 Returns the type of LOAD command:
6981 if it is the first port to be initialized
6982 common blocks should be initialized, otherwise - not
6984 if (!BP_NOMCP(bp
)) {
6985 load_code
= bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_REQ
);
6987 BNX2X_ERR("MCP response failure, aborting\n");
6991 if (load_code
== FW_MSG_CODE_DRV_LOAD_REFUSED
) {
6992 rc
= -EBUSY
; /* other port in diagnostic mode */
6997 int port
= BP_PORT(bp
);
6999 DP(NETIF_MSG_IFUP
, "NO MCP - load counts %d, %d, %d\n",
7000 load_count
[0], load_count
[1], load_count
[2]);
7002 load_count
[1 + port
]++;
7003 DP(NETIF_MSG_IFUP
, "NO MCP - new load counts %d, %d, %d\n",
7004 load_count
[0], load_count
[1], load_count
[2]);
7005 if (load_count
[0] == 1)
7006 load_code
= FW_MSG_CODE_DRV_LOAD_COMMON
;
7007 else if (load_count
[1 + port
] == 1)
7008 load_code
= FW_MSG_CODE_DRV_LOAD_PORT
;
7010 load_code
= FW_MSG_CODE_DRV_LOAD_FUNCTION
;
7013 if ((load_code
== FW_MSG_CODE_DRV_LOAD_COMMON
) ||
7014 (load_code
== FW_MSG_CODE_DRV_LOAD_PORT
))
7018 DP(NETIF_MSG_LINK
, "pmf %d\n", bp
->port
.pmf
);
7021 rc
= bnx2x_init_hw(bp
, load_code
);
7023 BNX2X_ERR("HW init failed, aborting\n");
7027 /* Setup NIC internals and enable interrupts */
7028 bnx2x_nic_init(bp
, load_code
);
7030 /* Send LOAD_DONE command to MCP */
7031 if (!BP_NOMCP(bp
)) {
7032 load_code
= bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_DONE
);
7034 BNX2X_ERR("MCP response failure, aborting\n");
7040 bp
->state
= BNX2X_STATE_OPENING_WAIT4_PORT
;
7042 rc
= bnx2x_setup_leading(bp
);
7044 BNX2X_ERR("Setup leading failed!\n");
7048 if (CHIP_IS_E1H(bp
))
7049 if (bp
->mf_config
& FUNC_MF_CFG_FUNC_DISABLED
) {
7050 DP(NETIF_MSG_IFUP
, "mf_cfg function disabled\n");
7051 bp
->state
= BNX2X_STATE_DISABLED
;
7054 if (bp
->state
== BNX2X_STATE_OPEN
)
7055 for_each_nondefault_queue(bp
, i
) {
7056 rc
= bnx2x_setup_multi(bp
, i
);
7062 bnx2x_set_mac_addr_e1(bp
, 1);
7064 bnx2x_set_mac_addr_e1h(bp
, 1);
7067 bnx2x_initial_phy_init(bp
, load_mode
);
7069 /* Start fast path */
7070 switch (load_mode
) {
7072 /* Tx queue should be only reenabled */
7073 netif_tx_wake_all_queues(bp
->dev
);
7074 /* Initialize the receive filter. */
7075 bnx2x_set_rx_mode(bp
->dev
);
7079 netif_tx_start_all_queues(bp
->dev
);
7080 /* Initialize the receive filter. */
7081 bnx2x_set_rx_mode(bp
->dev
);
7085 /* Initialize the receive filter. */
7086 bnx2x_set_rx_mode(bp
->dev
);
7087 bp
->state
= BNX2X_STATE_DIAG
;
7095 bnx2x__link_status_update(bp
);
7097 /* start the timer */
7098 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
7104 bnx2x_int_disable_sync(bp
, 1);
7105 if (!BP_NOMCP(bp
)) {
7106 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP
);
7107 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
);
7110 /* Free SKBs, SGEs, TPA pool and driver internals */
7111 bnx2x_free_skbs(bp
);
7112 for_each_rx_queue(bp
, i
)
7113 bnx2x_free_rx_sge_range(bp
, bp
->fp
+ i
, NUM_RX_SGE
);
7118 bnx2x_napi_disable(bp
);
7119 for_each_rx_queue(bp
, i
)
7120 netif_napi_del(&bnx2x_fp(bp
, i
, napi
));
7126 static int bnx2x_stop_multi(struct bnx2x
*bp
, int index
)
7128 struct bnx2x_fastpath
*fp
= &bp
->fp
[index
];
7131 /* halt the connection */
7132 fp
->state
= BNX2X_FP_STATE_HALTING
;
7133 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_HALT
, index
, 0, fp
->cl_id
, 0);
7135 /* Wait for completion */
7136 rc
= bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_HALTED
, index
,
7138 if (rc
) /* timeout */
7141 /* delete cfc entry */
7142 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_CFC_DEL
, index
, 0, 0, 1);
7144 /* Wait for completion */
7145 rc
= bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_CLOSED
, index
,
7150 static int bnx2x_stop_leading(struct bnx2x
*bp
)
7152 __le16 dsb_sp_prod_idx
;
7153 /* if the other port is handling traffic,
7154 this can take a lot of time */
7160 /* Send HALT ramrod */
7161 bp
->fp
[0].state
= BNX2X_FP_STATE_HALTING
;
7162 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_HALT
, 0, 0, bp
->fp
->cl_id
, 0);
7164 /* Wait for completion */
7165 rc
= bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_HALTED
, 0,
7166 &(bp
->fp
[0].state
), 1);
7167 if (rc
) /* timeout */
7170 dsb_sp_prod_idx
= *bp
->dsb_sp_prod
;
7172 /* Send PORT_DELETE ramrod */
7173 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_PORT_DEL
, 0, 0, 0, 1);
7175 /* Wait for completion to arrive on default status block
7176 we are going to reset the chip anyway
7177 so there is not much to do if this times out
7179 while (dsb_sp_prod_idx
== *bp
->dsb_sp_prod
) {
7181 DP(NETIF_MSG_IFDOWN
, "timeout waiting for port del "
7182 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7183 *bp
->dsb_sp_prod
, dsb_sp_prod_idx
);
7184 #ifdef BNX2X_STOP_ON_ERROR
7192 rmb(); /* Refresh the dsb_sp_prod */
7194 bp
->state
= BNX2X_STATE_CLOSING_WAIT4_UNLOAD
;
7195 bp
->fp
[0].state
= BNX2X_FP_STATE_CLOSED
;
7200 static void bnx2x_reset_func(struct bnx2x
*bp
)
7202 int port
= BP_PORT(bp
);
7203 int func
= BP_FUNC(bp
);
7207 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, 0);
7208 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, 0);
7211 base
= FUNC_ILT_BASE(func
);
7212 for (i
= base
; i
< base
+ ILT_PER_FUNC
; i
++)
7213 bnx2x_ilt_wr(bp
, i
, 0);
7216 static void bnx2x_reset_port(struct bnx2x
*bp
)
7218 int port
= BP_PORT(bp
);
7221 REG_WR(bp
, NIG_REG_MASK_INTERRUPT_PORT0
+ port
*4, 0);
7223 /* Do not rcv packets to BRB */
7224 REG_WR(bp
, NIG_REG_LLH0_BRB1_DRV_MASK
+ port
*4, 0x0);
7225 /* Do not direct rcv packets that are not for MCP to the BRB */
7226 REG_WR(bp
, (port
? NIG_REG_LLH1_BRB1_NOT_MCP
:
7227 NIG_REG_LLH0_BRB1_NOT_MCP
), 0x0);
7230 REG_WR(bp
, MISC_REG_AEU_MASK_ATTN_FUNC_0
+ port
*4, 0);
7233 /* Check for BRB port occupancy */
7234 val
= REG_RD(bp
, BRB1_REG_PORT_NUM_OCC_BLOCKS_0
+ port
*4);
7236 DP(NETIF_MSG_IFDOWN
,
7237 "BRB1 is not empty %d blocks are occupied\n", val
);
7239 /* TODO: Close Doorbell port? */
7242 static void bnx2x_reset_chip(struct bnx2x
*bp
, u32 reset_code
)
7244 DP(BNX2X_MSG_MCP
, "function %d reset_code %x\n",
7245 BP_FUNC(bp
), reset_code
);
7247 switch (reset_code
) {
7248 case FW_MSG_CODE_DRV_UNLOAD_COMMON
:
7249 bnx2x_reset_port(bp
);
7250 bnx2x_reset_func(bp
);
7251 bnx2x_reset_common(bp
);
7254 case FW_MSG_CODE_DRV_UNLOAD_PORT
:
7255 bnx2x_reset_port(bp
);
7256 bnx2x_reset_func(bp
);
7259 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION
:
7260 bnx2x_reset_func(bp
);
7264 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code
);
7269 /* must be called with rtnl_lock */
7270 static int bnx2x_nic_unload(struct bnx2x
*bp
, int unload_mode
)
7272 int port
= BP_PORT(bp
);
7276 bp
->state
= BNX2X_STATE_CLOSING_WAIT4_HALT
;
7278 bp
->rx_mode
= BNX2X_RX_MODE_NONE
;
7279 bnx2x_set_storm_rx_mode(bp
);
7281 bnx2x_netif_stop(bp
, 1);
7283 del_timer_sync(&bp
->timer
);
7284 SHMEM_WR(bp
, func_mb
[BP_FUNC(bp
)].drv_pulse_mb
,
7285 (DRV_PULSE_ALWAYS_ALIVE
| bp
->fw_drv_pulse_wr_seq
));
7286 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
7291 /* Wait until tx fastpath tasks complete */
7292 for_each_tx_queue(bp
, i
) {
7293 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
7296 while (bnx2x_has_tx_work_unload(fp
)) {
7300 BNX2X_ERR("timeout waiting for queue[%d]\n",
7302 #ifdef BNX2X_STOP_ON_ERROR
7313 /* Give HW time to discard old tx messages */
7316 if (CHIP_IS_E1(bp
)) {
7317 struct mac_configuration_cmd
*config
=
7318 bnx2x_sp(bp
, mcast_config
);
7320 bnx2x_set_mac_addr_e1(bp
, 0);
7322 for (i
= 0; i
< config
->hdr
.length
; i
++)
7323 CAM_INVALIDATE(config
->config_table
[i
]);
7325 config
->hdr
.length
= i
;
7326 if (CHIP_REV_IS_SLOW(bp
))
7327 config
->hdr
.offset
= BNX2X_MAX_EMUL_MULTI
*(1 + port
);
7329 config
->hdr
.offset
= BNX2X_MAX_MULTICAST
*(1 + port
);
7330 config
->hdr
.client_id
= bp
->fp
->cl_id
;
7331 config
->hdr
.reserved1
= 0;
7333 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_SET_MAC
, 0,
7334 U64_HI(bnx2x_sp_mapping(bp
, mcast_config
)),
7335 U64_LO(bnx2x_sp_mapping(bp
, mcast_config
)), 0);
7338 REG_WR(bp
, NIG_REG_LLH0_FUNC_EN
+ port
*8, 0);
7340 bnx2x_set_mac_addr_e1h(bp
, 0);
7342 for (i
= 0; i
< MC_HASH_SIZE
; i
++)
7343 REG_WR(bp
, MC_HASH_OFFSET(bp
, i
), 0);
7346 if (unload_mode
== UNLOAD_NORMAL
)
7347 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
7349 else if (bp
->flags
& NO_WOL_FLAG
) {
7350 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP
;
7351 if (CHIP_IS_E1H(bp
))
7352 REG_WR(bp
, MISC_REG_E1HMF_MODE
, 0);
7354 } else if (bp
->wol
) {
7355 u32 emac_base
= port
? GRCBASE_EMAC1
: GRCBASE_EMAC0
;
7356 u8
*mac_addr
= bp
->dev
->dev_addr
;
7358 /* The mac address is written to entries 1-4 to
7359 preserve entry 0 which is used by the PMF */
7360 u8 entry
= (BP_E1HVN(bp
) + 1)*8;
7362 val
= (mac_addr
[0] << 8) | mac_addr
[1];
7363 EMAC_WR(bp
, EMAC_REG_EMAC_MAC_MATCH
+ entry
, val
);
7365 val
= (mac_addr
[2] << 24) | (mac_addr
[3] << 16) |
7366 (mac_addr
[4] << 8) | mac_addr
[5];
7367 EMAC_WR(bp
, EMAC_REG_EMAC_MAC_MATCH
+ entry
+ 4, val
);
7369 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_EN
;
7372 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
7374 /* Close multi and leading connections
7375 Completions for ramrods are collected in a synchronous way */
7376 for_each_nondefault_queue(bp
, i
)
7377 if (bnx2x_stop_multi(bp
, i
))
7380 rc
= bnx2x_stop_leading(bp
);
7382 BNX2X_ERR("Stop leading failed!\n");
7383 #ifdef BNX2X_STOP_ON_ERROR
7392 reset_code
= bnx2x_fw_command(bp
, reset_code
);
7394 DP(NETIF_MSG_IFDOWN
, "NO MCP - load counts %d, %d, %d\n",
7395 load_count
[0], load_count
[1], load_count
[2]);
7397 load_count
[1 + port
]--;
7398 DP(NETIF_MSG_IFDOWN
, "NO MCP - new load counts %d, %d, %d\n",
7399 load_count
[0], load_count
[1], load_count
[2]);
7400 if (load_count
[0] == 0)
7401 reset_code
= FW_MSG_CODE_DRV_UNLOAD_COMMON
;
7402 else if (load_count
[1 + port
] == 0)
7403 reset_code
= FW_MSG_CODE_DRV_UNLOAD_PORT
;
7405 reset_code
= FW_MSG_CODE_DRV_UNLOAD_FUNCTION
;
7408 if ((reset_code
== FW_MSG_CODE_DRV_UNLOAD_COMMON
) ||
7409 (reset_code
== FW_MSG_CODE_DRV_UNLOAD_PORT
))
7410 bnx2x__link_reset(bp
);
7412 /* Reset the chip */
7413 bnx2x_reset_chip(bp
, reset_code
);
7415 /* Report UNLOAD_DONE to MCP */
7417 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
);
7421 /* Free SKBs, SGEs, TPA pool and driver internals */
7422 bnx2x_free_skbs(bp
);
7423 for_each_rx_queue(bp
, i
)
7424 bnx2x_free_rx_sge_range(bp
, bp
->fp
+ i
, NUM_RX_SGE
);
7425 for_each_rx_queue(bp
, i
)
7426 netif_napi_del(&bnx2x_fp(bp
, i
, napi
));
7429 bp
->state
= BNX2X_STATE_CLOSED
;
7431 netif_carrier_off(bp
->dev
);
7436 static void bnx2x_reset_task(struct work_struct
*work
)
7438 struct bnx2x
*bp
= container_of(work
, struct bnx2x
, reset_task
);
7440 #ifdef BNX2X_STOP_ON_ERROR
7441 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7442 " so reset not done to allow debug dump,\n"
7443 KERN_ERR
" you will need to reboot when done\n");
7449 if (!netif_running(bp
->dev
))
7450 goto reset_task_exit
;
7452 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
7453 bnx2x_nic_load(bp
, LOAD_NORMAL
);
7459 /* end of nic load/unload */
7464 * Init service functions
7467 static inline u32
bnx2x_get_pretend_reg(struct bnx2x
*bp
, int func
)
7470 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0
;
7471 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1
;
7472 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2
;
7473 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3
;
7474 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4
;
7475 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5
;
7476 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6
;
7477 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7
;
7479 BNX2X_ERR("Unsupported function index: %d\n", func
);
7484 static void bnx2x_undi_int_disable_e1h(struct bnx2x
*bp
, int orig_func
)
7486 u32 reg
= bnx2x_get_pretend_reg(bp
, orig_func
), new_val
;
7488 /* Flush all outstanding writes */
7491 /* Pretend to be function 0 */
7493 /* Flush the GRC transaction (in the chip) */
7494 new_val
= REG_RD(bp
, reg
);
7496 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7501 /* From now we are in the "like-E1" mode */
7502 bnx2x_int_disable(bp
);
7504 /* Flush all outstanding writes */
7507 /* Restore the original funtion settings */
7508 REG_WR(bp
, reg
, orig_func
);
7509 new_val
= REG_RD(bp
, reg
);
7510 if (new_val
!= orig_func
) {
7511 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7512 orig_func
, new_val
);
7517 static inline void bnx2x_undi_int_disable(struct bnx2x
*bp
, int func
)
7519 if (CHIP_IS_E1H(bp
))
7520 bnx2x_undi_int_disable_e1h(bp
, func
);
7522 bnx2x_int_disable(bp
);
7525 static void __devinit
bnx2x_undi_unload(struct bnx2x
*bp
)
7529 /* Check if there is any driver already loaded */
7530 val
= REG_RD(bp
, MISC_REG_UNPREPARED
);
7532 /* Check if it is the UNDI driver
7533 * UNDI driver initializes CID offset for normal bell to 0x7
7535 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_UNDI
);
7536 val
= REG_RD(bp
, DORQ_REG_NORM_CID_OFST
);
7538 u32 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
7540 int func
= BP_FUNC(bp
);
7544 /* clear the UNDI indication */
7545 REG_WR(bp
, DORQ_REG_NORM_CID_OFST
, 0);
7547 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7549 /* try unload UNDI on port 0 */
7552 (SHMEM_RD(bp
, func_mb
[bp
->func
].drv_mb_header
) &
7553 DRV_MSG_SEQ_NUMBER_MASK
);
7554 reset_code
= bnx2x_fw_command(bp
, reset_code
);
7556 /* if UNDI is loaded on the other port */
7557 if (reset_code
!= FW_MSG_CODE_DRV_UNLOAD_COMMON
) {
7559 /* send "DONE" for previous unload */
7560 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
);
7562 /* unload UNDI on port 1 */
7565 (SHMEM_RD(bp
, func_mb
[bp
->func
].drv_mb_header
) &
7566 DRV_MSG_SEQ_NUMBER_MASK
);
7567 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
7569 bnx2x_fw_command(bp
, reset_code
);
7572 /* now it's safe to release the lock */
7573 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_UNDI
);
7575 bnx2x_undi_int_disable(bp
, func
);
7577 /* close input traffic and wait for it */
7578 /* Do not rcv packets to BRB */
7580 (BP_PORT(bp
) ? NIG_REG_LLH1_BRB1_DRV_MASK
:
7581 NIG_REG_LLH0_BRB1_DRV_MASK
), 0x0);
7582 /* Do not direct rcv packets that are not for MCP to
7585 (BP_PORT(bp
) ? NIG_REG_LLH1_BRB1_NOT_MCP
:
7586 NIG_REG_LLH0_BRB1_NOT_MCP
), 0x0);
7589 (BP_PORT(bp
) ? MISC_REG_AEU_MASK_ATTN_FUNC_1
:
7590 MISC_REG_AEU_MASK_ATTN_FUNC_0
), 0);
7593 /* save NIG port swap info */
7594 swap_val
= REG_RD(bp
, NIG_REG_PORT_SWAP
);
7595 swap_en
= REG_RD(bp
, NIG_REG_STRAP_OVERRIDE
);
7598 GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
,
7601 GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_CLEAR
,
7603 /* take the NIG out of reset and restore swap values */
7605 GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
,
7606 MISC_REGISTERS_RESET_REG_1_RST_NIG
);
7607 REG_WR(bp
, NIG_REG_PORT_SWAP
, swap_val
);
7608 REG_WR(bp
, NIG_REG_STRAP_OVERRIDE
, swap_en
);
7610 /* send unload done to the MCP */
7611 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
);
7613 /* restore our func and fw_seq */
7616 (SHMEM_RD(bp
, func_mb
[bp
->func
].drv_mb_header
) &
7617 DRV_MSG_SEQ_NUMBER_MASK
);
7620 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_UNDI
);
7624 static void __devinit
bnx2x_get_common_hwinfo(struct bnx2x
*bp
)
7626 u32 val
, val2
, val3
, val4
, id
;
7629 /* Get the chip revision id and number. */
7630 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7631 val
= REG_RD(bp
, MISC_REG_CHIP_NUM
);
7632 id
= ((val
& 0xffff) << 16);
7633 val
= REG_RD(bp
, MISC_REG_CHIP_REV
);
7634 id
|= ((val
& 0xf) << 12);
7635 val
= REG_RD(bp
, MISC_REG_CHIP_METAL
);
7636 id
|= ((val
& 0xff) << 4);
7637 val
= REG_RD(bp
, MISC_REG_BOND_ID
);
7639 bp
->common
.chip_id
= id
;
7640 bp
->link_params
.chip_id
= bp
->common
.chip_id
;
7641 BNX2X_DEV_INFO("chip ID is 0x%x\n", id
);
7643 val
= (REG_RD(bp
, 0x2874) & 0x55);
7644 if ((bp
->common
.chip_id
& 0x1) ||
7645 (CHIP_IS_E1(bp
) && val
) || (CHIP_IS_E1H(bp
) && (val
== 0x55))) {
7646 bp
->flags
|= ONE_PORT_FLAG
;
7647 BNX2X_DEV_INFO("single port device\n");
7650 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_CFG4
);
7651 bp
->common
.flash_size
= (NVRAM_1MB_SIZE
<<
7652 (val
& MCPR_NVM_CFG4_FLASH_SIZE
));
7653 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7654 bp
->common
.flash_size
, bp
->common
.flash_size
);
7656 bp
->common
.shmem_base
= REG_RD(bp
, MISC_REG_SHARED_MEM_ADDR
);
7657 bp
->link_params
.shmem_base
= bp
->common
.shmem_base
;
7658 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp
->common
.shmem_base
);
7660 if (!bp
->common
.shmem_base
||
7661 (bp
->common
.shmem_base
< 0xA0000) ||
7662 (bp
->common
.shmem_base
>= 0xC0000)) {
7663 BNX2X_DEV_INFO("MCP not active\n");
7664 bp
->flags
|= NO_MCP_FLAG
;
7668 val
= SHMEM_RD(bp
, validity_map
[BP_PORT(bp
)]);
7669 if ((val
& (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
7670 != (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
7671 BNX2X_ERR("BAD MCP validity signature\n");
7673 bp
->common
.hw_config
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.config
);
7674 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp
->common
.hw_config
);
7676 bp
->link_params
.hw_led_mode
= ((bp
->common
.hw_config
&
7677 SHARED_HW_CFG_LED_MODE_MASK
) >>
7678 SHARED_HW_CFG_LED_MODE_SHIFT
);
7680 bp
->link_params
.feature_config_flags
= 0;
7681 val
= SHMEM_RD(bp
, dev_info
.shared_feature_config
.config
);
7682 if (val
& SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED
)
7683 bp
->link_params
.feature_config_flags
|=
7684 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED
;
7686 bp
->link_params
.feature_config_flags
&=
7687 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED
;
7689 val
= SHMEM_RD(bp
, dev_info
.bc_rev
) >> 8;
7690 bp
->common
.bc_ver
= val
;
7691 BNX2X_DEV_INFO("bc_ver %X\n", val
);
7692 if (val
< BNX2X_BC_VER
) {
7693 /* for now only warn
7694 * later we might need to enforce this */
7695 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7696 " please upgrade BC\n", BNX2X_BC_VER
, val
);
7698 bp
->link_params
.feature_config_flags
|=
7699 (val
>= REQ_BC_VER_4_VRFY_OPT_MDL
) ?
7700 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY
: 0;
7702 if (BP_E1HVN(bp
) == 0) {
7703 pci_read_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_PMC
, &pmc
);
7704 bp
->flags
|= (pmc
& PCI_PM_CAP_PME_D3cold
) ? 0 : NO_WOL_FLAG
;
7706 /* no WOL capability for E1HVN != 0 */
7707 bp
->flags
|= NO_WOL_FLAG
;
7709 BNX2X_DEV_INFO("%sWoL capable\n",
7710 (bp
->flags
& NO_WOL_FLAG
) ? "not " : "");
7712 val
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
);
7713 val2
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
[4]);
7714 val3
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
[8]);
7715 val4
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
[12]);
7717 printk(KERN_INFO PFX
"part number %X-%X-%X-%X\n",
7718 val
, val2
, val3
, val4
);
7721 static void __devinit
bnx2x_link_settings_supported(struct bnx2x
*bp
,
7724 int port
= BP_PORT(bp
);
7727 switch (switch_cfg
) {
7729 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg
);
7732 SERDES_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
);
7733 switch (ext_phy_type
) {
7734 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT
:
7735 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7738 bp
->port
.supported
|= (SUPPORTED_10baseT_Half
|
7739 SUPPORTED_10baseT_Full
|
7740 SUPPORTED_100baseT_Half
|
7741 SUPPORTED_100baseT_Full
|
7742 SUPPORTED_1000baseT_Full
|
7743 SUPPORTED_2500baseX_Full
|
7748 SUPPORTED_Asym_Pause
);
7751 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482
:
7752 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7755 bp
->port
.supported
|= (SUPPORTED_10baseT_Half
|
7756 SUPPORTED_10baseT_Full
|
7757 SUPPORTED_100baseT_Half
|
7758 SUPPORTED_100baseT_Full
|
7759 SUPPORTED_1000baseT_Full
|
7764 SUPPORTED_Asym_Pause
);
7768 BNX2X_ERR("NVRAM config error. "
7769 "BAD SerDes ext_phy_config 0x%x\n",
7770 bp
->link_params
.ext_phy_config
);
7774 bp
->port
.phy_addr
= REG_RD(bp
, NIG_REG_SERDES0_CTRL_PHY_ADDR
+
7776 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp
->port
.phy_addr
);
7779 case SWITCH_CFG_10G
:
7780 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg
);
7783 XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
);
7784 switch (ext_phy_type
) {
7785 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT
:
7786 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7789 bp
->port
.supported
|= (SUPPORTED_10baseT_Half
|
7790 SUPPORTED_10baseT_Full
|
7791 SUPPORTED_100baseT_Half
|
7792 SUPPORTED_100baseT_Full
|
7793 SUPPORTED_1000baseT_Full
|
7794 SUPPORTED_2500baseX_Full
|
7795 SUPPORTED_10000baseT_Full
|
7800 SUPPORTED_Asym_Pause
);
7803 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072
:
7804 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7807 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
7808 SUPPORTED_1000baseT_Full
|
7812 SUPPORTED_Asym_Pause
);
7815 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073
:
7816 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7819 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
7820 SUPPORTED_2500baseX_Full
|
7821 SUPPORTED_1000baseT_Full
|
7825 SUPPORTED_Asym_Pause
);
7828 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705
:
7829 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7832 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
7835 SUPPORTED_Asym_Pause
);
7838 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706
:
7839 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7842 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
7843 SUPPORTED_1000baseT_Full
|
7846 SUPPORTED_Asym_Pause
);
7849 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726
:
7850 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
7853 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
7854 SUPPORTED_1000baseT_Full
|
7858 SUPPORTED_Asym_Pause
);
7861 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727
:
7862 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
7865 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
7866 SUPPORTED_1000baseT_Full
|
7870 SUPPORTED_Asym_Pause
);
7873 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101
:
7874 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7877 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
7881 SUPPORTED_Asym_Pause
);
7884 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481
:
7885 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
7888 bp
->port
.supported
|= (SUPPORTED_10baseT_Half
|
7889 SUPPORTED_10baseT_Full
|
7890 SUPPORTED_100baseT_Half
|
7891 SUPPORTED_100baseT_Full
|
7892 SUPPORTED_1000baseT_Full
|
7893 SUPPORTED_10000baseT_Full
|
7897 SUPPORTED_Asym_Pause
);
7900 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE
:
7901 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7902 bp
->link_params
.ext_phy_config
);
7906 BNX2X_ERR("NVRAM config error. "
7907 "BAD XGXS ext_phy_config 0x%x\n",
7908 bp
->link_params
.ext_phy_config
);
7912 bp
->port
.phy_addr
= REG_RD(bp
, NIG_REG_XGXS0_CTRL_PHY_ADDR
+
7914 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp
->port
.phy_addr
);
7919 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7920 bp
->port
.link_config
);
7923 bp
->link_params
.phy_addr
= bp
->port
.phy_addr
;
7925 /* mask what we support according to speed_cap_mask */
7926 if (!(bp
->link_params
.speed_cap_mask
&
7927 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF
))
7928 bp
->port
.supported
&= ~SUPPORTED_10baseT_Half
;
7930 if (!(bp
->link_params
.speed_cap_mask
&
7931 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL
))
7932 bp
->port
.supported
&= ~SUPPORTED_10baseT_Full
;
7934 if (!(bp
->link_params
.speed_cap_mask
&
7935 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF
))
7936 bp
->port
.supported
&= ~SUPPORTED_100baseT_Half
;
7938 if (!(bp
->link_params
.speed_cap_mask
&
7939 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL
))
7940 bp
->port
.supported
&= ~SUPPORTED_100baseT_Full
;
7942 if (!(bp
->link_params
.speed_cap_mask
&
7943 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G
))
7944 bp
->port
.supported
&= ~(SUPPORTED_1000baseT_Half
|
7945 SUPPORTED_1000baseT_Full
);
7947 if (!(bp
->link_params
.speed_cap_mask
&
7948 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G
))
7949 bp
->port
.supported
&= ~SUPPORTED_2500baseX_Full
;
7951 if (!(bp
->link_params
.speed_cap_mask
&
7952 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G
))
7953 bp
->port
.supported
&= ~SUPPORTED_10000baseT_Full
;
7955 BNX2X_DEV_INFO("supported 0x%x\n", bp
->port
.supported
);
7958 static void __devinit
bnx2x_link_settings_requested(struct bnx2x
*bp
)
7960 bp
->link_params
.req_duplex
= DUPLEX_FULL
;
7962 switch (bp
->port
.link_config
& PORT_FEATURE_LINK_SPEED_MASK
) {
7963 case PORT_FEATURE_LINK_SPEED_AUTO
:
7964 if (bp
->port
.supported
& SUPPORTED_Autoneg
) {
7965 bp
->link_params
.req_line_speed
= SPEED_AUTO_NEG
;
7966 bp
->port
.advertising
= bp
->port
.supported
;
7969 XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
);
7971 if ((ext_phy_type
==
7972 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705
) ||
7974 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706
)) {
7975 /* force 10G, no AN */
7976 bp
->link_params
.req_line_speed
= SPEED_10000
;
7977 bp
->port
.advertising
=
7978 (ADVERTISED_10000baseT_Full
|
7982 BNX2X_ERR("NVRAM config error. "
7983 "Invalid link_config 0x%x"
7984 " Autoneg not supported\n",
7985 bp
->port
.link_config
);
7990 case PORT_FEATURE_LINK_SPEED_10M_FULL
:
7991 if (bp
->port
.supported
& SUPPORTED_10baseT_Full
) {
7992 bp
->link_params
.req_line_speed
= SPEED_10
;
7993 bp
->port
.advertising
= (ADVERTISED_10baseT_Full
|
7996 BNX2X_ERR("NVRAM config error. "
7997 "Invalid link_config 0x%x"
7998 " speed_cap_mask 0x%x\n",
7999 bp
->port
.link_config
,
8000 bp
->link_params
.speed_cap_mask
);
8005 case PORT_FEATURE_LINK_SPEED_10M_HALF
:
8006 if (bp
->port
.supported
& SUPPORTED_10baseT_Half
) {
8007 bp
->link_params
.req_line_speed
= SPEED_10
;
8008 bp
->link_params
.req_duplex
= DUPLEX_HALF
;
8009 bp
->port
.advertising
= (ADVERTISED_10baseT_Half
|
8012 BNX2X_ERR("NVRAM config error. "
8013 "Invalid link_config 0x%x"
8014 " speed_cap_mask 0x%x\n",
8015 bp
->port
.link_config
,
8016 bp
->link_params
.speed_cap_mask
);
8021 case PORT_FEATURE_LINK_SPEED_100M_FULL
:
8022 if (bp
->port
.supported
& SUPPORTED_100baseT_Full
) {
8023 bp
->link_params
.req_line_speed
= SPEED_100
;
8024 bp
->port
.advertising
= (ADVERTISED_100baseT_Full
|
8027 BNX2X_ERR("NVRAM config error. "
8028 "Invalid link_config 0x%x"
8029 " speed_cap_mask 0x%x\n",
8030 bp
->port
.link_config
,
8031 bp
->link_params
.speed_cap_mask
);
8036 case PORT_FEATURE_LINK_SPEED_100M_HALF
:
8037 if (bp
->port
.supported
& SUPPORTED_100baseT_Half
) {
8038 bp
->link_params
.req_line_speed
= SPEED_100
;
8039 bp
->link_params
.req_duplex
= DUPLEX_HALF
;
8040 bp
->port
.advertising
= (ADVERTISED_100baseT_Half
|
8043 BNX2X_ERR("NVRAM config error. "
8044 "Invalid link_config 0x%x"
8045 " speed_cap_mask 0x%x\n",
8046 bp
->port
.link_config
,
8047 bp
->link_params
.speed_cap_mask
);
8052 case PORT_FEATURE_LINK_SPEED_1G
:
8053 if (bp
->port
.supported
& SUPPORTED_1000baseT_Full
) {
8054 bp
->link_params
.req_line_speed
= SPEED_1000
;
8055 bp
->port
.advertising
= (ADVERTISED_1000baseT_Full
|
8058 BNX2X_ERR("NVRAM config error. "
8059 "Invalid link_config 0x%x"
8060 " speed_cap_mask 0x%x\n",
8061 bp
->port
.link_config
,
8062 bp
->link_params
.speed_cap_mask
);
8067 case PORT_FEATURE_LINK_SPEED_2_5G
:
8068 if (bp
->port
.supported
& SUPPORTED_2500baseX_Full
) {
8069 bp
->link_params
.req_line_speed
= SPEED_2500
;
8070 bp
->port
.advertising
= (ADVERTISED_2500baseX_Full
|
8073 BNX2X_ERR("NVRAM config error. "
8074 "Invalid link_config 0x%x"
8075 " speed_cap_mask 0x%x\n",
8076 bp
->port
.link_config
,
8077 bp
->link_params
.speed_cap_mask
);
8082 case PORT_FEATURE_LINK_SPEED_10G_CX4
:
8083 case PORT_FEATURE_LINK_SPEED_10G_KX4
:
8084 case PORT_FEATURE_LINK_SPEED_10G_KR
:
8085 if (bp
->port
.supported
& SUPPORTED_10000baseT_Full
) {
8086 bp
->link_params
.req_line_speed
= SPEED_10000
;
8087 bp
->port
.advertising
= (ADVERTISED_10000baseT_Full
|
8090 BNX2X_ERR("NVRAM config error. "
8091 "Invalid link_config 0x%x"
8092 " speed_cap_mask 0x%x\n",
8093 bp
->port
.link_config
,
8094 bp
->link_params
.speed_cap_mask
);
8100 BNX2X_ERR("NVRAM config error. "
8101 "BAD link speed link_config 0x%x\n",
8102 bp
->port
.link_config
);
8103 bp
->link_params
.req_line_speed
= SPEED_AUTO_NEG
;
8104 bp
->port
.advertising
= bp
->port
.supported
;
8108 bp
->link_params
.req_flow_ctrl
= (bp
->port
.link_config
&
8109 PORT_FEATURE_FLOW_CONTROL_MASK
);
8110 if ((bp
->link_params
.req_flow_ctrl
== BNX2X_FLOW_CTRL_AUTO
) &&
8111 !(bp
->port
.supported
& SUPPORTED_Autoneg
))
8112 bp
->link_params
.req_flow_ctrl
= BNX2X_FLOW_CTRL_NONE
;
8114 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
8115 " advertising 0x%x\n",
8116 bp
->link_params
.req_line_speed
,
8117 bp
->link_params
.req_duplex
,
8118 bp
->link_params
.req_flow_ctrl
, bp
->port
.advertising
);
8121 static void __devinit
bnx2x_get_port_hwinfo(struct bnx2x
*bp
)
8123 int port
= BP_PORT(bp
);
8128 bp
->link_params
.bp
= bp
;
8129 bp
->link_params
.port
= port
;
8131 bp
->link_params
.lane_config
=
8132 SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].lane_config
);
8133 bp
->link_params
.ext_phy_config
=
8135 dev_info
.port_hw_config
[port
].external_phy_config
);
8136 /* BCM8727_NOC => BCM8727 no over current */
8137 if (XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
) ==
8138 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC
) {
8139 bp
->link_params
.ext_phy_config
&=
8140 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK
;
8141 bp
->link_params
.ext_phy_config
|=
8142 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727
;
8143 bp
->link_params
.feature_config_flags
|=
8144 FEATURE_CONFIG_BCM8727_NOC
;
8147 bp
->link_params
.speed_cap_mask
=
8149 dev_info
.port_hw_config
[port
].speed_capability_mask
);
8151 bp
->port
.link_config
=
8152 SHMEM_RD(bp
, dev_info
.port_feature_config
[port
].link_config
);
8154 /* Get the 4 lanes xgxs config rx and tx */
8155 for (i
= 0; i
< 2; i
++) {
8157 dev_info
.port_hw_config
[port
].xgxs_config_rx
[i
<<1]);
8158 bp
->link_params
.xgxs_config_rx
[i
<< 1] = ((val
>>16) & 0xffff);
8159 bp
->link_params
.xgxs_config_rx
[(i
<< 1) + 1] = (val
& 0xffff);
8162 dev_info
.port_hw_config
[port
].xgxs_config_tx
[i
<<1]);
8163 bp
->link_params
.xgxs_config_tx
[i
<< 1] = ((val
>>16) & 0xffff);
8164 bp
->link_params
.xgxs_config_tx
[(i
<< 1) + 1] = (val
& 0xffff);
8167 /* If the device is capable of WoL, set the default state according
8170 config
= SHMEM_RD(bp
, dev_info
.port_feature_config
[port
].config
);
8171 bp
->wol
= (!(bp
->flags
& NO_WOL_FLAG
) &&
8172 (config
& PORT_FEATURE_WOL_ENABLED
));
8174 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8175 " speed_cap_mask 0x%08x link_config 0x%08x\n",
8176 bp
->link_params
.lane_config
,
8177 bp
->link_params
.ext_phy_config
,
8178 bp
->link_params
.speed_cap_mask
, bp
->port
.link_config
);
8180 bp
->link_params
.switch_cfg
|= (bp
->port
.link_config
&
8181 PORT_FEATURE_CONNECTED_SWITCH_MASK
);
8182 bnx2x_link_settings_supported(bp
, bp
->link_params
.switch_cfg
);
8184 bnx2x_link_settings_requested(bp
);
8186 val2
= SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].mac_upper
);
8187 val
= SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].mac_lower
);
8188 bp
->dev
->dev_addr
[0] = (u8
)(val2
>> 8 & 0xff);
8189 bp
->dev
->dev_addr
[1] = (u8
)(val2
& 0xff);
8190 bp
->dev
->dev_addr
[2] = (u8
)(val
>> 24 & 0xff);
8191 bp
->dev
->dev_addr
[3] = (u8
)(val
>> 16 & 0xff);
8192 bp
->dev
->dev_addr
[4] = (u8
)(val
>> 8 & 0xff);
8193 bp
->dev
->dev_addr
[5] = (u8
)(val
& 0xff);
8194 memcpy(bp
->link_params
.mac_addr
, bp
->dev
->dev_addr
, ETH_ALEN
);
8195 memcpy(bp
->dev
->perm_addr
, bp
->dev
->dev_addr
, ETH_ALEN
);
8198 static int __devinit
bnx2x_get_hwinfo(struct bnx2x
*bp
)
8200 int func
= BP_FUNC(bp
);
8204 bnx2x_get_common_hwinfo(bp
);
8208 if (CHIP_IS_E1H(bp
)) {
8210 SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].config
);
8212 val
= (SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].e1hov_tag
) &
8213 FUNC_MF_CFG_E1HOV_TAG_MASK
);
8214 if (val
!= FUNC_MF_CFG_E1HOV_TAG_DEFAULT
) {
8218 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
8220 func
, bp
->e1hov
, bp
->e1hov
);
8222 BNX2X_DEV_INFO("single function mode\n");
8224 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8225 " aborting\n", func
);
8231 if (!BP_NOMCP(bp
)) {
8232 bnx2x_get_port_hwinfo(bp
);
8234 bp
->fw_seq
= (SHMEM_RD(bp
, func_mb
[func
].drv_mb_header
) &
8235 DRV_MSG_SEQ_NUMBER_MASK
);
8236 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp
->fw_seq
);
8240 val2
= SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].mac_upper
);
8241 val
= SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].mac_lower
);
8242 if ((val2
!= FUNC_MF_CFG_UPPERMAC_DEFAULT
) &&
8243 (val
!= FUNC_MF_CFG_LOWERMAC_DEFAULT
)) {
8244 bp
->dev
->dev_addr
[0] = (u8
)(val2
>> 8 & 0xff);
8245 bp
->dev
->dev_addr
[1] = (u8
)(val2
& 0xff);
8246 bp
->dev
->dev_addr
[2] = (u8
)(val
>> 24 & 0xff);
8247 bp
->dev
->dev_addr
[3] = (u8
)(val
>> 16 & 0xff);
8248 bp
->dev
->dev_addr
[4] = (u8
)(val
>> 8 & 0xff);
8249 bp
->dev
->dev_addr
[5] = (u8
)(val
& 0xff);
8250 memcpy(bp
->link_params
.mac_addr
, bp
->dev
->dev_addr
,
8252 memcpy(bp
->dev
->perm_addr
, bp
->dev
->dev_addr
,
8260 /* only supposed to happen on emulation/FPGA */
8261 BNX2X_ERR("warning random MAC workaround active\n");
8262 random_ether_addr(bp
->dev
->dev_addr
);
8263 memcpy(bp
->dev
->perm_addr
, bp
->dev
->dev_addr
, ETH_ALEN
);
8269 static int __devinit
bnx2x_init_bp(struct bnx2x
*bp
)
8271 int func
= BP_FUNC(bp
);
8275 /* Disable interrupt handling until HW is initialized */
8276 atomic_set(&bp
->intr_sem
, 1);
8277 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8279 mutex_init(&bp
->port
.phy_mutex
);
8281 INIT_DELAYED_WORK(&bp
->sp_task
, bnx2x_sp_task
);
8282 INIT_WORK(&bp
->reset_task
, bnx2x_reset_task
);
8284 rc
= bnx2x_get_hwinfo(bp
);
8286 /* need to reset chip if undi was active */
8288 bnx2x_undi_unload(bp
);
8290 if (CHIP_REV_IS_FPGA(bp
))
8291 printk(KERN_ERR PFX
"FPGA detected\n");
8293 if (BP_NOMCP(bp
) && (func
== 0))
8295 "MCP disabled, must load devices in order!\n");
8297 /* Set multi queue mode */
8298 if ((multi_mode
!= ETH_RSS_MODE_DISABLED
) &&
8299 ((int_mode
== INT_MODE_INTx
) || (int_mode
== INT_MODE_MSI
))) {
8301 "Multi disabled since int_mode requested is not MSI-X\n");
8302 multi_mode
= ETH_RSS_MODE_DISABLED
;
8304 bp
->multi_mode
= multi_mode
;
8309 bp
->flags
&= ~TPA_ENABLE_FLAG
;
8310 bp
->dev
->features
&= ~NETIF_F_LRO
;
8312 bp
->flags
|= TPA_ENABLE_FLAG
;
8313 bp
->dev
->features
|= NETIF_F_LRO
;
8318 bp
->tx_ring_size
= MAX_TX_AVAIL
;
8319 bp
->rx_ring_size
= MAX_RX_AVAIL
;
8326 timer_interval
= (CHIP_REV_IS_SLOW(bp
) ? 5*HZ
: HZ
);
8327 bp
->current_interval
= (poll
? poll
: timer_interval
);
8329 init_timer(&bp
->timer
);
8330 bp
->timer
.expires
= jiffies
+ bp
->current_interval
;
8331 bp
->timer
.data
= (unsigned long) bp
;
8332 bp
->timer
.function
= bnx2x_timer
;
8338 * ethtool service functions
8341 /* All ethtool functions called with rtnl_lock */
8343 static int bnx2x_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
8345 struct bnx2x
*bp
= netdev_priv(dev
);
8347 cmd
->supported
= bp
->port
.supported
;
8348 cmd
->advertising
= bp
->port
.advertising
;
8350 if (netif_carrier_ok(dev
)) {
8351 cmd
->speed
= bp
->link_vars
.line_speed
;
8352 cmd
->duplex
= bp
->link_vars
.duplex
;
8354 cmd
->speed
= bp
->link_params
.req_line_speed
;
8355 cmd
->duplex
= bp
->link_params
.req_duplex
;
8360 vn_max_rate
= ((bp
->mf_config
& FUNC_MF_CFG_MAX_BW_MASK
) >>
8361 FUNC_MF_CFG_MAX_BW_SHIFT
) * 100;
8362 if (vn_max_rate
< cmd
->speed
)
8363 cmd
->speed
= vn_max_rate
;
8366 if (bp
->link_params
.switch_cfg
== SWITCH_CFG_10G
) {
8368 XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
);
8370 switch (ext_phy_type
) {
8371 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT
:
8372 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072
:
8373 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073
:
8374 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705
:
8375 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706
:
8376 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726
:
8377 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727
:
8378 cmd
->port
= PORT_FIBRE
;
8381 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101
:
8382 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481
:
8383 cmd
->port
= PORT_TP
;
8386 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE
:
8387 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8388 bp
->link_params
.ext_phy_config
);
8392 DP(NETIF_MSG_LINK
, "BAD XGXS ext_phy_config 0x%x\n",
8393 bp
->link_params
.ext_phy_config
);
8397 cmd
->port
= PORT_TP
;
8399 cmd
->phy_address
= bp
->port
.phy_addr
;
8400 cmd
->transceiver
= XCVR_INTERNAL
;
8402 if (bp
->link_params
.req_line_speed
== SPEED_AUTO_NEG
)
8403 cmd
->autoneg
= AUTONEG_ENABLE
;
8405 cmd
->autoneg
= AUTONEG_DISABLE
;
8410 DP(NETIF_MSG_LINK
, "ethtool_cmd: cmd %d\n"
8411 DP_LEVEL
" supported 0x%x advertising 0x%x speed %d\n"
8412 DP_LEVEL
" duplex %d port %d phy_address %d transceiver %d\n"
8413 DP_LEVEL
" autoneg %d maxtxpkt %d maxrxpkt %d\n",
8414 cmd
->cmd
, cmd
->supported
, cmd
->advertising
, cmd
->speed
,
8415 cmd
->duplex
, cmd
->port
, cmd
->phy_address
, cmd
->transceiver
,
8416 cmd
->autoneg
, cmd
->maxtxpkt
, cmd
->maxrxpkt
);
8421 static int bnx2x_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
8423 struct bnx2x
*bp
= netdev_priv(dev
);
8429 DP(NETIF_MSG_LINK
, "ethtool_cmd: cmd %d\n"
8430 DP_LEVEL
" supported 0x%x advertising 0x%x speed %d\n"
8431 DP_LEVEL
" duplex %d port %d phy_address %d transceiver %d\n"
8432 DP_LEVEL
" autoneg %d maxtxpkt %d maxrxpkt %d\n",
8433 cmd
->cmd
, cmd
->supported
, cmd
->advertising
, cmd
->speed
,
8434 cmd
->duplex
, cmd
->port
, cmd
->phy_address
, cmd
->transceiver
,
8435 cmd
->autoneg
, cmd
->maxtxpkt
, cmd
->maxrxpkt
);
8437 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
8438 if (!(bp
->port
.supported
& SUPPORTED_Autoneg
)) {
8439 DP(NETIF_MSG_LINK
, "Autoneg not supported\n");
8443 /* advertise the requested speed and duplex if supported */
8444 cmd
->advertising
&= bp
->port
.supported
;
8446 bp
->link_params
.req_line_speed
= SPEED_AUTO_NEG
;
8447 bp
->link_params
.req_duplex
= DUPLEX_FULL
;
8448 bp
->port
.advertising
|= (ADVERTISED_Autoneg
|
8451 } else { /* forced speed */
8452 /* advertise the requested speed and duplex if supported */
8453 switch (cmd
->speed
) {
8455 if (cmd
->duplex
== DUPLEX_FULL
) {
8456 if (!(bp
->port
.supported
&
8457 SUPPORTED_10baseT_Full
)) {
8459 "10M full not supported\n");
8463 advertising
= (ADVERTISED_10baseT_Full
|
8466 if (!(bp
->port
.supported
&
8467 SUPPORTED_10baseT_Half
)) {
8469 "10M half not supported\n");
8473 advertising
= (ADVERTISED_10baseT_Half
|
8479 if (cmd
->duplex
== DUPLEX_FULL
) {
8480 if (!(bp
->port
.supported
&
8481 SUPPORTED_100baseT_Full
)) {
8483 "100M full not supported\n");
8487 advertising
= (ADVERTISED_100baseT_Full
|
8490 if (!(bp
->port
.supported
&
8491 SUPPORTED_100baseT_Half
)) {
8493 "100M half not supported\n");
8497 advertising
= (ADVERTISED_100baseT_Half
|
8503 if (cmd
->duplex
!= DUPLEX_FULL
) {
8504 DP(NETIF_MSG_LINK
, "1G half not supported\n");
8508 if (!(bp
->port
.supported
& SUPPORTED_1000baseT_Full
)) {
8509 DP(NETIF_MSG_LINK
, "1G full not supported\n");
8513 advertising
= (ADVERTISED_1000baseT_Full
|
8518 if (cmd
->duplex
!= DUPLEX_FULL
) {
8520 "2.5G half not supported\n");
8524 if (!(bp
->port
.supported
& SUPPORTED_2500baseX_Full
)) {
8526 "2.5G full not supported\n");
8530 advertising
= (ADVERTISED_2500baseX_Full
|
8535 if (cmd
->duplex
!= DUPLEX_FULL
) {
8536 DP(NETIF_MSG_LINK
, "10G half not supported\n");
8540 if (!(bp
->port
.supported
& SUPPORTED_10000baseT_Full
)) {
8541 DP(NETIF_MSG_LINK
, "10G full not supported\n");
8545 advertising
= (ADVERTISED_10000baseT_Full
|
8550 DP(NETIF_MSG_LINK
, "Unsupported speed\n");
8554 bp
->link_params
.req_line_speed
= cmd
->speed
;
8555 bp
->link_params
.req_duplex
= cmd
->duplex
;
8556 bp
->port
.advertising
= advertising
;
8559 DP(NETIF_MSG_LINK
, "req_line_speed %d\n"
8560 DP_LEVEL
" req_duplex %d advertising 0x%x\n",
8561 bp
->link_params
.req_line_speed
, bp
->link_params
.req_duplex
,
8562 bp
->port
.advertising
);
8564 if (netif_running(dev
)) {
8565 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = 0;
}
8604 #define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8605 #define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8607 static int bnx2x_get_regs_len(struct net_device
*dev
)
8609 static u32 regdump_len
;
8610 struct bnx2x
*bp
= netdev_priv(dev
);
8616 if (CHIP_IS_E1(bp
)) {
8617 for (i
= 0; i
< REGS_COUNT
; i
++)
8618 if (IS_E1_ONLINE(reg_addrs
[i
].info
))
8619 regdump_len
+= reg_addrs
[i
].size
;
8621 for (i
= 0; i
< WREGS_COUNT_E1
; i
++)
8622 if (IS_E1_ONLINE(wreg_addrs_e1
[i
].info
))
8623 regdump_len
+= wreg_addrs_e1
[i
].size
*
8624 (1 + wreg_addrs_e1
[i
].read_regs_count
);
8627 for (i
= 0; i
< REGS_COUNT
; i
++)
8628 if (IS_E1H_ONLINE(reg_addrs
[i
].info
))
8629 regdump_len
+= reg_addrs
[i
].size
;
8631 for (i
= 0; i
< WREGS_COUNT_E1H
; i
++)
8632 if (IS_E1H_ONLINE(wreg_addrs_e1h
[i
].info
))
8633 regdump_len
+= wreg_addrs_e1h
[i
].size
*
8634 (1 + wreg_addrs_e1h
[i
].read_regs_count
);
8637 regdump_len
+= sizeof(struct dump_hdr
);
8642 static void bnx2x_get_regs(struct net_device
*dev
,
8643 struct ethtool_regs
*regs
, void *_p
)
8646 struct bnx2x
*bp
= netdev_priv(dev
);
8647 struct dump_hdr dump_hdr
= {0};
8650 memset(p
, 0, regs
->len
);
8652 if (!netif_running(bp
->dev
))
8655 dump_hdr
.hdr_size
= (sizeof(struct dump_hdr
) / 4) - 1;
8656 dump_hdr
.dump_sign
= dump_sign_all
;
8657 dump_hdr
.xstorm_waitp
= REG_RD(bp
, XSTORM_WAITP_ADDR
);
8658 dump_hdr
.tstorm_waitp
= REG_RD(bp
, TSTORM_WAITP_ADDR
);
8659 dump_hdr
.ustorm_waitp
= REG_RD(bp
, USTORM_WAITP_ADDR
);
8660 dump_hdr
.cstorm_waitp
= REG_RD(bp
, CSTORM_WAITP_ADDR
);
8661 dump_hdr
.info
= CHIP_IS_E1(bp
) ? RI_E1_ONLINE
: RI_E1H_ONLINE
;
8663 memcpy(p
, &dump_hdr
, sizeof(struct dump_hdr
));
8664 p
+= dump_hdr
.hdr_size
+ 1;
8666 if (CHIP_IS_E1(bp
)) {
8667 for (i
= 0; i
< REGS_COUNT
; i
++)
8668 if (IS_E1_ONLINE(reg_addrs
[i
].info
))
8669 for (j
= 0; j
< reg_addrs
[i
].size
; j
++)
8671 reg_addrs
[i
].addr
+ j
*4);
8674 for (i
= 0; i
< REGS_COUNT
; i
++)
8675 if (IS_E1H_ONLINE(reg_addrs
[i
].info
))
8676 for (j
= 0; j
< reg_addrs
[i
].size
; j
++)
8678 reg_addrs
[i
].addr
+ j
*4);
static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}
static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static u32 bnx2x_get_link(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->link_vars.link_up;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}
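/*
 * Illustrative sketch, not part of the original driver: the cpu_to_be32()
 * above is what makes the dword read from MCPR_NVM_READ look to ethtool like
 * the byte sequence stored in flash.  Assuming (as the comment above implies)
 * that the first flash byte ends up in the most significant bits of the
 * register value, storing the value in big-endian order reproduces the flash
 * byte order on both little- and big-endian hosts.  The helper name is
 * hypothetical.
 */
static inline void bnx2x_nvram_dword_to_bytes_sketch(u32 reg_val, u8 *out)
{
	__be32 be = cpu_to_be32(reg_val);

	memcpy(out, &be, sizeof(be));	/* out[0..3] = flash bytes b0..b3 */
}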
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  "  buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}
#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  "  buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
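/*
 * Illustrative sketch, not part of the original driver: the single-byte write
 * above is a read-modify-write of the aligned dword, with BYTE_OFFSET()
 * selecting the 8-bit lane that corresponds to the requested offset (e.g.
 * offset 0x102 touches bits 23:16 of the dword at 0x100).  The helper name is
 * hypothetical.
 */
static inline u32 bnx2x_patch_nvram_byte_sketch(u32 dword, u32 offset, u8 byte)
{
	dword &= ~(0xff << BYTE_OFFSET(offset));	/* clear the old byte */
	dword |= ((u32)byte << BYTE_OFFSET(offset));	/* insert the new one */
	return dword;
}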
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far = 0;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  "  buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
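/*
 * Illustrative sketch, not part of the original driver: how the FIRST/LAST
 * command flags are chosen inside the write loop above.  A dword is LAST when
 * it ends the buffer or ends an NVRAM_PAGE_SIZE page, and FIRST when it
 * starts a new page (the very first dword also starts with FIRST).  The
 * helper name is hypothetical.
 */
static inline u32 bnx2x_nvram_wr_flags_sketch(u32 offset, u32 written,
					      u32 buf_size)
{
	u32 cmd_flags = (written == 0) ? MCPR_NVM_COMMAND_FIRST : 0;

	if (written == (buf_size - sizeof(u32)))
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
	else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
	else if ((offset % NVRAM_PAGE_SIZE) == 0)
		cmd_flags |= MCPR_NVM_COMMAND_FIRST;

	return cmd_flags;
}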
9118 static int bnx2x_set_eeprom(struct net_device
*dev
,
9119 struct ethtool_eeprom
*eeprom
, u8
*eebuf
)
9121 struct bnx2x
*bp
= netdev_priv(dev
);
9124 if (!netif_running(dev
))
9127 DP(BNX2X_MSG_NVM
, "ethtool_eeprom: cmd %d\n"
9128 DP_LEVEL
" magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9129 eeprom
->cmd
, eeprom
->magic
, eeprom
->offset
, eeprom
->offset
,
9130 eeprom
->len
, eeprom
->len
);
9132 /* parameters already validated in ethtool_set_eeprom */
9134 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
9135 if (eeprom
->magic
== 0x00504859)
9138 bnx2x_acquire_phy_lock(bp
);
9139 rc
= bnx2x_flash_download(bp
, BP_PORT(bp
),
9140 bp
->link_params
.ext_phy_config
,
9141 (bp
->state
!= BNX2X_STATE_CLOSED
),
9142 eebuf
, eeprom
->len
);
9143 if ((bp
->state
== BNX2X_STATE_OPEN
) ||
9144 (bp
->state
== BNX2X_STATE_DISABLED
)) {
9145 rc
|= bnx2x_link_reset(&bp
->link_params
,
9147 rc
|= bnx2x_phy_init(&bp
->link_params
,
9150 bnx2x_release_phy_lock(bp
);
9152 } else /* Only the PMF can access the PHY */
9155 rc
= bnx2x_nvram_write(bp
, eeprom
->offset
, eebuf
, eeprom
->len
);
static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
		bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
		bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}
static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
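/*
 * Note, not part of the original driver: the lower bound checked in
 * bnx2x_set_ringparam() above (tx_pending must exceed MAX_SKB_FRAGS + 4)
 * presumably leaves room in the Tx ring for a maximally fragmented skb plus
 * the extra start/parsing/last descriptors a single packet may consume, so
 * that one packet can never exhaust the ring on its own.
 */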
9231 static void bnx2x_get_pauseparam(struct net_device
*dev
,
9232 struct ethtool_pauseparam
*epause
)
9234 struct bnx2x
*bp
= netdev_priv(dev
);
9236 epause
->autoneg
= (bp
->link_params
.req_flow_ctrl
==
9237 BNX2X_FLOW_CTRL_AUTO
) &&
9238 (bp
->link_params
.req_line_speed
== SPEED_AUTO_NEG
);
9240 epause
->rx_pause
= ((bp
->link_vars
.flow_ctrl
& BNX2X_FLOW_CTRL_RX
) ==
9241 BNX2X_FLOW_CTRL_RX
);
9242 epause
->tx_pause
= ((bp
->link_vars
.flow_ctrl
& BNX2X_FLOW_CTRL_TX
) ==
9243 BNX2X_FLOW_CTRL_TX
);
9245 DP(NETIF_MSG_LINK
, "ethtool_pauseparam: cmd %d\n"
9246 DP_LEVEL
" autoneg %d rx_pause %d tx_pause %d\n",
9247 epause
->cmd
, epause
->autoneg
, epause
->rx_pause
, epause
->tx_pause
);
9250 static int bnx2x_set_pauseparam(struct net_device
*dev
,
9251 struct ethtool_pauseparam
*epause
)
9253 struct bnx2x
*bp
= netdev_priv(dev
);
9258 DP(NETIF_MSG_LINK
, "ethtool_pauseparam: cmd %d\n"
9259 DP_LEVEL
" autoneg %d rx_pause %d tx_pause %d\n",
9260 epause
->cmd
, epause
->autoneg
, epause
->rx_pause
, epause
->tx_pause
);
9262 bp
->link_params
.req_flow_ctrl
= BNX2X_FLOW_CTRL_AUTO
;
9264 if (epause
->rx_pause
)
9265 bp
->link_params
.req_flow_ctrl
|= BNX2X_FLOW_CTRL_RX
;
9267 if (epause
->tx_pause
)
9268 bp
->link_params
.req_flow_ctrl
|= BNX2X_FLOW_CTRL_TX
;
9270 if (bp
->link_params
.req_flow_ctrl
== BNX2X_FLOW_CTRL_AUTO
)
9271 bp
->link_params
.req_flow_ctrl
= BNX2X_FLOW_CTRL_NONE
;
9273 if (epause
->autoneg
) {
9274 if (!(bp
->port
.supported
& SUPPORTED_Autoneg
)) {
9275 DP(NETIF_MSG_LINK
, "autoneg not supported\n");
9279 if (bp
->link_params
.req_line_speed
== SPEED_AUTO_NEG
)
9280 bp
->link_params
.req_flow_ctrl
= BNX2X_FLOW_CTRL_AUTO
;
9284 "req_flow_ctrl 0x%x\n", bp
->link_params
.req_flow_ctrl
);
9286 if (netif_running(dev
)) {
9287 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
9294 static int bnx2x_set_flags(struct net_device
*dev
, u32 data
)
9296 struct bnx2x
*bp
= netdev_priv(dev
);
9300 /* TPA requires Rx CSUM offloading */
9301 if ((data
& ETH_FLAG_LRO
) && bp
->rx_csum
) {
9302 if (!(dev
->features
& NETIF_F_LRO
)) {
9303 dev
->features
|= NETIF_F_LRO
;
9304 bp
->flags
|= TPA_ENABLE_FLAG
;
9308 } else if (dev
->features
& NETIF_F_LRO
) {
9309 dev
->features
&= ~NETIF_F_LRO
;
9310 bp
->flags
&= ~TPA_ENABLE_FLAG
;
9314 if (changed
&& netif_running(dev
)) {
9315 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
9316 rc
= bnx2x_nic_load(bp
, LOAD_NORMAL
);
9322 static u32
bnx2x_get_rx_csum(struct net_device
*dev
)
9324 struct bnx2x
*bp
= netdev_priv(dev
);
9329 static int bnx2x_set_rx_csum(struct net_device
*dev
, u32 data
)
9331 struct bnx2x
*bp
= netdev_priv(dev
);
9336 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
9337 TPA'ed packets will be discarded due to wrong TCP CSUM */
9339 u32 flags
= ethtool_op_get_flags(dev
);
9341 rc
= bnx2x_set_flags(dev
, (flags
& ~ETH_FLAG_LRO
));
9347 static int bnx2x_set_tso(struct net_device
*dev
, u32 data
)
9350 dev
->features
|= (NETIF_F_TSO
| NETIF_F_TSO_ECN
);
9351 dev
->features
|= NETIF_F_TSO6
;
9353 dev
->vlan_features
|= (NETIF_F_TSO
| NETIF_F_TSO_ECN
);
9354 dev
->vlan_features
|= NETIF_F_TSO6
;
9357 dev
->features
&= ~(NETIF_F_TSO
| NETIF_F_TSO_ECN
);
9358 dev
->features
&= ~NETIF_F_TSO6
;
9360 dev
->vlan_features
&= ~(NETIF_F_TSO
| NETIF_F_TSO_ECN
);
9361 dev
->vlan_features
&= ~NETIF_F_TSO6
;
static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}
9385 static int bnx2x_test_registers(struct bnx2x
*bp
)
9387 int idx
, i
, rc
= -ENODEV
;
9389 int port
= BP_PORT(bp
);
9390 static const struct {
9395 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0
, 4, 0x000003ff },
9396 { DORQ_REG_DB_ADDR0
, 4, 0xffffffff },
9397 { HC_REG_AGG_INT_0
, 4, 0x000003ff },
9398 { PBF_REG_MAC_IF0_ENABLE
, 4, 0x00000001 },
9399 { PBF_REG_P0_INIT_CRD
, 4, 0x000007ff },
9400 { PRS_REG_CID_PORT_0
, 4, 0x00ffffff },
9401 { PXP2_REG_PSWRQ_CDU0_L2P
, 4, 0x000fffff },
9402 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR
, 8, 0x0003ffff },
9403 { PXP2_REG_PSWRQ_TM0_L2P
, 4, 0x000fffff },
9404 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR
, 8, 0x0003ffff },
9405 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P
, 4, 0x000fffff },
9406 { QM_REG_CONNNUM_0
, 4, 0x000fffff },
9407 { TM_REG_LIN0_MAX_ACTIVE_CID
, 4, 0x0003ffff },
9408 { SRC_REG_KEYRSS0_0
, 40, 0xffffffff },
9409 { SRC_REG_KEYRSS0_7
, 40, 0xffffffff },
9410 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00
, 4, 0x00000001 },
9411 { XCM_REG_WU_DA_CNT_CMD00
, 4, 0x00000003 },
9412 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0
, 4, 0x000000ff },
9413 { NIG_REG_EGRESS_MNG0_FIFO
, 20, 0xffffffff },
9414 { NIG_REG_LLH0_T_BIT
, 4, 0x00000001 },
9415 /* 20 */ { NIG_REG_EMAC0_IN_EN
, 4, 0x00000001 },
9416 { NIG_REG_BMAC0_IN_EN
, 4, 0x00000001 },
9417 { NIG_REG_XCM0_OUT_EN
, 4, 0x00000001 },
9418 { NIG_REG_BRB0_OUT_EN
, 4, 0x00000001 },
9419 { NIG_REG_LLH0_XCM_MASK
, 4, 0x00000007 },
9420 { NIG_REG_LLH0_ACPI_PAT_6_LEN
, 68, 0x000000ff },
9421 { NIG_REG_LLH0_ACPI_PAT_0_CRC
, 68, 0xffffffff },
9422 { NIG_REG_LLH0_DEST_MAC_0_0
, 160, 0xffffffff },
9423 { NIG_REG_LLH0_DEST_IP_0_1
, 160, 0xffffffff },
9424 { NIG_REG_LLH0_IPV4_IPV6_0
, 160, 0x00000001 },
9425 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0
, 160, 0x0000ffff },
9426 { NIG_REG_LLH0_DEST_TCP_0
, 160, 0x0000ffff },
9427 { NIG_REG_LLH0_VLAN_ID_0
, 160, 0x00000fff },
9428 { NIG_REG_XGXS_SERDES0_MODE_SEL
, 4, 0x00000001 },
9429 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
, 4, 0x00000001 },
9430 { NIG_REG_STATUS_INTERRUPT_PORT0
, 4, 0x07ffffff },
9431 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST
, 24, 0x00000001 },
9432 { NIG_REG_SERDES0_CTRL_PHY_ADDR
, 16, 0x0000001f },
9434 { 0xffffffff, 0, 0x00000000 }
9437 if (!netif_running(bp
->dev
))
9440 /* Repeat the test twice:
9441 First by writing 0x00000000, second by writing 0xffffffff */
9442 for (idx
= 0; idx
< 2; idx
++) {
9449 wr_val
= 0xffffffff;
9453 for (i
= 0; reg_tbl
[i
].offset0
!= 0xffffffff; i
++) {
9454 u32 offset
, mask
, save_val
, val
;
9456 offset
= reg_tbl
[i
].offset0
+ port
*reg_tbl
[i
].offset1
;
9457 mask
= reg_tbl
[i
].mask
;
9459 save_val
= REG_RD(bp
, offset
);
9461 REG_WR(bp
, offset
, wr_val
);
9462 val
= REG_RD(bp
, offset
);
9464 /* Restore the original register's value */
9465 REG_WR(bp
, offset
, save_val
);
9467 /* verify that value is as expected value */
9468 if ((val
& mask
) != (wr_val
& mask
))
9479 static int bnx2x_test_memory(struct bnx2x
*bp
)
9481 int i
, j
, rc
= -ENODEV
;
9483 static const struct {
9487 { CCM_REG_XX_DESCR_TABLE
, CCM_REG_XX_DESCR_TABLE_SIZE
},
9488 { CFC_REG_ACTIVITY_COUNTER
, CFC_REG_ACTIVITY_COUNTER_SIZE
},
9489 { CFC_REG_LINK_LIST
, CFC_REG_LINK_LIST_SIZE
},
9490 { DMAE_REG_CMD_MEM
, DMAE_REG_CMD_MEM_SIZE
},
9491 { TCM_REG_XX_DESCR_TABLE
, TCM_REG_XX_DESCR_TABLE_SIZE
},
9492 { UCM_REG_XX_DESCR_TABLE
, UCM_REG_XX_DESCR_TABLE_SIZE
},
9493 { XCM_REG_XX_DESCR_TABLE
, XCM_REG_XX_DESCR_TABLE_SIZE
},
9497 static const struct {
9503 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS
, 0x3ffc0, 0 },
9504 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS
, 0x2, 0x2 },
9505 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS
, 0, 0 },
9506 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS
, 0x3ffc0, 0 },
9507 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS
, 0x3ffc0, 0 },
9508 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS
, 0x3ffc1, 0 },
9510 { NULL
, 0xffffffff, 0, 0 }
9513 if (!netif_running(bp
->dev
))
9516 /* Go through all the memories */
9517 for (i
= 0; mem_tbl
[i
].offset
!= 0xffffffff; i
++)
9518 for (j
= 0; j
< mem_tbl
[i
].size
; j
++)
9519 REG_RD(bp
, mem_tbl
[i
].offset
+ j
*4);
9521 /* Check the parity status */
9522 for (i
= 0; prty_tbl
[i
].offset
!= 0xffffffff; i
++) {
9523 val
= REG_RD(bp
, prty_tbl
[i
].offset
);
9524 if ((CHIP_IS_E1(bp
) && (val
& ~(prty_tbl
[i
].e1_mask
))) ||
9525 (CHIP_IS_E1H(bp
) && (val
& ~(prty_tbl
[i
].e1h_mask
)))) {
9527 "%s is 0x%x\n", prty_tbl
[i
].name
, val
);
9538 static void bnx2x_wait_for_link(struct bnx2x
*bp
, u8 link_up
)
9543 while (bnx2x_link_test(bp
) && cnt
--)
9547 static int bnx2x_run_loopback(struct bnx2x
*bp
, int loopback_mode
, u8 link_up
)
9549 unsigned int pkt_size
, num_pkts
, i
;
9550 struct sk_buff
*skb
;
9551 unsigned char *packet
;
9552 struct bnx2x_fastpath
*fp
= &bp
->fp
[0];
9553 u16 tx_start_idx
, tx_idx
;
9554 u16 rx_start_idx
, rx_idx
;
9556 struct sw_tx_bd
*tx_buf
;
9557 struct eth_tx_bd
*tx_bd
;
9559 union eth_rx_cqe
*cqe
;
9561 struct sw_rx_bd
*rx_buf
;
9565 /* check the loopback mode */
9566 switch (loopback_mode
) {
9567 case BNX2X_PHY_LOOPBACK
:
9568 if (bp
->link_params
.loopback_mode
!= LOOPBACK_XGXS_10
)
9571 case BNX2X_MAC_LOOPBACK
:
9572 bp
->link_params
.loopback_mode
= LOOPBACK_BMAC
;
9573 bnx2x_phy_init(&bp
->link_params
, &bp
->link_vars
);
9579 /* prepare the loopback packet */
9580 pkt_size
= (((bp
->dev
->mtu
< ETH_MAX_PACKET_SIZE
) ?
9581 bp
->dev
->mtu
: ETH_MAX_PACKET_SIZE
) + ETH_HLEN
);
9582 skb
= netdev_alloc_skb(bp
->dev
, bp
->rx_buf_size
);
9585 goto test_loopback_exit
;
9587 packet
= skb_put(skb
, pkt_size
);
9588 memcpy(packet
, bp
->dev
->dev_addr
, ETH_ALEN
);
9589 memset(packet
+ ETH_ALEN
, 0, (ETH_HLEN
- ETH_ALEN
));
9590 for (i
= ETH_HLEN
; i
< pkt_size
; i
++)
9591 packet
[i
] = (unsigned char) (i
& 0xff);
9593 /* send the loopback packet */
9595 tx_start_idx
= le16_to_cpu(*fp
->tx_cons_sb
);
9596 rx_start_idx
= le16_to_cpu(*fp
->rx_cons_sb
);
9598 pkt_prod
= fp
->tx_pkt_prod
++;
9599 tx_buf
= &fp
->tx_buf_ring
[TX_BD(pkt_prod
)];
9600 tx_buf
->first_bd
= fp
->tx_bd_prod
;
9603 tx_bd
= &fp
->tx_desc_ring
[TX_BD(fp
->tx_bd_prod
)];
9604 mapping
= pci_map_single(bp
->pdev
, skb
->data
,
9605 skb_headlen(skb
), PCI_DMA_TODEVICE
);
9606 tx_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
9607 tx_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
9608 tx_bd
->nbd
= cpu_to_le16(1);
9609 tx_bd
->nbytes
= cpu_to_le16(skb_headlen(skb
));
9610 tx_bd
->vlan
= cpu_to_le16(pkt_prod
);
9611 tx_bd
->bd_flags
.as_bitfield
= (ETH_TX_BD_FLAGS_START_BD
|
9612 ETH_TX_BD_FLAGS_END_BD
);
9613 tx_bd
->general_data
= ((UNICAST_ADDRESS
<<
9614 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT
) | 1);
9618 le16_add_cpu(&fp
->hw_tx_prods
->bds_prod
, 1);
9619 mb(); /* FW restriction: must not reorder writing nbd and packets */
9620 le32_add_cpu(&fp
->hw_tx_prods
->packets_prod
, 1);
9621 DOORBELL(bp
, fp
->index
, 0);
9627 bp
->dev
->trans_start
= jiffies
;
9631 tx_idx
= le16_to_cpu(*fp
->tx_cons_sb
);
9632 if (tx_idx
!= tx_start_idx
+ num_pkts
)
9633 goto test_loopback_exit
;
9635 rx_idx
= le16_to_cpu(*fp
->rx_cons_sb
);
9636 if (rx_idx
!= rx_start_idx
+ num_pkts
)
9637 goto test_loopback_exit
;
9639 cqe
= &fp
->rx_comp_ring
[RCQ_BD(fp
->rx_comp_cons
)];
9640 cqe_fp_flags
= cqe
->fast_path_cqe
.type_error_flags
;
9641 if (CQE_TYPE(cqe_fp_flags
) || (cqe_fp_flags
& ETH_RX_ERROR_FALGS
))
9642 goto test_loopback_rx_exit
;
9644 len
= le16_to_cpu(cqe
->fast_path_cqe
.pkt_len
);
9645 if (len
!= pkt_size
)
9646 goto test_loopback_rx_exit
;
9648 rx_buf
= &fp
->rx_buf_ring
[RX_BD(fp
->rx_bd_cons
)];
9650 skb_reserve(skb
, cqe
->fast_path_cqe
.placement_offset
);
9651 for (i
= ETH_HLEN
; i
< pkt_size
; i
++)
9652 if (*(skb
->data
+ i
) != (unsigned char) (i
& 0xff))
9653 goto test_loopback_rx_exit
;
9657 test_loopback_rx_exit
:
9659 fp
->rx_bd_cons
= NEXT_RX_IDX(fp
->rx_bd_cons
);
9660 fp
->rx_bd_prod
= NEXT_RX_IDX(fp
->rx_bd_prod
);
9661 fp
->rx_comp_cons
= NEXT_RCQ_IDX(fp
->rx_comp_cons
);
9662 fp
->rx_comp_prod
= NEXT_RCQ_IDX(fp
->rx_comp_prod
);
9664 /* Update producers */
9665 bnx2x_update_rx_prod(bp
, fp
, fp
->rx_bd_prod
, fp
->rx_comp_prod
,
9669 bp
->link_params
.loopback_mode
= LOOPBACK_NONE
;
9674 static int bnx2x_test_loopback(struct bnx2x
*bp
, u8 link_up
)
9678 if (!netif_running(bp
->dev
))
9679 return BNX2X_LOOPBACK_FAILED
;
9681 bnx2x_netif_stop(bp
, 1);
9682 bnx2x_acquire_phy_lock(bp
);
9684 res
= bnx2x_run_loopback(bp
, BNX2X_PHY_LOOPBACK
, link_up
);
9686 DP(NETIF_MSG_PROBE
, " PHY loopback failed (res %d)\n", res
);
9687 rc
|= BNX2X_PHY_LOOPBACK_FAILED
;
9690 res
= bnx2x_run_loopback(bp
, BNX2X_MAC_LOOPBACK
, link_up
);
9692 DP(NETIF_MSG_PROBE
, " MAC loopback failed (res %d)\n", res
);
9693 rc
|= BNX2X_MAC_LOOPBACK_FAILED
;
9696 bnx2x_release_phy_lock(bp
);
9697 bnx2x_netif_start(bp
);
#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
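/*
 * Illustrative sketch, not part of the original driver: each region checked
 * above is stored with its CRC32 appended, so running ether_crc_le() over the
 * region including the trailing CRC yields the constant residual
 * CRC32_RESIDUAL when the data is intact.  The helper name is hypothetical.
 */
static inline bool bnx2x_nvram_region_ok_sketch(const u8 *data, int size)
{
	return ether_crc_le(size, data) == CRC32_RESIDUAL;
}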
9761 static int bnx2x_test_intr(struct bnx2x
*bp
)
9763 struct mac_configuration_cmd
*config
= bnx2x_sp(bp
, mac_config
);
9766 if (!netif_running(bp
->dev
))
9769 config
->hdr
.length
= 0;
9771 config
->hdr
.offset
= (BP_PORT(bp
) ? 32 : 0);
9773 config
->hdr
.offset
= BP_FUNC(bp
);
9774 config
->hdr
.client_id
= bp
->fp
->cl_id
;
9775 config
->hdr
.reserved1
= 0;
9777 rc
= bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_SET_MAC
, 0,
9778 U64_HI(bnx2x_sp_mapping(bp
, mac_config
)),
9779 U64_LO(bnx2x_sp_mapping(bp
, mac_config
)), 0);
9781 bp
->set_mac_pending
++;
9782 for (i
= 0; i
< 10; i
++) {
9783 if (!bp
->set_mac_pending
)
9785 msleep_interruptible(10);
9794 static void bnx2x_self_test(struct net_device
*dev
,
9795 struct ethtool_test
*etest
, u64
*buf
)
9797 struct bnx2x
*bp
= netdev_priv(dev
);
9799 memset(buf
, 0, sizeof(u64
) * BNX2X_NUM_TESTS
);
9801 if (!netif_running(dev
))
9804 /* offline tests are not supported in MF mode */
9806 etest
->flags
&= ~ETH_TEST_FL_OFFLINE
;
9808 if (etest
->flags
& ETH_TEST_FL_OFFLINE
) {
9809 int port
= BP_PORT(bp
);
9813 /* save current value of input enable for TX port IF */
9814 val
= REG_RD(bp
, NIG_REG_EGRESS_UMP0_IN_EN
+ port
*4);
9815 /* disable input for TX port IF */
9816 REG_WR(bp
, NIG_REG_EGRESS_UMP0_IN_EN
+ port
*4, 0);
9818 link_up
= bp
->link_vars
.link_up
;
9819 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
9820 bnx2x_nic_load(bp
, LOAD_DIAG
);
9821 /* wait until link state is restored */
9822 bnx2x_wait_for_link(bp
, link_up
);
9824 if (bnx2x_test_registers(bp
) != 0) {
9826 etest
->flags
|= ETH_TEST_FL_FAILED
;
9828 if (bnx2x_test_memory(bp
) != 0) {
9830 etest
->flags
|= ETH_TEST_FL_FAILED
;
9832 buf
[2] = bnx2x_test_loopback(bp
, link_up
);
9834 etest
->flags
|= ETH_TEST_FL_FAILED
;
9836 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
9838 /* restore input for TX port IF */
9839 REG_WR(bp
, NIG_REG_EGRESS_UMP0_IN_EN
+ port
*4, val
);
9841 bnx2x_nic_load(bp
, LOAD_NORMAL
);
9842 /* wait until link state is restored */
9843 bnx2x_wait_for_link(bp
, link_up
);
9845 if (bnx2x_test_nvram(bp
) != 0) {
9847 etest
->flags
|= ETH_TEST_FL_FAILED
;
9849 if (bnx2x_test_intr(bp
) != 0) {
9851 etest
->flags
|= ETH_TEST_FL_FAILED
;
9854 if (bnx2x_link_test(bp
) != 0) {
9856 etest
->flags
|= ETH_TEST_FL_FAILED
;
9859 #ifdef BNX2X_EXTRA_DEBUG
9860 bnx2x_panic_dump(bp
);
9864 static const struct {
9867 u8 string
[ETH_GSTRING_LEN
];
9868 } bnx2x_q_stats_arr
[BNX2X_NUM_Q_STATS
] = {
9869 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi
), 8, "[%d]: rx_bytes" },
9870 { Q_STATS_OFFSET32(error_bytes_received_hi
),
9871 8, "[%d]: rx_error_bytes" },
9872 { Q_STATS_OFFSET32(total_unicast_packets_received_hi
),
9873 8, "[%d]: rx_ucast_packets" },
9874 { Q_STATS_OFFSET32(total_multicast_packets_received_hi
),
9875 8, "[%d]: rx_mcast_packets" },
9876 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi
),
9877 8, "[%d]: rx_bcast_packets" },
9878 { Q_STATS_OFFSET32(no_buff_discard_hi
), 8, "[%d]: rx_discards" },
9879 { Q_STATS_OFFSET32(rx_err_discard_pkt
),
9880 4, "[%d]: rx_phy_ip_err_discards"},
9881 { Q_STATS_OFFSET32(rx_skb_alloc_failed
),
9882 4, "[%d]: rx_skb_alloc_discard" },
9883 { Q_STATS_OFFSET32(hw_csum_err
), 4, "[%d]: rx_csum_offload_errors" },
9885 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi
), 8, "[%d]: tx_bytes" },
9886 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi
),
9887 8, "[%d]: tx_packets" }
9890 static const struct {
9894 #define STATS_FLAGS_PORT 1
9895 #define STATS_FLAGS_FUNC 2
9896 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9897 u8 string
[ETH_GSTRING_LEN
];
9898 } bnx2x_stats_arr
[BNX2X_NUM_STATS
] = {
9899 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi
),
9900 8, STATS_FLAGS_BOTH
, "rx_bytes" },
9901 { STATS_OFFSET32(error_bytes_received_hi
),
9902 8, STATS_FLAGS_BOTH
, "rx_error_bytes" },
9903 { STATS_OFFSET32(total_unicast_packets_received_hi
),
9904 8, STATS_FLAGS_BOTH
, "rx_ucast_packets" },
9905 { STATS_OFFSET32(total_multicast_packets_received_hi
),
9906 8, STATS_FLAGS_BOTH
, "rx_mcast_packets" },
9907 { STATS_OFFSET32(total_broadcast_packets_received_hi
),
9908 8, STATS_FLAGS_BOTH
, "rx_bcast_packets" },
9909 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi
),
9910 8, STATS_FLAGS_PORT
, "rx_crc_errors" },
9911 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi
),
9912 8, STATS_FLAGS_PORT
, "rx_align_errors" },
9913 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi
),
9914 8, STATS_FLAGS_PORT
, "rx_undersize_packets" },
9915 { STATS_OFFSET32(etherstatsoverrsizepkts_hi
),
9916 8, STATS_FLAGS_PORT
, "rx_oversize_packets" },
9917 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi
),
9918 8, STATS_FLAGS_PORT
, "rx_fragments" },
9919 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi
),
9920 8, STATS_FLAGS_PORT
, "rx_jabbers" },
9921 { STATS_OFFSET32(no_buff_discard_hi
),
9922 8, STATS_FLAGS_BOTH
, "rx_discards" },
9923 { STATS_OFFSET32(mac_filter_discard
),
9924 4, STATS_FLAGS_PORT
, "rx_filtered_packets" },
9925 { STATS_OFFSET32(xxoverflow_discard
),
9926 4, STATS_FLAGS_PORT
, "rx_fw_discards" },
9927 { STATS_OFFSET32(brb_drop_hi
),
9928 8, STATS_FLAGS_PORT
, "rx_brb_discard" },
9929 { STATS_OFFSET32(brb_truncate_hi
),
9930 8, STATS_FLAGS_PORT
, "rx_brb_truncate" },
9931 { STATS_OFFSET32(pause_frames_received_hi
),
9932 8, STATS_FLAGS_PORT
, "rx_pause_frames" },
9933 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi
),
9934 8, STATS_FLAGS_PORT
, "rx_mac_ctrl_frames" },
9935 { STATS_OFFSET32(nig_timer_max
),
9936 4, STATS_FLAGS_PORT
, "rx_constant_pause_events" },
9937 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt
),
9938 4, STATS_FLAGS_BOTH
, "rx_phy_ip_err_discards"},
9939 { STATS_OFFSET32(rx_skb_alloc_failed
),
9940 4, STATS_FLAGS_BOTH
, "rx_skb_alloc_discard" },
9941 { STATS_OFFSET32(hw_csum_err
),
9942 4, STATS_FLAGS_BOTH
, "rx_csum_offload_errors" },
9944 { STATS_OFFSET32(total_bytes_transmitted_hi
),
9945 8, STATS_FLAGS_BOTH
, "tx_bytes" },
9946 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi
),
9947 8, STATS_FLAGS_PORT
, "tx_error_bytes" },
9948 { STATS_OFFSET32(total_unicast_packets_transmitted_hi
),
9949 8, STATS_FLAGS_BOTH
, "tx_packets" },
9950 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi
),
9951 8, STATS_FLAGS_PORT
, "tx_mac_errors" },
9952 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi
),
9953 8, STATS_FLAGS_PORT
, "tx_carrier_errors" },
9954 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi
),
9955 8, STATS_FLAGS_PORT
, "tx_single_collisions" },
9956 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi
),
9957 8, STATS_FLAGS_PORT
, "tx_multi_collisions" },
9958 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi
),
9959 8, STATS_FLAGS_PORT
, "tx_deferred" },
9960 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi
),
9961 8, STATS_FLAGS_PORT
, "tx_excess_collisions" },
9962 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi
),
9963 8, STATS_FLAGS_PORT
, "tx_late_collisions" },
9964 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi
),
9965 8, STATS_FLAGS_PORT
, "tx_total_collisions" },
9966 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi
),
9967 8, STATS_FLAGS_PORT
, "tx_64_byte_packets" },
9968 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi
),
9969 8, STATS_FLAGS_PORT
, "tx_65_to_127_byte_packets" },
9970 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi
),
9971 8, STATS_FLAGS_PORT
, "tx_128_to_255_byte_packets" },
9972 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi
),
9973 8, STATS_FLAGS_PORT
, "tx_256_to_511_byte_packets" },
9974 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi
),
9975 8, STATS_FLAGS_PORT
, "tx_512_to_1023_byte_packets" },
9976 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi
),
9977 8, STATS_FLAGS_PORT
, "tx_1024_to_1522_byte_packets" },
9978 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi
),
9979 8, STATS_FLAGS_PORT
, "tx_1523_to_9022_byte_packets" },
9980 { STATS_OFFSET32(pause_frames_sent_hi
),
9981 8, STATS_FLAGS_PORT
, "tx_pause_frames" }
9984 #define IS_PORT_STAT(i) \
9985 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9986 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9987 #define IS_E1HMF_MODE_STAT(bp) \
9988 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
9990 static void bnx2x_get_strings(struct net_device
*dev
, u32 stringset
, u8
*buf
)
9992 struct bnx2x
*bp
= netdev_priv(dev
);
9995 switch (stringset
) {
9999 for_each_queue(bp
, i
) {
10000 for (j
= 0; j
< BNX2X_NUM_Q_STATS
; j
++)
10001 sprintf(buf
+ (k
+ j
)*ETH_GSTRING_LEN
,
10002 bnx2x_q_stats_arr
[j
].string
, i
);
10003 k
+= BNX2X_NUM_Q_STATS
;
10005 if (IS_E1HMF_MODE_STAT(bp
))
10007 for (j
= 0; j
< BNX2X_NUM_STATS
; j
++)
10008 strcpy(buf
+ (k
+ j
)*ETH_GSTRING_LEN
,
10009 bnx2x_stats_arr
[j
].string
);
10011 for (i
= 0, j
= 0; i
< BNX2X_NUM_STATS
; i
++) {
10012 if (IS_E1HMF_MODE_STAT(bp
) && IS_PORT_STAT(i
))
10014 strcpy(buf
+ j
*ETH_GSTRING_LEN
,
10015 bnx2x_stats_arr
[i
].string
);
10022 memcpy(buf
, bnx2x_tests_str_arr
, sizeof(bnx2x_tests_str_arr
));
10027 static int bnx2x_get_stats_count(struct net_device
*dev
)
10029 struct bnx2x
*bp
= netdev_priv(dev
);
10032 if (is_multi(bp
)) {
10033 num_stats
= BNX2X_NUM_Q_STATS
* BNX2X_NUM_QUEUES(bp
);
10034 if (!IS_E1HMF_MODE_STAT(bp
))
10035 num_stats
+= BNX2X_NUM_STATS
;
10037 if (IS_E1HMF_MODE_STAT(bp
)) {
10039 for (i
= 0; i
< BNX2X_NUM_STATS
; i
++)
10040 if (IS_FUNC_STAT(i
))
10043 num_stats
= BNX2X_NUM_STATS
;
10049 static void bnx2x_get_ethtool_stats(struct net_device
*dev
,
10050 struct ethtool_stats
*stats
, u64
*buf
)
10052 struct bnx2x
*bp
= netdev_priv(dev
);
10053 u32
*hw_stats
, *offset
;
10056 if (is_multi(bp
)) {
10058 for_each_queue(bp
, i
) {
10059 hw_stats
= (u32
*)&bp
->fp
[i
].eth_q_stats
;
10060 for (j
= 0; j
< BNX2X_NUM_Q_STATS
; j
++) {
10061 if (bnx2x_q_stats_arr
[j
].size
== 0) {
10062 /* skip this counter */
10066 offset
= (hw_stats
+
10067 bnx2x_q_stats_arr
[j
].offset
);
10068 if (bnx2x_q_stats_arr
[j
].size
== 4) {
10069 /* 4-byte counter */
10070 buf
[k
+ j
] = (u64
) *offset
;
10073 /* 8-byte counter */
10074 buf
[k
+ j
] = HILO_U64(*offset
, *(offset
+ 1));
10076 k
+= BNX2X_NUM_Q_STATS
;
10078 if (IS_E1HMF_MODE_STAT(bp
))
10080 hw_stats
= (u32
*)&bp
->eth_stats
;
10081 for (j
= 0; j
< BNX2X_NUM_STATS
; j
++) {
10082 if (bnx2x_stats_arr
[j
].size
== 0) {
10083 /* skip this counter */
10087 offset
= (hw_stats
+ bnx2x_stats_arr
[j
].offset
);
10088 if (bnx2x_stats_arr
[j
].size
== 4) {
10089 /* 4-byte counter */
10090 buf
[k
+ j
] = (u64
) *offset
;
10093 /* 8-byte counter */
10094 buf
[k
+ j
] = HILO_U64(*offset
, *(offset
+ 1));
10097 hw_stats
= (u32
*)&bp
->eth_stats
;
10098 for (i
= 0, j
= 0; i
< BNX2X_NUM_STATS
; i
++) {
10099 if (IS_E1HMF_MODE_STAT(bp
) && IS_PORT_STAT(i
))
10101 if (bnx2x_stats_arr
[i
].size
== 0) {
10102 /* skip this counter */
10107 offset
= (hw_stats
+ bnx2x_stats_arr
[i
].offset
);
10108 if (bnx2x_stats_arr
[i
].size
== 4) {
10109 /* 4-byte counter */
10110 buf
[j
] = (u64
) *offset
;
10114 /* 8-byte counter */
10115 buf
[j
] = HILO_U64(*offset
, *(offset
+ 1));
10121 static int bnx2x_phys_id(struct net_device
*dev
, u32 data
)
10123 struct bnx2x
*bp
= netdev_priv(dev
);
10124 int port
= BP_PORT(bp
);
10127 if (!netif_running(dev
))
10136 for (i
= 0; i
< (data
* 2); i
++) {
10138 bnx2x_set_led(bp
, port
, LED_MODE_OPER
, SPEED_1000
,
10139 bp
->link_params
.hw_led_mode
,
10140 bp
->link_params
.chip_id
);
10142 bnx2x_set_led(bp
, port
, LED_MODE_OFF
, 0,
10143 bp
->link_params
.hw_led_mode
,
10144 bp
->link_params
.chip_id
);
10146 msleep_interruptible(500);
10147 if (signal_pending(current
))
10151 if (bp
->link_vars
.link_up
)
10152 bnx2x_set_led(bp
, port
, LED_MODE_OPER
,
10153 bp
->link_vars
.line_speed
,
10154 bp
->link_params
.hw_led_mode
,
10155 bp
->link_params
.chip_id
);
static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */
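/*
 * Illustrative sketch, not part of this section: the ops table above only
 * takes effect once it is attached to the net_device during probe, e.g. with
 * SET_ETHTOOL_OPS() as sketched below.  The wrapper function is hypothetical;
 * the real hookup lives elsewhere in the driver.
 */
static inline void bnx2x_attach_ethtool_ops_sketch(struct net_device *dev)
{
	SET_ETHTOOL_OPS(dev, &bnx2x_ethtool_ops);	/* dev->ethtool_ops = &bnx2x_ethtool_ops */
}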
/****************************************************************************
* General service functions
****************************************************************************/
10205 static int bnx2x_set_power_state(struct bnx2x
*bp
, pci_power_t state
)
10209 pci_read_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
, &pmcsr
);
10213 pci_write_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
,
10214 ((pmcsr
& ~PCI_PM_CTRL_STATE_MASK
) |
10215 PCI_PM_CTRL_PME_STATUS
));
10217 if (pmcsr
& PCI_PM_CTRL_STATE_MASK
)
10218 /* delay required during transition out of D3hot */
10223 pmcsr
&= ~PCI_PM_CTRL_STATE_MASK
;
10227 pmcsr
|= PCI_PM_CTRL_PME_ENABLE
;
10229 pci_write_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
,
10232 /* No more memory access after this point until
10233 * device is brought back to D0.
10243 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath
*fp
)
10247 /* Tell compiler that status block fields can change */
10249 rx_cons_sb
= le16_to_cpu(*fp
->rx_cons_sb
);
10250 if ((rx_cons_sb
& MAX_RCQ_DESC_CNT
) == MAX_RCQ_DESC_CNT
)
10252 return (fp
->rx_comp_cons
!= rx_cons_sb
);
10256 * net_device service functions
10259 static int bnx2x_poll(struct napi_struct
*napi
, int budget
)
10261 struct bnx2x_fastpath
*fp
= container_of(napi
, struct bnx2x_fastpath
,
10263 struct bnx2x
*bp
= fp
->bp
;
10266 #ifdef BNX2X_STOP_ON_ERROR
10267 if (unlikely(bp
->panic
))
10271 prefetch(fp
->tx_buf_ring
[TX_BD(fp
->tx_pkt_cons
)].skb
);
10272 prefetch(fp
->rx_buf_ring
[RX_BD(fp
->rx_bd_cons
)].skb
);
10273 prefetch((char *)(fp
->rx_buf_ring
[RX_BD(fp
->rx_bd_cons
)].skb
) + 256);
10275 bnx2x_update_fpsb_idx(fp
);
10277 if (bnx2x_has_tx_work(fp
))
10280 if (bnx2x_has_rx_work(fp
)) {
10281 work_done
= bnx2x_rx_int(fp
, budget
);
10283 /* must not complete if we consumed full budget */
10284 if (work_done
>= budget
)
10288 /* BNX2X_HAS_WORK() reads the status block, thus we need to
10289 * ensure that status block indices have been actually read
10290 * (bnx2x_update_fpsb_idx) prior to this check (BNX2X_HAS_WORK)
10291 * so that we won't write the "newer" value of the status block to IGU
10292 * (if there was a DMA right after BNX2X_HAS_WORK and
10293 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10294 * may be postponed to right before bnx2x_ack_sb). In this case
10295 * there will never be another interrupt until there is another update
10296 * of the status block, while there is still unhandled work.
10300 if (!BNX2X_HAS_WORK(fp
)) {
10301 #ifdef BNX2X_STOP_ON_ERROR
10304 napi_complete(napi
);
10306 bnx2x_ack_sb(bp
, fp
->sb_id
, USTORM_ID
,
10307 le16_to_cpu(fp
->fp_u_idx
), IGU_INT_NOP
, 1);
10308 bnx2x_ack_sb(bp
, fp
->sb_id
, CSTORM_ID
,
10309 le16_to_cpu(fp
->fp_c_idx
), IGU_INT_ENABLE
, 1);
10317 /* we split the first BD into headers and data BDs
10318 * to ease the pain of our fellow microcode engineers
10319 * we use one mapping for both BDs
10320 * So far this has only been observed to happen
10321 * in Other Operating Systems(TM)
10323 static noinline u16
bnx2x_tx_split(struct bnx2x
*bp
,
10324 struct bnx2x_fastpath
*fp
,
10325 struct eth_tx_bd
**tx_bd
, u16 hlen
,
10326 u16 bd_prod
, int nbd
)
10328 struct eth_tx_bd
*h_tx_bd
= *tx_bd
;
10329 struct eth_tx_bd
*d_tx_bd
;
10330 dma_addr_t mapping
;
10331 int old_len
= le16_to_cpu(h_tx_bd
->nbytes
);
10333 /* first fix first BD */
10334 h_tx_bd
->nbd
= cpu_to_le16(nbd
);
10335 h_tx_bd
->nbytes
= cpu_to_le16(hlen
);
10337 DP(NETIF_MSG_TX_QUEUED
, "TSO split header size is %d "
10338 "(%x:%x) nbd %d\n", h_tx_bd
->nbytes
, h_tx_bd
->addr_hi
,
10339 h_tx_bd
->addr_lo
, h_tx_bd
->nbd
);
10341 /* now get a new data BD
10342 * (after the pbd) and fill it */
10343 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
10344 d_tx_bd
= &fp
->tx_desc_ring
[bd_prod
];
10346 mapping
= HILO_U64(le32_to_cpu(h_tx_bd
->addr_hi
),
10347 le32_to_cpu(h_tx_bd
->addr_lo
)) + hlen
;
10349 d_tx_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
10350 d_tx_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
10351 d_tx_bd
->nbytes
= cpu_to_le16(old_len
- hlen
);
10353 /* this marks the BD as one that has no individual mapping
10354 * the FW ignores this flag in a BD not marked start
10356 d_tx_bd
->bd_flags
.as_bitfield
= ETH_TX_BD_FLAGS_SW_LSO
;
10357 DP(NETIF_MSG_TX_QUEUED
,
10358 "TSO split data size is %d (%x:%x)\n",
10359 d_tx_bd
->nbytes
, d_tx_bd
->addr_hi
, d_tx_bd
->addr_lo
);
10361 /* update tx_bd for marking the last BD flag */
10367 static inline u16
bnx2x_csum_fix(unsigned char *t_header
, u16 csum
, s8 fix
)
10370 csum
= (u16
) ~csum_fold(csum_sub(csum
,
10371 csum_partial(t_header
- fix
, fix
, 0)));
10374 csum
= (u16
) ~csum_fold(csum_add(csum
,
10375 csum_partial(t_header
, -fix
, 0)));
10377 return swab16(csum
);
10380 static inline u32
bnx2x_xmit_type(struct bnx2x
*bp
, struct sk_buff
*skb
)
10384 if (skb
->ip_summed
!= CHECKSUM_PARTIAL
)
10388 if (skb
->protocol
== htons(ETH_P_IPV6
)) {
10390 if (ipv6_hdr(skb
)->nexthdr
== IPPROTO_TCP
)
10391 rc
|= XMIT_CSUM_TCP
;
10395 if (ip_hdr(skb
)->protocol
== IPPROTO_TCP
)
10396 rc
|= XMIT_CSUM_TCP
;
10400 if (skb_shinfo(skb
)->gso_type
& SKB_GSO_TCPV4
)
10403 else if (skb_shinfo(skb
)->gso_type
& SKB_GSO_TCPV6
)
10409 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10410 /* check if packet requires linearization (packet is too fragmented)
10411 no need to check fragmentation if page size > 8K (there will be no
10412 violation to FW restrictions) */
10413 static int bnx2x_pkt_req_lin(struct bnx2x
*bp
, struct sk_buff
*skb
,
10418 int first_bd_sz
= 0;
10420 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10421 if (skb_shinfo(skb
)->nr_frags
>= (MAX_FETCH_BD
- 3)) {
10423 if (xmit_type
& XMIT_GSO
) {
10424 unsigned short lso_mss
= skb_shinfo(skb
)->gso_size
;
10425 /* Check if LSO packet needs to be copied:
10426 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10427 int wnd_size
= MAX_FETCH_BD
- 3;
10428 /* Number of windows to check */
10429 int num_wnds
= skb_shinfo(skb
)->nr_frags
- wnd_size
;
10434 /* Headers length */
10435 hlen
= (int)(skb_transport_header(skb
) - skb
->data
) +
10438 /* Amount of data (w/o headers) on linear part of SKB*/
10439 first_bd_sz
= skb_headlen(skb
) - hlen
;
10441 wnd_sum
= first_bd_sz
;
10443 /* Calculate the first sum - it's special */
10444 for (frag_idx
= 0; frag_idx
< wnd_size
- 1; frag_idx
++)
10446 skb_shinfo(skb
)->frags
[frag_idx
].size
;
10448 /* If there was data on linear skb data - check it */
10449 if (first_bd_sz
> 0) {
10450 if (unlikely(wnd_sum
< lso_mss
)) {
10455 wnd_sum
-= first_bd_sz
;
10458 /* Others are easier: run through the frag list and
10459 check all windows */
10460 for (wnd_idx
= 0; wnd_idx
<= num_wnds
; wnd_idx
++) {
10462 skb_shinfo(skb
)->frags
[wnd_idx
+ wnd_size
- 1].size
;
10464 if (unlikely(wnd_sum
< lso_mss
)) {
10469 skb_shinfo(skb
)->frags
[wnd_idx
].size
;
10472 /* in non-LSO too fragmented packet should always
10479 if (unlikely(to_copy
))
10480 DP(NETIF_MSG_TX_QUEUED
,
10481 "Linearization IS REQUIRED for %s packet. "
10482 "num_frags %d hlen %d first_bd_sz %d\n",
10483 (xmit_type
& XMIT_GSO
) ? "LSO" : "non-LSO",
10484 skb_shinfo(skb
)->nr_frags
, hlen
, first_bd_sz
);
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int vlan_off = (bp->e1hov ? 4 : 0);
	int hlen = 0;
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
	   "  gso type %x  xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then for TSO or xsum we have a parsing info BD,
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pdb sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_bd = &fp->tx_desc_ring[bd_prod];

	tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_bd->general_data = (UNICAST_ADDRESS <<
			       ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);

	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
		tx_bd->vlan = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = (void *)&fp->tx_desc_ring[bd_prod];

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2 - vlan_off;

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
			pbd->cs_offset = fix / 2;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d  offset %d  fix %d  csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
			   SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
	tx_bd->nbd = cpu_to_le16(nbd);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %x\n",
	   tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
	   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
						 bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_bd = &fp->tx_desc_ring[bd_prod];

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_bd->nbytes = cpu_to_le16(frag->size);
		tx_bd->vlan = cpu_to_le16(pkt_prod);
		tx_bd->bd_flags.as_bitfield = 0;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
		   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
		   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
	}

	/* now at last mark the BD as the last BD */
	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
	   tx_bd, tx_bd->bd_flags.as_bitfield);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes packets must have BDs.
	 */
	wmb();

	le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
	DOORBELL(bp, fp->index, 0);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		netif_tx_stop_queue(txq);
		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
}
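/*
 * Worked example (assuming a checksummed TSO skb with two frags and no
 * header split): the chain built above is
 *	BD0 - start BD, linear part (START_BD)
 *	BD1 - parsing BD (hlen, lso_mss, pseudo csum; sizes in 16-bit words)
 *	BD2 - frag 0
 *	BD3 - frag 1 (END_BD)
 * so nbd = nr_frags + 2 = 4, and the doorbell publishes bds_prod and
 * packets_prod only after the wmb()/mb() pair has made the BDs visible
 * to the FW.
 */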
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}
/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}
/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].
					target_table_entry.client_id = 0;
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;

			for (; i < old; i++) {
				if (CAM_IS_INVALID(config->
						   config_table[i])) {
					/* already invalidated */
					break;
				}
				/* invalidate */
				CAM_INVALIDATE(config->
					       config_table[i]);
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
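/*
 * Example of the E1H multicast hash above (values chosen only for
 * illustration): a CRC32C whose top byte is 0x4a selects register
 * mc_filter[0x4a >> 5] = mc_filter[2] and sets bit (0x4a & 0x1f) = 10 in it,
 * i.e. the filter is a simple 256-bin hash spread over the MC_HASH_SIZE
 * 32-bit registers.
 */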
/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}
/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}
/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}
/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}
static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u16 *ops_offsets;
	u32 offset, len, num_ops;
	int i;
	const u8 *fw_ver;
	const struct firmware *firmware = bp->firmware;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
		       " Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2],
		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}
/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j+1]);
	}
}
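/*
 * Example decode (illustrative values only): the two big-endian words
 * 0x02000100 0x00000010 become op = 0x02, offset = 0x000100 and
 * raw_data = 0x10 - the top byte of the first word carries the opcode and
 * its low 24 bits carry the offset.
 */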
static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	u16 *target = (u16 *)_target;
	const __be16 *source = (const __be16 *)_source;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
	do { \
		u32 len = be32_to_cpu(fw_hdr->arr.len); \
		bp->arr = kmalloc(len, GFP_KERNEL); \
		if (!bp->arr) { \
			printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
			goto lbl; \
		} \
		func(bp->firmware->data + \
		     be32_to_cpu(fw_hdr->arr.offset), \
		     (u8 *)bp->arr, len); \
	} while (0)
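/*
 * Usage sketch (mirrors the calls in bnx2x_init_firmware() below):
 * BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n)
 * allocates bp->init_data, byte-swaps the init_data section of the firmware
 * file into it, and jumps to the request_firmware_exit label on allocation
 * failure.
 */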
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	char fw_file_name[40] = {0};
	int rc, offset;
	struct bnx2x_fw_file_hdr *fw_hdr;

	/* Create a FW file name */
	if (CHIP_IS_E1(bp))
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
	else
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);

	sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
		BCM_5710_FW_MAJOR_VERSION,
		BCM_5710_FW_MINOR_VERSION,
		BCM_5710_FW_REVISION_VERSION,
		BCM_5710_FW_ENGINEERING_VERSION);

	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);

	/* STORMs firmware */
	bp->tsem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	bp->tsem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	bp->usem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	bp->usem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_pram_data.offset);
	bp->xsem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	bp->xsem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	bp->csem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	bp->csem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
static int __init bnx2x_init(void)
{
	int ret;

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		printk(KERN_ERR PFX "Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}

	return ret;
}
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);