/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */

#include <linux/config.h>

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>

#include "bnx2.h"
#include "bnx2_fw.h"

#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.4.40"
#define DRV_MODULE_RELDATE	"May 22, 2006"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
} board_t;

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
};
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ 0, }
};
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
static inline u32
bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);

	if (diff > MAX_TX_DESC_CNT)
		diff = (diff & MAX_TX_DESC_CNT) - 1;
	return (bp->tx_ring_size - diff);
}
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
	REG_WR(bp, BNX2_CTX_DATA, val);
}
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;
	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	bnx2_report_fw_link(bp);
}
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
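/*
 * Summary of the 802.3 pause resolution above (derived from the code, with
 * CAP = symmetric PAUSE and ASYM = asymmetric PAUSE advertisement bits):
 *
 *   local CAP,        remote CAP        -> FLOW_CTRL_TX | FLOW_CTRL_RX
 *   local CAP+ASYM,   remote ASYM only  -> FLOW_CTRL_RX
 *   local ASYM only,  remote CAP+ASYM   -> FLOW_CTRL_TX
 *   anything else                       -> no flow control
 */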
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
			bnx2_read_phy(bp, MII_LPA, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
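/*
 * The (remote_adv >> 2) in bnx2_copper_linkup relies on the MII register
 * layout: the link partner's 1000BASE-T ability bits in MII_STAT1000
 * (e.g. LPA_1000FULL) sit two bit positions above the corresponding local
 * advertisement bits in MII_CTRL1000 (e.g. ADVERTISE_1000FULL), so shifting
 * right by two lines them up for the common-capability mask.
 */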
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) == CHIP_NUM_5708) {
					val |= BNX2_EMAC_MODE_PORT_MII_10;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
			(bp->autoneg & AUTONEG_SPEED)) {
			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, MII_BMCR, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			int i;

			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 110; i++) {
				udelay(100);
			}
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			/* Speed up link-up time when the link partner
			 * does not autonegotiate which is very common
			 * in blade servers. Some blade servers use
			 * IPMI for keyboard input and it's important
			 * to minimize link disruptions. Autoneg. involves
			 * exchanging base pages plus 3 next pages and
			 * normally completes in about 120 msec.
			 */
			bp->current_interval = SERDES_AN_TIMEOUT;
			bp->serdes_an_pending = 1;
			mod_timer(&bp->timer, jiffies + bp->current_interval);
		}
	}

	return 0;
}
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;
		int i = 0;

		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			do {
				udelay(100);
				bnx2_read_phy(bp, MII_BMSR, &bmsr);
				bnx2_read_phy(bp, MII_BMSR, &bmsr);

				i++;
			} while ((bmsr & BMSR_LSTATUS) && (i < 620));
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
static int
bnx2_setup_phy(struct bnx2 *bp)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		return (bnx2_setup_serdes_phy(bp));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706) {
		REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
	}

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bp->phy_flags |= PHY_CRC_FIX_FLAG;

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_reset_phy(bp);

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}
static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
	u32 mac_mode;

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~BNX2_EMAC_MODE_PORT;
	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
static int bnx2_test_link(struct bnx2 *);

static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EBUSY;

	return 0;
}
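/*
 * How the handshake above stays in sync (inferred from the code): the
 * driver increments fw_wr_seq for every mailbox message and embeds it in
 * the low bits of msg_data; the firmware echoes the same sequence number
 * back in BNX2_FW_MB, so a stale acknowledgement from an earlier message
 * can never satisfy the wait loop for the current one.
 */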
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

		/* Zero out the context. */
		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
			CTX_WR(bp, 0x00, offset, 0);
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
	}
}
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
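/*
 * The (val << 9) | val | 1 encoding above is not documented here; it
 * appears to replicate the 9-bit mbuf handle into both fields of the
 * BNX2_RBUF_FW_BUF_FREE command word expected by the RX buffer firmware,
 * with bit 0 acting as a valid/kick bit.  Treat this as a reading of the
 * code rather than a hardware specification.
 */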
static void
bnx2_set_mac_addr(struct bnx2 *bp)
{
	u32 val;
	u8 *mac_addr = bp->dev->dev_addr;

	val = (mac_addr[0] << 8) | mac_addr[1];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);

	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		(mac_addr[4] << 8) | mac_addr[5];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
}
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = dev_alloc_skb(bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	if (unlikely((align = (unsigned long) skb->data & 0x7))) {
		skb_reserve(skb, 8 - align);
	}

	skb->dev = bp->dev;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
static void
bnx2_phy_int(struct bnx2 *bp)
{
	u32 new_link_state, old_link_state;

	new_link_state = bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE;
	if (new_link_state != old_link_state) {
		if (new_link_state) {
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
		}
		else {
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
		}
		bnx2_set_link(bp);
	}
}
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_shinfo(skb)->tso_size) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb_irq(skb);

		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;

	if (unlikely(netif_queue_stopped(bp->dev))) {
		spin_lock(&bp->tx_lock);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {

			netif_wake_queue(bp->dev);
		}
		spin_unlock(&bp->tx_lock);
	}
}
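/*
 * The hw_cons adjustment in bnx2_tx_int (and its RX counterpart below)
 * exists because the last descriptor of every ring page is a chain BD
 * pointing at the next page rather than a real buffer descriptor; when
 * the hardware consumer index lands on that slot it must be bumped past
 * it before being compared with the software index.
 */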
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = dev_alloc_skb(len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
				skb->data + bp->rx_offset - 2,
				len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);
			new_skb->dev = bp->dev;

			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(htons(skb->protocol) != 0x8100)) {

			dev_kfree_skb_irq(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if (rx_pkt == budget)
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}

	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;
}
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
static inline int
bnx2_has_work(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;

	if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
	    (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
		return 1;

	if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
	    bp->link_up)
		return 1;

	return 0;
}
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE) !=
		(bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
/* Called with rtnl_lock from vlan functions and also dev->xmit_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
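/*
 * Multicast filtering above is a 256-bucket imperfect hash (derived from
 * the code): ether_crc_le() hashes each address, the low 8 CRC bits pick
 * a bucket, bits 7:5 select one of the eight 32-bit EMAC hash registers,
 * and bits 4:0 select the bit within that register.  For example, a CRC
 * ending in 0xe3 sets bit 3 of hash register 7.
 */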
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;

	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, *rv2p_code);
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, *rv2p_code);
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
static void
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;

	/* Halt the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->mode, val);
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area. */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->text) {
		int j;

		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->text[j]);
		}
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area. */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->sbss[j]);
		}
	}

	/* Load the BSS area. */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->bss[j]);
		}
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction. */
	REG_WR_IND(bp, cpu_reg->inst, 0);
	REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(bp, cpu_reg->mode, val);
}
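/*
 * Address translation used by load_cpu_fw (from the code above): each
 * on-chip RISC processor sees its scratchpad at the fixed virtual address
 * in mips_view_base, while the host reaches the same memory through
 * spad_base; every firmware section is therefore rebased with
 * spad_base + (section_addr - mips_view_base) before the indirect writes.
 */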
static void
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	/* Initialize the RV2P processor. */
	load_rv2p_fw(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), RV2P_PROC1);
	load_rv2p_fw(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
	fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
	fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
	fw.start_addr = bnx2_RXP_b06FwStartAddr;

	fw.text_addr = bnx2_RXP_b06FwTextAddr;
	fw.text_len = bnx2_RXP_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx2_RXP_b06FwText;

	fw.data_addr = bnx2_RXP_b06FwDataAddr;
	fw.data_len = bnx2_RXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_RXP_b06FwData;

	fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
	fw.sbss_len = bnx2_RXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_RXP_b06FwSbss;

	fw.bss_addr = bnx2_RXP_b06FwBssAddr;
	fw.bss_len = bnx2_RXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_RXP_b06FwBss;

	fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
	fw.rodata_len = bnx2_RXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_RXP_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
	fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
	fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
	fw.start_addr = bnx2_TXP_b06FwStartAddr;

	fw.text_addr = bnx2_TXP_b06FwTextAddr;
	fw.text_len = bnx2_TXP_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx2_TXP_b06FwText;

	fw.data_addr = bnx2_TXP_b06FwDataAddr;
	fw.data_len = bnx2_TXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_TXP_b06FwData;

	fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
	fw.sbss_len = bnx2_TXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_TXP_b06FwSbss;

	fw.bss_addr = bnx2_TXP_b06FwBssAddr;
	fw.bss_len = bnx2_TXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_TXP_b06FwBss;

	fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
	fw.rodata_len = bnx2_TXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_TXP_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
	fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
	fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
	fw.start_addr = bnx2_TPAT_b06FwStartAddr;

	fw.text_addr = bnx2_TPAT_b06FwTextAddr;
	fw.text_len = bnx2_TPAT_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx2_TPAT_b06FwText;

	fw.data_addr = bnx2_TPAT_b06FwDataAddr;
	fw.data_len = bnx2_TPAT_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_TPAT_b06FwData;

	fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
	fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_TPAT_b06FwSbss;

	fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
	fw.bss_len = bnx2_TPAT_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_TPAT_b06FwBss;

	fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
	fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_TPAT_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_COM_b06FwReleaseMajor;
	fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
	fw.ver_fix = bnx2_COM_b06FwReleaseFix;
	fw.start_addr = bnx2_COM_b06FwStartAddr;

	fw.text_addr = bnx2_COM_b06FwTextAddr;
	fw.text_len = bnx2_COM_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx2_COM_b06FwText;

	fw.data_addr = bnx2_COM_b06FwDataAddr;
	fw.data_len = bnx2_COM_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_COM_b06FwData;

	fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
	fw.sbss_len = bnx2_COM_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_COM_b06FwSbss;

	fw.bss_addr = bnx2_COM_b06FwBssAddr;
	fw.bss_len = bnx2_COM_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_COM_b06FwBss;

	fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
	fw.rodata_len = bnx2_COM_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_COM_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);
}
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
			/* ... 5706 A0/A1 D-state handling ... */
		}

		pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;
	}
	default:
		return -EINVAL;
	}

	return 0;
}
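/* NVRAM access is arbitrated between the driver and the on-chip
 * firmware through the BNX2_NVM_SW_ARB register: the requester sets
 * its REQ bit and polls until the matching ARB grant bit appears (or
 * clears, on release).  All of the flash helpers below bracket their
 * work with this acquire/release pair.
 */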
static int
bnx2_acquire_nvram_lock(struct bnx2 *bp)
{
	u32 val;
	int j;

	/* Request access to the flash interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
static int
bnx2_release_nvram_lock(struct bnx2 *bp)
{
	int j;
	u32 val;

	/* Relinquish nvram interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (!bp->flash_info->buffered) {
		int j;

		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}
static void
bnx2_disable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
}
static void
bnx2_enable_nvram_access(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
	       val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
}
static void
bnx2_disable_nvram_access(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
	/* Disable both bits, even after read. */
	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
	       val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
		       BNX2_NVM_ACCESS_ENABLE_WR_EN));
}
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->buffered)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM page to erase. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val,
		      u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
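/* Buffered (page-addressed) parts do not take a flat byte address:
 * the page number goes in the upper address bits and the byte offset
 * within the page in the lower page_bits.  As an illustration, with
 * the 264-byte, 9-address-bit pages used by typical buffered parts in
 * flash_table, a linear offset of 600 becomes
 * (600 / 264) << 9 + (600 % 264) = (2 << 9) + 72 = 0x448.
 * bnx2_nvram_read_dword() above and bnx2_nvram_write_dword() below
 * both apply this translation.
 */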
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val,
		       u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
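/* At init time the flash type is identified by matching the strapping
 * bits read from BNX2_NVM_CFG1 against flash_table[].  Bit 30 set
 * means the interface was already reconfigured (e.g. by the boot
 * code), in which case config1 rather than the power-on strapping is
 * matched, as bnx2_init_nvram() below shows.
 */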
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
		int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;

	/* Request access to the flash interface. */
	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
		return rc;

	/* Enable access to flash interface */
	bnx2_enable_nvram_access(bp);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	if (offset32 & 3) {
		u8 buf[4];
		u32 pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;
		}
		else {
			cmd_flags = BNX2_NVM_COMMAND_FIRST;
		}

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
		if (rc)
			return rc;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}
	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		u8 buf[4];

		if (cmd_flags)
			cmd_flags = BNX2_NVM_COMMAND_LAST;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}
	else if (len32 > 0) {
		u8 buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST;

		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		while (len32 > 4 && rc == 0) {
			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			return rc;

		cmd_flags = BNX2_NVM_COMMAND_LAST;
		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

	/* Disable access to flash interface */
	bnx2_disable_nvram_access(bp);

	bnx2_release_nvram_lock(bp);

	return rc;
}
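/* The flash is only readable one 32-bit word at a time, so the routine
 * above widens unaligned requests: a partial leading word is read
 * whole and copied starting at (offset & 3), the aligned middle is
 * streamed directly into the caller's buffer, and a partial trailing
 * word ("extra" bytes too wide) is read whole and truncated on copy.
 */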
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		 int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	if (len32 & 3) {
		if ((len32 > 4) || !align_start) {
			align_end = 4 - (len32 & 3);
			len32 += align_end;
			if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
						  end, 4)))
				return rc;
		}
	}

	if (align_start || align_end) {
		buf = kmalloc(len32, GFP_KERNEL);
		if (buf == NULL)
			return -ENOMEM;
		if (align_start)
			memcpy(buf, start, 4);
		if (align_end)
			memcpy(buf + len32 - 4, end, 4);
		memcpy(buf + align_start, data_buf, buf_size);
	}

	if (bp->flash_info->buffered == 0) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (bp->flash_info->buffered == 0) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Erase the page */
		if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
			goto nvram_write_end;

		/* Re-enable the write again for the actual write */
		bnx2_enable_nvram_write(bp);

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (bp->flash_info->buffered == 0) {
			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->buffered) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (bp->flash_info->buffered == 0) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end - 4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	if (bp->flash_info->buffered == 0)
		kfree(flash_buffer);

	if (align_start || align_end)
		kfree(buf);

	return rc;
}
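/* Writes proceed one flash page at a time.  For raw (non-buffered)
 * parts each page is first read into flash_buffer, erased, and then
 * rewritten in order: the old leading bytes, the new data, and the old
 * trailing bytes.  A write of any size and alignment therefore
 * preserves the rest of the page, at the cost of a full
 * read-erase-write cycle per page touched.
 */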
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current
	 * transactions before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

	/* Chip reset. */
	REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1))
		msleep(15);

	/* Reset takes approximately 30 usec */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
		if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
			break;
		}
		udelay(10);
	}

	if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		printk(KERN_ERR PFX "Chip reset did not complete\n");
		return -EBUSY;
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regulator to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
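/* BNX2_PCI_SWAP_DIAG0 reads back as 0x01020304 only when the byte and
 * word swap controls programmed above are consistent with the host's
 * endianness; any other value means register accesses would come back
 * scrambled, so the reset fails rather than letting the driver run on
 * a misconfigured chip.
 */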
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
#ifdef __LITTLE_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
#endif
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	bnx2_init_context(bp);

	bnx2_init_cpus(bp);
	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
	else
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
		       BNX2_HC_CONFIG_TX_TMR_MODE |
		       BNX2_HC_CONFIG_COLLECT_STATS);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 val;

	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	bp->tx_prod = 0;
	bp->tx_cons = 0;
	bp->tx_prod_bseq = 0;

	val = BNX2_L2CTX_TYPE_TYPE_L2;
	val |= BNX2_L2CTX_TYPE_SIZE_L2;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
	val |= 8 << 16;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);

	val = (u64) bp->tx_desc_mapping >> 32;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);

	val = (u64) bp->tx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
}
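/* The TX ring is a BD chain: the extra descriptor at index
 * MAX_TX_DESC_CNT carries no packet but holds the DMA address of the
 * start of the ring, so the hardware wraps back to the first BD when
 * it walks past the last usable one.  The RX ring below chains its
 * pages to each other the same way.
 */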
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* 8 for alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + 8;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_prod_bseq = 0;

	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 num_rings, max;

	bp->rx_ring_size = size;
	num_rings = 1;
	while (size > MAX_RX_DESC_CNT) {
		size -= MAX_RX_DESC_CNT;
		num_rings++;
	}
	/* round to next power of 2 */
	max = MAX_RX_RINGS;
	while ((max & num_rings) == 0)
		max >>= 1;

	if (num_rings != max)
		max <<= 1;

	bp->rx_max_ring = max;
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
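/* Worked example (assuming the usual 256-descriptor page with one BD
 * reserved for the chain pointer, i.e. MAX_RX_DESC_CNT = 255): a
 * requested size of 600 buffers needs num_rings = 3 pages.  max is
 * shifted down from MAX_RX_RINGS until it lands on the highest set
 * bit of num_rings (giving 2), and since 3 is not itself a power of
 * two it is doubled, so four RX pages are chained.
 */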
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				       pci_unmap_addr(tx_buf, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
		}
		dev_kfree_skb_any(skb);
		i += j + 1;
	}
}
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < bp->rx_max_ring_idx; i++) {
		struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL)
			continue;

		pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
				 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

		rx_buf->skb = NULL;

		dev_kfree_skb_any(skb);
	}
}
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int rc;

	rc = bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	if (rc)
		return rc;

	bnx2_init_chip(bp);
	bnx2_init_tx_ring(bp);
	bnx2_init_rx_ring(bp);
	return 0;
}
static int
bnx2_init_nic(struct bnx2 *bp)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	bnx2_init_phy(bp);
	bnx2_set_link(bp);
	return 0;
}
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i;
	static const struct {
		u16   offset;
		u16   flags;
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, 0, 0x00003f00, 0x00000000 },
		{ 0x0418, 0, 0x00000000, 0xffffffff },
		{ 0x041c, 0, 0x00000000, 0xffffffff },
		{ 0x0420, 0, 0x00000000, 0x80ffffff },
		{ 0x0424, 0, 0x00000000, 0x00000000 },
		{ 0x0428, 0, 0x00000000, 0x00000001 },
		{ 0x0450, 0, 0x00000000, 0x0000ffff },
		{ 0x0454, 0, 0x00000000, 0xffffffff },
		{ 0x0458, 0, 0x00000000, 0xffffffff },

		{ 0x0808, 0, 0x00000000, 0xffffffff },
		{ 0x0854, 0, 0x00000000, 0xffffffff },
		{ 0x0868, 0, 0x00000000, 0x77777777 },
		{ 0x086c, 0, 0x00000000, 0x77777777 },
		{ 0x0870, 0, 0x00000000, 0x77777777 },
		{ 0x0874, 0, 0x00000000, 0x77777777 },

		{ 0x0c00, 0, 0x00000000, 0x00000001 },
		{ 0x0c04, 0, 0x00000000, 0x03ff0001 },
		{ 0x0c08, 0, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
		{ 0x500c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
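/* For each table entry the test writes 0 and then all-ones to the
 * register: bits in rw_mask must read back exactly as written, while
 * bits in ro_mask must keep the value saved before the test.  Any
 * other behavior marks the register (and the chip) as faulty; the
 * original value is restored in all cases.
 */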
static int
bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
		0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
	int i;

	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
		u32 offset;

		for (offset = 0; offset < size; offset += 4) {

			REG_WR_IND(bp, start + offset, test_pattern[i]);

			if (REG_RD_IND(bp, start + offset) !=
				test_pattern[i]) {
				return -ENODEV;
			}
		}
	}
	return 0;
}
static int
bnx2_test_memory(struct bnx2 *bp)
{
	int ret = 0;
	int i;
	static const struct {
		u32   offset;
		u32   len;
	} mem_tbl[] = {
		{ 0x60000,  0x4000 },
		{ 0xa0000,  0x3000 },
		{ 0xe0000,  0x4000 },
		{ 0x120000, 0x4000 },
		{ 0x1a0000, 0x4000 },
		{ 0x160000, 0x4000 },
		{ 0xffffffff, 0    },
	};

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
			mem_tbl[i].len)) != 0) {
			return ret;
		}
	}

	return ret;
}
#define BNX2_MAC_LOOPBACK	0
#define BNX2_PHY_LOOPBACK	1

static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	pkt_size = 1514;
	skb = dev_alloc_skb(pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->mac_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
	REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(skb);

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
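/* The loopback test hand-crafts one frame addressed to the NIC itself
 * (destination = bp->mac_addr, payload byte i = i & 0xff), pushes it
 * through a single TX BD, and forces a coalesce so the status block
 * is updated without raising an interrupt.  It then checks three
 * things: the TX consumer caught up with the producer, exactly
 * num_pkts frames arrived, and the received bytes match the pattern
 * with no error bits set in the L2 frame header.
 */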
#define BNX2_MAC_LOOPBACK_FAILED	1
#define BNX2_PHY_LOOPBACK_FAILED	2
#define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
					 BNX2_PHY_LOOPBACK_FAILED)

static int
bnx2_test_loopback(struct bnx2 *bp)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2_LOOPBACK_FAILED;

	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp);
	spin_unlock_bh(&bp->phy_lock);
	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
		rc |= BNX2_MAC_LOOPBACK_FAILED;
	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
		rc |= BNX2_PHY_LOOPBACK_FAILED;
	return rc;
}
#define NVRAM_SIZE 0x200
#define CRC32_RESIDUAL 0xdebb20e3

static int
bnx2_test_nvram(struct bnx2 *bp)
{
	u32 buf[NVRAM_SIZE / 4];
	u8 *data = (u8 *) buf;
	int rc = 0;
	u32 magic, csum;

	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
		goto test_nvram_done;

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
		goto test_nvram_done;

	csum = ether_crc_le(0x100, data);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	csum = ether_crc_le(0x100, data + 0x100);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
	}

test_nvram_done:
	return rc;
}
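/* Each 0x100-byte NVRAM block stores its own little-endian CRC32 in
 * its last four bytes.  Running the CRC over the data together with
 * that stored checksum yields the fixed CRC32 residual 0xdebb20e3 for
 * any intact block, which is cheaper than extracting and comparing
 * the checksum itself.
 */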
static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	spin_lock_bh(&bp->phy_lock);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}
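/* MII_BMSR latches link-down events: the first read returns the
 * latched (possibly stale) status and clears it, so the second
 * back-to-back read above reflects the current link state.
 */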
static int
bnx2_test_intr(struct bnx2 *bp)
{
	int i;
	u16 status_idx;

	if (!netif_running(bp->dev))
		return -ENODEV;

	status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;

	/* This register is not touched during run-time. */
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
	REG_RD(bp, BNX2_HC_COMMAND);

	for (i = 0; i < 10; i++) {
		if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
			status_idx) {

			break;
		}

		msleep_interruptible(10);
	}
	if (i < 10)
		return 0;

	return -ENODEV;
}
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;
	u32 msg;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	msg = (u32) ++bp->fw_drv_pulse_wr_seq;
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {

		spin_lock(&bp->phy_lock);
		if (bp->serdes_an_pending) {
			bp->serdes_an_pending--;
		}
		else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
			u32 bmcr;

			bp->current_interval = bp->timer_interval;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);

			if (bmcr & BMCR_ANENABLE) {
				u32 phy1, phy2;

				bnx2_write_phy(bp, 0x1c, 0x7c00);
				bnx2_read_phy(bp, 0x1c, &phy1);

				bnx2_write_phy(bp, 0x17, 0x0f01);
				bnx2_read_phy(bp, 0x15, &phy2);
				bnx2_write_phy(bp, 0x17, 0x0f01);
				bnx2_read_phy(bp, 0x15, &phy2);

				if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
					!(phy2 & 0x20)) {	/* no CONFIG */

					bmcr &= ~BMCR_ANENABLE;
					bmcr |= BMCR_SPEED1000 |
						BMCR_FULLDPLX;
					bnx2_write_phy(bp, MII_BMCR, bmcr);
					bp->phy_flags |=
						PHY_PARALLEL_DETECT_FLAG;
				}
			}
		}
		else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
			 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
			u32 phy2;

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			if (phy2 & 0x20) {
				u32 bmcr;

				bnx2_read_phy(bp, MII_BMCR, &bmcr);
				bmcr |= BMCR_ANENABLE;
				bnx2_write_phy(bp, MII_BMCR, bmcr);

				bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
			}
		}
		else
			bp->current_interval = bp->timer_interval;

		spin_unlock(&bp->phy_lock);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
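/* 5706 SerDes workaround ("parallel detect"): if autonegotiation sees
 * a signal but never receives config words from the partner, the
 * partner is probably running at a forced speed, so the timer drops
 * out of autoneg and forces 1000/full.  When config words later
 * appear, autoneg is turned back on.
 */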
/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
	    !disable_msi) {

		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					 dev);
		}
		else {
			rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					 SA_SHIRQ, dev->name, dev);
		}
	}
	else {
		rc = request_irq(bp->pdev->irq, bnx2_interrupt, SA_SHIRQ,
				 dev->name, dev);
	}
	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		free_irq(bp->pdev->irq, dev);
		if (bp->flags & USING_MSI_FLAG) {
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working.
		 * If the MSI test fails, go back to INTx mode.
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			free_irq(bp->pdev->irq, dev);
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;

			rc = bnx2_init_nic(bp);

			if (!rc) {
				rc = request_irq(bp->pdev->irq, bnx2_interrupt,
						 SA_SHIRQ, dev->name, dev);
			}
			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
static void
bnx2_reset_task(void *data)
{
	struct bnx2 *bp = data;

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}

/* Called with rtnl_lock */
static void
bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	if (bp->vlgrp)
		bp->vlgrp->vlan_devices[vid] = NULL;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
/* Called with dev->xmit_lock.
 * hard_start_xmit is pseudo-lockless - a lock is only required when
 * the tx queue is full.  This way, we get the benefit of lockless
 * operations most of the time without the complexities to handle
 * netif_stop_queue/wake_queue race conditions.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_HW) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->tso_size) &&
		(skb->len > (bp->dev->mtu + ETH_HLEN))) {
		u32 tcp_opt_len, ip_tcp_len;

		if (skb_header_cloned(skb) &&
			pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		tcp_opt_len = ((skb->h.th->doff - 5) * 4);
		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = 0;
		if (skb->h.th->doff > 5) {
			tcp_opt_len = (skb->h.th->doff - 5) << 2;
		}
		ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);

		skb->nh.iph->check = 0;
		skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
		skb->h.th->check =
			~csum_tcpudp_magic(skb->nh.iph->saddr,
					    skb->nh.iph->daddr,
					    0, IPPROTO_TCP, 0);

		if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
			vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
				(tcp_opt_len >> 2)) << 8;
		}
	}
	else {
		mss = 0;
	}

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
	REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		spin_lock(&bp->tx_lock);
		netif_stop_queue(dev);

		if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
			netif_wake_queue(dev);
		spin_unlock(&bp->tx_lock);
	}

	return NETDEV_TX_OK;
}
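/* For LSO the hardware needs to know how much of each segment is
 * header.  IP and TCP options beyond the fixed 20-byte headers are
 * encoded into bits 8 and up of vlan_tag_flags as a count of extra
 * 32-bit words: ((ihl - 5) + (tcp_opt_len >> 2)) << 8.  The pseudo
 * checksum seeded into th->check lets the chip finalize the TCP
 * checksum of each generated segment.
 */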
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	free_irq(bp->pdev->irq, dev);
	if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
#define GET_NET_STATS64(ctr)					\
	(unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	(unsigned long) (ctr##_lo)

#define GET_NET_STATS32(ctr)	\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
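/* Hardware counters live in the shared statistics block as 64-bit
 * {hi, lo} pairs.  On 64-bit kernels both halves are combined into one
 * unsigned long; on 32-bit kernels only the low word fits the netdev
 * counters, so wraparound of the high word is simply ignored.
 */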
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	return net_stats;
}
/* All ethtool functions called with rtnl_lock */

static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;

		cmd->port = PORT_FIBRE;
	}
	else {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

		cmd->port = PORT_TP;
	}

	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			return -EINVAL;
		}
		else {
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if ((cmd->speed != SPEED_1000) ||
				(cmd->duplex != DUPLEX_FULL)) {
				return -EINVAL;
			}
		}
		else if (cmd->speed == SPEED_1000) {
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
	info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
	info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
	info->fw_version[1] = info->fw_version[3] = '.';
	info->fw_version[5] = 0;
}
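/* bp->fw_ver packs one version digit per byte (major, minor, fix in
 * the top three bytes), so adding '0' to each byte yields the ASCII
 * "M.m.f" string built above; multi-digit version components cannot
 * be represented this way.
 */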
#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}

static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
				 0x0800, 0x0880, 0x0c00, 0x0c10,
				 0x0c30, 0x0d08, 0x1000, 0x101c,
				 0x1040, 0x1048, 0x1080, 0x10a4,
				 0x1400, 0x1490, 0x1498, 0x14f0,
				 0x1500, 0x155c, 0x1580, 0x15dc,
				 0x1600, 0x1658, 0x1680, 0x16d8,
				 0x1800, 0x1820, 0x1840, 0x1854,
				 0x1880, 0x1894, 0x1900, 0x1984,
				 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
				 0x1c80, 0x1c94, 0x1d00, 0x1d84,
				 0x2000, 0x2030, 0x23c0, 0x2400,
				 0x2800, 0x2820, 0x2830, 0x2850,
				 0x2b40, 0x2c10, 0x2fc0, 0x3058,
				 0x3c00, 0x3c94, 0x4000, 0x4010,
				 0x4080, 0x4090, 0x43c0, 0x4458,
				 0x4c00, 0x4c18, 0x4c40, 0x4c54,
				 0x4fc0, 0x5010, 0x53c0, 0x5444,
				 0x5c00, 0x5c18, 0x5c80, 0x5c90,
				 0x5fc0, 0x6000, 0x6400, 0x6428,
				 0x6800, 0x6848, 0x684c, 0x6860,
				 0x6888, 0x6910, 0x8000 };

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
static void
bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	}
	else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int
bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	}
	else {
		bp->wol = 0;
	}
	return 0;
}
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			bp->current_interval = SERDES_AN_TIMEOUT;
			bp->serdes_an_pending = 1;
			mod_timer(&bp->timer, jiffies + bp->current_interval);
		}
	}

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
static int
bnx2_get_eeprom_len(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flash_info == NULL)
		return 0;

	return (int) bp->flash_size;
}

static int
bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int
bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_set_eeprom */

	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
	bp->stats_ticks &= 0xffff00;

	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}

static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
static void
bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
}

static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	}
	else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
#define BNX2_NUM_STATS 45

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_fw_discards" },
};
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
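/* Each entry below is the 32-bit word offset of a counter in the
 * chip's statistics block.  64-bit counters are stored as two
 * consecutive 32-bit words, high word first; the *_hi names refer to
 * the high word, and the per-chip length arrays further down say
 * whether 4 or 8 bytes should be read back for each counter.
 */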
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
	STATS_OFFSET32(stat_IfHCInOctets_hi),
	STATS_OFFSET32(stat_IfHCInBadOctets_hi),
	STATS_OFFSET32(stat_IfHCOutOctets_hi),
	STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
	STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
	STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
	STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
	STATS_OFFSET32(stat_Dot3StatsFCSErrors),
	STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
	STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
	STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
	STATS_OFFSET32(stat_Dot3StatsLateCollisions),
	STATS_OFFSET32(stat_EtherStatsCollisions),
	STATS_OFFSET32(stat_EtherStatsFragments),
	STATS_OFFSET32(stat_EtherStatsJabbers),
	STATS_OFFSET32(stat_EtherStatsUndersizePkts),
	STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
	STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_XonPauseFramesReceived),
	STATS_OFFSET32(stat_XoffPauseFramesReceived),
	STATS_OFFSET32(stat_OutXonSent),
	STATS_OFFSET32(stat_OutXoffSent),
	STATS_OFFSET32(stat_MacControlFramesReceived),
	STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
	STATS_OFFSET32(stat_IfInMBUFDiscards),
};
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,
};

static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,
};
#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
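/* ethtool self-test.  The offline tests (registers, memory, loopback)
 * take the NIC down and reset it into diagnostic mode; the online
 * tests (NVRAM, interrupt, link) run against the live device.  Each
 * failing test sets its slot in buf[] non-zero and flags
 * ETH_TEST_FL_FAILED.
 */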
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		msleep_interruptible(3000);
		if ((!bp->link_up) && !(bp->phy_flags & PHY_SERDES_FLAG))
			msleep_interruptible(4000);
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}
static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
			sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
			sizeof(bnx2_tests_str_arr));
		break;
	}
}
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
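/* Copy the hardware statistics block out as an array of u64, using
 * the per-chip length table to decide whether each counter is one or
 * two 32-bit words; counters with length 0 are skipped because of
 * chip errata and reported as zero.
 */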
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
				   bnx2_stats_offset_arr[i])) << 32) +
			 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
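/* Blink the port LED so the adapter can be physically identified
 * ("ethtool -p"): alternate between a plain override and an all-on
 * override every 500 ms, then restore the saved LED configuration.
 */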
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
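/* Hook the driver's ethtool operations into the stack; generic
 * ethtool_op_* helpers are used where no device-specific handling
 * is needed.
 */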
static struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#ifdef BCM_TSO
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= ethtool_op_set_tso,
#endif
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
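/* The MII ioctls below give userspace (e.g. mii-tool) raw access to
 * PHY registers: reads take the phy_lock, and writes additionally
 * require CAP_NET_ADMIN.
 */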
/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp);

	return 0;
}
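/* MTU changes are validated against the chip's jumbo frame limit and
 * then applied by re-initializing the NIC if it is up.
 */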
/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		bnx2_netif_stop(bp);

		bnx2_init_nic(bp);

		bnx2_netif_start(bp);
	}
	return 0;
}
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev, NULL);
	enable_irq(bp->pdev->irq);
}
#endif
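/* One-time board setup: enable the PCI device, map BAR0, read the
 * chip ID, bus mode and permanent MAC address from firmware shared
 * memory, and pick default coalescing, ring and link parameters.
 */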
static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc;
	u32 reg;

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting.\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address, "
		       "aborting.\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management capability, "
		       "aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
	if (bp->pcix_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCIX capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask "
			       "failed, aborting.\n");
			rc = -EIO;
			goto err_out_release;
		}
	}
	else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	spin_lock_init(&bp->tx_lock);
	INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);

	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(17);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);

	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting.\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * The chip's target access swapping will not swap all accesses.
	 */
	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
			       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bnx2_set_power_state(bp, PCI_D0);

	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);

	/* Get bus information. */
	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;

	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	}
	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & PCIX_FLAG)) {

		printk(KERN_ERR PFX "5706 A1 can only be used in a PCIX bus, "
		       "aborting.\n");
		rc = -EPERM;
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);

	reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG)
		bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
	else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		printk(KERN_ERR PFX "Firmware not running, aborting.\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;

	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 100);

	bp->rx_csum = 1;

	bp->rx_offset = sizeof(struct l2_fhdr) + 2;

	bp->tx_quick_cons_trip_int = 20;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 6;
	bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = 1000000 & 0xffff00;

	bp->timer_interval = HZ;
	bp->current_interval = HZ;

	bp->phy_addr = 1;

	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		bp->flags |= NO_WOL_FLAG;
		if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			bp->phy_addr = 2;
			reg = REG_RD_IND(bp, bp->shmem_base +
					 BNX2_SHARED_HW_CFG_CONFIG);
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
		}
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		bp->flags |= NO_WOL_FLAG;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	}
	else {
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
	}

	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(bp->timer_interval);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
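/* PCI probe entry point: allocate the net_device, run board setup,
 * wire up the netdev methods and offload feature flags, then register
 * with the network stack.
 */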
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
	dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
#endif
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;

	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	if ((rc = register_netdev(dev))) {
		printk(KERN_ERR PFX "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
		"IRQ %d, ",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		((bp->flags & PCIX_FLAG) ? "-X" : ""),
		((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
		bp->bus_speed_mhz,
		dev->base_addr,
		bp->pdev->irq);

	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");

	dev->features |= NETIF_F_SG;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_IP_CSUM;
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
#ifdef BCM_TSO
	dev->features |= NETIF_F_TSO;
#endif

	netif_carrier_off(bp->dev);

	return 0;
}
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
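/* Power management: on suspend the NIC is quiesced and a reset code
 * chosen by WOL capability is handed to the firmware; resume reverses
 * the process and re-initializes the hardware.
 */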
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
static int __init bnx2_init(void)
{
	return pci_module_init(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);