/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2007 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>

#include "bnx2.h"
#include "bnx2_fw.h"
#include "bnx2_fw2.h"

#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.6.3"
#define DRV_MODULE_RELDATE	"July 16, 2007"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
} board_t;

/* indexed by board_t, above */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};

static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};

static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

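/* Worked example of the wraparound arithmetic in bnx2_tx_avail() below
 * (illustrative values, not from hardware): tx_prod and tx_cons are
 * free-running 16-bit indices, so with tx_prod == 0x0002 and
 * tx_cons == 0xfffe the subtraction underflows; masking with 0xffff
 * recovers the true in-flight count of 4.  A transient full-ring reading
 * of TX_DESC_CNT (256) is clamped to MAX_TX_DESC_CNT (255) because one
 * index per ring page is skipped (used for chaining rather than data).
 */
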
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bp->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}

static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

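/* Note on the two helpers above: BNX2_PCICFG_REG_WINDOW_ADDRESS and
 * BNX2_PCICFG_REG_WINDOW form a classic address/data pair, so every
 * indirect access is a two-register sequence.  indirect_lock serializes
 * the pair; without it a concurrent caller could retarget the window
 * between the address write and the data access.
 */
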
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			u32 val;
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}

static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

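/* Layout of the BNX2_EMAC_MDIO_COMM word assembled in bnx2_read_phy()
 * above and bnx2_write_phy() below: the 5-bit PHY address sits at bits
 * 25:21 and the 5-bit register number at bits 20:16 (hence the << 21 and
 * << 16), with the data in the low 16 bits.  START_BUSY kicks off the
 * transaction and is polled until the hardware clears it, up to
 * 50 x 10 usec before giving up with -EBUSY.
 */
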
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}

static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}

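/* bnx2_netif_stop()/bnx2_netif_start() nest: each stop increments
 * intr_sem (in bnx2_disable_int_sync()) and each start decrements it, so
 * the queue, poll routine, and interrupts are only re-enabled when the
 * outermost start brings the count back to zero.
 */
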
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}

static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
	       bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}

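/* The status block and statistics block above share one coherent
 * allocation: the status block occupies the first L1-cache-aligned chunk
 * and the statistics block starts at status_blk_size.  Both the virtual
 * and DMA addresses of the stats block are derived by the same offset,
 * so one pci_alloc_consistent()/pci_free_consistent() pair covers both.
 */
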
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
		 "Copper"));
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}

static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}

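/* The cascade above implements the 802.3 pause resolution; reduced to a
 * table (PAUSE/ASYM advertisement bits on each side):
 *
 *	local	remote	resolved flow_ctrl
 *	1 x	1 x	FLOW_CTRL_TX | FLOW_CTRL_RX
 *	1 1	0 1	FLOW_CTRL_RX
 *	0 1	1 1	FLOW_CTRL_TX
 *	(any other combination resolves to no flow control)
 */
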
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}

	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
	case MII_BNX2_GP_TOP_AN_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_1G:
	case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
		bp->line_speed = SPEED_1000;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) != CHIP_NUM_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}

static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

static int bnx2_fw_sync(struct bnx2 *, u32, int);

static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}

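/* bnx2_fw_sync() sleeps while polling for the firmware ACK, so the
 * caller's phy_lock (a BH-disabling spinlock) must be dropped around the
 * call, as done above.  Callers of bnx2_setup_remote_phy() therefore
 * hold phy_lock but must tolerate this brief release window.
 */
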
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}

#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)

static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
	else
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}

static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return bnx2_set_default_remote_link(bp);

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}

static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}

static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
			break;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
			break;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
			break;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;
			break;
		default:
			bp->line_speed = 0;
			break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}

	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}

static int
bnx2_set_remote_link(struct bnx2 *bp)
{
	u32 evt_code;

	evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
	switch (evt_code) {
	case BNX2_FW_EVT_CODE_LINK_EVENT:
		bnx2_remote_phy_event(bp);
		break;
	case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
	default:
		bnx2_send_heart_beat(bp);
		break;
	}
	return 0;
}

static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			    ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
		    (adv_reg != new_adv_reg) ||
		    ((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				       BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced
			 * or vice-versa.
			 */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}

static int
bnx2_setup_phy(struct bnx2 *bp, u8 port)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		return (bnx2_setup_serdes_phy(bp, port));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}

static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}

static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}

static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}

static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}

static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		goto setup_phy;

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}

static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
	u32 mac_mode;

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~BNX2_EMAC_MODE_PORT;
	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}

static int bnx2_test_link(struct bnx2 *);

static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}

static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
			       "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}

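/* Handshake summary for bnx2_fw_sync(): the driver stamps each mailbox
 * message with an incrementing sequence number (fw_wr_seq) in the
 * BNX2_DRV_MSG_SEQ bits, then polls BNX2_FW_MB in 10 ms steps for up to
 * FW_ACK_TIME_OUT_MS waiting for the firmware to echo that sequence in
 * the ACK field.  Messages whose DATA field is _WAIT0 still poll but
 * ignore a missing ACK and return success.
 */
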
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}

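/* Each iteration above programs one 64-bit host page-table entry: DATA0
 * carries the low 32 bits of the context page's DMA address plus the
 * VALID bit, DATA1 the high 32 bits, and the write to
 * BNX2_CTX_HOST_PAGE_TBL_CTRL with WRITE_REQ commits entry i.  The
 * WRITE_REQ bit doubles as the completion flag and is polled until the
 * chip clears it.
 */
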
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, 0x00, offset, 0);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
		}
	}
}

2139 static int
2140 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2142 u16 *good_mbuf;
2143 u32 good_mbuf_cnt;
2144 u32 val;
2146 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2147 if (good_mbuf == NULL) {
2148 printk(KERN_ERR PFX "Failed to allocate memory in "
2149 "bnx2_alloc_bad_rbuf\n");
2150 return -ENOMEM;
2153 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2154 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2156 good_mbuf_cnt = 0;
2158 /* Allocate a bunch of mbufs and save the good ones in an array. */
2159 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2160 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2161 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2163 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2165 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2167 /* The addresses with Bit 9 set are bad memory blocks. */
2168 if (!(val & (1 << 9))) {
2169 good_mbuf[good_mbuf_cnt] = (u16) val;
2170 good_mbuf_cnt++;
2173 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2176 /* Free the good ones back to the mbuf pool, thus discarding
2177 * all the bad ones. */
2178 while (good_mbuf_cnt) {
2179 good_mbuf_cnt--;
2181 val = good_mbuf[good_mbuf_cnt];
2182 val = (val << 9) | val | 1;
2184 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
2186 kfree(good_mbuf);
2187 return 0;
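/* Program the unicast MAC match registers.  The 6-byte address is
 * split big-endian across two registers; for example (an illustrative
 * address, not a real assignment), 00:10:18:0a:0b:0c would be written
 * as MATCH0 = 0x00000010 (bytes 0-1) and MATCH1 = 0x180a0b0c
 * (bytes 2-5).
 */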
2190 static void
2191 bnx2_set_mac_addr(struct bnx2 *bp)
2193 u32 val;
2194 u8 *mac_addr = bp->dev->dev_addr;
2196 val = (mac_addr[0] << 8) | mac_addr[1];
2198 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2200 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2201 (mac_addr[4] << 8) | mac_addr[5];
2203 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2206 static inline int
2207 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
2209 struct sk_buff *skb;
2210 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2211 dma_addr_t mapping;
2212 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2213 unsigned long align;
2215 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2216 if (skb == NULL) {
2217 return -ENOMEM;
2220 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2221 skb_reserve(skb, BNX2_RX_ALIGN - align);
2223 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2224 PCI_DMA_FROMDEVICE);
2226 rx_buf->skb = skb;
2227 pci_unmap_addr_set(rx_buf, mapping, mapping);
2229 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2230 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2232 bp->rx_prod_bseq += bp->rx_buf_use_size;
2234 return 0;
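/* Attention-event bookkeeping: the status block carries the current
 * event state in status_attn_bits and the last state the driver
 * acknowledged in status_attn_bits_ack.  An event is pending whenever
 * the two fields disagree; the helper below consumes it by writing
 * the bit through the PCICFG set/clear command registers so the ack
 * field catches up with the live state.
 */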
2237 static int
2238 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2240 struct status_block *sblk = bp->status_blk;
2241 u32 new_link_state, old_link_state;
2242 int is_set = 1;
2244 new_link_state = sblk->status_attn_bits & event;
2245 old_link_state = sblk->status_attn_bits_ack & event;
2246 if (new_link_state != old_link_state) {
2247 if (new_link_state)
2248 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2249 else
2250 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2251 } else
2252 is_set = 0;
2254 return is_set;
2257 static void
2258 bnx2_phy_int(struct bnx2 *bp)
2260 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
2261 spin_lock(&bp->phy_lock);
2262 bnx2_set_link(bp);
2263 spin_unlock(&bp->phy_lock);
2265 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
2266 bnx2_set_remote_link(bp);
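/* TX completion processing.  The hardware consumer index counts BD
 * slots, and the last slot of each ring page apparently holds a chain
 * pointer rather than a real buffer descriptor, which is why an index
 * that lands on MAX_TX_DESC_CNT is bumped past it before being
 * compared with the software consumer.
 */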
2270 static void
2271 bnx2_tx_int(struct bnx2 *bp)
2273 struct status_block *sblk = bp->status_blk;
2274 u16 hw_cons, sw_cons, sw_ring_cons;
2275 int tx_free_bd = 0;
2277 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
2278 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2279 hw_cons++;
2281 sw_cons = bp->tx_cons;
2283 while (sw_cons != hw_cons) {
2284 struct sw_bd *tx_buf;
2285 struct sk_buff *skb;
2286 int i, last;
2288 sw_ring_cons = TX_RING_IDX(sw_cons);
2290 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2291 skb = tx_buf->skb;
2293 /* partial BD completions possible with TSO packets */
2294 if (skb_is_gso(skb)) {
2295 u16 last_idx, last_ring_idx;
2297 last_idx = sw_cons +
2298 skb_shinfo(skb)->nr_frags + 1;
2299 last_ring_idx = sw_ring_cons +
2300 skb_shinfo(skb)->nr_frags + 1;
2301 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2302 last_idx++;
2304 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2305 break;
2309 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2310 skb_headlen(skb), PCI_DMA_TODEVICE);
2312 tx_buf->skb = NULL;
2313 last = skb_shinfo(skb)->nr_frags;
2315 for (i = 0; i < last; i++) {
2316 sw_cons = NEXT_TX_BD(sw_cons);
2318 pci_unmap_page(bp->pdev,
2319 pci_unmap_addr(
2320 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2321 mapping),
2322 skb_shinfo(skb)->frags[i].size,
2323 PCI_DMA_TODEVICE);
2326 sw_cons = NEXT_TX_BD(sw_cons);
2328 tx_free_bd += last + 1;
2330 dev_kfree_skb(skb);
2332 hw_cons = bp->hw_tx_cons =
2333 sblk->status_tx_quick_consumer_index0;
2335 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2336 hw_cons++;
2340 bp->tx_cons = sw_cons;
2341 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2342 * before checking for netif_queue_stopped(). Without the
2343 * memory barrier, there is a small possibility that bnx2_start_xmit()
2344 * will miss it and cause the queue to be stopped forever.
2345 */
2346 smp_mb();
2348 if (unlikely(netif_queue_stopped(bp->dev)) &&
2349 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2350 netif_tx_lock(bp->dev);
2351 if ((netif_queue_stopped(bp->dev)) &&
2352 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
2353 netif_wake_queue(bp->dev);
2354 netif_tx_unlock(bp->dev);
2358 static inline void
2359 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2360 u16 cons, u16 prod)
2362 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2363 struct rx_bd *cons_bd, *prod_bd;
2365 cons_rx_buf = &bp->rx_buf_ring[cons];
2366 prod_rx_buf = &bp->rx_buf_ring[prod];
2368 pci_dma_sync_single_for_device(bp->pdev,
2369 pci_unmap_addr(cons_rx_buf, mapping),
2370 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2372 bp->rx_prod_bseq += bp->rx_buf_use_size;
2374 prod_rx_buf->skb = skb;
2376 if (cons == prod)
2377 return;
2379 pci_unmap_addr_set(prod_rx_buf, mapping,
2380 pci_unmap_addr(cons_rx_buf, mapping));
2382 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2383 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2384 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2385 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2388 static int
2389 bnx2_rx_int(struct bnx2 *bp, int budget)
2391 struct status_block *sblk = bp->status_blk;
2392 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2393 struct l2_fhdr *rx_hdr;
2394 int rx_pkt = 0;
2396 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
2397 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
2398 hw_cons++;
2400 sw_cons = bp->rx_cons;
2401 sw_prod = bp->rx_prod;
2403 /* Memory barrier necessary as speculative reads of the rx
2404 * buffer can be ahead of the index in the status block
2405 */
2406 rmb();
2407 while (sw_cons != hw_cons) {
2408 unsigned int len;
2409 u32 status;
2410 struct sw_bd *rx_buf;
2411 struct sk_buff *skb;
2412 dma_addr_t dma_addr;
2414 sw_ring_cons = RX_RING_IDX(sw_cons);
2415 sw_ring_prod = RX_RING_IDX(sw_prod);
2417 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2418 skb = rx_buf->skb;
2420 rx_buf->skb = NULL;
2422 dma_addr = pci_unmap_addr(rx_buf, mapping);
2424 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2425 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2427 rx_hdr = (struct l2_fhdr *) skb->data;
2428 len = rx_hdr->l2_fhdr_pkt_len - 4;
2430 if ((status = rx_hdr->l2_fhdr_status) &
2431 (L2_FHDR_ERRORS_BAD_CRC |
2432 L2_FHDR_ERRORS_PHY_DECODE |
2433 L2_FHDR_ERRORS_ALIGNMENT |
2434 L2_FHDR_ERRORS_TOO_SHORT |
2435 L2_FHDR_ERRORS_GIANT_FRAME)) {
2437 goto reuse_rx;
2440 /* Since we don't have a jumbo ring, copy small packets
2441 * if mtu > 1500
2442 */
2443 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2444 struct sk_buff *new_skb;
2446 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2447 if (new_skb == NULL)
2448 goto reuse_rx;
2450 /* aligned copy */
2451 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2452 new_skb->data, len + 2);
2453 skb_reserve(new_skb, 2);
2454 skb_put(new_skb, len);
2456 bnx2_reuse_rx_skb(bp, skb,
2457 sw_ring_cons, sw_ring_prod);
2459 skb = new_skb;
2461 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
2462 pci_unmap_single(bp->pdev, dma_addr,
2463 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
2465 skb_reserve(skb, bp->rx_offset);
2466 skb_put(skb, len);
2468 else {
2469 reuse_rx:
2470 bnx2_reuse_rx_skb(bp, skb,
2471 sw_ring_cons, sw_ring_prod);
2472 goto next_rx;
2475 skb->protocol = eth_type_trans(skb, bp->dev);
2477 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2478 (ntohs(skb->protocol) != 0x8100)) {
2480 dev_kfree_skb(skb);
2481 goto next_rx;
2485 skb->ip_summed = CHECKSUM_NONE;
2486 if (bp->rx_csum &&
2487 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2488 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2490 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2491 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2492 skb->ip_summed = CHECKSUM_UNNECESSARY;
2495 #ifdef BCM_VLAN
2496 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2497 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2498 rx_hdr->l2_fhdr_vlan_tag);
2500 else
2501 #endif
2502 netif_receive_skb(skb);
2504 bp->dev->last_rx = jiffies;
2505 rx_pkt++;
2507 next_rx:
2508 sw_cons = NEXT_RX_BD(sw_cons);
2509 sw_prod = NEXT_RX_BD(sw_prod);
2511 if (rx_pkt == budget)
2512 break;
2514 /* Refresh hw_cons to see if there is new work */
2515 if (sw_cons == hw_cons) {
2516 hw_cons = bp->hw_rx_cons =
2517 sblk->status_rx_quick_consumer_index0;
2518 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
2519 hw_cons++;
2520 rmb();
2523 bp->rx_cons = sw_cons;
2524 bp->rx_prod = sw_prod;
2526 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2528 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2530 mmiowb();
2532 return rx_pkt;
2536 /* MSI ISR - The only difference between this and the INTx ISR
2537 * is that the MSI interrupt is always serviced.
2538 */
2539 static irqreturn_t
2540 bnx2_msi(int irq, void *dev_instance)
2542 struct net_device *dev = dev_instance;
2543 struct bnx2 *bp = netdev_priv(dev);
2545 prefetch(bp->status_blk);
2546 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2547 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2548 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2550 /* Return here if interrupt is disabled. */
2551 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2552 return IRQ_HANDLED;
2554 netif_rx_schedule(dev);
2556 return IRQ_HANDLED;
2559 static irqreturn_t
2560 bnx2_msi_1shot(int irq, void *dev_instance)
2562 struct net_device *dev = dev_instance;
2563 struct bnx2 *bp = netdev_priv(dev);
2565 prefetch(bp->status_blk);
2567 /* Return here if interrupt is disabled. */
2568 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2569 return IRQ_HANDLED;
2571 netif_rx_schedule(dev);
2573 return IRQ_HANDLED;
2576 static irqreturn_t
2577 bnx2_interrupt(int irq, void *dev_instance)
2579 struct net_device *dev = dev_instance;
2580 struct bnx2 *bp = netdev_priv(dev);
2581 struct status_block *sblk = bp->status_blk;
2583 /* When using INTx, it is possible for the interrupt to arrive
2584 * at the CPU before the status block posted prior to the
2585 * interrupt. Reading a register will flush the status block.
2586 * When using MSI, the MSI message will always complete after
2587 * the status block write.
2588 */
2589 if ((sblk->status_idx == bp->last_status_idx) &&
2590 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2591 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2592 return IRQ_NONE;
2594 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2595 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2596 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2598 /* Read back to deassert IRQ immediately to avoid too many
2599 * spurious interrupts.
2600 */
2601 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2603 /* Return here if interrupt is shared and is disabled. */
2604 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2605 return IRQ_HANDLED;
2607 if (netif_rx_schedule_prep(dev)) {
2608 bp->last_status_idx = sblk->status_idx;
2609 __netif_rx_schedule(dev);
2612 return IRQ_HANDLED;
2615 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2616 STATUS_ATTN_BITS_TIMER_ABORT)
2618 static inline int
2619 bnx2_has_work(struct bnx2 *bp)
2621 struct status_block *sblk = bp->status_blk;
2623 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2624 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2625 return 1;
2627 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2628 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2629 return 1;
2631 return 0;
2634 static int
2635 bnx2_poll(struct net_device *dev, int *budget)
2637 struct bnx2 *bp = netdev_priv(dev);
2638 struct status_block *sblk = bp->status_blk;
2639 u32 status_attn_bits = sblk->status_attn_bits;
2640 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
2642 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2643 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
2645 bnx2_phy_int(bp);
2647 /* This is needed to take care of transient status
2648 * during link changes.
2649 */
2650 REG_WR(bp, BNX2_HC_COMMAND,
2651 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2652 REG_RD(bp, BNX2_HC_COMMAND);
2655 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
2656 bnx2_tx_int(bp);
2658 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
2659 int orig_budget = *budget;
2660 int work_done;
2662 if (orig_budget > dev->quota)
2663 orig_budget = dev->quota;
2665 work_done = bnx2_rx_int(bp, orig_budget);
2666 *budget -= work_done;
2667 dev->quota -= work_done;
2670 bp->last_status_idx = bp->status_blk->status_idx;
2671 rmb();
2673 if (!bnx2_has_work(bp)) {
2674 netif_rx_complete(dev);
2675 if (likely(bp->flags & USING_MSI_FLAG)) {
2676 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2677 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2678 bp->last_status_idx);
2679 return 0;
2681 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2682 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2683 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2684 bp->last_status_idx);
2686 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2687 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2688 bp->last_status_idx);
2689 return 0;
2692 return 1;
2695 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2696 * from set_multicast.
2697 */
2698 static void
2699 bnx2_set_rx_mode(struct net_device *dev)
2701 struct bnx2 *bp = netdev_priv(dev);
2702 u32 rx_mode, sort_mode;
2703 int i;
2705 spin_lock_bh(&bp->phy_lock);
2707 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2708 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2709 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2710 #ifdef BCM_VLAN
2711 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2712 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2713 #else
2714 if (!(bp->flags & ASF_ENABLE_FLAG))
2715 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2716 #endif
2717 if (dev->flags & IFF_PROMISC) {
2718 /* Promiscuous mode. */
2719 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2720 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2721 BNX2_RPM_SORT_USER0_PROM_VLAN;
2723 else if (dev->flags & IFF_ALLMULTI) {
2724 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2725 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2726 0xffffffff);
2728 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2730 else {
2731 /* Accept one or more multicast(s). */
2732 struct dev_mc_list *mclist;
2733 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2734 u32 regidx;
2735 u32 bit;
2736 u32 crc;
2738 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
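/* Hash each address into one of 256 filter bits using the low byte of
 * the little-endian CRC32: bits 7-5 pick one of the eight 32-bit hash
 * registers and bits 4-0 pick the bit within it.  For example, a CRC
 * low byte of 0xa7 sets bit 7 of hash register 5.
 */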
2740 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2741 i++, mclist = mclist->next) {
2743 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2744 bit = crc & 0xff;
2745 regidx = (bit & 0xe0) >> 5;
2746 bit &= 0x1f;
2747 mc_filter[regidx] |= (1 << bit);
2750 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2751 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2752 mc_filter[i]);
2755 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2758 if (rx_mode != bp->rx_mode) {
2759 bp->rx_mode = rx_mode;
2760 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2763 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2764 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2765 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2767 spin_unlock_bh(&bp->phy_lock);
2770 #define FW_BUF_SIZE 0x8000
2772 static int
2773 bnx2_gunzip_init(struct bnx2 *bp)
2775 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2776 goto gunzip_nomem1;
2778 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2779 goto gunzip_nomem2;
2781 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2782 if (bp->strm->workspace == NULL)
2783 goto gunzip_nomem3;
2785 return 0;
2787 gunzip_nomem3:
2788 kfree(bp->strm);
2789 bp->strm = NULL;
2791 gunzip_nomem2:
2792 vfree(bp->gunzip_buf);
2793 bp->gunzip_buf = NULL;
2795 gunzip_nomem1:
2796 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2797 "uncompression.\n", bp->dev->name);
2798 return -ENOMEM;
2801 static void
2802 bnx2_gunzip_end(struct bnx2 *bp)
2804 kfree(bp->strm->workspace);
2806 kfree(bp->strm);
2807 bp->strm = NULL;
2809 if (bp->gunzip_buf) {
2810 vfree(bp->gunzip_buf);
2811 bp->gunzip_buf = NULL;
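/* Decompress gzip-wrapped firmware (RFC 1952 framing around a raw
 * DEFLATE stream).  The fixed 10-byte header is: magic 0x1f 0x8b, the
 * compression method (0x08 = deflate), a flags byte, 4 bytes of
 * mtime, and the XFL and OS bytes.  If the FNAME flag is set, a
 * NUL-terminated original file name follows and is skipped as well.
 * Inflate is initialized with -MAX_WBITS to request a raw stream with
 * no zlib header.
 */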
2815 static int
2816 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2818 int n, rc;
2820 /* check gzip header */
2821 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2822 return -EINVAL;
2824 n = 10;
2826 #define FNAME 0x8
2827 if (zbuf[3] & FNAME)
2828 while ((zbuf[n++] != 0) && (n < len));
2830 bp->strm->next_in = zbuf + n;
2831 bp->strm->avail_in = len - n;
2832 bp->strm->next_out = bp->gunzip_buf;
2833 bp->strm->avail_out = FW_BUF_SIZE;
2835 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2836 if (rc != Z_OK)
2837 return rc;
2839 rc = zlib_inflate(bp->strm, Z_FINISH);
2841 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2842 *outbuf = bp->gunzip_buf;
2844 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2845 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2846 bp->dev->name, bp->strm->msg);
2848 zlib_inflateEnd(bp->strm);
2850 if (rc == Z_STREAM_END)
2851 return 0;
2853 return rc;
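/* Load an RV2P processor image.  Each instruction is 64 bits wide and
 * is staged through two 32-bit registers (INSTR_HIGH, then INSTR_LOW)
 * before a write to the per-processor ADDR_CMD register commits it at
 * instruction slot i / 8 -- hence the loop stride of 8 bytes per
 * instruction.
 */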
2856 static void
2857 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2858 u32 rv2p_proc)
2860 int i;
2861 u32 val;
2864 for (i = 0; i < rv2p_code_len; i += 8) {
2865 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2866 rv2p_code++;
2867 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2868 rv2p_code++;
2870 if (rv2p_proc == RV2P_PROC1) {
2871 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2872 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2874 else {
2875 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2876 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2880 /* Reset the processor; un-stalling is done later. */
2881 if (rv2p_proc == RV2P_PROC1) {
2882 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2884 else {
2885 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2889 static int
2890 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2892 u32 offset;
2893 u32 val;
2894 int rc;
2896 /* Halt the CPU. */
2897 val = REG_RD_IND(bp, cpu_reg->mode);
2898 val |= cpu_reg->mode_value_halt;
2899 REG_WR_IND(bp, cpu_reg->mode, val);
2900 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2902 /* Load the Text area. */
2903 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2904 if (fw->gz_text) {
2905 u32 text_len;
2906 void *text;
2908 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2909 &text_len);
2910 if (rc)
2911 return rc;
2913 fw->text = text;
2915 if (fw->gz_text) {
2916 int j;
2918 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2919 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2923 /* Load the Data area. */
2924 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2925 if (fw->data) {
2926 int j;
2928 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2929 REG_WR_IND(bp, offset, fw->data[j]);
2933 /* Load the SBSS area. */
2934 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2935 if (fw->sbss) {
2936 int j;
2938 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2939 REG_WR_IND(bp, offset, fw->sbss[j]);
2943 /* Load the BSS area. */
2944 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2945 if (fw->bss) {
2946 int j;
2948 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2949 REG_WR_IND(bp, offset, fw->bss[j]);
2953 /* Load the Read-Only area. */
2954 offset = cpu_reg->spad_base +
2955 (fw->rodata_addr - cpu_reg->mips_view_base);
2956 if (fw->rodata) {
2957 int j;
2959 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2960 REG_WR_IND(bp, offset, fw->rodata[j]);
2964 /* Clear the pre-fetch instruction. */
2965 REG_WR_IND(bp, cpu_reg->inst, 0);
2966 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2968 /* Start the CPU. */
2969 val = REG_RD_IND(bp, cpu_reg->mode);
2970 val &= ~cpu_reg->mode_value_halt;
2971 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2972 REG_WR_IND(bp, cpu_reg->mode, val);
2974 return 0;
2977 static int
2978 bnx2_init_cpus(struct bnx2 *bp)
2980 struct cpu_reg cpu_reg;
2981 struct fw_info *fw;
2982 int rc = 0;
2983 void *text;
2984 u32 text_len;
2986 if ((rc = bnx2_gunzip_init(bp)) != 0)
2987 return rc;
2989 /* Initialize the RV2P processor. */
2990 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2991 &text_len);
2992 if (rc)
2993 goto init_cpu_err;
2995 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2997 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2998 &text_len);
2999 if (rc)
3000 goto init_cpu_err;
3002 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
3004 /* Initialize the RX Processor. */
3005 cpu_reg.mode = BNX2_RXP_CPU_MODE;
3006 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
3007 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
3008 cpu_reg.state = BNX2_RXP_CPU_STATE;
3009 cpu_reg.state_value_clear = 0xffffff;
3010 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
3011 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3012 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3013 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3014 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3015 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3016 cpu_reg.mips_view_base = 0x8000000;
3018 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3019 fw = &bnx2_rxp_fw_09;
3020 else
3021 fw = &bnx2_rxp_fw_06;
3023 rc = load_cpu_fw(bp, &cpu_reg, fw);
3024 if (rc)
3025 goto init_cpu_err;
3027 /* Initialize the TX Processor. */
3028 cpu_reg.mode = BNX2_TXP_CPU_MODE;
3029 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3030 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3031 cpu_reg.state = BNX2_TXP_CPU_STATE;
3032 cpu_reg.state_value_clear = 0xffffff;
3033 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3034 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3035 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3036 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3037 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3038 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3039 cpu_reg.mips_view_base = 0x8000000;
3041 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3042 fw = &bnx2_txp_fw_09;
3043 else
3044 fw = &bnx2_txp_fw_06;
3046 rc = load_cpu_fw(bp, &cpu_reg, fw);
3047 if (rc)
3048 goto init_cpu_err;
3050 /* Initialize the TX Patch-up Processor. */
3051 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3052 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3053 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3054 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3055 cpu_reg.state_value_clear = 0xffffff;
3056 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3057 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3058 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3059 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3060 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3061 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3062 cpu_reg.mips_view_base = 0x8000000;
3064 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3065 fw = &bnx2_tpat_fw_09;
3066 else
3067 fw = &bnx2_tpat_fw_06;
3069 rc = load_cpu_fw(bp, &cpu_reg, fw);
3070 if (rc)
3071 goto init_cpu_err;
3073 /* Initialize the Completion Processor. */
3074 cpu_reg.mode = BNX2_COM_CPU_MODE;
3075 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3076 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3077 cpu_reg.state = BNX2_COM_CPU_STATE;
3078 cpu_reg.state_value_clear = 0xffffff;
3079 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3080 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3081 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3082 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3083 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3084 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3085 cpu_reg.mips_view_base = 0x8000000;
3087 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3088 fw = &bnx2_com_fw_09;
3089 else
3090 fw = &bnx2_com_fw_06;
3092 rc = load_cpu_fw(bp, &cpu_reg, fw);
3093 if (rc)
3094 goto init_cpu_err;
3096 /* Initialize the Command Processor. */
3097 cpu_reg.mode = BNX2_CP_CPU_MODE;
3098 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3099 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3100 cpu_reg.state = BNX2_CP_CPU_STATE;
3101 cpu_reg.state_value_clear = 0xffffff;
3102 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3103 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3104 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3105 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3106 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3107 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3108 cpu_reg.mips_view_base = 0x8000000;
3110 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3111 fw = &bnx2_cp_fw_09;
3113 rc = load_cpu_fw(bp, &cpu_reg, fw);
3114 if (rc)
3115 goto init_cpu_err;
3117 init_cpu_err:
3118 bnx2_gunzip_end(bp);
3119 return rc;
3122 static int
3123 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3125 u16 pmcsr;
3127 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3129 switch (state) {
3130 case PCI_D0: {
3131 u32 val;
3133 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3134 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3135 PCI_PM_CTRL_PME_STATUS);
3137 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3138 /* delay required during transition out of D3hot */
3139 msleep(20);
3141 val = REG_RD(bp, BNX2_EMAC_MODE);
3142 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3143 val &= ~BNX2_EMAC_MODE_MPKT;
3144 REG_WR(bp, BNX2_EMAC_MODE, val);
3146 val = REG_RD(bp, BNX2_RPM_CONFIG);
3147 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3148 REG_WR(bp, BNX2_RPM_CONFIG, val);
3149 break;
3151 case PCI_D3hot: {
3152 int i;
3153 u32 val, wol_msg;
3155 if (bp->wol) {
3156 u32 advertising;
3157 u8 autoneg;
3159 autoneg = bp->autoneg;
3160 advertising = bp->advertising;
3162 bp->autoneg = AUTONEG_SPEED;
3163 bp->advertising = ADVERTISED_10baseT_Half |
3164 ADVERTISED_10baseT_Full |
3165 ADVERTISED_100baseT_Half |
3166 ADVERTISED_100baseT_Full |
3167 ADVERTISED_Autoneg;
3169 bnx2_setup_copper_phy(bp);
3171 bp->autoneg = autoneg;
3172 bp->advertising = advertising;
3174 bnx2_set_mac_addr(bp);
3176 val = REG_RD(bp, BNX2_EMAC_MODE);
3178 /* Enable port mode. */
3179 val &= ~BNX2_EMAC_MODE_PORT;
3180 val |= BNX2_EMAC_MODE_PORT_MII |
3181 BNX2_EMAC_MODE_MPKT_RCVD |
3182 BNX2_EMAC_MODE_ACPI_RCVD |
3183 BNX2_EMAC_MODE_MPKT;
3185 REG_WR(bp, BNX2_EMAC_MODE, val);
3187 /* receive all multicast */
3188 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3189 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3190 0xffffffff);
3192 REG_WR(bp, BNX2_EMAC_RX_MODE,
3193 BNX2_EMAC_RX_MODE_SORT_MODE);
3195 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3196 BNX2_RPM_SORT_USER0_MC_EN;
3197 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3198 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3199 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3200 BNX2_RPM_SORT_USER0_ENA);
3202 /* Need to enable EMAC and RPM for WOL. */
3203 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3204 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3205 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3206 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3208 val = REG_RD(bp, BNX2_RPM_CONFIG);
3209 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3210 REG_WR(bp, BNX2_RPM_CONFIG, val);
3212 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3214 else {
3215 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3218 if (!(bp->flags & NO_WOL_FLAG))
3219 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
3221 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3222 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3223 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3225 if (bp->wol)
3226 pmcsr |= 3;
3228 else {
3229 pmcsr |= 3;
3231 if (bp->wol) {
3232 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3234 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3235 pmcsr);
3237 /* No more memory access after this point until
3238 * device is brought back to D0.
3239 */
3240 udelay(50);
3241 break;
3243 default:
3244 return -EINVAL;
3246 return 0;
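/* The NVRAM interface is shared with the management firmware, so the
 * driver must win the hardware arbiter before touching it.  This
 * driver always uses arbitration slot 2: request with ARB_REQ_SET2,
 * spin until ARB_ARB2 reports the grant, and release with
 * ARB_REQ_CLR2 when done.
 */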
3249 static int
3250 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3252 u32 val;
3253 int j;
3255 /* Request access to the flash interface. */
3256 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3257 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3258 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3259 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3260 break;
3262 udelay(5);
3265 if (j >= NVRAM_TIMEOUT_COUNT)
3266 return -EBUSY;
3268 return 0;
3271 static int
3272 bnx2_release_nvram_lock(struct bnx2 *bp)
3274 int j;
3275 u32 val;
3277 /* Relinquish nvram interface. */
3278 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3280 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3281 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3282 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3283 break;
3285 udelay(5);
3288 if (j >= NVRAM_TIMEOUT_COUNT)
3289 return -EBUSY;
3291 return 0;
3295 static int
3296 bnx2_enable_nvram_write(struct bnx2 *bp)
3298 u32 val;
3300 val = REG_RD(bp, BNX2_MISC_CFG);
3301 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3303 if (bp->flash_info->flags & BNX2_NV_WREN) {
3304 int j;
3306 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3307 REG_WR(bp, BNX2_NVM_COMMAND,
3308 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3310 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3311 udelay(5);
3313 val = REG_RD(bp, BNX2_NVM_COMMAND);
3314 if (val & BNX2_NVM_COMMAND_DONE)
3315 break;
3318 if (j >= NVRAM_TIMEOUT_COUNT)
3319 return -EBUSY;
3321 return 0;
3324 static void
3325 bnx2_disable_nvram_write(struct bnx2 *bp)
3327 u32 val;
3329 val = REG_RD(bp, BNX2_MISC_CFG);
3330 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3334 static void
3335 bnx2_enable_nvram_access(struct bnx2 *bp)
3337 u32 val;
3339 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3340 /* Enable both bits, even on read. */
3341 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3342 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3345 static void
3346 bnx2_disable_nvram_access(struct bnx2 *bp)
3348 u32 val;
3350 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3351 /* Disable both bits, even after read. */
3352 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3353 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3354 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3357 static int
3358 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3360 u32 cmd;
3361 int j;
3363 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3364 /* Buffered flash, no erase needed */
3365 return 0;
3367 /* Build an erase command */
3368 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3369 BNX2_NVM_COMMAND_DOIT;
3371 /* Need to clear DONE bit separately. */
3372 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3374 /* Address of the NVRAM page to erase. */
3375 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3377 /* Issue an erase command. */
3378 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3380 /* Wait for completion. */
3381 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3382 u32 val;
3384 udelay(5);
3386 val = REG_RD(bp, BNX2_NVM_COMMAND);
3387 if (val & BNX2_NVM_COMMAND_DONE)
3388 break;
3391 if (j >= NVRAM_TIMEOUT_COUNT)
3392 return -EBUSY;
3394 return 0;
3397 static int
3398 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3400 u32 cmd;
3401 int j;
3403 /* Build the command word. */
3404 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3406 /* Translate the offset into a buffered-flash page address; not needed for the 5709. */
3407 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3408 offset = ((offset / bp->flash_info->page_size) <<
3409 bp->flash_info->page_bits) +
3410 (offset % bp->flash_info->page_size);
3413 /* Need to clear DONE bit separately. */
3414 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3416 /* Address of the NVRAM to read from. */
3417 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3419 /* Issue a read command. */
3420 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3422 /* Wait for completion. */
3423 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3424 u32 val;
3426 udelay(5);
3428 val = REG_RD(bp, BNX2_NVM_COMMAND);
3429 if (val & BNX2_NVM_COMMAND_DONE) {
3430 val = REG_RD(bp, BNX2_NVM_READ);
3432 val = be32_to_cpu(val);
3433 memcpy(ret_val, &val, 4);
3434 break;
3437 if (j >= NVRAM_TIMEOUT_COUNT)
3438 return -EBUSY;
3440 return 0;
3444 static int
3445 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3447 u32 cmd, val32;
3448 int j;
3450 /* Build the command word. */
3451 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3453 /* Translate the offset into a buffered-flash page address; not needed for the 5709. */
3454 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3455 offset = ((offset / bp->flash_info->page_size) <<
3456 bp->flash_info->page_bits) +
3457 (offset % bp->flash_info->page_size);
3460 /* Need to clear DONE bit separately. */
3461 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3463 memcpy(&val32, val, 4);
3464 val32 = cpu_to_be32(val32);
3466 /* Write the data. */
3467 REG_WR(bp, BNX2_NVM_WRITE, val32);
3469 /* Address of the NVRAM to write to. */
3470 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3472 /* Issue the write command. */
3473 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3475 /* Wait for completion. */
3476 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3477 udelay(5);
3479 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3480 break;
3482 if (j >= NVRAM_TIMEOUT_COUNT)
3483 return -EBUSY;
3485 return 0;
3488 static int
3489 bnx2_init_nvram(struct bnx2 *bp)
3491 u32 val;
3492 int j, entry_count, rc = 0;
3493 struct flash_spec *flash;
3495 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3496 bp->flash_info = &flash_5709;
3497 goto get_flash_size;
3500 /* Determine the selected interface. */
3501 val = REG_RD(bp, BNX2_NVM_CFG1);
3503 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
3505 if (val & 0x40000000) {
3507 /* Flash interface has been reconfigured */
3508 for (j = 0, flash = &flash_table[0]; j < entry_count;
3509 j++, flash++) {
3510 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3511 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3512 bp->flash_info = flash;
3513 break;
3517 else {
3518 u32 mask;
3519 /* Flash interface has not been reconfigured yet. */
3521 if (val & (1 << 23))
3522 mask = FLASH_BACKUP_STRAP_MASK;
3523 else
3524 mask = FLASH_STRAP_MASK;
3526 for (j = 0, flash = &flash_table[0]; j < entry_count;
3527 j++, flash++) {
3529 if ((val & mask) == (flash->strapping & mask)) {
3530 bp->flash_info = flash;
3532 /* Request access to the flash interface. */
3533 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3534 return rc;
3536 /* Enable access to flash interface */
3537 bnx2_enable_nvram_access(bp);
3539 /* Reconfigure the flash interface */
3540 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3541 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3542 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3543 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3545 /* Disable access to flash interface */
3546 bnx2_disable_nvram_access(bp);
3547 bnx2_release_nvram_lock(bp);
3549 break;
3552 } /* if (val & 0x40000000) */
3554 if (j == entry_count) {
3555 bp->flash_info = NULL;
3556 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3557 return -ENODEV;
3560 get_flash_size:
3561 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3562 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3563 if (val)
3564 bp->flash_size = val;
3565 else
3566 bp->flash_size = bp->flash_info->total_size;
3568 return rc;
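/* NVRAM reads go one 32-bit word at a time, so an arbitrary
 * (offset, length) request is decomposed into an aligned head word, a
 * run of whole words, and an aligned tail word, with the FIRST/LAST
 * command flags bracketing the burst.  For example, reading 6 bytes
 * at offset 5 reads the word at 4 and keeps bytes 5-7 (pre_len = 3),
 * then reads the word at 8 and keeps its first 3 bytes (extra = 1).
 */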
3571 static int
3572 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3573 int buf_size)
3575 int rc = 0;
3576 u32 cmd_flags, offset32, len32, extra;
3578 if (buf_size == 0)
3579 return 0;
3581 /* Request access to the flash interface. */
3582 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3583 return rc;
3585 /* Enable access to flash interface */
3586 bnx2_enable_nvram_access(bp);
3588 len32 = buf_size;
3589 offset32 = offset;
3590 extra = 0;
3592 cmd_flags = 0;
3594 if (offset32 & 3) {
3595 u8 buf[4];
3596 u32 pre_len;
3598 offset32 &= ~3;
3599 pre_len = 4 - (offset & 3);
3601 if (pre_len >= len32) {
3602 pre_len = len32;
3603 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3604 BNX2_NVM_COMMAND_LAST;
3606 else {
3607 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3610 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3612 if (rc)
3613 return rc;
3615 memcpy(ret_buf, buf + (offset & 3), pre_len);
3617 offset32 += 4;
3618 ret_buf += pre_len;
3619 len32 -= pre_len;
3621 if (len32 & 3) {
3622 extra = 4 - (len32 & 3);
3623 len32 = (len32 + 4) & ~3;
3626 if (len32 == 4) {
3627 u8 buf[4];
3629 if (cmd_flags)
3630 cmd_flags = BNX2_NVM_COMMAND_LAST;
3631 else
3632 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3633 BNX2_NVM_COMMAND_LAST;
3635 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3637 memcpy(ret_buf, buf, 4 - extra);
3639 else if (len32 > 0) {
3640 u8 buf[4];
3642 /* Read the first word. */
3643 if (cmd_flags)
3644 cmd_flags = 0;
3645 else
3646 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3648 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3650 /* Advance to the next dword. */
3651 offset32 += 4;
3652 ret_buf += 4;
3653 len32 -= 4;
3655 while (len32 > 4 && rc == 0) {
3656 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3658 /* Advance to the next dword. */
3659 offset32 += 4;
3660 ret_buf += 4;
3661 len32 -= 4;
3664 if (rc)
3665 return rc;
3667 cmd_flags = BNX2_NVM_COMMAND_LAST;
3668 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3670 memcpy(ret_buf, buf, 4 - extra);
3673 /* Disable access to flash interface */
3674 bnx2_disable_nvram_access(bp);
3676 bnx2_release_nvram_lock(bp);
3678 return rc;
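/* NVRAM writes are read-modify-write at two granularities.  Byte
 * level: unaligned head and tail bytes are merged into whole words
 * fetched with bnx2_nvram_read() above.  Page level: non-buffered
 * flash parts can only be erased a page at a time, so for every page
 * touched the driver reads the whole page into flash_buffer, erases
 * it, and rewrites the leading untouched words, the new data, and the
 * trailing untouched words in that order.
 */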
3681 static int
3682 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3683 int buf_size)
3685 u32 written, offset32, len32;
3686 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3687 int rc = 0;
3688 int align_start, align_end;
3690 buf = data_buf;
3691 offset32 = offset;
3692 len32 = buf_size;
3693 align_start = align_end = 0;
3695 if ((align_start = (offset32 & 3))) {
3696 offset32 &= ~3;
3697 len32 += align_start;
3698 if (len32 < 4)
3699 len32 = 4;
3700 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3701 return rc;
3704 if (len32 & 3) {
3705 align_end = 4 - (len32 & 3);
3706 len32 += align_end;
3707 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3708 return rc;
3711 if (align_start || align_end) {
3712 align_buf = kmalloc(len32, GFP_KERNEL);
3713 if (align_buf == NULL)
3714 return -ENOMEM;
3715 if (align_start) {
3716 memcpy(align_buf, start, 4);
3718 if (align_end) {
3719 memcpy(align_buf + len32 - 4, end, 4);
3721 memcpy(align_buf + align_start, data_buf, buf_size);
3722 buf = align_buf;
3725 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3726 flash_buffer = kmalloc(264, GFP_KERNEL);
3727 if (flash_buffer == NULL) {
3728 rc = -ENOMEM;
3729 goto nvram_write_end;
3733 written = 0;
3734 while ((written < len32) && (rc == 0)) {
3735 u32 page_start, page_end, data_start, data_end;
3736 u32 addr, cmd_flags;
3737 int i;
3739 /* Find the page_start addr */
3740 page_start = offset32 + written;
3741 page_start -= (page_start % bp->flash_info->page_size);
3742 /* Find the page_end addr */
3743 page_end = page_start + bp->flash_info->page_size;
3744 /* Find the data_start addr */
3745 data_start = (written == 0) ? offset32 : page_start;
3746 /* Find the data_end addr */
3747 data_end = (page_end > offset32 + len32) ?
3748 (offset32 + len32) : page_end;
3750 /* Request access to the flash interface. */
3751 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3752 goto nvram_write_end;
3754 /* Enable access to flash interface */
3755 bnx2_enable_nvram_access(bp);
3757 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3758 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3759 int j;
3761 /* Read the whole page into the buffer
3762 * (non-buffered flash only) */
3763 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3764 if (j == (bp->flash_info->page_size - 4)) {
3765 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3767 rc = bnx2_nvram_read_dword(bp,
3768 page_start + j,
3769 &flash_buffer[j],
3770 cmd_flags);
3772 if (rc)
3773 goto nvram_write_end;
3775 cmd_flags = 0;
3779 /* Enable writes to flash interface (unlock write-protect) */
3780 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3781 goto nvram_write_end;
3783 /* Loop to write back the buffer data from page_start to
3784 * data_start */
3785 i = 0;
3786 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3787 /* Erase the page */
3788 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3789 goto nvram_write_end;
3791 /* Re-enable the write again for the actual write */
3792 bnx2_enable_nvram_write(bp);
3794 for (addr = page_start; addr < data_start;
3795 addr += 4, i += 4) {
3797 rc = bnx2_nvram_write_dword(bp, addr,
3798 &flash_buffer[i], cmd_flags);
3800 if (rc != 0)
3801 goto nvram_write_end;
3803 cmd_flags = 0;
3807 /* Loop to write the new data from data_start to data_end */
3808 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3809 if ((addr == page_end - 4) ||
3810 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
3811 (addr == data_end - 4))) {
3813 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3815 rc = bnx2_nvram_write_dword(bp, addr, buf,
3816 cmd_flags);
3818 if (rc != 0)
3819 goto nvram_write_end;
3821 cmd_flags = 0;
3822 buf += 4;
3825 /* Loop to write back the buffer data from data_end
3826 * to page_end */
3827 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3828 for (addr = data_end; addr < page_end;
3829 addr += 4, i += 4) {
3831 if (addr == page_end-4) {
3832 cmd_flags = BNX2_NVM_COMMAND_LAST;
3834 rc = bnx2_nvram_write_dword(bp, addr,
3835 &flash_buffer[i], cmd_flags);
3837 if (rc != 0)
3838 goto nvram_write_end;
3840 cmd_flags = 0;
3844 /* Disable writes to flash interface (lock write-protect) */
3845 bnx2_disable_nvram_write(bp);
3847 /* Disable access to flash interface */
3848 bnx2_disable_nvram_access(bp);
3849 bnx2_release_nvram_lock(bp);
3851 /* Account for the bytes written to this page. */
3852 written += data_end - data_start;
3855 nvram_write_end:
3856 kfree(flash_buffer);
3857 kfree(align_buf);
3858 return rc;
3861 static void
3862 bnx2_init_remote_phy(struct bnx2 *bp)
3864 u32 val;
3866 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
3867 if (!(bp->phy_flags & PHY_SERDES_FLAG))
3868 return;
3870 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
3871 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
3872 return;
3874 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
3875 if (netif_running(bp->dev)) {
3876 val = BNX2_DRV_ACK_CAP_SIGNATURE |
3877 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
3878 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
3879 val);
3881 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
3883 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
3884 if (val & BNX2_LINK_STATUS_SERDES_LINK)
3885 bp->phy_port = PORT_FIBRE;
3886 else
3887 bp->phy_port = PORT_TP;
3891 static int
3892 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3894 u32 val;
3895 int i, rc = 0;
3897 /* Wait for the current PCI transaction to complete before
3898 * issuing a reset. */
3899 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3900 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3901 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3902 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3903 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3904 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3905 udelay(5);
3907 /* Wait for the firmware to tell us it is ok to issue a reset. */
3908 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3910 /* Deposit a driver reset signature so the firmware knows that
3911 * this is a soft reset. */
3912 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3913 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3915 /* Do a dummy read to force the chip to complete all current transactions
3916 * before we issue a reset. */
3917 val = REG_RD(bp, BNX2_MISC_ID);
3919 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3920 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3921 REG_RD(bp, BNX2_MISC_COMMAND);
3922 udelay(5);
3924 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3925 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3927 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3929 } else {
3930 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3931 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3932 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3934 /* Chip reset. */
3935 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3937 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3938 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3939 current->state = TASK_UNINTERRUPTIBLE;
3940 schedule_timeout(HZ / 50);
3943 /* Reset takes approximately 30 usec. */
3944 for (i = 0; i < 10; i++) {
3945 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3946 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3947 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3948 break;
3949 udelay(10);
3952 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3953 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3954 printk(KERN_ERR PFX "Chip reset did not complete\n");
3955 return -EBUSY;
3959 /* Make sure byte swapping is properly configured. */
3960 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3961 if (val != 0x01020304) {
3962 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3963 return -ENODEV;
3966 /* Wait for the firmware to finish its initialization. */
3967 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3968 if (rc)
3969 return rc;
3971 spin_lock_bh(&bp->phy_lock);
3972 bnx2_init_remote_phy(bp);
3973 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
3974 bnx2_set_default_remote_link(bp);
3975 spin_unlock_bh(&bp->phy_lock);
3977 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3978 /* Adjust the voltage regulator two steps lower. The default
3979 * of this register is 0x0000000e. */
3980 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3982 /* Remove bad rbuf memory from the free pool. */
3983 rc = bnx2_alloc_bad_rbuf(bp);
3986 return rc;
3989 static int
3990 bnx2_init_chip(struct bnx2 *bp)
3992 u32 val;
3993 int rc;
3995 /* Make sure the interrupt is not active. */
3996 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3998 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3999 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4000 #ifdef __BIG_ENDIAN
4001 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4002 #endif
4003 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4004 DMA_READ_CHANS << 12 |
4005 DMA_WRITE_CHANS << 16;
4007 val |= (0x2 << 20) | (1 << 11);
4009 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
4010 val |= (1 << 23);
4012 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4013 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
4014 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4016 REG_WR(bp, BNX2_DMA_CONFIG, val);
4018 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4019 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4020 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4021 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4024 if (bp->flags & PCIX_FLAG) {
4025 u16 val16;
4027 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4028 &val16);
4029 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4030 val16 & ~PCI_X_CMD_ERO);
4033 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4034 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4035 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4036 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4038 /* Initialize context mapping and zero out the quick contexts. The
4039 * context block must have already been enabled. */
4040 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4041 rc = bnx2_init_5709_context(bp);
4042 if (rc)
4043 return rc;
4044 } else
4045 bnx2_init_context(bp);
4047 if ((rc = bnx2_init_cpus(bp)) != 0)
4048 return rc;
4050 bnx2_init_nvram(bp);
4052 bnx2_set_mac_addr(bp);
4054 val = REG_RD(bp, BNX2_MQ_CONFIG);
4055 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4056 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4057 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4058 val |= BNX2_MQ_CONFIG_HALT_DIS;
4060 REG_WR(bp, BNX2_MQ_CONFIG, val);
4062 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4063 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4064 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4066 val = (BCM_PAGE_BITS - 8) << 24;
4067 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4069 /* Configure page size. */
4070 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4071 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4072 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4073 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4075 val = bp->mac_addr[0] +
4076 (bp->mac_addr[1] << 8) +
4077 (bp->mac_addr[2] << 16) +
4078 bp->mac_addr[3] +
4079 (bp->mac_addr[4] << 8) +
4080 (bp->mac_addr[5] << 16);
4081 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4083 /* Program the MTU. Also include 4 bytes for CRC32. */
4084 val = bp->dev->mtu + ETH_HLEN + 4;
4085 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4086 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4087 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4089 bp->last_status_idx = 0;
4090 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4092 /* Set up how to generate a link change interrupt. */
4093 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4095 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4096 (u64) bp->status_blk_mapping & 0xffffffff);
4097 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4099 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4100 (u64) bp->stats_blk_mapping & 0xffffffff);
4101 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4102 (u64) bp->stats_blk_mapping >> 32);
4104 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4105 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4107 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4108 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4110 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4111 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4113 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4115 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4117 REG_WR(bp, BNX2_HC_COM_TICKS,
4118 (bp->com_ticks_int << 16) | bp->com_ticks);
4120 REG_WR(bp, BNX2_HC_CMD_TICKS,
4121 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4123 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4124 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4125 else
4126 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4127 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4129 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4130 val = BNX2_HC_CONFIG_COLLECT_STATS;
4131 else {
4132 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4133 BNX2_HC_CONFIG_COLLECT_STATS;
4136 if (bp->flags & ONE_SHOT_MSI_FLAG)
4137 val |= BNX2_HC_CONFIG_ONE_SHOT;
4139 REG_WR(bp, BNX2_HC_CONFIG, val);
4141 /* Clear internal stats counters. */
4142 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4144 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4146 /* Initialize the receive filter. */
4147 bnx2_set_rx_mode(bp->dev);
4149 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4150 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4151 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4152 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4154 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4157 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4158 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4160 udelay(20);
4162 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4164 return rc;
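/* Write the per-connection TX context.  The 5709 family keeps its
 * context image in host memory and uses the _XI register offsets;
 * older chips use the on-chip context with the original offsets.
 * Either way the context records the ring type and size, the command
 * type, and the 64-bit host address of the BD chain.
 */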
4167 static void
4168 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4170 u32 val, offset0, offset1, offset2, offset3;
4172 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4173 offset0 = BNX2_L2CTX_TYPE_XI;
4174 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4175 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4176 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4177 } else {
4178 offset0 = BNX2_L2CTX_TYPE;
4179 offset1 = BNX2_L2CTX_CMD_TYPE;
4180 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4181 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4183 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4184 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4186 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4187 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4189 val = (u64) bp->tx_desc_mapping >> 32;
4190 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4192 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4193 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4196 static void
4197 bnx2_init_tx_ring(struct bnx2 *bp)
4199 struct tx_bd *txbd;
4200 u32 cid;
4202 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4204 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4206 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4207 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4209 bp->tx_prod = 0;
4210 bp->tx_cons = 0;
4211 bp->hw_tx_cons = 0;
4212 bp->tx_prod_bseq = 0;
4214 cid = TX_CID;
4215 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4216 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4218 bnx2_init_tx_context(bp, cid);
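/* Build the RX BD rings.  The inner loop below leaves rxbd pointing
 * at the last descriptor of each page, which is then aimed at the
 * next page's DMA address (wrapping at the last page) to form the
 * chain that lets the hardware walk a multi-page ring.
 */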
4221 static void
4222 bnx2_init_rx_ring(struct bnx2 *bp)
4224 struct rx_bd *rxbd;
4225 int i;
4226 u16 prod, ring_prod;
4227 u32 val;
4229 /* 8 for CRC and VLAN */
4230 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4231 /* hw alignment */
4232 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4234 ring_prod = prod = bp->rx_prod = 0;
4235 bp->rx_cons = 0;
4236 bp->hw_rx_cons = 0;
4237 bp->rx_prod_bseq = 0;
4239 for (i = 0; i < bp->rx_max_ring; i++) {
4240 int j;
4242 rxbd = &bp->rx_desc_ring[i][0];
4243 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4244 rxbd->rx_bd_len = bp->rx_buf_use_size;
4245 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4247 if (i == (bp->rx_max_ring - 1))
4248 j = 0;
4249 else
4250 j = i + 1;
4251 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
4252 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
4253 0xffffffff;
4256 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4257 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4258 val |= 0x02 << 8;
4259 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
4261 val = (u64) bp->rx_desc_mapping[0] >> 32;
4262 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
4264 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
4265 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
4267 for (i = 0; i < bp->rx_ring_size; i++) {
4268 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
4269 break;
4271 prod = NEXT_RX_BD(prod);
4272 ring_prod = RX_RING_IDX(prod);
4274 bp->rx_prod = prod;
4276 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4278 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
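/* Size the RX ring.  Each ring page supplies MAX_RX_DESC_CNT usable
 * BDs (the final slot chains to the next page), and the page count is
 * rounded up to a power of two so that RX_RING_IDX() can mask rather
 * than divide.  For example, assuming 255 usable BDs per page, a
 * request for 1000 buffers needs 4 pages, already a power of two.
 */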
4281 static void
4282 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4284 u32 num_rings, max;
4286 bp->rx_ring_size = size;
4287 num_rings = 1;
4288 while (size > MAX_RX_DESC_CNT) {
4289 size -= MAX_RX_DESC_CNT;
4290 num_rings++;
4292 /* round to next power of 2 */
4293 max = MAX_RX_RINGS;
4294 while ((max & num_rings) == 0)
4295 max >>= 1;
4297 if (num_rings != max)
4298 max <<= 1;
4300 bp->rx_max_ring = max;
4301 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4304 static void
4305 bnx2_free_tx_skbs(struct bnx2 *bp)
4307 int i;
4309 if (bp->tx_buf_ring == NULL)
4310 return;
4312 for (i = 0; i < TX_DESC_CNT; ) {
4313 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4314 struct sk_buff *skb = tx_buf->skb;
4315 int j, last;
4317 if (skb == NULL) {
4318 i++;
4319 continue;
4322 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4323 skb_headlen(skb), PCI_DMA_TODEVICE);
4325 tx_buf->skb = NULL;
4327 last = skb_shinfo(skb)->nr_frags;
4328 for (j = 0; j < last; j++) {
4329 tx_buf = &bp->tx_buf_ring[i + j + 1];
4330 pci_unmap_page(bp->pdev,
4331 pci_unmap_addr(tx_buf, mapping),
4332 skb_shinfo(skb)->frags[j].size,
4333 PCI_DMA_TODEVICE);
4335 dev_kfree_skb(skb);
4336 i += j + 1;
4341 static void
4342 bnx2_free_rx_skbs(struct bnx2 *bp)
4344 int i;
4346 if (bp->rx_buf_ring == NULL)
4347 return;
4349 for (i = 0; i < bp->rx_max_ring_idx; i++) {
4350 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4351 struct sk_buff *skb = rx_buf->skb;
4353 if (skb == NULL)
4354 continue;
4356 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4357 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4359 rx_buf->skb = NULL;
4361 dev_kfree_skb(skb);
4365 static void
4366 bnx2_free_skbs(struct bnx2 *bp)
4368 bnx2_free_tx_skbs(bp);
4369 bnx2_free_rx_skbs(bp);
4372 static int
4373 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4375 int rc;
4377 rc = bnx2_reset_chip(bp, reset_code);
4378 bnx2_free_skbs(bp);
4379 if (rc)
4380 return rc;
4382 if ((rc = bnx2_init_chip(bp)) != 0)
4383 return rc;
4385 bnx2_init_tx_ring(bp);
4386 bnx2_init_rx_ring(bp);
4387 return 0;
4390 static int
4391 bnx2_init_nic(struct bnx2 *bp)
4393 int rc;
4395 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4396 return rc;
4398 spin_lock_bh(&bp->phy_lock);
4399 bnx2_init_phy(bp);
4400 bnx2_set_link(bp);
4401 spin_unlock_bh(&bp->phy_lock);
4402 return 0;
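/* Register self-test: each table entry names a register, the bits that
 * must be host-writable (rw_mask) and the bits that must be read-only
 * (ro_mask).  Each register is probed twice, roughly:
 *
 *	save = readl(reg);
 *	writel(0, reg);
 *	fail if (readl(reg) & rw_mask) != 0;
 *	fail if the ro_mask bits changed from save;
 *	writel(0xffffffff, reg);
 *	fail if (readl(reg) & rw_mask) != rw_mask;
 *	fail if the ro_mask bits changed from save;
 *	writel(save, reg);
 *
 * Entries flagged BNX2_FL_NOT_5709 are skipped on the 5709.
 */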
4405 static int
4406 bnx2_test_registers(struct bnx2 *bp)
4408 int ret;
4409 int i, is_5709;
4410 static const struct {
4411 u16 offset;
4412 u16 flags;
4413 #define BNX2_FL_NOT_5709 1
4414 u32 rw_mask;
4415 u32 ro_mask;
4416 } reg_tbl[] = {
4417 { 0x006c, 0, 0x00000000, 0x0000003f },
4418 { 0x0090, 0, 0xffffffff, 0x00000000 },
4419 { 0x0094, 0, 0x00000000, 0x00000000 },
4421 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4422 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4423 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4424 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4425 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4426 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4427 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4428 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4429 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4431 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4432 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4433 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4434 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4435 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4436 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4438 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4439 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4440 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4442 { 0x1000, 0, 0x00000000, 0x00000001 },
4443 { 0x1004, 0, 0x00000000, 0x000f0001 },
4445 { 0x1408, 0, 0x01c00800, 0x00000000 },
4446 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4447 { 0x14a8, 0, 0x00000000, 0x000001ff },
4448 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4449 { 0x14b0, 0, 0x00000002, 0x00000001 },
4450 { 0x14b8, 0, 0x00000000, 0x00000000 },
4451 { 0x14c0, 0, 0x00000000, 0x00000009 },
4452 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4453 { 0x14cc, 0, 0x00000000, 0x00000001 },
4454 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4456 { 0x1800, 0, 0x00000000, 0x00000001 },
4457 { 0x1804, 0, 0x00000000, 0x00000003 },
4459 { 0x2800, 0, 0x00000000, 0x00000001 },
4460 { 0x2804, 0, 0x00000000, 0x00003f01 },
4461 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4462 { 0x2810, 0, 0xffff0000, 0x00000000 },
4463 { 0x2814, 0, 0xffff0000, 0x00000000 },
4464 { 0x2818, 0, 0xffff0000, 0x00000000 },
4465 { 0x281c, 0, 0xffff0000, 0x00000000 },
4466 { 0x2834, 0, 0xffffffff, 0x00000000 },
4467 { 0x2840, 0, 0x00000000, 0xffffffff },
4468 { 0x2844, 0, 0x00000000, 0xffffffff },
4469 { 0x2848, 0, 0xffffffff, 0x00000000 },
4470 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4472 { 0x2c00, 0, 0x00000000, 0x00000011 },
4473 { 0x2c04, 0, 0x00000000, 0x00030007 },
4475 { 0x3c00, 0, 0x00000000, 0x00000001 },
4476 { 0x3c04, 0, 0x00000000, 0x00070000 },
4477 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4478 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4479 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4480 { 0x3c14, 0, 0x00000000, 0xffffffff },
4481 { 0x3c18, 0, 0x00000000, 0xffffffff },
4482 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4483 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4485 { 0x5004, 0, 0x00000000, 0x0000007f },
4486 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4488 { 0x5c00, 0, 0x00000000, 0x00000001 },
4489 { 0x5c04, 0, 0x00000000, 0x0003000f },
4490 { 0x5c08, 0, 0x00000003, 0x00000000 },
4491 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4492 { 0x5c10, 0, 0x00000000, 0xffffffff },
4493 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4494 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4495 { 0x5c88, 0, 0x00000000, 0x00077373 },
4496 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4498 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4499 { 0x680c, 0, 0xffffffff, 0x00000000 },
4500 { 0x6810, 0, 0xffffffff, 0x00000000 },
4501 { 0x6814, 0, 0xffffffff, 0x00000000 },
4502 { 0x6818, 0, 0xffffffff, 0x00000000 },
4503 { 0x681c, 0, 0xffffffff, 0x00000000 },
4504 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4505 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4506 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4507 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4508 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4509 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4510 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4511 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4512 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4513 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4514 { 0x684c, 0, 0xffffffff, 0x00000000 },
4515 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4516 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4517 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4518 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4519 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4520 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4522 { 0xffff, 0, 0x00000000, 0x00000000 },
4525 ret = 0;
4526 is_5709 = 0;
4527 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4528 is_5709 = 1;
4530 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4531 u32 offset, rw_mask, ro_mask, save_val, val;
4532 u16 flags = reg_tbl[i].flags;
4534 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4535 continue;
4537 offset = (u32) reg_tbl[i].offset;
4538 rw_mask = reg_tbl[i].rw_mask;
4539 ro_mask = reg_tbl[i].ro_mask;
4541 save_val = readl(bp->regview + offset);
4543 writel(0, bp->regview + offset);
4545 val = readl(bp->regview + offset);
4546 if ((val & rw_mask) != 0) {
4547 goto reg_test_err;
4550 if ((val & ro_mask) != (save_val & ro_mask)) {
4551 goto reg_test_err;
4554 writel(0xffffffff, bp->regview + offset);
4556 val = readl(bp->regview + offset);
4557 if ((val & rw_mask) != rw_mask) {
4558 goto reg_test_err;
4561 if ((val & ro_mask) != (save_val & ro_mask)) {
4562 goto reg_test_err;
4565 writel(save_val, bp->regview + offset);
4566 continue;
4568 reg_test_err:
4569 writel(save_val, bp->regview + offset);
4570 ret = -ENODEV;
4571 break;
4573 return ret;
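/* Memory self-test: each on-chip RAM range is written word-by-word
 * through the indirect register window with a set of alternating bit
 * patterns (all-zeros, all-ones, 0x55/0xaa variants) and read back,
 * catching both stuck-at-0 and stuck-at-1 bits.
 */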
4576 static int
4577 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4579 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4580 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4581 int i;
4583 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4584 u32 offset;
4586 for (offset = 0; offset < size; offset += 4) {
4588 REG_WR_IND(bp, start + offset, test_pattern[i]);
4590 if (REG_RD_IND(bp, start + offset) !=
4591 test_pattern[i]) {
4592 return -ENODEV;
4596 return 0;
4599 static int
4600 bnx2_test_memory(struct bnx2 *bp)
4602 int ret = 0;
4603 int i;
4604 static struct mem_entry {
4605 u32 offset;
4606 u32 len;
4607 } mem_tbl_5706[] = {
4608 { 0x60000, 0x4000 },
4609 { 0xa0000, 0x3000 },
4610 { 0xe0000, 0x4000 },
4611 { 0x120000, 0x4000 },
4612 { 0x1a0000, 0x4000 },
4613 { 0x160000, 0x4000 },
4614 { 0xffffffff, 0 },
4616 mem_tbl_5709[] = {
4617 { 0x60000, 0x4000 },
4618 { 0xa0000, 0x3000 },
4619 { 0xe0000, 0x4000 },
4620 { 0x120000, 0x4000 },
4621 { 0x1a0000, 0x4000 },
4622 { 0xffffffff, 0 },
4624 struct mem_entry *mem_tbl;
4626 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4627 mem_tbl = mem_tbl_5709;
4628 else
4629 mem_tbl = mem_tbl_5706;
4631 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4632 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4633 mem_tbl[i].len)) != 0) {
4634 return ret;
4638 return ret;
4641 #define BNX2_MAC_LOOPBACK 0
4642 #define BNX2_PHY_LOOPBACK 1
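/* Loopback test outline: build a 1514-byte frame addressed to our own
 * MAC, hand-queue it on the TX ring, force a coalesce event so the
 * status block updates, then require that exactly one packet appears
 * on the RX ring with no l2_fhdr error bits, the expected length, and
 * an intact payload (byte i == i & 0xff past the 14-byte header).
 */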
4644 static int
4645 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
4647 unsigned int pkt_size, num_pkts, i;
4648 struct sk_buff *skb, *rx_skb;
4649 unsigned char *packet;
4650 u16 rx_start_idx, rx_idx;
4651 dma_addr_t map;
4652 struct tx_bd *txbd;
4653 struct sw_bd *rx_buf;
4654 struct l2_fhdr *rx_hdr;
4655 int ret = -ENODEV;
4657 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4658 bp->loopback = MAC_LOOPBACK;
4659 bnx2_set_mac_loopback(bp);
4661 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
4662 bp->loopback = PHY_LOOPBACK;
4663 bnx2_set_phy_loopback(bp);
4665 else
4666 return -EINVAL;
4668 pkt_size = 1514;
4669 skb = netdev_alloc_skb(bp->dev, pkt_size);
4670 if (!skb)
4671 return -ENOMEM;
4672 packet = skb_put(skb, pkt_size);
4673 memcpy(packet, bp->dev->dev_addr, 6);
4674 memset(packet + 6, 0x0, 8);
4675 for (i = 14; i < pkt_size; i++)
4676 packet[i] = (unsigned char) (i & 0xff);
4678 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4679 PCI_DMA_TODEVICE);
4681 REG_WR(bp, BNX2_HC_COMMAND,
4682 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4684 REG_RD(bp, BNX2_HC_COMMAND);
4686 udelay(5);
4687 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4689 num_pkts = 0;
4691 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4693 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4694 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4695 txbd->tx_bd_mss_nbytes = pkt_size;
4696 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4698 num_pkts++;
4699 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4700 bp->tx_prod_bseq += pkt_size;
4702 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4703 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4705 udelay(100);
4707 REG_WR(bp, BNX2_HC_COMMAND,
4708 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4710 REG_RD(bp, BNX2_HC_COMMAND);
4712 udelay(5);
4714 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4715 dev_kfree_skb(skb);
4717 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4718 goto loopback_test_done;
4721 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4722 if (rx_idx != rx_start_idx + num_pkts) {
4723 goto loopback_test_done;
4726 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4727 rx_skb = rx_buf->skb;
4729 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4730 skb_reserve(rx_skb, bp->rx_offset);
4732 pci_dma_sync_single_for_cpu(bp->pdev,
4733 pci_unmap_addr(rx_buf, mapping),
4734 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4736 if (rx_hdr->l2_fhdr_status &
4737 (L2_FHDR_ERRORS_BAD_CRC |
4738 L2_FHDR_ERRORS_PHY_DECODE |
4739 L2_FHDR_ERRORS_ALIGNMENT |
4740 L2_FHDR_ERRORS_TOO_SHORT |
4741 L2_FHDR_ERRORS_GIANT_FRAME)) {
4743 goto loopback_test_done;
4746 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4747 goto loopback_test_done;
4750 for (i = 14; i < pkt_size; i++) {
4751 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4752 goto loopback_test_done;
4756 ret = 0;
4758 loopback_test_done:
4759 bp->loopback = 0;
4760 return ret;
4763 #define BNX2_MAC_LOOPBACK_FAILED 1
4764 #define BNX2_PHY_LOOPBACK_FAILED 2
4765 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4766 BNX2_PHY_LOOPBACK_FAILED)
4768 static int
4769 bnx2_test_loopback(struct bnx2 *bp)
4771 int rc = 0;
4773 if (!netif_running(bp->dev))
4774 return BNX2_LOOPBACK_FAILED;
4776 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4777 spin_lock_bh(&bp->phy_lock);
4778 bnx2_init_phy(bp);
4779 spin_unlock_bh(&bp->phy_lock);
4780 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4781 rc |= BNX2_MAC_LOOPBACK_FAILED;
4782 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4783 rc |= BNX2_PHY_LOOPBACK_FAILED;
4784 return rc;
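/* NVRAM self-test: after checking the 0x669955aa signature word, two
 * 0x100-byte blocks are read from offset 0x100.  Each block appears to
 * end with its stored CRC, so running ether_crc_le() across the whole
 * block (data plus CRC) must yield the fixed CRC-32 residual
 * 0xdebb20e3; comparing against that constant avoids having to locate
 * and recompute per-block checksums.
 */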
4787 #define NVRAM_SIZE 0x200
4788 #define CRC32_RESIDUAL 0xdebb20e3
4790 static int
4791 bnx2_test_nvram(struct bnx2 *bp)
4793 u32 buf[NVRAM_SIZE / 4];
4794 u8 *data = (u8 *) buf;
4795 int rc = 0;
4796 u32 magic, csum;
4798 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4799 goto test_nvram_done;
4801 magic = be32_to_cpu(buf[0]);
4802 if (magic != 0x669955aa) {
4803 rc = -ENODEV;
4804 goto test_nvram_done;
4807 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4808 goto test_nvram_done;
4810 csum = ether_crc_le(0x100, data);
4811 if (csum != CRC32_RESIDUAL) {
4812 rc = -ENODEV;
4813 goto test_nvram_done;
4816 csum = ether_crc_le(0x100, data + 0x100);
4817 if (csum != CRC32_RESIDUAL) {
4818 rc = -ENODEV;
4821 test_nvram_done:
4822 return rc;
4825 static int
4826 bnx2_test_link(struct bnx2 *bp)
4828 u32 bmsr;
4830 spin_lock_bh(&bp->phy_lock);
4831 bnx2_enable_bmsr1(bp);
4832 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4833 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4834 bnx2_disable_bmsr1(bp);
4835 spin_unlock_bh(&bp->phy_lock);
4837 if (bmsr & BMSR_LSTATUS) {
4838 return 0;
4840 return -ENODEV;
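/* Interrupt self-test: sample the status-block index from
 * BNX2_PCICFG_INT_ACK_CMD, ask the host coalescing block for an
 * immediate interrupt (COAL_NOW), then poll for up to ~100 ms for the
 * index to move; if it never changes, no interrupt reached the host.
 */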
4843 static int
4844 bnx2_test_intr(struct bnx2 *bp)
4846 int i;
4847 u16 status_idx;
4849 if (!netif_running(bp->dev))
4850 return -ENODEV;
4852 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4854 /* This register is not touched during run-time. */
4855 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4856 REG_RD(bp, BNX2_HC_COMMAND);
4858 for (i = 0; i < 10; i++) {
4859 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4860 status_idx) {
4862 break;
4865 msleep_interruptible(10);
4867 if (i < 10)
4868 return 0;
4870 return -ENODEV;
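/* The two serdes timers implement parallel detection.  On the 5706,
 * while autoneg is on but the link stays down, PHY regs 0x1c/0x15 are
 * polled for signal-detect without received CONFIG codes; if seen,
 * 1000/full is forced with autoneg off and PHY_PARALLEL_DETECT_FLAG
 * set.  Once the peer later sends CONFIG codes, autoneg is restored.
 * The 5708 variant instead alternates between forced 2.5G and normal
 * autoneg while waiting for link.
 */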
4873 static void
4874 bnx2_5706_serdes_timer(struct bnx2 *bp)
4876 spin_lock(&bp->phy_lock);
4877 if (bp->serdes_an_pending)
4878 bp->serdes_an_pending--;
4879 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4880 u32 bmcr;
4882 bp->current_interval = bp->timer_interval;
4884 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4886 if (bmcr & BMCR_ANENABLE) {
4887 u32 phy1, phy2;
4889 bnx2_write_phy(bp, 0x1c, 0x7c00);
4890 bnx2_read_phy(bp, 0x1c, &phy1);
4892 bnx2_write_phy(bp, 0x17, 0x0f01);
4893 bnx2_read_phy(bp, 0x15, &phy2);
4894 bnx2_write_phy(bp, 0x17, 0x0f01);
4895 bnx2_read_phy(bp, 0x15, &phy2);
4897 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4898 !(phy2 & 0x20)) { /* no CONFIG */
4900 bmcr &= ~BMCR_ANENABLE;
4901 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4902 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4903 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4907 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4908 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4909 u32 phy2;
4911 bnx2_write_phy(bp, 0x17, 0x0f01);
4912 bnx2_read_phy(bp, 0x15, &phy2);
4913 if (phy2 & 0x20) {
4914 u32 bmcr;
4916 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4917 bmcr |= BMCR_ANENABLE;
4918 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4920 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4922 } else
4923 bp->current_interval = bp->timer_interval;
4925 spin_unlock(&bp->phy_lock);
4928 static void
4929 bnx2_5708_serdes_timer(struct bnx2 *bp)
4931 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4932 return;
4934 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4935 bp->serdes_an_pending = 0;
4936 return;
4939 spin_lock(&bp->phy_lock);
4940 if (bp->serdes_an_pending)
4941 bp->serdes_an_pending--;
4942 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4943 u32 bmcr;
4945 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4946 if (bmcr & BMCR_ANENABLE) {
4947 bnx2_enable_forced_2g5(bp);
4948 bp->current_interval = SERDES_FORCED_TIMEOUT;
4949 } else {
4950 bnx2_disable_forced_2g5(bp);
4951 bp->serdes_an_pending = 2;
4952 bp->current_interval = bp->timer_interval;
4955 } else
4956 bp->current_interval = bp->timer_interval;
4958 spin_unlock(&bp->phy_lock);
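/* bnx2_timer() is the periodic driver heartbeat: it pings the bootcode
 * so management firmware knows the driver is alive, refreshes the
 * firmware RX-drop counter, forces a statistics DMA on the 5708, and
 * runs the serdes poller matching the chip before re-arming itself
 * with bp->current_interval.
 */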
4961 static void
4962 bnx2_timer(unsigned long data)
4964 struct bnx2 *bp = (struct bnx2 *) data;
4966 if (!netif_running(bp->dev))
4967 return;
4969 if (atomic_read(&bp->intr_sem) != 0)
4970 goto bnx2_restart_timer;
4972 bnx2_send_heart_beat(bp);
4974 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4976 /* work around occasionally corrupted counters */
4977 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
4978 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
4979 BNX2_HC_COMMAND_STATS_NOW);
4981 if (bp->phy_flags & PHY_SERDES_FLAG) {
4982 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4983 bnx2_5706_serdes_timer(bp);
4984 else
4985 bnx2_5708_serdes_timer(bp);
4988 bnx2_restart_timer:
4989 mod_timer(&bp->timer, jiffies + bp->current_interval);
4992 static int
4993 bnx2_request_irq(struct bnx2 *bp)
4995 struct net_device *dev = bp->dev;
4996 int rc = 0;
4998 if (bp->flags & USING_MSI_FLAG) {
4999 irq_handler_t fn = bnx2_msi;
5001 if (bp->flags & ONE_SHOT_MSI_FLAG)
5002 fn = bnx2_msi_1shot;
5004 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
5005 } else
5006 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
5007 IRQF_SHARED, dev->name, dev);
5008 return rc;
5011 static void
5012 bnx2_free_irq(struct bnx2 *bp)
5014 struct net_device *dev = bp->dev;
5016 if (bp->flags & USING_MSI_FLAG) {
5017 free_irq(bp->pdev->irq, dev);
5018 pci_disable_msi(bp->pdev);
5019 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
5020 } else
5021 free_irq(bp->pdev->irq, dev);
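/* bnx2_open() enables MSI when the chip supports it, then uses
 * bnx2_test_intr() to verify an interrupt actually arrives; some
 * chipsets advertise MSI but never deliver the message, in which case
 * the NIC is torn down and re-initialized in shared INTx mode.
 */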
5024 /* Called with rtnl_lock */
5025 static int
5026 bnx2_open(struct net_device *dev)
5028 struct bnx2 *bp = netdev_priv(dev);
5029 int rc;
5031 netif_carrier_off(dev);
5033 bnx2_set_power_state(bp, PCI_D0);
5034 bnx2_disable_int(bp);
5036 rc = bnx2_alloc_mem(bp);
5037 if (rc)
5038 return rc;
5040 if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
5041 if (pci_enable_msi(bp->pdev) == 0) {
5042 bp->flags |= USING_MSI_FLAG;
5043 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5044 bp->flags |= ONE_SHOT_MSI_FLAG;
5047 rc = bnx2_request_irq(bp);
5049 if (rc) {
5050 bnx2_free_mem(bp);
5051 return rc;
5054 rc = bnx2_init_nic(bp);
5056 if (rc) {
5057 bnx2_free_irq(bp);
5058 bnx2_free_skbs(bp);
5059 bnx2_free_mem(bp);
5060 return rc;
5063 mod_timer(&bp->timer, jiffies + bp->current_interval);
5065 atomic_set(&bp->intr_sem, 0);
5067 bnx2_enable_int(bp);
5069 if (bp->flags & USING_MSI_FLAG) {
5070 /* Test MSI to make sure it is working
5071 * If MSI test fails, go back to INTx mode
5072 */
5073 if (bnx2_test_intr(bp) != 0) {
5074 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5075 " using MSI, switching to INTx mode. Please"
5076 " report this failure to the PCI maintainer"
5077 " and include system chipset information.\n",
5078 bp->dev->name);
5080 bnx2_disable_int(bp);
5081 bnx2_free_irq(bp);
5083 rc = bnx2_init_nic(bp);
5085 if (!rc)
5086 rc = bnx2_request_irq(bp);
5088 if (rc) {
5089 bnx2_free_skbs(bp);
5090 bnx2_free_mem(bp);
5091 del_timer_sync(&bp->timer);
5092 return rc;
5094 bnx2_enable_int(bp);
5097 if (bp->flags & USING_MSI_FLAG) {
5098 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5101 netif_start_queue(dev);
5103 return 0;
5106 static void
5107 bnx2_reset_task(struct work_struct *work)
5109 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5111 if (!netif_running(bp->dev))
5112 return;
5114 bp->in_reset_task = 1;
5115 bnx2_netif_stop(bp);
5117 bnx2_init_nic(bp);
5119 atomic_set(&bp->intr_sem, 1);
5120 bnx2_netif_start(bp);
5121 bp->in_reset_task = 0;
5124 static void
5125 bnx2_tx_timeout(struct net_device *dev)
5127 struct bnx2 *bp = netdev_priv(dev);
5129 /* This allows the netif to be shut down gracefully before resetting */
5130 schedule_work(&bp->reset_task);
5133 #ifdef BCM_VLAN
5134 /* Called with rtnl_lock */
5135 static void
5136 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5138 struct bnx2 *bp = netdev_priv(dev);
5140 bnx2_netif_stop(bp);
5142 bp->vlgrp = vlgrp;
5143 bnx2_set_rx_mode(dev);
5145 bnx2_netif_start(bp);
5147 #endif
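/* TSO setup in bnx2_start_xmit(): for IPv4 LSO the driver zeroes
 * iph->check, rewrites tot_len to cover the headers plus one MSS of
 * payload, and seeds the TCP checksum with the pseudo-header sum so
 * the chip can finish it per segment; IP and TCP option lengths are
 * folded into the BD flags.  For IPv6 LSO the transport-header offset
 * is instead encoded into the TCP6_OFF* flag and mss fields.
 */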
5149 /* Called with netif_tx_lock.
5150 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5151 * netif_wake_queue().
5152 */
5153 static int
5154 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5156 struct bnx2 *bp = netdev_priv(dev);
5157 dma_addr_t mapping;
5158 struct tx_bd *txbd;
5159 struct sw_bd *tx_buf;
5160 u32 len, vlan_tag_flags, last_frag, mss;
5161 u16 prod, ring_prod;
5162 int i;
5164 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
5165 netif_stop_queue(dev);
5166 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5167 dev->name);
5169 return NETDEV_TX_BUSY;
5171 len = skb_headlen(skb);
5172 prod = bp->tx_prod;
5173 ring_prod = TX_RING_IDX(prod);
5175 vlan_tag_flags = 0;
5176 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5177 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5180 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
5181 vlan_tag_flags |=
5182 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5184 if ((mss = skb_shinfo(skb)->gso_size)) {
5185 u32 tcp_opt_len, ip_tcp_len;
5186 struct iphdr *iph;
5188 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5190 tcp_opt_len = tcp_optlen(skb);
5192 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5193 u32 tcp_off = skb_transport_offset(skb) -
5194 sizeof(struct ipv6hdr) - ETH_HLEN;
5196 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5197 TX_BD_FLAGS_SW_FLAGS;
5198 if (likely(tcp_off == 0))
5199 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5200 else {
5201 tcp_off >>= 3;
5202 vlan_tag_flags |= ((tcp_off & 0x3) <<
5203 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5204 ((tcp_off & 0x10) <<
5205 TX_BD_FLAGS_TCP6_OFF4_SHL);
5206 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5208 } else {
5209 if (skb_header_cloned(skb) &&
5210 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5211 dev_kfree_skb(skb);
5212 return NETDEV_TX_OK;
5215 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5217 iph = ip_hdr(skb);
5218 iph->check = 0;
5219 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5220 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5221 iph->daddr, 0,
5222 IPPROTO_TCP,
5223 0);
5224 if (tcp_opt_len || (iph->ihl > 5)) {
5225 vlan_tag_flags |= ((iph->ihl - 5) +
5226 (tcp_opt_len >> 2)) << 8;
5229 } else
5230 mss = 0;
5232 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5234 tx_buf = &bp->tx_buf_ring[ring_prod];
5235 tx_buf->skb = skb;
5236 pci_unmap_addr_set(tx_buf, mapping, mapping);
5238 txbd = &bp->tx_desc_ring[ring_prod];
5240 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5241 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5242 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5243 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5245 last_frag = skb_shinfo(skb)->nr_frags;
5247 for (i = 0; i < last_frag; i++) {
5248 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5250 prod = NEXT_TX_BD(prod);
5251 ring_prod = TX_RING_IDX(prod);
5252 txbd = &bp->tx_desc_ring[ring_prod];
5254 len = frag->size;
5255 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5256 len, PCI_DMA_TODEVICE);
5257 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5258 mapping, mapping);
5260 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5261 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5262 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5263 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5266 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5268 prod = NEXT_TX_BD(prod);
5269 bp->tx_prod_bseq += skb->len;
5271 REG_WR16(bp, bp->tx_bidx_addr, prod);
5272 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
5274 mmiowb();
5276 bp->tx_prod = prod;
5277 dev->trans_start = jiffies;
5279 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
5280 netif_stop_queue(dev);
5281 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
5282 netif_wake_queue(dev);
5285 return NETDEV_TX_OK;
5288 /* Called with rtnl_lock */
5289 static int
5290 bnx2_close(struct net_device *dev)
5292 struct bnx2 *bp = netdev_priv(dev);
5293 u32 reset_code;
5295 /* Calling flush_scheduled_work() may deadlock because
5296 * linkwatch_event() may be on the workqueue and it will try to get
5297 * the rtnl_lock which we are holding.
5298 */
5299 while (bp->in_reset_task)
5300 msleep(1);
5302 bnx2_netif_stop(bp);
5303 del_timer_sync(&bp->timer);
5304 if (bp->flags & NO_WOL_FLAG)
5305 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5306 else if (bp->wol)
5307 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5308 else
5309 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5310 bnx2_reset_chip(bp, reset_code);
5311 bnx2_free_irq(bp);
5312 bnx2_free_skbs(bp);
5313 bnx2_free_mem(bp);
5314 bp->link_up = 0;
5315 netif_carrier_off(bp->dev);
5316 bnx2_set_power_state(bp, PCI_D3hot);
5317 return 0;
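/* The chip keeps 64-bit counters as {hi,lo} 32-bit pairs.  On 64-bit
 * kernels GET_NET_STATS folds both halves into one unsigned long; on
 * 32-bit kernels only the low word fits.  For a counter named
 * stat_IfHCInOctets the 64-bit form expands to roughly:
 *
 *	((unsigned long) stat_IfHCInOctets_hi << 32) +
 *	 (unsigned long) stat_IfHCInOctets_lo
 */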
5320 #define GET_NET_STATS64(ctr) \
5321 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5322 (unsigned long) (ctr##_lo)
5324 #define GET_NET_STATS32(ctr) \
5325 (ctr##_lo)
5327 #if (BITS_PER_LONG == 64)
5328 #define GET_NET_STATS GET_NET_STATS64
5329 #else
5330 #define GET_NET_STATS GET_NET_STATS32
5331 #endif
5333 static struct net_device_stats *
5334 bnx2_get_stats(struct net_device *dev)
5336 struct bnx2 *bp = netdev_priv(dev);
5337 struct statistics_block *stats_blk = bp->stats_blk;
5338 struct net_device_stats *net_stats = &bp->net_stats;
5340 if (bp->stats_blk == NULL) {
5341 return net_stats;
5343 net_stats->rx_packets =
5344 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5345 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5346 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5348 net_stats->tx_packets =
5349 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5350 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5351 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5353 net_stats->rx_bytes =
5354 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5356 net_stats->tx_bytes =
5357 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5359 net_stats->multicast =
5360 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5362 net_stats->collisions =
5363 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5365 net_stats->rx_length_errors =
5366 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5367 stats_blk->stat_EtherStatsOverrsizePkts);
5369 net_stats->rx_over_errors =
5370 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5372 net_stats->rx_frame_errors =
5373 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5375 net_stats->rx_crc_errors =
5376 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5378 net_stats->rx_errors = net_stats->rx_length_errors +
5379 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5380 net_stats->rx_crc_errors;
5382 net_stats->tx_aborted_errors =
5383 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5384 stats_blk->stat_Dot3StatsLateCollisions);
5386 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5387 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5388 net_stats->tx_carrier_errors = 0;
5389 else {
5390 net_stats->tx_carrier_errors =
5391 (unsigned long)
5392 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5395 net_stats->tx_errors =
5396 (unsigned long)
5397 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5398 +
5399 net_stats->tx_aborted_errors +
5400 net_stats->tx_carrier_errors;
5402 net_stats->rx_missed_errors =
5403 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5404 stats_blk->stat_FwRxDrop);
5406 return net_stats;
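/* ethtool get/set_settings: the supported mask is built from the PHY
 * type (serdes, copper, or both when a remote PHY reports the port),
 * and bnx2_set_settings() only accepts autoneg (optionally restricted
 * to a single advertised speed) or a forced speed/duplex the PHY can
 * actually do (2.5G only on capable serdes parts) before re-running
 * bnx2_setup_phy().
 */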
5409 /* All ethtool functions called with rtnl_lock */
5411 static int
5412 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5414 struct bnx2 *bp = netdev_priv(dev);
5415 int support_serdes = 0, support_copper = 0;
5417 cmd->supported = SUPPORTED_Autoneg;
5418 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5419 support_serdes = 1;
5420 support_copper = 1;
5421 } else if (bp->phy_port == PORT_FIBRE)
5422 support_serdes = 1;
5423 else
5424 support_copper = 1;
5426 if (support_serdes) {
5427 cmd->supported |= SUPPORTED_1000baseT_Full |
5428 SUPPORTED_FIBRE;
5429 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5430 cmd->supported |= SUPPORTED_2500baseX_Full;
5433 if (support_copper) {
5434 cmd->supported |= SUPPORTED_10baseT_Half |
5435 SUPPORTED_10baseT_Full |
5436 SUPPORTED_100baseT_Half |
5437 SUPPORTED_100baseT_Full |
5438 SUPPORTED_1000baseT_Full |
5439 SUPPORTED_TP;
5443 spin_lock_bh(&bp->phy_lock);
5444 cmd->port = bp->phy_port;
5445 cmd->advertising = bp->advertising;
5447 if (bp->autoneg & AUTONEG_SPEED) {
5448 cmd->autoneg = AUTONEG_ENABLE;
5450 else {
5451 cmd->autoneg = AUTONEG_DISABLE;
5454 if (netif_carrier_ok(dev)) {
5455 cmd->speed = bp->line_speed;
5456 cmd->duplex = bp->duplex;
5458 else {
5459 cmd->speed = -1;
5460 cmd->duplex = -1;
5462 spin_unlock_bh(&bp->phy_lock);
5464 cmd->transceiver = XCVR_INTERNAL;
5465 cmd->phy_address = bp->phy_addr;
5467 return 0;
5470 static int
5471 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5473 struct bnx2 *bp = netdev_priv(dev);
5474 u8 autoneg = bp->autoneg;
5475 u8 req_duplex = bp->req_duplex;
5476 u16 req_line_speed = bp->req_line_speed;
5477 u32 advertising = bp->advertising;
5478 int err = -EINVAL;
5480 spin_lock_bh(&bp->phy_lock);
5482 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
5483 goto err_out_unlock;
5485 if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
5486 goto err_out_unlock;
5488 if (cmd->autoneg == AUTONEG_ENABLE) {
5489 autoneg |= AUTONEG_SPEED;
5491 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
5493 /* allow advertising 1 speed */
5494 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5495 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5496 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5497 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5499 if (cmd->port == PORT_FIBRE)
5500 goto err_out_unlock;
5502 advertising = cmd->advertising;
5504 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
5505 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
5506 (cmd->port == PORT_TP))
5507 goto err_out_unlock;
5508 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
5509 advertising = cmd->advertising;
5510 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
5511 goto err_out_unlock;
5512 else {
5513 if (cmd->port == PORT_FIBRE)
5514 advertising = ETHTOOL_ALL_FIBRE_SPEED;
5515 else
5516 advertising = ETHTOOL_ALL_COPPER_SPEED;
5518 advertising |= ADVERTISED_Autoneg;
5520 else {
5521 if (cmd->port == PORT_FIBRE) {
5522 if ((cmd->speed != SPEED_1000 &&
5523 cmd->speed != SPEED_2500) ||
5524 (cmd->duplex != DUPLEX_FULL))
5525 goto err_out_unlock;
5527 if (cmd->speed == SPEED_2500 &&
5528 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5529 goto err_out_unlock;
5531 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
5532 goto err_out_unlock;
5534 autoneg &= ~AUTONEG_SPEED;
5535 req_line_speed = cmd->speed;
5536 req_duplex = cmd->duplex;
5537 advertising = 0;
5540 bp->autoneg = autoneg;
5541 bp->advertising = advertising;
5542 bp->req_line_speed = req_line_speed;
5543 bp->req_duplex = req_duplex;
5545 err = bnx2_setup_phy(bp, cmd->port);
5547 err_out_unlock:
5548 spin_unlock_bh(&bp->phy_lock);
5550 return err;
5553 static void
5554 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5556 struct bnx2 *bp = netdev_priv(dev);
5558 strcpy(info->driver, DRV_MODULE_NAME);
5559 strcpy(info->version, DRV_MODULE_VERSION);
5560 strcpy(info->bus_info, pci_name(bp->pdev));
5561 strcpy(info->fw_version, bp->fw_version);
5564 #define BNX2_REGDUMP_LEN (32 * 1024)
5566 static int
5567 bnx2_get_regs_len(struct net_device *dev)
5569 return BNX2_REGDUMP_LEN;
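/* Register dump: reg_boundaries[] is a flat list of {start, end}
 * offsets of readable register ranges.  bnx2_get_regs() copies each
 * readable word to its natural offset in the output buffer and skips
 * the holes between ranges, leaving them zero-filled, so the 32 KB
 * dump mirrors the chip's register map.
 */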
5572 static void
5573 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5575 u32 *p = _p, i, offset;
5576 u8 *orig_p = _p;
5577 struct bnx2 *bp = netdev_priv(dev);
5578 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5579 0x0800, 0x0880, 0x0c00, 0x0c10,
5580 0x0c30, 0x0d08, 0x1000, 0x101c,
5581 0x1040, 0x1048, 0x1080, 0x10a4,
5582 0x1400, 0x1490, 0x1498, 0x14f0,
5583 0x1500, 0x155c, 0x1580, 0x15dc,
5584 0x1600, 0x1658, 0x1680, 0x16d8,
5585 0x1800, 0x1820, 0x1840, 0x1854,
5586 0x1880, 0x1894, 0x1900, 0x1984,
5587 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5588 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5589 0x2000, 0x2030, 0x23c0, 0x2400,
5590 0x2800, 0x2820, 0x2830, 0x2850,
5591 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5592 0x3c00, 0x3c94, 0x4000, 0x4010,
5593 0x4080, 0x4090, 0x43c0, 0x4458,
5594 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5595 0x4fc0, 0x5010, 0x53c0, 0x5444,
5596 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5597 0x5fc0, 0x6000, 0x6400, 0x6428,
5598 0x6800, 0x6848, 0x684c, 0x6860,
5599 0x6888, 0x6910, 0x8000 };
5601 regs->version = 0;
5603 memset(p, 0, BNX2_REGDUMP_LEN);
5605 if (!netif_running(bp->dev))
5606 return;
5608 i = 0;
5609 offset = reg_boundaries[0];
5610 p += offset;
5611 while (offset < BNX2_REGDUMP_LEN) {
5612 *p++ = REG_RD(bp, offset);
5613 offset += 4;
5614 if (offset == reg_boundaries[i + 1]) {
5615 offset = reg_boundaries[i + 2];
5616 p = (u32 *) (orig_p + offset);
5617 i += 2;
5622 static void
5623 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5625 struct bnx2 *bp = netdev_priv(dev);
5627 if (bp->flags & NO_WOL_FLAG) {
5628 wol->supported = 0;
5629 wol->wolopts = 0;
5631 else {
5632 wol->supported = WAKE_MAGIC;
5633 if (bp->wol)
5634 wol->wolopts = WAKE_MAGIC;
5635 else
5636 wol->wolopts = 0;
5638 memset(&wol->sopass, 0, sizeof(wol->sopass));
5641 static int
5642 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5644 struct bnx2 *bp = netdev_priv(dev);
5646 if (wol->wolopts & ~WAKE_MAGIC)
5647 return -EINVAL;
5649 if (wol->wolopts & WAKE_MAGIC) {
5650 if (bp->flags & NO_WOL_FLAG)
5651 return -EINVAL;
5653 bp->wol = 1;
5655 else {
5656 bp->wol = 0;
5658 return 0;
5661 static int
5662 bnx2_nway_reset(struct net_device *dev)
5664 struct bnx2 *bp = netdev_priv(dev);
5665 u32 bmcr;
5667 if (!(bp->autoneg & AUTONEG_SPEED)) {
5668 return -EINVAL;
5671 spin_lock_bh(&bp->phy_lock);
5673 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5674 int rc;
5676 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
5677 spin_unlock_bh(&bp->phy_lock);
5678 return rc;
5681 /* Force a link down visible on the other side */
5682 if (bp->phy_flags & PHY_SERDES_FLAG) {
5683 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
5684 spin_unlock_bh(&bp->phy_lock);
5686 msleep(20);
5688 spin_lock_bh(&bp->phy_lock);
5690 bp->current_interval = SERDES_AN_TIMEOUT;
5691 bp->serdes_an_pending = 1;
5692 mod_timer(&bp->timer, jiffies + bp->current_interval);
5695 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5696 bmcr &= ~BMCR_LOOPBACK;
5697 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
5699 spin_unlock_bh(&bp->phy_lock);
5701 return 0;
5704 static int
5705 bnx2_get_eeprom_len(struct net_device *dev)
5707 struct bnx2 *bp = netdev_priv(dev);
5709 if (bp->flash_info == NULL)
5710 return 0;
5712 return (int) bp->flash_size;
5715 static int
5716 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5717 u8 *eebuf)
5719 struct bnx2 *bp = netdev_priv(dev);
5720 int rc;
5722 /* parameters already validated in ethtool_get_eeprom */
5724 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5726 return rc;
5729 static int
5730 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5731 u8 *eebuf)
5733 struct bnx2 *bp = netdev_priv(dev);
5734 int rc;
5736 /* parameters already validated in ethtool_set_eeprom */
5738 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5740 return rc;
5743 static int
5744 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5746 struct bnx2 *bp = netdev_priv(dev);
5748 memset(coal, 0, sizeof(struct ethtool_coalesce));
5750 coal->rx_coalesce_usecs = bp->rx_ticks;
5751 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5752 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5753 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5755 coal->tx_coalesce_usecs = bp->tx_ticks;
5756 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5757 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5758 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5760 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5762 return 0;
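/* bnx2_set_coalesce() clamps tick values to the 10-bit host
 * coalescing fields (0x3ff) and frame counts to 8 bits (0xff); on the
 * 5708 the statistics interval is pinned to one second, and a running
 * NIC is re-initialized so the new parameters take effect.
 */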
5765 static int
5766 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5768 struct bnx2 *bp = netdev_priv(dev);
5770 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5771 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5773 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5774 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5776 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5777 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5779 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5780 if (bp->rx_quick_cons_trip_int > 0xff)
5781 bp->rx_quick_cons_trip_int = 0xff;
5783 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5784 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5786 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5787 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5789 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5790 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5792 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5793 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5794 0xff;
5796 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5797 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5798 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
5799 bp->stats_ticks = USEC_PER_SEC;
5801 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
5802 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
5803 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
5805 if (netif_running(bp->dev)) {
5806 bnx2_netif_stop(bp);
5807 bnx2_init_nic(bp);
5808 bnx2_netif_start(bp);
5811 return 0;
5814 static void
5815 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5817 struct bnx2 *bp = netdev_priv(dev);
5819 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5820 ering->rx_mini_max_pending = 0;
5821 ering->rx_jumbo_max_pending = 0;
5823 ering->rx_pending = bp->rx_ring_size;
5824 ering->rx_mini_pending = 0;
5825 ering->rx_jumbo_pending = 0;
5827 ering->tx_max_pending = MAX_TX_DESC_CNT;
5828 ering->tx_pending = bp->tx_ring_size;
5831 static int
5832 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5834 struct bnx2 *bp = netdev_priv(dev);
5836 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5837 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5838 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5840 return -EINVAL;
5842 if (netif_running(bp->dev)) {
5843 bnx2_netif_stop(bp);
5844 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5845 bnx2_free_skbs(bp);
5846 bnx2_free_mem(bp);
5849 bnx2_set_rx_ring_size(bp, ering->rx_pending);
5850 bp->tx_ring_size = ering->tx_pending;
5852 if (netif_running(bp->dev)) {
5853 int rc;
5855 rc = bnx2_alloc_mem(bp);
5856 if (rc)
5857 return rc;
5858 bnx2_init_nic(bp);
5859 bnx2_netif_start(bp);
5862 return 0;
5865 static void
5866 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5868 struct bnx2 *bp = netdev_priv(dev);
5870 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5871 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5872 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5875 static int
5876 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5878 struct bnx2 *bp = netdev_priv(dev);
5880 bp->req_flow_ctrl = 0;
5881 if (epause->rx_pause)
5882 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5883 if (epause->tx_pause)
5884 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5886 if (epause->autoneg) {
5887 bp->autoneg |= AUTONEG_FLOW_CTRL;
5889 else {
5890 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5893 spin_lock_bh(&bp->phy_lock);
5895 bnx2_setup_phy(bp, bp->phy_port);
5897 spin_unlock_bh(&bp->phy_lock);
5899 return 0;
5902 static u32
5903 bnx2_get_rx_csum(struct net_device *dev)
5905 struct bnx2 *bp = netdev_priv(dev);
5907 return bp->rx_csum;
5910 static int
5911 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5913 struct bnx2 *bp = netdev_priv(dev);
5915 bp->rx_csum = data;
5916 return 0;
5919 static int
5920 bnx2_set_tso(struct net_device *dev, u32 data)
5922 struct bnx2 *bp = netdev_priv(dev);
5924 if (data) {
5925 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5926 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5927 dev->features |= NETIF_F_TSO6;
5928 } else
5929 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5930 NETIF_F_TSO_ECN);
5931 return 0;
5934 #define BNX2_NUM_STATS 46
5936 static struct {
5937 char string[ETH_GSTRING_LEN];
5938 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5939 { "rx_bytes" },
5940 { "rx_error_bytes" },
5941 { "tx_bytes" },
5942 { "tx_error_bytes" },
5943 { "rx_ucast_packets" },
5944 { "rx_mcast_packets" },
5945 { "rx_bcast_packets" },
5946 { "tx_ucast_packets" },
5947 { "tx_mcast_packets" },
5948 { "tx_bcast_packets" },
5949 { "tx_mac_errors" },
5950 { "tx_carrier_errors" },
5951 { "rx_crc_errors" },
5952 { "rx_align_errors" },
5953 { "tx_single_collisions" },
5954 { "tx_multi_collisions" },
5955 { "tx_deferred" },
5956 { "tx_excess_collisions" },
5957 { "tx_late_collisions" },
5958 { "tx_total_collisions" },
5959 { "rx_fragments" },
5960 { "rx_jabbers" },
5961 { "rx_undersize_packets" },
5962 { "rx_oversize_packets" },
5963 { "rx_64_byte_packets" },
5964 { "rx_65_to_127_byte_packets" },
5965 { "rx_128_to_255_byte_packets" },
5966 { "rx_256_to_511_byte_packets" },
5967 { "rx_512_to_1023_byte_packets" },
5968 { "rx_1024_to_1522_byte_packets" },
5969 { "rx_1523_to_9022_byte_packets" },
5970 { "tx_64_byte_packets" },
5971 { "tx_65_to_127_byte_packets" },
5972 { "tx_128_to_255_byte_packets" },
5973 { "tx_256_to_511_byte_packets" },
5974 { "tx_512_to_1023_byte_packets" },
5975 { "tx_1024_to_1522_byte_packets" },
5976 { "tx_1523_to_9022_byte_packets" },
5977 { "rx_xon_frames" },
5978 { "rx_xoff_frames" },
5979 { "tx_xon_frames" },
5980 { "tx_xoff_frames" },
5981 { "rx_mac_ctrl_frames" },
5982 { "rx_filtered_packets" },
5983 { "rx_discards" },
5984 { "rx_fw_discards" },
5987 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5989 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
5990 STATS_OFFSET32(stat_IfHCInOctets_hi),
5991 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5992 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5993 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5994 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5995 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5996 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5997 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5998 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5999 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
6000 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6001 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6002 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
6003 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
6004 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
6005 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
6006 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6007 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
6008 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
6009 STATS_OFFSET32(stat_EtherStatsCollisions),
6010 STATS_OFFSET32(stat_EtherStatsFragments),
6011 STATS_OFFSET32(stat_EtherStatsJabbers),
6012 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6013 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6014 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6015 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6016 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6017 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6018 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6019 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6020 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6021 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6022 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6023 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6024 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6025 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6026 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6027 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6028 STATS_OFFSET32(stat_XonPauseFramesReceived),
6029 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6030 STATS_OFFSET32(stat_OutXonSent),
6031 STATS_OFFSET32(stat_OutXoffSent),
6032 STATS_OFFSET32(stat_MacControlFramesReceived),
6033 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6034 STATS_OFFSET32(stat_IfInMBUFDiscards),
6035 STATS_OFFSET32(stat_FwRxDrop),
6038 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6039 * skipped because of errata.
6040 */
6041 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
6042 8,0,8,8,8,8,8,8,8,8,
6043 4,0,4,4,4,4,4,4,4,4,
6044 4,4,4,4,4,4,4,4,4,4,
6045 4,4,4,4,4,4,4,4,4,4,
6046 4,4,4,4,4,4,
6049 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6050 8,0,8,8,8,8,8,8,8,8,
6051 4,4,4,4,4,4,4,4,4,4,
6052 4,4,4,4,4,4,4,4,4,4,
6053 4,4,4,4,4,4,4,4,4,4,
6054 4,4,4,4,4,4,
6057 #define BNX2_NUM_TESTS 6
6059 static struct {
6060 char string[ETH_GSTRING_LEN];
6061 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6062 { "register_test (offline)" },
6063 { "memory_test (offline)" },
6064 { "loopback_test (offline)" },
6065 { "nvram_test (online)" },
6066 { "interrupt_test (online)" },
6067 { "link_test (online)" },
6070 static int
6071 bnx2_self_test_count(struct net_device *dev)
6073 return BNX2_NUM_TESTS;
6076 static void
6077 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6079 struct bnx2 *bp = netdev_priv(dev);
6081 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6082 if (etest->flags & ETH_TEST_FL_OFFLINE) {
6083 int i;
6085 bnx2_netif_stop(bp);
6086 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6087 bnx2_free_skbs(bp);
6089 if (bnx2_test_registers(bp) != 0) {
6090 buf[0] = 1;
6091 etest->flags |= ETH_TEST_FL_FAILED;
6093 if (bnx2_test_memory(bp) != 0) {
6094 buf[1] = 1;
6095 etest->flags |= ETH_TEST_FL_FAILED;
6097 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
6098 etest->flags |= ETH_TEST_FL_FAILED;
6100 if (!netif_running(bp->dev)) {
6101 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6103 else {
6104 bnx2_init_nic(bp);
6105 bnx2_netif_start(bp);
6108 /* wait for link up */
6109 for (i = 0; i < 7; i++) {
6110 if (bp->link_up)
6111 break;
6112 msleep_interruptible(1000);
6116 if (bnx2_test_nvram(bp) != 0) {
6117 buf[3] = 1;
6118 etest->flags |= ETH_TEST_FL_FAILED;
6120 if (bnx2_test_intr(bp) != 0) {
6121 buf[4] = 1;
6122 etest->flags |= ETH_TEST_FL_FAILED;
6125 if (bnx2_test_link(bp) != 0) {
6126 buf[5] = 1;
6127 etest->flags |= ETH_TEST_FL_FAILED;
6132 static void
6133 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6135 switch (stringset) {
6136 case ETH_SS_STATS:
6137 memcpy(buf, bnx2_stats_str_arr,
6138 sizeof(bnx2_stats_str_arr));
6139 break;
6140 case ETH_SS_TEST:
6141 memcpy(buf, bnx2_tests_str_arr,
6142 sizeof(bnx2_tests_str_arr));
6143 break;
6147 static int
6148 bnx2_get_stats_count(struct net_device *dev)
6150 return BNX2_NUM_STATS;
6153 static void
6154 bnx2_get_ethtool_stats(struct net_device *dev,
6155 struct ethtool_stats *stats, u64 *buf)
6157 struct bnx2 *bp = netdev_priv(dev);
6158 int i;
6159 u32 *hw_stats = (u32 *) bp->stats_blk;
6160 u8 *stats_len_arr = NULL;
6162 if (hw_stats == NULL) {
6163 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6164 return;
6167 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6168 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6169 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6170 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6171 stats_len_arr = bnx2_5706_stats_len_arr;
6172 else
6173 stats_len_arr = bnx2_5708_stats_len_arr;
6175 for (i = 0; i < BNX2_NUM_STATS; i++) {
6176 if (stats_len_arr[i] == 0) {
6177 /* skip this counter */
6178 buf[i] = 0;
6179 continue;
6181 if (stats_len_arr[i] == 4) {
6182 /* 4-byte counter */
6183 buf[i] = (u64)
6184 *(hw_stats + bnx2_stats_offset_arr[i]);
6185 continue;
6187 /* 8-byte counter */
6188 buf[i] = (((u64) *(hw_stats +
6189 bnx2_stats_offset_arr[i])) << 32) +
6190 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
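/* bnx2_phys_id() services the ethtool LED-identify request: it saves
 * BNX2_MISC_CFG, switches the LEDs to MAC override mode, and toggles
 * between all-off and all-on every 500 ms for the requested number of
 * seconds (default 2) before restoring the saved state.
 */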
6194 static int
6195 bnx2_phys_id(struct net_device *dev, u32 data)
6197 struct bnx2 *bp = netdev_priv(dev);
6198 int i;
6199 u32 save;
6201 if (data == 0)
6202 data = 2;
6204 save = REG_RD(bp, BNX2_MISC_CFG);
6205 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6207 for (i = 0; i < (data * 2); i++) {
6208 if ((i % 2) == 0) {
6209 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6211 else {
6212 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6213 BNX2_EMAC_LED_1000MB_OVERRIDE |
6214 BNX2_EMAC_LED_100MB_OVERRIDE |
6215 BNX2_EMAC_LED_10MB_OVERRIDE |
6216 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6217 BNX2_EMAC_LED_TRAFFIC);
6219 msleep_interruptible(500);
6220 if (signal_pending(current))
6221 break;
6223 REG_WR(bp, BNX2_EMAC_LED, 0);
6224 REG_WR(bp, BNX2_MISC_CFG, save);
6225 return 0;
6228 static int
6229 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6231 struct bnx2 *bp = netdev_priv(dev);
6233 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6234 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6235 else
6236 return (ethtool_op_set_tx_csum(dev, data));
6239 static const struct ethtool_ops bnx2_ethtool_ops = {
6240 .get_settings = bnx2_get_settings,
6241 .set_settings = bnx2_set_settings,
6242 .get_drvinfo = bnx2_get_drvinfo,
6243 .get_regs_len = bnx2_get_regs_len,
6244 .get_regs = bnx2_get_regs,
6245 .get_wol = bnx2_get_wol,
6246 .set_wol = bnx2_set_wol,
6247 .nway_reset = bnx2_nway_reset,
6248 .get_link = ethtool_op_get_link,
6249 .get_eeprom_len = bnx2_get_eeprom_len,
6250 .get_eeprom = bnx2_get_eeprom,
6251 .set_eeprom = bnx2_set_eeprom,
6252 .get_coalesce = bnx2_get_coalesce,
6253 .set_coalesce = bnx2_set_coalesce,
6254 .get_ringparam = bnx2_get_ringparam,
6255 .set_ringparam = bnx2_set_ringparam,
6256 .get_pauseparam = bnx2_get_pauseparam,
6257 .set_pauseparam = bnx2_set_pauseparam,
6258 .get_rx_csum = bnx2_get_rx_csum,
6259 .set_rx_csum = bnx2_set_rx_csum,
6260 .get_tx_csum = ethtool_op_get_tx_csum,
6261 .set_tx_csum = bnx2_set_tx_csum,
6262 .get_sg = ethtool_op_get_sg,
6263 .set_sg = ethtool_op_set_sg,
6264 .get_tso = ethtool_op_get_tso,
6265 .set_tso = bnx2_set_tso,
6266 .self_test_count = bnx2_self_test_count,
6267 .self_test = bnx2_self_test,
6268 .get_strings = bnx2_get_strings,
6269 .phys_id = bnx2_phys_id,
6270 .get_stats_count = bnx2_get_stats_count,
6271 .get_ethtool_stats = bnx2_get_ethtool_stats,
6272 .get_perm_addr = ethtool_op_get_perm_addr,
6275 /* Called with rtnl_lock */
6276 static int
6277 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6279 struct mii_ioctl_data *data = if_mii(ifr);
6280 struct bnx2 *bp = netdev_priv(dev);
6281 int err;
6283 switch(cmd) {
6284 case SIOCGMIIPHY:
6285 data->phy_id = bp->phy_addr;
6287 /* fallthru */
6288 case SIOCGMIIREG: {
6289 u32 mii_regval;
6291 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6292 return -EOPNOTSUPP;
6294 if (!netif_running(dev))
6295 return -EAGAIN;
6297 spin_lock_bh(&bp->phy_lock);
6298 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
6299 spin_unlock_bh(&bp->phy_lock);
6301 data->val_out = mii_regval;
6303 return err;
6306 case SIOCSMIIREG:
6307 if (!capable(CAP_NET_ADMIN))
6308 return -EPERM;
6310 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6311 return -EOPNOTSUPP;
6313 if (!netif_running(dev))
6314 return -EAGAIN;
6316 spin_lock_bh(&bp->phy_lock);
6317 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
6318 spin_unlock_bh(&bp->phy_lock);
6320 return err;
6322 default:
6323 /* do nothing */
6324 break;
6326 return -EOPNOTSUPP;
6329 /* Called with rtnl_lock */
6330 static int
6331 bnx2_change_mac_addr(struct net_device *dev, void *p)
6333 struct sockaddr *addr = p;
6334 struct bnx2 *bp = netdev_priv(dev);
6336 if (!is_valid_ether_addr(addr->sa_data))
6337 return -EINVAL;
6339 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6340 if (netif_running(dev))
6341 bnx2_set_mac_addr(bp);
6343 return 0;
6346 /* Called with rtnl_lock */
6347 static int
6348 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6350 struct bnx2 *bp = netdev_priv(dev);
6352 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6353 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6354 return -EINVAL;
6356 dev->mtu = new_mtu;
6357 if (netif_running(dev)) {
6358 bnx2_netif_stop(bp);
6360 bnx2_init_nic(bp);
6362 bnx2_netif_start(bp);
6364 return 0;
6367 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6368 static void
6369 poll_bnx2(struct net_device *dev)
6371 struct bnx2 *bp = netdev_priv(dev);
6373 disable_irq(bp->pdev->irq);
6374 bnx2_interrupt(bp->pdev->irq, dev);
6375 enable_irq(bp->pdev->irq);
6377 #endif
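/* 5709 media detection: the bond-id field identifies copper ("C") and
 * serdes ("S") parts directly; otherwise the PHY strap value (from the
 * override field when set, else the hardware strap) is decoded per
 * PCI function, since the dual-port device maps different strap codes
 * to serdes on function 0 than on function 1.
 */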
6379 static void __devinit
6380 bnx2_get_5709_media(struct bnx2 *bp)
6382 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6383 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6384 u32 strap;
6386 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6387 return;
6388 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6389 bp->phy_flags |= PHY_SERDES_FLAG;
6390 return;
6393 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6394 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6395 else
6396 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6398 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6399 switch (strap) {
6400 case 0x4:
6401 case 0x5:
6402 case 0x6:
6403 bp->phy_flags |= PHY_SERDES_FLAG;
6404 return;
6406 } else {
6407 switch (strap) {
6408 case 0x1:
6409 case 0x2:
6410 case 0x4:
6411 bp->phy_flags |= PHY_SERDES_FLAG;
6412 return;
6417 static void __devinit
6418 bnx2_get_pci_speed(struct bnx2 *bp)
6420 u32 reg;
6422 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6423 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6424 u32 clkreg;
6426 bp->flags |= PCIX_FLAG;
6428 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6430 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6431 switch (clkreg) {
6432 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6433 bp->bus_speed_mhz = 133;
6434 break;
6436 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6437 bp->bus_speed_mhz = 100;
6438 break;
6440 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6441 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6442 bp->bus_speed_mhz = 66;
6443 break;
6445 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6446 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6447 bp->bus_speed_mhz = 50;
6448 break;
6450 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6451 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6452 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6453 bp->bus_speed_mhz = 33;
6454 break;
6457 else {
6458 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6459 bp->bus_speed_mhz = 66;
6460 else
6461 bp->bus_speed_mhz = 33;
6464 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6465 bp->flags |= PCI_32BIT_FLAG;
6469 static int __devinit
6470 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6472 struct bnx2 *bp;
6473 unsigned long mem_len;
6474 int rc, i, j;
6475 u32 reg;
6476 u64 dma_mask, persist_dma_mask;
6478 SET_MODULE_OWNER(dev);
6479 SET_NETDEV_DEV(dev, &pdev->dev);
6480 bp = netdev_priv(dev);
6482 bp->flags = 0;
6483 bp->phy_flags = 0;
6485 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6486 rc = pci_enable_device(pdev);
6487 if (rc) {
6488 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
6489 goto err_out;
6492 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6493 dev_err(&pdev->dev,
6494 "Cannot find PCI device base address, aborting.\n");
6495 rc = -ENODEV;
6496 goto err_out_disable;
6499 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6500 if (rc) {
6501 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6502 goto err_out_disable;
6505 pci_set_master(pdev);
6507 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6508 if (bp->pm_cap == 0) {
6509 dev_err(&pdev->dev,
6510 "Cannot find power management capability, aborting.\n");
6511 rc = -EIO;
6512 goto err_out_release;
6515 bp->dev = dev;
6516 bp->pdev = pdev;
6518 spin_lock_init(&bp->phy_lock);
6519 spin_lock_init(&bp->indirect_lock);
6520 INIT_WORK(&bp->reset_task, bnx2_reset_task);
6522 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
6523 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
6524 dev->mem_end = dev->mem_start + mem_len;
6525 dev->irq = pdev->irq;
6527 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6529 if (!bp->regview) {
6530 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
6531 rc = -ENOMEM;
6532 goto err_out_release;
6535 /* Configure byte swap and enable write to the reg_window registers.
6536 * Rely on CPU to do target byte swapping on big endian systems
6537 * The chip's target access swapping will not swap all accesses
6538 */
6539 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6540 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6541 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6543 bnx2_set_power_state(bp, PCI_D0);
6545 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6547 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6548 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6549 dev_err(&pdev->dev,
6550 "Cannot find PCIE capability, aborting.\n");
6551 rc = -EIO;
6552 goto err_out_unmap;
6554 bp->flags |= PCIE_FLAG;
6555 } else {
6556 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6557 if (bp->pcix_cap == 0) {
6558 dev_err(&pdev->dev,
6559 "Cannot find PCIX capability, aborting.\n");
6560 rc = -EIO;
6561 goto err_out_unmap;
	if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
		if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
			bp->flags |= MSI_CAP_FLAG;
	}

	/* 5708 cannot support DMA addresses > 40-bit. */
	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
	else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
		dev->features |= NETIF_F_HIGHDMA;
		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
		if (rc) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting.\n");
			goto err_out_unmap;
		}
	} else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
		goto err_out_unmap;
	}

	if (!(bp->flags & PCIE_FLAG))
		bnx2_get_pci_speed(bp);

	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	}
	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		 !(bp->flags & PCIX_FLAG)) {
		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting.\n");
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);
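
	/* Newer bootcode publishes a per-function shared memory base in a
	 * signed header; otherwise fall back to the fixed legacy window. */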
	reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG) {
		u32 off = PCI_FUNC(pdev->devfn) << 2;

		bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
	} else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running. */
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting.\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}
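
	/* Convert the three bootcode revision bytes into a dotted
	 * "x.y.z" string, suppressing leading zeros within each field. */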
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
	for (i = 0, j = 0; i < 3; i++) {
		u8 num, k, skip0;

		num = (u8) (reg >> (24 - (i * 8)));
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				bp->fw_version[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			bp->fw_version[j++] = '.';
	}
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED) {
		bp->flags |= ASF_ENABLE_FLAG;
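
		/* Give the management firmware up to 300ms (30 x 10ms)
		 * to report that it is running. */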
		for (i = 0; i < 30; i++) {
			reg = REG_RD_IND(bp, bp->shmem_base +
					 BNX2_BC_STATE_CONDITION);
			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
				break;
			msleep(10);
		}
	}
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
	reg &= BNX2_CONDITION_MFW_RUN_MASK;
	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
		int i;
		u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);

		bp->fw_version[j++] = ' ';
		for (i = 0; i < 3; i++) {
			reg = REG_RD_IND(bp, addr + i * 4);
			reg = swab32(reg);
			memcpy(&bp->fw_version[j], &reg, 4);
			j += 4;
		}
	}
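
	/* The permanent MAC address sits in shared memory as two big-endian
	 * words: two bytes in MAC_UPPER, four in MAC_LOWER. */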
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;

	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->rx_csum = 1;

	bp->rx_offset = sizeof(struct l2_fhdr) + 2;
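
	/* Default host coalescing parameters: completion-count trip points
	 * and tick timeouts for the TX and RX paths. */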
	bp->tx_quick_cons_trip_int = 20;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 6;
	bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	bp->timer_interval = HZ;
	bp->current_interval = HZ;

	bp->phy_addr = 1;

	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_get_5709_media(bp);
	else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
		bp->phy_flags |= PHY_SERDES_FLAG;

	bp->phy_port = PORT_TP;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->phy_port = PORT_FIBRE;
		bp->flags |= NO_WOL_FLAG;
		if (CHIP_NUM(bp) != CHIP_NUM_5706) {
			bp->phy_addr = 2;
			reg = REG_RD_IND(bp, bp->shmem_base +
					 BNX2_SHARED_HW_CFG_CONFIG);
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
		}
		bnx2_init_remote_phy(bp);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
		   CHIP_NUM(bp) == CHIP_NUM_5708)
		bp->phy_flags |= PHY_CRC_FIX_FLAG;
	else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
		bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1))
		bp->flags |= NO_WOL_FLAG;
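
	/* The 5706 A0 appears not to support separate interrupt-mode
	 * coalescing parameters, so mirror the normal values. */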
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int = bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int = bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}

	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {

			if (amd_8132->revision >= 0x10 &&
			    amd_8132->revision <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

	bnx2_set_default_link(bp);
	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
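
	/* Periodic driver timer; bnx2_timer runs once per second
	 * (timer_interval == HZ) to service link state and the firmware
	 * heartbeat. */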
	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(bp->timer_interval);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
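
/* Compose a human-readable bus description, e.g. "PCI Express" or
 * "PCI-X 64-bit 133MHz", into the caller-supplied buffer. */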
static char * __devinit
bnx2_bus_string(struct bnx2 *bp, char *str)
{
	char *s = str;

	if (bp->flags & PCIE_FLAG) {
		s += sprintf(s, "PCI Express");
	} else {
		s += sprintf(s, "PCI");
		if (bp->flags & PCIX_FLAG)
			s += sprintf(s, "-X");
		if (bp->flags & PCI_32BIT_FLAG)
			s += sprintf(s, " 32-bit");
		else
			s += sprintf(s, " 64-bit");
		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
	}
	return str;
}

static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;
	char str[40];

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev is zeroed by alloc_etherdev() */
	dev = alloc_etherdev(sizeof(*bp));
	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
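	/* Old-style NAPI: poll up to 64 packets (dev->weight) per pass. */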
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;

	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, IRQ %d, ",
	       dev->name,
	       bp->name,
	       ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
	       ((CHIP_ID(bp) & 0x0ff0) >> 4),
	       bnx2_bus_string(bp, str),
	       dev->base_addr,
	       bp->pdev->irq);

	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");

	return 0;
}

static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
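
	/* The reset task may still be queued; make sure it has finished
	 * before tearing the device down. */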
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
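
	/* Tell the bootcode how we are going down so it can arm
	 * wake-on-LAN (or skip it) as appropriate. */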
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	pci_save_state(pdev);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}

static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	pci_restore_state(pdev);
	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
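	/* Suspend reset the chip and freed all rings, so fully
	 * re-initialize the NIC before restarting the interface. */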
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}

static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};

static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);