1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004-2008 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/if_vlan.h>
39 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50 #include <linux/log2.h>
52 #include "bnx2.h"
53 #include "bnx2_fw.h"
54 #include "bnx2_fw2.h"
56 #define FW_BUF_SIZE 0x10000
58 #define DRV_MODULE_NAME "bnx2"
59 #define PFX DRV_MODULE_NAME ": "
60 #define DRV_MODULE_VERSION "1.8.2"
61 #define DRV_MODULE_RELDATE "Nov 10, 2008"
63 #define RUN_AT(x) (jiffies + (x))
65 /* Time in jiffies before concluding the transmitter is hung. */
66 #define TX_TIMEOUT (5*HZ)
68 static char version[] __devinitdata =
69 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
71 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
72 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
73 MODULE_LICENSE("GPL");
74 MODULE_VERSION(DRV_MODULE_VERSION);
76 static int disable_msi = 0;
78 module_param(disable_msi, int, 0);
79 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
81 typedef enum {
82 BCM5706 = 0,
83 NC370T,
84 NC370I,
85 BCM5706S,
86 NC370F,
87 BCM5708,
88 BCM5708S,
89 BCM5709,
90 BCM5709S,
91 BCM5716,
92 BCM5716S,
93 } board_t;
95 /* indexed by board_t, above */
96 static struct {
97 char *name;
98 } board_info[] __devinitdata = {
99 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
100 { "HP NC370T Multifunction Gigabit Server Adapter" },
101 { "HP NC370i Multifunction Gigabit Server Adapter" },
102 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
103 { "HP NC370F Multifunction Gigabit Server Adapter" },
104 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
105 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
106 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
107 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
108 { "Broadcom NetXtreme II BCM5716 1000Base-T" },
109 { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
112 static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
113 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
114 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
115 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
116 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
119 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
121 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
122 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
123 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
124 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
125 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
126 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
127 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
128 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
129 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
130 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
131 { PCI_VENDOR_ID_BROADCOM, 0x163b,
132 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
133 { PCI_VENDOR_ID_BROADCOM, 0x163c,
134 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
135 { 0, }
138 static struct flash_spec flash_table[] =
140 #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
141 #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
142 /* Slow EEPROM */
143 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
144 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
145 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
146 "EEPROM - slow"},
147 /* Expansion entry 0001 */
148 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
149 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
150 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
151 "Entry 0001"},
152 /* Saifun SA25F010 (non-buffered flash) */
153 /* strap, cfg1, & write1 need updates */
154 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
155 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
156 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
157 "Non-buffered flash (128kB)"},
158 /* Saifun SA25F020 (non-buffered flash) */
159 /* strap, cfg1, & write1 need updates */
160 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
161 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
162 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
163 "Non-buffered flash (256kB)"},
164 /* Expansion entry 0100 */
165 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
166 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
167 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
168 "Entry 0100"},
169 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
170 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
171 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
172 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
173 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
174 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
175 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
176 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
177 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
178 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
179 /* Saifun SA25F005 (non-buffered flash) */
180 /* strap, cfg1, & write1 need updates */
181 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
182 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
183 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
184 "Non-buffered flash (64kB)"},
185 /* Fast EEPROM */
186 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
187 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
188 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
189 "EEPROM - fast"},
190 /* Expansion entry 1001 */
191 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
192 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
193 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
194 "Entry 1001"},
195 /* Expansion entry 1010 */
196 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
197 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
198 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
199 "Entry 1010"},
200 /* ATMEL AT45DB011B (buffered flash) */
201 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
202 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
203 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
204 "Buffered flash (128kB)"},
205 /* Expansion entry 1100 */
206 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
207 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
208 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
209 "Entry 1100"},
210 /* Expansion entry 1101 */
211 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
212 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
213 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
214 "Entry 1101"},
215 /* Atmel Expansion entry 1110 */
216 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
217 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
218 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
219 "Entry 1110 (Atmel)"},
220 /* ATMEL AT45DB021B (buffered flash) */
221 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
222 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
223 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
224 "Buffered flash (256kB)"},
227 static struct flash_spec flash_5709 = {
228 .flags = BNX2_NV_BUFFERED,
229 .page_bits = BCM5709_FLASH_PAGE_BITS,
230 .page_size = BCM5709_FLASH_PAGE_SIZE,
231 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
232 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
233 .name = "5709 Buffered flash (256kB)",
236 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
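/* Number of free TX descriptors.  The smp_mb() is presumably paired with
 * the producer/consumer updates in the transmit and completion paths so
 * that tx_prod and tx_cons are read consistently here; the ring has one
 * unusable index, hence the TX_DESC_CNT adjustment below.
 */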
238 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
240 u32 diff;
242 smp_mb();
244 /* The ring uses 256 indices for 255 entries, so one of them
245 * needs to be skipped.
247 diff = txr->tx_prod - txr->tx_cons;
248 if (unlikely(diff >= TX_DESC_CNT)) {
249 diff &= 0xffff;
250 if (diff == TX_DESC_CNT)
251 diff = MAX_TX_DESC_CNT;
253 return (bp->tx_ring_size - diff);
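/* Indirect register accessors: the target offset is written to the
 * PCICFG register window address and the data is then read or written
 * through the window, serialized by indirect_lock.
 */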
256 static u32
257 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
259 u32 val;
261 spin_lock_bh(&bp->indirect_lock);
262 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
263 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
264 spin_unlock_bh(&bp->indirect_lock);
265 return val;
268 static void
269 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
271 spin_lock_bh(&bp->indirect_lock);
272 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
273 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
274 spin_unlock_bh(&bp->indirect_lock);
277 static void
278 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
280 bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
283 static u32
284 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
286 return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
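/* Write a word into the on-chip context memory.  On the 5709 the write
 * goes through BNX2_CTX_CTX_DATA/CTRL and completion is polled briefly;
 * older chips use the direct BNX2_CTX_DATA_ADR/DATA pair.
 */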
289 static void
290 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
292 offset += cid_addr;
293 spin_lock_bh(&bp->indirect_lock);
294 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
295 int i;
297 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
298 REG_WR(bp, BNX2_CTX_CTX_CTRL,
299 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
300 for (i = 0; i < 5; i++) {
301 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
302 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
303 break;
304 udelay(5);
306 } else {
307 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
308 REG_WR(bp, BNX2_CTX_DATA, val);
310 spin_unlock_bh(&bp->indirect_lock);
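/* bnx2_read_phy()/bnx2_write_phy(): MDIO access to the PHY through the
 * EMAC.  Auto-polling is temporarily turned off around the transaction,
 * and the START_BUSY bit is polled (up to 50 x 10us) for completion;
 * -EBUSY is returned on timeout.
 */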
313 static int
314 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
316 u32 val1;
317 int i, ret;
319 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
320 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
321 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
323 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
324 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
326 udelay(40);
329 val1 = (bp->phy_addr << 21) | (reg << 16) |
330 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
331 BNX2_EMAC_MDIO_COMM_START_BUSY;
332 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
334 for (i = 0; i < 50; i++) {
335 udelay(10);
337 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
338 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
339 udelay(5);
341 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
342 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
344 break;
348 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
349 *val = 0x0;
350 ret = -EBUSY;
352 else {
353 *val = val1;
354 ret = 0;
357 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
358 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
359 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
361 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
362 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
364 udelay(40);
367 return ret;
370 static int
371 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
373 u32 val1;
374 int i, ret;
376 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
377 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
378 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
380 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
381 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
383 udelay(40);
386 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
387 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
388 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
389 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
391 for (i = 0; i < 50; i++) {
392 udelay(10);
394 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
395 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
396 udelay(5);
397 break;
401 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
402 ret = -EBUSY;
403 else
404 ret = 0;
406 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
407 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
408 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
410 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
411 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
413 udelay(40);
416 return ret;
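/* Interrupt control: each NAPI vector is masked or unmasked through
 * BNX2_PCICFG_INT_ACK_CMD using its int_num and last seen status index.
 */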
419 static void
420 bnx2_disable_int(struct bnx2 *bp)
422 int i;
423 struct bnx2_napi *bnapi;
425 for (i = 0; i < bp->irq_nvecs; i++) {
426 bnapi = &bp->bnx2_napi[i];
427 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
428 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
430 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
433 static void
434 bnx2_enable_int(struct bnx2 *bp)
436 int i;
437 struct bnx2_napi *bnapi;
439 for (i = 0; i < bp->irq_nvecs; i++) {
440 bnapi = &bp->bnx2_napi[i];
442 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
443 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
444 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
445 bnapi->last_status_idx);
447 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
448 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
449 bnapi->last_status_idx);
451 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
454 static void
455 bnx2_disable_int_sync(struct bnx2 *bp)
457 int i;
459 atomic_inc(&bp->intr_sem);
460 bnx2_disable_int(bp);
461 for (i = 0; i < bp->irq_nvecs; i++)
462 synchronize_irq(bp->irq_tbl[i].vector);
465 static void
466 bnx2_napi_disable(struct bnx2 *bp)
468 int i;
470 for (i = 0; i < bp->irq_nvecs; i++)
471 napi_disable(&bp->bnx2_napi[i].napi);
474 static void
475 bnx2_napi_enable(struct bnx2 *bp)
477 int i;
479 for (i = 0; i < bp->irq_nvecs; i++)
480 napi_enable(&bp->bnx2_napi[i].napi);
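/* bnx2_netif_stop()/bnx2_netif_start() bracket reset-type operations:
 * interrupts are masked and NAPI/TX queues quiesced, then re-enabled
 * only when the intr_sem nesting count drops back to zero.
 */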
483 static void
484 bnx2_netif_stop(struct bnx2 *bp)
486 bnx2_disable_int_sync(bp);
487 if (netif_running(bp->dev)) {
488 bnx2_napi_disable(bp);
489 netif_tx_disable(bp->dev);
490 bp->dev->trans_start = jiffies; /* prevent tx timeout */
494 static void
495 bnx2_netif_start(struct bnx2 *bp)
497 if (atomic_dec_and_test(&bp->intr_sem)) {
498 if (netif_running(bp->dev)) {
499 netif_tx_wake_all_queues(bp->dev);
500 bnx2_napi_enable(bp);
501 bnx2_enable_int(bp);
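/* Ring memory helpers: descriptor rings live in coherent DMA memory
 * (pci_alloc_consistent) while the software shadow rings are plain
 * kzalloc/vmalloc allocations, one set per TX/RX ring.
 */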
506 static void
507 bnx2_free_tx_mem(struct bnx2 *bp)
509 int i;
511 for (i = 0; i < bp->num_tx_rings; i++) {
512 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
513 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
515 if (txr->tx_desc_ring) {
516 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
517 txr->tx_desc_ring,
518 txr->tx_desc_mapping);
519 txr->tx_desc_ring = NULL;
521 kfree(txr->tx_buf_ring);
522 txr->tx_buf_ring = NULL;
526 static void
527 bnx2_free_rx_mem(struct bnx2 *bp)
529 int i;
531 for (i = 0; i < bp->num_rx_rings; i++) {
532 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
533 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
534 int j;
536 for (j = 0; j < bp->rx_max_ring; j++) {
537 if (rxr->rx_desc_ring[j])
538 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
539 rxr->rx_desc_ring[j],
540 rxr->rx_desc_mapping[j]);
541 rxr->rx_desc_ring[j] = NULL;
543 if (rxr->rx_buf_ring)
544 vfree(rxr->rx_buf_ring);
545 rxr->rx_buf_ring = NULL;
547 for (j = 0; j < bp->rx_max_pg_ring; j++) {
548 if (rxr->rx_pg_desc_ring[j])
549 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
550 rxr->rx_pg_desc_ring[j],
551 rxr->rx_pg_desc_mapping[j]);
552 rxr->rx_pg_desc_ring[j] = NULL;
554 if (rxr->rx_pg_ring)
555 vfree(rxr->rx_pg_ring);
556 rxr->rx_pg_ring = NULL;
560 static int
561 bnx2_alloc_tx_mem(struct bnx2 *bp)
563 int i;
565 for (i = 0; i < bp->num_tx_rings; i++) {
566 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
567 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
569 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
570 if (txr->tx_buf_ring == NULL)
571 return -ENOMEM;
573 txr->tx_desc_ring =
574 pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
575 &txr->tx_desc_mapping);
576 if (txr->tx_desc_ring == NULL)
577 return -ENOMEM;
579 return 0;
582 static int
583 bnx2_alloc_rx_mem(struct bnx2 *bp)
585 int i;
587 for (i = 0; i < bp->num_rx_rings; i++) {
588 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
589 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
590 int j;
592 rxr->rx_buf_ring =
593 vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
594 if (rxr->rx_buf_ring == NULL)
595 return -ENOMEM;
597 memset(rxr->rx_buf_ring, 0,
598 SW_RXBD_RING_SIZE * bp->rx_max_ring);
600 for (j = 0; j < bp->rx_max_ring; j++) {
601 rxr->rx_desc_ring[j] =
602 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
603 &rxr->rx_desc_mapping[j]);
604 if (rxr->rx_desc_ring[j] == NULL)
605 return -ENOMEM;
609 if (bp->rx_pg_ring_size) {
610 rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
611 bp->rx_max_pg_ring);
612 if (rxr->rx_pg_ring == NULL)
613 return -ENOMEM;
615 memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
616 bp->rx_max_pg_ring);
619 for (j = 0; j < bp->rx_max_pg_ring; j++) {
620 rxr->rx_pg_desc_ring[j] =
621 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
622 &rxr->rx_pg_desc_mapping[j]);
623 if (rxr->rx_pg_desc_ring[j] == NULL)
624 return -ENOMEM;
628 return 0;
631 static void
632 bnx2_free_mem(struct bnx2 *bp)
634 int i;
635 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
637 bnx2_free_tx_mem(bp);
638 bnx2_free_rx_mem(bp);
640 for (i = 0; i < bp->ctx_pages; i++) {
641 if (bp->ctx_blk[i]) {
642 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
643 bp->ctx_blk[i],
644 bp->ctx_blk_mapping[i]);
645 bp->ctx_blk[i] = NULL;
648 if (bnapi->status_blk.msi) {
649 pci_free_consistent(bp->pdev, bp->status_stats_size,
650 bnapi->status_blk.msi,
651 bp->status_blk_mapping);
652 bnapi->status_blk.msi = NULL;
653 bp->stats_blk = NULL;
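/* Allocate the shared status/statistics block (one DMA allocation, with
 * per-vector MSI-X sub-blocks when supported), the 5709 context pages,
 * and finally the RX/TX ring memory; everything is undone on failure.
 */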
657 static int
658 bnx2_alloc_mem(struct bnx2 *bp)
660 int i, status_blk_size, err;
661 struct bnx2_napi *bnapi;
662 void *status_blk;
664 /* Combine status and statistics blocks into one allocation. */
665 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
666 if (bp->flags & BNX2_FLAG_MSIX_CAP)
667 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
668 BNX2_SBLK_MSIX_ALIGN_SIZE);
669 bp->status_stats_size = status_blk_size +
670 sizeof(struct statistics_block);
672 status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
673 &bp->status_blk_mapping);
674 if (status_blk == NULL)
675 goto alloc_mem_err;
677 memset(status_blk, 0, bp->status_stats_size);
679 bnapi = &bp->bnx2_napi[0];
680 bnapi->status_blk.msi = status_blk;
681 bnapi->hw_tx_cons_ptr =
682 &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
683 bnapi->hw_rx_cons_ptr =
684 &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
685 if (bp->flags & BNX2_FLAG_MSIX_CAP) {
686 for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
687 struct status_block_msix *sblk;
689 bnapi = &bp->bnx2_napi[i];
691 sblk = (void *) (status_blk +
692 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
693 bnapi->status_blk.msix = sblk;
694 bnapi->hw_tx_cons_ptr =
695 &sblk->status_tx_quick_consumer_index;
696 bnapi->hw_rx_cons_ptr =
697 &sblk->status_rx_quick_consumer_index;
698 bnapi->int_num = i << 24;
702 bp->stats_blk = status_blk + status_blk_size;
704 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
706 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
707 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
708 if (bp->ctx_pages == 0)
709 bp->ctx_pages = 1;
710 for (i = 0; i < bp->ctx_pages; i++) {
711 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
712 BCM_PAGE_SIZE,
713 &bp->ctx_blk_mapping[i]);
714 if (bp->ctx_blk[i] == NULL)
715 goto alloc_mem_err;
719 err = bnx2_alloc_rx_mem(bp);
720 if (err)
721 goto alloc_mem_err;
723 err = bnx2_alloc_tx_mem(bp);
724 if (err)
725 goto alloc_mem_err;
727 return 0;
729 alloc_mem_err:
730 bnx2_free_mem(bp);
731 return -ENOMEM;
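/* Report the resolved link state to the on-chip firmware via shared
 * memory so it can track the driver's view of the link; skipped when a
 * remote PHY owns the link.
 */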
734 static void
735 bnx2_report_fw_link(struct bnx2 *bp)
737 u32 fw_link_status = 0;
739 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
740 return;
742 if (bp->link_up) {
743 u32 bmsr;
745 switch (bp->line_speed) {
746 case SPEED_10:
747 if (bp->duplex == DUPLEX_HALF)
748 fw_link_status = BNX2_LINK_STATUS_10HALF;
749 else
750 fw_link_status = BNX2_LINK_STATUS_10FULL;
751 break;
752 case SPEED_100:
753 if (bp->duplex == DUPLEX_HALF)
754 fw_link_status = BNX2_LINK_STATUS_100HALF;
755 else
756 fw_link_status = BNX2_LINK_STATUS_100FULL;
757 break;
758 case SPEED_1000:
759 if (bp->duplex == DUPLEX_HALF)
760 fw_link_status = BNX2_LINK_STATUS_1000HALF;
761 else
762 fw_link_status = BNX2_LINK_STATUS_1000FULL;
763 break;
764 case SPEED_2500:
765 if (bp->duplex == DUPLEX_HALF)
766 fw_link_status = BNX2_LINK_STATUS_2500HALF;
767 else
768 fw_link_status = BNX2_LINK_STATUS_2500FULL;
769 break;
772 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
774 if (bp->autoneg) {
775 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
777 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
778 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
780 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
781 bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
782 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
783 else
784 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
787 else
788 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
790 bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
793 static char *
794 bnx2_xceiver_str(struct bnx2 *bp)
796 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
797 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
798 "Copper"));
801 static void
802 bnx2_report_link(struct bnx2 *bp)
804 if (bp->link_up) {
805 netif_carrier_on(bp->dev);
806 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
807 bnx2_xceiver_str(bp));
809 printk("%d Mbps ", bp->line_speed);
811 if (bp->duplex == DUPLEX_FULL)
812 printk("full duplex");
813 else
814 printk("half duplex");
816 if (bp->flow_ctrl) {
817 if (bp->flow_ctrl & FLOW_CTRL_RX) {
818 printk(", receive ");
819 if (bp->flow_ctrl & FLOW_CTRL_TX)
820 printk("& transmit ");
822 else {
823 printk(", transmit ");
825 printk("flow control ON");
827 printk("\n");
829 else {
830 netif_carrier_off(bp->dev);
831 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
832 bnx2_xceiver_str(bp));
835 bnx2_report_fw_link(bp);
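/* Resolve RX/TX pause from the local and remote advertisements (or from
 * the forced settings when autoneg is off), following the 802.3
 * priority resolution table referenced below.
 */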
838 static void
839 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
841 u32 local_adv, remote_adv;
843 bp->flow_ctrl = 0;
844 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
845 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
847 if (bp->duplex == DUPLEX_FULL) {
848 bp->flow_ctrl = bp->req_flow_ctrl;
850 return;
853 if (bp->duplex != DUPLEX_FULL) {
854 return;
857 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
858 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
859 u32 val;
861 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
862 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
863 bp->flow_ctrl |= FLOW_CTRL_TX;
864 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
865 bp->flow_ctrl |= FLOW_CTRL_RX;
866 return;
869 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
870 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
872 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
873 u32 new_local_adv = 0;
874 u32 new_remote_adv = 0;
876 if (local_adv & ADVERTISE_1000XPAUSE)
877 new_local_adv |= ADVERTISE_PAUSE_CAP;
878 if (local_adv & ADVERTISE_1000XPSE_ASYM)
879 new_local_adv |= ADVERTISE_PAUSE_ASYM;
880 if (remote_adv & ADVERTISE_1000XPAUSE)
881 new_remote_adv |= ADVERTISE_PAUSE_CAP;
882 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
883 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
885 local_adv = new_local_adv;
886 remote_adv = new_remote_adv;
889 /* See Table 28B-3 of 802.3ab-1999 spec. */
890 if (local_adv & ADVERTISE_PAUSE_CAP) {
891 if(local_adv & ADVERTISE_PAUSE_ASYM) {
892 if (remote_adv & ADVERTISE_PAUSE_CAP) {
893 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
895 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
896 bp->flow_ctrl = FLOW_CTRL_RX;
899 else {
900 if (remote_adv & ADVERTISE_PAUSE_CAP) {
901 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
905 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
906 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
907 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
909 bp->flow_ctrl = FLOW_CTRL_TX;
914 static int
915 bnx2_5709s_linkup(struct bnx2 *bp)
917 u32 val, speed;
919 bp->link_up = 1;
921 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
922 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
923 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
925 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
926 bp->line_speed = bp->req_line_speed;
927 bp->duplex = bp->req_duplex;
928 return 0;
930 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
931 switch (speed) {
932 case MII_BNX2_GP_TOP_AN_SPEED_10:
933 bp->line_speed = SPEED_10;
934 break;
935 case MII_BNX2_GP_TOP_AN_SPEED_100:
936 bp->line_speed = SPEED_100;
937 break;
938 case MII_BNX2_GP_TOP_AN_SPEED_1G:
939 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
940 bp->line_speed = SPEED_1000;
941 break;
942 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
943 bp->line_speed = SPEED_2500;
944 break;
946 if (val & MII_BNX2_GP_TOP_AN_FD)
947 bp->duplex = DUPLEX_FULL;
948 else
949 bp->duplex = DUPLEX_HALF;
950 return 0;
953 static int
954 bnx2_5708s_linkup(struct bnx2 *bp)
956 u32 val;
958 bp->link_up = 1;
959 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
960 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
961 case BCM5708S_1000X_STAT1_SPEED_10:
962 bp->line_speed = SPEED_10;
963 break;
964 case BCM5708S_1000X_STAT1_SPEED_100:
965 bp->line_speed = SPEED_100;
966 break;
967 case BCM5708S_1000X_STAT1_SPEED_1G:
968 bp->line_speed = SPEED_1000;
969 break;
970 case BCM5708S_1000X_STAT1_SPEED_2G5:
971 bp->line_speed = SPEED_2500;
972 break;
974 if (val & BCM5708S_1000X_STAT1_FD)
975 bp->duplex = DUPLEX_FULL;
976 else
977 bp->duplex = DUPLEX_HALF;
979 return 0;
982 static int
983 bnx2_5706s_linkup(struct bnx2 *bp)
985 u32 bmcr, local_adv, remote_adv, common;
987 bp->link_up = 1;
988 bp->line_speed = SPEED_1000;
990 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
991 if (bmcr & BMCR_FULLDPLX) {
992 bp->duplex = DUPLEX_FULL;
994 else {
995 bp->duplex = DUPLEX_HALF;
998 if (!(bmcr & BMCR_ANENABLE)) {
999 return 0;
1002 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1003 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1005 common = local_adv & remote_adv;
1006 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1008 if (common & ADVERTISE_1000XFULL) {
1009 bp->duplex = DUPLEX_FULL;
1011 else {
1012 bp->duplex = DUPLEX_HALF;
1016 return 0;
1019 static int
1020 bnx2_copper_linkup(struct bnx2 *bp)
1022 u32 bmcr;
1024 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1025 if (bmcr & BMCR_ANENABLE) {
1026 u32 local_adv, remote_adv, common;
1028 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1029 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1031 common = local_adv & (remote_adv >> 2);
1032 if (common & ADVERTISE_1000FULL) {
1033 bp->line_speed = SPEED_1000;
1034 bp->duplex = DUPLEX_FULL;
1036 else if (common & ADVERTISE_1000HALF) {
1037 bp->line_speed = SPEED_1000;
1038 bp->duplex = DUPLEX_HALF;
1040 else {
1041 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1042 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1044 common = local_adv & remote_adv;
1045 if (common & ADVERTISE_100FULL) {
1046 bp->line_speed = SPEED_100;
1047 bp->duplex = DUPLEX_FULL;
1049 else if (common & ADVERTISE_100HALF) {
1050 bp->line_speed = SPEED_100;
1051 bp->duplex = DUPLEX_HALF;
1053 else if (common & ADVERTISE_10FULL) {
1054 bp->line_speed = SPEED_10;
1055 bp->duplex = DUPLEX_FULL;
1057 else if (common & ADVERTISE_10HALF) {
1058 bp->line_speed = SPEED_10;
1059 bp->duplex = DUPLEX_HALF;
1061 else {
1062 bp->line_speed = 0;
1063 bp->link_up = 0;
1067 else {
1068 if (bmcr & BMCR_SPEED100) {
1069 bp->line_speed = SPEED_100;
1071 else {
1072 bp->line_speed = SPEED_10;
1074 if (bmcr & BMCR_FULLDPLX) {
1075 bp->duplex = DUPLEX_FULL;
1077 else {
1078 bp->duplex = DUPLEX_HALF;
1082 return 0;
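/* Program the L2 RX context for one ring; on the 5709 this also sets
 * the low/high flow-control watermarks derived from the RX ring size.
 */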
1085 static void
1086 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1088 u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1090 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1091 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1092 val |= 0x02 << 8;
1094 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1095 u32 lo_water, hi_water;
1097 if (bp->flow_ctrl & FLOW_CTRL_TX)
1098 lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
1099 else
1100 lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
1101 if (lo_water >= bp->rx_ring_size)
1102 lo_water = 0;
1104 hi_water = bp->rx_ring_size / 4;
1106 if (hi_water <= lo_water)
1107 lo_water = 0;
1109 hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
1110 lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;
1112 if (hi_water > 0xf)
1113 hi_water = 0xf;
1114 else if (hi_water == 0)
1115 lo_water = 0;
1116 val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
1118 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1121 static void
1122 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1124 int i;
1125 u32 cid;
1127 for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1128 if (i == 1)
1129 cid = RX_RSS_CID;
1130 bnx2_init_rx_context(bp, cid);
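/* Propagate the negotiated speed, duplex and pause settings into the
 * EMAC mode/RX/TX registers and acknowledge the link-change attention.
 */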
1134 static void
1135 bnx2_set_mac_link(struct bnx2 *bp)
1137 u32 val;
1139 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1140 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1141 (bp->duplex == DUPLEX_HALF)) {
1142 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1145 /* Configure the EMAC mode register. */
1146 val = REG_RD(bp, BNX2_EMAC_MODE);
1148 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1149 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1150 BNX2_EMAC_MODE_25G_MODE);
1152 if (bp->link_up) {
1153 switch (bp->line_speed) {
1154 case SPEED_10:
1155 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
1156 val |= BNX2_EMAC_MODE_PORT_MII_10M;
1157 break;
1159 /* fall through */
1160 case SPEED_100:
1161 val |= BNX2_EMAC_MODE_PORT_MII;
1162 break;
1163 case SPEED_2500:
1164 val |= BNX2_EMAC_MODE_25G_MODE;
1165 /* fall through */
1166 case SPEED_1000:
1167 val |= BNX2_EMAC_MODE_PORT_GMII;
1168 break;
1171 else {
1172 val |= BNX2_EMAC_MODE_PORT_GMII;
1175 /* Set the MAC to operate in the appropriate duplex mode. */
1176 if (bp->duplex == DUPLEX_HALF)
1177 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1178 REG_WR(bp, BNX2_EMAC_MODE, val);
1180 /* Enable/disable rx PAUSE. */
1181 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1183 if (bp->flow_ctrl & FLOW_CTRL_RX)
1184 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1185 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1187 /* Enable/disable tx PAUSE. */
1188 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
1189 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1191 if (bp->flow_ctrl & FLOW_CTRL_TX)
1192 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1193 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
1195 /* Acknowledge the interrupt. */
1196 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1198 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1199 bnx2_init_all_rx_contexts(bp);
1202 static void
1203 bnx2_enable_bmsr1(struct bnx2 *bp)
1205 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1206 (CHIP_NUM(bp) == CHIP_NUM_5709))
1207 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1208 MII_BNX2_BLK_ADDR_GP_STATUS);
1211 static void
1212 bnx2_disable_bmsr1(struct bnx2 *bp)
1214 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1215 (CHIP_NUM(bp) == CHIP_NUM_5709))
1216 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1217 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1220 static int
1221 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1223 u32 up1;
1224 int ret = 1;
1226 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1227 return 0;
1229 if (bp->autoneg & AUTONEG_SPEED)
1230 bp->advertising |= ADVERTISED_2500baseX_Full;
1232 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1233 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1235 bnx2_read_phy(bp, bp->mii_up1, &up1);
1236 if (!(up1 & BCM5708S_UP1_2G5)) {
1237 up1 |= BCM5708S_UP1_2G5;
1238 bnx2_write_phy(bp, bp->mii_up1, up1);
1239 ret = 0;
1242 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1243 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1244 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1246 return ret;
1249 static int
1250 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1252 u32 up1;
1253 int ret = 0;
1255 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1256 return 0;
1258 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1259 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1261 bnx2_read_phy(bp, bp->mii_up1, &up1);
1262 if (up1 & BCM5708S_UP1_2G5) {
1263 up1 &= ~BCM5708S_UP1_2G5;
1264 bnx2_write_phy(bp, bp->mii_up1, up1);
1265 ret = 1;
1268 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1269 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1270 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1272 return ret;
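/* Force or release 2.5G operation on 2.5G-capable SerDes PHYs; the 5709
 * uses the SERDES_DIG MISC1 register while the 5708 uses a BMCR bit.
 */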
1275 static void
1276 bnx2_enable_forced_2g5(struct bnx2 *bp)
1278 u32 bmcr;
1280 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1281 return;
1283 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1284 u32 val;
1286 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1287 MII_BNX2_BLK_ADDR_SERDES_DIG);
1288 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1289 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1290 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1291 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1293 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1294 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1295 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1297 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1298 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1299 bmcr |= BCM5708S_BMCR_FORCE_2500;
1302 if (bp->autoneg & AUTONEG_SPEED) {
1303 bmcr &= ~BMCR_ANENABLE;
1304 if (bp->req_duplex == DUPLEX_FULL)
1305 bmcr |= BMCR_FULLDPLX;
1307 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1310 static void
1311 bnx2_disable_forced_2g5(struct bnx2 *bp)
1313 u32 bmcr;
1315 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1316 return;
1318 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1319 u32 val;
1321 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1322 MII_BNX2_BLK_ADDR_SERDES_DIG);
1323 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1324 val &= ~MII_BNX2_SD_MISC1_FORCE;
1325 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1327 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1328 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1329 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1331 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1332 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1333 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1336 if (bp->autoneg & AUTONEG_SPEED)
1337 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1338 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1341 static void
1342 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1344 u32 val;
1346 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1347 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1348 if (start)
1349 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1350 else
1351 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
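/* Re-evaluate link state: read BMSR twice (it is latch-low), apply the
 * 5706 SerDes workaround, then hand off to the chip-specific linkup
 * helper and report any change.
 */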
1354 static int
1355 bnx2_set_link(struct bnx2 *bp)
1357 u32 bmsr;
1358 u8 link_up;
1360 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1361 bp->link_up = 1;
1362 return 0;
1365 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1366 return 0;
1368 link_up = bp->link_up;
1370 bnx2_enable_bmsr1(bp);
1371 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1372 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1373 bnx2_disable_bmsr1(bp);
1375 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1376 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1377 u32 val, an_dbg;
1379 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1380 bnx2_5706s_force_link_dn(bp, 0);
1381 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1383 val = REG_RD(bp, BNX2_EMAC_STATUS);
1385 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1386 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1387 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1389 if ((val & BNX2_EMAC_STATUS_LINK) &&
1390 !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1391 bmsr |= BMSR_LSTATUS;
1392 else
1393 bmsr &= ~BMSR_LSTATUS;
1396 if (bmsr & BMSR_LSTATUS) {
1397 bp->link_up = 1;
1399 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1400 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1401 bnx2_5706s_linkup(bp);
1402 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1403 bnx2_5708s_linkup(bp);
1404 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1405 bnx2_5709s_linkup(bp);
1407 else {
1408 bnx2_copper_linkup(bp);
1410 bnx2_resolve_flow_ctrl(bp);
1412 else {
1413 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1414 (bp->autoneg & AUTONEG_SPEED))
1415 bnx2_disable_forced_2g5(bp);
1417 if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1418 u32 bmcr;
1420 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1421 bmcr |= BMCR_ANENABLE;
1422 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1424 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1426 bp->link_up = 0;
1429 if (bp->link_up != link_up) {
1430 bnx2_report_link(bp);
1433 bnx2_set_mac_link(bp);
1435 return 0;
1438 static int
1439 bnx2_reset_phy(struct bnx2 *bp)
1441 int i;
1442 u32 reg;
1444 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1446 #define PHY_RESET_MAX_WAIT 100
1447 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1448 udelay(10);
1450 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1451 if (!(reg & BMCR_RESET)) {
1452 udelay(20);
1453 break;
1456 if (i == PHY_RESET_MAX_WAIT) {
1457 return -EBUSY;
1459 return 0;
1462 static u32
1463 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1465 u32 adv = 0;
1467 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1468 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1470 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1471 adv = ADVERTISE_1000XPAUSE;
1473 else {
1474 adv = ADVERTISE_PAUSE_CAP;
1477 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1478 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1479 adv = ADVERTISE_1000XPSE_ASYM;
1481 else {
1482 adv = ADVERTISE_PAUSE_ASYM;
1485 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1486 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1487 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1489 else {
1490 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1493 return adv;
1496 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
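/* Remote-PHY setup: encode the requested speed/duplex/pause into a
 * BNX2_NETLINK_SET_LINK_* bitmask and ask the firmware to apply it via
 * bnx2_fw_sync(); phy_lock is dropped around the firmware call.
 */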
1498 static int
1499 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1501 u32 speed_arg = 0, pause_adv;
1503 pause_adv = bnx2_phy_get_pause_adv(bp);
1505 if (bp->autoneg & AUTONEG_SPEED) {
1506 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1507 if (bp->advertising & ADVERTISED_10baseT_Half)
1508 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1509 if (bp->advertising & ADVERTISED_10baseT_Full)
1510 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1511 if (bp->advertising & ADVERTISED_100baseT_Half)
1512 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1513 if (bp->advertising & ADVERTISED_100baseT_Full)
1514 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1515 if (bp->advertising & ADVERTISED_1000baseT_Full)
1516 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1517 if (bp->advertising & ADVERTISED_2500baseX_Full)
1518 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1519 } else {
1520 if (bp->req_line_speed == SPEED_2500)
1521 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1522 else if (bp->req_line_speed == SPEED_1000)
1523 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1524 else if (bp->req_line_speed == SPEED_100) {
1525 if (bp->req_duplex == DUPLEX_FULL)
1526 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1527 else
1528 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1529 } else if (bp->req_line_speed == SPEED_10) {
1530 if (bp->req_duplex == DUPLEX_FULL)
1531 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1532 else
1533 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1537 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1538 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1539 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1540 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1542 if (port == PORT_TP)
1543 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1544 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1546 bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1548 spin_unlock_bh(&bp->phy_lock);
1549 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1550 spin_lock_bh(&bp->phy_lock);
1552 return 0;
1555 static int
1556 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1558 u32 adv, bmcr;
1559 u32 new_adv = 0;
1561 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1562 return (bnx2_setup_remote_phy(bp, port));
1564 if (!(bp->autoneg & AUTONEG_SPEED)) {
1565 u32 new_bmcr;
1566 int force_link_down = 0;
1568 if (bp->req_line_speed == SPEED_2500) {
1569 if (!bnx2_test_and_enable_2g5(bp))
1570 force_link_down = 1;
1571 } else if (bp->req_line_speed == SPEED_1000) {
1572 if (bnx2_test_and_disable_2g5(bp))
1573 force_link_down = 1;
1575 bnx2_read_phy(bp, bp->mii_adv, &adv);
1576 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1578 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1579 new_bmcr = bmcr & ~BMCR_ANENABLE;
1580 new_bmcr |= BMCR_SPEED1000;
1582 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1583 if (bp->req_line_speed == SPEED_2500)
1584 bnx2_enable_forced_2g5(bp);
1585 else if (bp->req_line_speed == SPEED_1000) {
1586 bnx2_disable_forced_2g5(bp);
1587 new_bmcr &= ~0x2000;
1590 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1591 if (bp->req_line_speed == SPEED_2500)
1592 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1593 else
1594 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1597 if (bp->req_duplex == DUPLEX_FULL) {
1598 adv |= ADVERTISE_1000XFULL;
1599 new_bmcr |= BMCR_FULLDPLX;
1601 else {
1602 adv |= ADVERTISE_1000XHALF;
1603 new_bmcr &= ~BMCR_FULLDPLX;
1605 if ((new_bmcr != bmcr) || (force_link_down)) {
1606 /* Force a link down visible on the other side */
1607 if (bp->link_up) {
1608 bnx2_write_phy(bp, bp->mii_adv, adv &
1609 ~(ADVERTISE_1000XFULL |
1610 ADVERTISE_1000XHALF));
1611 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1612 BMCR_ANRESTART | BMCR_ANENABLE);
1614 bp->link_up = 0;
1615 netif_carrier_off(bp->dev);
1616 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1617 bnx2_report_link(bp);
1619 bnx2_write_phy(bp, bp->mii_adv, adv);
1620 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1621 } else {
1622 bnx2_resolve_flow_ctrl(bp);
1623 bnx2_set_mac_link(bp);
1625 return 0;
1628 bnx2_test_and_enable_2g5(bp);
1630 if (bp->advertising & ADVERTISED_1000baseT_Full)
1631 new_adv |= ADVERTISE_1000XFULL;
1633 new_adv |= bnx2_phy_get_pause_adv(bp);
1635 bnx2_read_phy(bp, bp->mii_adv, &adv);
1636 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1638 bp->serdes_an_pending = 0;
1639 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1640 /* Force a link down visible on the other side */
1641 if (bp->link_up) {
1642 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1643 spin_unlock_bh(&bp->phy_lock);
1644 msleep(20);
1645 spin_lock_bh(&bp->phy_lock);
1648 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1649 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1650 BMCR_ANENABLE);
1651 /* Speed up link-up time when the link partner
1652 * does not autonegotiate, which is very common
1653 * in blade servers. Some blade servers use
1654 * IPMI for keyboard input and it's important
1655 * to minimize link disruptions. Autoneg. involves
1656 * exchanging base pages plus 3 next pages and
1657 * normally completes in about 120 msec.
1659 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1660 bp->serdes_an_pending = 1;
1661 mod_timer(&bp->timer, jiffies + bp->current_interval);
1662 } else {
1663 bnx2_resolve_flow_ctrl(bp);
1664 bnx2_set_mac_link(bp);
1667 return 0;
1670 #define ETHTOOL_ALL_FIBRE_SPEED \
1671 (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
1672 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1673 (ADVERTISED_1000baseT_Full)
1675 #define ETHTOOL_ALL_COPPER_SPEED \
1676 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1677 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1678 ADVERTISED_1000baseT_Full)
1680 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1681 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1683 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1685 static void
1686 bnx2_set_default_remote_link(struct bnx2 *bp)
1688 u32 link;
1690 if (bp->phy_port == PORT_TP)
1691 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1692 else
1693 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1695 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1696 bp->req_line_speed = 0;
1697 bp->autoneg |= AUTONEG_SPEED;
1698 bp->advertising = ADVERTISED_Autoneg;
1699 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1700 bp->advertising |= ADVERTISED_10baseT_Half;
1701 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1702 bp->advertising |= ADVERTISED_10baseT_Full;
1703 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1704 bp->advertising |= ADVERTISED_100baseT_Half;
1705 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1706 bp->advertising |= ADVERTISED_100baseT_Full;
1707 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1708 bp->advertising |= ADVERTISED_1000baseT_Full;
1709 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1710 bp->advertising |= ADVERTISED_2500baseX_Full;
1711 } else {
1712 bp->autoneg = 0;
1713 bp->advertising = 0;
1714 bp->req_duplex = DUPLEX_FULL;
1715 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1716 bp->req_line_speed = SPEED_10;
1717 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1718 bp->req_duplex = DUPLEX_HALF;
1720 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1721 bp->req_line_speed = SPEED_100;
1722 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1723 bp->req_duplex = DUPLEX_HALF;
1725 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1726 bp->req_line_speed = SPEED_1000;
1727 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1728 bp->req_line_speed = SPEED_2500;
1732 static void
1733 bnx2_set_default_link(struct bnx2 *bp)
1735 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1736 bnx2_set_default_remote_link(bp);
1737 return;
1740 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1741 bp->req_line_speed = 0;
1742 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1743 u32 reg;
1745 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1747 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1748 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1749 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1750 bp->autoneg = 0;
1751 bp->req_line_speed = bp->line_speed = SPEED_1000;
1752 bp->req_duplex = DUPLEX_FULL;
1754 } else
1755 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1758 static void
1759 bnx2_send_heart_beat(struct bnx2 *bp)
1761 u32 msg;
1762 u32 addr;
1764 spin_lock(&bp->indirect_lock);
1765 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1766 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1767 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1768 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1769 spin_unlock(&bp->indirect_lock);
1772 static void
1773 bnx2_remote_phy_event(struct bnx2 *bp)
1775 u32 msg;
1776 u8 link_up = bp->link_up;
1777 u8 old_port;
1779 msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1781 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1782 bnx2_send_heart_beat(bp);
1784 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1786 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1787 bp->link_up = 0;
1788 else {
1789 u32 speed;
1791 bp->link_up = 1;
1792 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1793 bp->duplex = DUPLEX_FULL;
1794 switch (speed) {
1795 case BNX2_LINK_STATUS_10HALF:
1796 bp->duplex = DUPLEX_HALF;
1797 case BNX2_LINK_STATUS_10FULL:
1798 bp->line_speed = SPEED_10;
1799 break;
1800 case BNX2_LINK_STATUS_100HALF:
1801 bp->duplex = DUPLEX_HALF;
1802 case BNX2_LINK_STATUS_100BASE_T4:
1803 case BNX2_LINK_STATUS_100FULL:
1804 bp->line_speed = SPEED_100;
1805 break;
1806 case BNX2_LINK_STATUS_1000HALF:
1807 bp->duplex = DUPLEX_HALF;
1808 case BNX2_LINK_STATUS_1000FULL:
1809 bp->line_speed = SPEED_1000;
1810 break;
1811 case BNX2_LINK_STATUS_2500HALF:
1812 bp->duplex = DUPLEX_HALF;
1813 case BNX2_LINK_STATUS_2500FULL:
1814 bp->line_speed = SPEED_2500;
1815 break;
1816 default:
1817 bp->line_speed = 0;
1818 break;
1821 bp->flow_ctrl = 0;
1822 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1823 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1824 if (bp->duplex == DUPLEX_FULL)
1825 bp->flow_ctrl = bp->req_flow_ctrl;
1826 } else {
1827 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1828 bp->flow_ctrl |= FLOW_CTRL_TX;
1829 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1830 bp->flow_ctrl |= FLOW_CTRL_RX;
1833 old_port = bp->phy_port;
1834 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1835 bp->phy_port = PORT_FIBRE;
1836 else
1837 bp->phy_port = PORT_TP;
1839 if (old_port != bp->phy_port)
1840 bnx2_set_default_link(bp);
1843 if (bp->link_up != link_up)
1844 bnx2_report_link(bp);
1846 bnx2_set_mac_link(bp);
1849 static int
1850 bnx2_set_remote_link(struct bnx2 *bp)
1852 u32 evt_code;
1854 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
1855 switch (evt_code) {
1856 case BNX2_FW_EVT_CODE_LINK_EVENT:
1857 bnx2_remote_phy_event(bp);
1858 break;
1859 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1860 default:
1861 bnx2_send_heart_beat(bp);
1862 break;
1864 return 0;
1867 static int
1868 bnx2_setup_copper_phy(struct bnx2 *bp)
1870 u32 bmcr;
1871 u32 new_bmcr;
1873 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1875 if (bp->autoneg & AUTONEG_SPEED) {
1876 u32 adv_reg, adv1000_reg;
1877 u32 new_adv_reg = 0;
1878 u32 new_adv1000_reg = 0;
1880 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
1881 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1882 ADVERTISE_PAUSE_ASYM);
1884 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1885 adv1000_reg &= PHY_ALL_1000_SPEED;
1887 if (bp->advertising & ADVERTISED_10baseT_Half)
1888 new_adv_reg |= ADVERTISE_10HALF;
1889 if (bp->advertising & ADVERTISED_10baseT_Full)
1890 new_adv_reg |= ADVERTISE_10FULL;
1891 if (bp->advertising & ADVERTISED_100baseT_Half)
1892 new_adv_reg |= ADVERTISE_100HALF;
1893 if (bp->advertising & ADVERTISED_100baseT_Full)
1894 new_adv_reg |= ADVERTISE_100FULL;
1895 if (bp->advertising & ADVERTISED_1000baseT_Full)
1896 new_adv1000_reg |= ADVERTISE_1000FULL;
1898 new_adv_reg |= ADVERTISE_CSMA;
1900 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1902 if ((adv1000_reg != new_adv1000_reg) ||
1903 (adv_reg != new_adv_reg) ||
1904 ((bmcr & BMCR_ANENABLE) == 0)) {
1906 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
1907 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1908 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
1909 BMCR_ANENABLE);
1911 else if (bp->link_up) {
1912 /* Flow ctrl may have changed from auto to forced */
1913 /* or vice-versa. */
1915 bnx2_resolve_flow_ctrl(bp);
1916 bnx2_set_mac_link(bp);
1918 return 0;
1921 new_bmcr = 0;
1922 if (bp->req_line_speed == SPEED_100) {
1923 new_bmcr |= BMCR_SPEED100;
1925 if (bp->req_duplex == DUPLEX_FULL) {
1926 new_bmcr |= BMCR_FULLDPLX;
1928 if (new_bmcr != bmcr) {
1929 u32 bmsr;
1931 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1932 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1934 if (bmsr & BMSR_LSTATUS) {
1935 /* Force link down */
1936 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1937 spin_unlock_bh(&bp->phy_lock);
1938 msleep(50);
1939 spin_lock_bh(&bp->phy_lock);
1941 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1942 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1945 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1947 /* Normally, the new speed is set up after the link has
1948 * gone down and up again. In some cases, link will not go
1949 * down so we need to set up the new speed here.
1951 if (bmsr & BMSR_LSTATUS) {
1952 bp->line_speed = bp->req_line_speed;
1953 bp->duplex = bp->req_duplex;
1954 bnx2_resolve_flow_ctrl(bp);
1955 bnx2_set_mac_link(bp);
1957 } else {
1958 bnx2_resolve_flow_ctrl(bp);
1959 bnx2_set_mac_link(bp);
1961 return 0;
1964 static int
1965 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1967 if (bp->loopback == MAC_LOOPBACK)
1968 return 0;
1970 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1971 return (bnx2_setup_serdes_phy(bp, port));
1973 else {
1974 return (bnx2_setup_copper_phy(bp));
1978 static int
1979 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
1981 u32 val;
1983 bp->mii_bmcr = MII_BMCR + 0x10;
1984 bp->mii_bmsr = MII_BMSR + 0x10;
1985 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1986 bp->mii_adv = MII_ADVERTISE + 0x10;
1987 bp->mii_lpa = MII_LPA + 0x10;
1988 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1990 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1991 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1993 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1994 if (reset_phy)
1995 bnx2_reset_phy(bp);
1997 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1999 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2000 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2001 val |= MII_BNX2_SD_1000XCTL1_FIBER;
2002 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2004 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2005 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2006 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2007 val |= BCM5708S_UP1_2G5;
2008 else
2009 val &= ~BCM5708S_UP1_2G5;
2010 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2012 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2013 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2014 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2015 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2017 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2019 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2020 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2021 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2023 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2025 return 0;
2028 static int
2029 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2031 u32 val;
2033 if (reset_phy)
2034 bnx2_reset_phy(bp);
2036 bp->mii_up1 = BCM5708S_UP1;
2038 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2039 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2040 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2042 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2043 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2044 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2046 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2047 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2048 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2050 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2051 bnx2_read_phy(bp, BCM5708S_UP1, &val);
2052 val |= BCM5708S_UP1_2G5;
2053 bnx2_write_phy(bp, BCM5708S_UP1, val);
2056 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
2057 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
2058 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
2059 /* increase tx signal amplitude */
2060 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2061 BCM5708S_BLK_ADDR_TX_MISC);
2062 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2063 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2064 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2065 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2068 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2069 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2071 if (val) {
2072 u32 is_backplane;
2074 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2075 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2076 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2077 BCM5708S_BLK_ADDR_TX_MISC);
2078 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2079 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2080 BCM5708S_BLK_ADDR_DIG);
2083 return 0;
2086 static int
2087 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2089 if (reset_phy)
2090 bnx2_reset_phy(bp);
2092 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2094 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2095 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2097 if (bp->dev->mtu > 1500) {
2098 u32 val;
2100 /* Set extended packet length bit */
2101 bnx2_write_phy(bp, 0x18, 0x7);
2102 bnx2_read_phy(bp, 0x18, &val);
2103 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2105 bnx2_write_phy(bp, 0x1c, 0x6c00);
2106 bnx2_read_phy(bp, 0x1c, &val);
2107 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2109 else {
2110 u32 val;
2112 bnx2_write_phy(bp, 0x18, 0x7);
2113 bnx2_read_phy(bp, 0x18, &val);
2114 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2116 bnx2_write_phy(bp, 0x1c, 0x6c00);
2117 bnx2_read_phy(bp, 0x1c, &val);
2118 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2121 return 0;
2124 static int
2125 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2127 u32 val;
2129 if (reset_phy)
2130 bnx2_reset_phy(bp);
2132 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2133 bnx2_write_phy(bp, 0x18, 0x0c00);
2134 bnx2_write_phy(bp, 0x17, 0x000a);
2135 bnx2_write_phy(bp, 0x15, 0x310b);
2136 bnx2_write_phy(bp, 0x17, 0x201f);
2137 bnx2_write_phy(bp, 0x15, 0x9506);
2138 bnx2_write_phy(bp, 0x17, 0x401f);
2139 bnx2_write_phy(bp, 0x15, 0x14e2);
2140 bnx2_write_phy(bp, 0x18, 0x0400);
2143 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2144 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2145 MII_BNX2_DSP_EXPAND_REG | 0x8);
2146 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2147 val &= ~(1 << 8);
2148 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2151 if (bp->dev->mtu > 1500) {
2152 /* Set extended packet length bit */
2153 bnx2_write_phy(bp, 0x18, 0x7);
2154 bnx2_read_phy(bp, 0x18, &val);
2155 bnx2_write_phy(bp, 0x18, val | 0x4000);
2157 bnx2_read_phy(bp, 0x10, &val);
2158 bnx2_write_phy(bp, 0x10, val | 0x1);
2160 else {
2161 bnx2_write_phy(bp, 0x18, 0x7);
2162 bnx2_read_phy(bp, 0x18, &val);
2163 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2165 bnx2_read_phy(bp, 0x10, &val);
2166 bnx2_write_phy(bp, 0x10, val & ~0x1);
2169 /* ethernet@wirespeed */
2170 bnx2_write_phy(bp, 0x18, 0x7007);
2171 bnx2_read_phy(bp, 0x18, &val);
2172 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2173 return 0;
2177 static int
2178 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2180 u32 val;
2181 int rc = 0;
2183 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2184 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2186 bp->mii_bmcr = MII_BMCR;
2187 bp->mii_bmsr = MII_BMSR;
2188 bp->mii_bmsr1 = MII_BMSR;
2189 bp->mii_adv = MII_ADVERTISE;
2190 bp->mii_lpa = MII_LPA;
2192 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2194 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2195 goto setup_phy;
2197 bnx2_read_phy(bp, MII_PHYSID1, &val);
2198 bp->phy_id = val << 16;
2199 bnx2_read_phy(bp, MII_PHYSID2, &val);
2200 bp->phy_id |= val & 0xffff;
2202 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2203 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2204 rc = bnx2_init_5706s_phy(bp, reset_phy);
2205 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2206 rc = bnx2_init_5708s_phy(bp, reset_phy);
2207 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2208 rc = bnx2_init_5709s_phy(bp, reset_phy);
2210 else {
2211 rc = bnx2_init_copper_phy(bp, reset_phy);
2214 setup_phy:
2215 if (!rc)
2216 rc = bnx2_setup_phy(bp, bp->phy_port);
2218 return rc;
2221 static int
2222 bnx2_set_mac_loopback(struct bnx2 *bp)
2224 u32 mac_mode;
2226 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2227 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2228 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2229 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2230 bp->link_up = 1;
2231 return 0;
2234 static int bnx2_test_link(struct bnx2 *);
2236 static int
2237 bnx2_set_phy_loopback(struct bnx2 *bp)
2239 u32 mac_mode;
2240 int rc, i;
2242 spin_lock_bh(&bp->phy_lock);
2243 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2244 BMCR_SPEED1000);
2245 spin_unlock_bh(&bp->phy_lock);
2246 if (rc)
2247 return rc;
2249 for (i = 0; i < 10; i++) {
2250 if (bnx2_test_link(bp) == 0)
2251 break;
2252 msleep(100);
2255 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2256 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2257 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2258 BNX2_EMAC_MODE_25G_MODE);
2260 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2261 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2262 bp->link_up = 1;
2263 return 0;
2266 static int
2267 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2269 int i;
2270 u32 val;
2272 bp->fw_wr_seq++;
2273 msg_data |= bp->fw_wr_seq;
2275 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2277 if (!ack)
2278 return 0;
2280 /* wait for an acknowledgement. */
2281 for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2282 msleep(10);
2284 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2286 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2287 break;
2289 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2290 return 0;
2292 /* If we timed out, inform the firmware that this is the case. */
2293 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2294 if (!silent)
2295 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2296 "%x\n", msg_data);
2298 msg_data &= ~BNX2_DRV_MSG_CODE;
2299 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2301 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2303 return -EBUSY;
2306 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2307 return -EIO;
2309 return 0;
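/*
 * Illustrative sketch (not part of the driver): how a caller is expected to
 * build the mailbox word passed to bnx2_fw_sync().  The masking above
 * suggests the bits selected by BNX2_DRV_MSG_SEQ carry the sequence number
 * and the remaining bits carry the command/data codes; the firmware echoes
 * the sequence back in BNX2_FW_MB so the driver can match request and
 * acknowledgement.  The helper name is hypothetical.
 */
static inline u32 example_compose_fw_msg(u32 code, u32 seq)
{
	/* e.g. BNX2_DRV_MSG_DATA_WAIT0 | reset_code plus the next sequence */
	return code | (seq & BNX2_DRV_MSG_SEQ);
}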
2312 static int
2313 bnx2_init_5709_context(struct bnx2 *bp)
2315 int i, ret = 0;
2316 u32 val;
2318 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2319 val |= (BCM_PAGE_BITS - 8) << 16;
2320 REG_WR(bp, BNX2_CTX_COMMAND, val);
2321 for (i = 0; i < 10; i++) {
2322 val = REG_RD(bp, BNX2_CTX_COMMAND);
2323 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2324 break;
2325 udelay(2);
2327 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2328 return -EBUSY;
2330 for (i = 0; i < bp->ctx_pages; i++) {
2331 int j;
2333 if (bp->ctx_blk[i])
2334 memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2335 else
2336 return -ENOMEM;
2338 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2339 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2340 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2341 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2342 (u64) bp->ctx_blk_mapping[i] >> 32);
2343 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2344 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2345 for (j = 0; j < 10; j++) {
2347 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2348 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2349 break;
2350 udelay(5);
2352 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2353 ret = -EBUSY;
2354 break;
2357 return ret;
2360 static void
2361 bnx2_init_context(struct bnx2 *bp)
2363 u32 vcid;
2365 vcid = 96;
2366 while (vcid) {
2367 u32 vcid_addr, pcid_addr, offset;
2368 int i;
2370 vcid--;
2372 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2373 u32 new_vcid;
2375 vcid_addr = GET_PCID_ADDR(vcid);
2376 if (vcid & 0x8) {
2377 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2379 else {
2380 new_vcid = vcid;
2382 pcid_addr = GET_PCID_ADDR(new_vcid);
2384 else {
2385 vcid_addr = GET_CID_ADDR(vcid);
2386 pcid_addr = vcid_addr;
2389 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2390 vcid_addr += (i << PHY_CTX_SHIFT);
2391 pcid_addr += (i << PHY_CTX_SHIFT);
2393 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2394 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2396 /* Zero out the context. */
2397 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2398 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
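/*
 * Worked example of the 5706 A0 VCID remap above (illustrative helper, not
 * used by the driver): VCIDs with bit 3 set are relocated into the 0x60
 * range, e.g. vcid 0x0b maps to 0x63 and vcid 0x1c maps to 0x74, while VCIDs
 * without bit 3 set keep their own number.
 */
static inline u32 example_remap_vcid_5706_a0(u32 vcid)
{
	if (vcid & 0x8)
		return 0x60 + (vcid & 0xf0) + (vcid & 0x7);
	return vcid;
}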
2403 static int
2404 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2406 u16 *good_mbuf;
2407 u32 good_mbuf_cnt;
2408 u32 val;
2410 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2411 if (good_mbuf == NULL) {
2412 printk(KERN_ERR PFX "Failed to allocate memory in "
2413 "bnx2_alloc_bad_rbuf\n");
2414 return -ENOMEM;
2417 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2418 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2420 good_mbuf_cnt = 0;
2422 /* Allocate a bunch of mbufs and save the good ones in an array. */
2423 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2424 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2425 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2426 BNX2_RBUF_COMMAND_ALLOC_REQ);
2428 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2430 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2432 /* The addresses with Bit 9 set are bad memory blocks. */
2433 if (!(val & (1 << 9))) {
2434 good_mbuf[good_mbuf_cnt] = (u16) val;
2435 good_mbuf_cnt++;
2438 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2441 /* Free the good ones back to the mbuf pool, thus discarding
2442 * all the bad ones. */
2443 while (good_mbuf_cnt) {
2444 good_mbuf_cnt--;
2446 val = good_mbuf[good_mbuf_cnt];
2447 val = (val << 9) | val | 1;
2449 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2451 kfree(good_mbuf);
2452 return 0;
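/*
 * Illustrative helper mirroring the free word built above (not used by the
 * driver; the exact field layout belongs to the hardware, only the
 * arithmetic is shown): for a good mbuf cluster number of 0x12 the value
 * written back to BNX2_RBUF_FW_BUF_FREE is (0x12 << 9) | 0x12 | 1 == 0x2413.
 */
static inline u32 example_rbuf_free_word(u16 mbuf)
{
	return ((u32) mbuf << 9) | mbuf | 1;
}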
2455 static void
2456 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2458 u32 val;
2460 val = (mac_addr[0] << 8) | mac_addr[1];
2462 REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2464 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2465 (mac_addr[4] << 8) | mac_addr[5];
2467 REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2470 static inline int
2471 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2473 dma_addr_t mapping;
2474 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2475 struct rx_bd *rxbd =
2476 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2477 struct page *page = alloc_page(GFP_ATOMIC);
2479 if (!page)
2480 return -ENOMEM;
2481 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2482 PCI_DMA_FROMDEVICE);
2483 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2484 __free_page(page);
2485 return -EIO;
2488 rx_pg->page = page;
2489 pci_unmap_addr_set(rx_pg, mapping, mapping);
2490 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2491 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2492 return 0;
2495 static void
2496 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2498 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2499 struct page *page = rx_pg->page;
2501 if (!page)
2502 return;
2504 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2505 PCI_DMA_FROMDEVICE);
2507 __free_page(page);
2508 rx_pg->page = NULL;
2511 static inline int
2512 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2514 struct sk_buff *skb;
2515 struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2516 dma_addr_t mapping;
2517 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2518 unsigned long align;
2520 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2521 if (skb == NULL) {
2522 return -ENOMEM;
2525 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2526 skb_reserve(skb, BNX2_RX_ALIGN - align);
2528 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2529 PCI_DMA_FROMDEVICE);
2530 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2531 dev_kfree_skb(skb);
2532 return -EIO;
2535 rx_buf->skb = skb;
2536 pci_unmap_addr_set(rx_buf, mapping, mapping);
2538 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2539 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2541 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2543 return 0;
2546 static int
2547 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2549 struct status_block *sblk = bnapi->status_blk.msi;
2550 u32 new_link_state, old_link_state;
2551 int is_set = 1;
2553 new_link_state = sblk->status_attn_bits & event;
2554 old_link_state = sblk->status_attn_bits_ack & event;
2555 if (new_link_state != old_link_state) {
2556 if (new_link_state)
2557 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2558 else
2559 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2560 } else
2561 is_set = 0;
2563 return is_set;
2566 static void
2567 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2569 spin_lock(&bp->phy_lock);
2571 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2572 bnx2_set_link(bp);
2573 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2574 bnx2_set_remote_link(bp);
2576 spin_unlock(&bp->phy_lock);
2580 static inline u16
2581 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2583 u16 cons;
2585 /* Tell compiler that status block fields can change. */
2586 barrier();
2587 cons = *bnapi->hw_tx_cons_ptr;
2588 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2589 cons++;
2590 return cons;
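/*
 * Illustrative sketch of the adjustment above (helper and ring size are
 * hypothetical): the last BD slot of each ring page is used to chain to the
 * next page rather than for a packet, so a raw hardware consumer value that
 * lands on it is bumped past it.  With a hypothetical 256-entry page
 * (MAX_TX_DESC_CNT == 255), 255 becomes 256 and 511 becomes 512.
 */
static inline u16 example_skip_chain_bd(u16 raw_cons)
{
	if ((raw_cons & 255) == 255)
		raw_cons++;
	return raw_cons;
}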
2593 static int
2594 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2596 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2597 u16 hw_cons, sw_cons, sw_ring_cons;
2598 int tx_pkt = 0, index;
2599 struct netdev_queue *txq;
2601 index = (bnapi - bp->bnx2_napi);
2602 txq = netdev_get_tx_queue(bp->dev, index);
2604 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2605 sw_cons = txr->tx_cons;
2607 while (sw_cons != hw_cons) {
2608 struct sw_tx_bd *tx_buf;
2609 struct sk_buff *skb;
2610 int i, last;
2612 sw_ring_cons = TX_RING_IDX(sw_cons);
2614 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2615 skb = tx_buf->skb;
2617 /* partial BD completions possible with TSO packets */
2618 if (skb_is_gso(skb)) {
2619 u16 last_idx, last_ring_idx;
2621 last_idx = sw_cons +
2622 skb_shinfo(skb)->nr_frags + 1;
2623 last_ring_idx = sw_ring_cons +
2624 skb_shinfo(skb)->nr_frags + 1;
2625 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2626 last_idx++;
2628 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2629 break;
2633 skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
2635 tx_buf->skb = NULL;
2636 last = skb_shinfo(skb)->nr_frags;
2638 for (i = 0; i < last; i++) {
2639 sw_cons = NEXT_TX_BD(sw_cons);
2642 sw_cons = NEXT_TX_BD(sw_cons);
2644 dev_kfree_skb(skb);
2645 tx_pkt++;
2646 if (tx_pkt == budget)
2647 break;
2649 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2652 txr->hw_tx_cons = hw_cons;
2653 txr->tx_cons = sw_cons;
2655 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2656 * before checking for netif_tx_queue_stopped(). Without the
2657 * memory barrier, there is a small possibility that bnx2_start_xmit()
2658 * will miss it and cause the queue to be stopped forever.
2660 smp_mb();
2662 if (unlikely(netif_tx_queue_stopped(txq)) &&
2663 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2664 __netif_tx_lock(txq, smp_processor_id());
2665 if ((netif_tx_queue_stopped(txq)) &&
2666 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2667 netif_tx_wake_queue(txq);
2668 __netif_tx_unlock(txq);
2671 return tx_pkt;
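/*
 * Illustrative sketch of the producer side that the smp_mb() above pairs
 * with (a simplified version of what bnx2_start_xmit() is expected to do,
 * not the driver's actual transmit path): stop the queue, issue a barrier,
 * then re-check the ring so a tx_cons update published by bnx2_tx_int() is
 * not missed.
 */
static void example_tx_stop_side(struct bnx2 *bp, struct bnx2_tx_ring_info *txr,
				 struct netdev_queue *txq)
{
	if (unlikely(bnx2_tx_avail(bp, txr) <= bp->tx_wake_thresh)) {
		netif_tx_stop_queue(txq);
		smp_mb();	/* pairs with the smp_mb() in bnx2_tx_int() */
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}
}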
2674 static void
2675 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2676 struct sk_buff *skb, int count)
2678 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2679 struct rx_bd *cons_bd, *prod_bd;
2680 int i;
2681 u16 hw_prod, prod;
2682 u16 cons = rxr->rx_pg_cons;
2684 cons_rx_pg = &rxr->rx_pg_ring[cons];
2686 /* The caller was unable to allocate a new page to replace the
2687 * last one in the frags array, so we need to recycle that page
2688 * and then free the skb.
2690 if (skb) {
2691 struct page *page;
2692 struct skb_shared_info *shinfo;
2694 shinfo = skb_shinfo(skb);
2695 shinfo->nr_frags--;
2696 page = shinfo->frags[shinfo->nr_frags].page;
2697 shinfo->frags[shinfo->nr_frags].page = NULL;
2699 cons_rx_pg->page = page;
2700 dev_kfree_skb(skb);
2703 hw_prod = rxr->rx_pg_prod;
2705 for (i = 0; i < count; i++) {
2706 prod = RX_PG_RING_IDX(hw_prod);
2708 prod_rx_pg = &rxr->rx_pg_ring[prod];
2709 cons_rx_pg = &rxr->rx_pg_ring[cons];
2710 cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2711 prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2713 if (prod != cons) {
2714 prod_rx_pg->page = cons_rx_pg->page;
2715 cons_rx_pg->page = NULL;
2716 pci_unmap_addr_set(prod_rx_pg, mapping,
2717 pci_unmap_addr(cons_rx_pg, mapping));
2719 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2720 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2723 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2724 hw_prod = NEXT_RX_BD(hw_prod);
2726 rxr->rx_pg_prod = hw_prod;
2727 rxr->rx_pg_cons = cons;
2730 static inline void
2731 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2732 struct sk_buff *skb, u16 cons, u16 prod)
2734 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2735 struct rx_bd *cons_bd, *prod_bd;
2737 cons_rx_buf = &rxr->rx_buf_ring[cons];
2738 prod_rx_buf = &rxr->rx_buf_ring[prod];
2740 pci_dma_sync_single_for_device(bp->pdev,
2741 pci_unmap_addr(cons_rx_buf, mapping),
2742 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2744 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2746 prod_rx_buf->skb = skb;
2748 if (cons == prod)
2749 return;
2751 pci_unmap_addr_set(prod_rx_buf, mapping,
2752 pci_unmap_addr(cons_rx_buf, mapping));
2754 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2755 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2756 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2757 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2760 static int
2761 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
2762 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2763 u32 ring_idx)
2765 int err;
2766 u16 prod = ring_idx & 0xffff;
2768 err = bnx2_alloc_rx_skb(bp, rxr, prod);
2769 if (unlikely(err)) {
2770 bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
2771 if (hdr_len) {
2772 unsigned int raw_len = len + 4;
2773 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2775 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
2777 return err;
2780 skb_reserve(skb, BNX2_RX_OFFSET);
2781 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2782 PCI_DMA_FROMDEVICE);
2784 if (hdr_len == 0) {
2785 skb_put(skb, len);
2786 return 0;
2787 } else {
2788 unsigned int i, frag_len, frag_size, pages;
2789 struct sw_pg *rx_pg;
2790 u16 pg_cons = rxr->rx_pg_cons;
2791 u16 pg_prod = rxr->rx_pg_prod;
2793 frag_size = len + 4 - hdr_len;
2794 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2795 skb_put(skb, hdr_len);
2797 for (i = 0; i < pages; i++) {
2798 dma_addr_t mapping_old;
2800 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2801 if (unlikely(frag_len <= 4)) {
2802 unsigned int tail = 4 - frag_len;
2804 rxr->rx_pg_cons = pg_cons;
2805 rxr->rx_pg_prod = pg_prod;
2806 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
2807 pages - i);
2808 skb->len -= tail;
2809 if (i == 0) {
2810 skb->tail -= tail;
2811 } else {
2812 skb_frag_t *frag =
2813 &skb_shinfo(skb)->frags[i - 1];
2814 frag->size -= tail;
2815 skb->data_len -= tail;
2816 skb->truesize -= tail;
2818 return 0;
2820 rx_pg = &rxr->rx_pg_ring[pg_cons];
2822 /* Don't unmap yet. If we're unable to allocate a new
2823 * page, we need to recycle the page and the DMA addr.
2825 mapping_old = pci_unmap_addr(rx_pg, mapping);
2826 if (i == pages - 1)
2827 frag_len -= 4;
2829 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
2830 rx_pg->page = NULL;
2832 err = bnx2_alloc_rx_page(bp, rxr,
2833 RX_PG_RING_IDX(pg_prod));
2834 if (unlikely(err)) {
2835 rxr->rx_pg_cons = pg_cons;
2836 rxr->rx_pg_prod = pg_prod;
2837 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
2838 pages - i);
2839 return err;
2842 pci_unmap_page(bp->pdev, mapping_old,
2843 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2845 frag_size -= frag_len;
2846 skb->data_len += frag_len;
2847 skb->truesize += frag_len;
2848 skb->len += frag_len;
2850 pg_prod = NEXT_RX_BD(pg_prod);
2851 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
2853 rxr->rx_pg_prod = pg_prod;
2854 rxr->rx_pg_cons = pg_cons;
2856 return 0;
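/*
 * Worked example of the page-ring math above (all values hypothetical):
 * with 4 KiB pages, a frame with len == 9014 (CRC already subtracted by the
 * caller) split at hdr_len == 256 leaves frag_size == 9014 + 4 - 256 == 8762
 * bytes for the page ring, so pages == PAGE_ALIGN(8762) >> PAGE_SHIFT == 3.
 * The frags then carry 4096 + 4096 + 570 bytes, with the last one trimmed by
 * 4 to drop the trailing CRC, giving 256 + 4096 + 4096 + 566 == 9014 bytes
 * of packet data in the skb.
 */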
2859 static inline u16
2860 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2862 u16 cons;
2864 /* Tell compiler that status block fields can change. */
2865 barrier();
2866 cons = *bnapi->hw_rx_cons_ptr;
2867 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2868 cons++;
2869 return cons;
2872 static int
2873 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2875 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
2876 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2877 struct l2_fhdr *rx_hdr;
2878 int rx_pkt = 0, pg_ring_used = 0;
2880 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2881 sw_cons = rxr->rx_cons;
2882 sw_prod = rxr->rx_prod;
2884 /* Memory barrier necessary as speculative reads of the rx
2885 * buffer can be ahead of the index in the status block
2887 rmb();
2888 while (sw_cons != hw_cons) {
2889 unsigned int len, hdr_len;
2890 u32 status;
2891 struct sw_bd *rx_buf;
2892 struct sk_buff *skb;
2893 dma_addr_t dma_addr;
2894 u16 vtag = 0;
2895 int hw_vlan __maybe_unused = 0;
2897 sw_ring_cons = RX_RING_IDX(sw_cons);
2898 sw_ring_prod = RX_RING_IDX(sw_prod);
2900 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
2901 skb = rx_buf->skb;
2903 rx_buf->skb = NULL;
2905 dma_addr = pci_unmap_addr(rx_buf, mapping);
2907 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2908 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
2909 PCI_DMA_FROMDEVICE);
2911 rx_hdr = (struct l2_fhdr *) skb->data;
2912 len = rx_hdr->l2_fhdr_pkt_len;
2914 if ((status = rx_hdr->l2_fhdr_status) &
2915 (L2_FHDR_ERRORS_BAD_CRC |
2916 L2_FHDR_ERRORS_PHY_DECODE |
2917 L2_FHDR_ERRORS_ALIGNMENT |
2918 L2_FHDR_ERRORS_TOO_SHORT |
2919 L2_FHDR_ERRORS_GIANT_FRAME)) {
2921 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
2922 sw_ring_prod);
2923 goto next_rx;
2925 hdr_len = 0;
2926 if (status & L2_FHDR_STATUS_SPLIT) {
2927 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
2928 pg_ring_used = 1;
2929 } else if (len > bp->rx_jumbo_thresh) {
2930 hdr_len = bp->rx_jumbo_thresh;
2931 pg_ring_used = 1;
2934 len -= 4;
2936 if (len <= bp->rx_copy_thresh) {
2937 struct sk_buff *new_skb;
2939 new_skb = netdev_alloc_skb(bp->dev, len + 6);
2940 if (new_skb == NULL) {
2941 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
2942 sw_ring_prod);
2943 goto next_rx;
2946 /* aligned copy */
2947 skb_copy_from_linear_data_offset(skb,
2948 BNX2_RX_OFFSET - 6,
2949 new_skb->data, len + 6);
2950 skb_reserve(new_skb, 6);
2951 skb_put(new_skb, len);
2953 bnx2_reuse_rx_skb(bp, rxr, skb,
2954 sw_ring_cons, sw_ring_prod);
2956 skb = new_skb;
2957 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
2958 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
2959 goto next_rx;
2961 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
2962 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
2963 vtag = rx_hdr->l2_fhdr_vlan_tag;
2964 #ifdef BCM_VLAN
2965 if (bp->vlgrp)
2966 hw_vlan = 1;
2967 else
2968 #endif
2970 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
2971 __skb_push(skb, 4);
2973 memmove(ve, skb->data + 4, ETH_ALEN * 2);
2974 ve->h_vlan_proto = htons(ETH_P_8021Q);
2975 ve->h_vlan_TCI = htons(vtag);
2976 len += 4;
2980 skb->protocol = eth_type_trans(skb, bp->dev);
2982 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2983 (ntohs(skb->protocol) != 0x8100)) {
2985 dev_kfree_skb(skb);
2986 goto next_rx;
2990 skb->ip_summed = CHECKSUM_NONE;
2991 if (bp->rx_csum &&
2992 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2993 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2995 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2996 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2997 skb->ip_summed = CHECKSUM_UNNECESSARY;
3000 #ifdef BCM_VLAN
3001 if (hw_vlan)
3002 vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
3003 else
3004 #endif
3005 netif_receive_skb(skb);
3007 rx_pkt++;
3009 next_rx:
3010 sw_cons = NEXT_RX_BD(sw_cons);
3011 sw_prod = NEXT_RX_BD(sw_prod);
3013 if (rx_pkt == budget)
3014 break;
3016 /* Refresh hw_cons to see if there is new work */
3017 if (sw_cons == hw_cons) {
3018 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3019 rmb();
3022 rxr->rx_cons = sw_cons;
3023 rxr->rx_prod = sw_prod;
3025 if (pg_ring_used)
3026 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3028 REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3030 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3032 mmiowb();
3034 return rx_pkt;
3038 /* MSI ISR - The only difference between this and the INTx ISR
3039 * is that the MSI interrupt is always serviced.
3041 static irqreturn_t
3042 bnx2_msi(int irq, void *dev_instance)
3044 struct bnx2_napi *bnapi = dev_instance;
3045 struct bnx2 *bp = bnapi->bp;
3046 struct net_device *dev = bp->dev;
3048 prefetch(bnapi->status_blk.msi);
3049 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3050 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3051 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3053 /* Return here if interrupt is disabled. */
3054 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3055 return IRQ_HANDLED;
3057 netif_rx_schedule(dev, &bnapi->napi);
3059 return IRQ_HANDLED;
3062 static irqreturn_t
3063 bnx2_msi_1shot(int irq, void *dev_instance)
3065 struct bnx2_napi *bnapi = dev_instance;
3066 struct bnx2 *bp = bnapi->bp;
3067 struct net_device *dev = bp->dev;
3069 prefetch(bnapi->status_blk.msi);
3071 /* Return here if interrupt is disabled. */
3072 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3073 return IRQ_HANDLED;
3075 netif_rx_schedule(dev, &bnapi->napi);
3077 return IRQ_HANDLED;
3080 static irqreturn_t
3081 bnx2_interrupt(int irq, void *dev_instance)
3083 struct bnx2_napi *bnapi = dev_instance;
3084 struct bnx2 *bp = bnapi->bp;
3085 struct net_device *dev = bp->dev;
3086 struct status_block *sblk = bnapi->status_blk.msi;
3088 /* When using INTx, it is possible for the interrupt to arrive
3089 * at the CPU before the status block write posted prior to the
3090 * interrupt has completed. Reading a register will flush the status block.
3091 * When using MSI, the MSI message will always complete after
3092 * the status block write.
3094 if ((sblk->status_idx == bnapi->last_status_idx) &&
3095 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3096 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3097 return IRQ_NONE;
3099 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3100 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3101 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3103 /* Read back to deassert IRQ immediately to avoid too many
3104 * spurious interrupts.
3106 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3108 /* Return here if interrupt is shared and is disabled. */
3109 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3110 return IRQ_HANDLED;
3112 if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
3113 bnapi->last_status_idx = sblk->status_idx;
3114 __netif_rx_schedule(dev, &bnapi->napi);
3117 return IRQ_HANDLED;
3120 static inline int
3121 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3123 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3124 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3126 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3127 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3128 return 1;
3129 return 0;
3132 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3133 STATUS_ATTN_BITS_TIMER_ABORT)
3135 static inline int
3136 bnx2_has_work(struct bnx2_napi *bnapi)
3138 struct status_block *sblk = bnapi->status_blk.msi;
3140 if (bnx2_has_fast_work(bnapi))
3141 return 1;
3143 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3144 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3145 return 1;
3147 return 0;
3150 static void
3151 bnx2_chk_missed_msi(struct bnx2 *bp)
3153 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3154 u32 msi_ctrl;
3156 if (bnx2_has_work(bnapi)) {
3157 msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3158 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3159 return;
3161 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3162 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3163 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3164 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3165 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3169 bp->idle_chk_status_idx = bnapi->last_status_idx;
3172 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3174 struct status_block *sblk = bnapi->status_blk.msi;
3175 u32 status_attn_bits = sblk->status_attn_bits;
3176 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3178 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3179 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3181 bnx2_phy_int(bp, bnapi);
3183 /* This is needed to take care of transient status
3184 * during link changes.
3186 REG_WR(bp, BNX2_HC_COMMAND,
3187 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3188 REG_RD(bp, BNX2_HC_COMMAND);
3192 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3193 int work_done, int budget)
3195 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3196 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3198 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3199 bnx2_tx_int(bp, bnapi, 0);
3201 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3202 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3204 return work_done;
3207 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3209 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3210 struct bnx2 *bp = bnapi->bp;
3211 int work_done = 0;
3212 struct status_block_msix *sblk = bnapi->status_blk.msix;
3214 while (1) {
3215 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3216 if (unlikely(work_done >= budget))
3217 break;
3219 bnapi->last_status_idx = sblk->status_idx;
3220 /* status idx must be read before checking for more work. */
3221 rmb();
3222 if (likely(!bnx2_has_fast_work(bnapi))) {
3224 netif_rx_complete(bp->dev, napi);
3225 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3226 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3227 bnapi->last_status_idx);
3228 break;
3231 return work_done;
3234 static int bnx2_poll(struct napi_struct *napi, int budget)
3236 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3237 struct bnx2 *bp = bnapi->bp;
3238 int work_done = 0;
3239 struct status_block *sblk = bnapi->status_blk.msi;
3241 while (1) {
3242 bnx2_poll_link(bp, bnapi);
3244 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3246 /* bnapi->last_status_idx is used below to tell the hw how
3247 * much work has been processed, so we must read it before
3248 * checking for more work.
3250 bnapi->last_status_idx = sblk->status_idx;
3252 if (unlikely(work_done >= budget))
3253 break;
3255 rmb();
3256 if (likely(!bnx2_has_work(bnapi))) {
3257 netif_rx_complete(bp->dev, napi);
3258 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3259 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3260 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3261 bnapi->last_status_idx);
3262 break;
3264 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3265 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3266 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3267 bnapi->last_status_idx);
3269 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3270 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3271 bnapi->last_status_idx);
3272 break;
3276 return work_done;
3279 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3280 * from set_multicast.
3282 static void
3283 bnx2_set_rx_mode(struct net_device *dev)
3285 struct bnx2 *bp = netdev_priv(dev);
3286 u32 rx_mode, sort_mode;
3287 struct dev_addr_list *uc_ptr;
3288 int i;
3290 if (!netif_running(dev))
3291 return;
3293 spin_lock_bh(&bp->phy_lock);
3295 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3296 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3297 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3298 #ifdef BCM_VLAN
3299 if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3300 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3301 #else
3302 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
3303 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3304 #endif
3305 if (dev->flags & IFF_PROMISC) {
3306 /* Promiscuous mode. */
3307 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3308 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3309 BNX2_RPM_SORT_USER0_PROM_VLAN;
3311 else if (dev->flags & IFF_ALLMULTI) {
3312 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3313 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3314 0xffffffff);
3316 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3318 else {
3319 /* Accept one or more multicast(s). */
3320 struct dev_mc_list *mclist;
3321 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3322 u32 regidx;
3323 u32 bit;
3324 u32 crc;
3326 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3328 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
3329 i++, mclist = mclist->next) {
3331 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3332 bit = crc & 0xff;
3333 regidx = (bit & 0xe0) >> 5;
3334 bit &= 0x1f;
3335 mc_filter[regidx] |= (1 << bit);
3338 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3339 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3340 mc_filter[i]);
3343 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3346 uc_ptr = NULL;
3347 if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) {
3348 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3349 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3350 BNX2_RPM_SORT_USER0_PROM_VLAN;
3351 } else if (!(dev->flags & IFF_PROMISC)) {
3352 uc_ptr = dev->uc_list;
3354 /* Add all entries into the match filter list */
3355 for (i = 0; i < dev->uc_count; i++) {
3356 bnx2_set_mac_addr(bp, uc_ptr->da_addr,
3357 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3358 sort_mode |= (1 <<
3359 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3360 uc_ptr = uc_ptr->next;
3365 if (rx_mode != bp->rx_mode) {
3366 bp->rx_mode = rx_mode;
3367 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3370 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3371 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3372 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3374 spin_unlock_bh(&bp->phy_lock);
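/*
 * Worked example of the multicast hash above (illustrative helper, not used
 * by the driver): the low byte of the little-endian CRC selects one of 256
 * hash bits; the top three of those eight bits pick the register and the
 * bottom five pick the bit within it.  A CRC whose low byte is 0xa7
 * therefore sets bit 7 of mc_filter[5].
 */
static void example_mc_hash_bit(const u8 *addr, u32 *mc_filter)
{
	u32 crc = ether_crc_le(ETH_ALEN, addr);
	u32 bit = crc & 0xff;
	u32 regidx = (bit & 0xe0) >> 5;

	mc_filter[regidx] |= 1 << (bit & 0x1f);
}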
3377 static void
3378 load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
3379 u32 rv2p_proc)
3381 int i;
3382 u32 val;
3384 if (rv2p_proc == RV2P_PROC2 && CHIP_NUM(bp) == CHIP_NUM_5709) {
3385 val = le32_to_cpu(rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC]);
3386 val &= ~XI_RV2P_PROC2_BD_PAGE_SIZE_MSK;
3387 val |= XI_RV2P_PROC2_BD_PAGE_SIZE;
3388 rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC] = cpu_to_le32(val);
3391 for (i = 0; i < rv2p_code_len; i += 8) {
3392 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
3393 rv2p_code++;
3394 REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
3395 rv2p_code++;
3397 if (rv2p_proc == RV2P_PROC1) {
3398 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3399 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3401 else {
3402 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3403 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3407 /* Reset the processor; the un-stall is done later. */
3408 if (rv2p_proc == RV2P_PROC1) {
3409 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3411 else {
3412 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3416 static int
3417 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, struct fw_info *fw)
3419 u32 offset;
3420 u32 val;
3421 int rc;
3423 /* Halt the CPU. */
3424 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3425 val |= cpu_reg->mode_value_halt;
3426 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3427 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3429 /* Load the Text area. */
3430 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3431 if (fw->gz_text) {
3432 int j;
3434 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3435 fw->gz_text_len);
3436 if (rc < 0)
3437 return rc;
3439 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3440 bnx2_reg_wr_ind(bp, offset, le32_to_cpu(fw->text[j]));
3444 /* Load the Data area. */
3445 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3446 if (fw->data) {
3447 int j;
3449 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3450 bnx2_reg_wr_ind(bp, offset, fw->data[j]);
3454 /* Load the SBSS area. */
3455 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3456 if (fw->sbss_len) {
3457 int j;
3459 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3460 bnx2_reg_wr_ind(bp, offset, 0);
3464 /* Load the BSS area. */
3465 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3466 if (fw->bss_len) {
3467 int j;
3469 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3470 bnx2_reg_wr_ind(bp, offset, 0);
3474 /* Load the Read-Only area. */
3475 offset = cpu_reg->spad_base +
3476 (fw->rodata_addr - cpu_reg->mips_view_base);
3477 if (fw->rodata) {
3478 int j;
3480 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3481 bnx2_reg_wr_ind(bp, offset, fw->rodata[j]);
3485 /* Clear the pre-fetch instruction. */
3486 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3487 bnx2_reg_wr_ind(bp, cpu_reg->pc, fw->start_addr);
3489 /* Start the CPU. */
3490 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3491 val &= ~cpu_reg->mode_value_halt;
3492 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3493 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3495 return 0;
3498 static int
3499 bnx2_init_cpus(struct bnx2 *bp)
3501 struct fw_info *fw;
3502 int rc, rv2p_len;
3503 void *text, *rv2p;
3505 /* Initialize the RV2P processor. */
3506 text = vmalloc(FW_BUF_SIZE);
3507 if (!text)
3508 return -ENOMEM;
3509 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3510 rv2p = bnx2_xi_rv2p_proc1;
3511 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3512 } else {
3513 rv2p = bnx2_rv2p_proc1;
3514 rv2p_len = sizeof(bnx2_rv2p_proc1);
3516 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3517 if (rc < 0)
3518 goto init_cpu_err;
3520 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3522 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3523 rv2p = bnx2_xi_rv2p_proc2;
3524 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3525 } else {
3526 rv2p = bnx2_rv2p_proc2;
3527 rv2p_len = sizeof(bnx2_rv2p_proc2);
3529 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3530 if (rc < 0)
3531 goto init_cpu_err;
3533 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3535 /* Initialize the RX Processor. */
3536 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3537 fw = &bnx2_rxp_fw_09;
3538 else
3539 fw = &bnx2_rxp_fw_06;
3541 fw->text = text;
3542 rc = load_cpu_fw(bp, &cpu_reg_rxp, fw);
3543 if (rc)
3544 goto init_cpu_err;
3546 /* Initialize the TX Processor. */
3547 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3548 fw = &bnx2_txp_fw_09;
3549 else
3550 fw = &bnx2_txp_fw_06;
3552 fw->text = text;
3553 rc = load_cpu_fw(bp, &cpu_reg_txp, fw);
3554 if (rc)
3555 goto init_cpu_err;
3557 /* Initialize the TX Patch-up Processor. */
3558 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3559 fw = &bnx2_tpat_fw_09;
3560 else
3561 fw = &bnx2_tpat_fw_06;
3563 fw->text = text;
3564 rc = load_cpu_fw(bp, &cpu_reg_tpat, fw);
3565 if (rc)
3566 goto init_cpu_err;
3568 /* Initialize the Completion Processor. */
3569 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3570 fw = &bnx2_com_fw_09;
3571 else
3572 fw = &bnx2_com_fw_06;
3574 fw->text = text;
3575 rc = load_cpu_fw(bp, &cpu_reg_com, fw);
3576 if (rc)
3577 goto init_cpu_err;
3579 /* Initialize the Command Processor. */
3580 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3581 fw = &bnx2_cp_fw_09;
3582 else
3583 fw = &bnx2_cp_fw_06;
3585 fw->text = text;
3586 rc = load_cpu_fw(bp, &cpu_reg_cp, fw);
3588 init_cpu_err:
3589 vfree(text);
3590 return rc;
3593 static int
3594 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3596 u16 pmcsr;
3598 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3600 switch (state) {
3601 case PCI_D0: {
3602 u32 val;
3604 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3605 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3606 PCI_PM_CTRL_PME_STATUS);
3608 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3609 /* delay required during transition out of D3hot */
3610 msleep(20);
3612 val = REG_RD(bp, BNX2_EMAC_MODE);
3613 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3614 val &= ~BNX2_EMAC_MODE_MPKT;
3615 REG_WR(bp, BNX2_EMAC_MODE, val);
3617 val = REG_RD(bp, BNX2_RPM_CONFIG);
3618 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3619 REG_WR(bp, BNX2_RPM_CONFIG, val);
3620 break;
3622 case PCI_D3hot: {
3623 int i;
3624 u32 val, wol_msg;
3626 if (bp->wol) {
3627 u32 advertising;
3628 u8 autoneg;
3630 autoneg = bp->autoneg;
3631 advertising = bp->advertising;
3633 if (bp->phy_port == PORT_TP) {
3634 bp->autoneg = AUTONEG_SPEED;
3635 bp->advertising = ADVERTISED_10baseT_Half |
3636 ADVERTISED_10baseT_Full |
3637 ADVERTISED_100baseT_Half |
3638 ADVERTISED_100baseT_Full |
3639 ADVERTISED_Autoneg;
3642 spin_lock_bh(&bp->phy_lock);
3643 bnx2_setup_phy(bp, bp->phy_port);
3644 spin_unlock_bh(&bp->phy_lock);
3646 bp->autoneg = autoneg;
3647 bp->advertising = advertising;
3649 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3651 val = REG_RD(bp, BNX2_EMAC_MODE);
3653 /* Enable port mode. */
3654 val &= ~BNX2_EMAC_MODE_PORT;
3655 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3656 BNX2_EMAC_MODE_ACPI_RCVD |
3657 BNX2_EMAC_MODE_MPKT;
3658 if (bp->phy_port == PORT_TP)
3659 val |= BNX2_EMAC_MODE_PORT_MII;
3660 else {
3661 val |= BNX2_EMAC_MODE_PORT_GMII;
3662 if (bp->line_speed == SPEED_2500)
3663 val |= BNX2_EMAC_MODE_25G_MODE;
3666 REG_WR(bp, BNX2_EMAC_MODE, val);
3668 /* receive all multicast */
3669 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3670 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3671 0xffffffff);
3673 REG_WR(bp, BNX2_EMAC_RX_MODE,
3674 BNX2_EMAC_RX_MODE_SORT_MODE);
3676 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3677 BNX2_RPM_SORT_USER0_MC_EN;
3678 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3679 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3680 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3681 BNX2_RPM_SORT_USER0_ENA);
3683 /* Need to enable EMAC and RPM for WOL. */
3684 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3685 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3686 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3687 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3689 val = REG_RD(bp, BNX2_RPM_CONFIG);
3690 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3691 REG_WR(bp, BNX2_RPM_CONFIG, val);
3693 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3695 else {
3696 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3699 if (!(bp->flags & BNX2_FLAG_NO_WOL))
3700 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
3701 1, 0);
3703 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3704 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3705 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3707 if (bp->wol)
3708 pmcsr |= 3;
3710 else {
3711 pmcsr |= 3;
3713 if (bp->wol) {
3714 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3716 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3717 pmcsr);
3719 /* No more memory access after this point until
3720 * device is brought back to D0.
3722 udelay(50);
3723 break;
3725 default:
3726 return -EINVAL;
3728 return 0;
3731 static int
3732 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3734 u32 val;
3735 int j;
3737 /* Request access to the flash interface. */
3738 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3739 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3740 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3741 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3742 break;
3744 udelay(5);
3747 if (j >= NVRAM_TIMEOUT_COUNT)
3748 return -EBUSY;
3750 return 0;
3753 static int
3754 bnx2_release_nvram_lock(struct bnx2 *bp)
3756 int j;
3757 u32 val;
3759 /* Relinquish nvram interface. */
3760 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3762 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3763 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3764 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3765 break;
3767 udelay(5);
3770 if (j >= NVRAM_TIMEOUT_COUNT)
3771 return -EBUSY;
3773 return 0;
3777 static int
3778 bnx2_enable_nvram_write(struct bnx2 *bp)
3780 u32 val;
3782 val = REG_RD(bp, BNX2_MISC_CFG);
3783 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3785 if (bp->flash_info->flags & BNX2_NV_WREN) {
3786 int j;
3788 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3789 REG_WR(bp, BNX2_NVM_COMMAND,
3790 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3792 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3793 udelay(5);
3795 val = REG_RD(bp, BNX2_NVM_COMMAND);
3796 if (val & BNX2_NVM_COMMAND_DONE)
3797 break;
3800 if (j >= NVRAM_TIMEOUT_COUNT)
3801 return -EBUSY;
3803 return 0;
3806 static void
3807 bnx2_disable_nvram_write(struct bnx2 *bp)
3809 u32 val;
3811 val = REG_RD(bp, BNX2_MISC_CFG);
3812 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3816 static void
3817 bnx2_enable_nvram_access(struct bnx2 *bp)
3819 u32 val;
3821 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3822 /* Enable both bits, even on read. */
3823 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3824 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3827 static void
3828 bnx2_disable_nvram_access(struct bnx2 *bp)
3830 u32 val;
3832 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3833 /* Disable both bits, even after read. */
3834 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3835 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3836 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3839 static int
3840 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3842 u32 cmd;
3843 int j;
3845 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3846 /* Buffered flash, no erase needed */
3847 return 0;
3849 /* Build an erase command */
3850 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3851 BNX2_NVM_COMMAND_DOIT;
3853 /* Need to clear DONE bit separately. */
3854 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3856 /* Address of the NVRAM page to erase. */
3857 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3859 /* Issue an erase command. */
3860 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3862 /* Wait for completion. */
3863 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3864 u32 val;
3866 udelay(5);
3868 val = REG_RD(bp, BNX2_NVM_COMMAND);
3869 if (val & BNX2_NVM_COMMAND_DONE)
3870 break;
3873 if (j >= NVRAM_TIMEOUT_COUNT)
3874 return -EBUSY;
3876 return 0;
3879 static int
3880 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3882 u32 cmd;
3883 int j;
3885 /* Build the command word. */
3886 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3888 /* Calculate the offset within a buffered flash; not needed for the 5709. */
3889 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3890 offset = ((offset / bp->flash_info->page_size) <<
3891 bp->flash_info->page_bits) +
3892 (offset % bp->flash_info->page_size);
3895 /* Need to clear DONE bit separately. */
3896 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3898 /* Address of the NVRAM to read from. */
3899 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3901 /* Issue a read command. */
3902 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3904 /* Wait for completion. */
3905 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3906 u32 val;
3908 udelay(5);
3910 val = REG_RD(bp, BNX2_NVM_COMMAND);
3911 if (val & BNX2_NVM_COMMAND_DONE) {
3912 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3913 memcpy(ret_val, &v, 4);
3914 break;
3917 if (j >= NVRAM_TIMEOUT_COUNT)
3918 return -EBUSY;
3920 return 0;
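/*
 * Worked example of the buffered-flash address translation above (values
 * hypothetical): with a 264-byte page_size and page_bits == 9, a linear byte
 * offset of 600 is page 2, byte 72, so the device address becomes
 * (2 << 9) + 72 == 1096.
 */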
3924 static int
3925 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3927 u32 cmd;
3928 __be32 val32;
3929 int j;
3931 /* Build the command word. */
3932 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3934 /* Calculate the offset within a buffered flash; not needed for the 5709. */
3935 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3936 offset = ((offset / bp->flash_info->page_size) <<
3937 bp->flash_info->page_bits) +
3938 (offset % bp->flash_info->page_size);
3941 /* Need to clear DONE bit separately. */
3942 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3944 memcpy(&val32, val, 4);
3946 /* Write the data. */
3947 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
3949 /* Address of the NVRAM to write to. */
3950 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3952 /* Issue the write command. */
3953 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3955 /* Wait for completion. */
3956 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3957 udelay(5);
3959 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3960 break;
3962 if (j >= NVRAM_TIMEOUT_COUNT)
3963 return -EBUSY;
3965 return 0;
3968 static int
3969 bnx2_init_nvram(struct bnx2 *bp)
3971 u32 val;
3972 int j, entry_count, rc = 0;
3973 struct flash_spec *flash;
3975 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3976 bp->flash_info = &flash_5709;
3977 goto get_flash_size;
3980 /* Determine the selected interface. */
3981 val = REG_RD(bp, BNX2_NVM_CFG1);
3983 entry_count = ARRAY_SIZE(flash_table);
3985 if (val & 0x40000000) {
3987 /* Flash interface has been reconfigured */
3988 for (j = 0, flash = &flash_table[0]; j < entry_count;
3989 j++, flash++) {
3990 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3991 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3992 bp->flash_info = flash;
3993 break;
3997 else {
3998 u32 mask;
3999 /* Not yet reconfigured */
4001 if (val & (1 << 23))
4002 mask = FLASH_BACKUP_STRAP_MASK;
4003 else
4004 mask = FLASH_STRAP_MASK;
4006 for (j = 0, flash = &flash_table[0]; j < entry_count;
4007 j++, flash++) {
4009 if ((val & mask) == (flash->strapping & mask)) {
4010 bp->flash_info = flash;
4012 /* Request access to the flash interface. */
4013 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4014 return rc;
4016 /* Enable access to flash interface */
4017 bnx2_enable_nvram_access(bp);
4019 /* Reconfigure the flash interface */
4020 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
4021 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
4022 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
4023 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4025 /* Disable access to flash interface */
4026 bnx2_disable_nvram_access(bp);
4027 bnx2_release_nvram_lock(bp);
4029 break;
4032 } /* if (val & 0x40000000) */
4034 if (j == entry_count) {
4035 bp->flash_info = NULL;
4036 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
4037 return -ENODEV;
4040 get_flash_size:
4041 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4042 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4043 if (val)
4044 bp->flash_size = val;
4045 else
4046 bp->flash_size = bp->flash_info->total_size;
4048 return rc;
4051 static int
4052 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4053 int buf_size)
4055 int rc = 0;
4056 u32 cmd_flags, offset32, len32, extra;
4058 if (buf_size == 0)
4059 return 0;
4061 /* Request access to the flash interface. */
4062 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4063 return rc;
4065 /* Enable access to flash interface */
4066 bnx2_enable_nvram_access(bp);
4068 len32 = buf_size;
4069 offset32 = offset;
4070 extra = 0;
4072 cmd_flags = 0;
4074 if (offset32 & 3) {
4075 u8 buf[4];
4076 u32 pre_len;
4078 offset32 &= ~3;
4079 pre_len = 4 - (offset & 3);
4081 if (pre_len >= len32) {
4082 pre_len = len32;
4083 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4084 BNX2_NVM_COMMAND_LAST;
4086 else {
4087 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4090 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4092 if (rc)
4093 return rc;
4095 memcpy(ret_buf, buf + (offset & 3), pre_len);
4097 offset32 += 4;
4098 ret_buf += pre_len;
4099 len32 -= pre_len;
4101 if (len32 & 3) {
4102 extra = 4 - (len32 & 3);
4103 len32 = (len32 + 4) & ~3;
4106 if (len32 == 4) {
4107 u8 buf[4];
4109 if (cmd_flags)
4110 cmd_flags = BNX2_NVM_COMMAND_LAST;
4111 else
4112 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4113 BNX2_NVM_COMMAND_LAST;
4115 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4117 memcpy(ret_buf, buf, 4 - extra);
4119 else if (len32 > 0) {
4120 u8 buf[4];
4122 /* Read the first word. */
4123 if (cmd_flags)
4124 cmd_flags = 0;
4125 else
4126 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4128 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4130 /* Advance to the next dword. */
4131 offset32 += 4;
4132 ret_buf += 4;
4133 len32 -= 4;
4135 while (len32 > 4 && rc == 0) {
4136 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4138 /* Advance to the next dword. */
4139 offset32 += 4;
4140 ret_buf += 4;
4141 len32 -= 4;
4144 if (rc)
4145 return rc;
4147 cmd_flags = BNX2_NVM_COMMAND_LAST;
4148 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4150 memcpy(ret_buf, buf, 4 - extra);
4153 /* Disable access to flash interface */
4154 bnx2_disable_nvram_access(bp);
4156 bnx2_release_nvram_lock(bp);
4158 return rc;
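/*
 * Illustrative usage of the alignment handling above (helper name and values
 * hypothetical): a read aligned to neither end still returns exactly the
 * requested bytes.  For bnx2_nvram_read(bp, 6, buf, 5), the head dword read
 * at offset 4 supplies bytes 6-7 (pre_len == 2), the tail dword read at
 * offset 8 supplies bytes 8-10, and the single pad byte (extra == 1) is
 * discarded.
 */
static int example_unaligned_nvram_read(struct bnx2 *bp, u8 *buf)
{
	return bnx2_nvram_read(bp, 6, buf, 5);
}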
4161 static int
4162 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4163 int buf_size)
4165 u32 written, offset32, len32;
4166 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4167 int rc = 0;
4168 int align_start, align_end;
4170 buf = data_buf;
4171 offset32 = offset;
4172 len32 = buf_size;
4173 align_start = align_end = 0;
4175 if ((align_start = (offset32 & 3))) {
4176 offset32 &= ~3;
4177 len32 += align_start;
4178 if (len32 < 4)
4179 len32 = 4;
4180 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4181 return rc;
4184 if (len32 & 3) {
4185 align_end = 4 - (len32 & 3);
4186 len32 += align_end;
4187 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4188 return rc;
4191 if (align_start || align_end) {
4192 align_buf = kmalloc(len32, GFP_KERNEL);
4193 if (align_buf == NULL)
4194 return -ENOMEM;
4195 if (align_start) {
4196 memcpy(align_buf, start, 4);
4198 if (align_end) {
4199 memcpy(align_buf + len32 - 4, end, 4);
4201 memcpy(align_buf + align_start, data_buf, buf_size);
4202 buf = align_buf;
4205 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4206 flash_buffer = kmalloc(264, GFP_KERNEL);
4207 if (flash_buffer == NULL) {
4208 rc = -ENOMEM;
4209 goto nvram_write_end;
4213 written = 0;
4214 while ((written < len32) && (rc == 0)) {
4215 u32 page_start, page_end, data_start, data_end;
4216 u32 addr, cmd_flags;
4217 int i;
4219 /* Find the page_start addr */
4220 page_start = offset32 + written;
4221 page_start -= (page_start % bp->flash_info->page_size);
4222 /* Find the page_end addr */
4223 page_end = page_start + bp->flash_info->page_size;
4224 /* Find the data_start addr */
4225 data_start = (written == 0) ? offset32 : page_start;
4226 /* Find the data_end addr */
4227 data_end = (page_end > offset32 + len32) ?
4228 (offset32 + len32) : page_end;
4230 /* Request access to the flash interface. */
4231 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4232 goto nvram_write_end;
4234 /* Enable access to flash interface */
4235 bnx2_enable_nvram_access(bp);
4237 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4238 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4239 int j;
4241 /* Read the whole page into the buffer
4242 * (non-buffered flash only) */
4243 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4244 if (j == (bp->flash_info->page_size - 4)) {
4245 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4247 rc = bnx2_nvram_read_dword(bp,
4248 page_start + j,
4249 &flash_buffer[j],
4250 cmd_flags);
4252 if (rc)
4253 goto nvram_write_end;
4255 cmd_flags = 0;
4259 /* Enable writes to flash interface (unlock write-protect) */
4260 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4261 goto nvram_write_end;
4263 /* Loop to write back the buffer data from page_start to
4264 * data_start */
4265 i = 0;
4266 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4267 /* Erase the page */
4268 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4269 goto nvram_write_end;
4271 /* Re-enable writes for the actual programming */
4272 bnx2_enable_nvram_write(bp);
4274 for (addr = page_start; addr < data_start;
4275 addr += 4, i += 4) {
4277 rc = bnx2_nvram_write_dword(bp, addr,
4278 &flash_buffer[i], cmd_flags);
4280 if (rc != 0)
4281 goto nvram_write_end;
4283 cmd_flags = 0;
4287 /* Loop to write the new data from data_start to data_end */
4288 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4289 if ((addr == page_end - 4) ||
4290 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4291 (addr == data_end - 4))) {
4293 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4295 rc = bnx2_nvram_write_dword(bp, addr, buf,
4296 cmd_flags);
4298 if (rc != 0)
4299 goto nvram_write_end;
4301 cmd_flags = 0;
4302 buf += 4;
4305 /* Loop to write back the buffer data from data_end
4306 * to page_end */
4307 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4308 for (addr = data_end; addr < page_end;
4309 addr += 4, i += 4) {
4311 if (addr == page_end-4) {
4312 cmd_flags = BNX2_NVM_COMMAND_LAST;
4314 rc = bnx2_nvram_write_dword(bp, addr,
4315 &flash_buffer[i], cmd_flags);
4317 if (rc != 0)
4318 goto nvram_write_end;
4320 cmd_flags = 0;
4324 /* Disable writes to flash interface (lock write-protect) */
4325 bnx2_disable_nvram_write(bp);
4327 /* Disable access to flash interface */
4328 bnx2_disable_nvram_access(bp);
4329 bnx2_release_nvram_lock(bp);
4331 /* Increment written */
4332 written += data_end - data_start;
4335 nvram_write_end:
4336 kfree(flash_buffer);
4337 kfree(align_buf);
4338 return rc;
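/*
 * Illustrative usage of the write path above (helper name and values
 * hypothetical): an unaligned 3-byte write works because the routine first
 * reads the partial head and tail dwords, merges the new bytes into
 * align_buf, and then programs whole dwords; on non-buffered flash the
 * affected page is read, erased and rewritten so untouched bytes are
 * preserved.
 */
static int example_unaligned_nvram_write(struct bnx2 *bp)
{
	u8 bytes[3] = { 0x01, 0x02, 0x03 };

	return bnx2_nvram_write(bp, 0x53, bytes, 3);
}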
4341 static void
4342 bnx2_init_fw_cap(struct bnx2 *bp)
4344 u32 val, sig = 0;
4346 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4347 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4349 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4350 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4352 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4353 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4354 return;
4356 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4357 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4358 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4361 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4362 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4363 u32 link;
4365 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4367 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4368 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4369 bp->phy_port = PORT_FIBRE;
4370 else
4371 bp->phy_port = PORT_TP;
4373 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4374 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4377 if (netif_running(bp->dev) && sig)
4378 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4381 static void
4382 bnx2_setup_msix_tbl(struct bnx2 *bp)
4384 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4386 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4387 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4390 static int
4391 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4393 u32 val;
4394 int i, rc = 0;
4395 u8 old_port;
4397 /* Wait for the current PCI transaction to complete before
4398 * issuing a reset. */
4399 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4400 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4401 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4402 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4403 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4404 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4405 udelay(5);
4407 /* Wait for the firmware to tell us it is ok to issue a reset. */
4408 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4410 /* Deposit a driver reset signature so the firmware knows that
4411 * this is a soft reset. */
4412 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4413 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4415 /* Do a dummy read to force the chip to complete all current transactions
4416 * before we issue a reset. */
4417 val = REG_RD(bp, BNX2_MISC_ID);
4419 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4420 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4421 REG_RD(bp, BNX2_MISC_COMMAND);
4422 udelay(5);
4424 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4425 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4427 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4429 } else {
4430 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4431 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4432 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4434 /* Chip reset. */
4435 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4437 /* Reading back any register after chip reset will hang the
4438 * bus on 5706 A0 and A1. The msleep below provides plenty
4439 * of margin for write posting. */
4441 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4442 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4443 msleep(20);
4445 /* Reset takes approximately 30 usec */
4446 for (i = 0; i < 10; i++) {
4447 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4448 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4449 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4450 break;
4451 udelay(10);
4454 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4455 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4456 printk(KERN_ERR PFX "Chip reset did not complete\n");
4457 return -EBUSY;
4461 /* Make sure byte swapping is properly configured. */
4462 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4463 if (val != 0x01020304) {
4464 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4465 return -ENODEV;
4468 /* Wait for the firmware to finish its initialization. */
4469 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4470 if (rc)
4471 return rc;
4473 spin_lock_bh(&bp->phy_lock);
4474 old_port = bp->phy_port;
4475 bnx2_init_fw_cap(bp);
4476 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4477 old_port != bp->phy_port)
4478 bnx2_set_default_remote_link(bp);
4479 spin_unlock_bh(&bp->phy_lock);
4481 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4482 /* Adjust the voltage regulator two steps lower. The default
4483 * of this register is 0x0000000e. */
4484 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4486 /* Remove bad rbuf memory from the free pool. */
4487 rc = bnx2_alloc_bad_rbuf(bp);
4490 if (bp->flags & BNX2_FLAG_USING_MSIX)
4491 bnx2_setup_msix_tbl(bp);
4493 return rc;
4496 static int
4497 bnx2_init_chip(struct bnx2 *bp)
4499 u32 val, mtu;
4500 int rc, i;
4502 /* Make sure the interrupt is not active. */
4503 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4505 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4506 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4507 #ifdef __BIG_ENDIAN
4508 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4509 #endif
4510 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4511 DMA_READ_CHANS << 12 |
4512 DMA_WRITE_CHANS << 16;
4514 val |= (0x2 << 20) | (1 << 11);
4516 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4517 val |= (1 << 23);
4519 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4520 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4521 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4523 REG_WR(bp, BNX2_DMA_CONFIG, val);
4525 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4526 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4527 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4528 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4531 if (bp->flags & BNX2_FLAG_PCIX) {
4532 u16 val16;
4534 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4535 &val16);
4536 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4537 val16 & ~PCI_X_CMD_ERO);
4540 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4541 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4542 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4543 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4545 /* Initialize context mapping and zero out the quick contexts. The
4546 * context block must have already been enabled. */
4547 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4548 rc = bnx2_init_5709_context(bp);
4549 if (rc)
4550 return rc;
4551 } else
4552 bnx2_init_context(bp);
4554 if ((rc = bnx2_init_cpus(bp)) != 0)
4555 return rc;
4557 bnx2_init_nvram(bp);
4559 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4561 val = REG_RD(bp, BNX2_MQ_CONFIG);
4562 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4563 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4564 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4565 val |= BNX2_MQ_CONFIG_HALT_DIS;
4567 REG_WR(bp, BNX2_MQ_CONFIG, val);
4569 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4570 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4571 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4573 val = (BCM_PAGE_BITS - 8) << 24;
4574 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4576 /* Configure page size. */
4577 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4578 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4579 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4580 REG_WR(bp, BNX2_TBDR_CONFIG, val);
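/* Fold the station MAC address into a seed for the EMAC transmit backoff
 * algorithm: bytes 0-2 and 3-5 are each summed as little-endian 24-bit values.
 */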
4582 val = bp->mac_addr[0] +
4583 (bp->mac_addr[1] << 8) +
4584 (bp->mac_addr[2] << 16) +
4585 bp->mac_addr[3] +
4586 (bp->mac_addr[4] << 8) +
4587 (bp->mac_addr[5] << 16);
4588 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4590 /* Program the MTU. Also include 4 bytes for CRC32. */
4591 mtu = bp->dev->mtu;
4592 val = mtu + ETH_HLEN + ETH_FCS_LEN;
4593 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4594 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4595 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4597 if (mtu < 1500)
4598 mtu = 1500;
4600 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4601 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4602 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4604 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4605 bp->bnx2_napi[i].last_status_idx = 0;
4607 bp->idle_chk_status_idx = 0xffff;
4609 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4611 /* Set up how to generate a link change interrupt. */
4612 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4614 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4615 (u64) bp->status_blk_mapping & 0xffffffff);
4616 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4618 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4619 (u64) bp->stats_blk_mapping & 0xffffffff);
4620 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4621 (u64) bp->stats_blk_mapping >> 32);
4623 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4624 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4626 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4627 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4629 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4630 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4632 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4634 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4636 REG_WR(bp, BNX2_HC_COM_TICKS,
4637 (bp->com_ticks_int << 16) | bp->com_ticks);
4639 REG_WR(bp, BNX2_HC_CMD_TICKS,
4640 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4642 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4643 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4644 else
4645 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4646 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4648 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4649 val = BNX2_HC_CONFIG_COLLECT_STATS;
4650 else {
4651 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4652 BNX2_HC_CONFIG_COLLECT_STATS;
4655 if (bp->irq_nvecs > 1) {
4656 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4657 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4659 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4662 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4663 val |= BNX2_HC_CONFIG_ONE_SHOT;
4665 REG_WR(bp, BNX2_HC_CONFIG, val);
4667 for (i = 1; i < bp->irq_nvecs; i++) {
4668 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4669 BNX2_HC_SB_CONFIG_1;
4671 REG_WR(bp, base,
4672 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4673 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4674 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4676 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4677 (bp->tx_quick_cons_trip_int << 16) |
4678 bp->tx_quick_cons_trip);
4680 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4681 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4683 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4684 (bp->rx_quick_cons_trip_int << 16) |
4685 bp->rx_quick_cons_trip);
4687 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4688 (bp->rx_ticks_int << 16) | bp->rx_ticks);
4691 /* Clear internal stats counters. */
4692 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4694 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4696 /* Initialize the receive filter. */
4697 bnx2_set_rx_mode(bp->dev);
4699 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4700 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4701 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4702 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4704 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4705 1, 0);
4707 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4708 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4710 udelay(20);
4712 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4714 return rc;
4717 static void
4718 bnx2_clear_ring_states(struct bnx2 *bp)
4720 struct bnx2_napi *bnapi;
4721 struct bnx2_tx_ring_info *txr;
4722 struct bnx2_rx_ring_info *rxr;
4723 int i;
4725 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4726 bnapi = &bp->bnx2_napi[i];
4727 txr = &bnapi->tx_ring;
4728 rxr = &bnapi->rx_ring;
4730 txr->tx_cons = 0;
4731 txr->hw_tx_cons = 0;
4732 rxr->rx_prod_bseq = 0;
4733 rxr->rx_prod = 0;
4734 rxr->rx_cons = 0;
4735 rxr->rx_pg_prod = 0;
4736 rxr->rx_pg_cons = 0;
4740 static void
4741 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
4743 u32 val, offset0, offset1, offset2, offset3;
4744 u32 cid_addr = GET_CID_ADDR(cid);
4746 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4747 offset0 = BNX2_L2CTX_TYPE_XI;
4748 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4749 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4750 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4751 } else {
4752 offset0 = BNX2_L2CTX_TYPE;
4753 offset1 = BNX2_L2CTX_CMD_TYPE;
4754 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4755 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4757 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4758 bnx2_ctx_wr(bp, cid_addr, offset0, val);
4760 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4761 bnx2_ctx_wr(bp, cid_addr, offset1, val);
4763 val = (u64) txr->tx_desc_mapping >> 32;
4764 bnx2_ctx_wr(bp, cid_addr, offset2, val);
4766 val = (u64) txr->tx_desc_mapping & 0xffffffff;
4767 bnx2_ctx_wr(bp, cid_addr, offset3, val);
4770 static void
4771 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
4773 struct tx_bd *txbd;
4774 u32 cid = TX_CID;
4775 struct bnx2_napi *bnapi;
4776 struct bnx2_tx_ring_info *txr;
4778 bnapi = &bp->bnx2_napi[ring_num];
4779 txr = &bnapi->tx_ring;
4781 if (ring_num == 0)
4782 cid = TX_CID;
4783 else
4784 cid = TX_TSS_CID + ring_num - 1;
4786 bp->tx_wake_thresh = bp->tx_ring_size / 2;
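/* The last BD in the ring is not used for packets; it is a chain pointer
 * whose host address points back to the start of the ring, making the
 * descriptor ring circular in hardware.
 */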
4788 txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
4790 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
4791 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
4793 txr->tx_prod = 0;
4794 txr->tx_prod_bseq = 0;
4796 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4797 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4799 bnx2_init_tx_context(bp, cid, txr);
4802 static void
4803 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4804 int num_rings)
4806 int i;
4807 struct rx_bd *rxbd;
4809 for (i = 0; i < num_rings; i++) {
4810 int j;
4812 rxbd = &rx_ring[i][0];
4813 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4814 rxbd->rx_bd_len = buf_size;
4815 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
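/* After filling a page, its final BD is left as a chain pointer to the next
 * ring page; the last page points back to the first so that all pages form
 * one circular ring.
 */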
4817 if (i == (num_rings - 1))
4818 j = 0;
4819 else
4820 j = i + 1;
4821 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4822 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4826 static void
4827 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
4829 int i;
4830 u16 prod, ring_prod;
4831 u32 cid, rx_cid_addr, val;
4832 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
4833 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
4835 if (ring_num == 0)
4836 cid = RX_CID;
4837 else
4838 cid = RX_RSS_CID + ring_num - 1;
4840 rx_cid_addr = GET_CID_ADDR(cid);
4842 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
4843 bp->rx_buf_use_size, bp->rx_max_ring);
4845 bnx2_init_rx_context(bp, cid);
4847 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4848 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
4849 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
4852 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
4853 if (bp->rx_pg_ring_size) {
4854 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
4855 rxr->rx_pg_desc_mapping,
4856 PAGE_SIZE, bp->rx_max_pg_ring);
4857 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
4858 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4859 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
4860 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
4862 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
4863 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
4865 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
4866 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
4868 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4869 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4872 val = (u64) rxr->rx_desc_mapping[0] >> 32;
4873 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4875 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
4876 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4878 ring_prod = prod = rxr->rx_pg_prod;
4879 for (i = 0; i < bp->rx_pg_ring_size; i++) {
4880 if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
4881 break;
4882 prod = NEXT_RX_BD(prod);
4883 ring_prod = RX_PG_RING_IDX(prod);
4885 rxr->rx_pg_prod = prod;
4887 ring_prod = prod = rxr->rx_prod;
4888 for (i = 0; i < bp->rx_ring_size; i++) {
4889 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
4890 break;
4891 prod = NEXT_RX_BD(prod);
4892 ring_prod = RX_RING_IDX(prod);
4894 rxr->rx_prod = prod;
4896 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
4897 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
4898 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
4900 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
4901 REG_WR16(bp, rxr->rx_bidx_addr, prod);
4903 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
4906 static void
4907 bnx2_init_all_rings(struct bnx2 *bp)
4909 int i;
4910 u32 val;
4912 bnx2_clear_ring_states(bp);
4914 REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
4915 for (i = 0; i < bp->num_tx_rings; i++)
4916 bnx2_init_tx_ring(bp, i);
4918 if (bp->num_tx_rings > 1)
4919 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
4920 (TX_TSS_CID << 7));
4922 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
4923 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
4925 for (i = 0; i < bp->num_rx_rings; i++)
4926 bnx2_init_rx_ring(bp, i);
4928 if (bp->num_rx_rings > 1) {
4929 u32 tbl_32;
4930 u8 *tbl = (u8 *) &tbl_32;
4932 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
4933 BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
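/* Build the RSS indirection table: each byte selects one of the
 * (num_rx_rings - 1) RSS rings, and entries are packed four at a time into
 * tbl_32 before being written big-endian into RXP scratch memory.
 */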
4935 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
4936 tbl[i % 4] = i % (bp->num_rx_rings - 1);
4937 if ((i % 4) == 3)
4938 bnx2_reg_wr_ind(bp,
4939 BNX2_RXP_SCRATCH_RSS_TBL + i,
4940 cpu_to_be32(tbl_32));
4943 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
4944 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
4946 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
4951 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4953 u32 max, num_rings = 1;
4955 while (ring_size > MAX_RX_DESC_CNT) {
4956 ring_size -= MAX_RX_DESC_CNT;
4957 num_rings++;
4959 /* round to next power of 2 */
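/* max_size is a power of two: shift it down to the highest bit set in
 * num_rings, then double it unless num_rings is already that power of two.
 * For the values used here (num_rings <= max_size) the result is the
 * smallest power of two >= num_rings.
 */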
4960 max = max_size;
4961 while ((max & num_rings) == 0)
4962 max >>= 1;
4964 if (num_rings != max)
4965 max <<= 1;
4967 return max;
4970 static void
4971 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4973 u32 rx_size, rx_space, jumbo_size;
4975 /* 8 for CRC and VLAN */
4976 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
4978 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4979 sizeof(struct skb_shared_info);
4981 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
4982 bp->rx_pg_ring_size = 0;
4983 bp->rx_max_pg_ring = 0;
4984 bp->rx_max_pg_ring_idx = 0;
4985 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
4986 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
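/* Size the page ring: each jumbo frame needs enough full pages to hold
 * roughly the MTU minus 40 bytes of expected protocol headers; the leading
 * bytes stay in the small skb data buffer, which is reduced to the copy
 * threshold below.
 */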
4988 jumbo_size = size * pages;
4989 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4990 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4992 bp->rx_pg_ring_size = jumbo_size;
4993 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4994 MAX_RX_PG_RINGS);
4995 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4996 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
4997 bp->rx_copy_thresh = 0;
5000 bp->rx_buf_use_size = rx_size;
5001 /* hw alignment */
5002 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5003 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5004 bp->rx_ring_size = size;
5005 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5006 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5009 static void
5010 bnx2_free_tx_skbs(struct bnx2 *bp)
5012 int i;
5014 for (i = 0; i < bp->num_tx_rings; i++) {
5015 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5016 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5017 int j;
5019 if (txr->tx_buf_ring == NULL)
5020 continue;
5022 for (j = 0; j < TX_DESC_CNT; ) {
5023 struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5024 struct sk_buff *skb = tx_buf->skb;
5026 if (skb == NULL) {
5027 j++;
5028 continue;
5031 skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
5033 tx_buf->skb = NULL;
5035 j += skb_shinfo(skb)->nr_frags + 1;
5036 dev_kfree_skb(skb);
5041 static void
5042 bnx2_free_rx_skbs(struct bnx2 *bp)
5044 int i;
5046 for (i = 0; i < bp->num_rx_rings; i++) {
5047 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5048 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5049 int j;
5051 if (rxr->rx_buf_ring == NULL)
5052 return;
5054 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5055 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5056 struct sk_buff *skb = rx_buf->skb;
5058 if (skb == NULL)
5059 continue;
5061 pci_unmap_single(bp->pdev,
5062 pci_unmap_addr(rx_buf, mapping),
5063 bp->rx_buf_use_size,
5064 PCI_DMA_FROMDEVICE);
5066 rx_buf->skb = NULL;
5068 dev_kfree_skb(skb);
5070 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5071 bnx2_free_rx_page(bp, rxr, j);
5075 static void
5076 bnx2_free_skbs(struct bnx2 *bp)
5078 bnx2_free_tx_skbs(bp);
5079 bnx2_free_rx_skbs(bp);
5082 static int
5083 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5085 int rc;
5087 rc = bnx2_reset_chip(bp, reset_code);
5088 bnx2_free_skbs(bp);
5089 if (rc)
5090 return rc;
5092 if ((rc = bnx2_init_chip(bp)) != 0)
5093 return rc;
5095 bnx2_init_all_rings(bp);
5096 return 0;
5099 static int
5100 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5102 int rc;
5104 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5105 return rc;
5107 spin_lock_bh(&bp->phy_lock);
5108 bnx2_init_phy(bp, reset_phy);
5109 bnx2_set_link(bp);
5110 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5111 bnx2_remote_phy_event(bp);
5112 spin_unlock_bh(&bp->phy_lock);
5113 return 0;
5116 static int
5117 bnx2_shutdown_chip(struct bnx2 *bp)
5119 u32 reset_code;
5121 if (bp->flags & BNX2_FLAG_NO_WOL)
5122 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5123 else if (bp->wol)
5124 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5125 else
5126 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5128 return bnx2_reset_chip(bp, reset_code);
5131 static int
5132 bnx2_test_registers(struct bnx2 *bp)
5134 int ret;
5135 int i, is_5709;
5136 static const struct {
5137 u16 offset;
5138 u16 flags;
5139 #define BNX2_FL_NOT_5709 1
5140 u32 rw_mask;
5141 u32 ro_mask;
5142 } reg_tbl[] = {
5143 { 0x006c, 0, 0x00000000, 0x0000003f },
5144 { 0x0090, 0, 0xffffffff, 0x00000000 },
5145 { 0x0094, 0, 0x00000000, 0x00000000 },
5147 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5148 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5149 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5150 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5151 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5152 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5153 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5154 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5155 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5157 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5158 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5159 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5160 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5161 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5162 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5164 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5165 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5166 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5168 { 0x1000, 0, 0x00000000, 0x00000001 },
5169 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5171 { 0x1408, 0, 0x01c00800, 0x00000000 },
5172 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5173 { 0x14a8, 0, 0x00000000, 0x000001ff },
5174 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5175 { 0x14b0, 0, 0x00000002, 0x00000001 },
5176 { 0x14b8, 0, 0x00000000, 0x00000000 },
5177 { 0x14c0, 0, 0x00000000, 0x00000009 },
5178 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5179 { 0x14cc, 0, 0x00000000, 0x00000001 },
5180 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5182 { 0x1800, 0, 0x00000000, 0x00000001 },
5183 { 0x1804, 0, 0x00000000, 0x00000003 },
5185 { 0x2800, 0, 0x00000000, 0x00000001 },
5186 { 0x2804, 0, 0x00000000, 0x00003f01 },
5187 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5188 { 0x2810, 0, 0xffff0000, 0x00000000 },
5189 { 0x2814, 0, 0xffff0000, 0x00000000 },
5190 { 0x2818, 0, 0xffff0000, 0x00000000 },
5191 { 0x281c, 0, 0xffff0000, 0x00000000 },
5192 { 0x2834, 0, 0xffffffff, 0x00000000 },
5193 { 0x2840, 0, 0x00000000, 0xffffffff },
5194 { 0x2844, 0, 0x00000000, 0xffffffff },
5195 { 0x2848, 0, 0xffffffff, 0x00000000 },
5196 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5198 { 0x2c00, 0, 0x00000000, 0x00000011 },
5199 { 0x2c04, 0, 0x00000000, 0x00030007 },
5201 { 0x3c00, 0, 0x00000000, 0x00000001 },
5202 { 0x3c04, 0, 0x00000000, 0x00070000 },
5203 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5204 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5205 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5206 { 0x3c14, 0, 0x00000000, 0xffffffff },
5207 { 0x3c18, 0, 0x00000000, 0xffffffff },
5208 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5209 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5211 { 0x5004, 0, 0x00000000, 0x0000007f },
5212 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5214 { 0x5c00, 0, 0x00000000, 0x00000001 },
5215 { 0x5c04, 0, 0x00000000, 0x0003000f },
5216 { 0x5c08, 0, 0x00000003, 0x00000000 },
5217 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5218 { 0x5c10, 0, 0x00000000, 0xffffffff },
5219 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5220 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5221 { 0x5c88, 0, 0x00000000, 0x00077373 },
5222 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5224 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5225 { 0x680c, 0, 0xffffffff, 0x00000000 },
5226 { 0x6810, 0, 0xffffffff, 0x00000000 },
5227 { 0x6814, 0, 0xffffffff, 0x00000000 },
5228 { 0x6818, 0, 0xffffffff, 0x00000000 },
5229 { 0x681c, 0, 0xffffffff, 0x00000000 },
5230 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5231 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5232 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5233 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5234 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5235 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5236 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5237 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5238 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5239 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5240 { 0x684c, 0, 0xffffffff, 0x00000000 },
5241 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5242 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5243 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5244 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5245 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5246 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5248 { 0xffff, 0, 0x00000000, 0x00000000 },
5251 ret = 0;
5252 is_5709 = 0;
5253 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5254 is_5709 = 1;
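/* For each register in reg_tbl: save the current value, write zero and check
 * that the read/write bits read back as zero while the read-only bits are
 * preserved, then write all ones and check that the read/write bits read
 * back as rw_mask with the read-only bits still preserved, and finally
 * restore the saved value.
 */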
5256 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5257 u32 offset, rw_mask, ro_mask, save_val, val;
5258 u16 flags = reg_tbl[i].flags;
5260 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5261 continue;
5263 offset = (u32) reg_tbl[i].offset;
5264 rw_mask = reg_tbl[i].rw_mask;
5265 ro_mask = reg_tbl[i].ro_mask;
5267 save_val = readl(bp->regview + offset);
5269 writel(0, bp->regview + offset);
5271 val = readl(bp->regview + offset);
5272 if ((val & rw_mask) != 0) {
5273 goto reg_test_err;
5276 if ((val & ro_mask) != (save_val & ro_mask)) {
5277 goto reg_test_err;
5280 writel(0xffffffff, bp->regview + offset);
5282 val = readl(bp->regview + offset);
5283 if ((val & rw_mask) != rw_mask) {
5284 goto reg_test_err;
5287 if ((val & ro_mask) != (save_val & ro_mask)) {
5288 goto reg_test_err;
5291 writel(save_val, bp->regview + offset);
5292 continue;
5294 reg_test_err:
5295 writel(save_val, bp->regview + offset);
5296 ret = -ENODEV;
5297 break;
5299 return ret;
5302 static int
5303 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5305 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5306 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5307 int i;
5309 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5310 u32 offset;
5312 for (offset = 0; offset < size; offset += 4) {
5314 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5316 if (bnx2_reg_rd_ind(bp, start + offset) !=
5317 test_pattern[i]) {
5318 return -ENODEV;
5322 return 0;
5325 static int
5326 bnx2_test_memory(struct bnx2 *bp)
5328 int ret = 0;
5329 int i;
5330 static struct mem_entry {
5331 u32 offset;
5332 u32 len;
5333 } mem_tbl_5706[] = {
5334 { 0x60000, 0x4000 },
5335 { 0xa0000, 0x3000 },
5336 { 0xe0000, 0x4000 },
5337 { 0x120000, 0x4000 },
5338 { 0x1a0000, 0x4000 },
5339 { 0x160000, 0x4000 },
5340 { 0xffffffff, 0 },
5342 mem_tbl_5709[] = {
5343 { 0x60000, 0x4000 },
5344 { 0xa0000, 0x3000 },
5345 { 0xe0000, 0x4000 },
5346 { 0x120000, 0x4000 },
5347 { 0x1a0000, 0x4000 },
5348 { 0xffffffff, 0 },
5350 struct mem_entry *mem_tbl;
5352 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5353 mem_tbl = mem_tbl_5709;
5354 else
5355 mem_tbl = mem_tbl_5706;
5357 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5358 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5359 mem_tbl[i].len)) != 0) {
5360 return ret;
5364 return ret;
5367 #define BNX2_MAC_LOOPBACK 0
5368 #define BNX2_PHY_LOOPBACK 1
5370 static int
5371 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5373 unsigned int pkt_size, num_pkts, i;
5374 struct sk_buff *skb, *rx_skb;
5375 unsigned char *packet;
5376 u16 rx_start_idx, rx_idx;
5377 dma_addr_t map;
5378 struct tx_bd *txbd;
5379 struct sw_bd *rx_buf;
5380 struct l2_fhdr *rx_hdr;
5381 int ret = -ENODEV;
5382 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5383 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5384 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5386 tx_napi = bnapi;
5388 txr = &tx_napi->tx_ring;
5389 rxr = &bnapi->rx_ring;
5390 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5391 bp->loopback = MAC_LOOPBACK;
5392 bnx2_set_mac_loopback(bp);
5394 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5395 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5396 return 0;
5398 bp->loopback = PHY_LOOPBACK;
5399 bnx2_set_phy_loopback(bp);
5401 else
5402 return -EINVAL;
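/* Build the loopback test frame: the destination MAC is the device's own
 * address, the 6-byte source address and 2-byte type field are zeroed, and
 * the payload from offset 14 onward is an incrementing byte pattern that is
 * verified on the receive side below.
 */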
5404 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5405 skb = netdev_alloc_skb(bp->dev, pkt_size);
5406 if (!skb)
5407 return -ENOMEM;
5408 packet = skb_put(skb, pkt_size);
5409 memcpy(packet, bp->dev->dev_addr, 6);
5410 memset(packet + 6, 0x0, 8);
5411 for (i = 14; i < pkt_size; i++)
5412 packet[i] = (unsigned char) (i & 0xff);
5414 if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
5415 dev_kfree_skb(skb);
5416 return -EIO;
5418 map = skb_shinfo(skb)->dma_maps[0];
5420 REG_WR(bp, BNX2_HC_COMMAND,
5421 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5423 REG_RD(bp, BNX2_HC_COMMAND);
5425 udelay(5);
5426 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5428 num_pkts = 0;
5430 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5432 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5433 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5434 txbd->tx_bd_mss_nbytes = pkt_size;
5435 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5437 num_pkts++;
5438 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5439 txr->tx_prod_bseq += pkt_size;
5441 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5442 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5444 udelay(100);
5446 REG_WR(bp, BNX2_HC_COMMAND,
5447 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5449 REG_RD(bp, BNX2_HC_COMMAND);
5451 udelay(5);
5453 skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
5454 dev_kfree_skb(skb);
5456 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5457 goto loopback_test_done;
5459 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5460 if (rx_idx != rx_start_idx + num_pkts) {
5461 goto loopback_test_done;
5464 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5465 rx_skb = rx_buf->skb;
5467 rx_hdr = (struct l2_fhdr *) rx_skb->data;
5468 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5470 pci_dma_sync_single_for_cpu(bp->pdev,
5471 pci_unmap_addr(rx_buf, mapping),
5472 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5474 if (rx_hdr->l2_fhdr_status &
5475 (L2_FHDR_ERRORS_BAD_CRC |
5476 L2_FHDR_ERRORS_PHY_DECODE |
5477 L2_FHDR_ERRORS_ALIGNMENT |
5478 L2_FHDR_ERRORS_TOO_SHORT |
5479 L2_FHDR_ERRORS_GIANT_FRAME)) {
5481 goto loopback_test_done;
5484 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5485 goto loopback_test_done;
5488 for (i = 14; i < pkt_size; i++) {
5489 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5490 goto loopback_test_done;
5494 ret = 0;
5496 loopback_test_done:
5497 bp->loopback = 0;
5498 return ret;
5501 #define BNX2_MAC_LOOPBACK_FAILED 1
5502 #define BNX2_PHY_LOOPBACK_FAILED 2
5503 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5504 BNX2_PHY_LOOPBACK_FAILED)
5506 static int
5507 bnx2_test_loopback(struct bnx2 *bp)
5509 int rc = 0;
5511 if (!netif_running(bp->dev))
5512 return BNX2_LOOPBACK_FAILED;
5514 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5515 spin_lock_bh(&bp->phy_lock);
5516 bnx2_init_phy(bp, 1);
5517 spin_unlock_bh(&bp->phy_lock);
5518 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5519 rc |= BNX2_MAC_LOOPBACK_FAILED;
5520 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5521 rc |= BNX2_PHY_LOOPBACK_FAILED;
5522 return rc;
5525 #define NVRAM_SIZE 0x200
5526 #define CRC32_RESIDUAL 0xdebb20e3
5528 static int
5529 bnx2_test_nvram(struct bnx2 *bp)
5531 __be32 buf[NVRAM_SIZE / 4];
5532 u8 *data = (u8 *) buf;
5533 int rc = 0;
5534 u32 magic, csum;
5536 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5537 goto test_nvram_done;
5539 magic = be32_to_cpu(buf[0]);
5540 if (magic != 0x669955aa) {
5541 rc = -ENODEV;
5542 goto test_nvram_done;
5545 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5546 goto test_nvram_done;
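/* Each 0x100-byte NVRAM block includes its own CRC32; running ether_crc_le()
 * over the whole block (data plus stored CRC) must produce the fixed CRC32
 * residual value when the block is intact.
 */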
5548 csum = ether_crc_le(0x100, data);
5549 if (csum != CRC32_RESIDUAL) {
5550 rc = -ENODEV;
5551 goto test_nvram_done;
5554 csum = ether_crc_le(0x100, data + 0x100);
5555 if (csum != CRC32_RESIDUAL) {
5556 rc = -ENODEV;
5559 test_nvram_done:
5560 return rc;
5563 static int
5564 bnx2_test_link(struct bnx2 *bp)
5566 u32 bmsr;
5568 if (!netif_running(bp->dev))
5569 return -ENODEV;
5571 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5572 if (bp->link_up)
5573 return 0;
5574 return -ENODEV;
5576 spin_lock_bh(&bp->phy_lock);
5577 bnx2_enable_bmsr1(bp);
5578 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5579 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5580 bnx2_disable_bmsr1(bp);
5581 spin_unlock_bh(&bp->phy_lock);
5583 if (bmsr & BMSR_LSTATUS) {
5584 return 0;
5586 return -ENODEV;
5589 static int
5590 bnx2_test_intr(struct bnx2 *bp)
5592 int i;
5593 u16 status_idx;
5595 if (!netif_running(bp->dev))
5596 return -ENODEV;
5598 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5600 /* This register is not touched during run-time. */
5601 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5602 REG_RD(bp, BNX2_HC_COMMAND);
5604 for (i = 0; i < 10; i++) {
5605 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5606 status_idx) {
5608 break;
5611 msleep_interruptible(10);
5613 if (i < 10)
5614 return 0;
5616 return -ENODEV;
5619 /* Determine link state for parallel detection. */
5620 static int
5621 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5623 u32 mode_ctl, an_dbg, exp;
5625 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5626 return 0;
5628 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5629 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5631 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5632 return 0;
5634 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5635 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5636 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5638 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5639 return 0;
5641 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5642 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5643 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5645 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5646 return 0;
5648 return 1;
5651 static void
5652 bnx2_5706_serdes_timer(struct bnx2 *bp)
5654 int check_link = 1;
5656 spin_lock(&bp->phy_lock);
5657 if (bp->serdes_an_pending) {
5658 bp->serdes_an_pending--;
5659 check_link = 0;
5660 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5661 u32 bmcr;
5663 bp->current_interval = BNX2_TIMER_INTERVAL;
5665 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5667 if (bmcr & BMCR_ANENABLE) {
5668 if (bnx2_5706_serdes_has_link(bp)) {
5669 bmcr &= ~BMCR_ANENABLE;
5670 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5671 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5672 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
5676 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5677 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
5678 u32 phy2;
5680 bnx2_write_phy(bp, 0x17, 0x0f01);
5681 bnx2_read_phy(bp, 0x15, &phy2);
5682 if (phy2 & 0x20) {
5683 u32 bmcr;
5685 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5686 bmcr |= BMCR_ANENABLE;
5687 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5689 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
5691 } else
5692 bp->current_interval = BNX2_TIMER_INTERVAL;
5694 if (check_link) {
5695 u32 val;
5697 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5698 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5699 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5701 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
5702 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
5703 bnx2_5706s_force_link_dn(bp, 1);
5704 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
5705 } else
5706 bnx2_set_link(bp);
5707 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
5708 bnx2_set_link(bp);
5710 spin_unlock(&bp->phy_lock);
5713 static void
5714 bnx2_5708_serdes_timer(struct bnx2 *bp)
5716 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5717 return;
5719 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
5720 bp->serdes_an_pending = 0;
5721 return;
5724 spin_lock(&bp->phy_lock);
5725 if (bp->serdes_an_pending)
5726 bp->serdes_an_pending--;
5727 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5728 u32 bmcr;
5730 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5731 if (bmcr & BMCR_ANENABLE) {
5732 bnx2_enable_forced_2g5(bp);
5733 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
5734 } else {
5735 bnx2_disable_forced_2g5(bp);
5736 bp->serdes_an_pending = 2;
5737 bp->current_interval = BNX2_TIMER_INTERVAL;
5740 } else
5741 bp->current_interval = BNX2_TIMER_INTERVAL;
5743 spin_unlock(&bp->phy_lock);
5746 static void
5747 bnx2_timer(unsigned long data)
5749 struct bnx2 *bp = (struct bnx2 *) data;
5751 if (!netif_running(bp->dev))
5752 return;
5754 if (atomic_read(&bp->intr_sem) != 0)
5755 goto bnx2_restart_timer;
5757 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
5758 BNX2_FLAG_USING_MSI)
5759 bnx2_chk_missed_msi(bp);
5761 bnx2_send_heart_beat(bp);
5763 bp->stats_blk->stat_FwRxDrop =
5764 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
5766 /* Work around occasional corrupted counters */
5767 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5768 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5769 BNX2_HC_COMMAND_STATS_NOW);
5771 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
5772 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5773 bnx2_5706_serdes_timer(bp);
5774 else
5775 bnx2_5708_serdes_timer(bp);
5778 bnx2_restart_timer:
5779 mod_timer(&bp->timer, jiffies + bp->current_interval);
5782 static int
5783 bnx2_request_irq(struct bnx2 *bp)
5785 unsigned long flags;
5786 struct bnx2_irq *irq;
5787 int rc = 0, i;
5789 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5790 flags = 0;
5791 else
5792 flags = IRQF_SHARED;
5794 for (i = 0; i < bp->irq_nvecs; i++) {
5795 irq = &bp->irq_tbl[i];
5796 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5797 &bp->bnx2_napi[i]);
5798 if (rc)
5799 break;
5800 irq->requested = 1;
5802 return rc;
5805 static void
5806 bnx2_free_irq(struct bnx2 *bp)
5808 struct bnx2_irq *irq;
5809 int i;
5811 for (i = 0; i < bp->irq_nvecs; i++) {
5812 irq = &bp->irq_tbl[i];
5813 if (irq->requested)
5814 free_irq(irq->vector, &bp->bnx2_napi[i]);
5815 irq->requested = 0;
5817 if (bp->flags & BNX2_FLAG_USING_MSI)
5818 pci_disable_msi(bp->pdev);
5819 else if (bp->flags & BNX2_FLAG_USING_MSIX)
5820 pci_disable_msix(bp->pdev);
5822 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5825 static void
5826 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
5828 int i, rc;
5829 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
5831 bnx2_setup_msix_tbl(bp);
5832 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
5833 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
5834 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
5836 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5837 msix_ent[i].entry = i;
5838 msix_ent[i].vector = 0;
5840 strcpy(bp->irq_tbl[i].name, bp->dev->name);
5841 bp->irq_tbl[i].handler = bnx2_msi_1shot;
5844 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
5845 if (rc != 0)
5846 return;
5848 bp->irq_nvecs = msix_vecs;
5849 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
5850 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5851 bp->irq_tbl[i].vector = msix_ent[i].vector;
5854 static void
5855 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5857 int cpus = num_online_cpus();
5858 int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
5860 bp->irq_tbl[0].handler = bnx2_interrupt;
5861 strcpy(bp->irq_tbl[0].name, bp->dev->name);
5862 bp->irq_nvecs = 1;
5863 bp->irq_tbl[0].vector = bp->pdev->irq;
5865 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
5866 bnx2_enable_msix(bp, msix_vecs);
5868 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
5869 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
5870 if (pci_enable_msi(bp->pdev) == 0) {
5871 bp->flags |= BNX2_FLAG_USING_MSI;
5872 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5873 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
5874 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5875 } else
5876 bp->irq_tbl[0].handler = bnx2_msi;
5878 bp->irq_tbl[0].vector = bp->pdev->irq;
5882 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
5883 bp->dev->real_num_tx_queues = bp->num_tx_rings;
5885 bp->num_rx_rings = bp->irq_nvecs;
5888 /* Called with rtnl_lock */
5889 static int
5890 bnx2_open(struct net_device *dev)
5892 struct bnx2 *bp = netdev_priv(dev);
5893 int rc;
5895 netif_carrier_off(dev);
5897 bnx2_set_power_state(bp, PCI_D0);
5898 bnx2_disable_int(bp);
5900 bnx2_setup_int_mode(bp, disable_msi);
5901 bnx2_napi_enable(bp);
5902 rc = bnx2_alloc_mem(bp);
5903 if (rc)
5904 goto open_err;
5906 rc = bnx2_request_irq(bp);
5907 if (rc)
5908 goto open_err;
5910 rc = bnx2_init_nic(bp, 1);
5911 if (rc)
5912 goto open_err;
5914 mod_timer(&bp->timer, jiffies + bp->current_interval);
5916 atomic_set(&bp->intr_sem, 0);
5918 bnx2_enable_int(bp);
5920 if (bp->flags & BNX2_FLAG_USING_MSI) {
5921 /* Test MSI to make sure it is working
5922 * If MSI test fails, go back to INTx mode */
5924 if (bnx2_test_intr(bp) != 0) {
5925 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5926 " using MSI, switching to INTx mode. Please"
5927 " report this failure to the PCI maintainer"
5928 " and include system chipset information.\n",
5929 bp->dev->name);
5931 bnx2_disable_int(bp);
5932 bnx2_free_irq(bp);
5934 bnx2_setup_int_mode(bp, 1);
5936 rc = bnx2_init_nic(bp, 0);
5938 if (!rc)
5939 rc = bnx2_request_irq(bp);
5941 if (rc) {
5942 del_timer_sync(&bp->timer);
5943 goto open_err;
5945 bnx2_enable_int(bp);
5948 if (bp->flags & BNX2_FLAG_USING_MSI)
5949 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5950 else if (bp->flags & BNX2_FLAG_USING_MSIX)
5951 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
5953 netif_tx_start_all_queues(dev);
5955 return 0;
5957 open_err:
5958 bnx2_napi_disable(bp);
5959 bnx2_free_skbs(bp);
5960 bnx2_free_irq(bp);
5961 bnx2_free_mem(bp);
5962 return rc;
5965 static void
5966 bnx2_reset_task(struct work_struct *work)
5968 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5970 if (!netif_running(bp->dev))
5971 return;
5973 bnx2_netif_stop(bp);
5975 bnx2_init_nic(bp, 1);
5977 atomic_set(&bp->intr_sem, 1);
5978 bnx2_netif_start(bp);
5981 static void
5982 bnx2_tx_timeout(struct net_device *dev)
5984 struct bnx2 *bp = netdev_priv(dev);
5986 /* This allows the netif to be shut down gracefully before resetting */
5987 schedule_work(&bp->reset_task);
5990 #ifdef BCM_VLAN
5991 /* Called with rtnl_lock */
5992 static void
5993 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5995 struct bnx2 *bp = netdev_priv(dev);
5997 bnx2_netif_stop(bp);
5999 bp->vlgrp = vlgrp;
6000 bnx2_set_rx_mode(dev);
6001 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
6002 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
6004 bnx2_netif_start(bp);
6006 #endif
6008 /* Called with netif_tx_lock.
6009 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6010 * netif_wake_queue(). */
6012 static int
6013 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6015 struct bnx2 *bp = netdev_priv(dev);
6016 dma_addr_t mapping;
6017 struct tx_bd *txbd;
6018 struct sw_tx_bd *tx_buf;
6019 u32 len, vlan_tag_flags, last_frag, mss;
6020 u16 prod, ring_prod;
6021 int i;
6022 struct bnx2_napi *bnapi;
6023 struct bnx2_tx_ring_info *txr;
6024 struct netdev_queue *txq;
6025 struct skb_shared_info *sp;
6027 /* Determine which tx ring we will be placed on */
6028 i = skb_get_queue_mapping(skb);
6029 bnapi = &bp->bnx2_napi[i];
6030 txr = &bnapi->tx_ring;
6031 txq = netdev_get_tx_queue(dev, i);
6033 if (unlikely(bnx2_tx_avail(bp, txr) <
6034 (skb_shinfo(skb)->nr_frags + 1))) {
6035 netif_tx_stop_queue(txq);
6036 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
6037 dev->name);
6039 return NETDEV_TX_BUSY;
6041 len = skb_headlen(skb);
6042 prod = txr->tx_prod;
6043 ring_prod = TX_RING_IDX(prod);
6045 vlan_tag_flags = 0;
6046 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6047 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6050 #ifdef BCM_VLAN
6051 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
6052 vlan_tag_flags |=
6053 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6055 #endif
6056 if ((mss = skb_shinfo(skb)->gso_size)) {
6057 u32 tcp_opt_len;
6058 struct iphdr *iph;
6060 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6062 tcp_opt_len = tcp_optlen(skb);
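/* Pass header-length information to the chip in the BD flags: for IPv4 the
 * extra IP header words plus TCP option words go into bits 8 and up; for
 * IPv6 the TCP option words go there too, and any transport-header offset
 * beyond the fixed Ethernet + IPv6 header is encoded in 8-byte units split
 * across the TCP6_OFF0/OFF2/OFF4 bit fields.
 */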
6064 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6065 u32 tcp_off = skb_transport_offset(skb) -
6066 sizeof(struct ipv6hdr) - ETH_HLEN;
6068 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6069 TX_BD_FLAGS_SW_FLAGS;
6070 if (likely(tcp_off == 0))
6071 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6072 else {
6073 tcp_off >>= 3;
6074 vlan_tag_flags |= ((tcp_off & 0x3) <<
6075 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6076 ((tcp_off & 0x10) <<
6077 TX_BD_FLAGS_TCP6_OFF4_SHL);
6078 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6080 } else {
6081 iph = ip_hdr(skb);
6082 if (tcp_opt_len || (iph->ihl > 5)) {
6083 vlan_tag_flags |= ((iph->ihl - 5) +
6084 (tcp_opt_len >> 2)) << 8;
6087 } else
6088 mss = 0;
6090 if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
6091 dev_kfree_skb(skb);
6092 return NETDEV_TX_OK;
6095 sp = skb_shinfo(skb);
6096 mapping = sp->dma_maps[0];
6098 tx_buf = &txr->tx_buf_ring[ring_prod];
6099 tx_buf->skb = skb;
6101 txbd = &txr->tx_desc_ring[ring_prod];
6103 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6104 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6105 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6106 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6108 last_frag = skb_shinfo(skb)->nr_frags;
6110 for (i = 0; i < last_frag; i++) {
6111 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6113 prod = NEXT_TX_BD(prod);
6114 ring_prod = TX_RING_IDX(prod);
6115 txbd = &txr->tx_desc_ring[ring_prod];
6117 len = frag->size;
6118 mapping = sp->dma_maps[i + 1];
6120 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6121 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6122 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6123 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6126 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6128 prod = NEXT_TX_BD(prod);
6129 txr->tx_prod_bseq += skb->len;
6131 REG_WR16(bp, txr->tx_bidx_addr, prod);
6132 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6134 mmiowb();
6136 txr->tx_prod = prod;
6137 dev->trans_start = jiffies;
6139 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6140 netif_tx_stop_queue(txq);
6141 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6142 netif_tx_wake_queue(txq);
6145 return NETDEV_TX_OK;
6148 /* Called with rtnl_lock */
6149 static int
6150 bnx2_close(struct net_device *dev)
6152 struct bnx2 *bp = netdev_priv(dev);
6154 cancel_work_sync(&bp->reset_task);
6156 bnx2_disable_int_sync(bp);
6157 bnx2_napi_disable(bp);
6158 del_timer_sync(&bp->timer);
6159 bnx2_shutdown_chip(bp);
6160 bnx2_free_irq(bp);
6161 bnx2_free_skbs(bp);
6162 bnx2_free_mem(bp);
6163 bp->link_up = 0;
6164 netif_carrier_off(bp->dev);
6165 bnx2_set_power_state(bp, PCI_D3hot);
6166 return 0;
6169 #define GET_NET_STATS64(ctr) \
6170 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
6171 (unsigned long) (ctr##_lo)
6173 #define GET_NET_STATS32(ctr) \
6174 (ctr##_lo)
6176 #if (BITS_PER_LONG == 64)
6177 #define GET_NET_STATS GET_NET_STATS64
6178 #else
6179 #define GET_NET_STATS GET_NET_STATS32
6180 #endif
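/* The macros above read the 64-bit hardware counters: on 64-bit hosts the
 * high and low 32-bit halves are combined, while 32-bit hosts use only the
 * low half to avoid overflowing unsigned long.
 */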
6182 static struct net_device_stats *
6183 bnx2_get_stats(struct net_device *dev)
6185 struct bnx2 *bp = netdev_priv(dev);
6186 struct statistics_block *stats_blk = bp->stats_blk;
6187 struct net_device_stats *net_stats = &dev->stats;
6189 if (bp->stats_blk == NULL) {
6190 return net_stats;
6192 net_stats->rx_packets =
6193 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
6194 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
6195 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
6197 net_stats->tx_packets =
6198 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
6199 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
6200 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
6202 net_stats->rx_bytes =
6203 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
6205 net_stats->tx_bytes =
6206 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
6208 net_stats->multicast =
6209 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
6211 net_stats->collisions =
6212 (unsigned long) stats_blk->stat_EtherStatsCollisions;
6214 net_stats->rx_length_errors =
6215 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
6216 stats_blk->stat_EtherStatsOverrsizePkts);
6218 net_stats->rx_over_errors =
6219 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
6221 net_stats->rx_frame_errors =
6222 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
6224 net_stats->rx_crc_errors =
6225 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
6227 net_stats->rx_errors = net_stats->rx_length_errors +
6228 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6229 net_stats->rx_crc_errors;
6231 net_stats->tx_aborted_errors =
6232 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
6233 stats_blk->stat_Dot3StatsLateCollisions);
6235 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6236 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6237 net_stats->tx_carrier_errors = 0;
6238 else {
6239 net_stats->tx_carrier_errors =
6240 (unsigned long)
6241 stats_blk->stat_Dot3StatsCarrierSenseErrors;
6244 net_stats->tx_errors =
6245 (unsigned long)
6246 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
6248 net_stats->tx_aborted_errors +
6249 net_stats->tx_carrier_errors;
6251 net_stats->rx_missed_errors =
6252 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
6253 stats_blk->stat_FwRxDrop);
6255 return net_stats;
6258 /* All ethtool functions called with rtnl_lock */
6260 static int
6261 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6263 struct bnx2 *bp = netdev_priv(dev);
6264 int support_serdes = 0, support_copper = 0;
6266 cmd->supported = SUPPORTED_Autoneg;
6267 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6268 support_serdes = 1;
6269 support_copper = 1;
6270 } else if (bp->phy_port == PORT_FIBRE)
6271 support_serdes = 1;
6272 else
6273 support_copper = 1;
6275 if (support_serdes) {
6276 cmd->supported |= SUPPORTED_1000baseT_Full |
6277 SUPPORTED_FIBRE;
6278 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6279 cmd->supported |= SUPPORTED_2500baseX_Full;
6282 if (support_copper) {
6283 cmd->supported |= SUPPORTED_10baseT_Half |
6284 SUPPORTED_10baseT_Full |
6285 SUPPORTED_100baseT_Half |
6286 SUPPORTED_100baseT_Full |
6287 SUPPORTED_1000baseT_Full |
6288 SUPPORTED_TP;
6292 spin_lock_bh(&bp->phy_lock);
6293 cmd->port = bp->phy_port;
6294 cmd->advertising = bp->advertising;
6296 if (bp->autoneg & AUTONEG_SPEED) {
6297 cmd->autoneg = AUTONEG_ENABLE;
6299 else {
6300 cmd->autoneg = AUTONEG_DISABLE;
6303 if (netif_carrier_ok(dev)) {
6304 cmd->speed = bp->line_speed;
6305 cmd->duplex = bp->duplex;
6307 else {
6308 cmd->speed = -1;
6309 cmd->duplex = -1;
6311 spin_unlock_bh(&bp->phy_lock);
6313 cmd->transceiver = XCVR_INTERNAL;
6314 cmd->phy_address = bp->phy_addr;
6316 return 0;
6319 static int
6320 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6322 struct bnx2 *bp = netdev_priv(dev);
6323 u8 autoneg = bp->autoneg;
6324 u8 req_duplex = bp->req_duplex;
6325 u16 req_line_speed = bp->req_line_speed;
6326 u32 advertising = bp->advertising;
6327 int err = -EINVAL;
6329 spin_lock_bh(&bp->phy_lock);
6331 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6332 goto err_out_unlock;
6334 if (cmd->port != bp->phy_port &&
6335 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6336 goto err_out_unlock;
6338 /* If device is down, we can store the settings only if the user
6339 * is setting the currently active port. */
6341 if (!netif_running(dev) && cmd->port != bp->phy_port)
6342 goto err_out_unlock;
6344 if (cmd->autoneg == AUTONEG_ENABLE) {
6345 autoneg |= AUTONEG_SPEED;
6347 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
6349 /* allow advertising 1 speed */
6350 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
6351 (cmd->advertising == ADVERTISED_10baseT_Full) ||
6352 (cmd->advertising == ADVERTISED_100baseT_Half) ||
6353 (cmd->advertising == ADVERTISED_100baseT_Full)) {
6355 if (cmd->port == PORT_FIBRE)
6356 goto err_out_unlock;
6358 advertising = cmd->advertising;
6360 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
6361 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
6362 (cmd->port == PORT_TP))
6363 goto err_out_unlock;
6364 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
6365 advertising = cmd->advertising;
6366 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
6367 goto err_out_unlock;
6368 else {
6369 if (cmd->port == PORT_FIBRE)
6370 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6371 else
6372 advertising = ETHTOOL_ALL_COPPER_SPEED;
6374 advertising |= ADVERTISED_Autoneg;
6376 else {
6377 if (cmd->port == PORT_FIBRE) {
6378 if ((cmd->speed != SPEED_1000 &&
6379 cmd->speed != SPEED_2500) ||
6380 (cmd->duplex != DUPLEX_FULL))
6381 goto err_out_unlock;
6383 if (cmd->speed == SPEED_2500 &&
6384 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6385 goto err_out_unlock;
6387 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6388 goto err_out_unlock;
6390 autoneg &= ~AUTONEG_SPEED;
6391 req_line_speed = cmd->speed;
6392 req_duplex = cmd->duplex;
6393 advertising = 0;
6396 bp->autoneg = autoneg;
6397 bp->advertising = advertising;
6398 bp->req_line_speed = req_line_speed;
6399 bp->req_duplex = req_duplex;
6401 err = 0;
6402 /* If device is down, the new settings will be picked up when it is
6403 * brought up. */
6405 if (netif_running(dev))
6406 err = bnx2_setup_phy(bp, cmd->port);
6408 err_out_unlock:
6409 spin_unlock_bh(&bp->phy_lock);
6411 return err;
6414 static void
6415 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6417 struct bnx2 *bp = netdev_priv(dev);
6419 strcpy(info->driver, DRV_MODULE_NAME);
6420 strcpy(info->version, DRV_MODULE_VERSION);
6421 strcpy(info->bus_info, pci_name(bp->pdev));
6422 strcpy(info->fw_version, bp->fw_version);
6425 #define BNX2_REGDUMP_LEN (32 * 1024)
6427 static int
6428 bnx2_get_regs_len(struct net_device *dev)
6430 return BNX2_REGDUMP_LEN;
6433 static void
6434 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6436 u32 *p = _p, i, offset;
6437 u8 *orig_p = _p;
6438 struct bnx2 *bp = netdev_priv(dev);
6439 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6440 0x0800, 0x0880, 0x0c00, 0x0c10,
6441 0x0c30, 0x0d08, 0x1000, 0x101c,
6442 0x1040, 0x1048, 0x1080, 0x10a4,
6443 0x1400, 0x1490, 0x1498, 0x14f0,
6444 0x1500, 0x155c, 0x1580, 0x15dc,
6445 0x1600, 0x1658, 0x1680, 0x16d8,
6446 0x1800, 0x1820, 0x1840, 0x1854,
6447 0x1880, 0x1894, 0x1900, 0x1984,
6448 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6449 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6450 0x2000, 0x2030, 0x23c0, 0x2400,
6451 0x2800, 0x2820, 0x2830, 0x2850,
6452 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6453 0x3c00, 0x3c94, 0x4000, 0x4010,
6454 0x4080, 0x4090, 0x43c0, 0x4458,
6455 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6456 0x4fc0, 0x5010, 0x53c0, 0x5444,
6457 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6458 0x5fc0, 0x6000, 0x6400, 0x6428,
6459 0x6800, 0x6848, 0x684c, 0x6860,
6460 0x6888, 0x6910, 0x8000 };
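/* reg_boundaries[] holds consecutive (start, end) pairs of readable register
 * windows; the dump buffer is zeroed first and only those windows are read,
 * so the gaps between them stay zero-filled.
 */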
6462 regs->version = 0;
6464 memset(p, 0, BNX2_REGDUMP_LEN);
6466 if (!netif_running(bp->dev))
6467 return;
6469 i = 0;
6470 offset = reg_boundaries[0];
6471 p += offset;
6472 while (offset < BNX2_REGDUMP_LEN) {
6473 *p++ = REG_RD(bp, offset);
6474 offset += 4;
6475 if (offset == reg_boundaries[i + 1]) {
6476 offset = reg_boundaries[i + 2];
6477 p = (u32 *) (orig_p + offset);
6478 i += 2;
6483 static void
6484 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6486 struct bnx2 *bp = netdev_priv(dev);
6488 if (bp->flags & BNX2_FLAG_NO_WOL) {
6489 wol->supported = 0;
6490 wol->wolopts = 0;
6492 else {
6493 wol->supported = WAKE_MAGIC;
6494 if (bp->wol)
6495 wol->wolopts = WAKE_MAGIC;
6496 else
6497 wol->wolopts = 0;
6499 memset(&wol->sopass, 0, sizeof(wol->sopass));
6502 static int
6503 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6505 struct bnx2 *bp = netdev_priv(dev);
6507 if (wol->wolopts & ~WAKE_MAGIC)
6508 return -EINVAL;
6510 if (wol->wolopts & WAKE_MAGIC) {
6511 if (bp->flags & BNX2_FLAG_NO_WOL)
6512 return -EINVAL;
6514 bp->wol = 1;
6516 else {
6517 bp->wol = 0;
6519 return 0;
6522 static int
6523 bnx2_nway_reset(struct net_device *dev)
6525 struct bnx2 *bp = netdev_priv(dev);
6526 u32 bmcr;
6528 if (!netif_running(dev))
6529 return -EAGAIN;
6531 if (!(bp->autoneg & AUTONEG_SPEED)) {
6532 return -EINVAL;
6535 spin_lock_bh(&bp->phy_lock);
6537 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6538 int rc;
6540 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6541 spin_unlock_bh(&bp->phy_lock);
6542 return rc;
6545 /* Force a link down that the link partner can see */
6546 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6547 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6548 spin_unlock_bh(&bp->phy_lock);
6550 msleep(20);
6552 spin_lock_bh(&bp->phy_lock);
6554 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
6555 bp->serdes_an_pending = 1;
6556 mod_timer(&bp->timer, jiffies + bp->current_interval);
6559 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6560 bmcr &= ~BMCR_LOOPBACK;
6561 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6563 spin_unlock_bh(&bp->phy_lock);
6565 return 0;
6568 static int
6569 bnx2_get_eeprom_len(struct net_device *dev)
6571 struct bnx2 *bp = netdev_priv(dev);
6573 if (bp->flash_info == NULL)
6574 return 0;
6576 return (int) bp->flash_size;
6579 static int
6580 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6581 u8 *eebuf)
6583 struct bnx2 *bp = netdev_priv(dev);
6584 int rc;
6586 if (!netif_running(dev))
6587 return -EAGAIN;
6589 /* parameters already validated in ethtool_get_eeprom */
6591 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6593 return rc;
6596 static int
6597 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6598 u8 *eebuf)
6600 struct bnx2 *bp = netdev_priv(dev);
6601 int rc;
6603 if (!netif_running(dev))
6604 return -EAGAIN;
6606 /* parameters already validated in ethtool_set_eeprom */
6608 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6610 return rc;
6613 static int
6614 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6616 struct bnx2 *bp = netdev_priv(dev);
6618 memset(coal, 0, sizeof(struct ethtool_coalesce));
6620 coal->rx_coalesce_usecs = bp->rx_ticks;
6621 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6622 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6623 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6625 coal->tx_coalesce_usecs = bp->tx_ticks;
6626 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6627 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6628 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6630 coal->stats_block_coalesce_usecs = bp->stats_ticks;
6632 return 0;
6635 static int
6636 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6638 struct bnx2 *bp = netdev_priv(dev);
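/* Clamp each requested value to what the host coalescing block can hold:
 * tick values (in microseconds) are limited to 0x3ff and frame-count trip
 * points to 0xff, matching the widths of the corresponding register fields.
 */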
6640 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6641 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6643 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6644 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6646 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6647 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6649 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6650 if (bp->rx_quick_cons_trip_int > 0xff)
6651 bp->rx_quick_cons_trip_int = 0xff;
6653 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6654 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6656 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6657 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6659 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6660 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6662 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6663 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6664 0xff;
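/* The statistics block update period is more restricted: the 5708 only
 * supports disabled (0) or a fixed one-second interval; on all chips the
 * result is then clamped and aligned to the HC_STAT_TICKS field.
 */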
6666 bp->stats_ticks = coal->stats_block_coalesce_usecs;
6667 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6668 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6669 bp->stats_ticks = USEC_PER_SEC;
6671 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6672 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6673 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6675 if (netif_running(bp->dev)) {
6676 bnx2_netif_stop(bp);
6677 bnx2_init_nic(bp, 0);
6678 bnx2_netif_start(bp);
6681 return 0;
6684 static void
6685 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6687 struct bnx2 *bp = netdev_priv(dev);
6689 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6690 ering->rx_mini_max_pending = 0;
6691 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6693 ering->rx_pending = bp->rx_ring_size;
6694 ering->rx_mini_pending = 0;
6695 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6697 ering->tx_max_pending = MAX_TX_DESC_CNT;
6698 ering->tx_pending = bp->tx_ring_size;
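/* Resize the rings: if the interface is up, quiesce the NIC and free the
 * current buffers and descriptor memory, record the new sizes, then
 * reallocate and restart.  Used by both ethtool ring changes and MTU changes.
 */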
6701 static int
6702 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
6704 if (netif_running(bp->dev)) {
6705 bnx2_netif_stop(bp);
6706 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6707 bnx2_free_skbs(bp);
6708 bnx2_free_mem(bp);
6711 bnx2_set_rx_ring_size(bp, rx);
6712 bp->tx_ring_size = tx;
6714 if (netif_running(bp->dev)) {
6715 int rc;
6717 rc = bnx2_alloc_mem(bp);
6718 if (rc)
6719 return rc;
6720 bnx2_init_nic(bp, 0);
6721 bnx2_netif_start(bp);
6723 return 0;
6726 static int
6727 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6729 struct bnx2 *bp = netdev_priv(dev);
6730 int rc;
6732 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6733 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6734 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6736 return -EINVAL;
6738 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6739 return rc;
6742 static void
6743 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6745 struct bnx2 *bp = netdev_priv(dev);
6747 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6748 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6749 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6752 static int
6753 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6755 struct bnx2 *bp = netdev_priv(dev);
6757 bp->req_flow_ctrl = 0;
6758 if (epause->rx_pause)
6759 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6760 if (epause->tx_pause)
6761 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6763 if (epause->autoneg) {
6764 bp->autoneg |= AUTONEG_FLOW_CTRL;
6766 else {
6767 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6770 if (netif_running(dev)) {
6771 spin_lock_bh(&bp->phy_lock);
6772 bnx2_setup_phy(bp, bp->phy_port);
6773 spin_unlock_bh(&bp->phy_lock);
6776 return 0;
6779 static u32
6780 bnx2_get_rx_csum(struct net_device *dev)
6782 struct bnx2 *bp = netdev_priv(dev);
6784 return bp->rx_csum;
6787 static int
6788 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6790 struct bnx2 *bp = netdev_priv(dev);
6792 bp->rx_csum = data;
6793 return 0;
6796 static int
6797 bnx2_set_tso(struct net_device *dev, u32 data)
6799 struct bnx2 *bp = netdev_priv(dev);
6801 if (data) {
6802 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6803 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6804 dev->features |= NETIF_F_TSO6;
6805 } else
6806 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6807 NETIF_F_TSO_ECN);
6808 return 0;
6811 #define BNX2_NUM_STATS 46
6813 static struct {
6814 char string[ETH_GSTRING_LEN];
6815 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
6816 { "rx_bytes" },
6817 { "rx_error_bytes" },
6818 { "tx_bytes" },
6819 { "tx_error_bytes" },
6820 { "rx_ucast_packets" },
6821 { "rx_mcast_packets" },
6822 { "rx_bcast_packets" },
6823 { "tx_ucast_packets" },
6824 { "tx_mcast_packets" },
6825 { "tx_bcast_packets" },
6826 { "tx_mac_errors" },
6827 { "tx_carrier_errors" },
6828 { "rx_crc_errors" },
6829 { "rx_align_errors" },
6830 { "tx_single_collisions" },
6831 { "tx_multi_collisions" },
6832 { "tx_deferred" },
6833 { "tx_excess_collisions" },
6834 { "tx_late_collisions" },
6835 { "tx_total_collisions" },
6836 { "rx_fragments" },
6837 { "rx_jabbers" },
6838 { "rx_undersize_packets" },
6839 { "rx_oversize_packets" },
6840 { "rx_64_byte_packets" },
6841 { "rx_65_to_127_byte_packets" },
6842 { "rx_128_to_255_byte_packets" },
6843 { "rx_256_to_511_byte_packets" },
6844 { "rx_512_to_1023_byte_packets" },
6845 { "rx_1024_to_1522_byte_packets" },
6846 { "rx_1523_to_9022_byte_packets" },
6847 { "tx_64_byte_packets" },
6848 { "tx_65_to_127_byte_packets" },
6849 { "tx_128_to_255_byte_packets" },
6850 { "tx_256_to_511_byte_packets" },
6851 { "tx_512_to_1023_byte_packets" },
6852 { "tx_1024_to_1522_byte_packets" },
6853 { "tx_1523_to_9022_byte_packets" },
6854 { "rx_xon_frames" },
6855 { "rx_xoff_frames" },
6856 { "tx_xon_frames" },
6857 { "tx_xoff_frames" },
6858 { "rx_mac_ctrl_frames" },
6859 { "rx_filtered_packets" },
6860 { "rx_discards" },
6861 { "rx_fw_discards" },
6864 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
6866 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
6867 STATS_OFFSET32(stat_IfHCInOctets_hi),
6868 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
6869 STATS_OFFSET32(stat_IfHCOutOctets_hi),
6870 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
6871 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
6872 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
6873 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
6874 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
6875 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
6876 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
6877 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6878 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6879 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
6880 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
6881 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
6882 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
6883 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6884 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
6885 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
6886 STATS_OFFSET32(stat_EtherStatsCollisions),
6887 STATS_OFFSET32(stat_EtherStatsFragments),
6888 STATS_OFFSET32(stat_EtherStatsJabbers),
6889 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6890 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6891 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6892 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6893 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6894 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6895 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6896 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6897 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6898 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6899 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6900 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6901 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6902 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6903 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6904 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6905 STATS_OFFSET32(stat_XonPauseFramesReceived),
6906 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6907 STATS_OFFSET32(stat_OutXonSent),
6908 STATS_OFFSET32(stat_OutXoffSent),
6909 STATS_OFFSET32(stat_MacControlFramesReceived),
6910 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6911 STATS_OFFSET32(stat_IfInMBUFDiscards),
6912 STATS_OFFSET32(stat_FwRxDrop),
6915 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6916 * skipped because of errata.
6918 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
6919 8,0,8,8,8,8,8,8,8,8,
6920 4,0,4,4,4,4,4,4,4,4,
6921 4,4,4,4,4,4,4,4,4,4,
6922 4,4,4,4,4,4,4,4,4,4,
6923 4,4,4,4,4,4,
6926 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6927 8,0,8,8,8,8,8,8,8,8,
6928 4,4,4,4,4,4,4,4,4,4,
6929 4,4,4,4,4,4,4,4,4,4,
6930 4,4,4,4,4,4,4,4,4,4,
6931 4,4,4,4,4,4,
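/* In the two tables above, each entry gives the width in bytes of the
 * corresponding hardware counter: 8 means a 64-bit counter spanning two
 * 32-bit words, 4 a single 32-bit word, and 0 means the counter is
 * unreliable on that chip and is reported as zero.
 */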
6934 #define BNX2_NUM_TESTS 6
6936 static struct {
6937 char string[ETH_GSTRING_LEN];
6938 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6939 { "register_test (offline)" },
6940 { "memory_test (offline)" },
6941 { "loopback_test (offline)" },
6942 { "nvram_test (online)" },
6943 { "interrupt_test (online)" },
6944 { "link_test (online)" },
6947 static int
6948 bnx2_get_sset_count(struct net_device *dev, int sset)
6950 switch (sset) {
6951 case ETH_SS_TEST:
6952 return BNX2_NUM_TESTS;
6953 case ETH_SS_STATS:
6954 return BNX2_NUM_STATS;
6955 default:
6956 return -EOPNOTSUPP;
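/* Run the ethtool self-tests.  The offline tests (register, memory and
 * loopback) reset the chip into diagnostic mode and require the interface
 * to be brought back up afterwards; the online tests (NVRAM, interrupt,
 * link) run against the normal configuration.  A non-zero entry in buf[]
 * marks the matching test as failed and ETH_TEST_FL_FAILED is set.
 */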
6960 static void
6961 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6963 struct bnx2 *bp = netdev_priv(dev);
6965 bnx2_set_power_state(bp, PCI_D0);
6967 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6968 if (etest->flags & ETH_TEST_FL_OFFLINE) {
6969 int i;
6971 bnx2_netif_stop(bp);
6972 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6973 bnx2_free_skbs(bp);
6975 if (bnx2_test_registers(bp) != 0) {
6976 buf[0] = 1;
6977 etest->flags |= ETH_TEST_FL_FAILED;
6979 if (bnx2_test_memory(bp) != 0) {
6980 buf[1] = 1;
6981 etest->flags |= ETH_TEST_FL_FAILED;
6983 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
6984 etest->flags |= ETH_TEST_FL_FAILED;
6986 if (!netif_running(bp->dev))
6987 bnx2_shutdown_chip(bp);
6988 else {
6989 bnx2_init_nic(bp, 1);
6990 bnx2_netif_start(bp);
6993 /* wait for link up */
6994 for (i = 0; i < 7; i++) {
6995 if (bp->link_up)
6996 break;
6997 msleep_interruptible(1000);
7001 if (bnx2_test_nvram(bp) != 0) {
7002 buf[3] = 1;
7003 etest->flags |= ETH_TEST_FL_FAILED;
7005 if (bnx2_test_intr(bp) != 0) {
7006 buf[4] = 1;
7007 etest->flags |= ETH_TEST_FL_FAILED;
7010 if (bnx2_test_link(bp) != 0) {
7011 buf[5] = 1;
7012 etest->flags |= ETH_TEST_FL_FAILED;
7015 if (!netif_running(bp->dev))
7016 bnx2_set_power_state(bp, PCI_D3hot);
7019 static void
7020 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7022 switch (stringset) {
7023 case ETH_SS_STATS:
7024 memcpy(buf, bnx2_stats_str_arr,
7025 sizeof(bnx2_stats_str_arr));
7026 break;
7027 case ETH_SS_TEST:
7028 memcpy(buf, bnx2_tests_str_arr,
7029 sizeof(bnx2_tests_str_arr));
7030 break;
7034 static void
7035 bnx2_get_ethtool_stats(struct net_device *dev,
7036 struct ethtool_stats *stats, u64 *buf)
7038 struct bnx2 *bp = netdev_priv(dev);
7039 int i;
7040 u32 *hw_stats = (u32 *) bp->stats_blk;
7041 u8 *stats_len_arr = NULL;
7043 if (hw_stats == NULL) {
7044 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7045 return;
7048 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7049 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7050 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7051 (CHIP_ID(bp) == CHIP_ID_5708_A0))
7052 stats_len_arr = bnx2_5706_stats_len_arr;
7053 else
7054 stats_len_arr = bnx2_5708_stats_len_arr;
7056 for (i = 0; i < BNX2_NUM_STATS; i++) {
7057 if (stats_len_arr[i] == 0) {
7058 /* skip this counter */
7059 buf[i] = 0;
7060 continue;
7062 if (stats_len_arr[i] == 4) {
7063 /* 4-byte counter */
7064 buf[i] = (u64)
7065 *(hw_stats + bnx2_stats_offset_arr[i]);
7066 continue;
7068 /* 8-byte counter */
7069 buf[i] = (((u64) *(hw_stats +
7070 bnx2_stats_offset_arr[i])) << 32) +
7071 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
7075 static int
7076 bnx2_phys_id(struct net_device *dev, u32 data)
7078 struct bnx2 *bp = netdev_priv(dev);
7079 int i;
7080 u32 save;
7082 bnx2_set_power_state(bp, PCI_D0);
7084 if (data == 0)
7085 data = 2;
7087 save = REG_RD(bp, BNX2_MISC_CFG);
7088 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
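/* Blink the port LED for roughly 'data' seconds (default 2): the loop
 * alternates between the override-only state and the full override pattern
 * every 500 ms, bails out early if a signal is pending, and finally
 * restores the saved MISC_CFG LED mode.
 */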
7090 for (i = 0; i < (data * 2); i++) {
7091 if ((i % 2) == 0) {
7092 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7094 else {
7095 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7096 BNX2_EMAC_LED_1000MB_OVERRIDE |
7097 BNX2_EMAC_LED_100MB_OVERRIDE |
7098 BNX2_EMAC_LED_10MB_OVERRIDE |
7099 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7100 BNX2_EMAC_LED_TRAFFIC);
7102 msleep_interruptible(500);
7103 if (signal_pending(current))
7104 break;
7106 REG_WR(bp, BNX2_EMAC_LED, 0);
7107 REG_WR(bp, BNX2_MISC_CFG, save);
7109 if (!netif_running(dev))
7110 bnx2_set_power_state(bp, PCI_D3hot);
7112 return 0;
7115 static int
7116 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7118 struct bnx2 *bp = netdev_priv(dev);
7120 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7121 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7122 else
7123 return (ethtool_op_set_tx_csum(dev, data));
7126 static const struct ethtool_ops bnx2_ethtool_ops = {
7127 .get_settings = bnx2_get_settings,
7128 .set_settings = bnx2_set_settings,
7129 .get_drvinfo = bnx2_get_drvinfo,
7130 .get_regs_len = bnx2_get_regs_len,
7131 .get_regs = bnx2_get_regs,
7132 .get_wol = bnx2_get_wol,
7133 .set_wol = bnx2_set_wol,
7134 .nway_reset = bnx2_nway_reset,
7135 .get_link = ethtool_op_get_link,
7136 .get_eeprom_len = bnx2_get_eeprom_len,
7137 .get_eeprom = bnx2_get_eeprom,
7138 .set_eeprom = bnx2_set_eeprom,
7139 .get_coalesce = bnx2_get_coalesce,
7140 .set_coalesce = bnx2_set_coalesce,
7141 .get_ringparam = bnx2_get_ringparam,
7142 .set_ringparam = bnx2_set_ringparam,
7143 .get_pauseparam = bnx2_get_pauseparam,
7144 .set_pauseparam = bnx2_set_pauseparam,
7145 .get_rx_csum = bnx2_get_rx_csum,
7146 .set_rx_csum = bnx2_set_rx_csum,
7147 .set_tx_csum = bnx2_set_tx_csum,
7148 .set_sg = ethtool_op_set_sg,
7149 .set_tso = bnx2_set_tso,
7150 .self_test = bnx2_self_test,
7151 .get_strings = bnx2_get_strings,
7152 .phys_id = bnx2_phys_id,
7153 .get_ethtool_stats = bnx2_get_ethtool_stats,
7154 .get_sset_count = bnx2_get_sset_count,
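/* These hooks back the standard ethtool(8) interface; for example,
 * "ethtool -S" lands in bnx2_get_ethtool_stats above and "ethtool -t" in
 * bnx2_self_test.  The struct is attached to the netdev in bnx2_init_one()
 * below.
 */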
7157 /* Called with rtnl_lock */
7158 static int
7159 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7161 struct mii_ioctl_data *data = if_mii(ifr);
7162 struct bnx2 *bp = netdev_priv(dev);
7163 int err;
7165 switch(cmd) {
7166 case SIOCGMIIPHY:
7167 data->phy_id = bp->phy_addr;
7169 /* fallthru */
7170 case SIOCGMIIREG: {
7171 u32 mii_regval;
7173 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7174 return -EOPNOTSUPP;
7176 if (!netif_running(dev))
7177 return -EAGAIN;
7179 spin_lock_bh(&bp->phy_lock);
7180 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7181 spin_unlock_bh(&bp->phy_lock);
7183 data->val_out = mii_regval;
7185 return err;
7188 case SIOCSMIIREG:
7189 if (!capable(CAP_NET_ADMIN))
7190 return -EPERM;
7192 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7193 return -EOPNOTSUPP;
7195 if (!netif_running(dev))
7196 return -EAGAIN;
7198 spin_lock_bh(&bp->phy_lock);
7199 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7200 spin_unlock_bh(&bp->phy_lock);
7202 return err;
7204 default:
7205 /* do nothing */
7206 break;
7208 return -EOPNOTSUPP;
7211 /* Called with rtnl_lock */
7212 static int
7213 bnx2_change_mac_addr(struct net_device *dev, void *p)
7215 struct sockaddr *addr = p;
7216 struct bnx2 *bp = netdev_priv(dev);
7218 if (!is_valid_ether_addr(addr->sa_data))
7219 return -EINVAL;
7221 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7222 if (netif_running(dev))
7223 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7225 return 0;
7228 /* Called with rtnl_lock */
7229 static int
7230 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7232 struct bnx2 *bp = netdev_priv(dev);
7234 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7235 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7236 return -EINVAL;
7238 dev->mtu = new_mtu;
7239 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7242 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7243 static void
7244 poll_bnx2(struct net_device *dev)
7246 struct bnx2 *bp = netdev_priv(dev);
7247 int i;
7249 for (i = 0; i < bp->irq_nvecs; i++) {
7250 disable_irq(bp->irq_tbl[i].vector);
7251 bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
7252 enable_irq(bp->irq_tbl[i].vector);
7255 #endif
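/* Determine whether a 5709 port is copper or SerDes.  The bond ID gives the
 * answer directly for single-media parts; dual-media parts are decoded from
 * the media strap (or its software override) with a per-PCI-function mapping.
 */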
7257 static void __devinit
7258 bnx2_get_5709_media(struct bnx2 *bp)
7260 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7261 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7262 u32 strap;
7264 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7265 return;
7266 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7267 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7268 return;
7271 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7272 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7273 else
7274 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7276 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7277 switch (strap) {
7278 case 0x4:
7279 case 0x5:
7280 case 0x6:
7281 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7282 return;
7284 } else {
7285 switch (strap) {
7286 case 0x1:
7287 case 0x2:
7288 case 0x4:
7289 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7290 return;
7295 static void __devinit
7296 bnx2_get_pci_speed(struct bnx2 *bp)
7298 u32 reg;
7300 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7301 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7302 u32 clkreg;
7304 bp->flags |= BNX2_FLAG_PCIX;
7306 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7308 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7309 switch (clkreg) {
7310 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7311 bp->bus_speed_mhz = 133;
7312 break;
7314 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7315 bp->bus_speed_mhz = 100;
7316 break;
7318 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7319 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7320 bp->bus_speed_mhz = 66;
7321 break;
7323 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7324 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7325 bp->bus_speed_mhz = 50;
7326 break;
7328 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7329 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7330 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7331 bp->bus_speed_mhz = 33;
7332 break;
7335 else {
7336 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7337 bp->bus_speed_mhz = 66;
7338 else
7339 bp->bus_speed_mhz = 33;
7342 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7343 bp->flags |= BNX2_FLAG_PCI_32BIT;
7347 static int __devinit
7348 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7350 struct bnx2 *bp;
7351 unsigned long mem_len;
7352 int rc, i, j;
7353 u32 reg;
7354 u64 dma_mask, persist_dma_mask;
7356 SET_NETDEV_DEV(dev, &pdev->dev);
7357 bp = netdev_priv(dev);
7359 bp->flags = 0;
7360 bp->phy_flags = 0;
7362 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7363 rc = pci_enable_device(pdev);
7364 if (rc) {
7365 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7366 goto err_out;
7369 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7370 dev_err(&pdev->dev,
7371 "Cannot find PCI device base address, aborting.\n");
7372 rc = -ENODEV;
7373 goto err_out_disable;
7376 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7377 if (rc) {
7378 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7379 goto err_out_disable;
7382 pci_set_master(pdev);
7383 pci_save_state(pdev);
7385 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7386 if (bp->pm_cap == 0) {
7387 dev_err(&pdev->dev,
7388 "Cannot find power management capability, aborting.\n");
7389 rc = -EIO;
7390 goto err_out_release;
7393 bp->dev = dev;
7394 bp->pdev = pdev;
7396 spin_lock_init(&bp->phy_lock);
7397 spin_lock_init(&bp->indirect_lock);
7398 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7400 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7401 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS);
7402 dev->mem_end = dev->mem_start + mem_len;
7403 dev->irq = pdev->irq;
7405 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7407 if (!bp->regview) {
7408 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7409 rc = -ENOMEM;
7410 goto err_out_release;
7413 /* Configure byte swapping and enable writes to the reg_window registers.
7414 * Rely on the CPU to do target byte swapping on big endian systems;
7415 * the chip's target access swapping will not swap all accesses.
7417 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7418 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7419 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7421 bnx2_set_power_state(bp, PCI_D0);
7423 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7425 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7426 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7427 dev_err(&pdev->dev,
7428 "Cannot find PCIE capability, aborting.\n");
7429 rc = -EIO;
7430 goto err_out_unmap;
7432 bp->flags |= BNX2_FLAG_PCIE;
7433 if (CHIP_REV(bp) == CHIP_REV_Ax)
7434 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7435 } else {
7436 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7437 if (bp->pcix_cap == 0) {
7438 dev_err(&pdev->dev,
7439 "Cannot find PCIX capability, aborting.\n");
7440 rc = -EIO;
7441 goto err_out_unmap;
7445 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7446 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7447 bp->flags |= BNX2_FLAG_MSIX_CAP;
7450 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7451 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7452 bp->flags |= BNX2_FLAG_MSI_CAP;
7455 /* 5708 cannot support DMA addresses > 40-bit. */
7456 if (CHIP_NUM(bp) == CHIP_NUM_5708)
7457 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7458 else
7459 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7461 /* Configure DMA attributes. */
7462 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7463 dev->features |= NETIF_F_HIGHDMA;
7464 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7465 if (rc) {
7466 dev_err(&pdev->dev,
7467 "pci_set_consistent_dma_mask failed, aborting.\n");
7468 goto err_out_unmap;
7470 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7471 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7472 goto err_out_unmap;
7475 if (!(bp->flags & BNX2_FLAG_PCIE))
7476 bnx2_get_pci_speed(bp);
7478 /* 5706A0 may falsely detect SERR and PERR. */
7479 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7480 reg = REG_RD(bp, PCI_COMMAND);
7481 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7482 REG_WR(bp, PCI_COMMAND, reg);
7484 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7485 !(bp->flags & BNX2_FLAG_PCIX)) {
7487 dev_err(&pdev->dev,
7488 "5706 A1 can only be used in a PCIX bus, aborting.\n");
7489 goto err_out_unmap;
7492 bnx2_init_nvram(bp);
7494 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7496 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7497 BNX2_SHM_HDR_SIGNATURE_SIG) {
7498 u32 off = PCI_FUNC(pdev->devfn) << 2;
7500 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7501 } else
7502 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7504 /* Get the permanent MAC address. First we need to make sure the
7505 * firmware is actually running.
7507 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7509 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7510 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7511 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7512 rc = -ENODEV;
7513 goto err_out_unmap;
7516 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
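/* The bootcode revision is packed one byte per component in the upper three
 * bytes of this word; format it as "x.y.z", suppressing leading zeros but
 * always emitting at least one digit per component.
 */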
7517 for (i = 0, j = 0; i < 3; i++) {
7518 u8 num, k, skip0;
7520 num = (u8) (reg >> (24 - (i * 8)));
7521 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7522 if (num >= k || !skip0 || k == 1) {
7523 bp->fw_version[j++] = (num / k) + '0';
7524 skip0 = 0;
7527 if (i != 2)
7528 bp->fw_version[j++] = '.';
7530 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7531 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7532 bp->wol = 1;
7534 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7535 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7537 for (i = 0; i < 30; i++) {
7538 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7539 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7540 break;
7541 msleep(10);
7544 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7545 reg &= BNX2_CONDITION_MFW_RUN_MASK;
7546 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7547 reg != BNX2_CONDITION_MFW_RUN_NONE) {
7548 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7550 bp->fw_version[j++] = ' ';
7551 for (i = 0; i < 3; i++) {
7552 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7553 reg = swab32(reg);
7554 memcpy(&bp->fw_version[j], &reg, 4);
7555 j += 4;
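/* The permanent station address lives in shared memory as two words:
 * MAC_UPPER holds the two most significant bytes and MAC_LOWER the remaining
 * four, most significant byte first.
 */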
7559 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7560 bp->mac_addr[0] = (u8) (reg >> 8);
7561 bp->mac_addr[1] = (u8) reg;
7563 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7564 bp->mac_addr[2] = (u8) (reg >> 24);
7565 bp->mac_addr[3] = (u8) (reg >> 16);
7566 bp->mac_addr[4] = (u8) (reg >> 8);
7567 bp->mac_addr[5] = (u8) reg;
7569 bp->tx_ring_size = MAX_TX_DESC_CNT;
7570 bnx2_set_rx_ring_size(bp, 255);
7572 bp->rx_csum = 1;
7574 bp->tx_quick_cons_trip_int = 20;
7575 bp->tx_quick_cons_trip = 20;
7576 bp->tx_ticks_int = 80;
7577 bp->tx_ticks = 80;
7579 bp->rx_quick_cons_trip_int = 6;
7580 bp->rx_quick_cons_trip = 6;
7581 bp->rx_ticks_int = 18;
7582 bp->rx_ticks = 18;
7584 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7586 bp->current_interval = BNX2_TIMER_INTERVAL;
7588 bp->phy_addr = 1;
7590 /* Disable WOL support if we are running on a SERDES chip. */
7591 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7592 bnx2_get_5709_media(bp);
7593 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7594 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7596 bp->phy_port = PORT_TP;
7597 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7598 bp->phy_port = PORT_FIBRE;
7599 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7600 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7601 bp->flags |= BNX2_FLAG_NO_WOL;
7602 bp->wol = 0;
7604 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7605 /* Don't do parallel detect on this board because of
7606 * some board problems. The link will not go down
7607 * if we do parallel detect.
7609 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7610 pdev->subsystem_device == 0x310c)
7611 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7612 } else {
7613 bp->phy_addr = 2;
7614 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7615 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7617 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7618 CHIP_NUM(bp) == CHIP_NUM_5708)
7619 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7620 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7621 (CHIP_REV(bp) == CHIP_REV_Ax ||
7622 CHIP_REV(bp) == CHIP_REV_Bx))
7623 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7625 bnx2_init_fw_cap(bp);
7627 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7628 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7629 (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
7630 !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
7631 bp->flags |= BNX2_FLAG_NO_WOL;
7632 bp->wol = 0;
7635 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7636 bp->tx_quick_cons_trip_int =
7637 bp->tx_quick_cons_trip;
7638 bp->tx_ticks_int = bp->tx_ticks;
7639 bp->rx_quick_cons_trip_int =
7640 bp->rx_quick_cons_trip;
7641 bp->rx_ticks_int = bp->rx_ticks;
7642 bp->comp_prod_trip_int = bp->comp_prod_trip;
7643 bp->com_ticks_int = bp->com_ticks;
7644 bp->cmd_ticks_int = bp->cmd_ticks;
7647 /* Disable MSI on 5706 if AMD 8132 bridge is found.
7649 * MSI is defined to be a 32-bit write. The 5706 does 64-bit MSI writes
7650 * with byte enables disabled on the unused 32-bit word. This is legal
7651 * but causes problems on the AMD 8132 which will eventually stop
7652 * responding after a while.
7654 * AMD believes this incompatibility is unique to the 5706, and
7655 * prefers to locally disable MSI rather than globally disabling it.
7657 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7658 struct pci_dev *amd_8132 = NULL;
7660 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7661 PCI_DEVICE_ID_AMD_8132_BRIDGE,
7662 amd_8132))) {
7664 if (amd_8132->revision >= 0x10 &&
7665 amd_8132->revision <= 0x13) {
7666 disable_msi = 1;
7667 pci_dev_put(amd_8132);
7668 break;
7673 bnx2_set_default_link(bp);
7674 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7676 init_timer(&bp->timer);
7677 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
7678 bp->timer.data = (unsigned long) bp;
7679 bp->timer.function = bnx2_timer;
7681 return 0;
7683 err_out_unmap:
7684 if (bp->regview) {
7685 iounmap(bp->regview);
7686 bp->regview = NULL;
7689 err_out_release:
7690 pci_release_regions(pdev);
7692 err_out_disable:
7693 pci_disable_device(pdev);
7694 pci_set_drvdata(pdev, NULL);
7696 err_out:
7697 return rc;
7700 static char * __devinit
7701 bnx2_bus_string(struct bnx2 *bp, char *str)
7703 char *s = str;
7705 if (bp->flags & BNX2_FLAG_PCIE) {
7706 s += sprintf(s, "PCI Express");
7707 } else {
7708 s += sprintf(s, "PCI");
7709 if (bp->flags & BNX2_FLAG_PCIX)
7710 s += sprintf(s, "-X");
7711 if (bp->flags & BNX2_FLAG_PCI_32BIT)
7712 s += sprintf(s, " 32-bit");
7713 else
7714 s += sprintf(s, " 64-bit");
7715 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7717 return str;
7720 static void __devinit
7721 bnx2_init_napi(struct bnx2 *bp)
7723 int i;
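/* NAPI context 0 services the default ring through bnx2_poll; the remaining
 * contexts poll their own ring through bnx2_poll_msix and are only exercised
 * when multiple (MSI-X) vectors are in use.  The NAPI weight is 64 packets.
 */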
7725 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7726 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
7727 int (*poll)(struct napi_struct *, int);
7729 if (i == 0)
7730 poll = bnx2_poll;
7731 else
7732 poll = bnx2_poll_msix;
7734 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
7735 bnapi->bp = bp;
7739 static const struct net_device_ops bnx2_netdev_ops = {
7740 .ndo_open = bnx2_open,
7741 .ndo_start_xmit = bnx2_start_xmit,
7742 .ndo_stop = bnx2_close,
7743 .ndo_get_stats = bnx2_get_stats,
7744 .ndo_set_rx_mode = bnx2_set_rx_mode,
7745 .ndo_do_ioctl = bnx2_ioctl,
7746 .ndo_validate_addr = eth_validate_addr,
7747 .ndo_set_mac_address = bnx2_change_mac_addr,
7748 .ndo_change_mtu = bnx2_change_mtu,
7749 .ndo_tx_timeout = bnx2_tx_timeout,
7750 #ifdef BCM_VLAN
7751 .ndo_vlan_rx_register = bnx2_vlan_rx_register,
7752 #endif
7753 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7754 .ndo_poll_controller = poll_bnx2,
7755 #endif
7758 static int __devinit
7759 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7761 static int version_printed = 0;
7762 struct net_device *dev = NULL;
7763 struct bnx2 *bp;
7764 int rc;
7765 char str[40];
7767 if (version_printed++ == 0)
7768 printk(KERN_INFO "%s", version);
7770 /* dev is zeroed by alloc_etherdev_mq */
7771 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
7773 if (!dev)
7774 return -ENOMEM;
7776 rc = bnx2_init_board(pdev, dev);
7777 if (rc < 0) {
7778 free_netdev(dev);
7779 return rc;
7782 dev->netdev_ops = &bnx2_netdev_ops;
7783 dev->watchdog_timeo = TX_TIMEOUT;
7784 dev->ethtool_ops = &bnx2_ethtool_ops;
7786 bp = netdev_priv(dev);
7787 bnx2_init_napi(bp);
7789 pci_set_drvdata(pdev, dev);
7791 memcpy(dev->dev_addr, bp->mac_addr, 6);
7792 memcpy(dev->perm_addr, bp->mac_addr, 6);
7794 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
7795 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7796 dev->features |= NETIF_F_IPV6_CSUM;
7798 #ifdef BCM_VLAN
7799 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7800 #endif
7801 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7802 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7803 dev->features |= NETIF_F_TSO6;
7805 if ((rc = register_netdev(dev))) {
7806 dev_err(&pdev->dev, "Cannot register net device\n");
7807 if (bp->regview)
7808 iounmap(bp->regview);
7809 pci_release_regions(pdev);
7810 pci_disable_device(pdev);
7811 pci_set_drvdata(pdev, NULL);
7812 free_netdev(dev);
7813 return rc;
7816 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
7817 "IRQ %d, node addr %pM\n",
7818 dev->name,
7819 board_info[ent->driver_data].name,
7820 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7821 ((CHIP_ID(bp) & 0x0ff0) >> 4),
7822 bnx2_bus_string(bp, str),
7823 dev->base_addr,
7824 bp->pdev->irq, dev->dev_addr);
7826 return 0;
7829 static void __devexit
7830 bnx2_remove_one(struct pci_dev *pdev)
7832 struct net_device *dev = pci_get_drvdata(pdev);
7833 struct bnx2 *bp = netdev_priv(dev);
7835 flush_scheduled_work();
7837 unregister_netdev(dev);
7839 if (bp->regview)
7840 iounmap(bp->regview);
7842 free_netdev(dev);
7843 pci_release_regions(pdev);
7844 pci_disable_device(pdev);
7845 pci_set_drvdata(pdev, NULL);
7848 static int
7849 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
7851 struct net_device *dev = pci_get_drvdata(pdev);
7852 struct bnx2 *bp = netdev_priv(dev);
7854 /* PCI register 4 needs to be saved whether or not netif_running().
7855 * The MSI address and data need to be saved if MSI is in use and
7856 * netif_running().
7858 pci_save_state(pdev);
7859 if (!netif_running(dev))
7860 return 0;
7862 flush_scheduled_work();
7863 bnx2_netif_stop(bp);
7864 netif_device_detach(dev);
7865 del_timer_sync(&bp->timer);
7866 bnx2_shutdown_chip(bp);
7867 bnx2_free_skbs(bp);
7868 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
7869 return 0;
7872 static int
7873 bnx2_resume(struct pci_dev *pdev)
7875 struct net_device *dev = pci_get_drvdata(pdev);
7876 struct bnx2 *bp = netdev_priv(dev);
7878 pci_restore_state(pdev);
7879 if (!netif_running(dev))
7880 return 0;
7882 bnx2_set_power_state(bp, PCI_D0);
7883 netif_device_attach(dev);
7884 bnx2_init_nic(bp, 1);
7885 bnx2_netif_start(bp);
7886 return 0;
7890 * bnx2_io_error_detected - called when PCI error is detected
7891 * @pdev: Pointer to PCI device
7892 * @state: The current pci connection state
7894 * This function is called after a PCI bus error affecting
7895 * this device has been detected.
7897 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
7898 pci_channel_state_t state)
7900 struct net_device *dev = pci_get_drvdata(pdev);
7901 struct bnx2 *bp = netdev_priv(dev);
7903 rtnl_lock();
7904 netif_device_detach(dev);
7906 if (netif_running(dev)) {
7907 bnx2_netif_stop(bp);
7908 del_timer_sync(&bp->timer);
7909 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
7912 pci_disable_device(pdev);
7913 rtnl_unlock();
7915 /* Request a slot reset. */
7916 return PCI_ERS_RESULT_NEED_RESET;
7920 * bnx2_io_slot_reset - called after the pci bus has been reset.
7921 * @pdev: Pointer to PCI device
7923 * Restart the card from scratch, as if from a cold-boot.
7925 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
7927 struct net_device *dev = pci_get_drvdata(pdev);
7928 struct bnx2 *bp = netdev_priv(dev);
7930 rtnl_lock();
7931 if (pci_enable_device(pdev)) {
7932 dev_err(&pdev->dev,
7933 "Cannot re-enable PCI device after reset.\n");
7934 rtnl_unlock();
7935 return PCI_ERS_RESULT_DISCONNECT;
7937 pci_set_master(pdev);
7938 pci_restore_state(pdev);
7940 if (netif_running(dev)) {
7941 bnx2_set_power_state(bp, PCI_D0);
7942 bnx2_init_nic(bp, 1);
7945 rtnl_unlock();
7946 return PCI_ERS_RESULT_RECOVERED;
7950 * bnx2_io_resume - called when traffic can start flowing again.
7951 * @pdev: Pointer to PCI device
7953 * This callback is called when the error recovery driver tells us that
7954 * it's OK to resume normal operation.
7956 static void bnx2_io_resume(struct pci_dev *pdev)
7958 struct net_device *dev = pci_get_drvdata(pdev);
7959 struct bnx2 *bp = netdev_priv(dev);
7961 rtnl_lock();
7962 if (netif_running(dev))
7963 bnx2_netif_start(bp);
7965 netif_device_attach(dev);
7966 rtnl_unlock();
7969 static struct pci_error_handlers bnx2_err_handler = {
7970 .error_detected = bnx2_io_error_detected,
7971 .slot_reset = bnx2_io_slot_reset,
7972 .resume = bnx2_io_resume,
7975 static struct pci_driver bnx2_pci_driver = {
7976 .name = DRV_MODULE_NAME,
7977 .id_table = bnx2_pci_tbl,
7978 .probe = bnx2_init_one,
7979 .remove = __devexit_p(bnx2_remove_one),
7980 .suspend = bnx2_suspend,
7981 .resume = bnx2_resume,
7982 .err_handler = &bnx2_err_handler,
7985 static int __init bnx2_init(void)
7987 return pci_register_driver(&bnx2_pci_driver);
7990 static void __exit bnx2_cleanup(void)
7992 pci_unregister_driver(&bnx2_pci_driver);
7995 module_init(bnx2_init);
7996 module_exit(bnx2_cleanup);