1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004-2011 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
17 #include <linux/kernel.h>
18 #include <linux/timer.h>
19 #include <linux/errno.h>
20 #include <linux/ioport.h>
21 #include <linux/slab.h>
22 #include <linux/vmalloc.h>
23 #include <linux/interrupt.h>
24 #include <linux/pci.h>
25 #include <linux/init.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/bitops.h>
31 #include <asm/io.h>
32 #include <asm/irq.h>
33 #include <linux/delay.h>
34 #include <asm/byteorder.h>
35 #include <asm/page.h>
36 #include <linux/time.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/if_vlan.h>
40 #include <net/ip.h>
41 #include <net/tcp.h>
42 #include <net/checksum.h>
43 #include <linux/workqueue.h>
44 #include <linux/crc32.h>
45 #include <linux/prefetch.h>
46 #include <linux/cache.h>
47 #include <linux/firmware.h>
48 #include <linux/log2.h>
49 #include <linux/aer.h>
51 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
52 #define BCM_CNIC 1
53 #include "cnic_if.h"
54 #endif
55 #include "bnx2.h"
56 #include "bnx2_fw.h"
58 #define DRV_MODULE_NAME "bnx2"
59 #define DRV_MODULE_VERSION "2.1.6"
60 #define DRV_MODULE_RELDATE "Mar 7, 2011"
61 #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.1.fw"
62 #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
63 #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1a.fw"
64 #define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
65 #define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw"
67 #define RUN_AT(x) (jiffies + (x))
69 /* Time in jiffies before concluding the transmitter is hung. */
70 #define TX_TIMEOUT (5*HZ)
72 static char version[] __devinitdata =
73 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
75 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
76 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
77 MODULE_LICENSE("GPL");
78 MODULE_VERSION(DRV_MODULE_VERSION);
79 MODULE_FIRMWARE(FW_MIPS_FILE_06);
80 MODULE_FIRMWARE(FW_RV2P_FILE_06);
81 MODULE_FIRMWARE(FW_MIPS_FILE_09);
82 MODULE_FIRMWARE(FW_RV2P_FILE_09);
83 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
85 static int disable_msi = 0;
87 module_param(disable_msi, int, 0);
88 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
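/* Usage note: MSI can be disabled at module load time if interrupt
 * delivery problems are suspected, e.g. "modprobe bnx2 disable_msi=1".
 */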
90 typedef enum {
91 BCM5706 = 0,
92 NC370T,
93 NC370I,
94 BCM5706S,
95 NC370F,
96 BCM5708,
97 BCM5708S,
98 BCM5709,
99 BCM5709S,
100 BCM5716,
101 BCM5716S,
102 } board_t;
104 /* indexed by board_t, above */
105 static struct {
106 char *name;
107 } board_info[] __devinitdata = {
108 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
109 { "HP NC370T Multifunction Gigabit Server Adapter" },
110 { "HP NC370i Multifunction Gigabit Server Adapter" },
111 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
112 { "HP NC370F Multifunction Gigabit Server Adapter" },
113 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
114 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
115 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
116 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
117 { "Broadcom NetXtreme II BCM5716 1000Base-T" },
118 { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
121 static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
123 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
125 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
126 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
127 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
128 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
129 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
130 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
131 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
132 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
133 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
134 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
135 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
136 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
137 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
138 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
139 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
140 { PCI_VENDOR_ID_BROADCOM, 0x163b,
141 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
142 { PCI_VENDOR_ID_BROADCOM, 0x163c,
143 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
144 { 0, }
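/* The all-zero entry is the sentinel that terminates the ID list for
 * the PCI core when it walks bnx2_pci_tbl.
 */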
147 static const struct flash_spec flash_table[] =
149 #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
150 #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
151 /* Slow EEPROM */
152 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
153 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
154 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
155 "EEPROM - slow"},
156 /* Expansion entry 0001 */
157 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
158 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
159 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
160 "Entry 0001"},
161 /* Saifun SA25F010 (non-buffered flash) */
162 /* strap, cfg1, & write1 need updates */
163 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
164 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
165 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
166 "Non-buffered flash (128kB)"},
167 /* Saifun SA25F020 (non-buffered flash) */
168 /* strap, cfg1, & write1 need updates */
169 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
170 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
171 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
172 "Non-buffered flash (256kB)"},
173 /* Expansion entry 0100 */
174 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
175 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
176 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
177 "Entry 0100"},
178 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
179 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
180 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
181 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
182 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
183 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
184 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
185 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
186 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
187 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
188 /* Saifun SA25F005 (non-buffered flash) */
189 /* strap, cfg1, & write1 need updates */
190 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
191 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
192 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
193 "Non-buffered flash (64kB)"},
194 /* Fast EEPROM */
195 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
196 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
197 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
198 "EEPROM - fast"},
199 /* Expansion entry 1001 */
200 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
201 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
202 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
203 "Entry 1001"},
204 /* Expansion entry 1010 */
205 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
206 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
207 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
208 "Entry 1010"},
209 /* ATMEL AT45DB011B (buffered flash) */
210 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
211 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
212 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
213 "Buffered flash (128kB)"},
214 /* Expansion entry 1100 */
215 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
216 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
217 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
218 "Entry 1100"},
219 /* Expansion entry 1101 */
220 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
221 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
222 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
223 "Entry 1101"},
 224        /* Atmel Expansion entry 1110 */
225 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
226 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
227 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
228 "Entry 1110 (Atmel)"},
229 /* ATMEL AT45DB021B (buffered flash) */
230 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
231 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
232 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
233 "Buffered flash (256kB)"},
236 static const struct flash_spec flash_5709 = {
237 .flags = BNX2_NV_BUFFERED,
238 .page_bits = BCM5709_FLASH_PAGE_BITS,
239 .page_size = BCM5709_FLASH_PAGE_SIZE,
240 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
241 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
242 .name = "5709 Buffered flash (256kB)",
245 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
247 static void bnx2_init_napi(struct bnx2 *bp);
248 static void bnx2_del_napi(struct bnx2 *bp);
250 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
252 u32 diff;
254 /* Tell compiler to fetch tx_prod and tx_cons from memory. */
255 barrier();
257 /* The ring uses 256 indices for 255 entries, one of them
258 * needs to be skipped.
260 diff = txr->tx_prod - txr->tx_cons;
261 if (unlikely(diff >= TX_DESC_CNT)) {
262 diff &= 0xffff;
263 if (diff == TX_DESC_CNT)
264 diff = MAX_TX_DESC_CNT;
266 return bp->tx_ring_size - diff;
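/* tx_prod and tx_cons are free-running indices; the 16-bit mask above
 * handles wrap-around, and the TX_DESC_CNT/MAX_TX_DESC_CNT adjustment
 * accounts for the one ring slot that is intentionally left unused.
 */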
269 static u32
270 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
272 u32 val;
274 spin_lock_bh(&bp->indirect_lock);
275 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
276 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
277 spin_unlock_bh(&bp->indirect_lock);
278 return val;
281 static void
282 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
284 spin_lock_bh(&bp->indirect_lock);
285 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
286 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
287 spin_unlock_bh(&bp->indirect_lock);
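/* Indirect register access: the offset is latched into the PCICFG
 * window address register and the data then moves through the window
 * register.  indirect_lock serializes the two-step sequence.
 */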
290 static void
291 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
293 bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
296 static u32
297 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
299 return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
302 static void
303 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
305 offset += cid_addr;
306 spin_lock_bh(&bp->indirect_lock);
307 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
308 int i;
310 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
311 REG_WR(bp, BNX2_CTX_CTX_CTRL,
312 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
313 for (i = 0; i < 5; i++) {
314 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
315 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
316 break;
317 udelay(5);
319 } else {
320 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
321 REG_WR(bp, BNX2_CTX_DATA, val);
323 spin_unlock_bh(&bp->indirect_lock);
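/* On the 5709, context writes go through CTX_CTX_DATA/CTX_CTX_CTRL and
 * the WRITE_REQ bit is polled (up to 5 x 5us) until the chip consumes
 * the value; older chips take it directly via CTX_DATA_ADR/CTX_DATA.
 */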
326 #ifdef BCM_CNIC
327 static int
328 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
330 struct bnx2 *bp = netdev_priv(dev);
331 struct drv_ctl_io *io = &info->data.io;
333 switch (info->cmd) {
334 case DRV_CTL_IO_WR_CMD:
335 bnx2_reg_wr_ind(bp, io->offset, io->data);
336 break;
337 case DRV_CTL_IO_RD_CMD:
338 io->data = bnx2_reg_rd_ind(bp, io->offset);
339 break;
340 case DRV_CTL_CTX_WR_CMD:
341 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
342 break;
343 default:
344 return -EINVAL;
346 return 0;
349 static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
351 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
352 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
353 int sb_id;
355 if (bp->flags & BNX2_FLAG_USING_MSIX) {
356 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
357 bnapi->cnic_present = 0;
358 sb_id = bp->irq_nvecs;
359 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
360 } else {
361 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
362 bnapi->cnic_tag = bnapi->last_status_idx;
363 bnapi->cnic_present = 1;
364 sb_id = 0;
365 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
368 cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
369 cp->irq_arr[0].status_blk = (void *)
370 ((unsigned long) bnapi->status_blk.msi +
371 (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
372 cp->irq_arr[0].status_blk_num = sb_id;
373 cp->num_irq = 1;
376 static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
377 void *data)
379 struct bnx2 *bp = netdev_priv(dev);
380 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
382 if (ops == NULL)
383 return -EINVAL;
385 if (cp->drv_state & CNIC_DRV_STATE_REGD)
386 return -EBUSY;
388 bp->cnic_data = data;
389 rcu_assign_pointer(bp->cnic_ops, ops);
391 cp->num_irq = 0;
392 cp->drv_state = CNIC_DRV_STATE_REGD;
394 bnx2_setup_cnic_irq_info(bp);
396 return 0;
399 static int bnx2_unregister_cnic(struct net_device *dev)
401 struct bnx2 *bp = netdev_priv(dev);
402 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
403 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
405 mutex_lock(&bp->cnic_lock);
406 cp->drv_state = 0;
407 bnapi->cnic_present = 0;
408 rcu_assign_pointer(bp->cnic_ops, NULL);
409 mutex_unlock(&bp->cnic_lock);
410 synchronize_rcu();
411 return 0;
414 struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
416 struct bnx2 *bp = netdev_priv(dev);
417 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
419 cp->drv_owner = THIS_MODULE;
420 cp->chip_id = bp->chip_id;
421 cp->pdev = bp->pdev;
422 cp->io_base = bp->regview;
423 cp->drv_ctl = bnx2_drv_ctl;
424 cp->drv_register_cnic = bnx2_register_cnic;
425 cp->drv_unregister_cnic = bnx2_unregister_cnic;
427 return cp;
429 EXPORT_SYMBOL(bnx2_cnic_probe);
431 static void
432 bnx2_cnic_stop(struct bnx2 *bp)
434 struct cnic_ops *c_ops;
435 struct cnic_ctl_info info;
437 mutex_lock(&bp->cnic_lock);
438 c_ops = rcu_dereference_protected(bp->cnic_ops,
439 lockdep_is_held(&bp->cnic_lock));
440 if (c_ops) {
441 info.cmd = CNIC_CTL_STOP_CMD;
442 c_ops->cnic_ctl(bp->cnic_data, &info);
444 mutex_unlock(&bp->cnic_lock);
447 static void
448 bnx2_cnic_start(struct bnx2 *bp)
450 struct cnic_ops *c_ops;
451 struct cnic_ctl_info info;
453 mutex_lock(&bp->cnic_lock);
454 c_ops = rcu_dereference_protected(bp->cnic_ops,
455 lockdep_is_held(&bp->cnic_lock));
456 if (c_ops) {
457 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
458 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
460 bnapi->cnic_tag = bnapi->last_status_idx;
462 info.cmd = CNIC_CTL_START_CMD;
463 c_ops->cnic_ctl(bp->cnic_data, &info);
465 mutex_unlock(&bp->cnic_lock);
468 #else
470 static void
471 bnx2_cnic_stop(struct bnx2 *bp)
475 static void
476 bnx2_cnic_start(struct bnx2 *bp)
480 #endif
482 static int
483 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
485 u32 val1;
486 int i, ret;
488 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
489 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
490 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
492 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
493 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
495 udelay(40);
498 val1 = (bp->phy_addr << 21) | (reg << 16) |
499 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
500 BNX2_EMAC_MDIO_COMM_START_BUSY;
501 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
503 for (i = 0; i < 50; i++) {
504 udelay(10);
506 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
507 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
508 udelay(5);
510 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
511 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
513 break;
517 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
518 *val = 0x0;
519 ret = -EBUSY;
521 else {
522 *val = val1;
523 ret = 0;
526 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
527 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
528 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
530 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
531 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
533 udelay(40);
536 return ret;
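/* MDIO read sequence: auto-polling is paused if enabled, a READ command
 * is issued through EMAC_MDIO_COMM, and START_BUSY is polled (up to
 * 50 x 10us) before the 16-bit result is taken from the data field.
 */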
539 static int
540 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
542 u32 val1;
543 int i, ret;
545 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
546 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
547 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
549 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
550 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
552 udelay(40);
555 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
556 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
557 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
558 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
560 for (i = 0; i < 50; i++) {
561 udelay(10);
563 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
564 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
565 udelay(5);
566 break;
570 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
571 ret = -EBUSY;
572 else
573 ret = 0;
575 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
576 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
577 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
579 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
580 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
582 udelay(40);
585 return ret;
588 static void
589 bnx2_disable_int(struct bnx2 *bp)
591 int i;
592 struct bnx2_napi *bnapi;
594 for (i = 0; i < bp->irq_nvecs; i++) {
595 bnapi = &bp->bnx2_napi[i];
596 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
597 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
599 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
602 static void
603 bnx2_enable_int(struct bnx2 *bp)
605 int i;
606 struct bnx2_napi *bnapi;
608 for (i = 0; i < bp->irq_nvecs; i++) {
609 bnapi = &bp->bnx2_napi[i];
611 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
612 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
613 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
614 bnapi->last_status_idx);
616 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
617 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
618 bnapi->last_status_idx);
620 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
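/* Each vector is first acked with MASK_INT set and then re-armed without
 * it; the trailing HC_COMMAND write with COAL_NOW asks the host
 * coalescing block for an immediate status block update.
 */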
623 static void
624 bnx2_disable_int_sync(struct bnx2 *bp)
626 int i;
628 atomic_inc(&bp->intr_sem);
629 if (!netif_running(bp->dev))
630 return;
632 bnx2_disable_int(bp);
633 for (i = 0; i < bp->irq_nvecs; i++)
634 synchronize_irq(bp->irq_tbl[i].vector);
637 static void
638 bnx2_napi_disable(struct bnx2 *bp)
640 int i;
642 for (i = 0; i < bp->irq_nvecs; i++)
643 napi_disable(&bp->bnx2_napi[i].napi);
646 static void
647 bnx2_napi_enable(struct bnx2 *bp)
649 int i;
651 for (i = 0; i < bp->irq_nvecs; i++)
652 napi_enable(&bp->bnx2_napi[i].napi);
655 static void
656 bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
658 if (stop_cnic)
659 bnx2_cnic_stop(bp);
660 if (netif_running(bp->dev)) {
661 bnx2_napi_disable(bp);
662 netif_tx_disable(bp->dev);
664 bnx2_disable_int_sync(bp);
665 netif_carrier_off(bp->dev); /* prevent tx timeout */
668 static void
669 bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
671 if (atomic_dec_and_test(&bp->intr_sem)) {
672 if (netif_running(bp->dev)) {
673 netif_tx_wake_all_queues(bp->dev);
674 spin_lock_bh(&bp->phy_lock);
675 if (bp->link_up)
676 netif_carrier_on(bp->dev);
677 spin_unlock_bh(&bp->phy_lock);
678 bnx2_napi_enable(bp);
679 bnx2_enable_int(bp);
680 if (start_cnic)
681 bnx2_cnic_start(bp);
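/* The start only takes effect when intr_sem drops back to zero, i.e.
 * once every earlier bnx2_disable_int_sync() has been balanced, so
 * nested stop/start pairs behave correctly.
 */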
686 static void
687 bnx2_free_tx_mem(struct bnx2 *bp)
689 int i;
691 for (i = 0; i < bp->num_tx_rings; i++) {
692 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
693 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
695 if (txr->tx_desc_ring) {
696 dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
697 txr->tx_desc_ring,
698 txr->tx_desc_mapping);
699 txr->tx_desc_ring = NULL;
701 kfree(txr->tx_buf_ring);
702 txr->tx_buf_ring = NULL;
706 static void
707 bnx2_free_rx_mem(struct bnx2 *bp)
709 int i;
711 for (i = 0; i < bp->num_rx_rings; i++) {
712 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
713 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
714 int j;
716 for (j = 0; j < bp->rx_max_ring; j++) {
717 if (rxr->rx_desc_ring[j])
718 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
719 rxr->rx_desc_ring[j],
720 rxr->rx_desc_mapping[j]);
721 rxr->rx_desc_ring[j] = NULL;
723 vfree(rxr->rx_buf_ring);
724 rxr->rx_buf_ring = NULL;
726 for (j = 0; j < bp->rx_max_pg_ring; j++) {
727 if (rxr->rx_pg_desc_ring[j])
728 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
729 rxr->rx_pg_desc_ring[j],
730 rxr->rx_pg_desc_mapping[j]);
731 rxr->rx_pg_desc_ring[j] = NULL;
733 vfree(rxr->rx_pg_ring);
734 rxr->rx_pg_ring = NULL;
738 static int
739 bnx2_alloc_tx_mem(struct bnx2 *bp)
741 int i;
743 for (i = 0; i < bp->num_tx_rings; i++) {
744 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
745 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
747 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
748 if (txr->tx_buf_ring == NULL)
749 return -ENOMEM;
751 txr->tx_desc_ring =
752 dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
753 &txr->tx_desc_mapping, GFP_KERNEL);
754 if (txr->tx_desc_ring == NULL)
755 return -ENOMEM;
757 return 0;
760 static int
761 bnx2_alloc_rx_mem(struct bnx2 *bp)
763 int i;
765 for (i = 0; i < bp->num_rx_rings; i++) {
766 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
767 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
768 int j;
770 rxr->rx_buf_ring =
771 vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
772 if (rxr->rx_buf_ring == NULL)
773 return -ENOMEM;
775 for (j = 0; j < bp->rx_max_ring; j++) {
776 rxr->rx_desc_ring[j] =
777 dma_alloc_coherent(&bp->pdev->dev,
778 RXBD_RING_SIZE,
779 &rxr->rx_desc_mapping[j],
780 GFP_KERNEL);
781 if (rxr->rx_desc_ring[j] == NULL)
782 return -ENOMEM;
786 if (bp->rx_pg_ring_size) {
787 rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
788 bp->rx_max_pg_ring);
789 if (rxr->rx_pg_ring == NULL)
790 return -ENOMEM;
794 for (j = 0; j < bp->rx_max_pg_ring; j++) {
795 rxr->rx_pg_desc_ring[j] =
796 dma_alloc_coherent(&bp->pdev->dev,
797 RXBD_RING_SIZE,
798 &rxr->rx_pg_desc_mapping[j],
799 GFP_KERNEL);
800 if (rxr->rx_pg_desc_ring[j] == NULL)
801 return -ENOMEM;
805 return 0;
808 static void
809 bnx2_free_mem(struct bnx2 *bp)
811 int i;
812 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
814 bnx2_free_tx_mem(bp);
815 bnx2_free_rx_mem(bp);
817 for (i = 0; i < bp->ctx_pages; i++) {
818 if (bp->ctx_blk[i]) {
819 dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
820 bp->ctx_blk[i],
821 bp->ctx_blk_mapping[i]);
822 bp->ctx_blk[i] = NULL;
825 if (bnapi->status_blk.msi) {
826 dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
827 bnapi->status_blk.msi,
828 bp->status_blk_mapping);
829 bnapi->status_blk.msi = NULL;
830 bp->stats_blk = NULL;
834 static int
835 bnx2_alloc_mem(struct bnx2 *bp)
837 int i, status_blk_size, err;
838 struct bnx2_napi *bnapi;
839 void *status_blk;
841 /* Combine status and statistics blocks into one allocation. */
842 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
843 if (bp->flags & BNX2_FLAG_MSIX_CAP)
844 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
845 BNX2_SBLK_MSIX_ALIGN_SIZE);
846 bp->status_stats_size = status_blk_size +
847 sizeof(struct statistics_block);
849 status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
850 &bp->status_blk_mapping, GFP_KERNEL);
851 if (status_blk == NULL)
852 goto alloc_mem_err;
854 memset(status_blk, 0, bp->status_stats_size);
856 bnapi = &bp->bnx2_napi[0];
857 bnapi->status_blk.msi = status_blk;
858 bnapi->hw_tx_cons_ptr =
859 &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
860 bnapi->hw_rx_cons_ptr =
861 &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
862 if (bp->flags & BNX2_FLAG_MSIX_CAP) {
863 for (i = 1; i < bp->irq_nvecs; i++) {
864 struct status_block_msix *sblk;
866 bnapi = &bp->bnx2_napi[i];
868 sblk = (void *) (status_blk +
869 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
870 bnapi->status_blk.msix = sblk;
871 bnapi->hw_tx_cons_ptr =
872 &sblk->status_tx_quick_consumer_index;
873 bnapi->hw_rx_cons_ptr =
874 &sblk->status_rx_quick_consumer_index;
875 bnapi->int_num = i << 24;
879 bp->stats_blk = status_blk + status_blk_size;
881 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
883 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
884 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
885 if (bp->ctx_pages == 0)
886 bp->ctx_pages = 1;
887 for (i = 0; i < bp->ctx_pages; i++) {
888 bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
889 BCM_PAGE_SIZE,
890 &bp->ctx_blk_mapping[i],
891 GFP_KERNEL);
892 if (bp->ctx_blk[i] == NULL)
893 goto alloc_mem_err;
897 err = bnx2_alloc_rx_mem(bp);
898 if (err)
899 goto alloc_mem_err;
901 err = bnx2_alloc_tx_mem(bp);
902 if (err)
903 goto alloc_mem_err;
905 return 0;
907 alloc_mem_err:
908 bnx2_free_mem(bp);
909 return -ENOMEM;
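/* All allocation failures funnel through alloc_mem_err; bnx2_free_mem()
 * checks each block for NULL, so a single cleanup path can handle a
 * partially completed allocation.
 */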
912 static void
913 bnx2_report_fw_link(struct bnx2 *bp)
915 u32 fw_link_status = 0;
917 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
918 return;
920 if (bp->link_up) {
921 u32 bmsr;
923 switch (bp->line_speed) {
924 case SPEED_10:
925 if (bp->duplex == DUPLEX_HALF)
926 fw_link_status = BNX2_LINK_STATUS_10HALF;
927 else
928 fw_link_status = BNX2_LINK_STATUS_10FULL;
929 break;
930 case SPEED_100:
931 if (bp->duplex == DUPLEX_HALF)
932 fw_link_status = BNX2_LINK_STATUS_100HALF;
933 else
934 fw_link_status = BNX2_LINK_STATUS_100FULL;
935 break;
936 case SPEED_1000:
937 if (bp->duplex == DUPLEX_HALF)
938 fw_link_status = BNX2_LINK_STATUS_1000HALF;
939 else
940 fw_link_status = BNX2_LINK_STATUS_1000FULL;
941 break;
942 case SPEED_2500:
943 if (bp->duplex == DUPLEX_HALF)
944 fw_link_status = BNX2_LINK_STATUS_2500HALF;
945 else
946 fw_link_status = BNX2_LINK_STATUS_2500FULL;
947 break;
950 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
952 if (bp->autoneg) {
953 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
955 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
956 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
958 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
959 bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
960 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
961 else
962 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
965 else
966 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
968 bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
971 static char *
972 bnx2_xceiver_str(struct bnx2 *bp)
974 return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
975 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
976 "Copper");
979 static void
980 bnx2_report_link(struct bnx2 *bp)
982 if (bp->link_up) {
983 netif_carrier_on(bp->dev);
984 netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
985 bnx2_xceiver_str(bp),
986 bp->line_speed,
987 bp->duplex == DUPLEX_FULL ? "full" : "half");
989 if (bp->flow_ctrl) {
990 if (bp->flow_ctrl & FLOW_CTRL_RX) {
991 pr_cont(", receive ");
992 if (bp->flow_ctrl & FLOW_CTRL_TX)
993 pr_cont("& transmit ");
995 else {
996 pr_cont(", transmit ");
998 pr_cont("flow control ON");
1000 pr_cont("\n");
1001 } else {
1002 netif_carrier_off(bp->dev);
1003 netdev_err(bp->dev, "NIC %s Link is Down\n",
1004 bnx2_xceiver_str(bp));
1007 bnx2_report_fw_link(bp);
1010 static void
1011 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1013 u32 local_adv, remote_adv;
1015 bp->flow_ctrl = 0;
1016 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1017 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1019 if (bp->duplex == DUPLEX_FULL) {
1020 bp->flow_ctrl = bp->req_flow_ctrl;
1022 return;
1025 if (bp->duplex != DUPLEX_FULL) {
1026 return;
1029 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1030 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
1031 u32 val;
1033 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1034 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1035 bp->flow_ctrl |= FLOW_CTRL_TX;
1036 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1037 bp->flow_ctrl |= FLOW_CTRL_RX;
1038 return;
1041 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1042 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1044 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1045 u32 new_local_adv = 0;
1046 u32 new_remote_adv = 0;
1048 if (local_adv & ADVERTISE_1000XPAUSE)
1049 new_local_adv |= ADVERTISE_PAUSE_CAP;
1050 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1051 new_local_adv |= ADVERTISE_PAUSE_ASYM;
1052 if (remote_adv & ADVERTISE_1000XPAUSE)
1053 new_remote_adv |= ADVERTISE_PAUSE_CAP;
1054 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1055 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1057 local_adv = new_local_adv;
1058 remote_adv = new_remote_adv;
1061 /* See Table 28B-3 of 802.3ab-1999 spec. */
1062 if (local_adv & ADVERTISE_PAUSE_CAP) {
1063 if(local_adv & ADVERTISE_PAUSE_ASYM) {
1064 if (remote_adv & ADVERTISE_PAUSE_CAP) {
1065 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1067 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1068 bp->flow_ctrl = FLOW_CTRL_RX;
1071 else {
1072 if (remote_adv & ADVERTISE_PAUSE_CAP) {
1073 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1077 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1078 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1079 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
1081 bp->flow_ctrl = FLOW_CTRL_TX;
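/* Pause resolution summary (802.3 Table 28B-3), as coded above:
 *   local CAP        and remote CAP             -> TX and RX pause
 *   local CAP + ASYM and remote ASYM (no CAP)   -> RX pause only
 *   local ASYM only  and remote CAP + ASYM      -> TX pause only
 * any other combination leaves flow control disabled.
 */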
1086 static int
1087 bnx2_5709s_linkup(struct bnx2 *bp)
1089 u32 val, speed;
1091 bp->link_up = 1;
1093 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1094 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1095 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1097 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1098 bp->line_speed = bp->req_line_speed;
1099 bp->duplex = bp->req_duplex;
1100 return 0;
1102 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1103 switch (speed) {
1104 case MII_BNX2_GP_TOP_AN_SPEED_10:
1105 bp->line_speed = SPEED_10;
1106 break;
1107 case MII_BNX2_GP_TOP_AN_SPEED_100:
1108 bp->line_speed = SPEED_100;
1109 break;
1110 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1111 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1112 bp->line_speed = SPEED_1000;
1113 break;
1114 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1115 bp->line_speed = SPEED_2500;
1116 break;
1118 if (val & MII_BNX2_GP_TOP_AN_FD)
1119 bp->duplex = DUPLEX_FULL;
1120 else
1121 bp->duplex = DUPLEX_HALF;
1122 return 0;
1125 static int
1126 bnx2_5708s_linkup(struct bnx2 *bp)
1128 u32 val;
1130 bp->link_up = 1;
1131 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1132 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1133 case BCM5708S_1000X_STAT1_SPEED_10:
1134 bp->line_speed = SPEED_10;
1135 break;
1136 case BCM5708S_1000X_STAT1_SPEED_100:
1137 bp->line_speed = SPEED_100;
1138 break;
1139 case BCM5708S_1000X_STAT1_SPEED_1G:
1140 bp->line_speed = SPEED_1000;
1141 break;
1142 case BCM5708S_1000X_STAT1_SPEED_2G5:
1143 bp->line_speed = SPEED_2500;
1144 break;
1146 if (val & BCM5708S_1000X_STAT1_FD)
1147 bp->duplex = DUPLEX_FULL;
1148 else
1149 bp->duplex = DUPLEX_HALF;
1151 return 0;
1154 static int
1155 bnx2_5706s_linkup(struct bnx2 *bp)
1157 u32 bmcr, local_adv, remote_adv, common;
1159 bp->link_up = 1;
1160 bp->line_speed = SPEED_1000;
1162 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1163 if (bmcr & BMCR_FULLDPLX) {
1164 bp->duplex = DUPLEX_FULL;
1166 else {
1167 bp->duplex = DUPLEX_HALF;
1170 if (!(bmcr & BMCR_ANENABLE)) {
1171 return 0;
1174 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1175 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1177 common = local_adv & remote_adv;
1178 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1180 if (common & ADVERTISE_1000XFULL) {
1181 bp->duplex = DUPLEX_FULL;
1183 else {
1184 bp->duplex = DUPLEX_HALF;
1188 return 0;
1191 static int
1192 bnx2_copper_linkup(struct bnx2 *bp)
1194 u32 bmcr;
1196 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1197 if (bmcr & BMCR_ANENABLE) {
1198 u32 local_adv, remote_adv, common;
1200 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1201 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1203 common = local_adv & (remote_adv >> 2);
1204 if (common & ADVERTISE_1000FULL) {
1205 bp->line_speed = SPEED_1000;
1206 bp->duplex = DUPLEX_FULL;
1208 else if (common & ADVERTISE_1000HALF) {
1209 bp->line_speed = SPEED_1000;
1210 bp->duplex = DUPLEX_HALF;
1212 else {
1213 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1214 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1216 common = local_adv & remote_adv;
1217 if (common & ADVERTISE_100FULL) {
1218 bp->line_speed = SPEED_100;
1219 bp->duplex = DUPLEX_FULL;
1221 else if (common & ADVERTISE_100HALF) {
1222 bp->line_speed = SPEED_100;
1223 bp->duplex = DUPLEX_HALF;
1225 else if (common & ADVERTISE_10FULL) {
1226 bp->line_speed = SPEED_10;
1227 bp->duplex = DUPLEX_FULL;
1229 else if (common & ADVERTISE_10HALF) {
1230 bp->line_speed = SPEED_10;
1231 bp->duplex = DUPLEX_HALF;
1233 else {
1234 bp->line_speed = 0;
1235 bp->link_up = 0;
1239 else {
1240 if (bmcr & BMCR_SPEED100) {
1241 bp->line_speed = SPEED_100;
1243 else {
1244 bp->line_speed = SPEED_10;
1246 if (bmcr & BMCR_FULLDPLX) {
1247 bp->duplex = DUPLEX_FULL;
1249 else {
1250 bp->duplex = DUPLEX_HALF;
1254 return 0;
1257 static void
1258 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1260 u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1262 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1263 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1264 val |= 0x02 << 8;
1266 if (bp->flow_ctrl & FLOW_CTRL_TX)
1267 val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1269 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1272 static void
1273 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1275 int i;
1276 u32 cid;
1278 for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1279 if (i == 1)
1280 cid = RX_RSS_CID;
1281 bnx2_init_rx_context(bp, cid);
1285 static void
1286 bnx2_set_mac_link(struct bnx2 *bp)
1288 u32 val;
1290 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1291 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1292 (bp->duplex == DUPLEX_HALF)) {
1293 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1296 /* Configure the EMAC mode register. */
1297 val = REG_RD(bp, BNX2_EMAC_MODE);
1299 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1300 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1301 BNX2_EMAC_MODE_25G_MODE);
1303 if (bp->link_up) {
1304 switch (bp->line_speed) {
1305 case SPEED_10:
1306 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
1307 val |= BNX2_EMAC_MODE_PORT_MII_10M;
1308 break;
1310 /* fall through */
1311 case SPEED_100:
1312 val |= BNX2_EMAC_MODE_PORT_MII;
1313 break;
1314 case SPEED_2500:
1315 val |= BNX2_EMAC_MODE_25G_MODE;
1316 /* fall through */
1317 case SPEED_1000:
1318 val |= BNX2_EMAC_MODE_PORT_GMII;
1319 break;
1322 else {
1323 val |= BNX2_EMAC_MODE_PORT_GMII;
1326 /* Set the MAC to operate in the appropriate duplex mode. */
1327 if (bp->duplex == DUPLEX_HALF)
1328 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1329 REG_WR(bp, BNX2_EMAC_MODE, val);
1331 /* Enable/disable rx PAUSE. */
1332 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1334 if (bp->flow_ctrl & FLOW_CTRL_RX)
1335 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1336 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1338 /* Enable/disable tx PAUSE. */
1339 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
1340 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1342 if (bp->flow_ctrl & FLOW_CTRL_TX)
1343 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1344 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
1346 /* Acknowledge the interrupt. */
1347 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1349 bnx2_init_all_rx_contexts(bp);
1352 static void
1353 bnx2_enable_bmsr1(struct bnx2 *bp)
1355 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1356 (CHIP_NUM(bp) == CHIP_NUM_5709))
1357 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1358 MII_BNX2_BLK_ADDR_GP_STATUS);
1361 static void
1362 bnx2_disable_bmsr1(struct bnx2 *bp)
1364 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1365 (CHIP_NUM(bp) == CHIP_NUM_5709))
1366 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1367 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1370 static int
1371 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1373 u32 up1;
1374 int ret = 1;
1376 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1377 return 0;
1379 if (bp->autoneg & AUTONEG_SPEED)
1380 bp->advertising |= ADVERTISED_2500baseX_Full;
1382 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1383 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1385 bnx2_read_phy(bp, bp->mii_up1, &up1);
1386 if (!(up1 & BCM5708S_UP1_2G5)) {
1387 up1 |= BCM5708S_UP1_2G5;
1388 bnx2_write_phy(bp, bp->mii_up1, up1);
1389 ret = 0;
1392 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1393 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1394 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1396 return ret;
1399 static int
1400 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1402 u32 up1;
1403 int ret = 0;
1405 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1406 return 0;
1408 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1409 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1411 bnx2_read_phy(bp, bp->mii_up1, &up1);
1412 if (up1 & BCM5708S_UP1_2G5) {
1413 up1 &= ~BCM5708S_UP1_2G5;
1414 bnx2_write_phy(bp, bp->mii_up1, up1);
1415 ret = 1;
1418 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1419 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1420 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1422 return ret;
1425 static void
1426 bnx2_enable_forced_2g5(struct bnx2 *bp)
1428 u32 uninitialized_var(bmcr);
1429 int err;
1431 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1432 return;
1434 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1435 u32 val;
1437 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1438 MII_BNX2_BLK_ADDR_SERDES_DIG);
1439 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1440 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1441 val |= MII_BNX2_SD_MISC1_FORCE |
1442 MII_BNX2_SD_MISC1_FORCE_2_5G;
1443 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1446 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1447 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1448 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1450 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1451 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1452 if (!err)
1453 bmcr |= BCM5708S_BMCR_FORCE_2500;
1454 } else {
1455 return;
1458 if (err)
1459 return;
1461 if (bp->autoneg & AUTONEG_SPEED) {
1462 bmcr &= ~BMCR_ANENABLE;
1463 if (bp->req_duplex == DUPLEX_FULL)
1464 bmcr |= BMCR_FULLDPLX;
1466 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1469 static void
1470 bnx2_disable_forced_2g5(struct bnx2 *bp)
1472 u32 uninitialized_var(bmcr);
1473 int err;
1475 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1476 return;
1478 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1479 u32 val;
1481 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1482 MII_BNX2_BLK_ADDR_SERDES_DIG);
1483 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1484 val &= ~MII_BNX2_SD_MISC1_FORCE;
1485 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1488 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1489 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1490 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1492 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1493 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1494 if (!err)
1495 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1496 } else {
1497 return;
1500 if (err)
1501 return;
1503 if (bp->autoneg & AUTONEG_SPEED)
1504 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1505 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1508 static void
1509 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1511 u32 val;
1513 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1514 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1515 if (start)
1516 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1517 else
1518 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1521 static int
1522 bnx2_set_link(struct bnx2 *bp)
1524 u32 bmsr;
1525 u8 link_up;
1527 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1528 bp->link_up = 1;
1529 return 0;
1532 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1533 return 0;
1535 link_up = bp->link_up;
1537 bnx2_enable_bmsr1(bp);
1538 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1539 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1540 bnx2_disable_bmsr1(bp);
1542 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1543 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1544 u32 val, an_dbg;
1546 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1547 bnx2_5706s_force_link_dn(bp, 0);
1548 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1550 val = REG_RD(bp, BNX2_EMAC_STATUS);
1552 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1553 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1554 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1556 if ((val & BNX2_EMAC_STATUS_LINK) &&
1557 !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1558 bmsr |= BMSR_LSTATUS;
1559 else
1560 bmsr &= ~BMSR_LSTATUS;
1563 if (bmsr & BMSR_LSTATUS) {
1564 bp->link_up = 1;
1566 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1567 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1568 bnx2_5706s_linkup(bp);
1569 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1570 bnx2_5708s_linkup(bp);
1571 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1572 bnx2_5709s_linkup(bp);
1574 else {
1575 bnx2_copper_linkup(bp);
1577 bnx2_resolve_flow_ctrl(bp);
1579 else {
1580 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1581 (bp->autoneg & AUTONEG_SPEED))
1582 bnx2_disable_forced_2g5(bp);
1584 if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1585 u32 bmcr;
1587 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1588 bmcr |= BMCR_ANENABLE;
1589 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1591 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1593 bp->link_up = 0;
1596 if (bp->link_up != link_up) {
1597 bnx2_report_link(bp);
1600 bnx2_set_mac_link(bp);
1602 return 0;
1605 static int
1606 bnx2_reset_phy(struct bnx2 *bp)
1608 int i;
1609 u32 reg;
1611 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1613 #define PHY_RESET_MAX_WAIT 100
1614 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1615 udelay(10);
1617 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1618 if (!(reg & BMCR_RESET)) {
1619 udelay(20);
1620 break;
1623 if (i == PHY_RESET_MAX_WAIT) {
1624 return -EBUSY;
1626 return 0;
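/* BMCR_RESET self-clears; the loop above allows up to
 * PHY_RESET_MAX_WAIT * 10us (about 1ms) for that to happen before
 * giving up with -EBUSY.
 */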
1629 static u32
1630 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1632 u32 adv = 0;
1634 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1635 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1637 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1638 adv = ADVERTISE_1000XPAUSE;
1640 else {
1641 adv = ADVERTISE_PAUSE_CAP;
1644 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1645 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1646 adv = ADVERTISE_1000XPSE_ASYM;
1648 else {
1649 adv = ADVERTISE_PAUSE_ASYM;
1652 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1653 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1654 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1656 else {
1657 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1660 return adv;
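/* The advertised pause bits depend on the PHY type: SerDes links use
 * the 1000BASE-X bits (ADVERTISE_1000XPAUSE/_ASYM) while copper links
 * use ADVERTISE_PAUSE_CAP/_ASYM, selected from req_flow_ctrl as above.
 */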
1663 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1665 static int
1666 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1667 __releases(&bp->phy_lock)
1668 __acquires(&bp->phy_lock)
1670 u32 speed_arg = 0, pause_adv;
1672 pause_adv = bnx2_phy_get_pause_adv(bp);
1674 if (bp->autoneg & AUTONEG_SPEED) {
1675 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1676 if (bp->advertising & ADVERTISED_10baseT_Half)
1677 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1678 if (bp->advertising & ADVERTISED_10baseT_Full)
1679 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1680 if (bp->advertising & ADVERTISED_100baseT_Half)
1681 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1682 if (bp->advertising & ADVERTISED_100baseT_Full)
1683 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1684 if (bp->advertising & ADVERTISED_1000baseT_Full)
1685 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1686 if (bp->advertising & ADVERTISED_2500baseX_Full)
1687 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1688 } else {
1689 if (bp->req_line_speed == SPEED_2500)
1690 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1691 else if (bp->req_line_speed == SPEED_1000)
1692 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1693 else if (bp->req_line_speed == SPEED_100) {
1694 if (bp->req_duplex == DUPLEX_FULL)
1695 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1696 else
1697 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1698 } else if (bp->req_line_speed == SPEED_10) {
1699 if (bp->req_duplex == DUPLEX_FULL)
1700 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1701 else
1702 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1706 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1707 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1708 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1709 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1711 if (port == PORT_TP)
1712 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1713 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1715 bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1717 spin_unlock_bh(&bp->phy_lock);
1718 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1719 spin_lock_bh(&bp->phy_lock);
1721 return 0;
1724 static int
1725 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1726 __releases(&bp->phy_lock)
1727 __acquires(&bp->phy_lock)
1729 u32 adv, bmcr;
1730 u32 new_adv = 0;
1732 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1733 return bnx2_setup_remote_phy(bp, port);
1735 if (!(bp->autoneg & AUTONEG_SPEED)) {
1736 u32 new_bmcr;
1737 int force_link_down = 0;
1739 if (bp->req_line_speed == SPEED_2500) {
1740 if (!bnx2_test_and_enable_2g5(bp))
1741 force_link_down = 1;
1742 } else if (bp->req_line_speed == SPEED_1000) {
1743 if (bnx2_test_and_disable_2g5(bp))
1744 force_link_down = 1;
1746 bnx2_read_phy(bp, bp->mii_adv, &adv);
1747 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1749 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1750 new_bmcr = bmcr & ~BMCR_ANENABLE;
1751 new_bmcr |= BMCR_SPEED1000;
1753 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1754 if (bp->req_line_speed == SPEED_2500)
1755 bnx2_enable_forced_2g5(bp);
1756 else if (bp->req_line_speed == SPEED_1000) {
1757 bnx2_disable_forced_2g5(bp);
1758 new_bmcr &= ~0x2000;
1761 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1762 if (bp->req_line_speed == SPEED_2500)
1763 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1764 else
1765 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1768 if (bp->req_duplex == DUPLEX_FULL) {
1769 adv |= ADVERTISE_1000XFULL;
1770 new_bmcr |= BMCR_FULLDPLX;
1772 else {
1773 adv |= ADVERTISE_1000XHALF;
1774 new_bmcr &= ~BMCR_FULLDPLX;
1776 if ((new_bmcr != bmcr) || (force_link_down)) {
1777 /* Force a link down visible on the other side */
1778 if (bp->link_up) {
1779 bnx2_write_phy(bp, bp->mii_adv, adv &
1780 ~(ADVERTISE_1000XFULL |
1781 ADVERTISE_1000XHALF));
1782 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1783 BMCR_ANRESTART | BMCR_ANENABLE);
1785 bp->link_up = 0;
1786 netif_carrier_off(bp->dev);
1787 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1788 bnx2_report_link(bp);
1790 bnx2_write_phy(bp, bp->mii_adv, adv);
1791 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1792 } else {
1793 bnx2_resolve_flow_ctrl(bp);
1794 bnx2_set_mac_link(bp);
1796 return 0;
1799 bnx2_test_and_enable_2g5(bp);
1801 if (bp->advertising & ADVERTISED_1000baseT_Full)
1802 new_adv |= ADVERTISE_1000XFULL;
1804 new_adv |= bnx2_phy_get_pause_adv(bp);
1806 bnx2_read_phy(bp, bp->mii_adv, &adv);
1807 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1809 bp->serdes_an_pending = 0;
1810 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1811 /* Force a link down visible on the other side */
1812 if (bp->link_up) {
1813 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1814 spin_unlock_bh(&bp->phy_lock);
1815 msleep(20);
1816 spin_lock_bh(&bp->phy_lock);
1819 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1820 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1821 BMCR_ANENABLE);
1822 /* Speed up link-up time when the link partner
 1823          * does not autonegotiate, which is very common
 1824          * in blade servers. Some blade servers use
 1825          * IPMI for keyboard input and it's important
1826 * to minimize link disruptions. Autoneg. involves
1827 * exchanging base pages plus 3 next pages and
1828 * normally completes in about 120 msec.
1830 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1831 bp->serdes_an_pending = 1;
1832 mod_timer(&bp->timer, jiffies + bp->current_interval);
1833 } else {
1834 bnx2_resolve_flow_ctrl(bp);
1835 bnx2_set_mac_link(bp);
1838 return 0;
1841 #define ETHTOOL_ALL_FIBRE_SPEED \
1842 (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
1843 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1844 (ADVERTISED_1000baseT_Full)
1846 #define ETHTOOL_ALL_COPPER_SPEED \
1847 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1848 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1849 ADVERTISED_1000baseT_Full)
1851 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1852 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1854 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1856 static void
1857 bnx2_set_default_remote_link(struct bnx2 *bp)
1859 u32 link;
1861 if (bp->phy_port == PORT_TP)
1862 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1863 else
1864 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1866 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1867 bp->req_line_speed = 0;
1868 bp->autoneg |= AUTONEG_SPEED;
1869 bp->advertising = ADVERTISED_Autoneg;
1870 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1871 bp->advertising |= ADVERTISED_10baseT_Half;
1872 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1873 bp->advertising |= ADVERTISED_10baseT_Full;
1874 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1875 bp->advertising |= ADVERTISED_100baseT_Half;
1876 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1877 bp->advertising |= ADVERTISED_100baseT_Full;
1878 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1879 bp->advertising |= ADVERTISED_1000baseT_Full;
1880 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1881 bp->advertising |= ADVERTISED_2500baseX_Full;
1882 } else {
1883 bp->autoneg = 0;
1884 bp->advertising = 0;
1885 bp->req_duplex = DUPLEX_FULL;
1886 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1887 bp->req_line_speed = SPEED_10;
1888 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1889 bp->req_duplex = DUPLEX_HALF;
1891 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1892 bp->req_line_speed = SPEED_100;
1893 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1894 bp->req_duplex = DUPLEX_HALF;
1896 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1897 bp->req_line_speed = SPEED_1000;
1898 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1899 bp->req_line_speed = SPEED_2500;
1903 static void
1904 bnx2_set_default_link(struct bnx2 *bp)
1906 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1907 bnx2_set_default_remote_link(bp);
1908 return;
1911 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1912 bp->req_line_speed = 0;
1913 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1914 u32 reg;
1916 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1918 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1919 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1920 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1921 bp->autoneg = 0;
1922 bp->req_line_speed = bp->line_speed = SPEED_1000;
1923 bp->req_duplex = DUPLEX_FULL;
1925 } else
1926 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1929 static void
1930 bnx2_send_heart_beat(struct bnx2 *bp)
1932 u32 msg;
1933 u32 addr;
1935 spin_lock(&bp->indirect_lock);
1936 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1937 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1938 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1939 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1940 spin_unlock(&bp->indirect_lock);
1943 static void
1944 bnx2_remote_phy_event(struct bnx2 *bp)
1946 u32 msg;
1947 u8 link_up = bp->link_up;
1948 u8 old_port;
1950 msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1952 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1953 bnx2_send_heart_beat(bp);
1955 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1957 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1958 bp->link_up = 0;
1959 else {
1960 u32 speed;
1962 bp->link_up = 1;
1963 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1964 bp->duplex = DUPLEX_FULL;
1965 switch (speed) {
1966 case BNX2_LINK_STATUS_10HALF:
1967 bp->duplex = DUPLEX_HALF;
1968 case BNX2_LINK_STATUS_10FULL:
1969 bp->line_speed = SPEED_10;
1970 break;
1971 case BNX2_LINK_STATUS_100HALF:
1972 bp->duplex = DUPLEX_HALF;
1973 case BNX2_LINK_STATUS_100BASE_T4:
1974 case BNX2_LINK_STATUS_100FULL:
1975 bp->line_speed = SPEED_100;
1976 break;
1977 case BNX2_LINK_STATUS_1000HALF:
1978 bp->duplex = DUPLEX_HALF;
1979 case BNX2_LINK_STATUS_1000FULL:
1980 bp->line_speed = SPEED_1000;
1981 break;
1982 case BNX2_LINK_STATUS_2500HALF:
1983 bp->duplex = DUPLEX_HALF;
1984 case BNX2_LINK_STATUS_2500FULL:
1985 bp->line_speed = SPEED_2500;
1986 break;
1987 default:
1988 bp->line_speed = 0;
1989 break;
1992 bp->flow_ctrl = 0;
1993 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1994 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1995 if (bp->duplex == DUPLEX_FULL)
1996 bp->flow_ctrl = bp->req_flow_ctrl;
1997 } else {
1998 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1999 bp->flow_ctrl |= FLOW_CTRL_TX;
2000 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2001 bp->flow_ctrl |= FLOW_CTRL_RX;
2004 old_port = bp->phy_port;
2005 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2006 bp->phy_port = PORT_FIBRE;
2007 else
2008 bp->phy_port = PORT_TP;
2010 if (old_port != bp->phy_port)
2011 bnx2_set_default_link(bp);
2014 if (bp->link_up != link_up)
2015 bnx2_report_link(bp);
2017 bnx2_set_mac_link(bp);
2020 static int
2021 bnx2_set_remote_link(struct bnx2 *bp)
2023 u32 evt_code;
2025 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2026 switch (evt_code) {
2027 case BNX2_FW_EVT_CODE_LINK_EVENT:
2028 bnx2_remote_phy_event(bp);
2029 break;
2030 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2031 default:
2032 bnx2_send_heart_beat(bp);
2033 break;
2035 return 0;
2038 static int
2039 bnx2_setup_copper_phy(struct bnx2 *bp)
2040 __releases(&bp->phy_lock)
2041 __acquires(&bp->phy_lock)
2043 u32 bmcr;
2044 u32 new_bmcr;
2046 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2048 if (bp->autoneg & AUTONEG_SPEED) {
2049 u32 adv_reg, adv1000_reg;
2050 u32 new_adv_reg = 0;
2051 u32 new_adv1000_reg = 0;
2053 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2054 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2055 ADVERTISE_PAUSE_ASYM);
2057 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2058 adv1000_reg &= PHY_ALL_1000_SPEED;
2060 if (bp->advertising & ADVERTISED_10baseT_Half)
2061 new_adv_reg |= ADVERTISE_10HALF;
2062 if (bp->advertising & ADVERTISED_10baseT_Full)
2063 new_adv_reg |= ADVERTISE_10FULL;
2064 if (bp->advertising & ADVERTISED_100baseT_Half)
2065 new_adv_reg |= ADVERTISE_100HALF;
2066 if (bp->advertising & ADVERTISED_100baseT_Full)
2067 new_adv_reg |= ADVERTISE_100FULL;
2068 if (bp->advertising & ADVERTISED_1000baseT_Full)
2069 new_adv1000_reg |= ADVERTISE_1000FULL;
2071 new_adv_reg |= ADVERTISE_CSMA;
2073 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
2075 if ((adv1000_reg != new_adv1000_reg) ||
2076 (adv_reg != new_adv_reg) ||
2077 ((bmcr & BMCR_ANENABLE) == 0)) {
2079 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
2080 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
2081 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2082 BMCR_ANENABLE);
2084 else if (bp->link_up) {
2085 /* Flow ctrl may have changed from auto to forced */
2086 /* or vice-versa. */
2088 bnx2_resolve_flow_ctrl(bp);
2089 bnx2_set_mac_link(bp);
2091 return 0;
2094 new_bmcr = 0;
2095 if (bp->req_line_speed == SPEED_100) {
2096 new_bmcr |= BMCR_SPEED100;
2098 if (bp->req_duplex == DUPLEX_FULL) {
2099 new_bmcr |= BMCR_FULLDPLX;
2101 if (new_bmcr != bmcr) {
2102 u32 bmsr;
2104 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2105 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2107 if (bmsr & BMSR_LSTATUS) {
2108 /* Force link down */
2109 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2110 spin_unlock_bh(&bp->phy_lock);
2111 msleep(50);
2112 spin_lock_bh(&bp->phy_lock);
2114 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2115 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2118 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2120 /* Normally, the new speed is setup after the link has
2121 * gone down and up again. In some cases, link will not go
2122 * down so we need to set up the new speed here.
2124 if (bmsr & BMSR_LSTATUS) {
2125 bp->line_speed = bp->req_line_speed;
2126 bp->duplex = bp->req_duplex;
2127 bnx2_resolve_flow_ctrl(bp);
2128 bnx2_set_mac_link(bp);
2130 } else {
2131 bnx2_resolve_flow_ctrl(bp);
2132 bnx2_set_mac_link(bp);
2134 return 0;
2137 static int
2138 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2139 __releases(&bp->phy_lock)
2140 __acquires(&bp->phy_lock)
2142 if (bp->loopback == MAC_LOOPBACK)
2143 return 0;
2145 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2146 return bnx2_setup_serdes_phy(bp, port);
2148 else {
2149 return bnx2_setup_copper_phy(bp);
2153 static int
2154 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2156 u32 val;
2158 bp->mii_bmcr = MII_BMCR + 0x10;
2159 bp->mii_bmsr = MII_BMSR + 0x10;
2160 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2161 bp->mii_adv = MII_ADVERTISE + 0x10;
2162 bp->mii_lpa = MII_LPA + 0x10;
2163 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2165 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2166 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2168 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2169 if (reset_phy)
2170 bnx2_reset_phy(bp);
2172 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2174 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2175 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2176 val |= MII_BNX2_SD_1000XCTL1_FIBER;
2177 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2179 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2180 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2181 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2182 val |= BCM5708S_UP1_2G5;
2183 else
2184 val &= ~BCM5708S_UP1_2G5;
2185 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2187 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2188 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2189 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2190 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2192 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2194 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2195 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2196 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2198 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2200 return 0;
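/* 5708 SerDes PHY init: select fiber mode with auto-detect, enable 2.5G
 * when the PHY is capable, raise the TX signal amplitude on A0/B0/B1
 * silicon, and apply the TX_ACTL3 value from shared hardware config on
 * backplane designs.
 */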
2203 static int
2204 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2206 u32 val;
2208 if (reset_phy)
2209 bnx2_reset_phy(bp);
2211 bp->mii_up1 = BCM5708S_UP1;
2213 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2214 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2215 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2217 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2218 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2219 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2221 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2222 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2223 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2225 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2226 bnx2_read_phy(bp, BCM5708S_UP1, &val);
2227 val |= BCM5708S_UP1_2G5;
2228 bnx2_write_phy(bp, BCM5708S_UP1, val);
2231 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
2232 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
2233 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
2234 /* increase tx signal amplitude */
2235 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2236 BCM5708S_BLK_ADDR_TX_MISC);
2237 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2238 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2239 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2240 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2243 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2244 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2246 if (val) {
2247 u32 is_backplane;
2249 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2250 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2251 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2252 BCM5708S_BLK_ADDR_TX_MISC);
2253 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2254 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2255 BCM5708S_BLK_ADDR_DIG);
2258 return 0;
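/* 5706 SerDes PHY init: clear the parallel-detect state and, depending on
 * the MTU, set or clear the extended packet length bits through the
 * shadow registers 0x18 and 0x1c.
 */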
2261 static int
2262 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2264 if (reset_phy)
2265 bnx2_reset_phy(bp);
2267 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2269 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2270 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2272 if (bp->dev->mtu > 1500) {
2273 u32 val;
2275 /* Set extended packet length bit */
2276 bnx2_write_phy(bp, 0x18, 0x7);
2277 bnx2_read_phy(bp, 0x18, &val);
2278 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2280 bnx2_write_phy(bp, 0x1c, 0x6c00);
2281 bnx2_read_phy(bp, 0x1c, &val);
2282 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2284 else {
2285 u32 val;
2287 bnx2_write_phy(bp, 0x18, 0x7);
2288 bnx2_read_phy(bp, 0x18, &val);
2289 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2291 bnx2_write_phy(bp, 0x1c, 0x6c00);
2292 bnx2_read_phy(bp, 0x1c, &val);
2293 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2296 return 0;
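/* Copper PHY init: apply the CRC and early-DAC workarounds when flagged,
 * program the extended packet length bits for jumbo MTUs, and enable the
 * ethernet@wirespeed feature.
 */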
2299 static int
2300 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2302 u32 val;
2304 if (reset_phy)
2305 bnx2_reset_phy(bp);
2307 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2308 bnx2_write_phy(bp, 0x18, 0x0c00);
2309 bnx2_write_phy(bp, 0x17, 0x000a);
2310 bnx2_write_phy(bp, 0x15, 0x310b);
2311 bnx2_write_phy(bp, 0x17, 0x201f);
2312 bnx2_write_phy(bp, 0x15, 0x9506);
2313 bnx2_write_phy(bp, 0x17, 0x401f);
2314 bnx2_write_phy(bp, 0x15, 0x14e2);
2315 bnx2_write_phy(bp, 0x18, 0x0400);
2318 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2319 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2320 MII_BNX2_DSP_EXPAND_REG | 0x8);
2321 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2322 val &= ~(1 << 8);
2323 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2326 if (bp->dev->mtu > 1500) {
2327 /* Set extended packet length bit */
2328 bnx2_write_phy(bp, 0x18, 0x7);
2329 bnx2_read_phy(bp, 0x18, &val);
2330 bnx2_write_phy(bp, 0x18, val | 0x4000);
2332 bnx2_read_phy(bp, 0x10, &val);
2333 bnx2_write_phy(bp, 0x10, val | 0x1);
2335 else {
2336 bnx2_write_phy(bp, 0x18, 0x7);
2337 bnx2_read_phy(bp, 0x18, &val);
2338 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2340 bnx2_read_phy(bp, 0x10, &val);
2341 bnx2_write_phy(bp, 0x10, val & ~0x1);
2344 /* ethernet@wirespeed */
2345 bnx2_write_phy(bp, 0x18, 0x7007);
2346 bnx2_read_phy(bp, 0x18, &val);
2347 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2348 return 0;
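/* Common PHY init: restore the default MII register offsets, enable the
 * link attention, read the PHY ID, run the chip-specific SerDes or copper
 * init, and finish with bnx2_setup_phy().  Remote-PHY (management
 * controlled) ports skip straight to the setup step.
 */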
2352 static int
2353 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2354 __releases(&bp->phy_lock)
2355 __acquires(&bp->phy_lock)
2357 u32 val;
2358 int rc = 0;
2360 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2361 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2363 bp->mii_bmcr = MII_BMCR;
2364 bp->mii_bmsr = MII_BMSR;
2365 bp->mii_bmsr1 = MII_BMSR;
2366 bp->mii_adv = MII_ADVERTISE;
2367 bp->mii_lpa = MII_LPA;
2369 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2371 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2372 goto setup_phy;
2374 bnx2_read_phy(bp, MII_PHYSID1, &val);
2375 bp->phy_id = val << 16;
2376 bnx2_read_phy(bp, MII_PHYSID2, &val);
2377 bp->phy_id |= val & 0xffff;
2379 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2380 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2381 rc = bnx2_init_5706s_phy(bp, reset_phy);
2382 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2383 rc = bnx2_init_5708s_phy(bp, reset_phy);
2384 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2385 rc = bnx2_init_5709s_phy(bp, reset_phy);
2387 else {
2388 rc = bnx2_init_copper_phy(bp, reset_phy);
2391 setup_phy:
2392 if (!rc)
2393 rc = bnx2_setup_phy(bp, bp->phy_port);
2395 return rc;
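/* Loopback setup helpers: MAC loopback forces the EMAC into internal
 * loopback with a forced link, while PHY loopback programs BMCR_LOOPBACK
 * at 1000 Mbps full duplex and briefly waits for the link to settle.
 */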
2398 static int
2399 bnx2_set_mac_loopback(struct bnx2 *bp)
2401 u32 mac_mode;
2403 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2404 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2405 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2406 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2407 bp->link_up = 1;
2408 return 0;
2411 static int bnx2_test_link(struct bnx2 *);
2413 static int
2414 bnx2_set_phy_loopback(struct bnx2 *bp)
2416 u32 mac_mode;
2417 int rc, i;
2419 spin_lock_bh(&bp->phy_lock);
2420 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2421 BMCR_SPEED1000);
2422 spin_unlock_bh(&bp->phy_lock);
2423 if (rc)
2424 return rc;
2426 for (i = 0; i < 10; i++) {
2427 if (bnx2_test_link(bp) == 0)
2428 break;
2429 msleep(100);
2432 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2433 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2434 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2435 BNX2_EMAC_MODE_25G_MODE);
2437 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2438 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2439 bp->link_up = 1;
2440 return 0;
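/* Synchronize with the bootcode: post msg_data (tagged with a rolling
 * sequence number) in the driver mailbox and, if an ack is requested,
 * poll the firmware mailbox for a matching sequence, reporting a timeout
 * back to the firmware when none arrives.
 */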
2443 static int
2444 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2446 int i;
2447 u32 val;
2449 bp->fw_wr_seq++;
2450 msg_data |= bp->fw_wr_seq;
2452 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2454 if (!ack)
2455 return 0;
2457 /* wait for an acknowledgement. */
2458 for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2459 msleep(10);
2461 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2463 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2464 break;
2466 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2467 return 0;
2469 /* If we timed out, inform the firmware that this is the case. */
2470 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2471 if (!silent)
2472 pr_err("fw sync timeout, reset code = %x\n", msg_data);
2474 msg_data &= ~BNX2_DRV_MSG_CODE;
2475 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2477 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2479 return -EBUSY;
2482 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2483 return -EIO;
2485 return 0;
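/* 5709 context setup: kick off the context memory initialization, then
 * write each host context page address into the chip's page table and
 * wait for the write-request bit to clear.
 */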
2488 static int
2489 bnx2_init_5709_context(struct bnx2 *bp)
2491 int i, ret = 0;
2492 u32 val;
2494 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2495 val |= (BCM_PAGE_BITS - 8) << 16;
2496 REG_WR(bp, BNX2_CTX_COMMAND, val);
2497 for (i = 0; i < 10; i++) {
2498 val = REG_RD(bp, BNX2_CTX_COMMAND);
2499 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2500 break;
2501 udelay(2);
2503 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2504 return -EBUSY;
2506 for (i = 0; i < bp->ctx_pages; i++) {
2507 int j;
2509 if (bp->ctx_blk[i])
2510 memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2511 else
2512 return -ENOMEM;
2514 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2515 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2516 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2517 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2518 (u64) bp->ctx_blk_mapping[i] >> 32);
2519 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2520 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2521 for (j = 0; j < 10; j++) {
2523 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2524 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2525 break;
2526 udelay(5);
2528 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2529 ret = -EBUSY;
2530 break;
2533 return ret;
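/* 5706/5708 context setup: walk all 96 context IDs (with the 5706 A0
 * address remapping quirk) and zero each context through the paged
 * CTX_VIRT_ADDR / CTX_PAGE_TBL window.
 */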
2536 static void
2537 bnx2_init_context(struct bnx2 *bp)
2539 u32 vcid;
2541 vcid = 96;
2542 while (vcid) {
2543 u32 vcid_addr, pcid_addr, offset;
2544 int i;
2546 vcid--;
2548 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2549 u32 new_vcid;
2551 vcid_addr = GET_PCID_ADDR(vcid);
2552 if (vcid & 0x8) {
2553 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2555 else {
2556 new_vcid = vcid;
2558 pcid_addr = GET_PCID_ADDR(new_vcid);
2560 else {
2561 vcid_addr = GET_CID_ADDR(vcid);
2562 pcid_addr = vcid_addr;
2565 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2566 vcid_addr += (i << PHY_CTX_SHIFT);
2567 pcid_addr += (i << PHY_CTX_SHIFT);
2569 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2570 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2572 /* Zero out the context. */
2573 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2574 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
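/* Work around bad RX buffer memory: allocate mbuf clusters from the
 * chip's pool, remember the handles that do not have bit 9 set, and free
 * only those back so the bad blocks stay out of circulation.
 */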
2579 static int
2580 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2582 u16 *good_mbuf;
2583 u32 good_mbuf_cnt;
2584 u32 val;
2586 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2587 if (good_mbuf == NULL) {
2588 pr_err("Failed to allocate memory in %s\n", __func__);
2589 return -ENOMEM;
2592 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2593 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2595 good_mbuf_cnt = 0;
2597 /* Allocate a bunch of mbufs and save the good ones in an array. */
2598 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2599 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2600 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2601 BNX2_RBUF_COMMAND_ALLOC_REQ);
2603 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2605 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2607 /* The addresses with Bit 9 set are bad memory blocks. */
2608 if (!(val & (1 << 9))) {
2609 good_mbuf[good_mbuf_cnt] = (u16) val;
2610 good_mbuf_cnt++;
2613 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2616 /* Free the good ones back to the mbuf pool, thus discarding
2617 * all the bad ones. */
2618 while (good_mbuf_cnt) {
2619 good_mbuf_cnt--;
2621 val = good_mbuf[good_mbuf_cnt];
2622 val = (val << 9) | val | 1;
2624 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2626 kfree(good_mbuf);
2627 return 0;
2630 static void
2631 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2633 u32 val;
2635 val = (mac_addr[0] << 8) | mac_addr[1];
2637 REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2639 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2640 (mac_addr[4] << 8) | mac_addr[5];
2642 REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2645 static inline int
2646 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2648 dma_addr_t mapping;
2649 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2650 struct rx_bd *rxbd =
2651 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2652 struct page *page = alloc_page(gfp);
2654 if (!page)
2655 return -ENOMEM;
2656 mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2657 PCI_DMA_FROMDEVICE);
2658 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2659 __free_page(page);
2660 return -EIO;
2663 rx_pg->page = page;
2664 dma_unmap_addr_set(rx_pg, mapping, mapping);
2665 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2666 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2667 return 0;
2670 static void
2671 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2673 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2674 struct page *page = rx_pg->page;
2676 if (!page)
2677 return;
2679 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2680 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2682 __free_page(page);
2683 rx_pg->page = NULL;
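/* Allocate and DMA-map an skb for a standard RX ring slot, aligning the
 * data pointer and publishing the mapping in the corresponding rx_bd.
 */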
2686 static inline int
2687 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2689 struct sk_buff *skb;
2690 struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2691 dma_addr_t mapping;
2692 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2693 unsigned long align;
2695 skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
2696 if (skb == NULL) {
2697 return -ENOMEM;
2700 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2701 skb_reserve(skb, BNX2_RX_ALIGN - align);
2703 mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
2704 PCI_DMA_FROMDEVICE);
2705 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2706 dev_kfree_skb(skb);
2707 return -EIO;
2710 rx_buf->skb = skb;
2711 rx_buf->desc = (struct l2_fhdr *) skb->data;
2712 dma_unmap_addr_set(rx_buf, mapping, mapping);
2714 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2715 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2717 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2719 return 0;
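/* Detect a pending PHY event by comparing the attention bits against
 * their acknowledged copy, and acknowledge it by setting or clearing the
 * bit through the PCICFG status commands.
 */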
2722 static int
2723 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2725 struct status_block *sblk = bnapi->status_blk.msi;
2726 u32 new_link_state, old_link_state;
2727 int is_set = 1;
2729 new_link_state = sblk->status_attn_bits & event;
2730 old_link_state = sblk->status_attn_bits_ack & event;
2731 if (new_link_state != old_link_state) {
2732 if (new_link_state)
2733 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2734 else
2735 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2736 } else
2737 is_set = 0;
2739 return is_set;
2742 static void
2743 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2745 spin_lock(&bp->phy_lock);
2747 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2748 bnx2_set_link(bp);
2749 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2750 bnx2_set_remote_link(bp);
2752 spin_unlock(&bp->phy_lock);
2756 static inline u16
2757 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2759 u16 cons;
2761 /* Tell compiler that status block fields can change. */
2762 barrier();
2763 cons = *bnapi->hw_tx_cons_ptr;
2764 barrier();
2765 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2766 cons++;
2767 return cons;
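/* TX completion: walk the TX ring from the software consumer to the
 * hardware consumer, unmapping and freeing completed skbs, then wake the
 * TX queue once enough descriptors have been reclaimed.
 */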
2770 static int
2771 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2773 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2774 u16 hw_cons, sw_cons, sw_ring_cons;
2775 int tx_pkt = 0, index;
2776 struct netdev_queue *txq;
2778 index = (bnapi - bp->bnx2_napi);
2779 txq = netdev_get_tx_queue(bp->dev, index);
2781 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2782 sw_cons = txr->tx_cons;
2784 while (sw_cons != hw_cons) {
2785 struct sw_tx_bd *tx_buf;
2786 struct sk_buff *skb;
2787 int i, last;
2789 sw_ring_cons = TX_RING_IDX(sw_cons);
2791 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2792 skb = tx_buf->skb;
2794 /* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
2795 prefetch(&skb->end);
2797 /* partial BD completions possible with TSO packets */
2798 if (tx_buf->is_gso) {
2799 u16 last_idx, last_ring_idx;
2801 last_idx = sw_cons + tx_buf->nr_frags + 1;
2802 last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2803 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2804 last_idx++;
2806 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2807 break;
2811 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2812 skb_headlen(skb), PCI_DMA_TODEVICE);
2814 tx_buf->skb = NULL;
2815 last = tx_buf->nr_frags;
2817 for (i = 0; i < last; i++) {
2818 sw_cons = NEXT_TX_BD(sw_cons);
2820 dma_unmap_page(&bp->pdev->dev,
2821 dma_unmap_addr(
2822 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2823 mapping),
2824 skb_shinfo(skb)->frags[i].size,
2825 PCI_DMA_TODEVICE);
2828 sw_cons = NEXT_TX_BD(sw_cons);
2830 dev_kfree_skb(skb);
2831 tx_pkt++;
2832 if (tx_pkt == budget)
2833 break;
2835 if (hw_cons == sw_cons)
2836 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2839 txr->hw_tx_cons = hw_cons;
2840 txr->tx_cons = sw_cons;
2842 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2843 * before checking for netif_tx_queue_stopped(). Without the
2844 * memory barrier, there is a small possibility that bnx2_start_xmit()
2845 * will miss it and cause the queue to be stopped forever.
2847 smp_mb();
2849 if (unlikely(netif_tx_queue_stopped(txq)) &&
2850 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2851 __netif_tx_lock(txq, smp_processor_id());
2852 if ((netif_tx_queue_stopped(txq)) &&
2853 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2854 netif_tx_wake_queue(txq);
2855 __netif_tx_unlock(txq);
2858 return tx_pkt;
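/* Recycle 'count' page-ring entries back to the producer side, optionally
 * reclaiming the last page from a partially built skb that is then freed.
 */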
2861 static void
2862 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2863 struct sk_buff *skb, int count)
2865 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2866 struct rx_bd *cons_bd, *prod_bd;
2867 int i;
2868 u16 hw_prod, prod;
2869 u16 cons = rxr->rx_pg_cons;
2871 cons_rx_pg = &rxr->rx_pg_ring[cons];
2873 /* The caller was unable to allocate a new page to replace the
2874 * last one in the frags array, so we need to recycle that page
2875 * and then free the skb.
2877 if (skb) {
2878 struct page *page;
2879 struct skb_shared_info *shinfo;
2881 shinfo = skb_shinfo(skb);
2882 shinfo->nr_frags--;
2883 page = shinfo->frags[shinfo->nr_frags].page;
2884 shinfo->frags[shinfo->nr_frags].page = NULL;
2886 cons_rx_pg->page = page;
2887 dev_kfree_skb(skb);
2890 hw_prod = rxr->rx_pg_prod;
2892 for (i = 0; i < count; i++) {
2893 prod = RX_PG_RING_IDX(hw_prod);
2895 prod_rx_pg = &rxr->rx_pg_ring[prod];
2896 cons_rx_pg = &rxr->rx_pg_ring[cons];
2897 cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2898 prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2900 if (prod != cons) {
2901 prod_rx_pg->page = cons_rx_pg->page;
2902 cons_rx_pg->page = NULL;
2903 dma_unmap_addr_set(prod_rx_pg, mapping,
2904 dma_unmap_addr(cons_rx_pg, mapping));
2906 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2907 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2910 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2911 hw_prod = NEXT_RX_BD(hw_prod);
2913 rxr->rx_pg_prod = hw_prod;
2914 rxr->rx_pg_cons = cons;
2917 static inline void
2918 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2919 struct sk_buff *skb, u16 cons, u16 prod)
2921 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2922 struct rx_bd *cons_bd, *prod_bd;
2924 cons_rx_buf = &rxr->rx_buf_ring[cons];
2925 prod_rx_buf = &rxr->rx_buf_ring[prod];
2927 dma_sync_single_for_device(&bp->pdev->dev,
2928 dma_unmap_addr(cons_rx_buf, mapping),
2929 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2931 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2933 prod_rx_buf->skb = skb;
2934 prod_rx_buf->desc = (struct l2_fhdr *) skb->data;
2936 if (cons == prod)
2937 return;
2939 dma_unmap_addr_set(prod_rx_buf, mapping,
2940 dma_unmap_addr(cons_rx_buf, mapping));
2942 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2943 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2944 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2945 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
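/* Finish building a received skb: replenish the ring slot first (reusing
 * the old buffer on allocation failure), then either keep the linear data
 * or, for split/jumbo frames, attach fragments from the page ring.
 */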
2948 static int
2949 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
2950 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2951 u32 ring_idx)
2953 int err;
2954 u16 prod = ring_idx & 0xffff;
2956 err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
2957 if (unlikely(err)) {
2958 bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
2959 if (hdr_len) {
2960 unsigned int raw_len = len + 4;
2961 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2963 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
2965 return err;
2968 skb_reserve(skb, BNX2_RX_OFFSET);
2969 dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
2970 PCI_DMA_FROMDEVICE);
2972 if (hdr_len == 0) {
2973 skb_put(skb, len);
2974 return 0;
2975 } else {
2976 unsigned int i, frag_len, frag_size, pages;
2977 struct sw_pg *rx_pg;
2978 u16 pg_cons = rxr->rx_pg_cons;
2979 u16 pg_prod = rxr->rx_pg_prod;
2981 frag_size = len + 4 - hdr_len;
2982 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2983 skb_put(skb, hdr_len);
2985 for (i = 0; i < pages; i++) {
2986 dma_addr_t mapping_old;
2988 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2989 if (unlikely(frag_len <= 4)) {
2990 unsigned int tail = 4 - frag_len;
2992 rxr->rx_pg_cons = pg_cons;
2993 rxr->rx_pg_prod = pg_prod;
2994 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
2995 pages - i);
2996 skb->len -= tail;
2997 if (i == 0) {
2998 skb->tail -= tail;
2999 } else {
3000 skb_frag_t *frag =
3001 &skb_shinfo(skb)->frags[i - 1];
3002 frag->size -= tail;
3003 skb->data_len -= tail;
3004 skb->truesize -= tail;
3006 return 0;
3008 rx_pg = &rxr->rx_pg_ring[pg_cons];
3010 /* Don't unmap yet. If we're unable to allocate a new
3011 * page, we need to recycle the page and the DMA addr.
3013 mapping_old = dma_unmap_addr(rx_pg, mapping);
3014 if (i == pages - 1)
3015 frag_len -= 4;
3017 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3018 rx_pg->page = NULL;
3020 err = bnx2_alloc_rx_page(bp, rxr,
3021 RX_PG_RING_IDX(pg_prod),
3022 GFP_ATOMIC);
3023 if (unlikely(err)) {
3024 rxr->rx_pg_cons = pg_cons;
3025 rxr->rx_pg_prod = pg_prod;
3026 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3027 pages - i);
3028 return err;
3031 dma_unmap_page(&bp->pdev->dev, mapping_old,
3032 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3034 frag_size -= frag_len;
3035 skb->data_len += frag_len;
3036 skb->truesize += frag_len;
3037 skb->len += frag_len;
3039 pg_prod = NEXT_RX_BD(pg_prod);
3040 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
3042 rxr->rx_pg_prod = pg_prod;
3043 rxr->rx_pg_cons = pg_cons;
3045 return 0;
3048 static inline u16
3049 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3051 u16 cons;
3053 /* Tell compiler that status block fields can change. */
3054 barrier();
3055 cons = *bnapi->hw_rx_cons_ptr;
3056 barrier();
3057 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3058 cons++;
3059 return cons;
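/* RX completion: process up to 'budget' packets, copying small frames
 * into fresh skbs, handing larger ones to bnx2_rx_skb(), applying the
 * VLAN, checksum and RSS hash status bits, and finally publishing the new
 * producer indices to the chip.
 */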
3062 static int
3063 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3065 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3066 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3067 struct l2_fhdr *rx_hdr;
3068 int rx_pkt = 0, pg_ring_used = 0;
3070 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3071 sw_cons = rxr->rx_cons;
3072 sw_prod = rxr->rx_prod;
3074 /* Memory barrier necessary as speculative reads of the rx
3075 * buffer can be ahead of the index in the status block
3077 rmb();
3078 while (sw_cons != hw_cons) {
3079 unsigned int len, hdr_len;
3080 u32 status;
3081 struct sw_bd *rx_buf, *next_rx_buf;
3082 struct sk_buff *skb;
3083 dma_addr_t dma_addr;
3085 sw_ring_cons = RX_RING_IDX(sw_cons);
3086 sw_ring_prod = RX_RING_IDX(sw_prod);
3088 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3089 skb = rx_buf->skb;
3090 prefetchw(skb);
3092 next_rx_buf =
3093 &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
3094 prefetch(next_rx_buf->desc);
3096 rx_buf->skb = NULL;
3098 dma_addr = dma_unmap_addr(rx_buf, mapping);
3100 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3101 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3102 PCI_DMA_FROMDEVICE);
3104 rx_hdr = rx_buf->desc;
3105 len = rx_hdr->l2_fhdr_pkt_len;
3106 status = rx_hdr->l2_fhdr_status;
3108 hdr_len = 0;
3109 if (status & L2_FHDR_STATUS_SPLIT) {
3110 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3111 pg_ring_used = 1;
3112 } else if (len > bp->rx_jumbo_thresh) {
3113 hdr_len = bp->rx_jumbo_thresh;
3114 pg_ring_used = 1;
3117 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3118 L2_FHDR_ERRORS_PHY_DECODE |
3119 L2_FHDR_ERRORS_ALIGNMENT |
3120 L2_FHDR_ERRORS_TOO_SHORT |
3121 L2_FHDR_ERRORS_GIANT_FRAME))) {
3123 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3124 sw_ring_prod);
3125 if (pg_ring_used) {
3126 int pages;
3128 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3130 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3132 goto next_rx;
3135 len -= 4;
3137 if (len <= bp->rx_copy_thresh) {
3138 struct sk_buff *new_skb;
3140 new_skb = netdev_alloc_skb(bp->dev, len + 6);
3141 if (new_skb == NULL) {
3142 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3143 sw_ring_prod);
3144 goto next_rx;
3147 /* aligned copy */
3148 skb_copy_from_linear_data_offset(skb,
3149 BNX2_RX_OFFSET - 6,
3150 new_skb->data, len + 6);
3151 skb_reserve(new_skb, 6);
3152 skb_put(new_skb, len);
3154 bnx2_reuse_rx_skb(bp, rxr, skb,
3155 sw_ring_cons, sw_ring_prod);
3157 skb = new_skb;
3158 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
3159 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
3160 goto next_rx;
3162 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3163 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3164 __vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);
3166 skb->protocol = eth_type_trans(skb, bp->dev);
3168 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
3169 (ntohs(skb->protocol) != 0x8100)) {
3171 dev_kfree_skb(skb);
3172 goto next_rx;
3176 skb_checksum_none_assert(skb);
3177 if ((bp->dev->features & NETIF_F_RXCSUM) &&
3178 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3179 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3181 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3182 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3183 skb->ip_summed = CHECKSUM_UNNECESSARY;
3185 if ((bp->dev->features & NETIF_F_RXHASH) &&
3186 ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3187 L2_FHDR_STATUS_USE_RXHASH))
3188 skb->rxhash = rx_hdr->l2_fhdr_hash;
3190 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3191 napi_gro_receive(&bnapi->napi, skb);
3192 rx_pkt++;
3194 next_rx:
3195 sw_cons = NEXT_RX_BD(sw_cons);
3196 sw_prod = NEXT_RX_BD(sw_prod);
3198 if (rx_pkt == budget)
3199 break;
3201 /* Refresh hw_cons to see if there is new work */
3202 if (sw_cons == hw_cons) {
3203 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3204 rmb();
3207 rxr->rx_cons = sw_cons;
3208 rxr->rx_prod = sw_prod;
3210 if (pg_ring_used)
3211 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3213 REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3215 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3217 mmiowb();
3219 return rx_pkt;
3223 /* MSI ISR - The only difference between this and the INTx ISR
3224 * is that the MSI interrupt is always serviced.
3226 static irqreturn_t
3227 bnx2_msi(int irq, void *dev_instance)
3229 struct bnx2_napi *bnapi = dev_instance;
3230 struct bnx2 *bp = bnapi->bp;
3232 prefetch(bnapi->status_blk.msi);
3233 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3234 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3235 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3237 /* Return here if interrupt is disabled. */
3238 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3239 return IRQ_HANDLED;
3241 napi_schedule(&bnapi->napi);
3243 return IRQ_HANDLED;
3246 static irqreturn_t
3247 bnx2_msi_1shot(int irq, void *dev_instance)
3249 struct bnx2_napi *bnapi = dev_instance;
3250 struct bnx2 *bp = bnapi->bp;
3252 prefetch(bnapi->status_blk.msi);
3254 /* Return here if interrupt is disabled. */
3255 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3256 return IRQ_HANDLED;
3258 napi_schedule(&bnapi->napi);
3260 return IRQ_HANDLED;
3263 static irqreturn_t
3264 bnx2_interrupt(int irq, void *dev_instance)
3266 struct bnx2_napi *bnapi = dev_instance;
3267 struct bnx2 *bp = bnapi->bp;
3268 struct status_block *sblk = bnapi->status_blk.msi;
3270 /* When using INTx, it is possible for the interrupt to arrive
3271 * at the CPU before the status block posted prior to the
3272 * interrupt. Reading a register will flush the status block.
3273 * When using MSI, the MSI message will always complete after
3274 * the status block write.
3276 if ((sblk->status_idx == bnapi->last_status_idx) &&
3277 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3278 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3279 return IRQ_NONE;
3281 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3282 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3283 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3285 /* Read back to deassert IRQ immediately to avoid too many
3286 * spurious interrupts.
3288 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3290 /* Return here if interrupt is shared and is disabled. */
3291 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3292 return IRQ_HANDLED;
3294 if (napi_schedule_prep(&bnapi->napi)) {
3295 bnapi->last_status_idx = sblk->status_idx;
3296 __napi_schedule(&bnapi->napi);
3299 return IRQ_HANDLED;
3302 static inline int
3303 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3305 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3306 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3308 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3309 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3310 return 1;
3311 return 0;
3314 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3315 STATUS_ATTN_BITS_TIMER_ABORT)
3317 static inline int
3318 bnx2_has_work(struct bnx2_napi *bnapi)
3320 struct status_block *sblk = bnapi->status_blk.msi;
3322 if (bnx2_has_fast_work(bnapi))
3323 return 1;
3325 #ifdef BCM_CNIC
3326 if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3327 return 1;
3328 #endif
3330 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3331 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3332 return 1;
3334 return 0;
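/* Periodic check for a missed (edge-triggered) MSI: if work is pending
 * but the status index has not advanced since the last idle check, pulse
 * the MSI enable bit and invoke the MSI handler directly.
 */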
3337 static void
3338 bnx2_chk_missed_msi(struct bnx2 *bp)
3340 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3341 u32 msi_ctrl;
3343 if (bnx2_has_work(bnapi)) {
3344 msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3345 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3346 return;
3348 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3349 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3350 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3351 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3352 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3356 bp->idle_chk_status_idx = bnapi->last_status_idx;
3359 #ifdef BCM_CNIC
3360 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3362 struct cnic_ops *c_ops;
3364 if (!bnapi->cnic_present)
3365 return;
3367 rcu_read_lock();
3368 c_ops = rcu_dereference(bp->cnic_ops);
3369 if (c_ops)
3370 bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3371 bnapi->status_blk.msi);
3372 rcu_read_unlock();
3374 #endif
3376 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3378 struct status_block *sblk = bnapi->status_blk.msi;
3379 u32 status_attn_bits = sblk->status_attn_bits;
3380 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3382 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3383 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3385 bnx2_phy_int(bp, bnapi);
3387 /* This is needed to take care of transient status
3388 * during link changes.
3390 REG_WR(bp, BNX2_HC_COMMAND,
3391 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3392 REG_RD(bp, BNX2_HC_COMMAND);
3396 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3397 int work_done, int budget)
3399 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3400 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3402 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3403 bnx2_tx_int(bp, bnapi, 0);
3405 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3406 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3408 return work_done;
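/* NAPI poll routine for an MSI-X vector: only fast-path RX/TX work is
 * handled here; link attention processing is done by bnx2_poll() on the
 * base vector.
 */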
3411 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3413 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3414 struct bnx2 *bp = bnapi->bp;
3415 int work_done = 0;
3416 struct status_block_msix *sblk = bnapi->status_blk.msix;
3418 while (1) {
3419 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3420 if (unlikely(work_done >= budget))
3421 break;
3423 bnapi->last_status_idx = sblk->status_idx;
3424 /* status idx must be read before checking for more work. */
3425 rmb();
3426 if (likely(!bnx2_has_fast_work(bnapi))) {
3428 napi_complete(napi);
3429 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3430 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3431 bnapi->last_status_idx);
3432 break;
3435 return work_done;
3438 static int bnx2_poll(struct napi_struct *napi, int budget)
3440 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3441 struct bnx2 *bp = bnapi->bp;
3442 int work_done = 0;
3443 struct status_block *sblk = bnapi->status_blk.msi;
3445 while (1) {
3446 bnx2_poll_link(bp, bnapi);
3448 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3450 #ifdef BCM_CNIC
3451 bnx2_poll_cnic(bp, bnapi);
3452 #endif
3454 /* bnapi->last_status_idx is used below to tell the hw how
3455 * much work has been processed, so we must read it before
3456 * checking for more work.
3458 bnapi->last_status_idx = sblk->status_idx;
3460 if (unlikely(work_done >= budget))
3461 break;
3463 rmb();
3464 if (likely(!bnx2_has_work(bnapi))) {
3465 napi_complete(napi);
3466 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3467 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3468 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3469 bnapi->last_status_idx);
3470 break;
3472 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3473 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3474 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3475 bnapi->last_status_idx);
3477 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3478 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3479 bnapi->last_status_idx);
3480 break;
3484 return work_done;
3487 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3488 * from set_multicast.
3490 static void
3491 bnx2_set_rx_mode(struct net_device *dev)
3493 struct bnx2 *bp = netdev_priv(dev);
3494 u32 rx_mode, sort_mode;
3495 struct netdev_hw_addr *ha;
3496 int i;
3498 if (!netif_running(dev))
3499 return;
3501 spin_lock_bh(&bp->phy_lock);
3503 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3504 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3505 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3506 if (!(dev->features & NETIF_F_HW_VLAN_RX) &&
3507 (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3508 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3509 if (dev->flags & IFF_PROMISC) {
3510 /* Promiscuous mode. */
3511 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3512 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3513 BNX2_RPM_SORT_USER0_PROM_VLAN;
3515 else if (dev->flags & IFF_ALLMULTI) {
3516 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3517 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3518 0xffffffff);
3520 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3522 else {
3523 /* Accept one or more multicast(s). */
3524 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3525 u32 regidx;
3526 u32 bit;
3527 u32 crc;
3529 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3531 netdev_for_each_mc_addr(ha, dev) {
3532 crc = ether_crc_le(ETH_ALEN, ha->addr);
3533 bit = crc & 0xff;
3534 regidx = (bit & 0xe0) >> 5;
3535 bit &= 0x1f;
3536 mc_filter[regidx] |= (1 << bit);
3539 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3540 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3541 mc_filter[i]);
3544 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3547 if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3548 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3549 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3550 BNX2_RPM_SORT_USER0_PROM_VLAN;
3551 } else if (!(dev->flags & IFF_PROMISC)) {
3552 /* Add all entries into the match filter list */
3553 i = 0;
3554 netdev_for_each_uc_addr(ha, dev) {
3555 bnx2_set_mac_addr(bp, ha->addr,
3556 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3557 sort_mode |= (1 <<
3558 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3559 i++;
3564 if (rx_mode != bp->rx_mode) {
3565 bp->rx_mode = rx_mode;
3566 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3569 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3570 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3571 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3573 spin_unlock_bh(&bp->phy_lock);
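/* Firmware image validation: check_fw_section() sanity-checks a single
 * section header (offset, length, alignment) against the size of the
 * loaded file before the driver trusts its contents.
 */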
3576 static int __devinit
3577 check_fw_section(const struct firmware *fw,
3578 const struct bnx2_fw_file_section *section,
3579 u32 alignment, bool non_empty)
3581 u32 offset = be32_to_cpu(section->offset);
3582 u32 len = be32_to_cpu(section->len);
3584 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3585 return -EINVAL;
3586 if ((non_empty && len == 0) || len > fw->size - offset ||
3587 len & (alignment - 1))
3588 return -EINVAL;
3589 return 0;
3592 static int __devinit
3593 check_mips_fw_entry(const struct firmware *fw,
3594 const struct bnx2_mips_fw_file_entry *entry)
3596 if (check_fw_section(fw, &entry->text, 4, true) ||
3597 check_fw_section(fw, &entry->data, 4, false) ||
3598 check_fw_section(fw, &entry->rodata, 4, false))
3599 return -EINVAL;
3600 return 0;
3603 static int __devinit
3604 bnx2_request_firmware(struct bnx2 *bp)
3606 const char *mips_fw_file, *rv2p_fw_file;
3607 const struct bnx2_mips_fw_file *mips_fw;
3608 const struct bnx2_rv2p_fw_file *rv2p_fw;
3609 int rc;
3611 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3612 mips_fw_file = FW_MIPS_FILE_09;
3613 if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3614 (CHIP_ID(bp) == CHIP_ID_5709_A1))
3615 rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3616 else
3617 rv2p_fw_file = FW_RV2P_FILE_09;
3618 } else {
3619 mips_fw_file = FW_MIPS_FILE_06;
3620 rv2p_fw_file = FW_RV2P_FILE_06;
3623 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3624 if (rc) {
3625 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3626 return rc;
3629 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3630 if (rc) {
3631 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3632 return rc;
3634 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3635 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3636 if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3637 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3638 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3639 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3640 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3641 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3642 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3643 return -EINVAL;
3645 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3646 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3647 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3648 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3649 return -EINVAL;
3652 return 0;
3655 static u32
3656 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3658 switch (idx) {
3659 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3660 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3661 rv2p_code |= RV2P_BD_PAGE_SIZE;
3662 break;
3664 return rv2p_code;
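/* Load one RV2P processor image: write the instruction words through the
 * INSTR_HIGH/LOW window, apply the fixup table (e.g. the BD page size
 * patch), and leave the processor in reset until it is un-stalled later.
 */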
3667 static int
3668 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3669 const struct bnx2_rv2p_fw_file_entry *fw_entry)
3671 u32 rv2p_code_len, file_offset;
3672 __be32 *rv2p_code;
3673 int i;
3674 u32 val, cmd, addr;
3676 rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3677 file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3679 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3681 if (rv2p_proc == RV2P_PROC1) {
3682 cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3683 addr = BNX2_RV2P_PROC1_ADDR_CMD;
3684 } else {
3685 cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3686 addr = BNX2_RV2P_PROC2_ADDR_CMD;
3689 for (i = 0; i < rv2p_code_len; i += 8) {
3690 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3691 rv2p_code++;
3692 REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3693 rv2p_code++;
3695 val = (i / 8) | cmd;
3696 REG_WR(bp, addr, val);
3699 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3700 for (i = 0; i < 8; i++) {
3701 u32 loc, code;
3703 loc = be32_to_cpu(fw_entry->fixup[i]);
3704 if (loc && ((loc * 4) < rv2p_code_len)) {
3705 code = be32_to_cpu(*(rv2p_code + loc - 1));
3706 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3707 code = be32_to_cpu(*(rv2p_code + loc));
3708 code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3709 REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3711 val = (loc / 2) | cmd;
3712 REG_WR(bp, addr, val);
3716 /* Reset the processor, un-stall is done later. */
3717 if (rv2p_proc == RV2P_PROC1) {
3718 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3720 else {
3721 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3724 return 0;
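/* Load one MIPS CPU image: halt the processor, copy the text, data and
 * read-only sections into its scratchpad view, set the program counter to
 * the image start address, and release the halt.
 */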
3727 static int
3728 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3729 const struct bnx2_mips_fw_file_entry *fw_entry)
3731 u32 addr, len, file_offset;
3732 __be32 *data;
3733 u32 offset;
3734 u32 val;
3736 /* Halt the CPU. */
3737 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3738 val |= cpu_reg->mode_value_halt;
3739 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3740 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3742 /* Load the Text area. */
3743 addr = be32_to_cpu(fw_entry->text.addr);
3744 len = be32_to_cpu(fw_entry->text.len);
3745 file_offset = be32_to_cpu(fw_entry->text.offset);
3746 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3748 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3749 if (len) {
3750 int j;
3752 for (j = 0; j < (len / 4); j++, offset += 4)
3753 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3756 /* Load the Data area. */
3757 addr = be32_to_cpu(fw_entry->data.addr);
3758 len = be32_to_cpu(fw_entry->data.len);
3759 file_offset = be32_to_cpu(fw_entry->data.offset);
3760 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3762 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3763 if (len) {
3764 int j;
3766 for (j = 0; j < (len / 4); j++, offset += 4)
3767 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3770 /* Load the Read-Only area. */
3771 addr = be32_to_cpu(fw_entry->rodata.addr);
3772 len = be32_to_cpu(fw_entry->rodata.len);
3773 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3774 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3776 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3777 if (len) {
3778 int j;
3780 for (j = 0; j < (len / 4); j++, offset += 4)
3781 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3784 /* Clear the pre-fetch instruction. */
3785 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3787 val = be32_to_cpu(fw_entry->start_addr);
3788 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3790 /* Start the CPU. */
3791 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3792 val &= ~cpu_reg->mode_value_halt;
3793 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3794 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3796 return 0;
3799 static int
3800 bnx2_init_cpus(struct bnx2 *bp)
3802 const struct bnx2_mips_fw_file *mips_fw =
3803 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3804 const struct bnx2_rv2p_fw_file *rv2p_fw =
3805 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3806 int rc;
3808 /* Initialize the RV2P processor. */
3809 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3810 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3812 /* Initialize the RX Processor. */
3813 rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3814 if (rc)
3815 goto init_cpu_err;
3817 /* Initialize the TX Processor. */
3818 rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3819 if (rc)
3820 goto init_cpu_err;
3822 /* Initialize the TX Patch-up Processor. */
3823 rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3824 if (rc)
3825 goto init_cpu_err;
3827 /* Initialize the Completion Processor. */
3828 rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3829 if (rc)
3830 goto init_cpu_err;
3832 /* Initialize the Command Processor. */
3833 rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3835 init_cpu_err:
3836 return rc;
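/* Power state handling: D0 restores normal EMAC/RPM operation after the
 * D3hot exit delay; D3hot optionally arms Wake-on-LAN (forcing a 10/100
 * autoneg advertisement on TP ports and enabling magic/ACPI packet
 * reception), notifies the firmware, and writes the new PCI PM state.
 */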
3839 static int
3840 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3842 u16 pmcsr;
3844 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3846 switch (state) {
3847 case PCI_D0: {
3848 u32 val;
3850 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3851 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3852 PCI_PM_CTRL_PME_STATUS);
3854 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3855 /* delay required during transition out of D3hot */
3856 msleep(20);
3858 val = REG_RD(bp, BNX2_EMAC_MODE);
3859 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3860 val &= ~BNX2_EMAC_MODE_MPKT;
3861 REG_WR(bp, BNX2_EMAC_MODE, val);
3863 val = REG_RD(bp, BNX2_RPM_CONFIG);
3864 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3865 REG_WR(bp, BNX2_RPM_CONFIG, val);
3866 break;
3868 case PCI_D3hot: {
3869 int i;
3870 u32 val, wol_msg;
3872 if (bp->wol) {
3873 u32 advertising;
3874 u8 autoneg;
3876 autoneg = bp->autoneg;
3877 advertising = bp->advertising;
3879 if (bp->phy_port == PORT_TP) {
3880 bp->autoneg = AUTONEG_SPEED;
3881 bp->advertising = ADVERTISED_10baseT_Half |
3882 ADVERTISED_10baseT_Full |
3883 ADVERTISED_100baseT_Half |
3884 ADVERTISED_100baseT_Full |
3885 ADVERTISED_Autoneg;
3888 spin_lock_bh(&bp->phy_lock);
3889 bnx2_setup_phy(bp, bp->phy_port);
3890 spin_unlock_bh(&bp->phy_lock);
3892 bp->autoneg = autoneg;
3893 bp->advertising = advertising;
3895 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3897 val = REG_RD(bp, BNX2_EMAC_MODE);
3899 /* Enable port mode. */
3900 val &= ~BNX2_EMAC_MODE_PORT;
3901 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3902 BNX2_EMAC_MODE_ACPI_RCVD |
3903 BNX2_EMAC_MODE_MPKT;
3904 if (bp->phy_port == PORT_TP)
3905 val |= BNX2_EMAC_MODE_PORT_MII;
3906 else {
3907 val |= BNX2_EMAC_MODE_PORT_GMII;
3908 if (bp->line_speed == SPEED_2500)
3909 val |= BNX2_EMAC_MODE_25G_MODE;
3912 REG_WR(bp, BNX2_EMAC_MODE, val);
3914 /* receive all multicast */
3915 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3916 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3917 0xffffffff);
3919 REG_WR(bp, BNX2_EMAC_RX_MODE,
3920 BNX2_EMAC_RX_MODE_SORT_MODE);
3922 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3923 BNX2_RPM_SORT_USER0_MC_EN;
3924 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3925 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3926 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3927 BNX2_RPM_SORT_USER0_ENA);
3929 /* Need to enable EMAC and RPM for WOL. */
3930 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3931 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3932 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3933 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3935 val = REG_RD(bp, BNX2_RPM_CONFIG);
3936 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3937 REG_WR(bp, BNX2_RPM_CONFIG, val);
3939 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3941 else {
3942 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3945 if (!(bp->flags & BNX2_FLAG_NO_WOL))
3946 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
3947 1, 0);
3949 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3950 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3951 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3953 if (bp->wol)
3954 pmcsr |= 3;
3956 else {
3957 pmcsr |= 3;
3959 if (bp->wol) {
3960 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3962 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3963 pmcsr);
3965 /* No more memory access after this point until
3966 * device is brought back to D0.
3968 udelay(50);
3969 break;
3971 default:
3972 return -EINVAL;
3974 return 0;
3977 static int
3978 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3980 u32 val;
3981 int j;
3983 /* Request access to the flash interface. */
3984 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3985 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3986 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3987 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3988 break;
3990 udelay(5);
3993 if (j >= NVRAM_TIMEOUT_COUNT)
3994 return -EBUSY;
3996 return 0;
3999 static int
4000 bnx2_release_nvram_lock(struct bnx2 *bp)
4002 int j;
4003 u32 val;
4005 /* Relinquish nvram interface. */
4006 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4008 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4009 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4010 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4011 break;
4013 udelay(5);
4016 if (j >= NVRAM_TIMEOUT_COUNT)
4017 return -EBUSY;
4019 return 0;
4023 static int
4024 bnx2_enable_nvram_write(struct bnx2 *bp)
4026 u32 val;
4028 val = REG_RD(bp, BNX2_MISC_CFG);
4029 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4031 if (bp->flash_info->flags & BNX2_NV_WREN) {
4032 int j;
4034 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4035 REG_WR(bp, BNX2_NVM_COMMAND,
4036 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4038 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4039 udelay(5);
4041 val = REG_RD(bp, BNX2_NVM_COMMAND);
4042 if (val & BNX2_NVM_COMMAND_DONE)
4043 break;
4046 if (j >= NVRAM_TIMEOUT_COUNT)
4047 return -EBUSY;
4049 return 0;
4052 static void
4053 bnx2_disable_nvram_write(struct bnx2 *bp)
4055 u32 val;
4057 val = REG_RD(bp, BNX2_MISC_CFG);
4058 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4062 static void
4063 bnx2_enable_nvram_access(struct bnx2 *bp)
4065 u32 val;
4067 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4068 /* Enable both bits, even on read. */
4069 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4070 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4073 static void
4074 bnx2_disable_nvram_access(struct bnx2 *bp)
4076 u32 val;
4078 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4079 /* Disable both bits, even after read. */
4080 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4081 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4082 BNX2_NVM_ACCESS_ENABLE_WR_EN));
4085 static int
4086 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4088 u32 cmd;
4089 int j;
4091 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4092 /* Buffered flash, no erase needed */
4093 return 0;
4095 /* Build an erase command */
4096 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4097 BNX2_NVM_COMMAND_DOIT;
4099 /* Need to clear DONE bit separately. */
4100 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4102 /* Address of the NVRAM page to erase. */
4103 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4105 /* Issue an erase command. */
4106 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4108 /* Wait for completion. */
4109 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4110 u32 val;
4112 udelay(5);
4114 val = REG_RD(bp, BNX2_NVM_COMMAND);
4115 if (val & BNX2_NVM_COMMAND_DONE)
4116 break;
4119 if (j >= NVRAM_TIMEOUT_COUNT)
4120 return -EBUSY;
4122 return 0;
4125 static int
4126 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4128 u32 cmd;
4129 int j;
4131 /* Build the command word. */
4132 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4134 /* Calculate an offset of a buffered flash, not needed for 5709. */
4135 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4136 offset = ((offset / bp->flash_info->page_size) <<
4137 bp->flash_info->page_bits) +
4138 (offset % bp->flash_info->page_size);
4141 /* Need to clear DONE bit separately. */
4142 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4144 /* Address of the NVRAM to read from. */
4145 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4147 /* Issue a read command. */
4148 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4150 /* Wait for completion. */
4151 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4152 u32 val;
4154 udelay(5);
4156 val = REG_RD(bp, BNX2_NVM_COMMAND);
4157 if (val & BNX2_NVM_COMMAND_DONE) {
4158 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
4159 memcpy(ret_val, &v, 4);
4160 break;
4163 if (j >= NVRAM_TIMEOUT_COUNT)
4164 return -EBUSY;
4166 return 0;
4170 static int
4171 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4173 u32 cmd;
4174 __be32 val32;
4175 int j;
4177 /* Build the command word. */
4178 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4180 /* Calculate an offset of a buffered flash, not needed for 5709. */
4181 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4182 offset = ((offset / bp->flash_info->page_size) <<
4183 bp->flash_info->page_bits) +
4184 (offset % bp->flash_info->page_size);
4187 /* Need to clear DONE bit separately. */
4188 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4190 memcpy(&val32, val, 4);
4192 /* Write the data. */
4193 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4195 /* Address of the NVRAM to write to. */
4196 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4198 /* Issue the write command. */
4199 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4201 /* Wait for completion. */
4202 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4203 udelay(5);
4205 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4206 break;
4208 if (j >= NVRAM_TIMEOUT_COUNT)
4209 return -EBUSY;
4211 return 0;
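/* Identify the attached flash/EEPROM: the 5709 uses a fixed descriptor;
 * other chips are matched against flash_table by their strapping bits and
 * may have the NVM interface reconfigured here.  The usable size comes
 * from shared hardware config when present.
 */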
4214 static int
4215 bnx2_init_nvram(struct bnx2 *bp)
4217 u32 val;
4218 int j, entry_count, rc = 0;
4219 const struct flash_spec *flash;
4221 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4222 bp->flash_info = &flash_5709;
4223 goto get_flash_size;
4226 /* Determine the selected interface. */
4227 val = REG_RD(bp, BNX2_NVM_CFG1);
4229 entry_count = ARRAY_SIZE(flash_table);
4231 if (val & 0x40000000) {
4233 /* Flash interface has been reconfigured */
4234 for (j = 0, flash = &flash_table[0]; j < entry_count;
4235 j++, flash++) {
4236 if ((val & FLASH_BACKUP_STRAP_MASK) ==
4237 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4238 bp->flash_info = flash;
4239 break;
4243 else {
4244 u32 mask;
4245 /* Not yet reconfigured */
4247 if (val & (1 << 23))
4248 mask = FLASH_BACKUP_STRAP_MASK;
4249 else
4250 mask = FLASH_STRAP_MASK;
4252 for (j = 0, flash = &flash_table[0]; j < entry_count;
4253 j++, flash++) {
4255 if ((val & mask) == (flash->strapping & mask)) {
4256 bp->flash_info = flash;
4258 /* Request access to the flash interface. */
4259 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4260 return rc;
4262 /* Enable access to flash interface */
4263 bnx2_enable_nvram_access(bp);
4265 /* Reconfigure the flash interface */
4266 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
4267 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
4268 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
4269 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4271 /* Disable access to flash interface */
4272 bnx2_disable_nvram_access(bp);
4273 bnx2_release_nvram_lock(bp);
4275 break;
4278 } /* if (val & 0x40000000) */
4280 if (j == entry_count) {
4281 bp->flash_info = NULL;
4282 pr_alert("Unknown flash/EEPROM type\n");
4283 return -ENODEV;
4286 get_flash_size:
4287 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4288 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4289 if (val)
4290 bp->flash_size = val;
4291 else
4292 bp->flash_size = bp->flash_info->total_size;
4294 return rc;
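/* Read an arbitrary byte range from NVRAM, handling unaligned start and
 * end offsets by reading whole dwords and copying out only the requested
 * bytes.
 */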
4297 static int
4298 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4299 int buf_size)
4301 int rc = 0;
4302 u32 cmd_flags, offset32, len32, extra;
4304 if (buf_size == 0)
4305 return 0;
4307 /* Request access to the flash interface. */
4308 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4309 return rc;
4311 /* Enable access to flash interface */
4312 bnx2_enable_nvram_access(bp);
4314 len32 = buf_size;
4315 offset32 = offset;
4316 extra = 0;
4318 cmd_flags = 0;
4320 if (offset32 & 3) {
4321 u8 buf[4];
4322 u32 pre_len;
4324 offset32 &= ~3;
4325 pre_len = 4 - (offset & 3);
4327 if (pre_len >= len32) {
4328 pre_len = len32;
4329 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4330 BNX2_NVM_COMMAND_LAST;
4332 else {
4333 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4336 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4338 if (rc)
4339 return rc;
4341 memcpy(ret_buf, buf + (offset & 3), pre_len);
4343 offset32 += 4;
4344 ret_buf += pre_len;
4345 len32 -= pre_len;
4347 if (len32 & 3) {
4348 extra = 4 - (len32 & 3);
4349 len32 = (len32 + 4) & ~3;
4352 if (len32 == 4) {
4353 u8 buf[4];
4355 if (cmd_flags)
4356 cmd_flags = BNX2_NVM_COMMAND_LAST;
4357 else
4358 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4359 BNX2_NVM_COMMAND_LAST;
4361 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4363 memcpy(ret_buf, buf, 4 - extra);
4365 else if (len32 > 0) {
4366 u8 buf[4];
4368 /* Read the first word. */
4369 if (cmd_flags)
4370 cmd_flags = 0;
4371 else
4372 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4374 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4376 /* Advance to the next dword. */
4377 offset32 += 4;
4378 ret_buf += 4;
4379 len32 -= 4;
4381 while (len32 > 4 && rc == 0) {
4382 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4384 /* Advance to the next dword. */
4385 offset32 += 4;
4386 ret_buf += 4;
4387 len32 -= 4;
4390 if (rc)
4391 return rc;
4393 cmd_flags = BNX2_NVM_COMMAND_LAST;
4394 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4396 memcpy(ret_buf, buf, 4 - extra);
4399 /* Disable access to flash interface */
4400 bnx2_disable_nvram_access(bp);
4402 bnx2_release_nvram_lock(bp);
4404 return rc;
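/* bnx2_nvram_write() below handles unaligned writes with a read-modify-write
 * scheme: if the start or length is not dword aligned it first reads the
 * bordering dwords (start[] / end[]), merges them with the caller's data in
 * a temporary align_buf, and then writes whole dwords only.  For example, a
 * 2-byte write at offset 5 becomes a 4-byte read at 4, a patch of bytes
 * 1..2 of that dword, and a 4-byte write back at 4 (illustrative numbers).
 */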
4407 static int
4408 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4409 int buf_size)
4411 u32 written, offset32, len32;
4412 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4413 int rc = 0;
4414 int align_start, align_end;
4416 buf = data_buf;
4417 offset32 = offset;
4418 len32 = buf_size;
4419 align_start = align_end = 0;
4421 if ((align_start = (offset32 & 3))) {
4422 offset32 &= ~3;
4423 len32 += align_start;
4424 if (len32 < 4)
4425 len32 = 4;
4426 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4427 return rc;
4430 if (len32 & 3) {
4431 align_end = 4 - (len32 & 3);
4432 len32 += align_end;
4433 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4434 return rc;
4437 if (align_start || align_end) {
4438 align_buf = kmalloc(len32, GFP_KERNEL);
4439 if (align_buf == NULL)
4440 return -ENOMEM;
4441 if (align_start) {
4442 memcpy(align_buf, start, 4);
4444 if (align_end) {
4445 memcpy(align_buf + len32 - 4, end, 4);
4447 memcpy(align_buf + align_start, data_buf, buf_size);
4448 buf = align_buf;
4451 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4452 flash_buffer = kmalloc(264, GFP_KERNEL);
4453 if (flash_buffer == NULL) {
4454 rc = -ENOMEM;
4455 goto nvram_write_end;
4459 written = 0;
4460 while ((written < len32) && (rc == 0)) {
4461 u32 page_start, page_end, data_start, data_end;
4462 u32 addr, cmd_flags;
4463 int i;
4465 /* Find the page_start addr */
4466 page_start = offset32 + written;
4467 page_start -= (page_start % bp->flash_info->page_size);
4468 /* Find the page_end addr */
4469 page_end = page_start + bp->flash_info->page_size;
4470 /* Find the data_start addr */
4471 data_start = (written == 0) ? offset32 : page_start;
4472 /* Find the data_end addr */
4473 data_end = (page_end > offset32 + len32) ?
4474 (offset32 + len32) : page_end;
4476 /* Request access to the flash interface. */
4477 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4478 goto nvram_write_end;
4480 /* Enable access to flash interface */
4481 bnx2_enable_nvram_access(bp);
4483 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4484 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4485 int j;
4487 /* Read the whole page into the buffer
4488 * (non-buffered flash only) */
4489 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4490 if (j == (bp->flash_info->page_size - 4)) {
4491 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4493 rc = bnx2_nvram_read_dword(bp,
4494 page_start + j,
4495 &flash_buffer[j],
4496 cmd_flags);
4498 if (rc)
4499 goto nvram_write_end;
4501 cmd_flags = 0;
4505 /* Enable writes to flash interface (unlock write-protect) */
4506 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4507 goto nvram_write_end;
4509 /* Loop to write back the buffer data from page_start to
4510 * data_start */
4511 i = 0;
4512 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4513 /* Erase the page */
4514 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4515 goto nvram_write_end;
4517 /* Re-enable writes for the actual data write */
4518 bnx2_enable_nvram_write(bp);
4520 for (addr = page_start; addr < data_start;
4521 addr += 4, i += 4) {
4523 rc = bnx2_nvram_write_dword(bp, addr,
4524 &flash_buffer[i], cmd_flags);
4526 if (rc != 0)
4527 goto nvram_write_end;
4529 cmd_flags = 0;
4533 /* Loop to write the new data from data_start to data_end */
4534 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4535 if ((addr == page_end - 4) ||
4536 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4537 (addr == data_end - 4))) {
4539 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4541 rc = bnx2_nvram_write_dword(bp, addr, buf,
4542 cmd_flags);
4544 if (rc != 0)
4545 goto nvram_write_end;
4547 cmd_flags = 0;
4548 buf += 4;
4551 /* Loop to write back the buffer data from data_end
4552 * to page_end */
4553 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4554 for (addr = data_end; addr < page_end;
4555 addr += 4, i += 4) {
4557 if (addr == page_end-4) {
4558 cmd_flags = BNX2_NVM_COMMAND_LAST;
4560 rc = bnx2_nvram_write_dword(bp, addr,
4561 &flash_buffer[i], cmd_flags);
4563 if (rc != 0)
4564 goto nvram_write_end;
4566 cmd_flags = 0;
4570 /* Disable writes to flash interface (lock write-protect) */
4571 bnx2_disable_nvram_write(bp);
4573 /* Disable access to flash interface */
4574 bnx2_disable_nvram_access(bp);
4575 bnx2_release_nvram_lock(bp);
4577 /* Increment written */
4578 written += data_end - data_start;
4581 nvram_write_end:
4582 kfree(flash_buffer);
4583 kfree(align_buf);
4584 return rc;
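/* bnx2_init_fw_cap() implements a small capability handshake with the
 * bootcode: the driver reads BNX2_FW_CAP_MB, checks the signature bits, and
 * acknowledges the features it will actually use by writing them back to
 * BNX2_DRV_ACK_CAP_MB, e.g. roughly:
 *
 *	sig = BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
 *	bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
 */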
4587 static void
4588 bnx2_init_fw_cap(struct bnx2 *bp)
4590 u32 val, sig = 0;
4592 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4593 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4595 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4596 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4598 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4599 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4600 return;
4602 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4603 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4604 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4607 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4608 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4609 u32 link;
4611 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4613 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4614 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4615 bp->phy_port = PORT_FIBRE;
4616 else
4617 bp->phy_port = PORT_TP;
4619 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4620 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4623 if (netif_running(bp->dev) && sig)
4624 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4627 static void
4628 bnx2_setup_msix_tbl(struct bnx2 *bp)
4630 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4632 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4633 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4636 static int
4637 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4639 u32 val;
4640 int i, rc = 0;
4641 u8 old_port;
4643 /* Wait for the current PCI transaction to complete before
4644 * issuing a reset. */
4645 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4646 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
4647 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4648 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4649 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4650 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4651 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4652 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4653 udelay(5);
4654 } else { /* 5709 */
4655 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4656 val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4657 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4658 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4660 for (i = 0; i < 100; i++) {
4661 msleep(1);
4662 val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4663 if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4664 break;
4668 /* Wait for the firmware to tell us it is ok to issue a reset. */
4669 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4671 /* Deposit a driver reset signature so the firmware knows that
4672 * this is a soft reset. */
4673 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4674 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4676 /* Do a dummy read to force the chip to complete all current transactions
4677 * before we issue a reset. */
4678 val = REG_RD(bp, BNX2_MISC_ID);
4680 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4681 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4682 REG_RD(bp, BNX2_MISC_COMMAND);
4683 udelay(5);
4685 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4686 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4688 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4690 } else {
4691 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4692 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4693 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4695 /* Chip reset. */
4696 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4698 /* Reading back any register after chip reset will hang the
4699 * bus on 5706 A0 and A1. The msleep below provides plenty
4700 * of margin for write posting. */
4702 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4703 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4704 msleep(20);
4706 /* Reset takes approximately 30 usec */
4707 for (i = 0; i < 10; i++) {
4708 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4709 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4710 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4711 break;
4712 udelay(10);
4715 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4716 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4717 pr_err("Chip reset did not complete\n");
4718 return -EBUSY;
4722 /* Make sure byte swapping is properly configured. */
4723 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4724 if (val != 0x01020304) {
4725 pr_err("Chip not in correct endian mode\n");
4726 return -ENODEV;
4729 /* Wait for the firmware to finish its initialization. */
4730 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4731 if (rc)
4732 return rc;
4734 spin_lock_bh(&bp->phy_lock);
4735 old_port = bp->phy_port;
4736 bnx2_init_fw_cap(bp);
4737 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4738 old_port != bp->phy_port)
4739 bnx2_set_default_remote_link(bp);
4740 spin_unlock_bh(&bp->phy_lock);
4742 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4743 /* Adjust the voltage regulator two steps lower. The default
4744 * of this register is 0x0000000e. */
4745 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4747 /* Remove bad rbuf memory from the free pool. */
4748 rc = bnx2_alloc_bad_rbuf(bp);
4751 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4752 bnx2_setup_msix_tbl(bp);
4753 /* Prevent MSIX table reads and writes from timing out */
4754 REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
4755 BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4758 return rc;
4761 static int
4762 bnx2_init_chip(struct bnx2 *bp)
4764 u32 val, mtu;
4765 int rc, i;
4767 /* Make sure the interrupt is not active. */
4768 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4770 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4771 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4772 #ifdef __BIG_ENDIAN
4773 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4774 #endif
4775 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4776 DMA_READ_CHANS << 12 |
4777 DMA_WRITE_CHANS << 16;
4779 val |= (0x2 << 20) | (1 << 11);
4781 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4782 val |= (1 << 23);
4784 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4785 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4786 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4788 REG_WR(bp, BNX2_DMA_CONFIG, val);
4790 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4791 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4792 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4793 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4796 if (bp->flags & BNX2_FLAG_PCIX) {
4797 u16 val16;
4799 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4800 &val16);
4801 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4802 val16 & ~PCI_X_CMD_ERO);
4805 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4806 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4807 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4808 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4810 /* Initialize context mapping and zero out the quick contexts. The
4811 * context block must have already been enabled. */
4812 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4813 rc = bnx2_init_5709_context(bp);
4814 if (rc)
4815 return rc;
4816 } else
4817 bnx2_init_context(bp);
4819 if ((rc = bnx2_init_cpus(bp)) != 0)
4820 return rc;
4822 bnx2_init_nvram(bp);
4824 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4826 val = REG_RD(bp, BNX2_MQ_CONFIG);
4827 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4828 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4829 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4830 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4831 if (CHIP_REV(bp) == CHIP_REV_Ax)
4832 val |= BNX2_MQ_CONFIG_HALT_DIS;
4835 REG_WR(bp, BNX2_MQ_CONFIG, val);
4837 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4838 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4839 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4841 val = (BCM_PAGE_BITS - 8) << 24;
4842 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4844 /* Configure page size. */
4845 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4846 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4847 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4848 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4850 val = bp->mac_addr[0] +
4851 (bp->mac_addr[1] << 8) +
4852 (bp->mac_addr[2] << 16) +
4853 bp->mac_addr[3] +
4854 (bp->mac_addr[4] << 8) +
4855 (bp->mac_addr[5] << 16);
4856 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
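/* The Ethernet backoff seed is just a cheap hash of the station address;
 * e.g. for the made-up MAC 00:10:18:01:02:03 the value written above is
 * 0x00 + (0x10 << 8) + (0x18 << 16) + 0x01 + (0x02 << 8) + (0x03 << 16)
 * = 0x1b1201 (illustrative arithmetic only).
 */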
4858 /* Program the MTU. Also include 4 bytes for CRC32. */
4859 mtu = bp->dev->mtu;
4860 val = mtu + ETH_HLEN + ETH_FCS_LEN;
4861 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4862 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4863 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
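/* Example: with a standard 1500-byte MTU the register above holds
 * 1500 + 14 + 4 = 1518; a 9000-byte jumbo MTU yields 9018 which exceeds
 * MAX_ETHERNET_PACKET_SIZE + 4, so the JUMBO_ENA bit is OR'ed in as well
 * (numbers for illustration).
 */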
4865 if (mtu < 1500)
4866 mtu = 1500;
4868 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4869 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4870 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4872 memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4873 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4874 bp->bnx2_napi[i].last_status_idx = 0;
4876 bp->idle_chk_status_idx = 0xffff;
4878 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4880 /* Set up how to generate a link change interrupt. */
4881 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4883 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4884 (u64) bp->status_blk_mapping & 0xffffffff);
4885 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4887 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4888 (u64) bp->stats_blk_mapping & 0xffffffff);
4889 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4890 (u64) bp->stats_blk_mapping >> 32);
4892 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4893 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4895 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4896 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4898 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4899 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4901 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4903 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4905 REG_WR(bp, BNX2_HC_COM_TICKS,
4906 (bp->com_ticks_int << 16) | bp->com_ticks);
4908 REG_WR(bp, BNX2_HC_CMD_TICKS,
4909 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4911 if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4912 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4913 else
4914 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4915 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4917 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4918 val = BNX2_HC_CONFIG_COLLECT_STATS;
4919 else {
4920 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4921 BNX2_HC_CONFIG_COLLECT_STATS;
4924 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4925 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4926 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4928 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4931 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4932 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4934 REG_WR(bp, BNX2_HC_CONFIG, val);
4936 if (bp->rx_ticks < 25)
4937 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
4938 else
4939 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
4941 for (i = 1; i < bp->irq_nvecs; i++) {
4942 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4943 BNX2_HC_SB_CONFIG_1;
4945 REG_WR(bp, base,
4946 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4947 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4948 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4950 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4951 (bp->tx_quick_cons_trip_int << 16) |
4952 bp->tx_quick_cons_trip);
4954 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4955 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4957 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4958 (bp->rx_quick_cons_trip_int << 16) |
4959 bp->rx_quick_cons_trip);
4961 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4962 (bp->rx_ticks_int << 16) | bp->rx_ticks);
4965 /* Clear internal stats counters. */
4966 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4968 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4970 /* Initialize the receive filter. */
4971 bnx2_set_rx_mode(bp->dev);
4973 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4974 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4975 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4976 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4978 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4979 1, 0);
4981 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4982 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4984 udelay(20);
4986 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4988 return rc;
4991 static void
4992 bnx2_clear_ring_states(struct bnx2 *bp)
4994 struct bnx2_napi *bnapi;
4995 struct bnx2_tx_ring_info *txr;
4996 struct bnx2_rx_ring_info *rxr;
4997 int i;
4999 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5000 bnapi = &bp->bnx2_napi[i];
5001 txr = &bnapi->tx_ring;
5002 rxr = &bnapi->rx_ring;
5004 txr->tx_cons = 0;
5005 txr->hw_tx_cons = 0;
5006 rxr->rx_prod_bseq = 0;
5007 rxr->rx_prod = 0;
5008 rxr->rx_cons = 0;
5009 rxr->rx_pg_prod = 0;
5010 rxr->rx_pg_cons = 0;
5014 static void
5015 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5017 u32 val, offset0, offset1, offset2, offset3;
5018 u32 cid_addr = GET_CID_ADDR(cid);
5020 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5021 offset0 = BNX2_L2CTX_TYPE_XI;
5022 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5023 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5024 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5025 } else {
5026 offset0 = BNX2_L2CTX_TYPE;
5027 offset1 = BNX2_L2CTX_CMD_TYPE;
5028 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5029 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5031 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5032 bnx2_ctx_wr(bp, cid_addr, offset0, val);
5034 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5035 bnx2_ctx_wr(bp, cid_addr, offset1, val);
5037 val = (u64) txr->tx_desc_mapping >> 32;
5038 bnx2_ctx_wr(bp, cid_addr, offset2, val);
5040 val = (u64) txr->tx_desc_mapping & 0xffffffff;
5041 bnx2_ctx_wr(bp, cid_addr, offset3, val);
5044 static void
5045 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5047 struct tx_bd *txbd;
5048 u32 cid = TX_CID;
5049 struct bnx2_napi *bnapi;
5050 struct bnx2_tx_ring_info *txr;
5052 bnapi = &bp->bnx2_napi[ring_num];
5053 txr = &bnapi->tx_ring;
5055 if (ring_num == 0)
5056 cid = TX_CID;
5057 else
5058 cid = TX_TSS_CID + ring_num - 1;
5060 bp->tx_wake_thresh = bp->tx_ring_size / 2;
5062 txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5064 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5065 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
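/* The BD at index MAX_TX_DESC_CNT is not a data descriptor; it is the
 * "next page" pointer of a chained ring, and here it simply points back at
 * the start of the same page, so the hardware wraps from the last usable
 * entry to entry 0 automatically.
 */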
5067 txr->tx_prod = 0;
5068 txr->tx_prod_bseq = 0;
5070 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5071 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5073 bnx2_init_tx_context(bp, cid, txr);
5076 static void
5077 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5078 int num_rings)
5080 int i;
5081 struct rx_bd *rxbd;
5083 for (i = 0; i < num_rings; i++) {
5084 int j;
5086 rxbd = &rx_ring[i][0];
5087 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5088 rxbd->rx_bd_len = buf_size;
5089 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5091 if (i == (num_rings - 1))
5092 j = 0;
5093 else
5094 j = i + 1;
5095 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5096 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
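/* Each page of the chained rx ring uses its final BD as a link: with
 * num_rings == 2, for instance, the last BD of page 0 carries dma[1] and
 * the last BD of page 1 carries dma[0], so producer and consumer indices
 * can wrap around the whole chain (illustrative example).
 */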
5100 static void
5101 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5103 int i;
5104 u16 prod, ring_prod;
5105 u32 cid, rx_cid_addr, val;
5106 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5107 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5109 if (ring_num == 0)
5110 cid = RX_CID;
5111 else
5112 cid = RX_RSS_CID + ring_num - 1;
5114 rx_cid_addr = GET_CID_ADDR(cid);
5116 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5117 bp->rx_buf_use_size, bp->rx_max_ring);
5119 bnx2_init_rx_context(bp, cid);
5121 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5122 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
5123 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5126 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5127 if (bp->rx_pg_ring_size) {
5128 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5129 rxr->rx_pg_desc_mapping,
5130 PAGE_SIZE, bp->rx_max_pg_ring);
5131 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5132 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5133 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5134 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5136 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5137 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5139 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5140 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5142 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5143 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5146 val = (u64) rxr->rx_desc_mapping[0] >> 32;
5147 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5149 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5150 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5152 ring_prod = prod = rxr->rx_pg_prod;
5153 for (i = 0; i < bp->rx_pg_ring_size; i++) {
5154 if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5155 netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5156 ring_num, i, bp->rx_pg_ring_size);
5157 break;
5159 prod = NEXT_RX_BD(prod);
5160 ring_prod = RX_PG_RING_IDX(prod);
5162 rxr->rx_pg_prod = prod;
5164 ring_prod = prod = rxr->rx_prod;
5165 for (i = 0; i < bp->rx_ring_size; i++) {
5166 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5167 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5168 ring_num, i, bp->rx_ring_size);
5169 break;
5171 prod = NEXT_RX_BD(prod);
5172 ring_prod = RX_RING_IDX(prod);
5174 rxr->rx_prod = prod;
5176 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5177 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5178 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5180 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5181 REG_WR16(bp, rxr->rx_bidx_addr, prod);
5183 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5186 static void
5187 bnx2_init_all_rings(struct bnx2 *bp)
5189 int i;
5190 u32 val;
5192 bnx2_clear_ring_states(bp);
5194 REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5195 for (i = 0; i < bp->num_tx_rings; i++)
5196 bnx2_init_tx_ring(bp, i);
5198 if (bp->num_tx_rings > 1)
5199 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5200 (TX_TSS_CID << 7));
5202 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5203 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5205 for (i = 0; i < bp->num_rx_rings; i++)
5206 bnx2_init_rx_ring(bp, i);
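/* When more than one rx ring is active, the code below builds the RSS
 * indirection table eight entries at a time: each entry is a 4-bit value
 * (i % (num_rx_rings - 1)) packed into tbl_32 at bit position (i % 8) * 4,
 * and every eighth entry the accumulated word is pushed out through
 * BNX2_RLUP_RSS_DATA/COMMAND.  With three rx rings, for example, the
 * entries simply alternate 0, 1, 0, 1, ... (illustrative).
 */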
5208 if (bp->num_rx_rings > 1) {
5209 u32 tbl_32 = 0;
5211 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5212 int shift = (i % 8) << 2;
5214 tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5215 if ((i % 8) == 7) {
5216 REG_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5217 REG_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5218 BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5219 BNX2_RLUP_RSS_COMMAND_WRITE |
5220 BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5221 tbl_32 = 0;
5225 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5226 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5228 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5233 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5235 u32 max, num_rings = 1;
5237 while (ring_size > MAX_RX_DESC_CNT) {
5238 ring_size -= MAX_RX_DESC_CNT;
5239 num_rings++;
5241 /* round to next power of 2 */
5242 max = max_size;
5243 while ((max & num_rings) == 0)
5244 max >>= 1;
5246 if (num_rings != max)
5247 max <<= 1;
5249 return max;
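/* Rough worked example: a requested ring_size of 700 with 255 usable BDs
 * per page needs 3 pages, and the rounding above turns that into the next
 * power of two within max_size, i.e. 4 (this assumes MAX_RX_DESC_CNT of
 * 255 and a max_size of 8; both values are purely illustrative here).
 */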
5252 static void
5253 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5255 u32 rx_size, rx_space, jumbo_size;
5257 /* 8 for CRC and VLAN */
5258 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5260 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5261 sizeof(struct skb_shared_info);
5263 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5264 bp->rx_pg_ring_size = 0;
5265 bp->rx_max_pg_ring = 0;
5266 bp->rx_max_pg_ring_idx = 0;
5267 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5268 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5270 jumbo_size = size * pages;
5271 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5272 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5274 bp->rx_pg_ring_size = jumbo_size;
5275 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5276 MAX_RX_PG_RINGS);
5277 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5278 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5279 bp->rx_copy_thresh = 0;
5282 bp->rx_buf_use_size = rx_size;
5283 /* hw alignment */
5284 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5285 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5286 bp->rx_ring_size = size;
5287 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5288 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5291 static void
5292 bnx2_free_tx_skbs(struct bnx2 *bp)
5294 int i;
5296 for (i = 0; i < bp->num_tx_rings; i++) {
5297 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5298 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5299 int j;
5301 if (txr->tx_buf_ring == NULL)
5302 continue;
5304 for (j = 0; j < TX_DESC_CNT; ) {
5305 struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5306 struct sk_buff *skb = tx_buf->skb;
5307 int k, last;
5309 if (skb == NULL) {
5310 j++;
5311 continue;
5314 dma_unmap_single(&bp->pdev->dev,
5315 dma_unmap_addr(tx_buf, mapping),
5316 skb_headlen(skb),
5317 PCI_DMA_TODEVICE);
5319 tx_buf->skb = NULL;
5321 last = tx_buf->nr_frags;
5322 j++;
5323 for (k = 0; k < last; k++, j++) {
5324 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
5325 dma_unmap_page(&bp->pdev->dev,
5326 dma_unmap_addr(tx_buf, mapping),
5327 skb_shinfo(skb)->frags[k].size,
5328 PCI_DMA_TODEVICE);
5330 dev_kfree_skb(skb);
5335 static void
5336 bnx2_free_rx_skbs(struct bnx2 *bp)
5338 int i;
5340 for (i = 0; i < bp->num_rx_rings; i++) {
5341 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5342 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5343 int j;
5345 if (rxr->rx_buf_ring == NULL)
5346 return;
5348 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5349 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5350 struct sk_buff *skb = rx_buf->skb;
5352 if (skb == NULL)
5353 continue;
5355 dma_unmap_single(&bp->pdev->dev,
5356 dma_unmap_addr(rx_buf, mapping),
5357 bp->rx_buf_use_size,
5358 PCI_DMA_FROMDEVICE);
5360 rx_buf->skb = NULL;
5362 dev_kfree_skb(skb);
5364 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5365 bnx2_free_rx_page(bp, rxr, j);
5369 static void
5370 bnx2_free_skbs(struct bnx2 *bp)
5372 bnx2_free_tx_skbs(bp);
5373 bnx2_free_rx_skbs(bp);
5376 static int
5377 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5379 int rc;
5381 rc = bnx2_reset_chip(bp, reset_code);
5382 bnx2_free_skbs(bp);
5383 if (rc)
5384 return rc;
5386 if ((rc = bnx2_init_chip(bp)) != 0)
5387 return rc;
5389 bnx2_init_all_rings(bp);
5390 return 0;
5393 static int
5394 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5396 int rc;
5398 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5399 return rc;
5401 spin_lock_bh(&bp->phy_lock);
5402 bnx2_init_phy(bp, reset_phy);
5403 bnx2_set_link(bp);
5404 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5405 bnx2_remote_phy_event(bp);
5406 spin_unlock_bh(&bp->phy_lock);
5407 return 0;
5410 static int
5411 bnx2_shutdown_chip(struct bnx2 *bp)
5413 u32 reset_code;
5415 if (bp->flags & BNX2_FLAG_NO_WOL)
5416 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5417 else if (bp->wol)
5418 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5419 else
5420 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5422 return bnx2_reset_chip(bp, reset_code);
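/* The register self-test below probes each reg_tbl[] entry twice: it writes
 * 0 and then 0xffffffff, checking that bits in rw_mask read back exactly as
 * written while bits in ro_mask keep their saved value, roughly:
 *
 *	writel(0xffffffff, bp->regview + offset);
 *	val = readl(bp->regview + offset);
 *	ok = ((val & rw_mask) == rw_mask) &&
 *	     ((val & ro_mask) == (save_val & ro_mask));
 *
 * The original register contents are restored afterwards in either case.
 */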
5425 static int
5426 bnx2_test_registers(struct bnx2 *bp)
5428 int ret;
5429 int i, is_5709;
5430 static const struct {
5431 u16 offset;
5432 u16 flags;
5433 #define BNX2_FL_NOT_5709 1
5434 u32 rw_mask;
5435 u32 ro_mask;
5436 } reg_tbl[] = {
5437 { 0x006c, 0, 0x00000000, 0x0000003f },
5438 { 0x0090, 0, 0xffffffff, 0x00000000 },
5439 { 0x0094, 0, 0x00000000, 0x00000000 },
5441 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5442 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5443 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5444 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5445 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5446 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5447 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5448 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5449 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5451 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5452 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5453 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5454 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5455 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5456 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5458 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5459 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5460 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5462 { 0x1000, 0, 0x00000000, 0x00000001 },
5463 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5465 { 0x1408, 0, 0x01c00800, 0x00000000 },
5466 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5467 { 0x14a8, 0, 0x00000000, 0x000001ff },
5468 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5469 { 0x14b0, 0, 0x00000002, 0x00000001 },
5470 { 0x14b8, 0, 0x00000000, 0x00000000 },
5471 { 0x14c0, 0, 0x00000000, 0x00000009 },
5472 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5473 { 0x14cc, 0, 0x00000000, 0x00000001 },
5474 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5476 { 0x1800, 0, 0x00000000, 0x00000001 },
5477 { 0x1804, 0, 0x00000000, 0x00000003 },
5479 { 0x2800, 0, 0x00000000, 0x00000001 },
5480 { 0x2804, 0, 0x00000000, 0x00003f01 },
5481 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5482 { 0x2810, 0, 0xffff0000, 0x00000000 },
5483 { 0x2814, 0, 0xffff0000, 0x00000000 },
5484 { 0x2818, 0, 0xffff0000, 0x00000000 },
5485 { 0x281c, 0, 0xffff0000, 0x00000000 },
5486 { 0x2834, 0, 0xffffffff, 0x00000000 },
5487 { 0x2840, 0, 0x00000000, 0xffffffff },
5488 { 0x2844, 0, 0x00000000, 0xffffffff },
5489 { 0x2848, 0, 0xffffffff, 0x00000000 },
5490 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5492 { 0x2c00, 0, 0x00000000, 0x00000011 },
5493 { 0x2c04, 0, 0x00000000, 0x00030007 },
5495 { 0x3c00, 0, 0x00000000, 0x00000001 },
5496 { 0x3c04, 0, 0x00000000, 0x00070000 },
5497 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5498 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5499 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5500 { 0x3c14, 0, 0x00000000, 0xffffffff },
5501 { 0x3c18, 0, 0x00000000, 0xffffffff },
5502 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5503 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5505 { 0x5004, 0, 0x00000000, 0x0000007f },
5506 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5508 { 0x5c00, 0, 0x00000000, 0x00000001 },
5509 { 0x5c04, 0, 0x00000000, 0x0003000f },
5510 { 0x5c08, 0, 0x00000003, 0x00000000 },
5511 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5512 { 0x5c10, 0, 0x00000000, 0xffffffff },
5513 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5514 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5515 { 0x5c88, 0, 0x00000000, 0x00077373 },
5516 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5518 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5519 { 0x680c, 0, 0xffffffff, 0x00000000 },
5520 { 0x6810, 0, 0xffffffff, 0x00000000 },
5521 { 0x6814, 0, 0xffffffff, 0x00000000 },
5522 { 0x6818, 0, 0xffffffff, 0x00000000 },
5523 { 0x681c, 0, 0xffffffff, 0x00000000 },
5524 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5525 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5526 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5527 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5528 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5529 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5530 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5531 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5532 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5533 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5534 { 0x684c, 0, 0xffffffff, 0x00000000 },
5535 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5536 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5537 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5538 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5539 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5540 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5542 { 0xffff, 0, 0x00000000, 0x00000000 },
5545 ret = 0;
5546 is_5709 = 0;
5547 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5548 is_5709 = 1;
5550 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5551 u32 offset, rw_mask, ro_mask, save_val, val;
5552 u16 flags = reg_tbl[i].flags;
5554 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5555 continue;
5557 offset = (u32) reg_tbl[i].offset;
5558 rw_mask = reg_tbl[i].rw_mask;
5559 ro_mask = reg_tbl[i].ro_mask;
5561 save_val = readl(bp->regview + offset);
5563 writel(0, bp->regview + offset);
5565 val = readl(bp->regview + offset);
5566 if ((val & rw_mask) != 0) {
5567 goto reg_test_err;
5570 if ((val & ro_mask) != (save_val & ro_mask)) {
5571 goto reg_test_err;
5574 writel(0xffffffff, bp->regview + offset);
5576 val = readl(bp->regview + offset);
5577 if ((val & rw_mask) != rw_mask) {
5578 goto reg_test_err;
5581 if ((val & ro_mask) != (save_val & ro_mask)) {
5582 goto reg_test_err;
5585 writel(save_val, bp->regview + offset);
5586 continue;
5588 reg_test_err:
5589 writel(save_val, bp->regview + offset);
5590 ret = -ENODEV;
5591 break;
5593 return ret;
5596 static int
5597 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5599 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5600 0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
5601 int i;
5603 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5604 u32 offset;
5606 for (offset = 0; offset < size; offset += 4) {
5608 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5610 if (bnx2_reg_rd_ind(bp, start + offset) !=
5611 test_pattern[i]) {
5612 return -ENODEV;
5616 return 0;
5619 static int
5620 bnx2_test_memory(struct bnx2 *bp)
5622 int ret = 0;
5623 int i;
5624 static struct mem_entry {
5625 u32 offset;
5626 u32 len;
5627 } mem_tbl_5706[] = {
5628 { 0x60000, 0x4000 },
5629 { 0xa0000, 0x3000 },
5630 { 0xe0000, 0x4000 },
5631 { 0x120000, 0x4000 },
5632 { 0x1a0000, 0x4000 },
5633 { 0x160000, 0x4000 },
5634 { 0xffffffff, 0 },
5636 mem_tbl_5709[] = {
5637 { 0x60000, 0x4000 },
5638 { 0xa0000, 0x3000 },
5639 { 0xe0000, 0x4000 },
5640 { 0x120000, 0x4000 },
5641 { 0x1a0000, 0x4000 },
5642 { 0xffffffff, 0 },
5644 struct mem_entry *mem_tbl;
5646 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5647 mem_tbl = mem_tbl_5709;
5648 else
5649 mem_tbl = mem_tbl_5706;
5651 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5652 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5653 mem_tbl[i].len)) != 0) {
5654 return ret;
5658 return ret;
5661 #define BNX2_MAC_LOOPBACK 0
5662 #define BNX2_PHY_LOOPBACK 1
5664 static int
5665 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5667 unsigned int pkt_size, num_pkts, i;
5668 struct sk_buff *skb, *rx_skb;
5669 unsigned char *packet;
5670 u16 rx_start_idx, rx_idx;
5671 dma_addr_t map;
5672 struct tx_bd *txbd;
5673 struct sw_bd *rx_buf;
5674 struct l2_fhdr *rx_hdr;
5675 int ret = -ENODEV;
5676 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5677 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5678 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5680 tx_napi = bnapi;
5682 txr = &tx_napi->tx_ring;
5683 rxr = &bnapi->rx_ring;
5684 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5685 bp->loopback = MAC_LOOPBACK;
5686 bnx2_set_mac_loopback(bp);
5688 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5689 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5690 return 0;
5692 bp->loopback = PHY_LOOPBACK;
5693 bnx2_set_phy_loopback(bp);
5695 else
5696 return -EINVAL;
5698 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5699 skb = netdev_alloc_skb(bp->dev, pkt_size);
5700 if (!skb)
5701 return -ENOMEM;
5702 packet = skb_put(skb, pkt_size);
5703 memcpy(packet, bp->dev->dev_addr, 6);
5704 memset(packet + 6, 0x0, 8);
5705 for (i = 14; i < pkt_size; i++)
5706 packet[i] = (unsigned char) (i & 0xff);
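/* The loopback frame is self-addressed: bytes 0-5 carry the device's own
 * MAC so the RX filter accepts it, bytes 6-13 are zeroed, and every payload
 * byte from offset 14 on is its own index modulo 256, which is what the
 * receive-side check later in this function compares against.
 */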
5708 map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5709 PCI_DMA_TODEVICE);
5710 if (dma_mapping_error(&bp->pdev->dev, map)) {
5711 dev_kfree_skb(skb);
5712 return -EIO;
5715 REG_WR(bp, BNX2_HC_COMMAND,
5716 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5718 REG_RD(bp, BNX2_HC_COMMAND);
5720 udelay(5);
5721 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5723 num_pkts = 0;
5725 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5727 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5728 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5729 txbd->tx_bd_mss_nbytes = pkt_size;
5730 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5732 num_pkts++;
5733 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5734 txr->tx_prod_bseq += pkt_size;
5736 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5737 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5739 udelay(100);
5741 REG_WR(bp, BNX2_HC_COMMAND,
5742 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5744 REG_RD(bp, BNX2_HC_COMMAND);
5746 udelay(5);
5748 dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
5749 dev_kfree_skb(skb);
5751 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5752 goto loopback_test_done;
5754 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5755 if (rx_idx != rx_start_idx + num_pkts) {
5756 goto loopback_test_done;
5759 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5760 rx_skb = rx_buf->skb;
5762 rx_hdr = rx_buf->desc;
5763 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5765 dma_sync_single_for_cpu(&bp->pdev->dev,
5766 dma_unmap_addr(rx_buf, mapping),
5767 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5769 if (rx_hdr->l2_fhdr_status &
5770 (L2_FHDR_ERRORS_BAD_CRC |
5771 L2_FHDR_ERRORS_PHY_DECODE |
5772 L2_FHDR_ERRORS_ALIGNMENT |
5773 L2_FHDR_ERRORS_TOO_SHORT |
5774 L2_FHDR_ERRORS_GIANT_FRAME)) {
5776 goto loopback_test_done;
5779 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5780 goto loopback_test_done;
5783 for (i = 14; i < pkt_size; i++) {
5784 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5785 goto loopback_test_done;
5789 ret = 0;
5791 loopback_test_done:
5792 bp->loopback = 0;
5793 return ret;
5796 #define BNX2_MAC_LOOPBACK_FAILED 1
5797 #define BNX2_PHY_LOOPBACK_FAILED 2
5798 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5799 BNX2_PHY_LOOPBACK_FAILED)
5801 static int
5802 bnx2_test_loopback(struct bnx2 *bp)
5804 int rc = 0;
5806 if (!netif_running(bp->dev))
5807 return BNX2_LOOPBACK_FAILED;
5809 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5810 spin_lock_bh(&bp->phy_lock);
5811 bnx2_init_phy(bp, 1);
5812 spin_unlock_bh(&bp->phy_lock);
5813 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5814 rc |= BNX2_MAC_LOOPBACK_FAILED;
5815 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5816 rc |= BNX2_PHY_LOOPBACK_FAILED;
5817 return rc;
5820 #define NVRAM_SIZE 0x200
5821 #define CRC32_RESIDUAL 0xdebb20e3
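/* The NVRAM config blocks store their CRC32 inline, so running
 * ether_crc_le() over the data plus the stored checksum should always yield
 * the fixed residual 0xdebb20e3 when the block is intact; bnx2_test_nvram()
 * relies on that property instead of recomputing and comparing checksums.
 */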
5823 static int
5824 bnx2_test_nvram(struct bnx2 *bp)
5826 __be32 buf[NVRAM_SIZE / 4];
5827 u8 *data = (u8 *) buf;
5828 int rc = 0;
5829 u32 magic, csum;
5831 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5832 goto test_nvram_done;
5834 magic = be32_to_cpu(buf[0]);
5835 if (magic != 0x669955aa) {
5836 rc = -ENODEV;
5837 goto test_nvram_done;
5840 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5841 goto test_nvram_done;
5843 csum = ether_crc_le(0x100, data);
5844 if (csum != CRC32_RESIDUAL) {
5845 rc = -ENODEV;
5846 goto test_nvram_done;
5849 csum = ether_crc_le(0x100, data + 0x100);
5850 if (csum != CRC32_RESIDUAL) {
5851 rc = -ENODEV;
5854 test_nvram_done:
5855 return rc;
5858 static int
5859 bnx2_test_link(struct bnx2 *bp)
5861 u32 bmsr;
5863 if (!netif_running(bp->dev))
5864 return -ENODEV;
5866 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5867 if (bp->link_up)
5868 return 0;
5869 return -ENODEV;
5871 spin_lock_bh(&bp->phy_lock);
5872 bnx2_enable_bmsr1(bp);
5873 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5874 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5875 bnx2_disable_bmsr1(bp);
5876 spin_unlock_bh(&bp->phy_lock);
5878 if (bmsr & BMSR_LSTATUS) {
5879 return 0;
5881 return -ENODEV;
5884 static int
5885 bnx2_test_intr(struct bnx2 *bp)
5887 int i;
5888 u16 status_idx;
5890 if (!netif_running(bp->dev))
5891 return -ENODEV;
5893 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5895 /* This register is not touched during run-time. */
5896 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5897 REG_RD(bp, BNX2_HC_COMMAND);
5899 for (i = 0; i < 10; i++) {
5900 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5901 status_idx) {
5903 break;
5906 msleep_interruptible(10);
5908 if (i < 10)
5909 return 0;
5911 return -ENODEV;
5914 /* Determine link for parallel detection. */
5915 static int
5916 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5918 u32 mode_ctl, an_dbg, exp;
5920 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5921 return 0;
5923 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5924 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5926 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5927 return 0;
5929 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5930 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5931 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5933 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5934 return 0;
5936 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5937 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5938 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5940 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5941 return 0;
5943 return 1;
5946 static void
5947 bnx2_5706_serdes_timer(struct bnx2 *bp)
5949 int check_link = 1;
5951 spin_lock(&bp->phy_lock);
5952 if (bp->serdes_an_pending) {
5953 bp->serdes_an_pending--;
5954 check_link = 0;
5955 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5956 u32 bmcr;
5958 bp->current_interval = BNX2_TIMER_INTERVAL;
5960 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5962 if (bmcr & BMCR_ANENABLE) {
5963 if (bnx2_5706_serdes_has_link(bp)) {
5964 bmcr &= ~BMCR_ANENABLE;
5965 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5966 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5967 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
5971 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5972 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
5973 u32 phy2;
5975 bnx2_write_phy(bp, 0x17, 0x0f01);
5976 bnx2_read_phy(bp, 0x15, &phy2);
5977 if (phy2 & 0x20) {
5978 u32 bmcr;
5980 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5981 bmcr |= BMCR_ANENABLE;
5982 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5984 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
5986 } else
5987 bp->current_interval = BNX2_TIMER_INTERVAL;
5989 if (check_link) {
5990 u32 val;
5992 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5993 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5994 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5996 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
5997 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
5998 bnx2_5706s_force_link_dn(bp, 1);
5999 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6000 } else
6001 bnx2_set_link(bp);
6002 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6003 bnx2_set_link(bp);
6005 spin_unlock(&bp->phy_lock);
6008 static void
6009 bnx2_5708_serdes_timer(struct bnx2 *bp)
6011 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6012 return;
6014 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6015 bp->serdes_an_pending = 0;
6016 return;
6019 spin_lock(&bp->phy_lock);
6020 if (bp->serdes_an_pending)
6021 bp->serdes_an_pending--;
6022 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6023 u32 bmcr;
6025 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6026 if (bmcr & BMCR_ANENABLE) {
6027 bnx2_enable_forced_2g5(bp);
6028 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6029 } else {
6030 bnx2_disable_forced_2g5(bp);
6031 bp->serdes_an_pending = 2;
6032 bp->current_interval = BNX2_TIMER_INTERVAL;
6035 } else
6036 bp->current_interval = BNX2_TIMER_INTERVAL;
6038 spin_unlock(&bp->phy_lock);
6041 static void
6042 bnx2_timer(unsigned long data)
6044 struct bnx2 *bp = (struct bnx2 *) data;
6046 if (!netif_running(bp->dev))
6047 return;
6049 if (atomic_read(&bp->intr_sem) != 0)
6050 goto bnx2_restart_timer;
6052 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6053 BNX2_FLAG_USING_MSI)
6054 bnx2_chk_missed_msi(bp);
6056 bnx2_send_heart_beat(bp);
6058 bp->stats_blk->stat_FwRxDrop =
6059 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6061 /* Work around occasionally corrupted counters */
6062 if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6063 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6064 BNX2_HC_COMMAND_STATS_NOW);
6066 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6067 if (CHIP_NUM(bp) == CHIP_NUM_5706)
6068 bnx2_5706_serdes_timer(bp);
6069 else
6070 bnx2_5708_serdes_timer(bp);
6073 bnx2_restart_timer:
6074 mod_timer(&bp->timer, jiffies + bp->current_interval);
6077 static int
6078 bnx2_request_irq(struct bnx2 *bp)
6080 unsigned long flags;
6081 struct bnx2_irq *irq;
6082 int rc = 0, i;
6084 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6085 flags = 0;
6086 else
6087 flags = IRQF_SHARED;
6089 for (i = 0; i < bp->irq_nvecs; i++) {
6090 irq = &bp->irq_tbl[i];
6091 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6092 &bp->bnx2_napi[i]);
6093 if (rc)
6094 break;
6095 irq->requested = 1;
6097 return rc;
6100 static void
6101 __bnx2_free_irq(struct bnx2 *bp)
6103 struct bnx2_irq *irq;
6104 int i;
6106 for (i = 0; i < bp->irq_nvecs; i++) {
6107 irq = &bp->irq_tbl[i];
6108 if (irq->requested)
6109 free_irq(irq->vector, &bp->bnx2_napi[i]);
6110 irq->requested = 0;
6114 static void
6115 bnx2_free_irq(struct bnx2 *bp)
6118 __bnx2_free_irq(bp);
6119 if (bp->flags & BNX2_FLAG_USING_MSI)
6120 pci_disable_msi(bp->pdev);
6121 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6122 pci_disable_msix(bp->pdev);
6124 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6127 static void
6128 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6130 int i, total_vecs, rc;
6131 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6132 struct net_device *dev = bp->dev;
6133 const int len = sizeof(bp->irq_tbl[0].name);
6135 bnx2_setup_msix_tbl(bp);
6136 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6137 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6138 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6140 /* Need to flush the previous three writes to ensure MSI-X
6141 * is set up properly */
6142 REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
6144 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6145 msix_ent[i].entry = i;
6146 msix_ent[i].vector = 0;
6149 total_vecs = msix_vecs;
6150 #ifdef BCM_CNIC
6151 total_vecs++;
6152 #endif
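/* pci_enable_msix() (as used in this era of the kernel) returns 0 on
 * success, a negative errno on failure, or a positive count of vectors the
 * platform could actually provide; the loop below retries with that reduced
 * count until it succeeds or drops under BNX2_MIN_MSIX_VEC.
 */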
6153 rc = -ENOSPC;
6154 while (total_vecs >= BNX2_MIN_MSIX_VEC) {
6155 rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
6156 if (rc <= 0)
6157 break;
6158 if (rc > 0)
6159 total_vecs = rc;
6162 if (rc != 0)
6163 return;
6165 msix_vecs = total_vecs;
6166 #ifdef BCM_CNIC
6167 msix_vecs--;
6168 #endif
6169 bp->irq_nvecs = msix_vecs;
6170 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6171 for (i = 0; i < total_vecs; i++) {
6172 bp->irq_tbl[i].vector = msix_ent[i].vector;
6173 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6174 bp->irq_tbl[i].handler = bnx2_msi_1shot;
6178 static int
6179 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6181 int cpus = num_online_cpus();
6182 int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
6184 bp->irq_tbl[0].handler = bnx2_interrupt;
6185 strcpy(bp->irq_tbl[0].name, bp->dev->name);
6186 bp->irq_nvecs = 1;
6187 bp->irq_tbl[0].vector = bp->pdev->irq;
6189 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6190 bnx2_enable_msix(bp, msix_vecs);
6192 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6193 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6194 if (pci_enable_msi(bp->pdev) == 0) {
6195 bp->flags |= BNX2_FLAG_USING_MSI;
6196 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6197 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6198 bp->irq_tbl[0].handler = bnx2_msi_1shot;
6199 } else
6200 bp->irq_tbl[0].handler = bnx2_msi;
6202 bp->irq_tbl[0].vector = bp->pdev->irq;
6206 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6207 netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6209 bp->num_rx_rings = bp->irq_nvecs;
6210 return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6213 /* Called with rtnl_lock */
6214 static int
6215 bnx2_open(struct net_device *dev)
6217 struct bnx2 *bp = netdev_priv(dev);
6218 int rc;
6220 netif_carrier_off(dev);
6222 bnx2_set_power_state(bp, PCI_D0);
6223 bnx2_disable_int(bp);
6225 rc = bnx2_setup_int_mode(bp, disable_msi);
6226 if (rc)
6227 goto open_err;
6228 bnx2_init_napi(bp);
6229 bnx2_napi_enable(bp);
6230 rc = bnx2_alloc_mem(bp);
6231 if (rc)
6232 goto open_err;
6234 rc = bnx2_request_irq(bp);
6235 if (rc)
6236 goto open_err;
6238 rc = bnx2_init_nic(bp, 1);
6239 if (rc)
6240 goto open_err;
6242 mod_timer(&bp->timer, jiffies + bp->current_interval);
6244 atomic_set(&bp->intr_sem, 0);
6246 memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6248 bnx2_enable_int(bp);
6250 if (bp->flags & BNX2_FLAG_USING_MSI) {
6251 /* Test MSI to make sure it is working.
6252 * If the MSI test fails, go back to INTx mode. */
6254 if (bnx2_test_intr(bp) != 0) {
6255 netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6257 bnx2_disable_int(bp);
6258 bnx2_free_irq(bp);
6260 bnx2_setup_int_mode(bp, 1);
6262 rc = bnx2_init_nic(bp, 0);
6264 if (!rc)
6265 rc = bnx2_request_irq(bp);
6267 if (rc) {
6268 del_timer_sync(&bp->timer);
6269 goto open_err;
6271 bnx2_enable_int(bp);
6274 if (bp->flags & BNX2_FLAG_USING_MSI)
6275 netdev_info(dev, "using MSI\n");
6276 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6277 netdev_info(dev, "using MSIX\n");
6279 netif_tx_start_all_queues(dev);
6281 return 0;
6283 open_err:
6284 bnx2_napi_disable(bp);
6285 bnx2_free_skbs(bp);
6286 bnx2_free_irq(bp);
6287 bnx2_free_mem(bp);
6288 bnx2_del_napi(bp);
6289 return rc;
6292 static void
6293 bnx2_reset_task(struct work_struct *work)
6295 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6297 rtnl_lock();
6298 if (!netif_running(bp->dev)) {
6299 rtnl_unlock();
6300 return;
6303 bnx2_netif_stop(bp, true);
6305 bnx2_init_nic(bp, 1);
6307 atomic_set(&bp->intr_sem, 1);
6308 bnx2_netif_start(bp, true);
6309 rtnl_unlock();
6312 static void
6313 bnx2_dump_state(struct bnx2 *bp)
6315 struct net_device *dev = bp->dev;
6316 u32 mcp_p0, mcp_p1, val1, val2;
6318 pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6319 netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6320 atomic_read(&bp->intr_sem), val1);
6321 pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6322 pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6323 netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6324 netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6325 REG_RD(bp, BNX2_EMAC_TX_STATUS),
6326 REG_RD(bp, BNX2_EMAC_RX_STATUS));
6327 netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6328 REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6329 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6330 mcp_p0 = BNX2_MCP_STATE_P0;
6331 mcp_p1 = BNX2_MCP_STATE_P1;
6332 } else {
6333 mcp_p0 = BNX2_MCP_STATE_P0_5708;
6334 mcp_p1 = BNX2_MCP_STATE_P1_5708;
6336 netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
6337 bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
6338 netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6339 REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6340 if (bp->flags & BNX2_FLAG_USING_MSIX)
6341 netdev_err(dev, "DEBUG: PBA[%08x]\n",
6342 REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6345 static void
6346 bnx2_tx_timeout(struct net_device *dev)
6348 struct bnx2 *bp = netdev_priv(dev);
6350 bnx2_dump_state(bp);
6352 /* This allows the netif to be shut down gracefully before resetting */
6353 schedule_work(&bp->reset_task);
6356 /* Called with netif_tx_lock.
6357 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6358 * netif_wake_queue(). */
6360 static netdev_tx_t
6361 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6363 struct bnx2 *bp = netdev_priv(dev);
6364 dma_addr_t mapping;
6365 struct tx_bd *txbd;
6366 struct sw_tx_bd *tx_buf;
6367 u32 len, vlan_tag_flags, last_frag, mss;
6368 u16 prod, ring_prod;
6369 int i;
6370 struct bnx2_napi *bnapi;
6371 struct bnx2_tx_ring_info *txr;
6372 struct netdev_queue *txq;
6374 /* Determine which tx ring we will be placed on */
6375 i = skb_get_queue_mapping(skb);
6376 bnapi = &bp->bnx2_napi[i];
6377 txr = &bnapi->tx_ring;
6378 txq = netdev_get_tx_queue(dev, i);
6380 if (unlikely(bnx2_tx_avail(bp, txr) <
6381 (skb_shinfo(skb)->nr_frags + 1))) {
6382 netif_tx_stop_queue(txq);
6383 netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6385 return NETDEV_TX_BUSY;
6387 len = skb_headlen(skb);
6388 prod = txr->tx_prod;
6389 ring_prod = TX_RING_IDX(prod);
6391 vlan_tag_flags = 0;
6392 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6393 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6396 if (vlan_tx_tag_present(skb)) {
6397 vlan_tag_flags |=
6398 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6401 if ((mss = skb_shinfo(skb)->gso_size)) {
6402 u32 tcp_opt_len;
6403 struct iphdr *iph;
6405 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6407 tcp_opt_len = tcp_optlen(skb);
6409 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6410 u32 tcp_off = skb_transport_offset(skb) -
6411 sizeof(struct ipv6hdr) - ETH_HLEN;
6413 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6414 TX_BD_FLAGS_SW_FLAGS;
6415 if (likely(tcp_off == 0))
6416 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6417 else {
6418 tcp_off >>= 3;
6419 vlan_tag_flags |= ((tcp_off & 0x3) <<
6420 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6421 ((tcp_off & 0x10) <<
6422 TX_BD_FLAGS_TCP6_OFF4_SHL);
6423 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6425 } else {
6426 iph = ip_hdr(skb);
6427 if (tcp_opt_len || (iph->ihl > 5)) {
6428 vlan_tag_flags |= ((iph->ihl - 5) +
6429 (tcp_opt_len >> 2)) << 8;
6432 } else
6433 mss = 0;
6435 mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
6436 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6437 dev_kfree_skb(skb);
6438 return NETDEV_TX_OK;
6441 tx_buf = &txr->tx_buf_ring[ring_prod];
6442 tx_buf->skb = skb;
6443 dma_unmap_addr_set(tx_buf, mapping, mapping);
6445 txbd = &txr->tx_desc_ring[ring_prod];
6447 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6448 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6449 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6450 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6452 last_frag = skb_shinfo(skb)->nr_frags;
6453 tx_buf->nr_frags = last_frag;
6454 tx_buf->is_gso = skb_is_gso(skb);
6456 for (i = 0; i < last_frag; i++) {
6457 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6459 prod = NEXT_TX_BD(prod);
6460 ring_prod = TX_RING_IDX(prod);
6461 txbd = &txr->tx_desc_ring[ring_prod];
6463 len = frag->size;
6464 mapping = dma_map_page(&bp->pdev->dev, frag->page, frag->page_offset,
6465 len, PCI_DMA_TODEVICE);
6466 if (dma_mapping_error(&bp->pdev->dev, mapping))
6467 goto dma_error;
6468 dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6469 mapping);
6471 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6472 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6473 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6474 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6477 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6479 prod = NEXT_TX_BD(prod);
6480 txr->tx_prod_bseq += skb->len;
6482 REG_WR16(bp, txr->tx_bidx_addr, prod);
6483 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6485 mmiowb();
6487 txr->tx_prod = prod;
6489 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6490 netif_tx_stop_queue(txq);
6492 /* netif_tx_stop_queue() must be done before checking
6493 * tx index in bnx2_tx_avail() below, because in
6494 * bnx2_tx_int(), we update tx index before checking for
6495 * netif_tx_queue_stopped().
6497 smp_mb();
6498 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6499 netif_tx_wake_queue(txq);
6502 return NETDEV_TX_OK;
6503 dma_error:
6504 /* save value of frag that failed */
6505 last_frag = i;
6507 /* start back at beginning and unmap skb */
6508 prod = txr->tx_prod;
6509 ring_prod = TX_RING_IDX(prod);
6510 tx_buf = &txr->tx_buf_ring[ring_prod];
6511 tx_buf->skb = NULL;
6512 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6513 skb_headlen(skb), PCI_DMA_TODEVICE);
6515 /* unmap remaining mapped pages */
6516 for (i = 0; i < last_frag; i++) {
6517 prod = NEXT_TX_BD(prod);
6518 ring_prod = TX_RING_IDX(prod);
6519 tx_buf = &txr->tx_buf_ring[ring_prod];
6520 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6521 skb_shinfo(skb)->frags[i].size,
6522 PCI_DMA_TODEVICE);
6525 dev_kfree_skb(skb);
6526 return NETDEV_TX_OK;
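/* The stop/wake handling above is the usual lockless producer/consumer
 * pattern: the producer stops the queue and then re-checks ring space
 * after smp_mb(), while bnx2_tx_int() updates the consumer index before
 * testing netif_tx_queue_stopped().  The barrier ensures at least one
 * side observes the other's update, so the queue is never left stopped
 * while the ring has room above bp->tx_wake_thresh.
 */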
6529 /* Called with rtnl_lock */
6530 static int
6531 bnx2_close(struct net_device *dev)
6533 struct bnx2 *bp = netdev_priv(dev);
6535 cancel_work_sync(&bp->reset_task);
6537 bnx2_disable_int_sync(bp);
6538 bnx2_napi_disable(bp);
6539 del_timer_sync(&bp->timer);
6540 bnx2_shutdown_chip(bp);
6541 bnx2_free_irq(bp);
6542 bnx2_free_skbs(bp);
6543 bnx2_free_mem(bp);
6544 bnx2_del_napi(bp);
6545 bp->link_up = 0;
6546 netif_carrier_off(bp->dev);
6547 bnx2_set_power_state(bp, PCI_D3hot);
6548 return 0;
6551 static void
6552 bnx2_save_stats(struct bnx2 *bp)
6554 u32 *hw_stats = (u32 *) bp->stats_blk;
6555 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6556 int i;
6558 /* The 1st 10 counters are 64-bit counters */
6559 for (i = 0; i < 20; i += 2) {
6560 u32 hi;
6561 u64 lo;
6563 hi = temp_stats[i] + hw_stats[i];
6564 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6565 if (lo > 0xffffffff)
6566 hi++;
6567 temp_stats[i] = hi;
6568 temp_stats[i + 1] = lo & 0xffffffff;
6571 for ( ; i < sizeof(struct statistics_block) / 4; i++)
6572 temp_stats[i] += hw_stats[i];
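/* The first ten counters are 64-bit values stored as hi/lo u32 pairs,
 * hence the loop over 20 words in steps of two.  The lo halves are summed
 * in 64 bits and any overflow is carried into hi: temp lo = 0xfffffffe
 * plus hw lo = 3 gives 0x100000001, so hi is incremented and the stored
 * lo becomes 1.
 */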
6575 #define GET_64BIT_NET_STATS64(ctr) \
6576 (((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6578 #define GET_64BIT_NET_STATS(ctr) \
6579 GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
6580 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6582 #define GET_32BIT_NET_STATS(ctr) \
6583 (unsigned long) (bp->stats_blk->ctr + \
6584 bp->temp_stats_blk->ctr)
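/* A minimal sketch of what GET_64BIT_NET_STATS64 expands to for a counter
 * stored as _hi/_lo halves; bnx2_example_stat64() is a hypothetical name
 * used only for illustration and is not part of the driver.
 */
static inline u64 bnx2_example_stat64(u32 hi, u32 lo)
{
	/* e.g. hi = 0x1, lo = 0x10 gives 0x100000010 */
	return ((u64) hi << 32) + (u64) lo;
}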
6586 static struct rtnl_link_stats64 *
6587 bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6589 struct bnx2 *bp = netdev_priv(dev);
6591 if (bp->stats_blk == NULL)
6592 return net_stats;
6594 net_stats->rx_packets =
6595 GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6596 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6597 GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6599 net_stats->tx_packets =
6600 GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6601 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6602 GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6604 net_stats->rx_bytes =
6605 GET_64BIT_NET_STATS(stat_IfHCInOctets);
6607 net_stats->tx_bytes =
6608 GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6610 net_stats->multicast =
6611 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6613 net_stats->collisions =
6614 GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6616 net_stats->rx_length_errors =
6617 GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6618 GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6620 net_stats->rx_over_errors =
6621 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6622 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6624 net_stats->rx_frame_errors =
6625 GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6627 net_stats->rx_crc_errors =
6628 GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6630 net_stats->rx_errors = net_stats->rx_length_errors +
6631 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6632 net_stats->rx_crc_errors;
6634 net_stats->tx_aborted_errors =
6635 GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6636 GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6638 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6639 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6640 net_stats->tx_carrier_errors = 0;
6641 else {
6642 net_stats->tx_carrier_errors =
6643 GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6646 net_stats->tx_errors =
6647 GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6648 net_stats->tx_aborted_errors +
6649 net_stats->tx_carrier_errors;
6651 net_stats->rx_missed_errors =
6652 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6653 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6654 GET_32BIT_NET_STATS(stat_FwRxDrop);
6656 return net_stats;
6659 /* All ethtool functions called with rtnl_lock */
6661 static int
6662 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6664 struct bnx2 *bp = netdev_priv(dev);
6665 int support_serdes = 0, support_copper = 0;
6667 cmd->supported = SUPPORTED_Autoneg;
6668 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6669 support_serdes = 1;
6670 support_copper = 1;
6671 } else if (bp->phy_port == PORT_FIBRE)
6672 support_serdes = 1;
6673 else
6674 support_copper = 1;
6676 if (support_serdes) {
6677 cmd->supported |= SUPPORTED_1000baseT_Full |
6678 SUPPORTED_FIBRE;
6679 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6680 cmd->supported |= SUPPORTED_2500baseX_Full;
6683 if (support_copper) {
6684 cmd->supported |= SUPPORTED_10baseT_Half |
6685 SUPPORTED_10baseT_Full |
6686 SUPPORTED_100baseT_Half |
6687 SUPPORTED_100baseT_Full |
6688 SUPPORTED_1000baseT_Full |
6689 SUPPORTED_TP;
6693 spin_lock_bh(&bp->phy_lock);
6694 cmd->port = bp->phy_port;
6695 cmd->advertising = bp->advertising;
6697 if (bp->autoneg & AUTONEG_SPEED) {
6698 cmd->autoneg = AUTONEG_ENABLE;
6699 } else {
6700 cmd->autoneg = AUTONEG_DISABLE;
6703 if (netif_carrier_ok(dev)) {
6704 ethtool_cmd_speed_set(cmd, bp->line_speed);
6705 cmd->duplex = bp->duplex;
6707 else {
6708 ethtool_cmd_speed_set(cmd, -1);
6709 cmd->duplex = -1;
6711 spin_unlock_bh(&bp->phy_lock);
6713 cmd->transceiver = XCVR_INTERNAL;
6714 cmd->phy_address = bp->phy_addr;
6716 return 0;
6719 static int
6720 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6722 struct bnx2 *bp = netdev_priv(dev);
6723 u8 autoneg = bp->autoneg;
6724 u8 req_duplex = bp->req_duplex;
6725 u16 req_line_speed = bp->req_line_speed;
6726 u32 advertising = bp->advertising;
6727 int err = -EINVAL;
6729 spin_lock_bh(&bp->phy_lock);
6731 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6732 goto err_out_unlock;
6734 if (cmd->port != bp->phy_port &&
6735 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6736 goto err_out_unlock;
6738 /* If device is down, we can store the settings only if the user
6739 * is setting the currently active port.
6741 if (!netif_running(dev) && cmd->port != bp->phy_port)
6742 goto err_out_unlock;
6744 if (cmd->autoneg == AUTONEG_ENABLE) {
6745 autoneg |= AUTONEG_SPEED;
6747 advertising = cmd->advertising;
6748 if (cmd->port == PORT_TP) {
6749 advertising &= ETHTOOL_ALL_COPPER_SPEED;
6750 if (!advertising)
6751 advertising = ETHTOOL_ALL_COPPER_SPEED;
6752 } else {
6753 advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6754 if (!advertising)
6755 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6757 advertising |= ADVERTISED_Autoneg;
6759 else {
6760 u32 speed = ethtool_cmd_speed(cmd);
6761 if (cmd->port == PORT_FIBRE) {
6762 if ((speed != SPEED_1000 &&
6763 speed != SPEED_2500) ||
6764 (cmd->duplex != DUPLEX_FULL))
6765 goto err_out_unlock;
6767 if (speed == SPEED_2500 &&
6768 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6769 goto err_out_unlock;
6770 } else if (speed == SPEED_1000 || speed == SPEED_2500)
6771 goto err_out_unlock;
6773 autoneg &= ~AUTONEG_SPEED;
6774 req_line_speed = speed;
6775 req_duplex = cmd->duplex;
6776 advertising = 0;
6779 bp->autoneg = autoneg;
6780 bp->advertising = advertising;
6781 bp->req_line_speed = req_line_speed;
6782 bp->req_duplex = req_duplex;
6784 err = 0;
6785 /* If device is down, the new settings will be picked up when it is
6786 * brought up.
6788 if (netif_running(dev))
6789 err = bnx2_setup_phy(bp, cmd->port);
6791 err_out_unlock:
6792 spin_unlock_bh(&bp->phy_lock);
6794 return err;
6797 static void
6798 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6800 struct bnx2 *bp = netdev_priv(dev);
6802 strcpy(info->driver, DRV_MODULE_NAME);
6803 strcpy(info->version, DRV_MODULE_VERSION);
6804 strcpy(info->bus_info, pci_name(bp->pdev));
6805 strcpy(info->fw_version, bp->fw_version);
6808 #define BNX2_REGDUMP_LEN (32 * 1024)
6810 static int
6811 bnx2_get_regs_len(struct net_device *dev)
6813 return BNX2_REGDUMP_LEN;
6816 static void
6817 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6819 u32 *p = _p, i, offset;
6820 u8 *orig_p = _p;
6821 struct bnx2 *bp = netdev_priv(dev);
6822 static const u32 reg_boundaries[] = {
6823 0x0000, 0x0098, 0x0400, 0x045c,
6824 0x0800, 0x0880, 0x0c00, 0x0c10,
6825 0x0c30, 0x0d08, 0x1000, 0x101c,
6826 0x1040, 0x1048, 0x1080, 0x10a4,
6827 0x1400, 0x1490, 0x1498, 0x14f0,
6828 0x1500, 0x155c, 0x1580, 0x15dc,
6829 0x1600, 0x1658, 0x1680, 0x16d8,
6830 0x1800, 0x1820, 0x1840, 0x1854,
6831 0x1880, 0x1894, 0x1900, 0x1984,
6832 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6833 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6834 0x2000, 0x2030, 0x23c0, 0x2400,
6835 0x2800, 0x2820, 0x2830, 0x2850,
6836 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6837 0x3c00, 0x3c94, 0x4000, 0x4010,
6838 0x4080, 0x4090, 0x43c0, 0x4458,
6839 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6840 0x4fc0, 0x5010, 0x53c0, 0x5444,
6841 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6842 0x5fc0, 0x6000, 0x6400, 0x6428,
6843 0x6800, 0x6848, 0x684c, 0x6860,
6844 0x6888, 0x6910, 0x8000
6847 regs->version = 0;
6849 memset(p, 0, BNX2_REGDUMP_LEN);
6851 if (!netif_running(bp->dev))
6852 return;
6854 i = 0;
6855 offset = reg_boundaries[0];
6856 p += offset;
6857 while (offset < BNX2_REGDUMP_LEN) {
6858 *p++ = REG_RD(bp, offset);
6859 offset += 4;
6860 if (offset == reg_boundaries[i + 1]) {
6861 offset = reg_boundaries[i + 2];
6862 p = (u32 *) (orig_p + offset);
6863 i += 2;
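/* reg_boundaries[] is a list of [start, end) offset pairs: registers are
 * read four bytes at a time within each pair, and when an end offset is
 * reached the dump jumps to the next start.  Skipped offsets keep the
 * zeroes written by the memset above, so the dump is always
 * BNX2_REGDUMP_LEN bytes.
 */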
6868 static void
6869 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6871 struct bnx2 *bp = netdev_priv(dev);
6873 if (bp->flags & BNX2_FLAG_NO_WOL) {
6874 wol->supported = 0;
6875 wol->wolopts = 0;
6877 else {
6878 wol->supported = WAKE_MAGIC;
6879 if (bp->wol)
6880 wol->wolopts = WAKE_MAGIC;
6881 else
6882 wol->wolopts = 0;
6884 memset(&wol->sopass, 0, sizeof(wol->sopass));
6887 static int
6888 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6890 struct bnx2 *bp = netdev_priv(dev);
6892 if (wol->wolopts & ~WAKE_MAGIC)
6893 return -EINVAL;
6895 if (wol->wolopts & WAKE_MAGIC) {
6896 if (bp->flags & BNX2_FLAG_NO_WOL)
6897 return -EINVAL;
6899 bp->wol = 1;
6901 else {
6902 bp->wol = 0;
6904 return 0;
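/* From userspace the usual way to exercise this (assuming the standard
 * ethtool utility) is "ethtool -s ethX wol g" to enable magic-packet
 * wake-up and "ethtool -s ethX wol d" to disable it; any option other
 * than WAKE_MAGIC is rejected with -EINVAL above.
 */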
6907 static int
6908 bnx2_nway_reset(struct net_device *dev)
6910 struct bnx2 *bp = netdev_priv(dev);
6911 u32 bmcr;
6913 if (!netif_running(dev))
6914 return -EAGAIN;
6916 if (!(bp->autoneg & AUTONEG_SPEED)) {
6917 return -EINVAL;
6920 spin_lock_bh(&bp->phy_lock);
6922 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6923 int rc;
6925 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6926 spin_unlock_bh(&bp->phy_lock);
6927 return rc;
6930 /* Force a link-down event that the link partner can see */
6931 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6932 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6933 spin_unlock_bh(&bp->phy_lock);
6935 msleep(20);
6937 spin_lock_bh(&bp->phy_lock);
6939 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
6940 bp->serdes_an_pending = 1;
6941 mod_timer(&bp->timer, jiffies + bp->current_interval);
6944 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6945 bmcr &= ~BMCR_LOOPBACK;
6946 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6948 spin_unlock_bh(&bp->phy_lock);
6950 return 0;
6953 static u32
6954 bnx2_get_link(struct net_device *dev)
6956 struct bnx2 *bp = netdev_priv(dev);
6958 return bp->link_up;
6961 static int
6962 bnx2_get_eeprom_len(struct net_device *dev)
6964 struct bnx2 *bp = netdev_priv(dev);
6966 if (bp->flash_info == NULL)
6967 return 0;
6969 return (int) bp->flash_size;
6972 static int
6973 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6974 u8 *eebuf)
6976 struct bnx2 *bp = netdev_priv(dev);
6977 int rc;
6979 if (!netif_running(dev))
6980 return -EAGAIN;
6982 /* parameters already validated in ethtool_get_eeprom */
6984 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6986 return rc;
6989 static int
6990 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6991 u8 *eebuf)
6993 struct bnx2 *bp = netdev_priv(dev);
6994 int rc;
6996 if (!netif_running(dev))
6997 return -EAGAIN;
6999 /* parameters already validated in ethtool_set_eeprom */
7001 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7003 return rc;
7006 static int
7007 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7009 struct bnx2 *bp = netdev_priv(dev);
7011 memset(coal, 0, sizeof(struct ethtool_coalesce));
7013 coal->rx_coalesce_usecs = bp->rx_ticks;
7014 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7015 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7016 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7018 coal->tx_coalesce_usecs = bp->tx_ticks;
7019 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7020 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7021 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7023 coal->stats_block_coalesce_usecs = bp->stats_ticks;
7025 return 0;
7028 static int
7029 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7031 struct bnx2 *bp = netdev_priv(dev);
7033 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7034 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7036 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7037 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7039 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7040 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7042 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7043 if (bp->rx_quick_cons_trip_int > 0xff)
7044 bp->rx_quick_cons_trip_int = 0xff;
7046 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7047 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7049 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7050 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7052 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7053 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7055 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7056 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7057 0xff;
7059 bp->stats_ticks = coal->stats_block_coalesce_usecs;
7060 if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7061 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7062 bp->stats_ticks = USEC_PER_SEC;
7064 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7065 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7066 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7068 if (netif_running(bp->dev)) {
7069 bnx2_netif_stop(bp, true);
7070 bnx2_init_nic(bp, 0);
7071 bnx2_netif_start(bp, true);
7074 return 0;
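/* The coalescing parameters are clamped rather than rejected: tick values
 * are limited to 0x3ff usec and frame counts to 0xff.  A request such as
 * "ethtool -C ethX rx-usecs 18 rx-frames 12" (assuming the standard
 * ethtool utility) maps onto rx_ticks and rx_quick_cons_trip, and the NIC
 * is re-initialized above when the interface is running so the new values
 * take effect immediately.
 */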
7077 static void
7078 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7080 struct bnx2 *bp = netdev_priv(dev);
7082 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7083 ering->rx_mini_max_pending = 0;
7084 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7086 ering->rx_pending = bp->rx_ring_size;
7087 ering->rx_mini_pending = 0;
7088 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7090 ering->tx_max_pending = MAX_TX_DESC_CNT;
7091 ering->tx_pending = bp->tx_ring_size;
7094 static int
7095 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
7097 if (netif_running(bp->dev)) {
7098 /* Reset will erase chipset stats; save them */
7099 bnx2_save_stats(bp);
7101 bnx2_netif_stop(bp, true);
7102 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7103 __bnx2_free_irq(bp);
7104 bnx2_free_skbs(bp);
7105 bnx2_free_mem(bp);
7108 bnx2_set_rx_ring_size(bp, rx);
7109 bp->tx_ring_size = tx;
7111 if (netif_running(bp->dev)) {
7112 int rc;
7114 rc = bnx2_alloc_mem(bp);
7115 if (!rc)
7116 rc = bnx2_request_irq(bp);
7118 if (!rc)
7119 rc = bnx2_init_nic(bp, 0);
7121 if (rc) {
7122 bnx2_napi_enable(bp);
7123 dev_close(bp->dev);
7124 return rc;
7126 #ifdef BCM_CNIC
7127 mutex_lock(&bp->cnic_lock);
7128 /* Let cnic know about the new status block. */
7129 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7130 bnx2_setup_cnic_irq_info(bp);
7131 mutex_unlock(&bp->cnic_lock);
7132 #endif
7133 bnx2_netif_start(bp, true);
7135 return 0;
7138 static int
7139 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7141 struct bnx2 *bp = netdev_priv(dev);
7142 int rc;
7144 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7145 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7146 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7148 return -EINVAL;
7150 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7151 return rc;
7154 static void
7155 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7157 struct bnx2 *bp = netdev_priv(dev);
7159 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7160 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7161 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7164 static int
7165 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7167 struct bnx2 *bp = netdev_priv(dev);
7169 bp->req_flow_ctrl = 0;
7170 if (epause->rx_pause)
7171 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7172 if (epause->tx_pause)
7173 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7175 if (epause->autoneg) {
7176 bp->autoneg |= AUTONEG_FLOW_CTRL;
7178 else {
7179 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7182 if (netif_running(dev)) {
7183 spin_lock_bh(&bp->phy_lock);
7184 bnx2_setup_phy(bp, bp->phy_port);
7185 spin_unlock_bh(&bp->phy_lock);
7188 return 0;
7191 static struct {
7192 char string[ETH_GSTRING_LEN];
7193 } bnx2_stats_str_arr[] = {
7194 { "rx_bytes" },
7195 { "rx_error_bytes" },
7196 { "tx_bytes" },
7197 { "tx_error_bytes" },
7198 { "rx_ucast_packets" },
7199 { "rx_mcast_packets" },
7200 { "rx_bcast_packets" },
7201 { "tx_ucast_packets" },
7202 { "tx_mcast_packets" },
7203 { "tx_bcast_packets" },
7204 { "tx_mac_errors" },
7205 { "tx_carrier_errors" },
7206 { "rx_crc_errors" },
7207 { "rx_align_errors" },
7208 { "tx_single_collisions" },
7209 { "tx_multi_collisions" },
7210 { "tx_deferred" },
7211 { "tx_excess_collisions" },
7212 { "tx_late_collisions" },
7213 { "tx_total_collisions" },
7214 { "rx_fragments" },
7215 { "rx_jabbers" },
7216 { "rx_undersize_packets" },
7217 { "rx_oversize_packets" },
7218 { "rx_64_byte_packets" },
7219 { "rx_65_to_127_byte_packets" },
7220 { "rx_128_to_255_byte_packets" },
7221 { "rx_256_to_511_byte_packets" },
7222 { "rx_512_to_1023_byte_packets" },
7223 { "rx_1024_to_1522_byte_packets" },
7224 { "rx_1523_to_9022_byte_packets" },
7225 { "tx_64_byte_packets" },
7226 { "tx_65_to_127_byte_packets" },
7227 { "tx_128_to_255_byte_packets" },
7228 { "tx_256_to_511_byte_packets" },
7229 { "tx_512_to_1023_byte_packets" },
7230 { "tx_1024_to_1522_byte_packets" },
7231 { "tx_1523_to_9022_byte_packets" },
7232 { "rx_xon_frames" },
7233 { "rx_xoff_frames" },
7234 { "tx_xon_frames" },
7235 { "tx_xoff_frames" },
7236 { "rx_mac_ctrl_frames" },
7237 { "rx_filtered_packets" },
7238 { "rx_ftq_discards" },
7239 { "rx_discards" },
7240 { "rx_fw_discards" },
7243 #define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
7244 sizeof(bnx2_stats_str_arr[0]))
7246 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7248 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7249 STATS_OFFSET32(stat_IfHCInOctets_hi),
7250 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7251 STATS_OFFSET32(stat_IfHCOutOctets_hi),
7252 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7253 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7254 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7255 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7256 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7257 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7258 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7259 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7260 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7261 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7262 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7263 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7264 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7265 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7266 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7267 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7268 STATS_OFFSET32(stat_EtherStatsCollisions),
7269 STATS_OFFSET32(stat_EtherStatsFragments),
7270 STATS_OFFSET32(stat_EtherStatsJabbers),
7271 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7272 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7273 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7274 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7275 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7276 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7277 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7278 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7279 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7280 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7281 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7282 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7283 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7284 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7285 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7286 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7287 STATS_OFFSET32(stat_XonPauseFramesReceived),
7288 STATS_OFFSET32(stat_XoffPauseFramesReceived),
7289 STATS_OFFSET32(stat_OutXonSent),
7290 STATS_OFFSET32(stat_OutXoffSent),
7291 STATS_OFFSET32(stat_MacControlFramesReceived),
7292 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7293 STATS_OFFSET32(stat_IfInFTQDiscards),
7294 STATS_OFFSET32(stat_IfInMBUFDiscards),
7295 STATS_OFFSET32(stat_FwRxDrop),
7298 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7299 * skipped because of errata.
7301 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7302 8,0,8,8,8,8,8,8,8,8,
7303 4,0,4,4,4,4,4,4,4,4,
7304 4,4,4,4,4,4,4,4,4,4,
7305 4,4,4,4,4,4,4,4,4,4,
7306 4,4,4,4,4,4,4,
7309 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7310 8,0,8,8,8,8,8,8,8,8,
7311 4,4,4,4,4,4,4,4,4,4,
7312 4,4,4,4,4,4,4,4,4,4,
7313 4,4,4,4,4,4,4,4,4,4,
7314 4,4,4,4,4,4,4,
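/* Each entry gives the width in bytes of the corresponding counter in
 * bnx2_stats_offset_arr[]: 8 means a 64-bit counter spanning two u32
 * words, 4 a 32-bit counter, and 0 a counter that
 * bnx2_get_ethtool_stats() reports as zero because of the errata noted
 * above.
 */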
7317 #define BNX2_NUM_TESTS 6
7319 static struct {
7320 char string[ETH_GSTRING_LEN];
7321 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7322 { "register_test (offline)" },
7323 { "memory_test (offline)" },
7324 { "loopback_test (offline)" },
7325 { "nvram_test (online)" },
7326 { "interrupt_test (online)" },
7327 { "link_test (online)" },
7330 static int
7331 bnx2_get_sset_count(struct net_device *dev, int sset)
7333 switch (sset) {
7334 case ETH_SS_TEST:
7335 return BNX2_NUM_TESTS;
7336 case ETH_SS_STATS:
7337 return BNX2_NUM_STATS;
7338 default:
7339 return -EOPNOTSUPP;
7343 static void
7344 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7346 struct bnx2 *bp = netdev_priv(dev);
7348 bnx2_set_power_state(bp, PCI_D0);
7350 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7351 if (etest->flags & ETH_TEST_FL_OFFLINE) {
7352 int i;
7354 bnx2_netif_stop(bp, true);
7355 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7356 bnx2_free_skbs(bp);
7358 if (bnx2_test_registers(bp) != 0) {
7359 buf[0] = 1;
7360 etest->flags |= ETH_TEST_FL_FAILED;
7362 if (bnx2_test_memory(bp) != 0) {
7363 buf[1] = 1;
7364 etest->flags |= ETH_TEST_FL_FAILED;
7366 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7367 etest->flags |= ETH_TEST_FL_FAILED;
7369 if (!netif_running(bp->dev))
7370 bnx2_shutdown_chip(bp);
7371 else {
7372 bnx2_init_nic(bp, 1);
7373 bnx2_netif_start(bp, true);
7376 /* wait for link up */
7377 for (i = 0; i < 7; i++) {
7378 if (bp->link_up)
7379 break;
7380 msleep_interruptible(1000);
7384 if (bnx2_test_nvram(bp) != 0) {
7385 buf[3] = 1;
7386 etest->flags |= ETH_TEST_FL_FAILED;
7388 if (bnx2_test_intr(bp) != 0) {
7389 buf[4] = 1;
7390 etest->flags |= ETH_TEST_FL_FAILED;
7393 if (bnx2_test_link(bp) != 0) {
7394 buf[5] = 1;
7395 etest->flags |= ETH_TEST_FL_FAILED;
7398 if (!netif_running(bp->dev))
7399 bnx2_set_power_state(bp, PCI_D3hot);
7402 static void
7403 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7405 switch (stringset) {
7406 case ETH_SS_STATS:
7407 memcpy(buf, bnx2_stats_str_arr,
7408 sizeof(bnx2_stats_str_arr));
7409 break;
7410 case ETH_SS_TEST:
7411 memcpy(buf, bnx2_tests_str_arr,
7412 sizeof(bnx2_tests_str_arr));
7413 break;
7417 static void
7418 bnx2_get_ethtool_stats(struct net_device *dev,
7419 struct ethtool_stats *stats, u64 *buf)
7421 struct bnx2 *bp = netdev_priv(dev);
7422 int i;
7423 u32 *hw_stats = (u32 *) bp->stats_blk;
7424 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7425 u8 *stats_len_arr = NULL;
7427 if (hw_stats == NULL) {
7428 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7429 return;
7432 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7433 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7434 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7435 (CHIP_ID(bp) == CHIP_ID_5708_A0))
7436 stats_len_arr = bnx2_5706_stats_len_arr;
7437 else
7438 stats_len_arr = bnx2_5708_stats_len_arr;
7440 for (i = 0; i < BNX2_NUM_STATS; i++) {
7441 unsigned long offset;
7443 if (stats_len_arr[i] == 0) {
7444 /* skip this counter */
7445 buf[i] = 0;
7446 continue;
7449 offset = bnx2_stats_offset_arr[i];
7450 if (stats_len_arr[i] == 4) {
7451 /* 4-byte counter */
7452 buf[i] = (u64) *(hw_stats + offset) +
7453 *(temp_stats + offset);
7454 continue;
7456 /* 8-byte counter */
7457 buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7458 *(hw_stats + offset + 1) +
7459 (((u64) *(temp_stats + offset)) << 32) +
7460 *(temp_stats + offset + 1);
7464 static int
7465 bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7467 struct bnx2 *bp = netdev_priv(dev);
7469 switch (state) {
7470 case ETHTOOL_ID_ACTIVE:
7471 bnx2_set_power_state(bp, PCI_D0);
7473 bp->leds_save = REG_RD(bp, BNX2_MISC_CFG);
7474 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7475 return 1; /* cycle on/off once per second */
7477 case ETHTOOL_ID_ON:
7478 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7479 BNX2_EMAC_LED_1000MB_OVERRIDE |
7480 BNX2_EMAC_LED_100MB_OVERRIDE |
7481 BNX2_EMAC_LED_10MB_OVERRIDE |
7482 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7483 BNX2_EMAC_LED_TRAFFIC);
7484 break;
7486 case ETHTOOL_ID_OFF:
7487 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7488 break;
7490 case ETHTOOL_ID_INACTIVE:
7491 REG_WR(bp, BNX2_EMAC_LED, 0);
7492 REG_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7494 if (!netif_running(dev))
7495 bnx2_set_power_state(bp, PCI_D3hot);
7496 break;
7499 return 0;
7502 static u32
7503 bnx2_fix_features(struct net_device *dev, u32 features)
7505 struct bnx2 *bp = netdev_priv(dev);
7507 if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
7508 features |= NETIF_F_HW_VLAN_RX;
7510 return features;
7513 static int
7514 bnx2_set_features(struct net_device *dev, u32 features)
7516 struct bnx2 *bp = netdev_priv(dev);
7518 /* TSO with VLAN tag won't work with current firmware */
7519 if (features & NETIF_F_HW_VLAN_TX)
7520 dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7521 else
7522 dev->vlan_features &= ~NETIF_F_ALL_TSO;
7524 if ((!!(features & NETIF_F_HW_VLAN_RX) !=
7525 !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7526 netif_running(dev)) {
7527 bnx2_netif_stop(bp, false);
7528 dev->features = features;
7529 bnx2_set_rx_mode(dev);
7530 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7531 bnx2_netif_start(bp, false);
7532 return 1;
7535 return 0;
7538 static const struct ethtool_ops bnx2_ethtool_ops = {
7539 .get_settings = bnx2_get_settings,
7540 .set_settings = bnx2_set_settings,
7541 .get_drvinfo = bnx2_get_drvinfo,
7542 .get_regs_len = bnx2_get_regs_len,
7543 .get_regs = bnx2_get_regs,
7544 .get_wol = bnx2_get_wol,
7545 .set_wol = bnx2_set_wol,
7546 .nway_reset = bnx2_nway_reset,
7547 .get_link = bnx2_get_link,
7548 .get_eeprom_len = bnx2_get_eeprom_len,
7549 .get_eeprom = bnx2_get_eeprom,
7550 .set_eeprom = bnx2_set_eeprom,
7551 .get_coalesce = bnx2_get_coalesce,
7552 .set_coalesce = bnx2_set_coalesce,
7553 .get_ringparam = bnx2_get_ringparam,
7554 .set_ringparam = bnx2_set_ringparam,
7555 .get_pauseparam = bnx2_get_pauseparam,
7556 .set_pauseparam = bnx2_set_pauseparam,
7557 .self_test = bnx2_self_test,
7558 .get_strings = bnx2_get_strings,
7559 .set_phys_id = bnx2_set_phys_id,
7560 .get_ethtool_stats = bnx2_get_ethtool_stats,
7561 .get_sset_count = bnx2_get_sset_count,
7564 /* Called with rtnl_lock */
7565 static int
7566 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7568 struct mii_ioctl_data *data = if_mii(ifr);
7569 struct bnx2 *bp = netdev_priv(dev);
7570 int err;
7572 switch(cmd) {
7573 case SIOCGMIIPHY:
7574 data->phy_id = bp->phy_addr;
7576 /* fallthru */
7577 case SIOCGMIIREG: {
7578 u32 mii_regval;
7580 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7581 return -EOPNOTSUPP;
7583 if (!netif_running(dev))
7584 return -EAGAIN;
7586 spin_lock_bh(&bp->phy_lock);
7587 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7588 spin_unlock_bh(&bp->phy_lock);
7590 data->val_out = mii_regval;
7592 return err;
7595 case SIOCSMIIREG:
7596 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7597 return -EOPNOTSUPP;
7599 if (!netif_running(dev))
7600 return -EAGAIN;
7602 spin_lock_bh(&bp->phy_lock);
7603 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7604 spin_unlock_bh(&bp->phy_lock);
7606 return err;
7608 default:
7609 /* do nothing */
7610 break;
7612 return -EOPNOTSUPP;
7615 /* Called with rtnl_lock */
7616 static int
7617 bnx2_change_mac_addr(struct net_device *dev, void *p)
7619 struct sockaddr *addr = p;
7620 struct bnx2 *bp = netdev_priv(dev);
7622 if (!is_valid_ether_addr(addr->sa_data))
7623 return -EINVAL;
7625 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7626 if (netif_running(dev))
7627 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7629 return 0;
7632 /* Called with rtnl_lock */
7633 static int
7634 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7636 struct bnx2 *bp = netdev_priv(dev);
7638 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7639 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7640 return -EINVAL;
7642 dev->mtu = new_mtu;
7643 return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size);
7646 #ifdef CONFIG_NET_POLL_CONTROLLER
7647 static void
7648 poll_bnx2(struct net_device *dev)
7650 struct bnx2 *bp = netdev_priv(dev);
7651 int i;
7653 for (i = 0; i < bp->irq_nvecs; i++) {
7654 struct bnx2_irq *irq = &bp->irq_tbl[i];
7656 disable_irq(irq->vector);
7657 irq->handler(irq->vector, &bp->bnx2_napi[i]);
7658 enable_irq(irq->vector);
7661 #endif
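/* poll_bnx2() backs the ndo_poll_controller hook used by netpoll clients
 * (netconsole, for example) to service the NIC with interrupts disabled;
 * it simply runs every vector's IRQ handler by hand.
 */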
7663 static void __devinit
7664 bnx2_get_5709_media(struct bnx2 *bp)
7666 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7667 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7668 u32 strap;
7670 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7671 return;
7672 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7673 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7674 return;
7677 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7678 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7679 else
7680 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7682 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7683 switch (strap) {
7684 case 0x4:
7685 case 0x5:
7686 case 0x6:
7687 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7688 return;
7690 } else {
7691 switch (strap) {
7692 case 0x1:
7693 case 0x2:
7694 case 0x4:
7695 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7696 return;
7701 static void __devinit
7702 bnx2_get_pci_speed(struct bnx2 *bp)
7704 u32 reg;
7706 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7707 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7708 u32 clkreg;
7710 bp->flags |= BNX2_FLAG_PCIX;
7712 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7714 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7715 switch (clkreg) {
7716 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7717 bp->bus_speed_mhz = 133;
7718 break;
7720 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7721 bp->bus_speed_mhz = 100;
7722 break;
7724 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7725 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7726 bp->bus_speed_mhz = 66;
7727 break;
7729 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7730 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7731 bp->bus_speed_mhz = 50;
7732 break;
7734 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7735 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7736 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7737 bp->bus_speed_mhz = 33;
7738 break;
7741 else {
7742 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7743 bp->bus_speed_mhz = 66;
7744 else
7745 bp->bus_speed_mhz = 33;
7748 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7749 bp->flags |= BNX2_FLAG_PCI_32BIT;
7753 static void __devinit
7754 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
7756 int rc, i, j;
7757 u8 *data;
7758 unsigned int block_end, rosize, len;
7760 #define BNX2_VPD_NVRAM_OFFSET 0x300
7761 #define BNX2_VPD_LEN 128
7762 #define BNX2_MAX_VER_SLEN 30
7764 data = kmalloc(256, GFP_KERNEL);
7765 if (!data)
7766 return;
7768 rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
7769 BNX2_VPD_LEN);
7770 if (rc)
7771 goto vpd_done;
7773 for (i = 0; i < BNX2_VPD_LEN; i += 4) {
7774 data[i] = data[i + BNX2_VPD_LEN + 3];
7775 data[i + 1] = data[i + BNX2_VPD_LEN + 2];
7776 data[i + 2] = data[i + BNX2_VPD_LEN + 1];
7777 data[i + 3] = data[i + BNX2_VPD_LEN];
7780 i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
7781 if (i < 0)
7782 goto vpd_done;
7784 rosize = pci_vpd_lrdt_size(&data[i]);
7785 i += PCI_VPD_LRDT_TAG_SIZE;
7786 block_end = i + rosize;
7788 if (block_end > BNX2_VPD_LEN)
7789 goto vpd_done;
7791 j = pci_vpd_find_info_keyword(data, i, rosize,
7792 PCI_VPD_RO_KEYWORD_MFR_ID);
7793 if (j < 0)
7794 goto vpd_done;
7796 len = pci_vpd_info_field_size(&data[j]);
7798 j += PCI_VPD_INFO_FLD_HDR_SIZE;
7799 if (j + len > block_end || len != 4 ||
7800 memcmp(&data[j], "1028", 4))
7801 goto vpd_done;
7803 j = pci_vpd_find_info_keyword(data, i, rosize,
7804 PCI_VPD_RO_KEYWORD_VENDOR0);
7805 if (j < 0)
7806 goto vpd_done;
7808 len = pci_vpd_info_field_size(&data[j]);
7810 j += PCI_VPD_INFO_FLD_HDR_SIZE;
7811 if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
7812 goto vpd_done;
7814 memcpy(bp->fw_version, &data[j], len);
7815 bp->fw_version[len] = ' ';
7817 vpd_done:
7818 kfree(data);
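/* The copy loop above reverses the byte order within each 32-bit word
 * read from NVRAM so that the generic PCI VPD helpers can parse it.  The
 * MFR_ID keyword must then match "1028" (Dell's PCI vendor ID in ASCII)
 * before the VENDOR0 string is copied into bp->fw_version.
 */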
7821 static int __devinit
7822 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7824 struct bnx2 *bp;
7825 unsigned long mem_len;
7826 int rc, i, j;
7827 u32 reg;
7828 u64 dma_mask, persist_dma_mask;
7829 int err;
7831 SET_NETDEV_DEV(dev, &pdev->dev);
7832 bp = netdev_priv(dev);
7834 bp->flags = 0;
7835 bp->phy_flags = 0;
7837 bp->temp_stats_blk =
7838 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7840 if (bp->temp_stats_blk == NULL) {
7841 rc = -ENOMEM;
7842 goto err_out;
7845 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7846 rc = pci_enable_device(pdev);
7847 if (rc) {
7848 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
7849 goto err_out;
7852 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7853 dev_err(&pdev->dev,
7854 "Cannot find PCI device base address, aborting\n");
7855 rc = -ENODEV;
7856 goto err_out_disable;
7859 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7860 if (rc) {
7861 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
7862 goto err_out_disable;
7865 pci_set_master(pdev);
7867 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7868 if (bp->pm_cap == 0) {
7869 dev_err(&pdev->dev,
7870 "Cannot find power management capability, aborting\n");
7871 rc = -EIO;
7872 goto err_out_release;
7875 bp->dev = dev;
7876 bp->pdev = pdev;
7878 spin_lock_init(&bp->phy_lock);
7879 spin_lock_init(&bp->indirect_lock);
7880 #ifdef BCM_CNIC
7881 mutex_init(&bp->cnic_lock);
7882 #endif
7883 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7885 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7886 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7887 dev->mem_end = dev->mem_start + mem_len;
7888 dev->irq = pdev->irq;
7890 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7892 if (!bp->regview) {
7893 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
7894 rc = -ENOMEM;
7895 goto err_out_release;
7898 bnx2_set_power_state(bp, PCI_D0);
7900 /* Configure byte swap and enable write to the reg_window registers.
7901 * Rely on the CPU to do target byte swapping on big-endian systems;
7902 * the chip's target access swapping will not swap all accesses.
7904 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG,
7905 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7906 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7908 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7910 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7911 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7912 dev_err(&pdev->dev,
7913 "Cannot find PCIE capability, aborting\n");
7914 rc = -EIO;
7915 goto err_out_unmap;
7917 bp->flags |= BNX2_FLAG_PCIE;
7918 if (CHIP_REV(bp) == CHIP_REV_Ax)
7919 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7921 /* AER (Advanced Error Reporting) hooks */
7922 err = pci_enable_pcie_error_reporting(pdev);
7923 if (!err)
7924 bp->flags |= BNX2_FLAG_AER_ENABLED;
7926 } else {
7927 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7928 if (bp->pcix_cap == 0) {
7929 dev_err(&pdev->dev,
7930 "Cannot find PCIX capability, aborting\n");
7931 rc = -EIO;
7932 goto err_out_unmap;
7934 bp->flags |= BNX2_FLAG_BROKEN_STATS;
7937 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7938 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7939 bp->flags |= BNX2_FLAG_MSIX_CAP;
7942 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7943 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7944 bp->flags |= BNX2_FLAG_MSI_CAP;
7947 /* 5708 cannot support DMA addresses > 40-bit. */
7948 if (CHIP_NUM(bp) == CHIP_NUM_5708)
7949 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
7950 else
7951 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
7953 /* Configure DMA attributes. */
7954 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7955 dev->features |= NETIF_F_HIGHDMA;
7956 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7957 if (rc) {
7958 dev_err(&pdev->dev,
7959 "pci_set_consistent_dma_mask failed, aborting\n");
7960 goto err_out_unmap;
7962 } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
7963 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
7964 goto err_out_unmap;
7967 if (!(bp->flags & BNX2_FLAG_PCIE))
7968 bnx2_get_pci_speed(bp);
7970 /* 5706A0 may falsely detect SERR and PERR. */
7971 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7972 reg = REG_RD(bp, PCI_COMMAND);
7973 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7974 REG_WR(bp, PCI_COMMAND, reg);
7976 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7977 !(bp->flags & BNX2_FLAG_PCIX)) {
7979 dev_err(&pdev->dev,
7980 "5706 A1 can only be used in a PCIX bus, aborting\n");
7981 goto err_out_unmap;
7984 bnx2_init_nvram(bp);
7986 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7988 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7989 BNX2_SHM_HDR_SIGNATURE_SIG) {
7990 u32 off = PCI_FUNC(pdev->devfn) << 2;
7992 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7993 } else
7994 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7996 /* Get the permanent MAC address. First we need to make sure the
7997 * firmware is actually running.
7999 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8001 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8002 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8003 dev_err(&pdev->dev, "Firmware not running, aborting\n");
8004 rc = -ENODEV;
8005 goto err_out_unmap;
8008 bnx2_read_vpd_fw_ver(bp);
8010 j = strlen(bp->fw_version);
8011 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8012 for (i = 0; i < 3 && j < 24; i++) {
8013 u8 num, k, skip0;
8015 if (i == 0) {
8016 bp->fw_version[j++] = 'b';
8017 bp->fw_version[j++] = 'c';
8018 bp->fw_version[j++] = ' ';
8020 num = (u8) (reg >> (24 - (i * 8)));
8021 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8022 if (num >= k || !skip0 || k == 1) {
8023 bp->fw_version[j++] = (num / k) + '0';
8024 skip0 = 0;
8027 if (i != 2)
8028 bp->fw_version[j++] = '.';
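/* The loop above renders the top three bytes of BNX2_DEV_INFO_BC_REV as a
 * dotted decimal bootcode version: a register value of 0x06020100, for
 * example, appends "bc 6.2.1" to bp->fw_version, with leading zeroes
 * suppressed via skip0.
 */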
8030 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8031 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8032 bp->wol = 1;
8034 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8035 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8037 for (i = 0; i < 30; i++) {
8038 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8039 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8040 break;
8041 msleep(10);
8044 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8045 reg &= BNX2_CONDITION_MFW_RUN_MASK;
8046 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8047 reg != BNX2_CONDITION_MFW_RUN_NONE) {
8048 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8050 if (j < 32)
8051 bp->fw_version[j++] = ' ';
8052 for (i = 0; i < 3 && j < 28; i++) {
8053 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8054 reg = swab32(reg);
8055 memcpy(&bp->fw_version[j], &reg, 4);
8056 j += 4;
8060 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8061 bp->mac_addr[0] = (u8) (reg >> 8);
8062 bp->mac_addr[1] = (u8) reg;
8064 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8065 bp->mac_addr[2] = (u8) (reg >> 24);
8066 bp->mac_addr[3] = (u8) (reg >> 16);
8067 bp->mac_addr[4] = (u8) (reg >> 8);
8068 bp->mac_addr[5] = (u8) reg;
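/* The permanent MAC address is split across two shared-memory words: the
 * top two bytes live in MAC_UPPER and the low four in MAC_LOWER, so
 * MAC_UPPER = 0x0010 and MAC_LOWER = 0x188b1234 would yield
 * 00:10:18:8b:12:34 (00:10:18 being a Broadcom OUI).
 */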
8070 bp->tx_ring_size = MAX_TX_DESC_CNT;
8071 bnx2_set_rx_ring_size(bp, 255);
8073 bp->tx_quick_cons_trip_int = 2;
8074 bp->tx_quick_cons_trip = 20;
8075 bp->tx_ticks_int = 18;
8076 bp->tx_ticks = 80;
8078 bp->rx_quick_cons_trip_int = 2;
8079 bp->rx_quick_cons_trip = 12;
8080 bp->rx_ticks_int = 18;
8081 bp->rx_ticks = 18;
8083 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8085 bp->current_interval = BNX2_TIMER_INTERVAL;
8087 bp->phy_addr = 1;
8089 /* Disable WOL support if we are running on a SERDES chip. */
8090 if (CHIP_NUM(bp) == CHIP_NUM_5709)
8091 bnx2_get_5709_media(bp);
8092 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8093 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8095 bp->phy_port = PORT_TP;
8096 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8097 bp->phy_port = PORT_FIBRE;
8098 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8099 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8100 bp->flags |= BNX2_FLAG_NO_WOL;
8101 bp->wol = 0;
8103 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8104 /* Don't do parallel detect on this board because of
8105 * board problems: the link will not go down
8106 * if we do parallel detect.
8108 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8109 pdev->subsystem_device == 0x310c)
8110 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8111 } else {
8112 bp->phy_addr = 2;
8113 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8114 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8116 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8117 CHIP_NUM(bp) == CHIP_NUM_5708)
8118 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8119 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8120 (CHIP_REV(bp) == CHIP_REV_Ax ||
8121 CHIP_REV(bp) == CHIP_REV_Bx))
8122 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8124 bnx2_init_fw_cap(bp);
8126 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8127 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8128 (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8129 !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8130 bp->flags |= BNX2_FLAG_NO_WOL;
8131 bp->wol = 0;
8134 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8135 bp->tx_quick_cons_trip_int =
8136 bp->tx_quick_cons_trip;
8137 bp->tx_ticks_int = bp->tx_ticks;
8138 bp->rx_quick_cons_trip_int =
8139 bp->rx_quick_cons_trip;
8140 bp->rx_ticks_int = bp->rx_ticks;
8141 bp->comp_prod_trip_int = bp->comp_prod_trip;
8142 bp->com_ticks_int = bp->com_ticks;
8143 bp->cmd_ticks_int = bp->cmd_ticks;
8146 /* Disable MSI on 5706 if AMD 8132 bridge is found.
8148 * MSI is defined to be a 32-bit write. The 5706 does 64-bit MSI writes
8149 * with byte enables disabled on the unused 32-bit word. This is legal
8150 * but causes problems on the AMD 8132, which eventually stops
8151 * responding.
8153 * AMD believes this incompatibility is unique to the 5706, and
8154 * prefers to locally disable MSI rather than globally disabling it.
8156 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8157 struct pci_dev *amd_8132 = NULL;
8159 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8160 PCI_DEVICE_ID_AMD_8132_BRIDGE,
8161 amd_8132))) {
8163 if (amd_8132->revision >= 0x10 &&
8164 amd_8132->revision <= 0x13) {
8165 disable_msi = 1;
8166 pci_dev_put(amd_8132);
8167 break;
8172 bnx2_set_default_link(bp);
8173 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8175 init_timer(&bp->timer);
8176 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8177 bp->timer.data = (unsigned long) bp;
8178 bp->timer.function = bnx2_timer;
8180 pci_save_state(pdev);
8182 return 0;
8184 err_out_unmap:
8185 if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8186 pci_disable_pcie_error_reporting(pdev);
8187 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8190 if (bp->regview) {
8191 iounmap(bp->regview);
8192 bp->regview = NULL;
8195 err_out_release:
8196 pci_release_regions(pdev);
8198 err_out_disable:
8199 pci_disable_device(pdev);
8200 pci_set_drvdata(pdev, NULL);
8202 err_out:
8203 return rc;
8206 static char * __devinit
8207 bnx2_bus_string(struct bnx2 *bp, char *str)
8209 char *s = str;
8211 if (bp->flags & BNX2_FLAG_PCIE) {
8212 s += sprintf(s, "PCI Express");
8213 } else {
8214 s += sprintf(s, "PCI");
8215 if (bp->flags & BNX2_FLAG_PCIX)
8216 s += sprintf(s, "-X");
8217 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8218 s += sprintf(s, " 32-bit");
8219 else
8220 s += sprintf(s, " 64-bit");
8221 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8223 return str;
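/* bnx2_bus_string() produces strings such as "PCI Express" or
 * "PCI-X 64-bit 133MHz"; bnx2_init_one() folds the result into the probe
 * banner printed via netdev_info().
 */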
8226 static void
8227 bnx2_del_napi(struct bnx2 *bp)
8229 int i;
8231 for (i = 0; i < bp->irq_nvecs; i++)
8232 netif_napi_del(&bp->bnx2_napi[i].napi);
8235 static void
8236 bnx2_init_napi(struct bnx2 *bp)
8238 int i;
8240 for (i = 0; i < bp->irq_nvecs; i++) {
8241 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8242 int (*poll)(struct napi_struct *, int);
8244 if (i == 0)
8245 poll = bnx2_poll;
8246 else
8247 poll = bnx2_poll_msix;
8249 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8250 bnapi->bp = bp;
8254 static const struct net_device_ops bnx2_netdev_ops = {
8255 .ndo_open = bnx2_open,
8256 .ndo_start_xmit = bnx2_start_xmit,
8257 .ndo_stop = bnx2_close,
8258 .ndo_get_stats64 = bnx2_get_stats64,
8259 .ndo_set_rx_mode = bnx2_set_rx_mode,
8260 .ndo_do_ioctl = bnx2_ioctl,
8261 .ndo_validate_addr = eth_validate_addr,
8262 .ndo_set_mac_address = bnx2_change_mac_addr,
8263 .ndo_change_mtu = bnx2_change_mtu,
8264 .ndo_fix_features = bnx2_fix_features,
8265 .ndo_set_features = bnx2_set_features,
8266 .ndo_tx_timeout = bnx2_tx_timeout,
8267 #ifdef CONFIG_NET_POLL_CONTROLLER
8268 .ndo_poll_controller = poll_bnx2,
8269 #endif
8272 static int __devinit
8273 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8275 static int version_printed = 0;
8276 struct net_device *dev = NULL;
8277 struct bnx2 *bp;
8278 int rc;
8279 char str[40];
8281 if (version_printed++ == 0)
8282 pr_info("%s", version);
8284 /* dev zeroed in init_etherdev */
8285 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8287 if (!dev)
8288 return -ENOMEM;
8290 rc = bnx2_init_board(pdev, dev);
8291 if (rc < 0) {
8292 free_netdev(dev);
8293 return rc;
8296 dev->netdev_ops = &bnx2_netdev_ops;
8297 dev->watchdog_timeo = TX_TIMEOUT;
8298 dev->ethtool_ops = &bnx2_ethtool_ops;
8300 bp = netdev_priv(dev);
8302 pci_set_drvdata(pdev, dev);
8304 rc = bnx2_request_firmware(bp);
8305 if (rc)
8306 goto error;
8308 memcpy(dev->dev_addr, bp->mac_addr, 6);
8309 memcpy(dev->perm_addr, bp->mac_addr, 6);
8311 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8312 NETIF_F_TSO | NETIF_F_TSO_ECN |
8313 NETIF_F_RXHASH | NETIF_F_RXCSUM;
8315 if (CHIP_NUM(bp) == CHIP_NUM_5709)
8316 dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8318 dev->vlan_features = dev->hw_features;
8319 dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8320 dev->features |= dev->hw_features;
8322 if ((rc = register_netdev(dev))) {
8323 dev_err(&pdev->dev, "Cannot register net device\n");
8324 goto error;
8327 netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
8328 board_info[ent->driver_data].name,
8329 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8330 ((CHIP_ID(bp) & 0x0ff0) >> 4),
8331 bnx2_bus_string(bp, str),
8332 dev->base_addr,
8333 bp->pdev->irq, dev->dev_addr);
8335 return 0;
8337 error:
8338 if (bp->mips_firmware)
8339 release_firmware(bp->mips_firmware);
8340 if (bp->rv2p_firmware)
8341 release_firmware(bp->rv2p_firmware);
8343 if (bp->regview)
8344 iounmap(bp->regview);
8345 pci_release_regions(pdev);
8346 pci_disable_device(pdev);
8347 pci_set_drvdata(pdev, NULL);
8348 free_netdev(dev);
8349 return rc;
8352 static void __devexit
8353 bnx2_remove_one(struct pci_dev *pdev)
8355 struct net_device *dev = pci_get_drvdata(pdev);
8356 struct bnx2 *bp = netdev_priv(dev);
8358 unregister_netdev(dev);
8360 del_timer_sync(&bp->timer);
8362 if (bp->mips_firmware)
8363 release_firmware(bp->mips_firmware);
8364 if (bp->rv2p_firmware)
8365 release_firmware(bp->rv2p_firmware);
8367 if (bp->regview)
8368 iounmap(bp->regview);
8370 kfree(bp->temp_stats_blk);
8372 if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8373 pci_disable_pcie_error_reporting(pdev);
8374 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8377 free_netdev(dev);
8379 pci_release_regions(pdev);
8380 pci_disable_device(pdev);
8381 pci_set_drvdata(pdev, NULL);
8384 static int
8385 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
8387 struct net_device *dev = pci_get_drvdata(pdev);
8388 struct bnx2 *bp = netdev_priv(dev);
8390 /* PCI register 4 needs to be saved whether netif_running() or not.
8391 * MSI address and data need to be saved if using MSI and
8392 * netif_running().
8394 pci_save_state(pdev);
8395 if (!netif_running(dev))
8396 return 0;
8398 cancel_work_sync(&bp->reset_task);
8399 bnx2_netif_stop(bp, true);
8400 netif_device_detach(dev);
8401 del_timer_sync(&bp->timer);
8402 bnx2_shutdown_chip(bp);
8403 bnx2_free_skbs(bp);
8404 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
8405 return 0;
8408 static int
8409 bnx2_resume(struct pci_dev *pdev)
8411 struct net_device *dev = pci_get_drvdata(pdev);
8412 struct bnx2 *bp = netdev_priv(dev);
8414 pci_restore_state(pdev);
8415 if (!netif_running(dev))
8416 return 0;
8418 bnx2_set_power_state(bp, PCI_D0);
8419 netif_device_attach(dev);
8420 bnx2_init_nic(bp, 1);
8421 bnx2_netif_start(bp, true);
8422 return 0;
8426 * bnx2_io_error_detected - called when PCI error is detected
8427 * @pdev: Pointer to PCI device
8428 * @state: The current pci connection state
8430 * This function is called after a PCI bus error affecting
8431 * this device has been detected.
8433 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8434 pci_channel_state_t state)
8436 struct net_device *dev = pci_get_drvdata(pdev);
8437 struct bnx2 *bp = netdev_priv(dev);
8439 rtnl_lock();
8440 netif_device_detach(dev);
8442 if (state == pci_channel_io_perm_failure) {
8443 rtnl_unlock();
8444 return PCI_ERS_RESULT_DISCONNECT;
8447 if (netif_running(dev)) {
8448 bnx2_netif_stop(bp, true);
8449 del_timer_sync(&bp->timer);
8450 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8453 pci_disable_device(pdev);
8454 rtnl_unlock();
8456 /* Request a slot reset. */
8457 return PCI_ERS_RESULT_NEED_RESET;
8461 * bnx2_io_slot_reset - called after the pci bus has been reset.
8462 * @pdev: Pointer to PCI device
8464 * Restart the card from scratch, as if from a cold-boot.
8466 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8468 struct net_device *dev = pci_get_drvdata(pdev);
8469 struct bnx2 *bp = netdev_priv(dev);
8470 pci_ers_result_t result;
8471 int err;
8473 rtnl_lock();
8474 if (pci_enable_device(pdev)) {
8475 dev_err(&pdev->dev,
8476 "Cannot re-enable PCI device after reset\n");
8477 result = PCI_ERS_RESULT_DISCONNECT;
8478 } else {
8479 pci_set_master(pdev);
8480 pci_restore_state(pdev);
8481 pci_save_state(pdev);
8483 if (netif_running(dev)) {
8484 bnx2_set_power_state(bp, PCI_D0);
8485 bnx2_init_nic(bp, 1);
8487 result = PCI_ERS_RESULT_RECOVERED;
8489 rtnl_unlock();
8491 if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8492 return result;
8494 err = pci_cleanup_aer_uncorrect_error_status(pdev);
8495 if (err) {
8496 dev_err(&pdev->dev,
8497 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8498 err); /* non-fatal, continue */
8501 return result;
8505 * bnx2_io_resume - called when traffic can start flowing again.
8506 * @pdev: Pointer to PCI device
8508 * This callback is called when the error recovery driver tells us that
8509 * it's OK to resume normal operation.
8511 static void bnx2_io_resume(struct pci_dev *pdev)
8513 struct net_device *dev = pci_get_drvdata(pdev);
8514 struct bnx2 *bp = netdev_priv(dev);
8516 rtnl_lock();
8517 if (netif_running(dev))
8518 bnx2_netif_start(bp, true);
8520 netif_device_attach(dev);
8521 rtnl_unlock();
8524 static struct pci_error_handlers bnx2_err_handler = {
8525 .error_detected = bnx2_io_error_detected,
8526 .slot_reset = bnx2_io_slot_reset,
8527 .resume = bnx2_io_resume,
8530 static struct pci_driver bnx2_pci_driver = {
8531 .name = DRV_MODULE_NAME,
8532 .id_table = bnx2_pci_tbl,
8533 .probe = bnx2_init_one,
8534 .remove = __devexit_p(bnx2_remove_one),
8535 .suspend = bnx2_suspend,
8536 .resume = bnx2_resume,
8537 .err_handler = &bnx2_err_handler,
8540 static int __init bnx2_init(void)
8542 return pci_register_driver(&bnx2_pci_driver);
8545 static void __exit bnx2_cleanup(void)
8547 pci_unregister_driver(&bnx2_pci_driver);
8550 module_init(bnx2_init);
8551 module_exit(bnx2_cleanup);