/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/aer.h>

#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
#include "bnx2.h"
#include "bnx2_fw.h"

#define DRV_MODULE_NAME		"bnx2"
#define DRV_MODULE_VERSION	"2.2.5"
#define DRV_MODULE_RELDATE	"December 20, 2013"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

static int disable_msi = 0;

module_param(disable_msi, int, S_IRUGO);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
};

static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};

static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static void bnx2_init_napi(struct bnx2 *bp);
static void bnx2_del_napi(struct bnx2 *bp);

static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	/* Tell compiler to fetch tx_prod and tx_cons from memory. */
	barrier();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == BNX2_TX_DESC_CNT)
			diff = BNX2_MAX_TX_DESC_CNT;
	}
	return bp->tx_ring_size - diff;
}
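/* Worked example for bnx2_tx_avail() above (assuming BNX2_TX_DESC_CNT
 * is 256, i.e. a 4K page of 16-byte TX descriptors): if tx_prod has
 * wrapped to 5 while tx_cons is still 65530, the u32 subtraction gives
 * 0xffff000b and the 16-bit mask reduces it to 11 in-flight
 * descriptors; if the masked difference is exactly 256, it is clamped
 * to BNX2_MAX_TX_DESC_CNT (255) because one index is always skipped.
 */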
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}
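/* The shmem helpers above reach the firmware's shared memory region at
 * bp->shmem_base through the same PCICFG register window, so they
 * inherit the indirect_lock serialization of the indirect accessors.
 */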
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		int i;

		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
		BNX2_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
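/* On the 5709, bnx2_ctx_wr() posts the write through BNX2_CTX_CTX_CTRL
 * and polls for the WRITE_REQ bit to clear: at most 5 reads with a
 * 5 usec delay each (roughly 25 usec) before giving up silently.
 * Older chips write the context memory directly through
 * BNX2_CTX_DATA_ADR/BNX2_CTX_DATA.
 */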
#ifdef BCM_CNIC
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct drv_ctl_io *io = &info->data.io;

	switch (info->cmd) {
	case DRV_CTL_IO_WR_CMD:
		bnx2_reg_wr_ind(bp, io->offset, io->data);
		break;
	case DRV_CTL_IO_RD_CMD:
		io->data = bnx2_reg_rd_ind(bp, io->offset);
		break;
	case DRV_CTL_CTX_WR_CMD:
		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}

static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
		return -ENODEV;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}

static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}
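/* bp->cnic_ops is published with rcu_assign_pointer() in
 * bnx2_register_cnic() and cleared with RCU_INIT_POINTER() above; the
 * synchronize_rcu() call makes sure any reader still dereferencing the
 * ops pointer under rcu_read_lock() has finished before unregister
 * returns.
 */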
static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (!cp->max_iscsi_conn)
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

#else

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif

static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	} else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
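/* bnx2_read_phy() above and bnx2_write_phy() below share one MDIO
 * handshake: a command is written to BNX2_EMAC_MDIO_COMM with
 * START_BUSY set, then the bit is polled up to 50 times at 10 usec
 * intervals (about 500 usec) until the EMAC clears it.  Hardware
 * auto-polling is paused around the access because the auto-poller
 * owns the MDIO bus while it is enabled.
 */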
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			bnapi->last_status_idx);

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			bnapi->last_status_idx);
	}
	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
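/* bp->intr_sem acts as a nesting counter for interrupt shutdown:
 * bnx2_disable_int_sync() above increments it before masking and
 * synchronizing the IRQ vectors, and bnx2_netif_start() only re-enables
 * NAPI and interrupts once atomic_dec_and_test() brings it back to
 * zero.
 */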
static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}

static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}

static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					  txr->tx_desc_ring,
					  txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}

static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_desc_ring[j],
						  rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_pg_desc_ring[j],
						  rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}

static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					   &txr->tx_desc_mapping, GFP_KERNEL);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}

static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_pg_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}
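/* Allocation split in the rx path above: the software shadow rings
 * (rx_buf_ring, rx_pg_ring) hold only driver bookkeeping and can come
 * from vzalloc(), while the descriptor rings the chip DMAs from must be
 * allocated with dma_alloc_coherent().  Partial failures are safe
 * because the caller unwinds through bnx2_free_mem().
 */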
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
					  bp->ctx_blk[i],
					  bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
				  bnapi->status_blk.msi,
				  bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}

static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
					 &bp->status_blk_mapping, GFP_KERNEL);
	if (status_blk == NULL)
		goto alloc_mem_err;

	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BNX2_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
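/* Layout produced by bnx2_alloc_mem(): a single coherent buffer holds
 * the status block(s) followed by the statistics block.  On MSI-X
 * capable chips the status area is sized for BNX2_MAX_MSIX_HW_VEC
 * blocks spaced BNX2_SBLK_MSIX_ALIGN_SIZE apart, and vector i uses the
 * block at status_blk + i * BNX2_SBLK_MSIX_ALIGN_SIZE.
 */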
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	} else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
		 "Copper");
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			} else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}

static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			} else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		} else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	} else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
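/* Pause resolution implemented above, as a truth table of local vs.
 * remote advertisement bits: Sym+Asym vs Sym -> tx and rx pause;
 * Sym+Asym vs Asym only -> rx pause only; Sym only vs Sym -> tx and rx
 * pause; Asym only vs Sym+Asym -> tx pause only; any other combination
 * leaves flow control off.
 */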
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}

	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
	case MII_BNX2_GP_TOP_AN_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_1G:
	case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
		bp->line_speed = SPEED_1000;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	} else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		} else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		} else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		} else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			} else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			} else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			} else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			} else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	} else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		} else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		} else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	if (bp->link_up) {
		u32 ext_status;

		bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
		if (ext_status & EXT_STATUS_MDIX)
			bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
	}

	return 0;
}
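/* In bnx2_copper_linkup() above, "local_adv & (remote_adv >> 2)" works
 * because the link partner's 1000BASE-T abilities in MII_STAT1000
 * (bits 11:10) sit exactly two bit positions above our own
 * advertisements in MII_CTRL1000 (bits 9:8), so the shift lines both
 * registers up for the AND against ADVERTISE_1000FULL/HALF.
 */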
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}

static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		if (i == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}

static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = BNX2_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	} else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	BNX2_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	bnx2_init_all_rx_contexts(bp);
}

static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
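/* bnx2_set_link() below reads the (shadow) BMSR twice in a row on
 * purpose: the MII link-status bit is latched low on link loss, so the
 * first read returns and clears the stale latched value and the second
 * read reflects the current link state.  The same double-read pattern
 * appears throughout this file.
 */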
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = BNX2_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
				bnx2_5706s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
				bnx2_5708s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
				bnx2_5709s_linkup(bp);
		} else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	} else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE;
		} else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	} else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPSE_ASYM;
		} else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	} else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		} else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}
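/* Advertisement mapping implemented above: rx+tx pause requested maps
 * to the symmetric pause bit, tx only to the asymmetric bit, and rx
 * only to both bits.  The 1000X variants are used on SerDes ports, the
 * standard PAUSE bits on copper.
 */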
static int bnx2_fw_sync(struct bnx2 *, u32, int, int);

static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}

static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		} else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate, which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}

#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}

static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}

static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}

static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
			break;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
			break;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
			break;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;
			break;
		default:
			bp->line_speed = 0;
			break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);
	}

	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}

static int
bnx2_set_remote_link(struct bnx2 *bp)
{
	u32 evt_code;

	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
	switch (evt_code) {
	case BNX2_FW_EVT_CODE_LINK_EVENT:
		bnx2_remote_phy_event(bp);
		break;
	case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
	default:
		bnx2_send_heart_beat(bp);
		break;
	}
	return 0;
}

static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr, adv_reg, new_adv = 0;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
	adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
		    ADVERTISE_PAUSE_ASYM);

	new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv1000_reg;
		u32 new_adv1000 = 0;

		new_adv |= bnx2_phy_get_pause_adv(bp);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
		if ((adv1000_reg != new_adv1000) ||
		    (adv_reg != new_adv) ||
		    ((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				       BMCR_ANENABLE);
		} else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* advertise nothing when forcing speed */
	if (adv_reg != new_adv)
		bnx2_write_phy(bp, bp->mii_adv, new_adv);

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}

static int
bnx2_setup_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		return bnx2_setup_serdes_phy(bp, port);
	} else {
		return bnx2_setup_copper_phy(bp);
	}
}

static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
		MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}

static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
		BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}

static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
2275 bnx2_reset_phy(bp);
2277 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2279 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2280 BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2282 if (bp->dev->mtu > 1500) {
2283 u32 val;
2285 /* Set extended packet length bit */
2286 bnx2_write_phy(bp, 0x18, 0x7);
2287 bnx2_read_phy(bp, 0x18, &val);
2288 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2290 bnx2_write_phy(bp, 0x1c, 0x6c00);
2291 bnx2_read_phy(bp, 0x1c, &val);
2292 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2294 else {
2295 u32 val;
2297 bnx2_write_phy(bp, 0x18, 0x7);
2298 bnx2_read_phy(bp, 0x18, &val);
2299 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2301 bnx2_write_phy(bp, 0x1c, 0x6c00);
2302 bnx2_read_phy(bp, 0x1c, &val);
2303 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2306 return 0;
2309 static int
2310 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2312 u32 val;
2314 if (reset_phy)
2315 bnx2_reset_phy(bp);
2317 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2318 bnx2_write_phy(bp, 0x18, 0x0c00);
2319 bnx2_write_phy(bp, 0x17, 0x000a);
2320 bnx2_write_phy(bp, 0x15, 0x310b);
2321 bnx2_write_phy(bp, 0x17, 0x201f);
2322 bnx2_write_phy(bp, 0x15, 0x9506);
2323 bnx2_write_phy(bp, 0x17, 0x401f);
2324 bnx2_write_phy(bp, 0x15, 0x14e2);
2325 bnx2_write_phy(bp, 0x18, 0x0400);
2328 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2329 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2330 MII_BNX2_DSP_EXPAND_REG | 0x8);
2331 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2332 val &= ~(1 << 8);
2333 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2336 if (bp->dev->mtu > 1500) {
2337 /* Set extended packet length bit */
2338 bnx2_write_phy(bp, 0x18, 0x7);
2339 bnx2_read_phy(bp, 0x18, &val);
2340 bnx2_write_phy(bp, 0x18, val | 0x4000);
2342 bnx2_read_phy(bp, 0x10, &val);
2343 bnx2_write_phy(bp, 0x10, val | 0x1);
2345 else {
2346 bnx2_write_phy(bp, 0x18, 0x7);
2347 bnx2_read_phy(bp, 0x18, &val);
2348 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2350 bnx2_read_phy(bp, 0x10, &val);
2351 bnx2_write_phy(bp, 0x10, val & ~0x1);
2354 /* ethernet@wirespeed */
2355 bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
2356 bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
2357 val |= AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;
2359 /* auto-mdix */
2360 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2361 val |= AUX_CTL_MISC_CTL_AUTOMDIX;
2363 bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
2364 return 0;
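/* Common PHY bring-up: point the MII register map at the standard
 * offsets, enable link attentions, read the PHY ID (skipped when the
 * PHY is managed remotely by the firmware), run the chip-specific
 * SerDes or copper init, then program speed/duplex/autoneg.
 */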
2368 static int
2369 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2370 __releases(&bp->phy_lock)
2371 __acquires(&bp->phy_lock)
2373 u32 val;
2374 int rc = 0;
2376 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2377 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2379 bp->mii_bmcr = MII_BMCR;
2380 bp->mii_bmsr = MII_BMSR;
2381 bp->mii_bmsr1 = MII_BMSR;
2382 bp->mii_adv = MII_ADVERTISE;
2383 bp->mii_lpa = MII_LPA;
2385 BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2387 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2388 goto setup_phy;
2390 bnx2_read_phy(bp, MII_PHYSID1, &val);
2391 bp->phy_id = val << 16;
2392 bnx2_read_phy(bp, MII_PHYSID2, &val);
2393 bp->phy_id |= val & 0xffff;
2395 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2396 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2397 rc = bnx2_init_5706s_phy(bp, reset_phy);
2398 else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2399 rc = bnx2_init_5708s_phy(bp, reset_phy);
2400 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2401 rc = bnx2_init_5709s_phy(bp, reset_phy);
2403 else {
2404 rc = bnx2_init_copper_phy(bp, reset_phy);
2407 setup_phy:
2408 if (!rc)
2409 rc = bnx2_setup_phy(bp, bp->phy_port);
2411 return rc;
2414 static int
2415 bnx2_set_mac_loopback(struct bnx2 *bp)
2417 u32 mac_mode;
2419 mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2420 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2421 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2422 BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2423 bp->link_up = 1;
2424 return 0;
2427 static int bnx2_test_link(struct bnx2 *);
2429 static int
2430 bnx2_set_phy_loopback(struct bnx2 *bp)
2432 u32 mac_mode;
2433 int rc, i;
2435 spin_lock_bh(&bp->phy_lock);
2436 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2437 BMCR_SPEED1000);
2438 spin_unlock_bh(&bp->phy_lock);
2439 if (rc)
2440 return rc;
2442 for (i = 0; i < 10; i++) {
2443 if (bnx2_test_link(bp) == 0)
2444 break;
2445 msleep(100);
2448 mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2449 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2450 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2451 BNX2_EMAC_MODE_25G_MODE);
2453 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2454 BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2455 bp->link_up = 1;
2456 return 0;
2459 static void
2460 bnx2_dump_mcp_state(struct bnx2 *bp)
2462 struct net_device *dev = bp->dev;
2463 u32 mcp_p0, mcp_p1;
2465 netdev_err(dev, "<--- start MCP states dump --->\n");
2466 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
2467 mcp_p0 = BNX2_MCP_STATE_P0;
2468 mcp_p1 = BNX2_MCP_STATE_P1;
2469 } else {
2470 mcp_p0 = BNX2_MCP_STATE_P0_5708;
2471 mcp_p1 = BNX2_MCP_STATE_P1_5708;
2473 netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2474 bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2475 netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2476 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2477 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2478 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
2479 netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2480 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2481 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2482 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2483 netdev_err(dev, "DEBUG: shmem states:\n");
2484 netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2485 bnx2_shmem_rd(bp, BNX2_DRV_MB),
2486 bnx2_shmem_rd(bp, BNX2_FW_MB),
2487 bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2488 pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2489 netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2490 bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2491 bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2492 pr_cont(" condition[%08x]\n",
2493 bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2494 DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
2495 DP_SHMEM_LINE(bp, 0x3cc);
2496 DP_SHMEM_LINE(bp, 0x3dc);
2497 DP_SHMEM_LINE(bp, 0x3ec);
2498 netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2499 netdev_err(dev, "<--- end MCP states dump --->\n");
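/* Post a message to the bootcode through the BNX2_DRV_MB mailbox and,
 * if requested, poll BNX2_FW_MB (10 ms per iteration, up to
 * BNX2_FW_ACK_TIME_OUT_MS) for an ack carrying the same sequence
 * number.  WAIT0 messages never report a timeout; otherwise a
 * FW_TIMEOUT code is written back on timeout so the firmware knows
 * the handshake failed.  Illustrative call, as used by the chip
 * reset path:
 *
 *	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 |
 *		     BNX2_DRV_MSG_CODE_RESET, 1, 1);
 */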
2502 static int
2503 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2505 int i;
2506 u32 val;
2508 bp->fw_wr_seq++;
2509 msg_data |= bp->fw_wr_seq;
2510 bp->fw_last_msg = msg_data;
2512 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2514 if (!ack)
2515 return 0;
2517 /* wait for an acknowledgement. */
2518 for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2519 msleep(10);
2521 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2523 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2524 break;
2526 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2527 return 0;
2529 /* If we timed out, inform the firmware that this is the case. */
2530 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2531 msg_data &= ~BNX2_DRV_MSG_CODE;
2532 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2534 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2535 if (!silent) {
2536 pr_err("fw sync timeout, reset code = %x\n", msg_data);
2537 bnx2_dump_mcp_state(bp);
2540 return -EBUSY;
2543 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2544 return -EIO;
2546 return 0;
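/* The 5709 keeps its connection context in host memory.  Each
 * BNX2_PAGE_SIZE block is zeroed and registered with the chip
 * through the CTX_HOST_PAGE_TBL registers, polling for the WRITE_REQ
 * bit to clear before moving on to the next page.
 */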
2549 static int
2550 bnx2_init_5709_context(struct bnx2 *bp)
2552 int i, ret = 0;
2553 u32 val;
2555 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2556 val |= (BNX2_PAGE_BITS - 8) << 16;
2557 BNX2_WR(bp, BNX2_CTX_COMMAND, val);
2558 for (i = 0; i < 10; i++) {
2559 val = BNX2_RD(bp, BNX2_CTX_COMMAND);
2560 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2561 break;
2562 udelay(2);
2564 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2565 return -EBUSY;
2567 for (i = 0; i < bp->ctx_pages; i++) {
2568 int j;
2570 if (bp->ctx_blk[i])
2571 memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
2572 else
2573 return -ENOMEM;
2575 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2576 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2577 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2578 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2579 (u64) bp->ctx_blk_mapping[i] >> 32);
2580 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2581 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2582 for (j = 0; j < 10; j++) {
2584 val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2585 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2586 break;
2587 udelay(5);
2589 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2590 ret = -EBUSY;
2591 break;
2594 return ret;
2597 static void
2598 bnx2_init_context(struct bnx2 *bp)
2600 u32 vcid;
2602 vcid = 96;
2603 while (vcid) {
2604 u32 vcid_addr, pcid_addr, offset;
2605 int i;
2607 vcid--;
2609 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
2610 u32 new_vcid;
2612 vcid_addr = GET_PCID_ADDR(vcid);
2613 if (vcid & 0x8) {
2614 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2616 else {
2617 new_vcid = vcid;
2619 pcid_addr = GET_PCID_ADDR(new_vcid);
2621 else {
2622 vcid_addr = GET_CID_ADDR(vcid);
2623 pcid_addr = vcid_addr;
2626 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2627 vcid_addr += (i << PHY_CTX_SHIFT);
2628 pcid_addr += (i << PHY_CTX_SHIFT);
2630 BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2631 BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2633 /* Zero out the context. */
2634 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2635 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
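/* Workaround for parts with bad rx buffer memory: request every free
 * mbuf cluster from the on-chip allocator, keep the ones whose
 * address has the bad-block bit (bit 9) clear, and free only those
 * back.  The bad blocks stay allocated and are never handed out
 * again.
 */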
2640 static int
2641 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2643 u16 *good_mbuf;
2644 u32 good_mbuf_cnt;
2645 u32 val;
2647 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2648 if (good_mbuf == NULL)
2649 return -ENOMEM;
2651 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2652 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2654 good_mbuf_cnt = 0;
2656 /* Allocate a bunch of mbufs and save the good ones in an array. */
2657 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2658 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2659 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2660 BNX2_RBUF_COMMAND_ALLOC_REQ);
2662 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2664 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2666 /* The addresses with Bit 9 set are bad memory blocks. */
2667 if (!(val & (1 << 9))) {
2668 good_mbuf[good_mbuf_cnt] = (u16) val;
2669 good_mbuf_cnt++;
2672 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2675 /* Free the good ones back to the mbuf pool thus discarding
2676 * all the bad ones. */
2677 while (good_mbuf_cnt) {
2678 good_mbuf_cnt--;
2680 val = good_mbuf[good_mbuf_cnt];
2681 val = (val << 9) | val | 1;
2683 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2685 kfree(good_mbuf);
2686 return 0;
2689 static void
2690 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2692 u32 val;
2694 val = (mac_addr[0] << 8) | mac_addr[1];
2696 BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2698 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2699 (mac_addr[4] << 8) | mac_addr[5];
2701 BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2704 static inline int
2705 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2707 dma_addr_t mapping;
2708 struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2709 struct bnx2_rx_bd *rxbd =
2710 &rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2711 struct page *page = alloc_page(gfp);
2713 if (!page)
2714 return -ENOMEM;
2715 mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2716 PCI_DMA_FROMDEVICE);
2717 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2718 __free_page(page);
2719 return -EIO;
2722 rx_pg->page = page;
2723 dma_unmap_addr_set(rx_pg, mapping, mapping);
2724 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2725 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2726 return 0;
2729 static void
2730 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2732 struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2733 struct page *page = rx_pg->page;
2735 if (!page)
2736 return;
2738 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2739 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2741 __free_page(page);
2742 rx_pg->page = NULL;
2745 static inline int
2746 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2748 u8 *data;
2749 struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2750 dma_addr_t mapping;
2751 struct bnx2_rx_bd *rxbd =
2752 &rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2754 data = kmalloc(bp->rx_buf_size, gfp);
2755 if (!data)
2756 return -ENOMEM;
2758 mapping = dma_map_single(&bp->pdev->dev,
2759 get_l2_fhdr(data),
2760 bp->rx_buf_use_size,
2761 PCI_DMA_FROMDEVICE);
2762 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2763 kfree(data);
2764 return -EIO;
2767 rx_buf->data = data;
2768 dma_unmap_addr_set(rx_buf, mapping, mapping);
2770 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2771 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2773 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2775 return 0;
2778 static int
2779 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2781 struct status_block *sblk = bnapi->status_blk.msi;
2782 u32 new_link_state, old_link_state;
2783 int is_set = 1;
2785 new_link_state = sblk->status_attn_bits & event;
2786 old_link_state = sblk->status_attn_bits_ack & event;
2787 if (new_link_state != old_link_state) {
2788 if (new_link_state)
2789 BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2790 else
2791 BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2792 } else
2793 is_set = 0;
2795 return is_set;
2798 static void
2799 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2801 spin_lock(&bp->phy_lock);
2803 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2804 bnx2_set_link(bp);
2805 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2806 bnx2_set_remote_link(bp);
2808 spin_unlock(&bp->phy_lock);
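/* The last descriptor of each ring page is a link BD pointing to the
 * next page, so a hardware consumer index that lands on it is bumped
 * one further to skip the unusable slot.  The rx ring below uses the
 * same trick.
 */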
2812 static inline u16
2813 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2815 u16 cons;
2817 /* Tell compiler that status block fields can change. */
2818 barrier();
2819 cons = *bnapi->hw_tx_cons_ptr;
2820 barrier();
2821 if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2822 cons++;
2823 return cons;
2826 static int
2827 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2829 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2830 u16 hw_cons, sw_cons, sw_ring_cons;
2831 int tx_pkt = 0, index;
2832 unsigned int tx_bytes = 0;
2833 struct netdev_queue *txq;
2835 index = (bnapi - bp->bnx2_napi);
2836 txq = netdev_get_tx_queue(bp->dev, index);
2838 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2839 sw_cons = txr->tx_cons;
2841 while (sw_cons != hw_cons) {
2842 struct bnx2_sw_tx_bd *tx_buf;
2843 struct sk_buff *skb;
2844 int i, last;
2846 sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);
2848 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2849 skb = tx_buf->skb;
2851 /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
2852 prefetch(&skb->end);
2854 /* partial BD completions possible with TSO packets */
2855 if (tx_buf->is_gso) {
2856 u16 last_idx, last_ring_idx;
2858 last_idx = sw_cons + tx_buf->nr_frags + 1;
2859 last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2860 if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
2861 last_idx++;
2863 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2864 break;
2868 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2869 skb_headlen(skb), PCI_DMA_TODEVICE);
2871 tx_buf->skb = NULL;
2872 last = tx_buf->nr_frags;
2874 for (i = 0; i < last; i++) {
2875 struct bnx2_sw_tx_bd *tx_buf;
2877 sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2879 tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
2880 dma_unmap_page(&bp->pdev->dev,
2881 dma_unmap_addr(tx_buf, mapping),
2882 skb_frag_size(&skb_shinfo(skb)->frags[i]),
2883 PCI_DMA_TODEVICE);
2886 sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2888 tx_bytes += skb->len;
2889 dev_kfree_skb_any(skb);
2890 tx_pkt++;
2891 if (tx_pkt == budget)
2892 break;
2894 if (hw_cons == sw_cons)
2895 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2898 netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
2899 txr->hw_tx_cons = hw_cons;
2900 txr->tx_cons = sw_cons;
2902 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2903 * before checking for netif_tx_queue_stopped(). Without the
2904 * memory barrier, there is a small possibility that bnx2_start_xmit()
2905 * will miss it and cause the queue to be stopped forever.
2906 */
2907 smp_mb();
2909 if (unlikely(netif_tx_queue_stopped(txq)) &&
2910 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2911 __netif_tx_lock(txq, smp_processor_id());
2912 if ((netif_tx_queue_stopped(txq)) &&
2913 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2914 netif_tx_wake_queue(txq);
2915 __netif_tx_unlock(txq);
2918 return tx_pkt;
2921 static void
2922 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2923 struct sk_buff *skb, int count)
2925 struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
2926 struct bnx2_rx_bd *cons_bd, *prod_bd;
2927 int i;
2928 u16 hw_prod, prod;
2929 u16 cons = rxr->rx_pg_cons;
2931 cons_rx_pg = &rxr->rx_pg_ring[cons];
2933 /* The caller was unable to allocate a new page to replace the
2934 * last one in the frags array, so we need to recycle that page
2935 * and then free the skb.
2936 */
2937 if (skb) {
2938 struct page *page;
2939 struct skb_shared_info *shinfo;
2941 shinfo = skb_shinfo(skb);
2942 shinfo->nr_frags--;
2943 page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2944 __skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
2946 cons_rx_pg->page = page;
2947 dev_kfree_skb(skb);
2950 hw_prod = rxr->rx_pg_prod;
2952 for (i = 0; i < count; i++) {
2953 prod = BNX2_RX_PG_RING_IDX(hw_prod);
2955 prod_rx_pg = &rxr->rx_pg_ring[prod];
2956 cons_rx_pg = &rxr->rx_pg_ring[cons];
2957 cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
2958 [BNX2_RX_IDX(cons)];
2959 prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
2960 [BNX2_RX_IDX(prod)];
2962 if (prod != cons) {
2963 prod_rx_pg->page = cons_rx_pg->page;
2964 cons_rx_pg->page = NULL;
2965 dma_unmap_addr_set(prod_rx_pg, mapping,
2966 dma_unmap_addr(cons_rx_pg, mapping));
2968 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2969 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2972 cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
2973 hw_prod = BNX2_NEXT_RX_BD(hw_prod);
2975 rxr->rx_pg_prod = hw_prod;
2976 rxr->rx_pg_cons = cons;
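/* Recycle an rx buffer whose replacement could not be allocated: move
 * the data pointer and DMA mapping from the consumer slot straight to
 * the producer slot so the descriptor can be reposted without a new
 * allocation or remap.
 */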
2979 static inline void
2980 bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2981 u8 *data, u16 cons, u16 prod)
2983 struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
2984 struct bnx2_rx_bd *cons_bd, *prod_bd;
2986 cons_rx_buf = &rxr->rx_buf_ring[cons];
2987 prod_rx_buf = &rxr->rx_buf_ring[prod];
2989 dma_sync_single_for_device(&bp->pdev->dev,
2990 dma_unmap_addr(cons_rx_buf, mapping),
2991 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2993 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2995 prod_rx_buf->data = data;
2997 if (cons == prod)
2998 return;
3000 dma_unmap_addr_set(prod_rx_buf, mapping,
3001 dma_unmap_addr(cons_rx_buf, mapping));
3003 cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
3004 prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
3005 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
3006 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
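/* Build an skb for a received frame.  A replacement buffer is
 * allocated first; if that fails, the old one is recycled and the
 * frame dropped.  For split (jumbo) frames the first hdr_len bytes
 * sit in the linear buffer and the rest arrives in page-ring pages
 * attached as frags, with the trailing 4-byte CRC trimmed from the
 * last fragment.
 */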
3009 static struct sk_buff *
3010 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
3011 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
3012 u32 ring_idx)
3014 int err;
3015 u16 prod = ring_idx & 0xffff;
3016 struct sk_buff *skb;
3018 err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
3019 if (unlikely(err)) {
3020 bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
3021 error:
3022 if (hdr_len) {
3023 unsigned int raw_len = len + 4;
3024 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3026 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3028 return NULL;
3031 dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3032 PCI_DMA_FROMDEVICE);
3033 skb = build_skb(data, 0);
3034 if (!skb) {
3035 kfree(data);
3036 goto error;
3038 skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
3039 if (hdr_len == 0) {
3040 skb_put(skb, len);
3041 return skb;
3042 } else {
3043 unsigned int i, frag_len, frag_size, pages;
3044 struct bnx2_sw_pg *rx_pg;
3045 u16 pg_cons = rxr->rx_pg_cons;
3046 u16 pg_prod = rxr->rx_pg_prod;
3048 frag_size = len + 4 - hdr_len;
3049 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3050 skb_put(skb, hdr_len);
3052 for (i = 0; i < pages; i++) {
3053 dma_addr_t mapping_old;
3055 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3056 if (unlikely(frag_len <= 4)) {
3057 unsigned int tail = 4 - frag_len;
3059 rxr->rx_pg_cons = pg_cons;
3060 rxr->rx_pg_prod = pg_prod;
3061 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3062 pages - i);
3063 skb->len -= tail;
3064 if (i == 0) {
3065 skb->tail -= tail;
3066 } else {
3067 skb_frag_t *frag =
3068 &skb_shinfo(skb)->frags[i - 1];
3069 skb_frag_size_sub(frag, tail);
3070 skb->data_len -= tail;
3072 return skb;
3074 rx_pg = &rxr->rx_pg_ring[pg_cons];
3076 /* Don't unmap yet. If we're unable to allocate a new
3077 * page, we need to recycle the page and the DMA addr.
3078 */
3079 mapping_old = dma_unmap_addr(rx_pg, mapping);
3080 if (i == pages - 1)
3081 frag_len -= 4;
3083 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3084 rx_pg->page = NULL;
3086 err = bnx2_alloc_rx_page(bp, rxr,
3087 BNX2_RX_PG_RING_IDX(pg_prod),
3088 GFP_ATOMIC);
3089 if (unlikely(err)) {
3090 rxr->rx_pg_cons = pg_cons;
3091 rxr->rx_pg_prod = pg_prod;
3092 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3093 pages - i);
3094 return NULL;
3097 dma_unmap_page(&bp->pdev->dev, mapping_old,
3098 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3100 frag_size -= frag_len;
3101 skb->data_len += frag_len;
3102 skb->truesize += PAGE_SIZE;
3103 skb->len += frag_len;
3105 pg_prod = BNX2_NEXT_RX_BD(pg_prod);
3106 pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
3108 rxr->rx_pg_prod = pg_prod;
3109 rxr->rx_pg_cons = pg_cons;
3111 return skb;
3114 static inline u16
3115 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3117 u16 cons;
3119 /* Tell compiler that status block fields can change. */
3120 barrier();
3121 cons = *bnapi->hw_rx_cons_ptr;
3122 barrier();
3123 if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3124 cons++;
3125 return cons;
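/* Main rx completion loop.  For each completed descriptor: sync the
 * l2_fhdr for the CPU, prefetch the next buffer, recycle frames with
 * error status, copy small frames into a freshly allocated skb, and
 * hand larger ones to bnx2_rx_skb().  VLAN tag, checksum and RSS
 * hash results from the frame header are applied before
 * napi_gro_receive().
 */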
3128 static int
3129 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3131 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3132 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3133 struct l2_fhdr *rx_hdr;
3134 int rx_pkt = 0, pg_ring_used = 0;
3136 if (budget <= 0)
3137 return rx_pkt;
3139 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3140 sw_cons = rxr->rx_cons;
3141 sw_prod = rxr->rx_prod;
3143 /* Memory barrier necessary as speculative reads of the rx
3144 * buffer can be ahead of the index in the status block
3145 */
3146 rmb();
3147 while (sw_cons != hw_cons) {
3148 unsigned int len, hdr_len;
3149 u32 status;
3150 struct bnx2_sw_bd *rx_buf, *next_rx_buf;
3151 struct sk_buff *skb;
3152 dma_addr_t dma_addr;
3153 u8 *data;
3154 u16 next_ring_idx;
3156 sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
3157 sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
3159 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3160 data = rx_buf->data;
3161 rx_buf->data = NULL;
3163 rx_hdr = get_l2_fhdr(data);
3164 prefetch(rx_hdr);
3166 dma_addr = dma_unmap_addr(rx_buf, mapping);
3168 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3169 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3170 PCI_DMA_FROMDEVICE);
3172 next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
3173 next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
3174 prefetch(get_l2_fhdr(next_rx_buf->data));
3176 len = rx_hdr->l2_fhdr_pkt_len;
3177 status = rx_hdr->l2_fhdr_status;
3179 hdr_len = 0;
3180 if (status & L2_FHDR_STATUS_SPLIT) {
3181 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3182 pg_ring_used = 1;
3183 } else if (len > bp->rx_jumbo_thresh) {
3184 hdr_len = bp->rx_jumbo_thresh;
3185 pg_ring_used = 1;
3188 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3189 L2_FHDR_ERRORS_PHY_DECODE |
3190 L2_FHDR_ERRORS_ALIGNMENT |
3191 L2_FHDR_ERRORS_TOO_SHORT |
3192 L2_FHDR_ERRORS_GIANT_FRAME))) {
3194 bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3195 sw_ring_prod);
3196 if (pg_ring_used) {
3197 int pages;
3199 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3201 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3203 goto next_rx;
3206 len -= 4;
3208 if (len <= bp->rx_copy_thresh) {
3209 skb = netdev_alloc_skb(bp->dev, len + 6);
3210 if (skb == NULL) {
3211 bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3212 sw_ring_prod);
3213 goto next_rx;
3216 /* aligned copy */
3217 memcpy(skb->data,
3218 (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3219 len + 6);
3220 skb_reserve(skb, 6);
3221 skb_put(skb, len);
3223 bnx2_reuse_rx_data(bp, rxr, data,
3224 sw_ring_cons, sw_ring_prod);
3226 } else {
3227 skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3228 (sw_ring_cons << 16) | sw_ring_prod);
3229 if (!skb)
3230 goto next_rx;
3232 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3233 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3234 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
3236 skb->protocol = eth_type_trans(skb, bp->dev);
3238 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
3239 (ntohs(skb->protocol) != 0x8100)) {
3241 dev_kfree_skb(skb);
3242 goto next_rx;
3246 skb_checksum_none_assert(skb);
3247 if ((bp->dev->features & NETIF_F_RXCSUM) &&
3248 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3249 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3251 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3252 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3253 skb->ip_summed = CHECKSUM_UNNECESSARY;
3255 if ((bp->dev->features & NETIF_F_RXHASH) &&
3256 ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3257 L2_FHDR_STATUS_USE_RXHASH))
3258 skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
3259 PKT_HASH_TYPE_L3);
3261 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3262 napi_gro_receive(&bnapi->napi, skb);
3263 rx_pkt++;
3265 next_rx:
3266 sw_cons = BNX2_NEXT_RX_BD(sw_cons);
3267 sw_prod = BNX2_NEXT_RX_BD(sw_prod);
3269 if ((rx_pkt == budget))
3270 break;
3272 /* Refresh hw_cons to see if there is new work */
3273 if (sw_cons == hw_cons) {
3274 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3275 rmb();
3278 rxr->rx_cons = sw_cons;
3279 rxr->rx_prod = sw_prod;
3281 if (pg_ring_used)
3282 BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3284 BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3286 BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3288 mmiowb();
3290 return rx_pkt;
3294 /* MSI ISR - The only difference between this and the INTx ISR
3295 * is that the MSI interrupt is always serviced.
3296 */
3297 static irqreturn_t
3298 bnx2_msi(int irq, void *dev_instance)
3300 struct bnx2_napi *bnapi = dev_instance;
3301 struct bnx2 *bp = bnapi->bp;
3303 prefetch(bnapi->status_blk.msi);
3304 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3305 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3306 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3308 /* Return here if interrupt is disabled. */
3309 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3310 return IRQ_HANDLED;
3312 napi_schedule(&bnapi->napi);
3314 return IRQ_HANDLED;
3317 static irqreturn_t
3318 bnx2_msi_1shot(int irq, void *dev_instance)
3320 struct bnx2_napi *bnapi = dev_instance;
3321 struct bnx2 *bp = bnapi->bp;
3323 prefetch(bnapi->status_blk.msi);
3325 /* Return here if interrupt is disabled. */
3326 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3327 return IRQ_HANDLED;
3329 napi_schedule(&bnapi->napi);
3331 return IRQ_HANDLED;
3334 static irqreturn_t
3335 bnx2_interrupt(int irq, void *dev_instance)
3337 struct bnx2_napi *bnapi = dev_instance;
3338 struct bnx2 *bp = bnapi->bp;
3339 struct status_block *sblk = bnapi->status_blk.msi;
3341 /* When using INTx, it is possible for the interrupt to arrive
3342 * at the CPU before the status block posted prior to the
3343 * interrupt. Reading a register will flush the status block.
3344 * When using MSI, the MSI message will always complete after
3345 * the status block write.
3346 */
3347 if ((sblk->status_idx == bnapi->last_status_idx) &&
3348 (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3349 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3350 return IRQ_NONE;
3352 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3353 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3354 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3356 /* Read back to deassert IRQ immediately to avoid too many
3357 * spurious interrupts.
3358 */
3359 BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3361 /* Return here if interrupt is shared and is disabled. */
3362 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3363 return IRQ_HANDLED;
3365 if (napi_schedule_prep(&bnapi->napi)) {
3366 bnapi->last_status_idx = sblk->status_idx;
3367 __napi_schedule(&bnapi->napi);
3370 return IRQ_HANDLED;
3373 static inline int
3374 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3376 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3377 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3379 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3380 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3381 return 1;
3382 return 0;
3385 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3386 STATUS_ATTN_BITS_TIMER_ABORT)
3388 static inline int
3389 bnx2_has_work(struct bnx2_napi *bnapi)
3391 struct status_block *sblk = bnapi->status_blk.msi;
3393 if (bnx2_has_fast_work(bnapi))
3394 return 1;
3396 #ifdef BCM_CNIC
3397 if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3398 return 1;
3399 #endif
3401 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3402 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3403 return 1;
3405 return 0;
3408 static void
3409 bnx2_chk_missed_msi(struct bnx2 *bp)
3411 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3412 u32 msi_ctrl;
3414 if (bnx2_has_work(bnapi)) {
3415 msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3416 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3417 return;
3419 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3420 BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3421 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3422 BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3423 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3427 bp->idle_chk_status_idx = bnapi->last_status_idx;
3430 #ifdef BCM_CNIC
3431 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3433 struct cnic_ops *c_ops;
3435 if (!bnapi->cnic_present)
3436 return;
3438 rcu_read_lock();
3439 c_ops = rcu_dereference(bp->cnic_ops);
3440 if (c_ops)
3441 bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3442 bnapi->status_blk.msi);
3443 rcu_read_unlock();
3445 #endif
3447 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3449 struct status_block *sblk = bnapi->status_blk.msi;
3450 u32 status_attn_bits = sblk->status_attn_bits;
3451 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3453 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3454 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3456 bnx2_phy_int(bp, bnapi);
3458 /* This is needed to take care of transient status
3459 * during link changes.
3460 */
3461 BNX2_WR(bp, BNX2_HC_COMMAND,
3462 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3463 BNX2_RD(bp, BNX2_HC_COMMAND);
3467 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3468 int work_done, int budget)
3470 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3471 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3473 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3474 bnx2_tx_int(bp, bnapi, 0);
3476 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3477 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3479 return work_done;
3482 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3484 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3485 struct bnx2 *bp = bnapi->bp;
3486 int work_done = 0;
3487 struct status_block_msix *sblk = bnapi->status_blk.msix;
3489 while (1) {
3490 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3491 if (unlikely(work_done >= budget))
3492 break;
3494 bnapi->last_status_idx = sblk->status_idx;
3495 /* status idx must be read before checking for more work. */
3496 rmb();
3497 if (likely(!bnx2_has_fast_work(bnapi))) {
3499 napi_complete(napi);
3500 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3501 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3502 bnapi->last_status_idx);
3503 break;
3506 return work_done;
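/* INTx/MSI NAPI poll.  When the budget is not exhausted and no work
 * remains, the interrupt is acked with the last seen status index;
 * in INTx mode this takes two writes, one with MASK_INT still set to
 * update the index and a second one that unmasks the line.
 */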
3509 static int bnx2_poll(struct napi_struct *napi, int budget)
3511 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3512 struct bnx2 *bp = bnapi->bp;
3513 int work_done = 0;
3514 struct status_block *sblk = bnapi->status_blk.msi;
3516 while (1) {
3517 bnx2_poll_link(bp, bnapi);
3519 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3521 #ifdef BCM_CNIC
3522 bnx2_poll_cnic(bp, bnapi);
3523 #endif
3525 /* bnapi->last_status_idx is used below to tell the hw how
3526 * much work has been processed, so we must read it before
3527 * checking for more work.
3528 */
3529 bnapi->last_status_idx = sblk->status_idx;
3531 if (unlikely(work_done >= budget))
3532 break;
3534 rmb();
3535 if (likely(!bnx2_has_work(bnapi))) {
3536 napi_complete(napi);
3537 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3538 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3539 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3540 bnapi->last_status_idx);
3541 break;
3543 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3544 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3545 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3546 bnapi->last_status_idx);
3548 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3549 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3550 bnapi->last_status_idx);
3551 break;
3555 return work_done;
3558 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3559 * from set_multicast.
3560 */
3561 static void
3562 bnx2_set_rx_mode(struct net_device *dev)
3564 struct bnx2 *bp = netdev_priv(dev);
3565 u32 rx_mode, sort_mode;
3566 struct netdev_hw_addr *ha;
3567 int i;
3569 if (!netif_running(dev))
3570 return;
3572 spin_lock_bh(&bp->phy_lock);
3574 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3575 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3576 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3577 if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3578 (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3579 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3580 if (dev->flags & IFF_PROMISC) {
3581 /* Promiscuous mode. */
3582 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3583 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3584 BNX2_RPM_SORT_USER0_PROM_VLAN;
3586 else if (dev->flags & IFF_ALLMULTI) {
3587 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3588 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3589 0xffffffff);
3591 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3593 else {
3594 /* Accept one or more multicast(s). */
3595 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3596 u32 regidx;
3597 u32 bit;
3598 u32 crc;
3600 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3602 netdev_for_each_mc_addr(ha, dev) {
3603 crc = ether_crc_le(ETH_ALEN, ha->addr);
3604 bit = crc & 0xff;
3605 regidx = (bit & 0xe0) >> 5;
3606 bit &= 0x1f;
3607 mc_filter[regidx] |= (1 << bit);
3610 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3611 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3612 mc_filter[i]);
3615 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3618 if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3619 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3620 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3621 BNX2_RPM_SORT_USER0_PROM_VLAN;
3622 } else if (!(dev->flags & IFF_PROMISC)) {
3623 /* Add all entries into the match filter list */
3624 i = 0;
3625 netdev_for_each_uc_addr(ha, dev) {
3626 bnx2_set_mac_addr(bp, ha->addr,
3627 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3628 sort_mode |= (1 <<
3629 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3630 i++;
3635 if (rx_mode != bp->rx_mode) {
3636 bp->rx_mode = rx_mode;
3637 BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3640 BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3641 BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3642 BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3644 spin_unlock_bh(&bp->phy_lock);
3647 static int
3648 check_fw_section(const struct firmware *fw,
3649 const struct bnx2_fw_file_section *section,
3650 u32 alignment, bool non_empty)
3652 u32 offset = be32_to_cpu(section->offset);
3653 u32 len = be32_to_cpu(section->len);
3655 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3656 return -EINVAL;
3657 if ((non_empty && len == 0) || len > fw->size - offset ||
3658 len & (alignment - 1))
3659 return -EINVAL;
3660 return 0;
3663 static int
3664 check_mips_fw_entry(const struct firmware *fw,
3665 const struct bnx2_mips_fw_file_entry *entry)
3667 if (check_fw_section(fw, &entry->text, 4, true) ||
3668 check_fw_section(fw, &entry->data, 4, false) ||
3669 check_fw_section(fw, &entry->rodata, 4, false))
3670 return -EINVAL;
3671 return 0;
3674 static void bnx2_release_firmware(struct bnx2 *bp)
3676 if (bp->rv2p_firmware) {
3677 release_firmware(bp->mips_firmware);
3678 release_firmware(bp->rv2p_firmware);
3679 bp->rv2p_firmware = NULL;
3683 static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3685 const char *mips_fw_file, *rv2p_fw_file;
3686 const struct bnx2_mips_fw_file *mips_fw;
3687 const struct bnx2_rv2p_fw_file *rv2p_fw;
3688 int rc;
3690 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
3691 mips_fw_file = FW_MIPS_FILE_09;
3692 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
3693 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
3694 rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3695 else
3696 rv2p_fw_file = FW_RV2P_FILE_09;
3697 } else {
3698 mips_fw_file = FW_MIPS_FILE_06;
3699 rv2p_fw_file = FW_RV2P_FILE_06;
3702 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3703 if (rc) {
3704 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3705 goto out;
3708 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3709 if (rc) {
3710 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3711 goto err_release_mips_firmware;
3713 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3714 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3715 if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3716 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3717 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3718 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3719 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3720 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3721 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3722 rc = -EINVAL;
3723 goto err_release_firmware;
3725 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3726 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3727 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3728 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3729 rc = -EINVAL;
3730 goto err_release_firmware;
3732 out:
3733 return rc;
3735 err_release_firmware:
3736 release_firmware(bp->rv2p_firmware);
3737 bp->rv2p_firmware = NULL;
3738 err_release_mips_firmware:
3739 release_firmware(bp->mips_firmware);
3740 goto out;
3743 static int bnx2_request_firmware(struct bnx2 *bp)
3745 return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3748 static u32
3749 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3751 switch (idx) {
3752 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3753 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3754 rv2p_code |= RV2P_BD_PAGE_SIZE;
3755 break;
3757 return rv2p_code;
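/* Download RV2P firmware: instructions are written 64 bits at a time
 * through the INSTR_HIGH/INSTR_LOW registers, each pair committed
 * with an addressed RDWR command.  The fixup table then patches
 * selected instructions in place (e.g. the BD page size), and the
 * processor is left in reset to be un-stalled later.
 */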
3760 static int
3761 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3762 const struct bnx2_rv2p_fw_file_entry *fw_entry)
3764 u32 rv2p_code_len, file_offset;
3765 __be32 *rv2p_code;
3766 int i;
3767 u32 val, cmd, addr;
3769 rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3770 file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3772 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3774 if (rv2p_proc == RV2P_PROC1) {
3775 cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3776 addr = BNX2_RV2P_PROC1_ADDR_CMD;
3777 } else {
3778 cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3779 addr = BNX2_RV2P_PROC2_ADDR_CMD;
3782 for (i = 0; i < rv2p_code_len; i += 8) {
3783 BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3784 rv2p_code++;
3785 BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3786 rv2p_code++;
3788 val = (i / 8) | cmd;
3789 BNX2_WR(bp, addr, val);
3792 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3793 for (i = 0; i < 8; i++) {
3794 u32 loc, code;
3796 loc = be32_to_cpu(fw_entry->fixup[i]);
3797 if (loc && ((loc * 4) < rv2p_code_len)) {
3798 code = be32_to_cpu(*(rv2p_code + loc - 1));
3799 BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3800 code = be32_to_cpu(*(rv2p_code + loc));
3801 code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3802 BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3804 val = (loc / 2) | cmd;
3805 BNX2_WR(bp, addr, val);
3809 /* Reset the processor, un-stall is done later. */
3810 if (rv2p_proc == RV2P_PROC1) {
3811 BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3813 else {
3814 BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3817 return 0;
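/* Download firmware to one of the on-chip MIPS CPUs: halt the CPU,
 * copy the text, data and rodata sections into its scratchpad window
 * (spad_base translates the MIPS view of the addresses), clear the
 * prefetched instruction, set the program counter to the entry point
 * and release the halt.
 */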
3820 static int
3821 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3822 const struct bnx2_mips_fw_file_entry *fw_entry)
3824 u32 addr, len, file_offset;
3825 __be32 *data;
3826 u32 offset;
3827 u32 val;
3829 /* Halt the CPU. */
3830 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3831 val |= cpu_reg->mode_value_halt;
3832 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3833 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3835 /* Load the Text area. */
3836 addr = be32_to_cpu(fw_entry->text.addr);
3837 len = be32_to_cpu(fw_entry->text.len);
3838 file_offset = be32_to_cpu(fw_entry->text.offset);
3839 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3841 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3842 if (len) {
3843 int j;
3845 for (j = 0; j < (len / 4); j++, offset += 4)
3846 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3849 /* Load the Data area. */
3850 addr = be32_to_cpu(fw_entry->data.addr);
3851 len = be32_to_cpu(fw_entry->data.len);
3852 file_offset = be32_to_cpu(fw_entry->data.offset);
3853 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3855 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3856 if (len) {
3857 int j;
3859 for (j = 0; j < (len / 4); j++, offset += 4)
3860 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3863 /* Load the Read-Only area. */
3864 addr = be32_to_cpu(fw_entry->rodata.addr);
3865 len = be32_to_cpu(fw_entry->rodata.len);
3866 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3867 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3869 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3870 if (len) {
3871 int j;
3873 for (j = 0; j < (len / 4); j++, offset += 4)
3874 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3877 /* Clear the pre-fetch instruction. */
3878 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3880 val = be32_to_cpu(fw_entry->start_addr);
3881 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3883 /* Start the CPU. */
3884 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3885 val &= ~cpu_reg->mode_value_halt;
3886 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3887 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3889 return 0;
3892 static int
3893 bnx2_init_cpus(struct bnx2 *bp)
3895 const struct bnx2_mips_fw_file *mips_fw =
3896 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3897 const struct bnx2_rv2p_fw_file *rv2p_fw =
3898 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3899 int rc;
3901 /* Initialize the RV2P processor. */
3902 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3903 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3905 /* Initialize the RX Processor. */
3906 rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3907 if (rc)
3908 goto init_cpu_err;
3910 /* Initialize the TX Processor. */
3911 rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3912 if (rc)
3913 goto init_cpu_err;
3915 /* Initialize the TX Patch-up Processor. */
3916 rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3917 if (rc)
3918 goto init_cpu_err;
3920 /* Initialize the Completion Processor. */
3921 rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3922 if (rc)
3923 goto init_cpu_err;
3925 /* Initialize the Command Processor. */
3926 rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3928 init_cpu_err:
3929 return rc;
3932 static void
3933 bnx2_setup_wol(struct bnx2 *bp)
3935 int i;
3936 u32 val, wol_msg;
3938 if (bp->wol) {
3939 u32 advertising;
3940 u8 autoneg;
3942 autoneg = bp->autoneg;
3943 advertising = bp->advertising;
3945 if (bp->phy_port == PORT_TP) {
3946 bp->autoneg = AUTONEG_SPEED;
3947 bp->advertising = ADVERTISED_10baseT_Half |
3948 ADVERTISED_10baseT_Full |
3949 ADVERTISED_100baseT_Half |
3950 ADVERTISED_100baseT_Full |
3951 ADVERTISED_Autoneg;
3954 spin_lock_bh(&bp->phy_lock);
3955 bnx2_setup_phy(bp, bp->phy_port);
3956 spin_unlock_bh(&bp->phy_lock);
3958 bp->autoneg = autoneg;
3959 bp->advertising = advertising;
3961 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3963 val = BNX2_RD(bp, BNX2_EMAC_MODE);
3965 /* Enable port mode. */
3966 val &= ~BNX2_EMAC_MODE_PORT;
3967 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3968 BNX2_EMAC_MODE_ACPI_RCVD |
3969 BNX2_EMAC_MODE_MPKT;
3970 if (bp->phy_port == PORT_TP) {
3971 val |= BNX2_EMAC_MODE_PORT_MII;
3972 } else {
3973 val |= BNX2_EMAC_MODE_PORT_GMII;
3974 if (bp->line_speed == SPEED_2500)
3975 val |= BNX2_EMAC_MODE_25G_MODE;
3978 BNX2_WR(bp, BNX2_EMAC_MODE, val);
3980 /* receive all multicast */
3981 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3982 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3983 0xffffffff);
3985 BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
3987 val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
3988 BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3989 BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
3990 BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
3992 /* Need to enable EMAC and RPM for WOL. */
3993 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3994 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3995 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3996 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3998 val = BNX2_RD(bp, BNX2_RPM_CONFIG);
3999 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4000 BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4002 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4003 } else {
4004 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4007 if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
4008 u32 val;
4010 wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
4011 if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
4012 bnx2_fw_sync(bp, wol_msg, 1, 0);
4013 return;
4015 /* Tell firmware not to power down the PHY yet, otherwise
4016 * the chip will take a long time to respond to MMIO reads.
4017 */
4018 val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
4019 bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
4020 val | BNX2_PORT_FEATURE_ASF_ENABLED);
4021 bnx2_fw_sync(bp, wol_msg, 1, 0);
4022 bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
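/* Move the chip between D0 and D3hot.  Returning to D0 rewrites
 * EMAC_MODE to leave magic packet mode and clears the RPM ACPI
 * enable; entering D3hot programs the WOL configuration and, on 5709
 * parts that have not already messaged the firmware, records an
 * "unprepared" PM state in shared memory so the firmware keeps the
 * PHY up for the other port.
 */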
4027 static int
4028 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
4030 switch (state) {
4031 case PCI_D0: {
4032 u32 val;
4034 pci_enable_wake(bp->pdev, PCI_D0, false);
4035 pci_set_power_state(bp->pdev, PCI_D0);
4037 val = BNX2_RD(bp, BNX2_EMAC_MODE);
4038 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
4039 val &= ~BNX2_EMAC_MODE_MPKT;
4040 BNX2_WR(bp, BNX2_EMAC_MODE, val);
4042 val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4043 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4044 BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4045 break;
4047 case PCI_D3hot: {
4048 bnx2_setup_wol(bp);
4049 pci_wake_from_d3(bp->pdev, bp->wol);
4050 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4051 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
4053 if (bp->wol)
4054 pci_set_power_state(bp->pdev, PCI_D3hot);
4055 break;
4058 if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4059 u32 val;
4061 /* Tell firmware not to power down the PHY yet,
4062 * otherwise the other port may not respond to
4063 * MMIO reads.
4064 */
4065 val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
4066 val &= ~BNX2_CONDITION_PM_STATE_MASK;
4067 val |= BNX2_CONDITION_PM_STATE_UNPREP;
4068 bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
4070 pci_set_power_state(bp->pdev, PCI_D3hot);
4072 /* No more memory access after this point until
4073 * device is brought back to D0.
4074 */
4075 break;
4077 default:
4078 return -EINVAL;
4080 return 0;
4083 static int
4084 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4086 u32 val;
4087 int j;
4089 /* Request access to the flash interface. */
4090 BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4091 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4092 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4093 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4094 break;
4096 udelay(5);
4099 if (j >= NVRAM_TIMEOUT_COUNT)
4100 return -EBUSY;
4102 return 0;
4105 static int
4106 bnx2_release_nvram_lock(struct bnx2 *bp)
4108 int j;
4109 u32 val;
4111 /* Relinquish nvram interface. */
4112 BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4114 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4115 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4116 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4117 break;
4119 udelay(5);
4122 if (j >= NVRAM_TIMEOUT_COUNT)
4123 return -EBUSY;
4125 return 0;
4129 static int
4130 bnx2_enable_nvram_write(struct bnx2 *bp)
4132 u32 val;
4134 val = BNX2_RD(bp, BNX2_MISC_CFG);
4135 BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4137 if (bp->flash_info->flags & BNX2_NV_WREN) {
4138 int j;
4140 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4141 BNX2_WR(bp, BNX2_NVM_COMMAND,
4142 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4144 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4145 udelay(5);
4147 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4148 if (val & BNX2_NVM_COMMAND_DONE)
4149 break;
4152 if (j >= NVRAM_TIMEOUT_COUNT)
4153 return -EBUSY;
4155 return 0;
4158 static void
4159 bnx2_disable_nvram_write(struct bnx2 *bp)
4161 u32 val;
4163 val = BNX2_RD(bp, BNX2_MISC_CFG);
4164 BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4168 static void
4169 bnx2_enable_nvram_access(struct bnx2 *bp)
4171 u32 val;
4173 val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4174 /* Enable both bits, even on read. */
4175 BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4176 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4179 static void
4180 bnx2_disable_nvram_access(struct bnx2 *bp)
4182 u32 val;
4184 val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4185 /* Disable both bits, even after read. */
4186 BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4187 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4188 BNX2_NVM_ACCESS_ENABLE_WR_EN));
4191 static int
4192 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4194 u32 cmd;
4195 int j;
4197 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4198 /* Buffered flash, no erase needed */
4199 return 0;
4201 /* Build an erase command */
4202 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4203 BNX2_NVM_COMMAND_DOIT;
4205 /* Need to clear DONE bit separately. */
4206 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4208 /* Address of the NVRAM page to erase. */
4209 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4211 /* Issue an erase command. */
4212 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4214 /* Wait for completion. */
4215 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4216 u32 val;
4218 udelay(5);
4220 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4221 if (val & BNX2_NVM_COMMAND_DONE)
4222 break;
4225 if (j >= NVRAM_TIMEOUT_COUNT)
4226 return -EBUSY;
4228 return 0;
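/* Read one 32-bit word from NVRAM.  For flash parts that need it,
 * the linear offset is first translated to a page-based address:
 * (offset / page_size) << page_bits, plus the remainder within the
 * page.  The word is stored to ret_val in big-endian byte order.
 */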
4231 static int
4232 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4234 u32 cmd;
4235 int j;
4237 /* Build the command word. */
4238 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4240 /* Calculate an offset of a buffered flash, not needed for 5709. */
4241 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4242 offset = ((offset / bp->flash_info->page_size) <<
4243 bp->flash_info->page_bits) +
4244 (offset % bp->flash_info->page_size);
4247 /* Need to clear DONE bit separately. */
4248 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4250 /* Address of the NVRAM to read from. */
4251 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4253 /* Issue a read command. */
4254 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4256 /* Wait for completion. */
4257 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4258 u32 val;
4260 udelay(5);
4262 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4263 if (val & BNX2_NVM_COMMAND_DONE) {
4264 __be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4265 memcpy(ret_val, &v, 4);
4266 break;
4269 if (j >= NVRAM_TIMEOUT_COUNT)
4270 return -EBUSY;
4272 return 0;
4276 static int
4277 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4279 u32 cmd;
4280 __be32 val32;
4281 int j;
4283 /* Build the command word. */
4284 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4286 /* Calculate an offset of a buffered flash, not needed for 5709. */
4287 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4288 offset = ((offset / bp->flash_info->page_size) <<
4289 bp->flash_info->page_bits) +
4290 (offset % bp->flash_info->page_size);
4293 /* Need to clear DONE bit separately. */
4294 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4296 memcpy(&val32, val, 4);
4298 /* Write the data. */
4299 BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4301 /* Address of the NVRAM to write to. */
4302 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4304 /* Issue the write command. */
4305 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4307 /* Wait for completion. */
4308 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4309 udelay(5);
4311 if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4312 break;
4314 if (j >= NVRAM_TIMEOUT_COUNT)
4315 return -EBUSY;
4317 return 0;
4320 static int
4321 bnx2_init_nvram(struct bnx2 *bp)
4323 u32 val;
4324 int j, entry_count, rc = 0;
4325 const struct flash_spec *flash;
4327 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4328 bp->flash_info = &flash_5709;
4329 goto get_flash_size;
4332 /* Determine the selected interface. */
4333 val = BNX2_RD(bp, BNX2_NVM_CFG1);
4335 entry_count = ARRAY_SIZE(flash_table);
4337 if (val & 0x40000000) {
4339 /* Flash interface has been reconfigured */
4340 for (j = 0, flash = &flash_table[0]; j < entry_count;
4341 j++, flash++) {
4342 if ((val & FLASH_BACKUP_STRAP_MASK) ==
4343 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4344 bp->flash_info = flash;
4345 break;
4349 else {
4350 u32 mask;
4351 /* Flash interface has not yet been reconfigured */
4353 if (val & (1 << 23))
4354 mask = FLASH_BACKUP_STRAP_MASK;
4355 else
4356 mask = FLASH_STRAP_MASK;
4358 for (j = 0, flash = &flash_table[0]; j < entry_count;
4359 j++, flash++) {
4361 if ((val & mask) == (flash->strapping & mask)) {
4362 bp->flash_info = flash;
4364 /* Request access to the flash interface. */
4365 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4366 return rc;
4368 /* Enable access to flash interface */
4369 bnx2_enable_nvram_access(bp);
4371 /* Reconfigure the flash interface */
4372 BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
4373 BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
4374 BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
4375 BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4377 /* Disable access to flash interface */
4378 bnx2_disable_nvram_access(bp);
4379 bnx2_release_nvram_lock(bp);
4381 break;
4384 } /* if (val & 0x40000000) */
4386 if (j == entry_count) {
4387 bp->flash_info = NULL;
4388 pr_alert("Unknown flash/EEPROM type\n");
4389 return -ENODEV;
4392 get_flash_size:
4393 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4394 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4395 if (val)
4396 bp->flash_size = val;
4397 else
4398 bp->flash_size = bp->flash_info->total_size;
4400 return rc;
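/*
 * Flash detection summary: on the 5709 the flash type is fixed and only
 * the size is read from shared memory. On older chips, bit 0x40000000 of
 * BNX2_NVM_CFG1 means the flash interface has already been reconfigured,
 * so the table is matched on config1; otherwise the strapping pins are
 * matched (bit 23 selects the backup strap mask) and the winning entry's
 * config registers are programmed into the chip under the NVRAM lock.
 */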
4403 static int
4404 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4405 int buf_size)
4407 int rc = 0;
4408 u32 cmd_flags, offset32, len32, extra;
4410 if (buf_size == 0)
4411 return 0;
4413 /* Request access to the flash interface. */
4414 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4415 return rc;
4417 /* Enable access to flash interface */
4418 bnx2_enable_nvram_access(bp);
4420 len32 = buf_size;
4421 offset32 = offset;
4422 extra = 0;
4424 cmd_flags = 0;
4426 if (offset32 & 3) {
4427 u8 buf[4];
4428 u32 pre_len;
4430 offset32 &= ~3;
4431 pre_len = 4 - (offset & 3);
4433 if (pre_len >= len32) {
4434 pre_len = len32;
4435 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4436 BNX2_NVM_COMMAND_LAST;
4438 else {
4439 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4442 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4444 if (rc)
4445 return rc;
4447 memcpy(ret_buf, buf + (offset & 3), pre_len);
4449 offset32 += 4;
4450 ret_buf += pre_len;
4451 len32 -= pre_len;
4453 if (len32 & 3) {
4454 extra = 4 - (len32 & 3);
4455 len32 = (len32 + 4) & ~3;
4458 if (len32 == 4) {
4459 u8 buf[4];
4461 if (cmd_flags)
4462 cmd_flags = BNX2_NVM_COMMAND_LAST;
4463 else
4464 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4465 BNX2_NVM_COMMAND_LAST;
4467 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4469 memcpy(ret_buf, buf, 4 - extra);
4471 else if (len32 > 0) {
4472 u8 buf[4];
4474 /* Read the first word. */
4475 if (cmd_flags)
4476 cmd_flags = 0;
4477 else
4478 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4480 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4482 /* Advance to the next dword. */
4483 offset32 += 4;
4484 ret_buf += 4;
4485 len32 -= 4;
4487 while (len32 > 4 && rc == 0) {
4488 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4490 /* Advance to the next dword. */
4491 offset32 += 4;
4492 ret_buf += 4;
4493 len32 -= 4;
4496 if (rc)
4497 return rc;
4499 cmd_flags = BNX2_NVM_COMMAND_LAST;
4500 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4502 memcpy(ret_buf, buf, 4 - extra);
4505 /* Disable access to flash interface */
4506 bnx2_disable_nvram_access(bp);
4508 bnx2_release_nvram_lock(bp);
4510 return rc;
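/*
 * Worked example of the alignment handling above: a read of 9 bytes at
 * offset 6 becomes a head read of the dword at offset 4 (copying the last
 * pre_len = 2 bytes), one full dword at offset 8, and a tail read of the
 * dword at offset 12 (copying 4 - extra = 3 bytes), so exactly bytes
 * 6..14 are returned. The FIRST/LAST command flags bracket the whole
 * sequence for the NVRAM controller.
 */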
4513 static int
4514 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4515 int buf_size)
4517 u32 written, offset32, len32;
4518 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4519 int rc = 0;
4520 int align_start, align_end;
4522 buf = data_buf;
4523 offset32 = offset;
4524 len32 = buf_size;
4525 align_start = align_end = 0;
4527 if ((align_start = (offset32 & 3))) {
4528 offset32 &= ~3;
4529 len32 += align_start;
4530 if (len32 < 4)
4531 len32 = 4;
4532 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4533 return rc;
4536 if (len32 & 3) {
4537 align_end = 4 - (len32 & 3);
4538 len32 += align_end;
4539 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4540 return rc;
4543 if (align_start || align_end) {
4544 align_buf = kmalloc(len32, GFP_KERNEL);
4545 if (align_buf == NULL)
4546 return -ENOMEM;
4547 if (align_start) {
4548 memcpy(align_buf, start, 4);
4550 if (align_end) {
4551 memcpy(align_buf + len32 - 4, end, 4);
4553 memcpy(align_buf + align_start, data_buf, buf_size);
4554 buf = align_buf;
4557 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4558 flash_buffer = kmalloc(264, GFP_KERNEL);
4559 if (flash_buffer == NULL) {
4560 rc = -ENOMEM;
4561 goto nvram_write_end;
4565 written = 0;
4566 while ((written < len32) && (rc == 0)) {
4567 u32 page_start, page_end, data_start, data_end;
4568 u32 addr, cmd_flags;
4569 int i;
4571 /* Find the page_start addr */
4572 page_start = offset32 + written;
4573 page_start -= (page_start % bp->flash_info->page_size);
4574 /* Find the page_end addr */
4575 page_end = page_start + bp->flash_info->page_size;
4576 /* Find the data_start addr */
4577 data_start = (written == 0) ? offset32 : page_start;
4578 /* Find the data_end addr */
4579 data_end = (page_end > offset32 + len32) ?
4580 (offset32 + len32) : page_end;
4582 /* Request access to the flash interface. */
4583 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4584 goto nvram_write_end;
4586 /* Enable access to flash interface */
4587 bnx2_enable_nvram_access(bp);
4589 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4590 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4591 int j;
4593 /* Read the whole page into the buffer
4594 * (non-buffered flash only) */
4595 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4596 if (j == (bp->flash_info->page_size - 4)) {
4597 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4599 rc = bnx2_nvram_read_dword(bp,
4600 page_start + j,
4601 &flash_buffer[j],
4602 cmd_flags);
4604 if (rc)
4605 goto nvram_write_end;
4607 cmd_flags = 0;
4611 /* Enable writes to flash interface (unlock write-protect) */
4612 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4613 goto nvram_write_end;
4615 /* Loop to write back the buffer data from page_start to
4616 * data_start */
4617 i = 0;
4618 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4619 /* Erase the page */
4620 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4621 goto nvram_write_end;
4623 /* Re-enable write access for the actual write */
4624 bnx2_enable_nvram_write(bp);
4626 for (addr = page_start; addr < data_start;
4627 addr += 4, i += 4) {
4629 rc = bnx2_nvram_write_dword(bp, addr,
4630 &flash_buffer[i], cmd_flags);
4632 if (rc != 0)
4633 goto nvram_write_end;
4635 cmd_flags = 0;
4639 /* Loop to write the new data from data_start to data_end */
4640 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4641 if ((addr == page_end - 4) ||
4642 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4643 (addr == data_end - 4))) {
4645 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4647 rc = bnx2_nvram_write_dword(bp, addr, buf,
4648 cmd_flags);
4650 if (rc != 0)
4651 goto nvram_write_end;
4653 cmd_flags = 0;
4654 buf += 4;
4657 /* Loop to write back the buffer data from data_end
4658 * to page_end */
4659 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4660 for (addr = data_end; addr < page_end;
4661 addr += 4, i += 4) {
4663 if (addr == page_end-4) {
4664 cmd_flags = BNX2_NVM_COMMAND_LAST;
4666 rc = bnx2_nvram_write_dword(bp, addr,
4667 &flash_buffer[i], cmd_flags);
4669 if (rc != 0)
4670 goto nvram_write_end;
4672 cmd_flags = 0;
4676 /* Disable writes to flash interface (lock write-protect) */
4677 bnx2_disable_nvram_write(bp);
4679 /* Disable access to flash interface */
4680 bnx2_disable_nvram_access(bp);
4681 bnx2_release_nvram_lock(bp);
4683 /* Account for the bytes written to this page */
4684 written += data_end - data_start;
4687 nvram_write_end:
4688 kfree(flash_buffer);
4689 kfree(align_buf);
4690 return rc;
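/*
 * Write algorithm summary: for each flash page touched, acquire the NVRAM
 * lock, and on non-buffered flash first read the whole page into
 * flash_buffer, erase the page, then rewrite it in three spans: the
 * preserved bytes before data_start, the caller's data from data_start to
 * data_end, and the preserved bytes up to page_end. Buffered flash skips
 * the read/erase and writes the new data directly.
 */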
4693 static void
4694 bnx2_init_fw_cap(struct bnx2 *bp)
4696 u32 val, sig = 0;
4698 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4699 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4701 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4702 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4704 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4705 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4706 return;
4708 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4709 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4710 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4713 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4714 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4715 u32 link;
4717 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4719 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4720 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4721 bp->phy_port = PORT_FIBRE;
4722 else
4723 bp->phy_port = PORT_TP;
4725 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4726 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4729 if (netif_running(bp->dev) && sig)
4730 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
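/*
 * Firmware capability handshake: the capabilities mailbox is only valid
 * when it carries BNX2_FW_CAP_SIGNATURE. For each capability the driver
 * consumes (VLAN keeping, remote PHY), it acknowledges by writing
 * BNX2_DRV_ACK_CAP_SIGNATURE plus the capability bits back to
 * BNX2_DRV_ACK_CAP_MB, but only while the interface is running.
 */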
4733 static void
4734 bnx2_setup_msix_tbl(struct bnx2 *bp)
4736 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4738 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4739 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4742 static int
4743 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4745 u32 val;
4746 int i, rc = 0;
4747 u8 old_port;
4749 /* Wait for the current PCI transaction to complete before
4750 * issuing a reset. */
4751 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4752 (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4753 BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4754 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4755 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4756 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4757 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4758 val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4759 udelay(5);
4760 } else { /* 5709 */
4761 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4762 val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4763 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4764 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4766 for (i = 0; i < 100; i++) {
4767 msleep(1);
4768 val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4769 if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4770 break;
4774 /* Wait for the firmware to tell us it is ok to issue a reset. */
4775 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4777 /* Deposit a driver reset signature so the firmware knows that
4778 * this is a soft reset. */
4779 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4780 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4782 /* Do a dummy read to force the chip to complete all current transactions
4783 * before we issue a reset. */
4784 val = BNX2_RD(bp, BNX2_MISC_ID);
4786 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4787 BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4788 BNX2_RD(bp, BNX2_MISC_COMMAND);
4789 udelay(5);
4791 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4792 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4794 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4796 } else {
4797 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4798 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4799 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4801 /* Chip reset. */
4802 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4804 /* Reading back any register after chip reset will hang the
4805 * bus on 5706 A0 and A1. The msleep below provides plenty
4806 * of margin for write posting. */
4808 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4809 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
4810 msleep(20);
4812 /* Reset takes approximately 30 usec */
4813 for (i = 0; i < 10; i++) {
4814 val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4815 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4816 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4817 break;
4818 udelay(10);
4821 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4822 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4823 pr_err("Chip reset did not complete\n");
4824 return -EBUSY;
4828 /* Make sure byte swapping is properly configured. */
4829 val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
4830 if (val != 0x01020304) {
4831 pr_err("Chip not in correct endian mode\n");
4832 return -ENODEV;
4835 /* Wait for the firmware to finish its initialization. */
4836 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4837 if (rc)
4838 return rc;
4840 spin_lock_bh(&bp->phy_lock);
4841 old_port = bp->phy_port;
4842 bnx2_init_fw_cap(bp);
4843 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4844 old_port != bp->phy_port)
4845 bnx2_set_default_remote_link(bp);
4846 spin_unlock_bh(&bp->phy_lock);
4848 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4849 /* Adjust the voltage regulator two steps lower. The default
4850 * of this register is 0x0000000e. */
4851 BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4853 /* Remove bad rbuf memory from the free pool. */
4854 rc = bnx2_alloc_bad_rbuf(bp);
4857 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4858 bnx2_setup_msix_tbl(bp);
4859 /* Prevent MSIX table reads and writes from timing out */
4860 BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
4861 BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4864 return rc;
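/*
 * Reset sequence summary: quiesce DMA (per-block enables on 5706/5708,
 * the NEW_CORE_CTL DMA bit plus a pending-transaction poll on 5709), sync
 * with the firmware (WAIT0), deposit the soft-reset signature, issue the
 * reset (MISC_COMMAND on 5709, PCICFG_MISC_CONFIG otherwise), poll for
 * completion, then verify byte swapping by checking that PCI_SWAP_DIAG0
 * reads back 0x01020304 before waiting for firmware init (WAIT1).
 */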
4867 static int
4868 bnx2_init_chip(struct bnx2 *bp)
4870 u32 val, mtu;
4871 int rc, i;
4873 /* Make sure the interrupt is not active. */
4874 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4876 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4877 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4878 #ifdef __BIG_ENDIAN
4879 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4880 #endif
4881 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4882 DMA_READ_CHANS << 12 |
4883 DMA_WRITE_CHANS << 16;
4885 val |= (0x2 << 20) | (1 << 11);
4887 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4888 val |= (1 << 23);
4890 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4891 (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4892 !(bp->flags & BNX2_FLAG_PCIX))
4893 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4895 BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4897 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4898 val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4899 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4900 BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4903 if (bp->flags & BNX2_FLAG_PCIX) {
4904 u16 val16;
4906 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4907 &val16);
4908 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4909 val16 & ~PCI_X_CMD_ERO);
4912 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4913 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4914 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4915 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4917 /* Initialize context mapping and zero out the quick contexts. The
4918 * context block must have already been enabled. */
4919 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4920 rc = bnx2_init_5709_context(bp);
4921 if (rc)
4922 return rc;
4923 } else
4924 bnx2_init_context(bp);
4926 if ((rc = bnx2_init_cpus(bp)) != 0)
4927 return rc;
4929 bnx2_init_nvram(bp);
4931 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4933 val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4934 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4935 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4936 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4937 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4938 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4939 val |= BNX2_MQ_CONFIG_HALT_DIS;
4942 BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4944 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4945 BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4946 BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4948 val = (BNX2_PAGE_BITS - 8) << 24;
4949 BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4951 /* Configure page size. */
4952 val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4953 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4954 val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4955 BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4957 val = bp->mac_addr[0] +
4958 (bp->mac_addr[1] << 8) +
4959 (bp->mac_addr[2] << 16) +
4960 bp->mac_addr[3] +
4961 (bp->mac_addr[4] << 8) +
4962 (bp->mac_addr[5] << 16);
4963 BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4965 /* Program the MTU. Also include 4 bytes for CRC32. */
4966 mtu = bp->dev->mtu;
4967 val = mtu + ETH_HLEN + ETH_FCS_LEN;
4968 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4969 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4970 BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4972 if (mtu < 1500)
4973 mtu = 1500;
4975 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4976 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4977 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4979 memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4980 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4981 bp->bnx2_napi[i].last_status_idx = 0;
4983 bp->idle_chk_status_idx = 0xffff;
4985 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4987 /* Set up how to generate a link change interrupt. */
4988 BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4990 BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
4991 (u64) bp->status_blk_mapping & 0xffffffff);
4992 BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4994 BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4995 (u64) bp->stats_blk_mapping & 0xffffffff);
4996 BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4997 (u64) bp->stats_blk_mapping >> 32);
4999 BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
5000 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
5002 BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
5003 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
5005 BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
5006 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
5008 BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
5010 BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
5012 BNX2_WR(bp, BNX2_HC_COM_TICKS,
5013 (bp->com_ticks_int << 16) | bp->com_ticks);
5015 BNX2_WR(bp, BNX2_HC_CMD_TICKS,
5016 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
5018 if (bp->flags & BNX2_FLAG_BROKEN_STATS)
5019 BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
5020 else
5021 BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
5022 BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
5024 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
5025 val = BNX2_HC_CONFIG_COLLECT_STATS;
5026 else {
5027 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
5028 BNX2_HC_CONFIG_COLLECT_STATS;
5031 if (bp->flags & BNX2_FLAG_USING_MSIX) {
5032 BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
5033 BNX2_HC_MSIX_BIT_VECTOR_VAL);
5035 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
5038 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5039 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
5041 BNX2_WR(bp, BNX2_HC_CONFIG, val);
5043 if (bp->rx_ticks < 25)
5044 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5045 else
5046 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5048 for (i = 1; i < bp->irq_nvecs; i++) {
5049 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5050 BNX2_HC_SB_CONFIG_1;
5052 BNX2_WR(bp, base,
5053 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5054 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5055 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5057 BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5058 (bp->tx_quick_cons_trip_int << 16) |
5059 bp->tx_quick_cons_trip);
5061 BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5062 (bp->tx_ticks_int << 16) | bp->tx_ticks);
5064 BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5065 (bp->rx_quick_cons_trip_int << 16) |
5066 bp->rx_quick_cons_trip);
5068 BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5069 (bp->rx_ticks_int << 16) | bp->rx_ticks);
5072 /* Clear internal stats counters. */
5073 BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5075 BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5077 /* Initialize the receive filter. */
5078 bnx2_set_rx_mode(bp->dev);
5080 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5081 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5082 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5083 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5085 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5086 1, 0);
5088 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5089 BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5091 udelay(20);
5093 bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5095 return rc;
5098 static void
5099 bnx2_clear_ring_states(struct bnx2 *bp)
5101 struct bnx2_napi *bnapi;
5102 struct bnx2_tx_ring_info *txr;
5103 struct bnx2_rx_ring_info *rxr;
5104 int i;
5106 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5107 bnapi = &bp->bnx2_napi[i];
5108 txr = &bnapi->tx_ring;
5109 rxr = &bnapi->rx_ring;
5111 txr->tx_cons = 0;
5112 txr->hw_tx_cons = 0;
5113 rxr->rx_prod_bseq = 0;
5114 rxr->rx_prod = 0;
5115 rxr->rx_cons = 0;
5116 rxr->rx_pg_prod = 0;
5117 rxr->rx_pg_cons = 0;
5121 static void
5122 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5124 u32 val, offset0, offset1, offset2, offset3;
5125 u32 cid_addr = GET_CID_ADDR(cid);
5127 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5128 offset0 = BNX2_L2CTX_TYPE_XI;
5129 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5130 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5131 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5132 } else {
5133 offset0 = BNX2_L2CTX_TYPE;
5134 offset1 = BNX2_L2CTX_CMD_TYPE;
5135 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5136 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5138 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5139 bnx2_ctx_wr(bp, cid_addr, offset0, val);
5141 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5142 bnx2_ctx_wr(bp, cid_addr, offset1, val);
5144 val = (u64) txr->tx_desc_mapping >> 32;
5145 bnx2_ctx_wr(bp, cid_addr, offset2, val);
5147 val = (u64) txr->tx_desc_mapping & 0xffffffff;
5148 bnx2_ctx_wr(bp, cid_addr, offset3, val);
5151 static void
5152 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5154 struct bnx2_tx_bd *txbd;
5155 u32 cid = TX_CID;
5156 struct bnx2_napi *bnapi;
5157 struct bnx2_tx_ring_info *txr;
5159 bnapi = &bp->bnx2_napi[ring_num];
5160 txr = &bnapi->tx_ring;
5162 if (ring_num == 0)
5163 cid = TX_CID;
5164 else
5165 cid = TX_TSS_CID + ring_num - 1;
5167 bp->tx_wake_thresh = bp->tx_ring_size / 2;
5169 txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5171 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5172 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5174 txr->tx_prod = 0;
5175 txr->tx_prod_bseq = 0;
5177 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5178 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5180 bnx2_init_tx_context(bp, cid, txr);
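/*
 * The TX ring is a circular chain: the descriptor at index
 * BNX2_MAX_TX_DESC_CNT carries no data but holds the DMA address of the
 * ring base, so the hardware wraps back to the first descriptor.
 * tx_bidx_addr/tx_bseq_addr are the mailbox (doorbell) offsets used later
 * to publish the producer index and byte sequence to the chip.
 */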
5183 static void
5184 bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5185 u32 buf_size, int num_rings)
5187 int i;
5188 struct bnx2_rx_bd *rxbd;
5190 for (i = 0; i < num_rings; i++) {
5191 int j;
5193 rxbd = &rx_ring[i][0];
5194 for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5195 rxbd->rx_bd_len = buf_size;
5196 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5198 if (i == (num_rings - 1))
5199 j = 0;
5200 else
5201 j = i + 1;
5202 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5203 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
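/*
 * RX rings may span several descriptor pages: the last BD of page i
 * points at page i + 1, and the last page points back at page 0, giving
 * the hardware one logical circular ring.
 */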
5207 static void
5208 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5210 int i;
5211 u16 prod, ring_prod;
5212 u32 cid, rx_cid_addr, val;
5213 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5214 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5216 if (ring_num == 0)
5217 cid = RX_CID;
5218 else
5219 cid = RX_RSS_CID + ring_num - 1;
5221 rx_cid_addr = GET_CID_ADDR(cid);
5223 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5224 bp->rx_buf_use_size, bp->rx_max_ring);
5226 bnx2_init_rx_context(bp, cid);
5228 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5229 val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
5230 BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5233 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5234 if (bp->rx_pg_ring_size) {
5235 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5236 rxr->rx_pg_desc_mapping,
5237 PAGE_SIZE, bp->rx_max_pg_ring);
5238 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5239 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5240 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5241 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5243 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5244 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5246 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5247 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5249 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5250 BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5253 val = (u64) rxr->rx_desc_mapping[0] >> 32;
5254 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5256 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5257 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5259 ring_prod = prod = rxr->rx_pg_prod;
5260 for (i = 0; i < bp->rx_pg_ring_size; i++) {
5261 if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5262 netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5263 ring_num, i, bp->rx_pg_ring_size);
5264 break;
5266 prod = BNX2_NEXT_RX_BD(prod);
5267 ring_prod = BNX2_RX_PG_RING_IDX(prod);
5269 rxr->rx_pg_prod = prod;
5271 ring_prod = prod = rxr->rx_prod;
5272 for (i = 0; i < bp->rx_ring_size; i++) {
5273 if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5274 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5275 ring_num, i, bp->rx_ring_size);
5276 break;
5278 prod = BNX2_NEXT_RX_BD(prod);
5279 ring_prod = BNX2_RX_RING_IDX(prod);
5281 rxr->rx_prod = prod;
5283 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5284 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5285 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5287 BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5288 BNX2_WR16(bp, rxr->rx_bidx_addr, prod);
5290 BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5293 static void
5294 bnx2_init_all_rings(struct bnx2 *bp)
5296 int i;
5297 u32 val;
5299 bnx2_clear_ring_states(bp);
5301 BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5302 for (i = 0; i < bp->num_tx_rings; i++)
5303 bnx2_init_tx_ring(bp, i);
5305 if (bp->num_tx_rings > 1)
5306 BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5307 (TX_TSS_CID << 7));
5309 BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5310 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5312 for (i = 0; i < bp->num_rx_rings; i++)
5313 bnx2_init_rx_ring(bp, i);
5315 if (bp->num_rx_rings > 1) {
5316 u32 tbl_32 = 0;
5318 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5319 int shift = (i % 8) << 2;
5321 tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5322 if ((i % 8) == 7) {
5323 BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5324 BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5325 BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5326 BNX2_RLUP_RSS_COMMAND_WRITE |
5327 BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5328 tbl_32 = 0;
5332 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5333 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5335 BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5340 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5342 u32 max, num_rings = 1;
5344 while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5345 ring_size -= BNX2_MAX_RX_DESC_CNT;
5346 num_rings++;
5348 /* round to next power of 2 */
5349 max = max_size;
5350 while ((max & num_rings) == 0)
5351 max >>= 1;
5353 if (num_rings != max)
5354 max <<= 1;
5356 return max;
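/*
 * Example, assuming BNX2_MAX_RX_DESC_CNT is 255 (one 4K page of 16-byte
 * descriptors minus the chain entry -- an assumption about bnx2.h here):
 * a requested ring_size of 600 needs num_rings = 3, which this helper
 * rounds up to the next power of two, 4, capped at max_size.
 */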
5359 static void
5360 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5362 u32 rx_size, rx_space, jumbo_size;
5364 /* 8 for CRC and VLAN */
5365 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5367 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5368 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5370 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5371 bp->rx_pg_ring_size = 0;
5372 bp->rx_max_pg_ring = 0;
5373 bp->rx_max_pg_ring_idx = 0;
5374 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5375 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5377 jumbo_size = size * pages;
5378 if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
5379 jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
5381 bp->rx_pg_ring_size = jumbo_size;
5382 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5383 BNX2_MAX_RX_PG_RINGS);
5384 bp->rx_max_pg_ring_idx =
5385 (bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
5386 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5387 bp->rx_copy_thresh = 0;
5390 bp->rx_buf_use_size = rx_size;
5391 /* hw alignment + build_skb() overhead */
5392 bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5393 NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5394 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5395 bp->rx_ring_size = size;
5396 bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
5397 bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
5400 static void
5401 bnx2_free_tx_skbs(struct bnx2 *bp)
5403 int i;
5405 for (i = 0; i < bp->num_tx_rings; i++) {
5406 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5407 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5408 int j;
5410 if (txr->tx_buf_ring == NULL)
5411 continue;
5413 for (j = 0; j < BNX2_TX_DESC_CNT; ) {
5414 struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5415 struct sk_buff *skb = tx_buf->skb;
5416 int k, last;
5418 if (skb == NULL) {
5419 j = BNX2_NEXT_TX_BD(j);
5420 continue;
5423 dma_unmap_single(&bp->pdev->dev,
5424 dma_unmap_addr(tx_buf, mapping),
5425 skb_headlen(skb),
5426 PCI_DMA_TODEVICE);
5428 tx_buf->skb = NULL;
5430 last = tx_buf->nr_frags;
5431 j = BNX2_NEXT_TX_BD(j);
5432 for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
5433 tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
5434 dma_unmap_page(&bp->pdev->dev,
5435 dma_unmap_addr(tx_buf, mapping),
5436 skb_frag_size(&skb_shinfo(skb)->frags[k]),
5437 PCI_DMA_TODEVICE);
5439 dev_kfree_skb(skb);
5441 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
5445 static void
5446 bnx2_free_rx_skbs(struct bnx2 *bp)
5448 int i;
5450 for (i = 0; i < bp->num_rx_rings; i++) {
5451 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5452 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5453 int j;
5455 if (rxr->rx_buf_ring == NULL)
5456 return;
5458 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5459 struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5460 u8 *data = rx_buf->data;
5462 if (data == NULL)
5463 continue;
5465 dma_unmap_single(&bp->pdev->dev,
5466 dma_unmap_addr(rx_buf, mapping),
5467 bp->rx_buf_use_size,
5468 PCI_DMA_FROMDEVICE);
5470 rx_buf->data = NULL;
5472 kfree(data);
5474 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5475 bnx2_free_rx_page(bp, rxr, j);
5479 static void
5480 bnx2_free_skbs(struct bnx2 *bp)
5482 bnx2_free_tx_skbs(bp);
5483 bnx2_free_rx_skbs(bp);
5486 static int
5487 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5489 int rc;
5491 rc = bnx2_reset_chip(bp, reset_code);
5492 bnx2_free_skbs(bp);
5493 if (rc)
5494 return rc;
5496 if ((rc = bnx2_init_chip(bp)) != 0)
5497 return rc;
5499 bnx2_init_all_rings(bp);
5500 return 0;
5503 static int
5504 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5506 int rc;
5508 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5509 return rc;
5511 spin_lock_bh(&bp->phy_lock);
5512 bnx2_init_phy(bp, reset_phy);
5513 bnx2_set_link(bp);
5514 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5515 bnx2_remote_phy_event(bp);
5516 spin_unlock_bh(&bp->phy_lock);
5517 return 0;
5520 static int
5521 bnx2_shutdown_chip(struct bnx2 *bp)
5523 u32 reset_code;
5525 if (bp->flags & BNX2_FLAG_NO_WOL)
5526 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5527 else if (bp->wol)
5528 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5529 else
5530 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5532 return bnx2_reset_chip(bp, reset_code);
5535 static int
5536 bnx2_test_registers(struct bnx2 *bp)
5538 int ret;
5539 int i, is_5709;
5540 static const struct {
5541 u16 offset;
5542 u16 flags;
5543 #define BNX2_FL_NOT_5709 1
5544 u32 rw_mask;
5545 u32 ro_mask;
5546 } reg_tbl[] = {
5547 { 0x006c, 0, 0x00000000, 0x0000003f },
5548 { 0x0090, 0, 0xffffffff, 0x00000000 },
5549 { 0x0094, 0, 0x00000000, 0x00000000 },
5551 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5552 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5553 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5554 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5555 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5556 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5557 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5558 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5559 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5561 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5562 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5563 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5564 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5565 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5566 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5568 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5569 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5570 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5572 { 0x1000, 0, 0x00000000, 0x00000001 },
5573 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5575 { 0x1408, 0, 0x01c00800, 0x00000000 },
5576 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5577 { 0x14a8, 0, 0x00000000, 0x000001ff },
5578 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5579 { 0x14b0, 0, 0x00000002, 0x00000001 },
5580 { 0x14b8, 0, 0x00000000, 0x00000000 },
5581 { 0x14c0, 0, 0x00000000, 0x00000009 },
5582 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5583 { 0x14cc, 0, 0x00000000, 0x00000001 },
5584 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5586 { 0x1800, 0, 0x00000000, 0x00000001 },
5587 { 0x1804, 0, 0x00000000, 0x00000003 },
5589 { 0x2800, 0, 0x00000000, 0x00000001 },
5590 { 0x2804, 0, 0x00000000, 0x00003f01 },
5591 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5592 { 0x2810, 0, 0xffff0000, 0x00000000 },
5593 { 0x2814, 0, 0xffff0000, 0x00000000 },
5594 { 0x2818, 0, 0xffff0000, 0x00000000 },
5595 { 0x281c, 0, 0xffff0000, 0x00000000 },
5596 { 0x2834, 0, 0xffffffff, 0x00000000 },
5597 { 0x2840, 0, 0x00000000, 0xffffffff },
5598 { 0x2844, 0, 0x00000000, 0xffffffff },
5599 { 0x2848, 0, 0xffffffff, 0x00000000 },
5600 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5602 { 0x2c00, 0, 0x00000000, 0x00000011 },
5603 { 0x2c04, 0, 0x00000000, 0x00030007 },
5605 { 0x3c00, 0, 0x00000000, 0x00000001 },
5606 { 0x3c04, 0, 0x00000000, 0x00070000 },
5607 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5608 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5609 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5610 { 0x3c14, 0, 0x00000000, 0xffffffff },
5611 { 0x3c18, 0, 0x00000000, 0xffffffff },
5612 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5613 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5615 { 0x5004, 0, 0x00000000, 0x0000007f },
5616 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5618 { 0x5c00, 0, 0x00000000, 0x00000001 },
5619 { 0x5c04, 0, 0x00000000, 0x0003000f },
5620 { 0x5c08, 0, 0x00000003, 0x00000000 },
5621 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5622 { 0x5c10, 0, 0x00000000, 0xffffffff },
5623 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5624 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5625 { 0x5c88, 0, 0x00000000, 0x00077373 },
5626 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5628 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5629 { 0x680c, 0, 0xffffffff, 0x00000000 },
5630 { 0x6810, 0, 0xffffffff, 0x00000000 },
5631 { 0x6814, 0, 0xffffffff, 0x00000000 },
5632 { 0x6818, 0, 0xffffffff, 0x00000000 },
5633 { 0x681c, 0, 0xffffffff, 0x00000000 },
5634 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5635 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5636 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5637 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5638 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5639 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5640 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5641 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5642 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5643 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5644 { 0x684c, 0, 0xffffffff, 0x00000000 },
5645 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5646 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5647 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5648 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5649 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5650 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5652 { 0xffff, 0, 0x00000000, 0x00000000 },
5655 ret = 0;
5656 is_5709 = 0;
5657 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5658 is_5709 = 1;
5660 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5661 u32 offset, rw_mask, ro_mask, save_val, val;
5662 u16 flags = reg_tbl[i].flags;
5664 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5665 continue;
5667 offset = (u32) reg_tbl[i].offset;
5668 rw_mask = reg_tbl[i].rw_mask;
5669 ro_mask = reg_tbl[i].ro_mask;
5671 save_val = readl(bp->regview + offset);
5673 writel(0, bp->regview + offset);
5675 val = readl(bp->regview + offset);
5676 if ((val & rw_mask) != 0) {
5677 goto reg_test_err;
5680 if ((val & ro_mask) != (save_val & ro_mask)) {
5681 goto reg_test_err;
5684 writel(0xffffffff, bp->regview + offset);
5686 val = readl(bp->regview + offset);
5687 if ((val & rw_mask) != rw_mask) {
5688 goto reg_test_err;
5691 if ((val & ro_mask) != (save_val & ro_mask)) {
5692 goto reg_test_err;
5695 writel(save_val, bp->regview + offset);
5696 continue;
5698 reg_test_err:
5699 writel(save_val, bp->regview + offset);
5700 ret = -ENODEV;
5701 break;
5703 return ret;
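/*
 * Register test method: for each entry, write 0 and verify that all
 * read/write bits (rw_mask) read back as 0 while read-only bits (ro_mask)
 * keep their saved value; then write 0xffffffff and verify the rw bits
 * read back as set with the ro bits still unchanged; finally restore the
 * saved value. Any mismatch fails the whole test with -ENODEV.
 */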
5706 static int
5707 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5709 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5710 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5711 int i;
5713 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5714 u32 offset;
5716 for (offset = 0; offset < size; offset += 4) {
5718 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5720 if (bnx2_reg_rd_ind(bp, start + offset) !=
5721 test_pattern[i]) {
5722 return -ENODEV;
5726 return 0;
5729 static int
5730 bnx2_test_memory(struct bnx2 *bp)
5732 int ret = 0;
5733 int i;
5734 static struct mem_entry {
5735 u32 offset;
5736 u32 len;
5737 } mem_tbl_5706[] = {
5738 { 0x60000, 0x4000 },
5739 { 0xa0000, 0x3000 },
5740 { 0xe0000, 0x4000 },
5741 { 0x120000, 0x4000 },
5742 { 0x1a0000, 0x4000 },
5743 { 0x160000, 0x4000 },
5744 { 0xffffffff, 0 },
5746 mem_tbl_5709[] = {
5747 { 0x60000, 0x4000 },
5748 { 0xa0000, 0x3000 },
5749 { 0xe0000, 0x4000 },
5750 { 0x120000, 0x4000 },
5751 { 0x1a0000, 0x4000 },
5752 { 0xffffffff, 0 },
5754 struct mem_entry *mem_tbl;
5756 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5757 mem_tbl = mem_tbl_5709;
5758 else
5759 mem_tbl = mem_tbl_5706;
5761 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5762 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5763 mem_tbl[i].len)) != 0) {
5764 return ret;
5768 return ret;
5771 #define BNX2_MAC_LOOPBACK 0
5772 #define BNX2_PHY_LOOPBACK 1
5774 static int
5775 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5777 unsigned int pkt_size, num_pkts, i;
5778 struct sk_buff *skb;
5779 u8 *data;
5780 unsigned char *packet;
5781 u16 rx_start_idx, rx_idx;
5782 dma_addr_t map;
5783 struct bnx2_tx_bd *txbd;
5784 struct bnx2_sw_bd *rx_buf;
5785 struct l2_fhdr *rx_hdr;
5786 int ret = -ENODEV;
5787 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5788 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5789 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5791 tx_napi = bnapi;
5793 txr = &tx_napi->tx_ring;
5794 rxr = &bnapi->rx_ring;
5795 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5796 bp->loopback = MAC_LOOPBACK;
5797 bnx2_set_mac_loopback(bp);
5799 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5800 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5801 return 0;
5803 bp->loopback = PHY_LOOPBACK;
5804 bnx2_set_phy_loopback(bp);
5806 else
5807 return -EINVAL;
5809 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5810 skb = netdev_alloc_skb(bp->dev, pkt_size);
5811 if (!skb)
5812 return -ENOMEM;
5813 packet = skb_put(skb, pkt_size);
5814 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
5815 memset(packet + ETH_ALEN, 0x0, 8);
5816 for (i = 14; i < pkt_size; i++)
5817 packet[i] = (unsigned char) (i & 0xff);
5819 map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5820 PCI_DMA_TODEVICE);
5821 if (dma_mapping_error(&bp->pdev->dev, map)) {
5822 dev_kfree_skb(skb);
5823 return -EIO;
5826 BNX2_WR(bp, BNX2_HC_COMMAND,
5827 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5829 BNX2_RD(bp, BNX2_HC_COMMAND);
5831 udelay(5);
5832 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5834 num_pkts = 0;
5836 txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];
5838 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5839 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5840 txbd->tx_bd_mss_nbytes = pkt_size;
5841 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5843 num_pkts++;
5844 txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
5845 txr->tx_prod_bseq += pkt_size;
5847 BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5848 BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5850 udelay(100);
5852 BNX2_WR(bp, BNX2_HC_COMMAND,
5853 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5855 BNX2_RD(bp, BNX2_HC_COMMAND);
5857 udelay(5);
5859 dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
5860 dev_kfree_skb(skb);
5862 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5863 goto loopback_test_done;
5865 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5866 if (rx_idx != rx_start_idx + num_pkts) {
5867 goto loopback_test_done;
5870 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5871 data = rx_buf->data;
5873 rx_hdr = get_l2_fhdr(data);
5874 data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
5876 dma_sync_single_for_cpu(&bp->pdev->dev,
5877 dma_unmap_addr(rx_buf, mapping),
5878 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
5880 if (rx_hdr->l2_fhdr_status &
5881 (L2_FHDR_ERRORS_BAD_CRC |
5882 L2_FHDR_ERRORS_PHY_DECODE |
5883 L2_FHDR_ERRORS_ALIGNMENT |
5884 L2_FHDR_ERRORS_TOO_SHORT |
5885 L2_FHDR_ERRORS_GIANT_FRAME)) {
5887 goto loopback_test_done;
5890 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5891 goto loopback_test_done;
5894 for (i = 14; i < pkt_size; i++) {
5895 if (*(data + i) != (unsigned char) (i & 0xff)) {
5896 goto loopback_test_done;
5900 ret = 0;
5902 loopback_test_done:
5903 bp->loopback = 0;
5904 return ret;
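/*
 * Loopback test flow: build a pkt_size frame addressed to ourselves, post
 * it on TX ring 0, force a coalescing pass with COAL_NOW_WO_INT, then
 * verify that exactly one packet arrived with no l2_fhdr error bits, the
 * expected length (l2_fhdr_pkt_len minus the 4-byte CRC), and an intact
 * payload pattern.
 */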
5907 #define BNX2_MAC_LOOPBACK_FAILED 1
5908 #define BNX2_PHY_LOOPBACK_FAILED 2
5909 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5910 BNX2_PHY_LOOPBACK_FAILED)
5912 static int
5913 bnx2_test_loopback(struct bnx2 *bp)
5915 int rc = 0;
5917 if (!netif_running(bp->dev))
5918 return BNX2_LOOPBACK_FAILED;
5920 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5921 spin_lock_bh(&bp->phy_lock);
5922 bnx2_init_phy(bp, 1);
5923 spin_unlock_bh(&bp->phy_lock);
5924 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5925 rc |= BNX2_MAC_LOOPBACK_FAILED;
5926 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5927 rc |= BNX2_PHY_LOOPBACK_FAILED;
5928 return rc;
5931 #define NVRAM_SIZE 0x200
5932 #define CRC32_RESIDUAL 0xdebb20e3
5934 static int
5935 bnx2_test_nvram(struct bnx2 *bp)
5937 __be32 buf[NVRAM_SIZE / 4];
5938 u8 *data = (u8 *) buf;
5939 int rc = 0;
5940 u32 magic, csum;
5942 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5943 goto test_nvram_done;
5945 magic = be32_to_cpu(buf[0]);
5946 if (magic != 0x669955aa) {
5947 rc = -ENODEV;
5948 goto test_nvram_done;
5951 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5952 goto test_nvram_done;
5954 csum = ether_crc_le(0x100, data);
5955 if (csum != CRC32_RESIDUAL) {
5956 rc = -ENODEV;
5957 goto test_nvram_done;
5960 csum = ether_crc_le(0x100, data + 0x100);
5961 if (csum != CRC32_RESIDUAL) {
5962 rc = -ENODEV;
5965 test_nvram_done:
5966 return rc;
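/*
 * The checksum test relies on the standard CRC-32 residual property:
 * running the CRC over a block that ends with its own stored (inverted)
 * CRC yields the constant 0xdebb20e3, so both 256-byte halves are
 * validated without extracting the stored checksum values.
 */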
5969 static int
5970 bnx2_test_link(struct bnx2 *bp)
5972 u32 bmsr;
5974 if (!netif_running(bp->dev))
5975 return -ENODEV;
5977 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5978 if (bp->link_up)
5979 return 0;
5980 return -ENODEV;
5982 spin_lock_bh(&bp->phy_lock);
5983 bnx2_enable_bmsr1(bp);
5984 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5985 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5986 bnx2_disable_bmsr1(bp);
5987 spin_unlock_bh(&bp->phy_lock);
5989 if (bmsr & BMSR_LSTATUS) {
5990 return 0;
5992 return -ENODEV;
5995 static int
5996 bnx2_test_intr(struct bnx2 *bp)
5998 int i;
5999 u16 status_idx;
6001 if (!netif_running(bp->dev))
6002 return -ENODEV;
6004 status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
6006 /* This register is not touched during run-time. */
6007 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
6008 BNX2_RD(bp, BNX2_HC_COMMAND);
6010 for (i = 0; i < 10; i++) {
6011 if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
6012 status_idx) {
6014 break;
6017 msleep_interruptible(10);
6019 if (i < 10)
6020 return 0;
6022 return -ENODEV;
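/*
 * The interrupt test forces a host-coalescing interrupt with COAL_NOW and
 * then watches the status index in BNX2_PCICFG_INT_ACK_CMD across up to
 * ten 10 ms sleeps; a change means the interrupt was delivered and
 * serviced.
 */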
6025 /* Determine link for parallel detection. */
6026 static int
6027 bnx2_5706_serdes_has_link(struct bnx2 *bp)
6029 u32 mode_ctl, an_dbg, exp;
6031 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
6032 return 0;
6034 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
6035 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
6037 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
6038 return 0;
6040 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6041 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6042 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6044 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
6045 return 0;
6047 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
6048 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6049 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6051 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
6052 return 0;
6054 return 1;
6057 static void
6058 bnx2_5706_serdes_timer(struct bnx2 *bp)
6060 int check_link = 1;
6062 spin_lock(&bp->phy_lock);
6063 if (bp->serdes_an_pending) {
6064 bp->serdes_an_pending--;
6065 check_link = 0;
6066 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6067 u32 bmcr;
6069 bp->current_interval = BNX2_TIMER_INTERVAL;
6071 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6073 if (bmcr & BMCR_ANENABLE) {
6074 if (bnx2_5706_serdes_has_link(bp)) {
6075 bmcr &= ~BMCR_ANENABLE;
6076 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6077 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6078 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6082 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6083 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6084 u32 phy2;
6086 bnx2_write_phy(bp, 0x17, 0x0f01);
6087 bnx2_read_phy(bp, 0x15, &phy2);
6088 if (phy2 & 0x20) {
6089 u32 bmcr;
6091 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6092 bmcr |= BMCR_ANENABLE;
6093 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6095 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6097 } else
6098 bp->current_interval = BNX2_TIMER_INTERVAL;
6100 if (check_link) {
6101 u32 val;
6103 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6104 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6105 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6107 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6108 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6109 bnx2_5706s_force_link_dn(bp, 1);
6110 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6111 } else
6112 bnx2_set_link(bp);
6113 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6114 bnx2_set_link(bp);
6116 spin_unlock(&bp->phy_lock);
6119 static void
6120 bnx2_5708_serdes_timer(struct bnx2 *bp)
6122 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6123 return;
6125 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6126 bp->serdes_an_pending = 0;
6127 return;
6130 spin_lock(&bp->phy_lock);
6131 if (bp->serdes_an_pending)
6132 bp->serdes_an_pending--;
6133 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6134 u32 bmcr;
6136 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6137 if (bmcr & BMCR_ANENABLE) {
6138 bnx2_enable_forced_2g5(bp);
6139 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6140 } else {
6141 bnx2_disable_forced_2g5(bp);
6142 bp->serdes_an_pending = 2;
6143 bp->current_interval = BNX2_TIMER_INTERVAL;
6146 } else
6147 bp->current_interval = BNX2_TIMER_INTERVAL;
6149 spin_unlock(&bp->phy_lock);
6152 static void
6153 bnx2_timer(unsigned long data)
6155 struct bnx2 *bp = (struct bnx2 *) data;
6157 if (!netif_running(bp->dev))
6158 return;
6160 if (atomic_read(&bp->intr_sem) != 0)
6161 goto bnx2_restart_timer;
6163 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6164 BNX2_FLAG_USING_MSI)
6165 bnx2_chk_missed_msi(bp);
6167 bnx2_send_heart_beat(bp);
6169 bp->stats_blk->stat_FwRxDrop =
6170 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6172 /* Work around occasionally corrupted counters */
6173 if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6174 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6175 BNX2_HC_COMMAND_STATS_NOW);
6177 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6178 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
6179 bnx2_5706_serdes_timer(bp);
6180 else
6181 bnx2_5708_serdes_timer(bp);
6184 bnx2_restart_timer:
6185 mod_timer(&bp->timer, jiffies + bp->current_interval);
6188 static int
6189 bnx2_request_irq(struct bnx2 *bp)
6191 unsigned long flags;
6192 struct bnx2_irq *irq;
6193 int rc = 0, i;
6195 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6196 flags = 0;
6197 else
6198 flags = IRQF_SHARED;
6200 for (i = 0; i < bp->irq_nvecs; i++) {
6201 irq = &bp->irq_tbl[i];
6202 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6203 &bp->bnx2_napi[i]);
6204 if (rc)
6205 break;
6206 irq->requested = 1;
6208 return rc;
6211 static void
6212 __bnx2_free_irq(struct bnx2 *bp)
6214 struct bnx2_irq *irq;
6215 int i;
6217 for (i = 0; i < bp->irq_nvecs; i++) {
6218 irq = &bp->irq_tbl[i];
6219 if (irq->requested)
6220 free_irq(irq->vector, &bp->bnx2_napi[i]);
6221 irq->requested = 0;
6225 static void
6226 bnx2_free_irq(struct bnx2 *bp)
6229 __bnx2_free_irq(bp);
6230 if (bp->flags & BNX2_FLAG_USING_MSI)
6231 pci_disable_msi(bp->pdev);
6232 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6233 pci_disable_msix(bp->pdev);
6235 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6238 static void
6239 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6241 int i, total_vecs;
6242 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6243 struct net_device *dev = bp->dev;
6244 const int len = sizeof(bp->irq_tbl[0].name);
6246 bnx2_setup_msix_tbl(bp);
6247 BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6248 BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6249 BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6251 /* Need to flush the previous three writes to ensure MSI-X
6252 * is set up properly */
6253 BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
6255 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6256 msix_ent[i].entry = i;
6257 msix_ent[i].vector = 0;
6260 total_vecs = msix_vecs;
6261 #ifdef BCM_CNIC
6262 total_vecs++;
6263 #endif
6264 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
6265 BNX2_MIN_MSIX_VEC, total_vecs);
6266 if (total_vecs < 0)
6267 return;
6269 msix_vecs = total_vecs;
6270 #ifdef BCM_CNIC
6271 msix_vecs--;
6272 #endif
6273 bp->irq_nvecs = msix_vecs;
6274 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6275 for (i = 0; i < total_vecs; i++) {
6276 bp->irq_tbl[i].vector = msix_ent[i].vector;
6277 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6278 bp->irq_tbl[i].handler = bnx2_msi_1shot;
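/*
 * Vector accounting: when CNIC support is built in, one extra MSI-X
 * vector beyond the RSS vectors is requested; it is reserved for the
 * offload (cnic) interface, so msix_vecs is decremented back before
 * setting bp->irq_nvecs for the net device itself.
 */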
6282 static int
6283 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6285 int cpus = netif_get_num_default_rss_queues();
6286 int msix_vecs;
6288 if (!bp->num_req_rx_rings)
6289 msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
6290 else if (!bp->num_req_tx_rings)
6291 msix_vecs = max(cpus, bp->num_req_rx_rings);
6292 else
6293 msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
6295 msix_vecs = min(msix_vecs, RX_MAX_RINGS);
6297 bp->irq_tbl[0].handler = bnx2_interrupt;
6298 strcpy(bp->irq_tbl[0].name, bp->dev->name);
6299 bp->irq_nvecs = 1;
6300 bp->irq_tbl[0].vector = bp->pdev->irq;
6302 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6303 bnx2_enable_msix(bp, msix_vecs);
6305 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6306 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6307 if (pci_enable_msi(bp->pdev) == 0) {
6308 bp->flags |= BNX2_FLAG_USING_MSI;
6309 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
6310 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6311 bp->irq_tbl[0].handler = bnx2_msi_1shot;
6312 } else
6313 bp->irq_tbl[0].handler = bnx2_msi;
6315 bp->irq_tbl[0].vector = bp->pdev->irq;
6319 if (!bp->num_req_tx_rings)
6320 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6321 else
6322 bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
6324 if (!bp->num_req_rx_rings)
6325 bp->num_rx_rings = bp->irq_nvecs;
6326 else
6327 bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
6329 netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6331 return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6334 /* Called with rtnl_lock */
6335 static int
6336 bnx2_open(struct net_device *dev)
6338 struct bnx2 *bp = netdev_priv(dev);
6339 int rc;
6341 rc = bnx2_request_firmware(bp);
6342 if (rc < 0)
6343 goto out;
6345 netif_carrier_off(dev);
6347 bnx2_disable_int(bp);
6349 rc = bnx2_setup_int_mode(bp, disable_msi);
6350 if (rc)
6351 goto open_err;
6352 bnx2_init_napi(bp);
6353 bnx2_napi_enable(bp);
6354 rc = bnx2_alloc_mem(bp);
6355 if (rc)
6356 goto open_err;
6358 rc = bnx2_request_irq(bp);
6359 if (rc)
6360 goto open_err;
6362 rc = bnx2_init_nic(bp, 1);
6363 if (rc)
6364 goto open_err;
6366 mod_timer(&bp->timer, jiffies + bp->current_interval);
6368 atomic_set(&bp->intr_sem, 0);
6370 memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6372 bnx2_enable_int(bp);
6374 if (bp->flags & BNX2_FLAG_USING_MSI) {
6375 /* Test MSI to make sure it is working.
6376 * If the MSI test fails, fall back to INTx mode. */
6378 if (bnx2_test_intr(bp) != 0) {
6379 netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6381 bnx2_disable_int(bp);
6382 bnx2_free_irq(bp);
6384 bnx2_setup_int_mode(bp, 1);
6386 rc = bnx2_init_nic(bp, 0);
6388 if (!rc)
6389 rc = bnx2_request_irq(bp);
6391 if (rc) {
6392 del_timer_sync(&bp->timer);
6393 goto open_err;
6395 bnx2_enable_int(bp);
6398 if (bp->flags & BNX2_FLAG_USING_MSI)
6399 netdev_info(dev, "using MSI\n");
6400 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6401 netdev_info(dev, "using MSIX\n");
6403 netif_tx_start_all_queues(dev);
6404 out:
6405 return rc;
6407 open_err:
6408 bnx2_napi_disable(bp);
6409 bnx2_free_skbs(bp);
6410 bnx2_free_irq(bp);
6411 bnx2_free_mem(bp);
6412 bnx2_del_napi(bp);
6413 bnx2_release_firmware(bp);
6414 goto out;
6417 static void
6418 bnx2_reset_task(struct work_struct *work)
6420 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6421 int rc;
6422 u16 pcicmd;
6424 rtnl_lock();
6425 if (!netif_running(bp->dev)) {
6426 rtnl_unlock();
6427 return;
6430 bnx2_netif_stop(bp, true);
6432 pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
6433 if (!(pcicmd & PCI_COMMAND_MEMORY)) {
6434 /* in case the PCI block has been reset */
6435 pci_restore_state(bp->pdev);
6436 pci_save_state(bp->pdev);
6438 rc = bnx2_init_nic(bp, 1);
6439 if (rc) {
6440 netdev_err(bp->dev, "failed to reset NIC, closing\n");
6441 bnx2_napi_enable(bp);
6442 dev_close(bp->dev);
6443 rtnl_unlock();
6444 return;
6447 atomic_set(&bp->intr_sem, 1);
6448 bnx2_netif_start(bp, true);
6449 rtnl_unlock();
6452 #define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6454 static void
6455 bnx2_dump_ftq(struct bnx2 *bp)
6457 int i;
6458 u32 reg, bdidx, cid, valid;
6459 struct net_device *dev = bp->dev;
6460 static const struct ftq_reg {
6461 char *name;
6462 u32 off;
6463 } ftq_arr[] = {
6464 BNX2_FTQ_ENTRY(RV2P_P),
6465 BNX2_FTQ_ENTRY(RV2P_T),
6466 BNX2_FTQ_ENTRY(RV2P_M),
6467 BNX2_FTQ_ENTRY(TBDR_),
6468 BNX2_FTQ_ENTRY(TDMA_),
6469 BNX2_FTQ_ENTRY(TXP_),
6470 BNX2_FTQ_ENTRY(TXP_),
6471 BNX2_FTQ_ENTRY(TPAT_),
6472 BNX2_FTQ_ENTRY(RXP_C),
6473 BNX2_FTQ_ENTRY(RXP_),
6474 BNX2_FTQ_ENTRY(COM_COMXQ_),
6475 BNX2_FTQ_ENTRY(COM_COMTQ_),
6476 BNX2_FTQ_ENTRY(COM_COMQ_),
6477 BNX2_FTQ_ENTRY(CP_CPQ_),
6480 netdev_err(dev, "<--- start FTQ dump --->\n");
6481 for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
6482 netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
6483 bnx2_reg_rd_ind(bp, ftq_arr[i].off));
6485 netdev_err(dev, "CPU states:\n");
6486 for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
6487 netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
6488 reg, bnx2_reg_rd_ind(bp, reg),
6489 bnx2_reg_rd_ind(bp, reg + 4),
6490 bnx2_reg_rd_ind(bp, reg + 8),
6491 bnx2_reg_rd_ind(bp, reg + 0x1c),
6492 bnx2_reg_rd_ind(bp, reg + 0x1c),
6493 bnx2_reg_rd_ind(bp, reg + 0x20));
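/* Two back-to-back samples of the program counter (reg + 0x1c) are
 * printed per on-chip CPU, presumably so a reader of the log can tell
 * whether that processor is still advancing or is stuck on one
 * instruction when the tx path hangs.
 */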
6495 netdev_err(dev, "<--- end FTQ dump --->\n");
6496 netdev_err(dev, "<--- start TBDC dump --->\n");
6497 netdev_err(dev, "TBDC free cnt: %ld\n",
6498 BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
6499 netdev_err(dev, "LINE CID BIDX CMD VALIDS\n");
6500 for (i = 0; i < 0x20; i++) {
6501 int j = 0;
6503 BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
6504 BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
6505 BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
6506 BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
6507 while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
6508 BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
6509 j++;
6511 cid = BNX2_RD(bp, BNX2_TBDC_CID);
6512 bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
6513 valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
6514 netdev_err(dev, "%02x %06x %04lx %02x [%x]\n",
6515 i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
6516 bdidx >> 24, (valid >> 8) & 0x0ff);
6518 netdev_err(dev, "<--- end TBDC dump --->\n");
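/* Rough shape of the TBDC CAM read handshake used in the loop above:
 * write the entry index to BNX2_TBDC_BD_ADDR, select the CAM_READ
 * opcode, kick BNX2_TBDC_COMMAND, then poll until the CMD_REG_ARB bit
 * clears (bounded at 100 spins) before sampling the CID/BIDX/opcode
 * result registers for that entry.
 */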
6521 static void
6522 bnx2_dump_state(struct bnx2 *bp)
6524 struct net_device *dev = bp->dev;
6525 u32 val1, val2;
6527 pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6528 netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6529 atomic_read(&bp->intr_sem), val1);
6530 pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6531 pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6532 netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6533 netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6534 BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
6535 BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
6536 netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6537 BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6538 netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6539 BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6540 if (bp->flags & BNX2_FLAG_USING_MSIX)
6541 netdev_err(dev, "DEBUG: PBA[%08x]\n",
6542 BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6545 static void
6546 bnx2_tx_timeout(struct net_device *dev)
6548 struct bnx2 *bp = netdev_priv(dev);
6550 bnx2_dump_ftq(bp);
6551 bnx2_dump_state(bp);
6552 bnx2_dump_mcp_state(bp);
6554 /* This allows the netif to be shutdown gracefully before resetting */
6555 schedule_work(&bp->reset_task);
6558 /* Called with netif_tx_lock.
6559 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6560 * netif_wake_queue(). */
6562 static netdev_tx_t
6563 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6565 struct bnx2 *bp = netdev_priv(dev);
6566 dma_addr_t mapping;
6567 struct bnx2_tx_bd *txbd;
6568 struct bnx2_sw_tx_bd *tx_buf;
6569 u32 len, vlan_tag_flags, last_frag, mss;
6570 u16 prod, ring_prod;
6571 int i;
6572 struct bnx2_napi *bnapi;
6573 struct bnx2_tx_ring_info *txr;
6574 struct netdev_queue *txq;
6576 /* Determine which tx ring we will be placed on */
6577 i = skb_get_queue_mapping(skb);
6578 bnapi = &bp->bnx2_napi[i];
6579 txr = &bnapi->tx_ring;
6580 txq = netdev_get_tx_queue(dev, i);
6582 if (unlikely(bnx2_tx_avail(bp, txr) <
6583 (skb_shinfo(skb)->nr_frags + 1))) {
6584 netif_tx_stop_queue(txq);
6585 netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6587 return NETDEV_TX_BUSY;
6589 len = skb_headlen(skb);
6590 prod = txr->tx_prod;
6591 ring_prod = BNX2_TX_RING_IDX(prod);
6593 vlan_tag_flags = 0;
6594 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6595 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6598 if (vlan_tx_tag_present(skb)) {
6599 vlan_tag_flags |=
6600 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6603 if ((mss = skb_shinfo(skb)->gso_size)) {
6604 u32 tcp_opt_len;
6605 struct iphdr *iph;
6607 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6609 tcp_opt_len = tcp_optlen(skb);
6611 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6612 u32 tcp_off = skb_transport_offset(skb) -
6613 sizeof(struct ipv6hdr) - ETH_HLEN;
6615 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6616 TX_BD_FLAGS_SW_FLAGS;
6617 if (likely(tcp_off == 0))
6618 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6619 else {
6620 tcp_off >>= 3;
6621 vlan_tag_flags |= ((tcp_off & 0x3) <<
6622 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6623 ((tcp_off & 0x10) <<
6624 TX_BD_FLAGS_TCP6_OFF4_SHL);
6625 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6627 } else {
6628 iph = ip_hdr(skb);
6629 if (tcp_opt_len || (iph->ihl > 5)) {
6630 vlan_tag_flags |= ((iph->ihl - 5) +
6631 (tcp_opt_len >> 2)) << 8;
6634 } else
6635 mss = 0;
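/* Header-length encoding above, as a worked example (illustrative
 * values): for IPv4 with ihl == 6 (one IP option word) and 12 bytes of
 * TCP options, vlan_tag_flags carries ((6 - 5) + (12 >> 2)) << 8 ==
 * 4 << 8, i.e. the number of extra 32-bit header words beyond the
 * minimal 20-byte IP and TCP headers.
 */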
6637 mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
6638 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6639 dev_kfree_skb_any(skb);
6640 return NETDEV_TX_OK;
6643 tx_buf = &txr->tx_buf_ring[ring_prod];
6644 tx_buf->skb = skb;
6645 dma_unmap_addr_set(tx_buf, mapping, mapping);
6647 txbd = &txr->tx_desc_ring[ring_prod];
6649 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6650 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6651 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6652 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6654 last_frag = skb_shinfo(skb)->nr_frags;
6655 tx_buf->nr_frags = last_frag;
6656 tx_buf->is_gso = skb_is_gso(skb);
6658 for (i = 0; i < last_frag; i++) {
6659 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6661 prod = BNX2_NEXT_TX_BD(prod);
6662 ring_prod = BNX2_TX_RING_IDX(prod);
6663 txbd = &txr->tx_desc_ring[ring_prod];
6665 len = skb_frag_size(frag);
6666 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6667 DMA_TO_DEVICE);
6668 if (dma_mapping_error(&bp->pdev->dev, mapping))
6669 goto dma_error;
6670 dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6671 mapping);
6673 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6674 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6675 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6676 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6679 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6681 /* Sync BD data before updating TX mailbox */
6682 wmb();
6684 netdev_tx_sent_queue(txq, skb->len);
6686 prod = BNX2_NEXT_TX_BD(prod);
6687 txr->tx_prod_bseq += skb->len;
6689 BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6690 BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6692 mmiowb();
6694 txr->tx_prod = prod;
6696 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6697 netif_tx_stop_queue(txq);
6699 /* netif_tx_stop_queue() must be done before checking
6700 * tx index in bnx2_tx_avail() below, because in
6701 * bnx2_tx_int(), we update tx index before checking for
6702 * netif_tx_queue_stopped(). */
6704 smp_mb();
6705 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6706 netif_tx_wake_queue(txq);
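/* An illustrative interleaving of the race the smp_mb() above closes:
 * without the barrier, this CPU could stop the queue after bnx2_tx_int()
 * on another CPU had already freed descriptors but before it observed
 * the stopped state, leaving the queue stopped forever. Re-checking
 * bnx2_tx_avail() after the barrier guarantees at least one side sees
 * the other's update.
 */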
6709 return NETDEV_TX_OK;
6710 dma_error:
6711 /* save value of frag that failed */
6712 last_frag = i;
6714 /* start back at beginning and unmap skb */
6715 prod = txr->tx_prod;
6716 ring_prod = BNX2_TX_RING_IDX(prod);
6717 tx_buf = &txr->tx_buf_ring[ring_prod];
6718 tx_buf->skb = NULL;
6719 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6720 skb_headlen(skb), PCI_DMA_TODEVICE);
6722 /* unmap remaining mapped pages */
6723 for (i = 0; i < last_frag; i++) {
6724 prod = BNX2_NEXT_TX_BD(prod);
6725 ring_prod = BNX2_TX_RING_IDX(prod);
6726 tx_buf = &txr->tx_buf_ring[ring_prod];
6727 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6728 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6729 PCI_DMA_TODEVICE);
6732 dev_kfree_skb_any(skb);
6733 return NETDEV_TX_OK;
6736 /* Called with rtnl_lock */
6737 static int
6738 bnx2_close(struct net_device *dev)
6740 struct bnx2 *bp = netdev_priv(dev);
6742 bnx2_disable_int_sync(bp);
6743 bnx2_napi_disable(bp);
6744 netif_tx_disable(dev);
6745 del_timer_sync(&bp->timer);
6746 bnx2_shutdown_chip(bp);
6747 bnx2_free_irq(bp);
6748 bnx2_free_skbs(bp);
6749 bnx2_free_mem(bp);
6750 bnx2_del_napi(bp);
6751 bp->link_up = 0;
6752 netif_carrier_off(bp->dev);
6753 return 0;
6756 static void
6757 bnx2_save_stats(struct bnx2 *bp)
6759 u32 *hw_stats = (u32 *) bp->stats_blk;
6760 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6761 int i;
6763 /* The 1st 10 counters are 64-bit counters */
6764 for (i = 0; i < 20; i += 2) {
6765 u32 hi;
6766 u64 lo;
6768 hi = temp_stats[i] + hw_stats[i];
6769 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6770 if (lo > 0xffffffff)
6771 hi++;
6772 temp_stats[i] = hi;
6773 temp_stats[i + 1] = lo & 0xffffffff;
6776 for ( ; i < sizeof(struct statistics_block) / 4; i++)
6777 temp_stats[i] += hw_stats[i];
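/* Carry handling above, by example (illustrative values): if a 64-bit
 * counter's saved halves are hi = 1, lo = 0xfffffffe and the hardware
 * contributes lo = 6 (hi = 0), the widened lo sum 0x100000004 exceeds
 * 0xffffffff, so hi is bumped to 2 and lo truncates to 4; the pair still
 * reads back as the correct 64-bit total.
 */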
6780 #define GET_64BIT_NET_STATS64(ctr) \
6781 (((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6783 #define GET_64BIT_NET_STATS(ctr) \
6784 GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
6785 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6787 #define GET_32BIT_NET_STATS(ctr) \
6788 (unsigned long) (bp->stats_blk->ctr + \
6789 bp->temp_stats_blk->ctr)
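/* Sketch of how the helpers above expand, using stat_IfHCInOctets for
 * illustration: GET_64BIT_NET_STATS(stat_IfHCInOctets) glues the _hi/_lo
 * word pair of both the live stats block and the saved temp block into
 * u64s and sums them, so reported totals survive the chip resets that
 * clear the hardware counters.
 */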
6791 static struct rtnl_link_stats64 *
6792 bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6794 struct bnx2 *bp = netdev_priv(dev);
6796 if (bp->stats_blk == NULL)
6797 return net_stats;
6799 net_stats->rx_packets =
6800 GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6801 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6802 GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6804 net_stats->tx_packets =
6805 GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6806 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6807 GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6809 net_stats->rx_bytes =
6810 GET_64BIT_NET_STATS(stat_IfHCInOctets);
6812 net_stats->tx_bytes =
6813 GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6815 net_stats->multicast =
6816 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6818 net_stats->collisions =
6819 GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6821 net_stats->rx_length_errors =
6822 GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6823 GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6825 net_stats->rx_over_errors =
6826 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6827 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6829 net_stats->rx_frame_errors =
6830 GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6832 net_stats->rx_crc_errors =
6833 GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6835 net_stats->rx_errors = net_stats->rx_length_errors +
6836 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6837 net_stats->rx_crc_errors;
6839 net_stats->tx_aborted_errors =
6840 GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6841 GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6843 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
6844 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
6845 net_stats->tx_carrier_errors = 0;
6846 else {
6847 net_stats->tx_carrier_errors =
6848 GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6851 net_stats->tx_errors =
6852 GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6853 net_stats->tx_aborted_errors +
6854 net_stats->tx_carrier_errors;
6856 net_stats->rx_missed_errors =
6857 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6858 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6859 GET_32BIT_NET_STATS(stat_FwRxDrop);
6861 return net_stats;
6864 /* All ethtool functions called with rtnl_lock */
6866 static int
6867 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6869 struct bnx2 *bp = netdev_priv(dev);
6870 int support_serdes = 0, support_copper = 0;
6872 cmd->supported = SUPPORTED_Autoneg;
6873 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6874 support_serdes = 1;
6875 support_copper = 1;
6876 } else if (bp->phy_port == PORT_FIBRE)
6877 support_serdes = 1;
6878 else
6879 support_copper = 1;
6881 if (support_serdes) {
6882 cmd->supported |= SUPPORTED_1000baseT_Full |
6883 SUPPORTED_FIBRE;
6884 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6885 cmd->supported |= SUPPORTED_2500baseX_Full;
6888 if (support_copper) {
6889 cmd->supported |= SUPPORTED_10baseT_Half |
6890 SUPPORTED_10baseT_Full |
6891 SUPPORTED_100baseT_Half |
6892 SUPPORTED_100baseT_Full |
6893 SUPPORTED_1000baseT_Full |
6894 SUPPORTED_TP;
6898 spin_lock_bh(&bp->phy_lock);
6899 cmd->port = bp->phy_port;
6900 cmd->advertising = bp->advertising;
6902 if (bp->autoneg & AUTONEG_SPEED) {
6903 cmd->autoneg = AUTONEG_ENABLE;
6904 } else {
6905 cmd->autoneg = AUTONEG_DISABLE;
6908 if (netif_carrier_ok(dev)) {
6909 ethtool_cmd_speed_set(cmd, bp->line_speed);
6910 cmd->duplex = bp->duplex;
6911 if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
6912 if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
6913 cmd->eth_tp_mdix = ETH_TP_MDI_X;
6914 else
6915 cmd->eth_tp_mdix = ETH_TP_MDI;
6918 else {
6919 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
6920 cmd->duplex = DUPLEX_UNKNOWN;
6922 spin_unlock_bh(&bp->phy_lock);
6924 cmd->transceiver = XCVR_INTERNAL;
6925 cmd->phy_address = bp->phy_addr;
6927 return 0;
6930 static int
6931 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6933 struct bnx2 *bp = netdev_priv(dev);
6934 u8 autoneg = bp->autoneg;
6935 u8 req_duplex = bp->req_duplex;
6936 u16 req_line_speed = bp->req_line_speed;
6937 u32 advertising = bp->advertising;
6938 int err = -EINVAL;
6940 spin_lock_bh(&bp->phy_lock);
6942 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6943 goto err_out_unlock;
6945 if (cmd->port != bp->phy_port &&
6946 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6947 goto err_out_unlock;
6949 /* If device is down, we can store the settings only if the user
6950 * is setting the currently active port. */
6952 if (!netif_running(dev) && cmd->port != bp->phy_port)
6953 goto err_out_unlock;
6955 if (cmd->autoneg == AUTONEG_ENABLE) {
6956 autoneg |= AUTONEG_SPEED;
6958 advertising = cmd->advertising;
6959 if (cmd->port == PORT_TP) {
6960 advertising &= ETHTOOL_ALL_COPPER_SPEED;
6961 if (!advertising)
6962 advertising = ETHTOOL_ALL_COPPER_SPEED;
6963 } else {
6964 advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6965 if (!advertising)
6966 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6968 advertising |= ADVERTISED_Autoneg;
6970 else {
6971 u32 speed = ethtool_cmd_speed(cmd);
6972 if (cmd->port == PORT_FIBRE) {
6973 if ((speed != SPEED_1000 &&
6974 speed != SPEED_2500) ||
6975 (cmd->duplex != DUPLEX_FULL))
6976 goto err_out_unlock;
6978 if (speed == SPEED_2500 &&
6979 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6980 goto err_out_unlock;
6981 } else if (speed == SPEED_1000 || speed == SPEED_2500)
6982 goto err_out_unlock;
6984 autoneg &= ~AUTONEG_SPEED;
6985 req_line_speed = speed;
6986 req_duplex = cmd->duplex;
6987 advertising = 0;
6990 bp->autoneg = autoneg;
6991 bp->advertising = advertising;
6992 bp->req_line_speed = req_line_speed;
6993 bp->req_duplex = req_duplex;
6995 err = 0;
6996 /* If device is down, the new settings will be picked up when it is
6997 * brought up. */
6999 if (netif_running(dev))
7000 err = bnx2_setup_phy(bp, cmd->port);
7002 err_out_unlock:
7003 spin_unlock_bh(&bp->phy_lock);
7005 return err;
7008 static void
7009 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7011 struct bnx2 *bp = netdev_priv(dev);
7013 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
7014 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
7015 strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
7016 strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
7019 #define BNX2_REGDUMP_LEN (32 * 1024)
7021 static int
7022 bnx2_get_regs_len(struct net_device *dev)
7024 return BNX2_REGDUMP_LEN;
7027 static void
7028 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
7030 u32 *p = _p, i, offset;
7031 u8 *orig_p = _p;
7032 struct bnx2 *bp = netdev_priv(dev);
7033 static const u32 reg_boundaries[] = {
7034 0x0000, 0x0098, 0x0400, 0x045c,
7035 0x0800, 0x0880, 0x0c00, 0x0c10,
7036 0x0c30, 0x0d08, 0x1000, 0x101c,
7037 0x1040, 0x1048, 0x1080, 0x10a4,
7038 0x1400, 0x1490, 0x1498, 0x14f0,
7039 0x1500, 0x155c, 0x1580, 0x15dc,
7040 0x1600, 0x1658, 0x1680, 0x16d8,
7041 0x1800, 0x1820, 0x1840, 0x1854,
7042 0x1880, 0x1894, 0x1900, 0x1984,
7043 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
7044 0x1c80, 0x1c94, 0x1d00, 0x1d84,
7045 0x2000, 0x2030, 0x23c0, 0x2400,
7046 0x2800, 0x2820, 0x2830, 0x2850,
7047 0x2b40, 0x2c10, 0x2fc0, 0x3058,
7048 0x3c00, 0x3c94, 0x4000, 0x4010,
7049 0x4080, 0x4090, 0x43c0, 0x4458,
7050 0x4c00, 0x4c18, 0x4c40, 0x4c54,
7051 0x4fc0, 0x5010, 0x53c0, 0x5444,
7052 0x5c00, 0x5c18, 0x5c80, 0x5c90,
7053 0x5fc0, 0x6000, 0x6400, 0x6428,
7054 0x6800, 0x6848, 0x684c, 0x6860,
7055 0x6888, 0x6910, 0x8000
7058 regs->version = 0;
7060 memset(p, 0, BNX2_REGDUMP_LEN);
7062 if (!netif_running(bp->dev))
7063 return;
7065 i = 0;
7066 offset = reg_boundaries[0];
7067 p += offset;
7068 while (offset < BNX2_REGDUMP_LEN) {
7069 *p++ = BNX2_RD(bp, offset);
7070 offset += 4;
7071 if (offset == reg_boundaries[i + 1]) {
7072 offset = reg_boundaries[i + 2];
7073 p = (u32 *) (orig_p + offset);
7074 i += 2;
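/* Walking reg_boundaries[], by example: after dumping 0x0000 through
 * 0x0094 the loop hits boundary 0x0098 and jumps straight to 0x0400, so
 * the hole in the output buffer stays as the zeroes written by the
 * memset() above; the skipped ranges are presumably unimplemented or
 * unsafe to read.
 */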
7079 static void
7080 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7082 struct bnx2 *bp = netdev_priv(dev);
7084 if (bp->flags & BNX2_FLAG_NO_WOL) {
7085 wol->supported = 0;
7086 wol->wolopts = 0;
7088 else {
7089 wol->supported = WAKE_MAGIC;
7090 if (bp->wol)
7091 wol->wolopts = WAKE_MAGIC;
7092 else
7093 wol->wolopts = 0;
7095 memset(&wol->sopass, 0, sizeof(wol->sopass));
7098 static int
7099 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7101 struct bnx2 *bp = netdev_priv(dev);
7103 if (wol->wolopts & ~WAKE_MAGIC)
7104 return -EINVAL;
7106 if (wol->wolopts & WAKE_MAGIC) {
7107 if (bp->flags & BNX2_FLAG_NO_WOL)
7108 return -EINVAL;
7110 bp->wol = 1;
7112 else {
7113 bp->wol = 0;
7116 device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7118 return 0;
7121 static int
7122 bnx2_nway_reset(struct net_device *dev)
7124 struct bnx2 *bp = netdev_priv(dev);
7125 u32 bmcr;
7127 if (!netif_running(dev))
7128 return -EAGAIN;
7130 if (!(bp->autoneg & AUTONEG_SPEED)) {
7131 return -EINVAL;
7134 spin_lock_bh(&bp->phy_lock);
7136 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7137 int rc;
7139 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
7140 spin_unlock_bh(&bp->phy_lock);
7141 return rc;
7144 /* Force a link down visible on the other side */
7145 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7146 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
7147 spin_unlock_bh(&bp->phy_lock);
7149 msleep(20);
7151 spin_lock_bh(&bp->phy_lock);
7153 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
7154 bp->serdes_an_pending = 1;
7155 mod_timer(&bp->timer, jiffies + bp->current_interval);
7158 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
7159 bmcr &= ~BMCR_LOOPBACK;
7160 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
7162 spin_unlock_bh(&bp->phy_lock);
7164 return 0;
7167 static u32
7168 bnx2_get_link(struct net_device *dev)
7170 struct bnx2 *bp = netdev_priv(dev);
7172 return bp->link_up;
7175 static int
7176 bnx2_get_eeprom_len(struct net_device *dev)
7178 struct bnx2 *bp = netdev_priv(dev);
7180 if (bp->flash_info == NULL)
7181 return 0;
7183 return (int) bp->flash_size;
7186 static int
7187 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7188 u8 *eebuf)
7190 struct bnx2 *bp = netdev_priv(dev);
7191 int rc;
7193 /* parameters already validated in ethtool_get_eeprom */
7195 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7197 return rc;
7200 static int
7201 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7202 u8 *eebuf)
7204 struct bnx2 *bp = netdev_priv(dev);
7205 int rc;
7207 /* parameters already validated in ethtool_set_eeprom */
7209 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7211 return rc;
7214 static int
7215 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7217 struct bnx2 *bp = netdev_priv(dev);
7219 memset(coal, 0, sizeof(struct ethtool_coalesce));
7221 coal->rx_coalesce_usecs = bp->rx_ticks;
7222 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7223 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7224 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7226 coal->tx_coalesce_usecs = bp->tx_ticks;
7227 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7228 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7229 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7231 coal->stats_block_coalesce_usecs = bp->stats_ticks;
7233 return 0;
7236 static int
7237 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7239 struct bnx2 *bp = netdev_priv(dev);
7241 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7242 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7244 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7245 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7247 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7248 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7250 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7251 if (bp->rx_quick_cons_trip_int > 0xff)
7252 bp->rx_quick_cons_trip_int = 0xff;
7254 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7255 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7257 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7258 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7260 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7261 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7263 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7264 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7265 0xff;
7267 bp->stats_ticks = coal->stats_block_coalesce_usecs;
7268 if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7269 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7270 bp->stats_ticks = USEC_PER_SEC;
7272 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7273 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7274 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
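/* The clamp-and-mask above quantizes the requested statistics interval
 * to what the host coalescing block accepts; on chips with the
 * broken-stats erratum the only honored values are 0 (off) and one
 * second, so any other request is forced to USEC_PER_SEC.
 */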
7276 if (netif_running(bp->dev)) {
7277 bnx2_netif_stop(bp, true);
7278 bnx2_init_nic(bp, 0);
7279 bnx2_netif_start(bp, true);
7282 return 0;
7285 static void
7286 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7288 struct bnx2 *bp = netdev_priv(dev);
7290 ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
7291 ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
7293 ering->rx_pending = bp->rx_ring_size;
7294 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7296 ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
7297 ering->tx_pending = bp->tx_ring_size;
7300 static int
7301 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
7303 if (netif_running(bp->dev)) {
7304 /* Reset will erase chipset stats; save them */
7305 bnx2_save_stats(bp);
7307 bnx2_netif_stop(bp, true);
7308 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7309 if (reset_irq) {
7310 bnx2_free_irq(bp);
7311 bnx2_del_napi(bp);
7312 } else {
7313 __bnx2_free_irq(bp);
7315 bnx2_free_skbs(bp);
7316 bnx2_free_mem(bp);
7319 bnx2_set_rx_ring_size(bp, rx);
7320 bp->tx_ring_size = tx;
7322 if (netif_running(bp->dev)) {
7323 int rc = 0;
7325 if (reset_irq) {
7326 rc = bnx2_setup_int_mode(bp, disable_msi);
7327 bnx2_init_napi(bp);
7330 if (!rc)
7331 rc = bnx2_alloc_mem(bp);
7333 if (!rc)
7334 rc = bnx2_request_irq(bp);
7336 if (!rc)
7337 rc = bnx2_init_nic(bp, 0);
7339 if (rc) {
7340 bnx2_napi_enable(bp);
7341 dev_close(bp->dev);
7342 return rc;
7344 #ifdef BCM_CNIC
7345 mutex_lock(&bp->cnic_lock);
7346 /* Let cnic know about the new status block. */
7347 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7348 bnx2_setup_cnic_irq_info(bp);
7349 mutex_unlock(&bp->cnic_lock);
7350 #endif
7351 bnx2_netif_start(bp, true);
7353 return 0;
7356 static int
7357 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7359 struct bnx2 *bp = netdev_priv(dev);
7360 int rc;
7362 if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7363 (ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7364 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7366 return -EINVAL;
7368 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7369 false);
7370 return rc;
7373 static void
7374 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7376 struct bnx2 *bp = netdev_priv(dev);
7378 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7379 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7380 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7383 static int
7384 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7386 struct bnx2 *bp = netdev_priv(dev);
7388 bp->req_flow_ctrl = 0;
7389 if (epause->rx_pause)
7390 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7391 if (epause->tx_pause)
7392 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7394 if (epause->autoneg) {
7395 bp->autoneg |= AUTONEG_FLOW_CTRL;
7397 else {
7398 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7401 if (netif_running(dev)) {
7402 spin_lock_bh(&bp->phy_lock);
7403 bnx2_setup_phy(bp, bp->phy_port);
7404 spin_unlock_bh(&bp->phy_lock);
7407 return 0;
7410 static struct {
7411 char string[ETH_GSTRING_LEN];
7412 } bnx2_stats_str_arr[] = {
7413 { "rx_bytes" },
7414 { "rx_error_bytes" },
7415 { "tx_bytes" },
7416 { "tx_error_bytes" },
7417 { "rx_ucast_packets" },
7418 { "rx_mcast_packets" },
7419 { "rx_bcast_packets" },
7420 { "tx_ucast_packets" },
7421 { "tx_mcast_packets" },
7422 { "tx_bcast_packets" },
7423 { "tx_mac_errors" },
7424 { "tx_carrier_errors" },
7425 { "rx_crc_errors" },
7426 { "rx_align_errors" },
7427 { "tx_single_collisions" },
7428 { "tx_multi_collisions" },
7429 { "tx_deferred" },
7430 { "tx_excess_collisions" },
7431 { "tx_late_collisions" },
7432 { "tx_total_collisions" },
7433 { "rx_fragments" },
7434 { "rx_jabbers" },
7435 { "rx_undersize_packets" },
7436 { "rx_oversize_packets" },
7437 { "rx_64_byte_packets" },
7438 { "rx_65_to_127_byte_packets" },
7439 { "rx_128_to_255_byte_packets" },
7440 { "rx_256_to_511_byte_packets" },
7441 { "rx_512_to_1023_byte_packets" },
7442 { "rx_1024_to_1522_byte_packets" },
7443 { "rx_1523_to_9022_byte_packets" },
7444 { "tx_64_byte_packets" },
7445 { "tx_65_to_127_byte_packets" },
7446 { "tx_128_to_255_byte_packets" },
7447 { "tx_256_to_511_byte_packets" },
7448 { "tx_512_to_1023_byte_packets" },
7449 { "tx_1024_to_1522_byte_packets" },
7450 { "tx_1523_to_9022_byte_packets" },
7451 { "rx_xon_frames" },
7452 { "rx_xoff_frames" },
7453 { "tx_xon_frames" },
7454 { "tx_xoff_frames" },
7455 { "rx_mac_ctrl_frames" },
7456 { "rx_filtered_packets" },
7457 { "rx_ftq_discards" },
7458 { "rx_discards" },
7459 { "rx_fw_discards" },
7462 #define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7464 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7466 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7467 STATS_OFFSET32(stat_IfHCInOctets_hi),
7468 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7469 STATS_OFFSET32(stat_IfHCOutOctets_hi),
7470 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7471 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7472 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7473 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7474 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7475 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7476 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7477 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7478 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7479 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7480 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7481 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7482 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7483 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7484 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7485 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7486 STATS_OFFSET32(stat_EtherStatsCollisions),
7487 STATS_OFFSET32(stat_EtherStatsFragments),
7488 STATS_OFFSET32(stat_EtherStatsJabbers),
7489 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7490 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7491 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7492 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7493 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7494 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7495 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7496 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7497 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7498 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7499 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7500 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7501 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7502 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7503 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7504 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7505 STATS_OFFSET32(stat_XonPauseFramesReceived),
7506 STATS_OFFSET32(stat_XoffPauseFramesReceived),
7507 STATS_OFFSET32(stat_OutXonSent),
7508 STATS_OFFSET32(stat_OutXoffSent),
7509 STATS_OFFSET32(stat_MacControlFramesReceived),
7510 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7511 STATS_OFFSET32(stat_IfInFTQDiscards),
7512 STATS_OFFSET32(stat_IfInMBUFDiscards),
7513 STATS_OFFSET32(stat_FwRxDrop),
7516 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7517 * skipped because of errata. */
7519 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7520 8,0,8,8,8,8,8,8,8,8,
7521 4,0,4,4,4,4,4,4,4,4,
7522 4,4,4,4,4,4,4,4,4,4,
7523 4,4,4,4,4,4,4,4,4,4,
7524 4,4,4,4,4,4,4,
7527 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7528 8,0,8,8,8,8,8,8,8,8,
7529 4,4,4,4,4,4,4,4,4,4,
7530 4,4,4,4,4,4,4,4,4,4,
7531 4,4,4,4,4,4,4,4,4,4,
7532 4,4,4,4,4,4,4,
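/* Per-counter width tables, read alongside the errata note above: an
 * entry of 8 marks a 64-bit hi/lo counter pair, 4 a single 32-bit
 * counter, and 0 a counter skipped on that chip (reported as zero),
 * e.g. stat_IfHCInBadOctets in both tables and
 * stat_Dot3StatsCarrierSenseErrors only on the 5706.
 */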
7535 #define BNX2_NUM_TESTS 6
7537 static struct {
7538 char string[ETH_GSTRING_LEN];
7539 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7540 { "register_test (offline)" },
7541 { "memory_test (offline)" },
7542 { "loopback_test (offline)" },
7543 { "nvram_test (online)" },
7544 { "interrupt_test (online)" },
7545 { "link_test (online)" },
7548 static int
7549 bnx2_get_sset_count(struct net_device *dev, int sset)
7551 switch (sset) {
7552 case ETH_SS_TEST:
7553 return BNX2_NUM_TESTS;
7554 case ETH_SS_STATS:
7555 return BNX2_NUM_STATS;
7556 default:
7557 return -EOPNOTSUPP;
7561 static void
7562 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7564 struct bnx2 *bp = netdev_priv(dev);
7566 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7567 if (etest->flags & ETH_TEST_FL_OFFLINE) {
7568 int i;
7570 bnx2_netif_stop(bp, true);
7571 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7572 bnx2_free_skbs(bp);
7574 if (bnx2_test_registers(bp) != 0) {
7575 buf[0] = 1;
7576 etest->flags |= ETH_TEST_FL_FAILED;
7578 if (bnx2_test_memory(bp) != 0) {
7579 buf[1] = 1;
7580 etest->flags |= ETH_TEST_FL_FAILED;
7582 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7583 etest->flags |= ETH_TEST_FL_FAILED;
7585 if (!netif_running(bp->dev))
7586 bnx2_shutdown_chip(bp);
7587 else {
7588 bnx2_init_nic(bp, 1);
7589 bnx2_netif_start(bp, true);
7592 /* wait for link up */
7593 for (i = 0; i < 7; i++) {
7594 if (bp->link_up)
7595 break;
7596 msleep_interruptible(1000);
7600 if (bnx2_test_nvram(bp) != 0) {
7601 buf[3] = 1;
7602 etest->flags |= ETH_TEST_FL_FAILED;
7604 if (bnx2_test_intr(bp) != 0) {
7605 buf[4] = 1;
7606 etest->flags |= ETH_TEST_FL_FAILED;
7609 if (bnx2_test_link(bp) != 0) {
7610 buf[5] = 1;
7611 etest->flags |= ETH_TEST_FL_FAILED;
7616 static void
7617 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7619 switch (stringset) {
7620 case ETH_SS_STATS:
7621 memcpy(buf, bnx2_stats_str_arr,
7622 sizeof(bnx2_stats_str_arr));
7623 break;
7624 case ETH_SS_TEST:
7625 memcpy(buf, bnx2_tests_str_arr,
7626 sizeof(bnx2_tests_str_arr));
7627 break;
7631 static void
7632 bnx2_get_ethtool_stats(struct net_device *dev,
7633 struct ethtool_stats *stats, u64 *buf)
7635 struct bnx2 *bp = netdev_priv(dev);
7636 int i;
7637 u32 *hw_stats = (u32 *) bp->stats_blk;
7638 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7639 u8 *stats_len_arr = NULL;
7641 if (hw_stats == NULL) {
7642 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7643 return;
7646 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
7647 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
7648 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
7649 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
7650 stats_len_arr = bnx2_5706_stats_len_arr;
7651 else
7652 stats_len_arr = bnx2_5708_stats_len_arr;
7654 for (i = 0; i < BNX2_NUM_STATS; i++) {
7655 unsigned long offset;
7657 if (stats_len_arr[i] == 0) {
7658 /* skip this counter */
7659 buf[i] = 0;
7660 continue;
7663 offset = bnx2_stats_offset_arr[i];
7664 if (stats_len_arr[i] == 4) {
7665 /* 4-byte counter */
7666 buf[i] = (u64) *(hw_stats + offset) +
7667 *(temp_stats + offset);
7668 continue;
7670 /* 8-byte counter */
7671 buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7672 *(hw_stats + offset + 1) +
7673 (((u64) *(temp_stats + offset)) << 32) +
7674 *(temp_stats + offset + 1);
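/* Assembly of an 8-byte counter above, concretely: offset points at the
 * _hi word and offset + 1 at the adjacent _lo word, so the reported
 * value is (hi << 32) + lo from the live block plus the same combination
 * from the snapshot saved across resets.
 */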
7678 static int
7679 bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7681 struct bnx2 *bp = netdev_priv(dev);
7683 switch (state) {
7684 case ETHTOOL_ID_ACTIVE:
7685 bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
7686 BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7687 return 1; /* cycle on/off once per second */
7689 case ETHTOOL_ID_ON:
7690 BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7691 BNX2_EMAC_LED_1000MB_OVERRIDE |
7692 BNX2_EMAC_LED_100MB_OVERRIDE |
7693 BNX2_EMAC_LED_10MB_OVERRIDE |
7694 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7695 BNX2_EMAC_LED_TRAFFIC);
7696 break;
7698 case ETHTOOL_ID_OFF:
7699 BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7700 break;
7702 case ETHTOOL_ID_INACTIVE:
7703 BNX2_WR(bp, BNX2_EMAC_LED, 0);
7704 BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7705 break;
7708 return 0;
7711 static netdev_features_t
7712 bnx2_fix_features(struct net_device *dev, netdev_features_t features)
7714 struct bnx2 *bp = netdev_priv(dev);
7716 if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
7717 features |= NETIF_F_HW_VLAN_CTAG_RX;
7719 return features;
7722 static int
7723 bnx2_set_features(struct net_device *dev, netdev_features_t features)
7725 struct bnx2 *bp = netdev_priv(dev);
7727 /* TSO with VLAN tag won't work with current firmware */
7728 if (features & NETIF_F_HW_VLAN_CTAG_TX)
7729 dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7730 else
7731 dev->vlan_features &= ~NETIF_F_ALL_TSO;
7733 if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
7734 !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7735 netif_running(dev)) {
7736 bnx2_netif_stop(bp, false);
7737 dev->features = features;
7738 bnx2_set_rx_mode(dev);
7739 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7740 bnx2_netif_start(bp, false);
7741 return 1;
7744 return 0;
7747 static void bnx2_get_channels(struct net_device *dev,
7748 struct ethtool_channels *channels)
7750 struct bnx2 *bp = netdev_priv(dev);
7751 u32 max_rx_rings = 1;
7752 u32 max_tx_rings = 1;
7754 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7755 max_rx_rings = RX_MAX_RINGS;
7756 max_tx_rings = TX_MAX_RINGS;
7759 channels->max_rx = max_rx_rings;
7760 channels->max_tx = max_tx_rings;
7761 channels->max_other = 0;
7762 channels->max_combined = 0;
7763 channels->rx_count = bp->num_rx_rings;
7764 channels->tx_count = bp->num_tx_rings;
7765 channels->other_count = 0;
7766 channels->combined_count = 0;
7769 static int bnx2_set_channels(struct net_device *dev,
7770 struct ethtool_channels *channels)
7772 struct bnx2 *bp = netdev_priv(dev);
7773 u32 max_rx_rings = 1;
7774 u32 max_tx_rings = 1;
7775 int rc = 0;
7777 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7778 max_rx_rings = RX_MAX_RINGS;
7779 max_tx_rings = TX_MAX_RINGS;
7781 if (channels->rx_count > max_rx_rings ||
7782 channels->tx_count > max_tx_rings)
7783 return -EINVAL;
7785 bp->num_req_rx_rings = channels->rx_count;
7786 bp->num_req_tx_rings = channels->tx_count;
7788 if (netif_running(dev))
7789 rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7790 bp->tx_ring_size, true);
7792 return rc;
7795 static const struct ethtool_ops bnx2_ethtool_ops = {
7796 .get_settings = bnx2_get_settings,
7797 .set_settings = bnx2_set_settings,
7798 .get_drvinfo = bnx2_get_drvinfo,
7799 .get_regs_len = bnx2_get_regs_len,
7800 .get_regs = bnx2_get_regs,
7801 .get_wol = bnx2_get_wol,
7802 .set_wol = bnx2_set_wol,
7803 .nway_reset = bnx2_nway_reset,
7804 .get_link = bnx2_get_link,
7805 .get_eeprom_len = bnx2_get_eeprom_len,
7806 .get_eeprom = bnx2_get_eeprom,
7807 .set_eeprom = bnx2_set_eeprom,
7808 .get_coalesce = bnx2_get_coalesce,
7809 .set_coalesce = bnx2_set_coalesce,
7810 .get_ringparam = bnx2_get_ringparam,
7811 .set_ringparam = bnx2_set_ringparam,
7812 .get_pauseparam = bnx2_get_pauseparam,
7813 .set_pauseparam = bnx2_set_pauseparam,
7814 .self_test = bnx2_self_test,
7815 .get_strings = bnx2_get_strings,
7816 .set_phys_id = bnx2_set_phys_id,
7817 .get_ethtool_stats = bnx2_get_ethtool_stats,
7818 .get_sset_count = bnx2_get_sset_count,
7819 .get_channels = bnx2_get_channels,
7820 .set_channels = bnx2_set_channels,
7823 /* Called with rtnl_lock */
7824 static int
7825 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7827 struct mii_ioctl_data *data = if_mii(ifr);
7828 struct bnx2 *bp = netdev_priv(dev);
7829 int err;
7831 switch(cmd) {
7832 case SIOCGMIIPHY:
7833 data->phy_id = bp->phy_addr;
7835 /* fallthru */
7836 case SIOCGMIIREG: {
7837 u32 mii_regval;
7839 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7840 return -EOPNOTSUPP;
7842 if (!netif_running(dev))
7843 return -EAGAIN;
7845 spin_lock_bh(&bp->phy_lock);
7846 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7847 spin_unlock_bh(&bp->phy_lock);
7849 data->val_out = mii_regval;
7851 return err;
7854 case SIOCSMIIREG:
7855 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7856 return -EOPNOTSUPP;
7858 if (!netif_running(dev))
7859 return -EAGAIN;
7861 spin_lock_bh(&bp->phy_lock);
7862 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7863 spin_unlock_bh(&bp->phy_lock);
7865 return err;
7867 default:
7868 /* do nothing */
7869 break;
7871 return -EOPNOTSUPP;
7874 /* Called with rtnl_lock */
7875 static int
7876 bnx2_change_mac_addr(struct net_device *dev, void *p)
7878 struct sockaddr *addr = p;
7879 struct bnx2 *bp = netdev_priv(dev);
7881 if (!is_valid_ether_addr(addr->sa_data))
7882 return -EADDRNOTAVAIL;
7884 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7885 if (netif_running(dev))
7886 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7888 return 0;
7891 /* Called with rtnl_lock */
7892 static int
7893 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7895 struct bnx2 *bp = netdev_priv(dev);
7897 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7898 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7899 return -EINVAL;
7901 dev->mtu = new_mtu;
7902 return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7903 false);
7906 #ifdef CONFIG_NET_POLL_CONTROLLER
7907 static void
7908 poll_bnx2(struct net_device *dev)
7910 struct bnx2 *bp = netdev_priv(dev);
7911 int i;
7913 for (i = 0; i < bp->irq_nvecs; i++) {
7914 struct bnx2_irq *irq = &bp->irq_tbl[i];
7916 disable_irq(irq->vector);
7917 irq->handler(irq->vector, &bp->bnx2_napi[i]);
7918 enable_irq(irq->vector);
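/* Netpoll path: when normal interrupt delivery cannot be relied on
 * (e.g. netconsole printing from restricted contexts), each vector's
 * handler is invoked directly with its IRQ masked so the device can
 * still be serviced synchronously.
 */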
7921 #endif
7923 static void
7924 bnx2_get_5709_media(struct bnx2 *bp)
7926 u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7927 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7928 u32 strap;
7930 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7931 return;
7932 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7933 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7934 return;
7937 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7938 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7939 else
7940 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7942 if (bp->func == 0) {
7943 switch (strap) {
7944 case 0x4:
7945 case 0x5:
7946 case 0x6:
7947 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7948 return;
7950 } else {
7951 switch (strap) {
7952 case 0x1:
7953 case 0x2:
7954 case 0x4:
7955 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7956 return;
7961 static void
7962 bnx2_get_pci_speed(struct bnx2 *bp)
7964 u32 reg;
7966 reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
7967 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7968 u32 clkreg;
7970 bp->flags |= BNX2_FLAG_PCIX;
7972 clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7974 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7975 switch (clkreg) {
7976 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7977 bp->bus_speed_mhz = 133;
7978 break;
7980 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7981 bp->bus_speed_mhz = 100;
7982 break;
7984 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7985 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7986 bp->bus_speed_mhz = 66;
7987 break;
7989 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7990 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7991 bp->bus_speed_mhz = 50;
7992 break;
7994 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7995 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7996 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7997 bp->bus_speed_mhz = 33;
7998 break;
8001 else {
8002 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
8003 bp->bus_speed_mhz = 66;
8004 else
8005 bp->bus_speed_mhz = 33;
8008 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
8009 bp->flags |= BNX2_FLAG_PCI_32BIT;
8013 static void
8014 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
8016 int rc, i, j;
8017 u8 *data;
8018 unsigned int block_end, rosize, len;
8020 #define BNX2_VPD_NVRAM_OFFSET 0x300
8021 #define BNX2_VPD_LEN 128
8022 #define BNX2_MAX_VER_SLEN 30
8024 data = kmalloc(256, GFP_KERNEL);
8025 if (!data)
8026 return;
8028 rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
8029 BNX2_VPD_LEN);
8030 if (rc)
8031 goto vpd_done;
8033 for (i = 0; i < BNX2_VPD_LEN; i += 4) {
8034 data[i] = data[i + BNX2_VPD_LEN + 3];
8035 data[i + 1] = data[i + BNX2_VPD_LEN + 2];
8036 data[i + 2] = data[i + BNX2_VPD_LEN + 1];
8037 data[i + 3] = data[i + BNX2_VPD_LEN];
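/* The copy loop above undoes NVRAM's per-word byte order: each aligned
 * group of four bytes is reversed while moving the VPD image from the
 * staging half of the buffer to the start, e.g. bytes stored as
 * 'D' 'L' 'R' 'W' come out as 'W' 'R' 'L' 'D'.
 */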
8040 i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
8041 if (i < 0)
8042 goto vpd_done;
8044 rosize = pci_vpd_lrdt_size(&data[i]);
8045 i += PCI_VPD_LRDT_TAG_SIZE;
8046 block_end = i + rosize;
8048 if (block_end > BNX2_VPD_LEN)
8049 goto vpd_done;
8051 j = pci_vpd_find_info_keyword(data, i, rosize,
8052 PCI_VPD_RO_KEYWORD_MFR_ID);
8053 if (j < 0)
8054 goto vpd_done;
8056 len = pci_vpd_info_field_size(&data[j]);
8058 j += PCI_VPD_INFO_FLD_HDR_SIZE;
8059 if (j + len > block_end || len != 4 ||
8060 memcmp(&data[j], "1028", 4))
8061 goto vpd_done;
8063 j = pci_vpd_find_info_keyword(data, i, rosize,
8064 PCI_VPD_RO_KEYWORD_VENDOR0);
8065 if (j < 0)
8066 goto vpd_done;
8068 len = pci_vpd_info_field_size(&data[j]);
8070 j += PCI_VPD_INFO_FLD_HDR_SIZE;
8071 if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
8072 goto vpd_done;
8074 memcpy(bp->fw_version, &data[j], len);
8075 bp->fw_version[len] = ' ';
8077 vpd_done:
8078 kfree(data);
8081 static int
8082 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8084 struct bnx2 *bp;
8085 int rc, i, j;
8086 u32 reg;
8087 u64 dma_mask, persist_dma_mask;
8088 int err;
8090 SET_NETDEV_DEV(dev, &pdev->dev);
8091 bp = netdev_priv(dev);
8093 bp->flags = 0;
8094 bp->phy_flags = 0;
8096 bp->temp_stats_blk =
8097 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8099 if (bp->temp_stats_blk == NULL) {
8100 rc = -ENOMEM;
8101 goto err_out;
8104 /* enable device (incl. PCI PM wakeup), and bus-mastering */
8105 rc = pci_enable_device(pdev);
8106 if (rc) {
8107 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8108 goto err_out;
8111 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8112 dev_err(&pdev->dev,
8113 "Cannot find PCI device base address, aborting\n");
8114 rc = -ENODEV;
8115 goto err_out_disable;
8118 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8119 if (rc) {
8120 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8121 goto err_out_disable;
8124 pci_set_master(pdev);
8126 bp->pm_cap = pdev->pm_cap;
8127 if (bp->pm_cap == 0) {
8128 dev_err(&pdev->dev,
8129 "Cannot find power management capability, aborting\n");
8130 rc = -EIO;
8131 goto err_out_release;
8134 bp->dev = dev;
8135 bp->pdev = pdev;
8137 spin_lock_init(&bp->phy_lock);
8138 spin_lock_init(&bp->indirect_lock);
8139 #ifdef BCM_CNIC
8140 mutex_init(&bp->cnic_lock);
8141 #endif
8142 INIT_WORK(&bp->reset_task, bnx2_reset_task);
8144 bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8145 TX_MAX_TSS_RINGS + 1));
8146 if (!bp->regview) {
8147 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8148 rc = -ENOMEM;
8149 goto err_out_release;
8152 /* Configure byte swap and enable write to the reg_window registers.
8153 * Rely on CPU to do target byte swapping on big endian systems.
8154 * The chip's target access swapping will not swap all accesses. */
8156 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8157 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8158 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8160 bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8162 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8163 if (!pci_is_pcie(pdev)) {
8164 dev_err(&pdev->dev, "Not PCIE, aborting\n");
8165 rc = -EIO;
8166 goto err_out_unmap;
8168 bp->flags |= BNX2_FLAG_PCIE;
8169 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8170 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8172 /* AER (Advanced Error Reporting) hooks */
8173 err = pci_enable_pcie_error_reporting(pdev);
8174 if (!err)
8175 bp->flags |= BNX2_FLAG_AER_ENABLED;
8177 } else {
8178 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8179 if (bp->pcix_cap == 0) {
8180 dev_err(&pdev->dev,
8181 "Cannot find PCIX capability, aborting\n");
8182 rc = -EIO;
8183 goto err_out_unmap;
8185 bp->flags |= BNX2_FLAG_BROKEN_STATS;
8188 if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8189 BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8190 if (pdev->msix_cap)
8191 bp->flags |= BNX2_FLAG_MSIX_CAP;
8194 if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8195 BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8196 if (pdev->msi_cap)
8197 bp->flags |= BNX2_FLAG_MSI_CAP;
8200 /* 5708 cannot support DMA addresses > 40-bit. */
8201 if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8202 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8203 else
8204 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8206 /* Configure DMA attributes. */
8207 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8208 dev->features |= NETIF_F_HIGHDMA;
8209 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8210 if (rc) {
8211 dev_err(&pdev->dev,
8212 "pci_set_consistent_dma_mask failed, aborting\n");
8213 goto err_out_unmap;
8215 } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8216 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8217 goto err_out_unmap;
8220 if (!(bp->flags & BNX2_FLAG_PCIE))
8221 bnx2_get_pci_speed(bp);
8223 /* 5706A0 may falsely detect SERR and PERR. */
8224 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8225 reg = BNX2_RD(bp, PCI_COMMAND);
8226 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8227 BNX2_WR(bp, PCI_COMMAND, reg);
8228 } else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8229 !(bp->flags & BNX2_FLAG_PCIX)) {
8231 dev_err(&pdev->dev,
8232 "5706 A1 can only be used in a PCIX bus, aborting\n");
8233 goto err_out_unmap;
8236 bnx2_init_nvram(bp);
8238 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8240 if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8241 bp->func = 1;
8243 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8244 BNX2_SHM_HDR_SIGNATURE_SIG) {
8245 u32 off = bp->func << 2;
8247 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8248 } else
8249 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8251 /* Get the permanent MAC address. First we need to make sure the
8252 * firmware is actually running. */
8254 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8256 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8257 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8258 dev_err(&pdev->dev, "Firmware not running, aborting\n");
8259 rc = -ENODEV;
8260 goto err_out_unmap;
8263 bnx2_read_vpd_fw_ver(bp);
8265 j = strlen(bp->fw_version);
8266 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8267 for (i = 0; i < 3 && j < 24; i++) {
8268 u8 num, k, skip0;
8270 if (i == 0) {
8271 bp->fw_version[j++] = 'b';
8272 bp->fw_version[j++] = 'c';
8273 bp->fw_version[j++] = ' ';
8275 num = (u8) (reg >> (24 - (i * 8)));
8276 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8277 if (num >= k || !skip0 || k == 1) {
8278 bp->fw_version[j++] = (num / k) + '0';
8279 skip0 = 0;
8282 if (i != 2)
8283 bp->fw_version[j++] = '.';
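/* The inner loop above is a minimal itoa for one byte of the bootcode
 * revision; e.g. num == 26 skips k == 100 (skip0 still set), emits '2'
 * at k == 10, then '6' at k == 1, so versions print without leading
 * zeros while a plain 0 still prints as "0".
 */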
8285 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8286 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8287 bp->wol = 1;
8289 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8290 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8292 for (i = 0; i < 30; i++) {
8293 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8294 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8295 break;
8296 msleep(10);
8299 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8300 reg &= BNX2_CONDITION_MFW_RUN_MASK;
8301 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8302 reg != BNX2_CONDITION_MFW_RUN_NONE) {
8303 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8305 if (j < 32)
8306 bp->fw_version[j++] = ' ';
8307 for (i = 0; i < 3 && j < 28; i++) {
8308 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8309 reg = be32_to_cpu(reg);
8310 memcpy(&bp->fw_version[j], &reg, 4);
8311 j += 4;
8315 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8316 bp->mac_addr[0] = (u8) (reg >> 8);
8317 bp->mac_addr[1] = (u8) reg;
8319 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8320 bp->mac_addr[2] = (u8) (reg >> 24);
8321 bp->mac_addr[3] = (u8) (reg >> 16);
8322 bp->mac_addr[4] = (u8) (reg >> 8);
8323 bp->mac_addr[5] = (u8) reg;
8325 bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8326 bnx2_set_rx_ring_size(bp, 255);
8328 bp->tx_quick_cons_trip_int = 2;
8329 bp->tx_quick_cons_trip = 20;
8330 bp->tx_ticks_int = 18;
8331 bp->tx_ticks = 80;
8333 bp->rx_quick_cons_trip_int = 2;
8334 bp->rx_quick_cons_trip = 12;
8335 bp->rx_ticks_int = 18;
8336 bp->rx_ticks = 18;
8338 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8340 bp->current_interval = BNX2_TIMER_INTERVAL;
8342 bp->phy_addr = 1;
8344 /* Disable WOL support if we are running on a SERDES chip. */
8345 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8346 bnx2_get_5709_media(bp);
8347 else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8348 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8350 bp->phy_port = PORT_TP;
8351 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8352 bp->phy_port = PORT_FIBRE;
8353 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8354 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8355 bp->flags |= BNX2_FLAG_NO_WOL;
8356 bp->wol = 0;
8358 if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8359 /* Don't do parallel detect on this board because of
8360 * some board problems. The link will not go down
8361 * if we do parallel detect. */
8363 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8364 pdev->subsystem_device == 0x310c)
8365 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8366 } else {
8367 bp->phy_addr = 2;
8368 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8369 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8371 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8372 BNX2_CHIP(bp) == BNX2_CHIP_5708)
8373 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8374 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8375 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8376 BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8377 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8379 bnx2_init_fw_cap(bp);
8381 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8382 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8383 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8384 !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8385 bp->flags |= BNX2_FLAG_NO_WOL;
8386 bp->wol = 0;
8389 if (bp->flags & BNX2_FLAG_NO_WOL)
8390 device_set_wakeup_capable(&bp->pdev->dev, false);
8391 else
8392 device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8394 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8395 bp->tx_quick_cons_trip_int =
8396 bp->tx_quick_cons_trip;
8397 bp->tx_ticks_int = bp->tx_ticks;
8398 bp->rx_quick_cons_trip_int =
8399 bp->rx_quick_cons_trip;
8400 bp->rx_ticks_int = bp->rx_ticks;
8401 bp->comp_prod_trip_int = bp->comp_prod_trip;
8402 bp->com_ticks_int = bp->com_ticks;
8403 bp->cmd_ticks_int = bp->cmd_ticks;
8406 /* Disable MSI on 5706 if AMD 8132 bridge is found.
8408 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
8409 * with byte enables disabled on the unused 32-bit word. This is legal
8410 * but causes problems on the AMD 8132 which will eventually stop
8411 * responding after a while.
8413 * AMD believes this incompatibility is unique to the 5706, and
8414 * prefers to locally disable MSI rather than globally disabling it. */
8416 if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8417 struct pci_dev *amd_8132 = NULL;
8419 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8420 PCI_DEVICE_ID_AMD_8132_BRIDGE,
8421 amd_8132))) {
8423 if (amd_8132->revision >= 0x10 &&
8424 amd_8132->revision <= 0x13) {
8425 disable_msi = 1;
8426 pci_dev_put(amd_8132);
8427 break;
8432 bnx2_set_default_link(bp);
8433 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8435 init_timer(&bp->timer);
8436 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8437 bp->timer.data = (unsigned long) bp;
8438 bp->timer.function = bnx2_timer;
8440 #ifdef BCM_CNIC
8441 if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8442 bp->cnic_eth_dev.max_iscsi_conn =
8443 (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8444 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8445 bp->cnic_probe = bnx2_cnic_probe;
8446 #endif
8447 pci_save_state(pdev);
8449 return 0;
8451 err_out_unmap:
8452 if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8453 pci_disable_pcie_error_reporting(pdev);
8454 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8457 pci_iounmap(pdev, bp->regview);
8458 bp->regview = NULL;
8460 err_out_release:
8461 pci_release_regions(pdev);
8463 err_out_disable:
8464 pci_disable_device(pdev);
8466 err_out:
8467 return rc;
8470 static char *
8471 bnx2_bus_string(struct bnx2 *bp, char *str)
8473 char *s = str;
8475 if (bp->flags & BNX2_FLAG_PCIE) {
8476 s += sprintf(s, "PCI Express");
8477 } else {
8478 s += sprintf(s, "PCI");
8479 if (bp->flags & BNX2_FLAG_PCIX)
8480 s += sprintf(s, "-X");
8481 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8482 s += sprintf(s, " 32-bit");
8483 else
8484 s += sprintf(s, " 64-bit");
8485 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8487 return str;
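/* Example outputs from the formatter above: "PCI Express" on a
 * 5709/5716, or "PCI-X 64-bit 133MHz" for a 5706/5708 seated in a
 * 133 MHz PCI-X slot; str must be sized for the longest form (the
 * caller passes a 40-byte buffer).
 */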
8490 static void
8491 bnx2_del_napi(struct bnx2 *bp)
8493 int i;
8495 for (i = 0; i < bp->irq_nvecs; i++)
8496 netif_napi_del(&bp->bnx2_napi[i].napi);
8499 static void
8500 bnx2_init_napi(struct bnx2 *bp)
8502 int i;
8504 for (i = 0; i < bp->irq_nvecs; i++) {
8505 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8506 int (*poll)(struct napi_struct *, int);
8508 if (i == 0)
8509 poll = bnx2_poll;
8510 else
8511 poll = bnx2_poll_msix;
8513 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8514 bnapi->bp = bp;
static const struct net_device_ops bnx2_netdev_ops = {
        .ndo_open               = bnx2_open,
        .ndo_start_xmit         = bnx2_start_xmit,
        .ndo_stop               = bnx2_close,
        .ndo_get_stats64        = bnx2_get_stats64,
        .ndo_set_rx_mode        = bnx2_set_rx_mode,
        .ndo_do_ioctl           = bnx2_ioctl,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = bnx2_change_mac_addr,
        .ndo_change_mtu         = bnx2_change_mtu,
        .ndo_fix_features       = bnx2_fix_features,
        .ndo_set_features       = bnx2_set_features,
        .ndo_tx_timeout         = bnx2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = poll_bnx2,
#endif
};

static int
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        static int version_printed = 0;
        struct net_device *dev;
        struct bnx2 *bp;
        int rc;
        char str[40];

        if (version_printed++ == 0)
                pr_info("%s", version);

        /* dev is zeroed by alloc_etherdev_mq() */
        dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
        if (!dev)
                return -ENOMEM;

        rc = bnx2_init_board(pdev, dev);
        if (rc < 0)
                goto err_free;

        dev->netdev_ops = &bnx2_netdev_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
        dev->ethtool_ops = &bnx2_ethtool_ops;

        bp = netdev_priv(dev);

        pci_set_drvdata(pdev, dev);

        memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);

        dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
                NETIF_F_TSO | NETIF_F_TSO_ECN |
                NETIF_F_RXHASH | NETIF_F_RXCSUM;

        if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
                dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
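
        /*
         * Snapshot vlan_features before adding the CTAG offload bits:
         * VLAN tag insertion/stripping applies only to the physical
         * device and should not be inherited by stacked VLAN devices.
         */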
        dev->vlan_features = dev->hw_features;
        dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
        dev->features |= dev->hw_features;
        dev->priv_flags |= IFF_UNICAST_FLT;

        if ((rc = register_netdev(dev))) {
                dev_err(&pdev->dev, "Cannot register net device\n");
                goto error;
        }
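
        /*
         * The "(%c%d)" below decodes the chip revision from BNX2_CHIP_ID:
         * the 0xf000 field selects the letter ('A' + value) and the 0x0ff0
         * field the number, so B0 silicon has 0x1000/0x000 in those fields.
         */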
        netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
                    "node addr %pM\n", board_info[ent->driver_data].name,
                    ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
                    ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
                    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
                    pdev->irq, dev->dev_addr);

        return 0;

error:
        pci_iounmap(pdev, bp->regview);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
err_free:
        free_netdev(dev);
        return rc;
}

static void
bnx2_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        unregister_netdev(dev);

        del_timer_sync(&bp->timer);
        cancel_work_sync(&bp->reset_task);

        pci_iounmap(bp->pdev, bp->regview);

        kfree(bp->temp_stats_blk);

        if (bp->flags & BNX2_FLAG_AER_ENABLED) {
                pci_disable_pcie_error_reporting(pdev);
                bp->flags &= ~BNX2_FLAG_AER_ENABLED;
        }

        bnx2_release_firmware(bp);

        free_netdev(dev);

        pci_release_regions(pdev);
        pci_disable_device(pdev);
}

#ifdef CONFIG_PM_SLEEP
static int
bnx2_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        if (netif_running(dev)) {
                cancel_work_sync(&bp->reset_task);
                bnx2_netif_stop(bp, true);
                netif_device_detach(dev);
                del_timer_sync(&bp->timer);
                bnx2_shutdown_chip(bp);
                __bnx2_free_irq(bp);
                bnx2_free_skbs(bp);
        }
        bnx2_setup_wol(bp);
        return 0;
}

static int
bnx2_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        if (!netif_running(dev))
                return 0;

        bnx2_set_power_state(bp, PCI_D0);
        netif_device_attach(dev);
        bnx2_request_irq(bp);
        bnx2_init_nic(bp, 1);
        bnx2_netif_start(bp, true);
        return 0;
}

static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
#define BNX2_PM_OPS (&bnx2_pm_ops)
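
/*
 * SIMPLE_DEV_PM_OPS() wires the same callback pair into every system-sleep
 * hook; the generated structure is roughly equivalent to:
 *
 *      static const struct dev_pm_ops bnx2_pm_ops = {
 *              .suspend  = bnx2_suspend, .resume  = bnx2_resume,
 *              .freeze   = bnx2_suspend, .thaw    = bnx2_resume,
 *              .poweroff = bnx2_suspend, .restore = bnx2_resume,
 *      };
 */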
#else

#define BNX2_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
                                               pci_channel_state_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        rtnl_lock();
        netif_device_detach(dev);

        if (state == pci_channel_io_perm_failure) {
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }

        if (netif_running(dev)) {
                bnx2_netif_stop(bp, true);
                del_timer_sync(&bp->timer);
                bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
        }

        pci_disable_device(pdev);
        rtnl_unlock();

        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);
        pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
        int err = 0;

        rtnl_lock();
        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset\n");
        } else {
                pci_set_master(pdev);
                pci_restore_state(pdev);
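                /*
                 * pci_restore_state() consumes the saved copy, so save it
                 * again right away; a later reset cycle can then still
                 * restore config space.
                 */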
                pci_save_state(pdev);

                if (netif_running(dev))
                        err = bnx2_init_nic(bp, 1);

                if (!err)
                        result = PCI_ERS_RESULT_RECOVERED;
        }
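
        /*
         * If recovery failed while the interface was up, re-enable NAPI
         * before dev_close(): bnx2_netif_stop() left it disabled, and the
         * close path would otherwise try to disable it a second time and
         * could block.
         */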
        if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
                bnx2_napi_enable(bp);
                dev_close(dev);
        }
        rtnl_unlock();

        if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
                return result;

        err = pci_cleanup_aer_uncorrect_error_status(pdev);
        if (err) {
                dev_err(&pdev->dev,
                        "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
                        err); /* non-fatal, continue */
        }

        return result;
}

/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        rtnl_lock();
        if (netif_running(dev))
                bnx2_netif_start(bp, true);

        netif_device_attach(dev);
        rtnl_unlock();
}

static void bnx2_shutdown(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp;

        if (!dev)
                return;

        bp = netdev_priv(dev);
        if (!bp)
                return;

        rtnl_lock();
        if (netif_running(dev))
                dev_close(bp->dev);
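
        /*
         * Drop to D3hot only for a real power-off.  On reboot or kexec the
         * device stays in D0 so that firmware or the next kernel finds it
         * fully functional.
         */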
        if (system_state == SYSTEM_POWER_OFF)
                bnx2_set_power_state(bp, PCI_D3hot);

        rtnl_unlock();
}

static const struct pci_error_handlers bnx2_err_handler = {
        .error_detected = bnx2_io_error_detected,
        .slot_reset     = bnx2_io_slot_reset,
        .resume         = bnx2_io_resume,
};

static struct pci_driver bnx2_pci_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = bnx2_pci_tbl,
        .probe          = bnx2_init_one,
        .remove         = bnx2_remove_one,
        .driver.pm      = BNX2_PM_OPS,
        .err_handler    = &bnx2_err_handler,
        .shutdown       = bnx2_shutdown,
};

module_pci_driver(bnx2_pci_driver);
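
/*
 * module_pci_driver() generates the module init/exit boilerplate; it is
 * roughly equivalent to:
 *
 *      static int __init bnx2_pci_driver_init(void)
 *      {
 *              return pci_register_driver(&bnx2_pci_driver);
 *      }
 *      module_init(bnx2_pci_driver_init);
 *
 *      static void __exit bnx2_pci_driver_exit(void)
 *      {
 *              pci_unregister_driver(&bnx2_pci_driver);
 *      }
 *      module_exit(bnx2_pci_driver_exit);
 */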