/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>

#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
#include "bnx2.h"
#include "bnx2_fw.h"
#define DRV_MODULE_NAME		"bnx2"
#define DRV_MODULE_VERSION	"2.0.9"
#define DRV_MODULE_RELDATE	"April 27, 2010"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-5.0.0.j6.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-5.0.0.j9.fw"
#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-5.0.0.j10.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)
static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};

static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
static void bnx2_init_napi(struct bnx2 *bp);

static inline u32
bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
}
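
/* Context memory write.  The 5709 uses a write-request interface that
 * must be polled for completion; older chips take the address/data
 * pair directly.
 */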
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
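
/* Glue for the cnic offload driver.  Compiled in only when CONFIG_CNIC
 * is enabled; empty stubs are provided at the end of this section
 * otherwise.
 */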
#ifdef BCM_CNIC
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct drv_ctl_io *io = &info->data.io;

	switch (info->cmd) {
	case DRV_CTL_IO_WR_CMD:
		bnx2_reg_wr_ind(bp, io->offset, io->data);
		break;
	case DRV_CTL_IO_RD_CMD:
		io->data = bnx2_reg_rd_ind(bp, io->offset);
		break;
	case DRV_CTL_CTX_WR_CMD:
		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}

static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}

struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2_cnic_probe);
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = bp->cnic_ops;
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = bp->cnic_ops;
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

#else

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif
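
/* MDIO access to the PHY.  Hardware auto-polling must be turned off
 * around manual reads and writes, and the START_BUSY bit is polled to
 * detect completion of the transaction.
 */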
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
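
/* Interrupts are masked and unmasked through the PCICFG interrupt ack
 * command register, one write per vector.
 */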
static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		int i;

		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		/* prevent tx timeout */
		for (i = 0; i < bp->dev->num_tx_queues; i++) {
			struct netdev_queue *txq;

			txq = netdev_get_tx_queue(bp->dev, i);
			txq->trans_start = jiffies;
		}
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);
}

static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}
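
/* Ring memory management.  The software shadow rings use
 * kzalloc/vmalloc; the hardware descriptor rings are DMA-coherent
 * allocations.
 */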
static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
					    txr->tx_desc_ring,
					    txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}

static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_desc_ring[j],
						    rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_pg_desc_ring[j],
						    rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}
static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
					     &txr->tx_desc_mapping);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}

static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_desc_mapping[j]);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						&rxr->rx_pg_desc_mapping[j]);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bnapi->status_blk.msi,
				    bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
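
/* Allocate the status block(s), the statistics block, the 5709 context
 * memory, and the rx/tx rings.  Everything allocated so far is freed
 * on any failure.
 */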
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
			case SPEED_10:
				if (bp->duplex == DUPLEX_HALF)
					fw_link_status = BNX2_LINK_STATUS_10HALF;
				else
					fw_link_status = BNX2_LINK_STATUS_10FULL;
				break;
			case SPEED_100:
				if (bp->duplex == DUPLEX_HALF)
					fw_link_status = BNX2_LINK_STATUS_100HALF;
				else
					fw_link_status = BNX2_LINK_STATUS_100FULL;
				break;
			case SPEED_1000:
				if (bp->duplex == DUPLEX_HALF)
					fw_link_status = BNX2_LINK_STATUS_1000HALF;
				else
					fw_link_status = BNX2_LINK_STATUS_1000FULL;
				break;
			case SPEED_2500:
				if (bp->duplex == DUPLEX_HALF)
					fw_link_status = BNX2_LINK_STATUS_2500HALF;
				else
					fw_link_status = BNX2_LINK_STATUS_2500FULL;
				break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
		 "Copper"));
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
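
/* Resolve the negotiated pause configuration from the local and remote
 * advertisements, following the priority resolution table in the
 * 802.3 specification.
 */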
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
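
/* Per-chip link-up handlers: derive line speed and duplex from the
 * chip-specific SerDes status registers.
 */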
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);

		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}

static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		if (i == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}
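
/* Program the EMAC mode, slot time, and rx/tx PAUSE enables to match
 * the link parameters resolved above.
 */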
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
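
/* Force (or un-force) 2.5G operation.  The 5709 uses the SERDES_DIG
 * MISC1 register; the 5708 uses a BMCR override bit.
 */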
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
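
/* Main link state machine: samples BMSR (twice, since link status is
 * latched), runs the chip-specific link-up handler, and reports the
 * result to the stack and the firmware.
 */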
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}
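
/* Remote PHY: link settings are passed to the bootcode through shared
 * memory and applied by the firmware, so the phy_lock is dropped
 * around the firmware handshake.
 */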
static int bnx2_fw_sync(struct bnx2 *, u32, int, int);

static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);
	}

	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
static int
bnx2_set_remote_link(struct bnx2 *bp)
{
	u32 evt_code;

	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
	switch (evt_code) {
		case BNX2_FW_EVT_CODE_LINK_EVENT:
			bnx2_remote_phy_event(bp);
			break;
		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
		default:
			bnx2_send_heart_beat(bp);
			break;
	}
	return 0;
}
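
/* Copper autoneg/forced-mode setup.  The advertisement registers are
 * only rewritten when they actually change, to avoid needless
 * renegotiation.
 */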
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
static int
bnx2_setup_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		return (bnx2_setup_serdes_phy(bp, port));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
	u32 mac_mode;

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~BNX2_EMAC_MODE_PORT;
	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
static int bnx2_test_link(struct bnx2 *);

static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			pr_err("fw sync timeout, reset code = %x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
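/* Note on the handshake above: driver and firmware communicate through two
 * shared-memory mailboxes.  The driver posts a command word with a rolling
 * sequence number in BNX2_DRV_MB; the firmware echoes the sequence number
 * back through BNX2_FW_MB once it has accepted the command.  Matching only
 * on BNX2_DRV_MSG_SEQ (rather than the whole word) makes stale
 * acknowledgements from earlier commands harmless.
 */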
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {
			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
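/* The 5709 keeps its connection-context table in host memory rather than
 * on-chip: each BCM_PAGE_SIZE block allocated by the driver is registered
 * with the chip through the HOST_PAGE_TBL_DATA0/1 window above, one
 * page-table entry per loop iteration, and every registration is polled
 * for completion via the WRITE_REQ bit.
 */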
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		pr_err("Failed to allocate memory in %s\n", __func__);
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
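/* The routine above quarantines defective on-chip rx buffer memory on
 * 5706 A0: every free mbuf is allocated once, the handles with bit 9 set
 * (bad blocks) are simply never returned, and only the good handles go
 * back to the pool.  The encoding written to BNX2_RBUF_FW_BUF_FREE,
 * (val << 9) | val | 1, appears to mirror the handle format used on the
 * allocation side; that is an inference from this code, not documented
 * here.
 */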
static void
bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
{
	u32 val;

	val = (mac_addr[0] << 8) | mac_addr[1];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);

	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
	      (mac_addr[4] << 8) | mac_addr[5];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
}
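/* Illustration (not from the source): for the MAC address
 * 00:10:18:aa:bb:cc the two writes above produce
 *	MAC_MATCH0 = 0x00000010	(bytes 0-1 in the low 16 bits)
 *	MAC_MATCH1 = 0x18aabbcc	(bytes 2-5)
 * i.e. the address is split 2/4 across the register pair selected by
 * pos * 8.
 */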
static int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;
	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	rx_pg->page = page;
	pci_unmap_addr_set(rx_pg, mapping, mapping);
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
static void
bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct page *page = rx_pg->page;

	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
		       PCI_DMA_FROMDEVICE);

	__free_page(page);
	rx_pg->page = NULL;
}
static int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL)
		return -ENOMEM;

	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
static int
bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 new_link_state, old_link_state;
	int is_set = 1;

	new_link_state = sblk->status_attn_bits & event;
	old_link_state = sblk->status_attn_bits_ack & event;
	if (new_link_state != old_link_state) {
		if (new_link_state)
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
		else
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
	} else
		is_set = 0;

	return is_set;
}
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);
}
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	barrier();
	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
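/* The last entry of each ring page is a next-page pointer descriptor
 * rather than a real buffer descriptor, so a hardware consumer index that
 * lands on MAX_TX_DESC_CNT is bumped past it.  bnx2_get_hw_rx_cons()
 * below applies the same correction to the rx ring.
 */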
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT))
				last_idx++;
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0)
				break;
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
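/* The double check under __netif_tx_lock() pairs with the smp_mb() above:
 * bnx2_start_xmit() may be stopping the queue concurrently, so the queue
 * is only woken after re-testing both the stopped state and the available
 * ring space while holding the tx lock.
 */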
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
		}

		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
static void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
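/* Recycling moves the existing buffer and its DMA mapping from the
 * consumer slot back to the producer slot instead of unmapping and
 * reallocating, so error paths and copybreak receives keep the rx ring
 * full without touching the page or skb allocators.
 */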
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = pci_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			pci_unmap_page(bp->pdev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	barrier();
	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
		cons++;
	return cons;
}
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 vtag = 0;
		int hw_vlan __maybe_unused = 0;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
							 new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
					  sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
			vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
			if (bp->vlgrp)
				hw_vlan = 1;
			else
#endif
			{
				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
					__skb_push(skb, 4);

				memmove(ve, skb->data + 4, ETH_ALEN * 2);
				ve->h_vlan_proto = htons(ETH_P_8021Q);
				ve->h_vlan_TCI = htons(vtag);
				len += 4;
			}
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
		    (ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;
		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
		    (status & (L2_FHDR_STATUS_TCP_SEGMENT |
			       L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);

#ifdef BCM_VLAN
		if (hw_vlan)
			vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
		else
#endif
			netif_receive_skb(skb);

		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if (rx_pkt == budget)
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;
}
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
static inline int
bnx2_has_fast_work(struct bnx2_napi *bnapi)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
		return 1;
	return 0;
}
#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)

static inline int
bnx2_has_work(struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;

	if (bnx2_has_fast_work(bnapi))
		return 1;

#ifdef BCM_CNIC
	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
		return 1;
#endif

	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
		return 1;

	return 0;
}
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
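/* Workaround for an MSI being lost while work is pending: if the same
 * status index is seen on two consecutive idle checks while work exists,
 * MSI is briefly disabled and re-enabled in PCI config space and the
 * handler is invoked by hand to restart servicing.
 */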
#ifdef BCM_CNIC
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}
}
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
			  int work_done, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
		bnx2_tx_int(bp, bnapi, 0);

	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

	return work_done;
}
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {
			napi_complete(napi);
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
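/* Writing last_status_idx together with INDEX_VALID into INT_ACK_CMD both
 * re-enables this vector's interrupt and tells the chip which status-block
 * update the driver has consumed.  The rmb() above orders the status_idx
 * read against the final bnx2_has_fast_work() check so that no update can
 * slip in between unnoticed.
 */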
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		netdev_for_each_mc_addr(mclist, dev) {
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		netdev_for_each_uc_addr(ha, dev) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
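/* The multicast filter above is a 256-bit hash spread over eight 32-bit
 * registers: the low byte of the little-endian CRC of the address picks a
 * bit, with bits 7:5 selecting the register and bits 4:0 the bit within
 * it.  Illustration (not from the source): crc & 0xff == 0x6b gives
 * regidx = 3, bit = 0x0b, i.e. bit 11 of BNX2_EMAC_MULTICAST_HASH3.
 */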
static int __devinit
check_fw_section(const struct firmware *fw,
		 const struct bnx2_fw_file_section *section,
		 u32 alignment, bool non_empty)
{
	u32 offset = be32_to_cpu(section->offset);
	u32 len = be32_to_cpu(section->len);

	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
		return -EINVAL;
	if ((non_empty && len == 0) || len > fw->size - offset ||
	    len & (alignment - 1))
		return -EINVAL;
	return 0;
}
static int __devinit
check_mips_fw_entry(const struct firmware *fw,
		    const struct bnx2_mips_fw_file_entry *entry)
{
	if (check_fw_section(fw, &entry->text, 4, true) ||
	    check_fw_section(fw, &entry->data, 4, false) ||
	    check_fw_section(fw, &entry->rodata, 4, false))
		return -EINVAL;
	return 0;
}
static int __devinit
bnx2_request_firmware(struct bnx2 *bp)
{
	const char *mips_fw_file, *rv2p_fw_file;
	const struct bnx2_mips_fw_file *mips_fw;
	const struct bnx2_rv2p_fw_file *rv2p_fw;
	int rc;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		mips_fw_file = FW_MIPS_FILE_09;
		if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5709_A1))
			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
		else
			rv2p_fw_file = FW_RV2P_FILE_09;
	} else {
		mips_fw_file = FW_MIPS_FILE_06;
		rv2p_fw_file = FW_RV2P_FILE_06;
	}

	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
		return rc;
	}

	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
		return rc;
	}
	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
		return -EINVAL;
	}
	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
		return -EINVAL;
	}

	return 0;
}
static u32
rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
{
	switch (idx) {
	case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
		rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
		rv2p_code |= RV2P_BD_PAGE_SIZE;
		break;
	}
	return rv2p_code;
}
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		REG_WR(bp, addr, val);
	}

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			REG_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
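/* Each RV2P instruction is 64 bits wide and is staged through the
 * INSTR_HIGH/INSTR_LOW register pair, then committed to instruction word
 * (i / 8) by the write to the per-processor address/command register.
 * The second pass re-walks the image and patches the fixup locations
 * recorded in the firmware file (e.g. the BD page size) before the
 * processor is reset.
 */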
static int
load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
	    const struct bnx2_mips_fw_file_entry *fw_entry)
{
	u32 addr, len, file_offset;
	__be32 *data;
	u32 offset;
	u32 val;

	/* Halt the CPU. */
	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area. */
	addr = be32_to_cpu(fw_entry->text.addr);
	len = be32_to_cpu(fw_entry->text.len);
	file_offset = be32_to_cpu(fw_entry->text.offset);
	data = (__be32 *)(bp->mips_firmware->data + file_offset);

	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
	if (len) {
		int j;

		for (j = 0; j < (len / 4); j++, offset += 4)
			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
	}

	/* Load the Data area. */
	addr = be32_to_cpu(fw_entry->data.addr);
	len = be32_to_cpu(fw_entry->data.len);
	file_offset = be32_to_cpu(fw_entry->data.offset);
	data = (__be32 *)(bp->mips_firmware->data + file_offset);

	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
	if (len) {
		int j;

		for (j = 0; j < (len / 4); j++, offset += 4)
			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
	}

	/* Load the Read-Only area. */
	addr = be32_to_cpu(fw_entry->rodata.addr);
	len = be32_to_cpu(fw_entry->rodata.len);
	file_offset = be32_to_cpu(fw_entry->rodata.offset);
	data = (__be32 *)(bp->mips_firmware->data + file_offset);

	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
	if (len) {
		int j;

		for (j = 0; j < (len / 4); j++, offset += 4)
			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
	}

	/* Clear the pre-fetch instruction. */
	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);

	val = be32_to_cpu(fw_entry->start_addr);
	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);

	/* Start the CPU. */
	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);

	return 0;
}
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	const struct bnx2_mips_fw_file *mips_fw =
		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	const struct bnx2_rv2p_fw_file *rv2p_fw =
		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	int rc;

	/* Initialize the RV2P processor. */
	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);

	/* Initialize the RX Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);

init_cpu_err:
	return rc;
}
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
static int
bnx2_acquire_nvram_lock(struct bnx2 *bp)
{
	u32 val;
	int j;

	/* Request access to the flash interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
static int
bnx2_release_nvram_lock(struct bnx2 *bp)
{
	int j;
	u32 val;

	/* Relinquish nvram interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (bp->flash_info->flags & BNX2_NV_WREN) {
		int j;

		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}
static void
bnx2_disable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
}
static void
bnx2_enable_nvram_access(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
	       val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
}

static void
bnx2_disable_nvram_access(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
	/* Disable both bits, even after read. */
	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
	       val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
		       BNX2_NVM_ACCESS_ENABLE_WR_EN));
}
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
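/* For flashes flagged BNX2_NV_TRANSLATE, the linear offset is rebased onto
 * the device's page * (1 << page_bits) + byte-in-page addressing.  Worked
 * example (illustrative geometry only): with page_size = 264 and
 * page_bits = 9, offset 1000 falls in page 3 at byte 208, so the device
 * address is (3 << 9) + 208 = 1744.
 */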
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd;
	__be32 val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	memcpy(&val32, val, 4);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
		int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;

	/* Request access to the flash interface. */
	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
		return rc;

	/* Enable access to flash interface */
	bnx2_enable_nvram_access(bp);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	if (offset32 & 3) {
		u8 buf[4];
		u32 pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;
		}
		else {
			cmd_flags = BNX2_NVM_COMMAND_FIRST;
		}

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		if (rc)
			return rc;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}
	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		u8 buf[4];

		if (cmd_flags)
			cmd_flags = BNX2_NVM_COMMAND_LAST;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}
	else if (len32 > 0) {
		u8 buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST;

		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		while (len32 > 4 && rc == 0) {
			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			return rc;

		cmd_flags = BNX2_NVM_COMMAND_LAST;
		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

	/* Disable access to flash interface */
	bnx2_disable_nvram_access(bp);

	bnx2_release_nvram_lock(bp);

	return rc;
}
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start)
			memcpy(align_buf, start, 4);
		if (align_end)
			memcpy(align_buf + len32 - 4, end, 4);
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
							   page_start + j,
							   &flash_buffer[j],
							   cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
			    ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
			     (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
						    cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
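/* Page-level read-modify-write performed above, for one affected page:
 *
 *   page_start      data_start        data_end        page_end
 *       |---------------|----------------|---------------|
 *       preserved bytes   caller's data   preserved bytes
 *
 * Non-buffered parts must erase a whole page before programming, so the
 * old page is first read into flash_buffer, the page is erased, and the
 * three regions are written back in order; buffered parts skip the erase
 * and the two preserved regions.
 */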
static void
bnx2_init_fw_cap(struct bnx2 *bp)
{
	u32 val, sig = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;

	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;

	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
		u32 link;

		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;

		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
		if (link & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
	}

	if (netif_running(bp->dev) && sig)
		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
}
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
		       BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
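	/* Note: the backoff seed above folds all six MAC address bytes
	 * into one 32-bit value, so stations on the same segment (which
	 * necessarily differ in MAC address) seed the EMAC's random
	 * backoff generator differently instead of retrying in lockstep
	 * after a collision.  This reading is inferred from the register
	 * name and the arithmetic, not from vendor documentation.
	 */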
	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
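	/* Worked example, assuming the conventional value of 1514 for
	 * MAX_ETHERNET_PACKET_SIZE: a standard MTU of 1500 gives
	 * val = 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) = 1518, which is
	 * exactly MAX_ETHERNET_PACKET_SIZE + 4, so the jumbo bit stays
	 * clear; an MTU of 9000 gives 9018 and sets
	 * BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA.
	 */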
	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
		       BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
		       BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
		       BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
		       (bp->tx_quick_cons_trip_int << 16) |
		       bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
		       (bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
		       bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
		       (bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
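/* The HC_COMMAND value cached in bp->hc_cmd above is reused by later
 * paths in this file (the timer, the interrupt self-test, and the
 * loopback test) so they can OR in one-shot bits such as
 * BNX2_HC_COMMAND_COAL_NOW without re-reading the register first.
 */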
static void
bnx2_clear_ring_states(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct bnx2_rx_ring_info *rxr;
	int i;

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		bnapi = &bp->bnx2_napi[i];
		txr = &bnapi->tx_ring;
		rxr = &bnapi->rx_ring;

		txr->tx_cons = 0;
		txr->hw_tx_cons = 0;
		rxr->rx_prod_bseq = 0;
		rxr->rx_prod = 0;
		rxr->rx_cons = 0;
		rxr->rx_pg_prod = 0;
		rxr->rx_pg_cons = 0;
	}
}
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
{
	u32 val, offset0, offset1, offset2, offset3;
	u32 cid_addr = GET_CID_ADDR(cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	bnx2_ctx_wr(bp, cid_addr, offset0, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	bnx2_ctx_wr(bp, cid_addr, offset1, val);

	val = (u64) txr->tx_desc_mapping >> 32;
	bnx2_ctx_wr(bp, cid_addr, offset2, val);

	val = (u64) txr->tx_desc_mapping & 0xffffffff;
	bnx2_ctx_wr(bp, cid_addr, offset3, val);
}
static void
bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
{
	struct tx_bd *txbd;
	u32 cid = TX_CID;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;

	bnapi = &bp->bnx2_napi[ring_num];
	txr = &bnapi->tx_ring;

	if (ring_num)
		cid = TX_TSS_CID + ring_num - 1;

	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;

	txr->tx_prod = 0;
	txr->tx_prod_bseq = 0;

	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

	bnx2_init_tx_context(bp, cid, txr);
}
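/* Each rx BD page below is initialized so that its final descriptor
 * chains to the start of the next page, with the last page wrapping
 * back to page 0, giving the hardware one continuous ring regardless
 * of how many pages back it.
 */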
static void
bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
		     int num_rings)
{
	int i;
	struct rx_bd *rxbd;

	for (i = 0; i < num_rings; i++) {
		int j;

		rxbd = &rx_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = buf_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		if (i == (num_rings - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
	}
}
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
			    BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
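/* When more than one rx ring is active, the RSS indirection table is
 * built below by packing four one-byte ring selectors into each 32-bit
 * scratch word: tbl[i % 4] collects a byte per iteration and, once the
 * word is full, it is written out big-endian with cpu_to_be32().  The
 * selectors cycle over num_rx_rings - 1 values, which appears to index
 * the RSS rings while ring 0 stays the default ring.
 */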
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
	}
}
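/* bnx2_find_max_ring() below returns the smallest power-of-2 page count
 * that covers ring_size descriptors, capped by max_size.  For example,
 * with MAX_RX_DESC_CNT == 255 (one descriptor per page reserved for
 * chaining, assuming 256-entry pages): ring_size = 600 needs 3 pages,
 * the while loop shrinks max down to 2, and since 2 != 3 it is doubled
 * to 4, the final answer.
 */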
static u32
bnx2_find_max_ring(u32 ring_size, u32 max_size)
{
	u32 max, num_rings = 1;

	while (ring_size > MAX_RX_DESC_CNT) {
		ring_size -= MAX_RX_DESC_CNT;
		num_rings++;
	}
	/* round to next power of 2 */
	max = max_size;
	while ((max & num_rings) == 0)
		max >>= 1;

	if (num_rings != max)
		max <<= 1;

	return max;
}
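/* bnx2_set_rx_ring_size() sizes the normal and (optionally) the page
 * rings.  rx_space is the true per-buffer footprint: the frame plus
 * alignment slack, NET_SKB_PAD, and the skb_shared_info tail.  Only
 * when that footprint no longer fits in one page does the driver
 * switch to the split header/page-ring scheme; the "mtu - 40" term
 * appears to discount the minimum IP + TCP header bytes that always
 * stay in the header buffer when estimating pages per packet.
 */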
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		if (txr->tx_buf_ring == NULL)
			continue;

		for (j = 0; j < TX_DESC_CNT; ) {
			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (skb == NULL) {
				j++;
				continue;
			}

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			last = tx_buf->nr_frags;
			j++;
			for (k = 0; k < last; k++, j++) {
				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
				pci_unmap_page(bp->pdev,
					pci_unmap_addr(tx_buf, mapping),
					skb_shinfo(skb)->frags[k].size,
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
	}
}
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		if (rxr->rx_buf_ring == NULL)
			return;

		for (j = 0; j < bp->rx_max_ring_idx; j++) {
			struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;

			dev_kfree_skb(skb);
		}
		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
			bnx2_free_rx_page(bp, rxr, j);
	}
}

static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int rc;

	rc = bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	if (rc)
		return rc;

	if ((rc = bnx2_init_chip(bp)) != 0)
		return rc;

	bnx2_init_all_rings(bp);
	return 0;
}

static int
bnx2_init_nic(struct bnx2 *bp, int reset_phy)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, reset_phy);
	bnx2_set_link(bp);
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		bnx2_remote_phy_event(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}

static int
bnx2_shutdown_chip(struct bnx2 *bp)
{
	u32 reset_code;

	if (bp->flags & BNX2_FLAG_NO_WOL)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;

	return bnx2_reset_chip(bp, reset_code);
}
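/* The register self-test below probes each table entry twice: it writes
 * 0 and checks that every read/write bit (rw_mask) actually cleared,
 * then writes 0xffffffff and checks that every rw bit set; in both
 * passes the read-only bits (ro_mask) must keep the value saved
 * beforehand.  The original value is restored whether the entry passes
 * or fails.
 */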
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709	1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0)
			goto reg_test_err;

		if ((val & ro_mask) != (save_val & ro_mask))
			goto reg_test_err;

		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask)
			goto reg_test_err;

		if ((val & ro_mask) != (save_val & ro_mask))
			goto reg_test_err;

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
static int
bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
		0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
	int i;

	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
		u32 offset;

		for (offset = 0; offset < size; offset += 4) {

			bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);

			if (bnx2_reg_rd_ind(bp, start + offset) !=
				test_pattern[i]) {
				return -ENODEV;
			}
		}
	}
	return 0;
}
static int
bnx2_test_memory(struct bnx2 *bp)
{
	int ret = 0;
	int i;
	static struct mem_entry {
		u32   offset;
		u32   len;
	} mem_tbl_5706[] = {
		{ 0x60000,  0x4000 },
		{ 0xa0000,  0x3000 },
		{ 0xe0000,  0x4000 },
		{ 0x120000, 0x4000 },
		{ 0x1a0000, 0x4000 },
		{ 0x160000, 0x4000 },
		{ 0xffffffff, 0    },
	},
	mem_tbl_5709[] = {
		{ 0x60000,  0x4000 },
		{ 0xa0000,  0x3000 },
		{ 0xe0000,  0x4000 },
		{ 0x120000, 0x4000 },
		{ 0x1a0000, 0x4000 },
		{ 0xffffffff, 0    },
	};
	struct mem_entry *mem_tbl;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		mem_tbl = mem_tbl_5709;
	else
		mem_tbl = mem_tbl_5706;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
			mem_tbl[i].len)) != 0) {
			return ret;
		}
	}

	return ret;
}
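/* The loopback self-test below hand-crafts one frame (our own MAC as
 * destination, 8 zero bytes, then an incrementing byte pattern), posts
 * it on a single tx BD, and forces the host coalescing block to run
 * with BNX2_HC_COMMAND_COAL_NOW_WO_INT so the completion shows up
 * without an interrupt.  The received copy is then checked for error
 * flags in the l2_fhdr, for length, and byte-for-byte against the
 * pattern.
 */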
#define BNX2_MAC_LOOPBACK	0
#define BNX2_PHY_LOOPBACK	1

static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
			     PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(bp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
#define BNX2_MAC_LOOPBACK_FAILED	1
#define BNX2_PHY_LOOPBACK_FAILED	2
#define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
					 BNX2_PHY_LOOPBACK_FAILED)

static int
bnx2_test_loopback(struct bnx2 *bp)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2_LOOPBACK_FAILED;

	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, 1);
	spin_unlock_bh(&bp->phy_lock);
	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
		rc |= BNX2_MAC_LOOPBACK_FAILED;
	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
		rc |= BNX2_PHY_LOOPBACK_FAILED;
	return rc;
}
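/* The NVRAM self-test reads the 4-byte magic at offset 0 (which must be
 * 0x669955aa) and then a 0x200-byte region at offset 0x100, checking
 * each 0x100-byte half with ether_crc_le().  Because each half carries
 * its own stored CRC32, running the CRC over data plus stored CRC must
 * yield the well-known CRC-32 residual 0xdebb20e3 when intact.
 */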
#define NVRAM_SIZE 0x200
#define CRC32_RESIDUAL 0xdebb20e3

static int
bnx2_test_nvram(struct bnx2 *bp)
{
	__be32 buf[NVRAM_SIZE / 4];
	u8 *data = (u8 *) buf;
	int rc = 0;
	u32 magic, csum;

	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
		goto test_nvram_done;

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
		goto test_nvram_done;

	csum = ether_crc_le(0x100, data);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	csum = ether_crc_le(0x100, data + 0x100);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
	}

test_nvram_done:
	return rc;
}
static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	if (!netif_running(bp->dev))
		return -ENODEV;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		if (bp->link_up)
			return 0;
		return -ENODEV;
	}
	spin_lock_bh(&bp->phy_lock);
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}
static int
bnx2_test_intr(struct bnx2 *bp)
{
	int i;
	u16 status_idx;

	if (!netif_running(bp->dev))
		return -ENODEV;

	status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;

	/* This register is not touched during run-time. */
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
	REG_RD(bp, BNX2_HC_COMMAND);

	for (i = 0; i < 10; i++) {
		if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
			status_idx) {

			break;
		}

		msleep_interruptible(10);
	}
	if (i < 10)
		return 0;

	return -ENODEV;
}
/* Determine link for parallel detection. */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround for occasional corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
static int
bnx2_request_irq(struct bnx2 *bp)
{
	unsigned long flags;
	struct bnx2_irq *irq;
	int rc = 0, i;

	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
		flags = 0;
	else
		flags = IRQF_SHARED;

	for (i = 0; i < bp->irq_nvecs; i++) {
		irq = &bp->irq_tbl[i];
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
				 &bp->bnx2_napi[i]);
		if (rc)
			break;
		irq->requested = 1;
	}
	return rc;
}
static void
bnx2_free_irq(struct bnx2 *bp)
{
	struct bnx2_irq *irq;
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		irq = &bp->irq_tbl[i];
		if (irq->requested)
			free_irq(irq->vector, &bp->bnx2_napi[i]);
		irq->requested = 0;
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		pci_disable_msi(bp->pdev);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		pci_disable_msix(bp->pdev);

	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
}
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, rc;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	bnx2_setup_msix_tbl(bp);
	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	/* Need to flush the previous three writes to ensure MSI-X
	 * is set up properly */
	REG_RD(bp, BNX2_PCI_MSIX_CONTROL);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
	if (rc != 0)
		return;

	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		bp->irq_tbl[i].vector = msix_ent[i].vector;
		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}
}
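/* Vector and ring accounting: bnx2_setup_int_mode() asks for up to
 * min(num_online_cpus() + 1, RX_MAX_RINGS) MSI-X vectors, falls back to
 * plain MSI and then INTx, and finally derives the ring counts from
 * whatever was granted: one rx ring per vector, with the tx ring count
 * rounded down to a power of two, presumably so queue selection can
 * work with a cheap mask or modulo.
 */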
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = num_online_cpus();
	int msix_vecs = min(cpus + 1, RX_MAX_RINGS);

	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
		bnx2_enable_msix(bp, msix_vecs);

	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	bp->dev->real_num_tx_queues = bp->num_tx_rings;

	bp->num_rx_rings = bp->irq_nvecs;
}
/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_init_napi(bp);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working.
		 * If the MSI test fails, go back to INTx mode.
		 */
		if (bnx2_test_intr(bp) != 0) {
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		netdev_info(dev, "using MSI\n");
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSIX\n");

	netif_tx_start_all_queues(dev);

	return 0;

open_err:
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	return rc;
}
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	rtnl_lock();
	if (!netif_running(bp->dev)) {
		rtnl_unlock();
		return;
	}

	bnx2_netif_stop(bp, true);

	bnx2_init_nic(bp, 1);

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp, true);
	rtnl_unlock();
}
static void
bnx2_dump_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;

	netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem));
	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] RPM_MGMT_PKT_CTRL[%08x]\n",
		   REG_RD(bp, BNX2_EMAC_TX_STATUS),
		   REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1));
	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
		   REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_err(dev, "DEBUG: PBA[%08x]\n",
			   REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_dump_state(bp);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (netif_running(dev))
		bnx2_netif_stop(bp, false);

	bp->vlgrp = vlgrp;

	if (!netif_running(dev))
		return;

	bnx2_set_rx_mode(dev);
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);

	bnx2_netif_start(bp, false);
}
#endif
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_tx_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;

	/* Determine which tx ring we will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);

	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

#ifdef BCM_VLAN
	if (bp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
#endif
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			iph = ip_hdr(skb);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;
= pci_map_single(bp
->pdev
, skb
->data
, len
, PCI_DMA_TODEVICE
);
6425 if (pci_dma_mapping_error(bp
->pdev
, mapping
)) {
6427 return NETDEV_TX_OK
;
6430 tx_buf
= &txr
->tx_buf_ring
[ring_prod
];
6432 pci_unmap_addr_set(tx_buf
, mapping
, mapping
);
6434 txbd
= &txr
->tx_desc_ring
[ring_prod
];
6436 txbd
->tx_bd_haddr_hi
= (u64
) mapping
>> 32;
6437 txbd
->tx_bd_haddr_lo
= (u64
) mapping
& 0xffffffff;
6438 txbd
->tx_bd_mss_nbytes
= len
| (mss
<< 16);
6439 txbd
->tx_bd_vlan_tag_flags
= vlan_tag_flags
| TX_BD_FLAGS_START
;
6441 last_frag
= skb_shinfo(skb
)->nr_frags
;
6442 tx_buf
->nr_frags
= last_frag
;
6443 tx_buf
->is_gso
= skb_is_gso(skb
);
6445 for (i
= 0; i
< last_frag
; i
++) {
6446 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
6448 prod
= NEXT_TX_BD(prod
);
6449 ring_prod
= TX_RING_IDX(prod
);
6450 txbd
= &txr
->tx_desc_ring
[ring_prod
];
6453 mapping
= pci_map_page(bp
->pdev
, frag
->page
, frag
->page_offset
,
6454 len
, PCI_DMA_TODEVICE
);
6455 if (pci_dma_mapping_error(bp
->pdev
, mapping
))
6457 pci_unmap_addr_set(&txr
->tx_buf_ring
[ring_prod
], mapping
,
6460 txbd
->tx_bd_haddr_hi
= (u64
) mapping
>> 32;
6461 txbd
->tx_bd_haddr_lo
= (u64
) mapping
& 0xffffffff;
6462 txbd
->tx_bd_mss_nbytes
= len
| (mss
<< 16);
6463 txbd
->tx_bd_vlan_tag_flags
= vlan_tag_flags
;
6466 txbd
->tx_bd_vlan_tag_flags
|= TX_BD_FLAGS_END
;
6468 prod
= NEXT_TX_BD(prod
);
6469 txr
->tx_prod_bseq
+= skb
->len
;
6471 REG_WR16(bp
, txr
->tx_bidx_addr
, prod
);
6472 REG_WR(bp
, txr
->tx_bseq_addr
, txr
->tx_prod_bseq
);
6476 txr
->tx_prod
= prod
;
6478 if (unlikely(bnx2_tx_avail(bp
, txr
) <= MAX_SKB_FRAGS
)) {
6479 netif_tx_stop_queue(txq
);
6480 if (bnx2_tx_avail(bp
, txr
) > bp
->tx_wake_thresh
)
6481 netif_tx_wake_queue(txq
);
6484 return NETDEV_TX_OK
;
6486 /* save value of frag that failed */
6489 /* start back at beginning and unmap skb */
6490 prod
= txr
->tx_prod
;
6491 ring_prod
= TX_RING_IDX(prod
);
6492 tx_buf
= &txr
->tx_buf_ring
[ring_prod
];
6494 pci_unmap_single(bp
->pdev
, pci_unmap_addr(tx_buf
, mapping
),
6495 skb_headlen(skb
), PCI_DMA_TODEVICE
);
6497 /* unmap remaining mapped pages */
6498 for (i
= 0; i
< last_frag
; i
++) {
6499 prod
= NEXT_TX_BD(prod
);
6500 ring_prod
= TX_RING_IDX(prod
);
6501 tx_buf
= &txr
->tx_buf_ring
[ring_prod
];
6502 pci_unmap_page(bp
->pdev
, pci_unmap_addr(tx_buf
, mapping
),
6503 skb_shinfo(skb
)->frags
[i
].size
,
6508 return NETDEV_TX_OK
;
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	cancel_work_sync(&bp->reset_task);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
static void
bnx2_save_stats(struct bnx2 *bp)
{
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	int i;

	/* The 1st 10 counters are 64-bit counters */
	for (i = 0; i < 20; i += 2) {
		u32 hi;
		u64 lo;

		hi = temp_stats[i] + hw_stats[i];
		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
		if (lo > 0xffffffff)
			hi++;
		temp_stats[i] = hi;
		temp_stats[i + 1] = lo & 0xffffffff;
	}

	for ( ; i < sizeof(struct statistics_block) / 4; i++)
		temp_stats[i] += hw_stats[i];
}
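/* Accumulation sketch for the loop above: each 64-bit counter lives as
 * a hi:lo pair of u32s.  Adding the lo halves in a u64 exposes the
 * carry, e.g. lo = 0xffffffff + 0x00000002 = 0x1_00000001 > 0xffffffff,
 * so hi gains the carry and lo keeps 0x00000001.  The macros that
 * follow then read accumulated + live counters as one value, using a
 * true 64-bit unsigned long on 64-bit kernels and only the low 32 bits
 * on 32-bit kernels.
 */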
#define GET_64BIT_NET_STATS64(ctr)				\
	(unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	(unsigned long) (ctr##_lo)

#define GET_64BIT_NET_STATS32(ctr)				\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_64BIT_NET_STATS(ctr)				\
	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
#else
#define GET_64BIT_NET_STATS(ctr)				\
	GET_64BIT_NET_STATS32(bp->stats_blk->ctr) +		\
	GET_64BIT_NET_STATS32(bp->temp_stats_blk->ctr)
#endif

#define GET_32BIT_NET_STATS(ctr)				\
	(unsigned long) (bp->stats_blk->ctr +			\
			 bp->temp_stats_blk->ctr)
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct net_device_stats *net_stats = &dev->stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}

	net_stats->rx_packets =
		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCOutOctets);

	net_stats->multicast =
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

	net_stats->rx_length_errors =
		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

	net_stats->rx_frame_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

	net_stats->rx_crc_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
	}

	net_stats->tx_errors =
		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
		GET_32BIT_NET_STATS(stat_FwRxDrop);

	return net_stats;
}
/* All ethtool functions called with rtnl_lock */

static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	} else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		advertising = cmd->advertising;
		if (cmd->port == PORT_TP) {
			advertising &= ETHTOOL_ALL_COPPER_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		} else {
			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	} else {
		if (cmd->port == PORT_FIBRE) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		} else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(bp->pdev));
	strcpy(info->fw_version, bp->fw_version);
}
#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}

static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
				 0x0800, 0x0880, 0x0c00, 0x0c10,
				 0x0c30, 0x0d08, 0x1000, 0x101c,
				 0x1040, 0x1048, 0x1080, 0x10a4,
				 0x1400, 0x1490, 0x1498, 0x14f0,
				 0x1500, 0x155c, 0x1580, 0x15dc,
				 0x1600, 0x1658, 0x1680, 0x16d8,
				 0x1800, 0x1820, 0x1840, 0x1854,
				 0x1880, 0x1894, 0x1900, 0x1984,
				 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
				 0x1c80, 0x1c94, 0x1d00, 0x1d84,
				 0x2000, 0x2030, 0x23c0, 0x2400,
				 0x2800, 0x2820, 0x2830, 0x2850,
				 0x2b40, 0x2c10, 0x2fc0, 0x3058,
				 0x3c00, 0x3c94, 0x4000, 0x4010,
				 0x4080, 0x4090, 0x43c0, 0x4458,
				 0x4c00, 0x4c18, 0x4c40, 0x4c54,
				 0x4fc0, 0x5010, 0x53c0, 0x5444,
				 0x5c00, 0x5c18, 0x5c80, 0x5c90,
				 0x5fc0, 0x6000, 0x6400, 0x6428,
				 0x6800, 0x6848, 0x684c, 0x6860,
				 0x6888, 0x6910, 0x8000 };

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
static void
bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flags & BNX2_FLAG_NO_WOL) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int
bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & BNX2_FLAG_NO_WOL)
			return -EINVAL;

		bp->wol = 1;
	} else {
		bp->wol = 0;
	}
	return 0;
}
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
static u32
bnx2_get_link(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->link_up;
}

static int
bnx2_get_eeprom_len(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flash_info == NULL)
		return 0;

	return (int) bp->flash_size;
}

static int
bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int
bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	/* parameters already validated in ethtool_set_eeprom */

	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
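/* bnx2_set_coalesce() clamps every value to what the host coalescing
 * block can store: tick fields are 10 bits wide (capped at 0x3ff usec)
 * and frame-count fields 8 bits (capped at 0xff).  On chips with the
 * BROKEN_STATS workaround, the stats interval is forced to either 0 or
 * one full second.  Changes take effect by re-initializing the NIC
 * while the interface is up.
 */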
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff)
		bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff)
		bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff)
		bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff)
		bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff)
		bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff)
		bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff)
		bp->tx_quick_cons_trip_int = 0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp, true);
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp, true);
	}

	return 0;
}
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = bp->rx_pg_ring_size;

	ering->tx_max_pending = MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	if (netif_running(bp->dev)) {
		/* Reset will erase chipset stats; save them */
		bnx2_save_stats(bp);

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
#ifdef BCM_CNIC
		mutex_lock(&bp->cnic_lock);
		/* Let cnic know about the new status block. */
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
			bnx2_setup_cnic_irq_info(bp);
		mutex_unlock(&bp->cnic_lock);
#endif
		bnx2_netif_start(bp, true);
	}
	return 0;
}
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
	    (ering->tx_pending > MAX_TX_DESC_CNT) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
	return rc;
}
static void
bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
}

static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	} else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

	if (netif_running(dev)) {
		spin_lock_bh(&bp->phy_lock);
		bnx2_setup_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
	}

	return 0;
}
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}

static int
bnx2_set_tso(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (data) {
		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			dev->features |= NETIF_F_TSO6;
	} else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_TSO_ECN);
	return 0;
}
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};

#define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
			sizeof(bnx2_stats_str_arr[0]))

#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
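/* One entry per counter in bnx2_stats_str_arr; each value is the 32-bit
 * word offset of that counter within struct statistics_block.
 */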
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
	STATS_OFFSET32(stat_IfHCInOctets_hi),
	STATS_OFFSET32(stat_IfHCInBadOctets_hi),
	STATS_OFFSET32(stat_IfHCOutOctets_hi),
	STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
	STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
	STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
	STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
	STATS_OFFSET32(stat_Dot3StatsFCSErrors),
	STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
	STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
	STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
	STATS_OFFSET32(stat_Dot3StatsLateCollisions),
	STATS_OFFSET32(stat_EtherStatsCollisions),
	STATS_OFFSET32(stat_EtherStatsFragments),
	STATS_OFFSET32(stat_EtherStatsJabbers),
	STATS_OFFSET32(stat_EtherStatsUndersizePkts),
	STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
	STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_XonPauseFramesReceived),
	STATS_OFFSET32(stat_XoffPauseFramesReceived),
	STATS_OFFSET32(stat_OutXonSent),
	STATS_OFFSET32(stat_OutXoffSent),
	STATS_OFFSET32(stat_MacControlFramesReceived),
	STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
	STATS_OFFSET32(stat_IfInFTQDiscards),
	STATS_OFFSET32(stat_IfInMBUFDiscards),
	STATS_OFFSET32(stat_FwRxDrop),
};
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};

static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
static int
bnx2_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return BNX2_NUM_TESTS;
	case ETH_SS_STATS:
		return BNX2_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_set_power_state(bp, PCI_D0);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp, true);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}
static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
			sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
			sizeof(bnx2_tests_str_arr));
		break;
	}
}
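/* Each counter is read as a 4- or 8-byte quantity according to the
 * per-chip length table, and the saved (temp) statistics block is added
 * in so totals survive chip resets.
 */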
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		unsigned long offset;

		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}

		offset = bnx2_stats_offset_arr[i];
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64) *(hw_stats + offset) +
				 *(temp_stats + offset);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
			 *(hw_stats + offset + 1) +
			 (((u64) *(temp_stats + offset)) << 32) +
			 *(temp_stats + offset + 1);
	}
}
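/* Identify the NIC by blinking the port LED: the EMAC LED override bits
 * are toggled every 500 ms, giving one blink per second for 'data'
 * seconds.
 */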
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	bnx2_set_power_state(bp, PCI_D0);

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);

	if (!netif_running(dev))
		bnx2_set_power_state(bp, PCI_D3hot);

	return 0;
}
static int
bnx2_set_tx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		return (ethtool_op_set_tx_ipv6_csum(dev, data));
	else
		return (ethtool_op_set_tx_csum(dev, data));
}
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	return 0;
}
/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_irq *irq = &bp->irq_tbl[i];

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[i]);
		enable_irq(irq->vector);
	}
}
#endif
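/* The 5709 is a dual-media device; decide between copper and SerDes
 * from the bond id and strap fields of the dual-media control register.
 */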
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;
}
static void __devinit
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	int rc, i, j;
	u8 *data;
	unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
#define BNX2_MAX_VER_SLEN	30

	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return;

	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
			     BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;

	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
		data[i] = data[i + BNX2_VPD_LEN + 3];
		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
		data[i + 3] = data[i + BNX2_VPD_LEN];
	}

	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto vpd_done;

	rosize = pci_vpd_lrdt_size(&data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;

	if (block_end > BNX2_VPD_LEN)
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len != 4 ||
	    memcmp(&data[j], "1028", 4))
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_VENDOR0);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}
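/* One-time board setup: enable and map the device, size the DMA masks,
 * detect bus type and chip capabilities, locate firmware shared memory,
 * and read the permanent MAC address and firmware versions.
 */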
static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc, i, j;
	u32 reg;
	u64 dma_mask, persist_dma_mask;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	bp->temp_stats_blk =
		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);

	if (bp->temp_stats_blk == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	spin_lock_init(&bp->indirect_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_lock);
#endif
	INIT_WORK(&bp->reset_task, bnx2_reset_task);

	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);

	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}
	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems
	 * The chip's target access swapping will not swap all accesses
	 */
	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
			       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bnx2_set_power_state(bp, PCI_D0);

	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIE capability, aborting\n");
			rc = -EIO;
			goto err_out_unmap;
		}
		bp->flags |= BNX2_FLAG_PCIE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
	} else {
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIX capability, aborting\n");
			rc = -EIO;
			goto err_out_unmap;
		}
		bp->flags |= BNX2_FLAG_BROKEN_STATS;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
		if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
			bp->flags |= BNX2_FLAG_MSIX_CAP;
	}

	if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
		if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
			bp->flags |= BNX2_FLAG_MSI_CAP;
	}

	/* 5708 cannot support DMA addresses > 40-bit.  */
	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
	else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
		dev->features |= NETIF_F_HIGHDMA;
		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
		if (rc) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting\n");
			goto err_out_unmap;
		}
	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
		goto err_out_unmap;
	}

	if (!(bp->flags & BNX2_FLAG_PCIE))
		bnx2_get_pci_speed(bp);

	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	}
	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & BNX2_FLAG_PCIX)) {

		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting\n");
		goto err_out_unmap;
	}
	bnx2_init_nvram(bp);

	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG) {
		u32 off = PCI_FUNC(pdev->devfn) << 2;

		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
	} else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bnx2_read_vpd_fw_ver(bp);

	j = strlen(bp->fw_version);
	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
	for (i = 0; i < 3 && j < 24; i++) {
		u8 num, k, skip0;

		if (i == 0) {
			bp->fw_version[j++] = 'b';
			bp->fw_version[j++] = 'c';
			bp->fw_version[j++] = ' ';
		}
		num = (u8) (reg >> (24 - (i * 8)));
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				bp->fw_version[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			bp->fw_version[j++] = '.';
	}
	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
		bp->wol = 1;

	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
		bp->flags |= BNX2_FLAG_ASF_ENABLE;

		for (i = 0; i < 30; i++) {
			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
				break;
			msleep(10);
		}
	}
	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
	reg &= BNX2_CONDITION_MFW_RUN_MASK;
	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);

		bp->fw_version[j++] = ' ';
		for (i = 0; i < 3 && j < 28; i++) {
			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
			reg = swab32(reg);
			memcpy(&bp->fw_version[j], &reg, 4);
			j += 4;
		}
	}

	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;

	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->rx_csum = 1;
->tx_quick_cons_trip_int
= 2;
8071 bp
->tx_quick_cons_trip
= 20;
8072 bp
->tx_ticks_int
= 18;
8075 bp
->rx_quick_cons_trip_int
= 2;
8076 bp
->rx_quick_cons_trip
= 12;
8077 bp
->rx_ticks_int
= 18;
8080 bp
->stats_ticks
= USEC_PER_SEC
& BNX2_HC_STATS_TICKS_HC_STAT_TICKS
;
8082 bp
->current_interval
= BNX2_TIMER_INTERVAL
;
8086 /* Disable WOL support if we are running on a SERDES chip. */
8087 if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
8088 bnx2_get_5709_media(bp
);
8089 else if (CHIP_BOND_ID(bp
) & CHIP_BOND_ID_SERDES_BIT
)
8090 bp
->phy_flags
|= BNX2_PHY_FLAG_SERDES
;
8092 bp
->phy_port
= PORT_TP
;
8093 if (bp
->phy_flags
& BNX2_PHY_FLAG_SERDES
) {
8094 bp
->phy_port
= PORT_FIBRE
;
8095 reg
= bnx2_shmem_rd(bp
, BNX2_SHARED_HW_CFG_CONFIG
);
8096 if (!(reg
& BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX
)) {
8097 bp
->flags
|= BNX2_FLAG_NO_WOL
;
8100 if (CHIP_NUM(bp
) == CHIP_NUM_5706
) {
8101 /* Don't do parallel detect on this board because of
8102 * some board problems. The link will not go down
8103 * if we do parallel detect.
8105 if (pdev
->subsystem_vendor
== PCI_VENDOR_ID_HP
&&
8106 pdev
->subsystem_device
== 0x310c)
8107 bp
->phy_flags
|= BNX2_PHY_FLAG_NO_PARALLEL
;
8110 if (reg
& BNX2_SHARED_HW_CFG_PHY_2_5G
)
8111 bp
->phy_flags
|= BNX2_PHY_FLAG_2_5G_CAPABLE
;
8113 } else if (CHIP_NUM(bp
) == CHIP_NUM_5706
||
8114 CHIP_NUM(bp
) == CHIP_NUM_5708
)
8115 bp
->phy_flags
|= BNX2_PHY_FLAG_CRC_FIX
;
8116 else if (CHIP_NUM(bp
) == CHIP_NUM_5709
&&
8117 (CHIP_REV(bp
) == CHIP_REV_Ax
||
8118 CHIP_REV(bp
) == CHIP_REV_Bx
))
8119 bp
->phy_flags
|= BNX2_PHY_FLAG_DIS_EARLY_DAC
;
8121 bnx2_init_fw_cap(bp
);
8123 if ((CHIP_ID(bp
) == CHIP_ID_5708_A0
) ||
8124 (CHIP_ID(bp
) == CHIP_ID_5708_B0
) ||
8125 (CHIP_ID(bp
) == CHIP_ID_5708_B1
) ||
8126 !(REG_RD(bp
, BNX2_PCI_CONFIG_3
) & BNX2_PCI_CONFIG_3_VAUX_PRESET
)) {
8127 bp
->flags
|= BNX2_FLAG_NO_WOL
;
8131 if (CHIP_ID(bp
) == CHIP_ID_5706_A0
) {
8132 bp
->tx_quick_cons_trip_int
=
8133 bp
->tx_quick_cons_trip
;
8134 bp
->tx_ticks_int
= bp
->tx_ticks
;
8135 bp
->rx_quick_cons_trip_int
=
8136 bp
->rx_quick_cons_trip
;
8137 bp
->rx_ticks_int
= bp
->rx_ticks
;
8138 bp
->comp_prod_trip_int
= bp
->comp_prod_trip
;
8139 bp
->com_ticks_int
= bp
->com_ticks
;
8140 bp
->cmd_ticks_int
= bp
->cmd_ticks
;
8143 /* Disable MSI on 5706 if AMD 8132 bridge is found.
8145 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
8146 * with byte enables disabled on the unused 32-bit word. This is legal
8147 * but causes problems on the AMD 8132 which will eventually stop
8148 * responding after a while.
8150 * AMD believes this incompatibility is unique to the 5706, and
8151 * prefers to locally disable MSI rather than globally disabling it.
8153 if (CHIP_NUM(bp
) == CHIP_NUM_5706
&& disable_msi
== 0) {
8154 struct pci_dev
*amd_8132
= NULL
;
8156 while ((amd_8132
= pci_get_device(PCI_VENDOR_ID_AMD
,
8157 PCI_DEVICE_ID_AMD_8132_BRIDGE
,
8160 if (amd_8132
->revision
>= 0x10 &&
8161 amd_8132
->revision
<= 0x13) {
8163 pci_dev_put(amd_8132
);
8169 bnx2_set_default_link(bp
);
8170 bp
->req_flow_ctrl
= FLOW_CTRL_RX
| FLOW_CTRL_TX
;
8172 init_timer(&bp
->timer
);
8173 bp
->timer
.expires
= RUN_AT(BNX2_TIMER_INTERVAL
);
8174 bp
->timer
.data
= (unsigned long) bp
;
8175 bp
->timer
.function
= bnx2_timer
;
8181 iounmap(bp
->regview
);
8186 pci_release_regions(pdev
);
8189 pci_disable_device(pdev
);
8190 pci_set_drvdata(pdev
, NULL
);
static char * __devinit
bnx2_bus_string(struct bnx2 *bp, char *str)
{
	char *s = str;

	if (bp->flags & BNX2_FLAG_PCIE) {
		s += sprintf(s, "PCI Express");
	} else {
		s += sprintf(s, "PCI");
		if (bp->flags & BNX2_FLAG_PCIX)
			s += sprintf(s, "-X");
		if (bp->flags & BNX2_FLAG_PCI_32BIT)
			s += sprintf(s, " 32-bit");
		else
			s += sprintf(s, " 64-bit");
		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
	}
	return str;
}
static void __devinit
bnx2_init_napi(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		int (*poll)(struct napi_struct *, int);

		if (i == 0)
			poll = bnx2_poll;
		else
			poll = bnx2_poll_msix;

		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
		bnapi->bp = bp;
	}
}
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats		= bnx2_get_stats,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};
static void inline vlan_features_add(struct net_device *dev, unsigned long flags)
{
#ifdef BCM_VLAN
	dev->vlan_features |= flags;
#endif
}
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];

	if (version_printed++ == 0)
		pr_info("%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);

	pci_set_drvdata(pdev, dev);

	rc = bnx2_request_firmware(bp);
	if (rc)
		goto error;

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		dev->features |= NETIF_F_IPV6_CSUM;
		vlan_features_add(dev, NETIF_F_IPV6_CSUM);
	}
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		dev->features |= NETIF_F_TSO6;
		vlan_features_add(dev, NETIF_F_TSO6);
	}
	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		    ((CHIP_ID(bp) & 0x0ff0) >> 4),
		    bnx2_bus_string(bp, str),
		    dev->base_addr,
		    bp->pdev->irq, dev->dev_addr);

	return 0;

error:
	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return rc;
}
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);

	kfree(bp->temp_stats_blk);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp, true);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}

static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp, true);
	return 0;
}
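/* PCI Advanced Error Reporting (AER) recovery hooks: detach and quiesce
 * the device on error, reinitialize it after the slot reset, then
 * restart traffic.
 */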
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev)) {
		bnx2_netif_stop(bp, true);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (netif_running(dev)) {
		bnx2_set_power_state(bp, PCI_D0);
		bnx2_init_nic(bp, 1);
	}

	rtnl_unlock();
	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		bnx2_netif_start(bp, true);

	netif_device_attach(dev);
	rtnl_unlock();
}
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};

static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};

static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);