/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005 Broadcom Corporation.
 *
 * Copyright (C) 2000-2003 Broadcom Corporation.
 */
#include <linux/config.h>

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>

#include <net/checksum.h>

#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#ifdef CONFIG_SPARC64
#include <asm/idprom.h>
#include <asm/oplib.h>
#endif
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#ifdef NETIF_F_TSO
#define TG3_TSO_SUPPORT	1
#else
#define TG3_TSO_SUPPORT	0
#endif

#include "tg3.h"
#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.26"
#define DRV_MODULE_RELDATE	"April 24, 2005"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
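/* Example of the shift/mask idiom referred to above: because the ring sizes
 * are power-of-two constants, an index can be advanced with
 *
 *	next = (idx + 1) & (TG3_TX_RING_SIZE - 1);
 *
 * instead of (idx + 1) % TG3_TX_RING_SIZE; the NEXT_TX() macro further down
 * does exactly this.
 */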
#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define TX_RING_GAP(TP)	\
	(TG3_TX_RING_SIZE - (TP)->tx_pending)
#define TX_BUFFS_AVAIL(TP)						\
	(((TP)->tx_cons <= (TP)->tx_prod) ?				\
	  (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :		\
	  (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH	(TG3_TX_RING_SIZE / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static struct pci_device_id tg3_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
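/* The strings above are reported via ETHTOOL_GSTRINGS; since the array is
 * sized by TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64)),
 * they are expected to stay in the same order as the u64 counters in
 * struct tg3_ethtool_stats.
 */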
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
		unsigned long flags;

		spin_lock_irqsave(&tp->indirect_lock, flags);
		pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
		spin_unlock_irqrestore(&tp->indirect_lock, flags);
	} else {
		writel(val, tp->regs + off);
		if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
			readl(tp->regs + off);
	}
}
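/* _tw32_flush() is like tg3_write_indirect_reg32(), but on the direct MMIO
 * path it reads the register back so the posted PCI write is flushed before
 * returning.
 */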
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
		unsigned long flags;

		spin_lock_irqsave(&tp->indirect_lock, flags);
		pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
		spin_unlock_irqrestore(&tp->indirect_lock, flags);
	} else {
		void __iomem *dest = tp->regs + off;

		writel(val, dest);
		readl(dest);	/* always flush PCI write */
	}
}
static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;

	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}
static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;

	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}
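/* Shorthands used throughout the driver: tw32()/tr32() write/read a 32-bit
 * register (indirectly when the PCIX target workaround is active), tw32_f()
 * additionally flushes the posted write, and the *_mbox() variants go
 * through the mailbox helpers above.
 */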
#define tw32_mailbox(reg, val)	writel(((val) & 0xffffffff), tp->regs + (reg))
#define tw32_rx_mbox(reg, val)	_tw32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	_tw32_tx_mbox(tp, reg, val)

#define tw32(reg,val)		tg3_write_indirect_reg32(tp,(reg),(val))
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val))
#define tw16(reg,val)		writew(((val) & 0xffff), tp->regs + (reg))
#define tw8(reg,val)		writeb(((val) & 0xff), tp->regs + (reg))
#define tr32(reg)		readl(tp->regs + (reg))
#define tr16(reg)		readw(tp->regs + (reg))
#define tr8(reg)		readb(tp->regs + (reg))
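/* tg3_write_mem()/tg3_read_mem() below access NIC on-board SRAM through the
 * PCI memory window (TG3PCI_MEM_WIN_BASE_ADDR/TG3PCI_MEM_WIN_DATA),
 * serialized by tp->indirect_lock.
 */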
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

	/* Always leave this as zero. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

	/* Always leave this as zero. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
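/* Interrupt masking helpers: MISC_HOST_CTRL gates the PCI interrupt line and
 * the interrupt mailbox is written as well; the trailing tr32() of the
 * mailbox flushes the write.
 */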
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
}
static inline void tg3_cond_int(struct tg3 *tp)
{
	if (tp->hw_status->status & SD_STATUS_UPDATED)
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
}
static void tg3_enable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
	tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);

	tg3_cond_int(tp);
}
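/* tg3_has_work() reports whether the status block shows anything to do:
 * a link change event (unless the link-change register or serdes polling is
 * used) or new RX/TX ring indices.
 */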
static inline unsigned int tg3_has_work(struct tg3 *tp)
{
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/*
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);

	if (tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
static inline void tg3_netif_stop(struct tg3 *tp)
{
	netif_poll_disable(tp->dev);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	netif_poll_enable(tp->dev);
}
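/* tg3_switch_clocks() below reprograms TG3PCI_CLOCK_CTRL, selecting between
 * the 44MHz/ALTCLK and 625MHz core clock sources depending on chip family.
 */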
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_f(TG3PCI_CLOCK_CTRL,
			       clock_ctrl | CLOCK_CTRL_625_CORE);
			udelay(40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_f(TG3PCI_CLOCK_CTRL,
		       clock_ctrl |
		       (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
		udelay(40);
		tw32_f(TG3PCI_CLOCK_CTRL,
		       clock_ctrl | (CLOCK_CTRL_ALTCLK));
		udelay(40);
	}
	tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
	udelay(40);
}
#define PHY_BUSY_LOOPS	5000
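/* PHY (MII) access goes through the MAC's MI interface: a frame with the PHY
 * address, register number and command is written to MAC_MI_COM, then the
 * MI_COM_BUSY bit is polled for up to PHY_BUSY_LOOPS iterations.
 */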
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit <= 0)
		return -EBUSY;

	return 0;
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, 0x16, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit <= 0)
		return -EBUSY;

	return 0;
}
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	} else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	}

	return err;
}
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	tg3_phy_set_wirespeed(tp);
	return 0;
}
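/* tg3_frob_aux_power() drives the GRC local-control GPIOs that select the
 * auxiliary power source; on the dual-port 5704 the peer device's WOL and
 * init state is taken into account as well.
 */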
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		tp_peer = pci_get_drvdata(tp->pdev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			       (GRC_LCLCTRL_GPIO_OE0 |
				GRC_LCLCTRL_GPIO_OE1 |
				GRC_LCLCTRL_GPIO_OE2 |
				GRC_LCLCTRL_GPIO_OUTPUT0 |
				GRC_LCLCTRL_GPIO_OUTPUT1));
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl;

			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				   NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			       grc_local_ctrl);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			       grc_local_ctrl);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				       grc_local_ctrl);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			       (GRC_LCLCTRL_GPIO_OE1 |
				GRC_LCLCTRL_GPIO_OUTPUT1));

			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			       (GRC_LCLCTRL_GPIO_OE1));

			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			       (GRC_LCLCTRL_GPIO_OE1 |
				GRC_LCLCTRL_GPIO_OUTPUT1));
		}
	}
}
static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
static int tg3_set_power_state(struct tg3 *tp, int state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case 0:
		power_control |= 0;
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is not a LOM */
		if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
			udelay(100);
		}

		return 0;

	case 1:
		power_control |= 1;
		break;

	case 2:
		power_control |= 2;
		break;

	case 3:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	};

	power_control |= PCI_PM_CTRL_PME_ENABLE;

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	}

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			mac_mode = MAC_MODE_PORT_MODE_MII;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
			    !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
				mac_mode |= MAC_MODE_LINK_POLARITY;
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_f(TG3PCI_CLOCK_CTRL, base_val |
		       CLOCK_CTRL_ALTCLK |
		       CLOCK_CTRL_PWRDOWN_PLL133);
		udelay(40);
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
		udelay(40);

		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
		udelay(40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_f(TG3PCI_CLOCK_CTRL,
			       tp->pci_clock_ctrl | newbits3);
			udelay(40);
		}
	}

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
			tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
	} else {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
		       "%s for RX.\n",
		       tp->dev->name,
		       (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
		       (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
	}
}
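/* tg3_setup_flow_control() resolves TX/RX pause from the local and remote
 * pause advertisements (when TG3_FLAG_PAUSE_AUTONEG is set) and only touches
 * MAC_RX_MODE/MAC_TX_MODE when the resolved setting actually changed.
 */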
static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
{
	u32 new_tg3_flags = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
		if (local_adv & ADVERTISE_PAUSE_CAP) {
			if (local_adv & ADVERTISE_PAUSE_ASYM) {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						 TG3_FLAG_TX_PAUSE);
				else if (remote_adv & LPA_PAUSE_ASYM)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE);
			} else {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						 TG3_FLAG_TX_PAUSE);
			}
		} else if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if ((remote_adv & LPA_PAUSE_CAP) &&
			    (remote_adv & LPA_PAUSE_ASYM))
				new_tg3_flags |= TG3_FLAG_TX_PAUSE;
		}

		tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
		tp->tg3_flags |= new_tg3_flags;
	} else {
		new_tg3_flags = tp->tg3_flags;
	}

	if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode) {
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode) {
		tw32_f(MAC_TX_MODE, tp->tx_mode);
	}
}
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		*speed = SPEED_INVALID;
		*duplex = DUPLEX_INVALID;
		break;
	}
}
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		tp->link_config.advertising =
			(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg | ADVERTISED_MII);

		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;
		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);

			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
		}
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);

	udelay(40);

	return err;
}
static int tg3_copper_is_advertising_all(struct tg3 *tp)
{
	u32 adv_reg, all_mask;

	if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
		return 0;

	all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
		    ADVERTISE_100HALF | ADVERTISE_100FULL);
	if ((adv_reg & all_mask) != all_mask)
		return 0;
	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
		u32 tg3_ctrl;

		if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
			return 0;

		all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
			    MII_TG3_CTRL_ADV_1000_FULL);
		if ((tg3_ctrl & all_mask) != all_mask)
			return 0;
	}
	return 1;
}
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32_f(MAC_STATUS,
	       (MAC_STATUS_SYNC_CHANGED |
		MAC_STATUS_CFG_CHANGED |
		MAC_STATUS_MI_COMPLETION |
		MAC_STATUS_LNKSTATE_CHANGED));

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
		}
	}

	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong.  Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}

	if (current_link_up == 0) {
		u32 tmp;

		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
		    (current_link_up == 1 &&
		     tp->link_config.active_speed == SPEED_10))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	} else {
		if (current_link_up == 1)
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
	}

	tw32_f(MAC_MODE, tp->mac_mode);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		tw32_f(MAC_STATUS,
		       (MAC_STATUS_SYNC_CHANGED |
			MAC_STATUS_CFG_CHANGED));
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06
};

#define ANEG_OK			0
#define ANEG_DONE		1
#define ANEG_TIMER_ENAB		2
#define ANEG_FAILED		-1

#define ANEG_STATE_SETTLE_TIME	10000
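/* tg3_fiber_aneg_smachine() below is essentially a software 1000BASE-X
 * (clause-37-style) autonegotiation state machine, driven one tick at a time
 * by fiber_autoneg(); ANEG_STATE_* are its states and the MR_* bits mirror
 * management-register-style flags for the link partner's ability.
 */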
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch(ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	};

	return ret;
}
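/* fiber_autoneg() runs the state machine above in a polling loop of up to
 * 195000 ticks and reports success only when autonegotiation completed with
 * a full-duplex link partner (MR_AN_COMPLETE | MR_LINK_OK |
 * MR_LP_ADV_FULL_DUPLEX).
 */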
static int fiber_autoneg(struct tg3 *tp, u32 *flags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*flags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & (1 << 31)) {
			if (workaround) {
				u32 val = serdes_cfg;

				tw32_f(MAC_SERDES_CFG, val);
			}
			tw32_f(SG_DIG_CTRL, 0x01388400);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = 0x81388400;

	/* Pause capability */
	expected_sg_dig_ctrl |= (1 << 11);

	/* Asymettric pause */
	expected_sg_dig_ctrl |= (1 << 12);

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		int i;

		/* Giver time to negotiate (~200ms) */
		for (i = 0; i < 40000; i++) {
			sg_dig_status = tr32(SG_DIG_STATUS);
			if (sg_dig_status & (0x3))
				break;
			udelay(5);
		}
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & (1 << 1)) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (sg_dig_status & (1 << 19))
				remote_adv |= LPA_PAUSE_CAP;
			if (sg_dig_status & (1 << 20))
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
		} else if (!(sg_dig_status & (1 << 1))) {
			if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
				tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, 0x01388400);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
				}
			}
		}
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
		tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
		goto out;
	}

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 flags;
		int i;

		if (fiber_autoneg(tp, &flags)) {
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (flags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (flags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
			current_link_up = 1;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Forcing 1000FD link up. */
		current_link_up = 1;
		tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);
	}

out:
	return current_link_up;
}
2364 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2367 u16 orig_active_speed;
2368 u8 orig_active_duplex;
2370 int current_link_up;
2374 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2375 TG3_FLAG_TX_PAUSE));
2376 orig_active_speed = tp->link_config.active_speed;
2377 orig_active_duplex = tp->link_config.active_duplex;
2379 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2380 netif_carrier_ok(tp->dev) &&
2381 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2382 mac_status = tr32(MAC_STATUS);
2383 mac_status &= (MAC_STATUS_PCS_SYNCED |
2384 MAC_STATUS_SIGNAL_DET |
2385 MAC_STATUS_CFG_CHANGED |
2386 MAC_STATUS_RCVD_CFG);
2387 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2388 MAC_STATUS_SIGNAL_DET)) {
2389 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2390 MAC_STATUS_CFG_CHANGED));
2395 tw32_f(MAC_TX_AUTO_NEG, 0);
2397 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2398 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2399 tw32_f(MAC_MODE, tp->mac_mode);
2402 if (tp->phy_id == PHY_ID_BCM8002)
2403 tg3_init_bcm8002(tp);
2405 /* Enable link change event even when serdes polling. */
2406 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2409 current_link_up = 0;
2410 mac_status = tr32(MAC_STATUS);
2412 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2413 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2415 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2417 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2418 tw32_f(MAC_MODE, tp->mac_mode);
2421 tp->hw_status->status =
2422 (SD_STATUS_UPDATED |
2423 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2425 for (i = 0; i < 100; i++) {
2426 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2427 MAC_STATUS_CFG_CHANGED));
2429 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2430 MAC_STATUS_CFG_CHANGED)) == 0)
2434 mac_status = tr32(MAC_STATUS);
2435 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2436 current_link_up = 0;
2437 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2438 tw32_f(MAC_MODE, (tp->mac_mode |
2439 MAC_MODE_SEND_CONFIGS));
2441 tw32_f(MAC_MODE, tp->mac_mode);
2445 if (current_link_up == 1) {
2446 tp->link_config.active_speed = SPEED_1000;
2447 tp->link_config.active_duplex = DUPLEX_FULL;
2448 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2449 LED_CTRL_LNKLED_OVERRIDE |
2450 LED_CTRL_1000MBPS_ON));
2452 tp->link_config.active_speed = SPEED_INVALID;
2453 tp->link_config.active_duplex = DUPLEX_INVALID;
2454 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2455 LED_CTRL_LNKLED_OVERRIDE |
2456 LED_CTRL_TRAFFIC_OVERRIDE));
2459 if (current_link_up != netif_carrier_ok(tp->dev)) {
2460 if (current_link_up)
2461 netif_carrier_on(tp->dev);
2463 netif_carrier_off(tp->dev);
2464 tg3_link_report(tp);
2467 tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2469 if (orig_pause_cfg != now_pause_cfg ||
2470 orig_active_speed != tp->link_config.active_speed ||
2471 orig_active_duplex != tp->link_config.active_duplex)
2472 tg3_link_report(tp);
2478 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2482 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2483 err = tg3_setup_fiber_phy(tp, force_reset);
2485 err = tg3_setup_copper_phy(tp, force_reset);
2488 if (tp->link_config.active_speed == SPEED_1000 &&
2489 tp->link_config.active_duplex == DUPLEX_HALF)
2490 tw32(MAC_TX_LENGTHS,
2491 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2492 (6 << TX_LENGTHS_IPG_SHIFT) |
2493 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2495 tw32(MAC_TX_LENGTHS,
2496 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2497 (6 << TX_LENGTHS_IPG_SHIFT) |
2498 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2500 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2501 if (netif_carrier_ok(tp->dev)) {
2502 tw32(HOSTCC_STAT_COAL_TICKS,
2503 DEFAULT_STAT_COAL_TICKS);
2505 tw32(HOSTCC_STAT_COAL_TICKS, 0);
2512 /* Tigon3 never reports partial packet sends. So we do not
2513 * need special logic to handle SKBs that have not had all
2514 * of their frags sent yet, like SunGEM does. */
2516 static void tg3_tx(struct tg3 *tp)
2518 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2519 u32 sw_idx = tp->tx_cons;
2521 while (sw_idx != hw_idx) {
2522 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2523 struct sk_buff *skb = ri->skb;
2526 if (unlikely(skb == NULL))
2529 pci_unmap_single(tp->pdev,
2530 pci_unmap_addr(ri, mapping),
2536 sw_idx = NEXT_TX(sw_idx);
2538 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2539 if (unlikely(sw_idx == hw_idx))
2542 ri = &tp->tx_buffers[sw_idx];
2543 if (unlikely(ri->skb != NULL))
2546 pci_unmap_page(tp->pdev,
2547 pci_unmap_addr(ri, mapping),
2548 skb_shinfo(skb)->frags[i].size,
2551 sw_idx = NEXT_TX(sw_idx);
2554 dev_kfree_skb_irq(skb);
2557 tp->tx_cons = sw_idx;
2559 if (netif_queue_stopped(tp->dev) &&
2560 (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2561 netif_wake_queue(tp->dev);
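/* Editor's note: an illustrative, self-contained sketch of the
 * producer/consumer walk that tg3_tx() performs above.  It is not part
 * of the driver; the names ring_entry/reclaim_ring are hypothetical and
 * the real code additionally unmaps DMA and frees the skb per entry.
 */
#if 0
#define DEMO_RING_SIZE 8			/* power of two, like the real TX ring */
#define DEMO_NEXT(n)   (((n) + 1) & (DEMO_RING_SIZE - 1))

struct ring_entry { void *cookie; };

static unsigned int reclaim_ring(struct ring_entry *ring,
				 unsigned int sw_idx, unsigned int hw_idx)
{
	/* Walk from the software consumer index up to the index the
	 * hardware reports as completed, releasing each entry.
	 */
	while (sw_idx != hw_idx) {
		ring[sw_idx].cookie = 0;	/* "free" the entry */
		sw_idx = DEMO_NEXT(sw_idx);	/* wraps at DEMO_RING_SIZE */
	}
	return sw_idx;				/* new consumer index */
}
#endif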
2564 /* Returns size of skb allocated or < 0 on error.
2566 * We only need to fill in the address because the other members
2567 * of the RX descriptor are invariant, see tg3_init_rings.
2569 * Note the purposeful asymmetry of cpu vs. chip accesses. For
2570 * posting buffers we only dirty the first cache line of the RX
2571 * descriptor (containing the address). Whereas for the RX status
2572 * buffers the cpu only reads the last cacheline of the RX descriptor
2573 * (to fetch the error flags, vlan tag, checksum, and opaque cookie). */
2575 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2576 int src_idx, u32 dest_idx_unmasked)
2578 struct tg3_rx_buffer_desc *desc;
2579 struct ring_info *map, *src_map;
2580 struct sk_buff *skb;
2582 int skb_size, dest_idx;
2585 switch (opaque_key) {
2586 case RXD_OPAQUE_RING_STD:
2587 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2588 desc = &tp->rx_std[dest_idx];
2589 map = &tp->rx_std_buffers[dest_idx];
2591 src_map = &tp->rx_std_buffers[src_idx];
2592 skb_size = RX_PKT_BUF_SZ;
2595 case RXD_OPAQUE_RING_JUMBO:
2596 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2597 desc = &tp->rx_jumbo[dest_idx];
2598 map = &tp->rx_jumbo_buffers[dest_idx];
2600 src_map = &tp->rx_jumbo_buffers[src_idx];
2601 skb_size = RX_JUMBO_PKT_BUF_SZ;
2608 /* Do not overwrite any of the map or rp information
2609 * until we are sure we can commit to a new buffer.
2611 * Callers depend upon this behavior and assume that
2612 * we leave everything unchanged if we fail. */
2614 skb = dev_alloc_skb(skb_size);
2619 skb_reserve(skb, tp->rx_offset);
2621 mapping = pci_map_single(tp->pdev, skb->data,
2622 skb_size - tp->rx_offset,
2623 PCI_DMA_FROMDEVICE);
2626 pci_unmap_addr_set(map, mapping, mapping);
2628 if (src_map != NULL)
2629 src_map->skb = NULL;
2631 desc->addr_hi = ((u64)mapping >> 32);
2632 desc->addr_lo = ((u64)mapping & 0xffffffff);
2637 /* We only need to move over in the address because the other
2638 * members of the RX descriptor are invariant. See notes above
2639 * tg3_alloc_rx_skb for full details. */
2641 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2642 int src_idx, u32 dest_idx_unmasked)
2644 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2645 struct ring_info *src_map, *dest_map;
2648 switch (opaque_key) {
2649 case RXD_OPAQUE_RING_STD:
2650 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2651 dest_desc = &tp->rx_std[dest_idx];
2652 dest_map = &tp->rx_std_buffers[dest_idx];
2653 src_desc = &tp->rx_std[src_idx];
2654 src_map = &tp->rx_std_buffers[src_idx];
2657 case RXD_OPAQUE_RING_JUMBO:
2658 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2659 dest_desc = &tp->rx_jumbo[dest_idx];
2660 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2661 src_desc = &tp->rx_jumbo[src_idx];
2662 src_map = &tp->rx_jumbo_buffers[src_idx];
2669 dest_map->skb = src_map->skb;
2670 pci_unmap_addr_set(dest_map, mapping,
2671 pci_unmap_addr(src_map, mapping));
2672 dest_desc->addr_hi = src_desc->addr_hi;
2673 dest_desc->addr_lo = src_desc->addr_lo;
2675 src_map->skb = NULL;
2678 #if TG3_VLAN_TAG_USED
2679 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2681 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2685 /* The RX ring scheme is composed of multiple rings which post fresh
2686 * buffers to the chip, and one special ring the chip uses to report
2687 * status back to the host.
2689 * The special ring reports the status of received packets to the
2690 * host. The chip does not write into the original descriptor the
2691 * RX buffer was obtained from. The chip simply takes the original
2692 * descriptor as provided by the host, updates the status and length
2693 * field, then writes this into the next status ring entry.
2695 * Each ring the host uses to post buffers to the chip is described
2696 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
2697 * it is first placed into the on-chip ram. When the packet's length
2698 * is known, it walks down the TG3_BDINFO entries to select the ring.
2699 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
2700 * which is within the range of the new packet's length is chosen.
2702 * The "separate ring for rx status" scheme may sound queer, but it makes
2703 * sense from a cache coherency perspective. If only the host writes
2704 * to the buffer post rings, and only the chip writes to the rx status
2705 * rings, then cache lines never move beyond shared-modified state.
2706 * If both the host and chip were to write into the same ring, cache line
2707 * eviction could occur since both entities want it in an exclusive state. */
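/* Editor's note: a minimal, standalone sketch of the "separate status
 * ring" model described above, assuming a hypothetical status_entry
 * layout.  The host consumes entries the chip produced and hands the
 * referenced buffer back to a posting ring, mirroring what tg3_rx()
 * does below with the real descriptor formats.
 */
#if 0
struct status_entry {
	unsigned int opaque;	/* which posting ring + buffer index */
	unsigned int len;	/* length written by the chip */
};

static unsigned int consume_status_ring(struct status_entry *rcb,
					unsigned int ring_size,
					unsigned int sw_idx,
					unsigned int hw_producer)
{
	while (sw_idx != hw_producer) {
		struct status_entry *e = &rcb[sw_idx];

		/* ...deliver the buffer named by e->opaque with e->len
		 * bytes, then recycle or replace it on the posting ring...
		 */
		(void)e;
		sw_idx = (sw_idx + 1) % ring_size;
	}
	return sw_idx;		/* written back to the chip as an ACK */
}
#endif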
2709 static int tg3_rx(struct tg3 *tp, int budget)
2712 u32 sw_idx = tp->rx_rcb_ptr;
2716 hw_idx = tp->hw_status->idx[0].rx_producer;
2718 /* We need to order the read of hw_idx and the read of
2719 * the opaque cookie. */
2724 while (sw_idx != hw_idx && budget > 0) {
2725 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2727 struct sk_buff *skb;
2728 dma_addr_t dma_addr;
2729 u32 opaque_key, desc_idx, *post_ptr;
2731 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2732 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2733 if (opaque_key == RXD_OPAQUE_RING_STD) {
2734 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2736 skb = tp->rx_std_buffers[desc_idx].skb;
2737 post_ptr = &tp->rx_std_ptr;
2738 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
2739 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
2741 skb = tp->rx_jumbo_buffers[desc_idx].skb;
2742 post_ptr = &tp->rx_jumbo_ptr;
2745 goto next_pkt_nopost;
2748 work_mask |= opaque_key;
2750 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2751 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2753 tg3_recycle_rx(tp, opaque_key,
2754 desc_idx, *post_ptr);
2756 /* Other statistics kept track of by card. */
2757 tp->net_stats.rx_dropped++;
2761 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
2763 if (len > RX_COPY_THRESHOLD
2764 && tp->rx_offset == 2
2765 /* rx_offset != 2 iff this is a 5701 card running
2766 * in PCI-X mode [see tg3_get_invariants()] */
2770 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
2771 desc_idx, *post_ptr);
2775 pci_unmap_single(tp->pdev, dma_addr,
2776 skb_size - tp->rx_offset,
2777 PCI_DMA_FROMDEVICE);
2781 struct sk_buff *copy_skb;
2783 tg3_recycle_rx(tp, opaque_key,
2784 desc_idx, *post_ptr);
2786 copy_skb = dev_alloc_skb(len + 2);
2787 if (copy_skb == NULL)
2788 goto drop_it_no_recycle;
2790 copy_skb->dev = tp->dev;
2791 skb_reserve(copy_skb, 2);
2792 skb_put(copy_skb, len);
2793 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2794 memcpy(copy_skb->data, skb->data, len);
2795 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2797 /* We'll reuse the original ring buffer. */
2801 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
2802 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
2803 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
2804 >> RXD_TCPCSUM_SHIFT) == 0xffff))
2805 skb->ip_summed = CHECKSUM_UNNECESSARY;
2807 skb->ip_summed = CHECKSUM_NONE;
2809 skb->protocol = eth_type_trans(skb, tp->dev);
2810 #if TG3_VLAN_TAG_USED
2811 if (tp->vlgrp != NULL &&
2812 desc->type_flags & RXD_FLAG_VLAN) {
2813 tg3_vlan_rx(tp, skb,
2814 desc->err_vlan & RXD_VLAN_MASK);
2817 netif_receive_skb(skb);
2819 tp->dev->last_rx = jiffies;
2827 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
2829 /* Refresh hw_idx to see if there is new work */
2830 if (sw_idx == hw_idx) {
2831 hw_idx = tp->hw_status->idx[0].rx_producer;
2836 /* ACK the status ring. */
2837 tp->rx_rcb_ptr = sw_idx;
2838 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
2840 /* Refill RX ring(s). */
2841 if (work_mask & RXD_OPAQUE_RING_STD) {
2842 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
2843 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2846 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2847 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
2848 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
2856 static int tg3_poll(struct net_device *netdev, int *budget)
2858 struct tg3 *tp = netdev_priv(netdev);
2859 struct tg3_hw_status *sblk = tp->hw_status;
2860 unsigned long flags;
2863 spin_lock_irqsave(&tp->lock, flags);
2865 /* handle link change and other phy events */
2866 if (!(tp->tg3_flags &
2867 (TG3_FLAG_USE_LINKCHG_REG |
2868 TG3_FLAG_POLL_SERDES))) {
2869 if (sblk->status & SD_STATUS_LINK_CHG) {
2870 sblk->status = SD_STATUS_UPDATED |
2871 (sblk->status & ~SD_STATUS_LINK_CHG);
2872 tg3_setup_phy(tp, 0);
2876 /* run TX completion thread */
2877 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2878 spin_lock(&tp->tx_lock);
2880 spin_unlock(&tp->tx_lock);
2883 spin_unlock_irqrestore(&tp->lock, flags);
2885 /* run RX thread, within the bounds set by NAPI.
2886 * All RX "locking" is done by ensuring outside
2887 * code synchronizes with dev->poll() */
2890 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2891 int orig_budget = *budget;
2894 if (orig_budget > netdev->quota)
2895 orig_budget = netdev->quota;
2897 work_done = tg3_rx(tp, orig_budget);
2899 *budget -= work_done;
2900 netdev->quota -= work_done;
2902 if (work_done >= orig_budget)
2906 /* if no more work, tell net stack and NIC we're done */
2908 spin_lock_irqsave(&tp->lock, flags);
2909 __netif_rx_complete(netdev);
2910 tg3_restart_ints(tp);
2911 spin_unlock_irqrestore(&tp->lock, flags);
2914 return (done ? 0 : 1);
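/* Editor's note: an illustrative budget walk-through for the NAPI path
 * above, with hypothetical numbers.  If *budget is 64 and netdev->quota
 * is 32, orig_budget is clamped to 32; if tg3_rx() then reports
 * work_done = 32, both counters are decremented by 32 and
 * work_done >= orig_budget holds, so the poll returns 1 ("not done")
 * and the stack will call dev->poll() again.  Only when work_done falls
 * short of the clamped budget does the driver complete NAPI polling and
 * re-enable interrupts via tg3_restart_ints().
 */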
2917 /* MSI ISR - No need to check for interrupt sharing and no need to
2918 * flush status block and interrupt mailbox. PCI ordering rules
2919 * guarantee that MSI will arrive after the status block. */
2921 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
2923 struct net_device *dev = dev_id;
2924 struct tg3 *tp = netdev_priv(dev);
2925 struct tg3_hw_status *sblk = tp->hw_status;
2926 unsigned long flags;
2928 spin_lock_irqsave(&tp->lock, flags);
2931 /* Writing any value to intr-mbox-0 clears PCI INTA# and
2932 * chip-internal interrupt pending events.
2933 * Writing non-zero to intr-mbox-0 additionally tells the
2934 * NIC to stop sending us irqs, engaging "in-intr-handler" event coalescing. */
2937 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
2938 sblk->status &= ~SD_STATUS_UPDATED;
2940 if (likely(tg3_has_work(tp)))
2941 netif_rx_schedule(dev); /* schedule NAPI poll */
2943 /* no work, re-enable interrupts */
2945 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2949 spin_unlock_irqrestore(&tp->lock, flags);
2951 return IRQ_RETVAL(1);
2954 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2956 struct net_device *dev = dev_id;
2957 struct tg3 *tp = netdev_priv(dev);
2958 struct tg3_hw_status *sblk = tp->hw_status;
2959 unsigned long flags;
2960 unsigned int handled = 1;
2962 spin_lock_irqsave(&tp->lock, flags);
2964 /* In INTx mode, it is possible for the interrupt to arrive at
2965 * the CPU before the status block posted prior to the interrupt.
2966 * Reading the PCI State register will confirm whether the
2967 * interrupt is ours and will flush the status block. */
2969 if ((sblk->status & SD_STATUS_UPDATED) ||
2970 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
2972 /* Writing any value to intr-mbox-0 clears PCI INTA# and
2973 * chip-internal interrupt pending events.
2974 * Writing non-zero to intr-mbox-0 additionally tells the
2975 * NIC to stop sending us irqs, engaging "in-intr-handler" event coalescing. */
2978 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2981 /* Flush PCI write. This also guarantees that our
2982 * status block has been flushed to host memory. */
2984 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2985 sblk->status &= ~SD_STATUS_UPDATED;
2987 if (likely(tg3_has_work(tp)))
2988 netif_rx_schedule(dev); /* schedule NAPI poll */
2990 /* no work, shared interrupt perhaps? re-enable
2991 * interrupts, and flush that PCI write */
2993 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2995 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2997 } else { /* shared interrupt */
3001 spin_unlock_irqrestore(&tp->lock, flags);
3003 return IRQ_RETVAL(handled);
3006 /* ISR for interrupt test */
3007 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3008 struct pt_regs *regs)
3010 struct net_device *dev = dev_id;
3011 struct tg3 *tp = netdev_priv(dev);
3012 struct tg3_hw_status *sblk = tp->hw_status;
3014 if (sblk->status & SD_STATUS_UPDATED) {
3015 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3017 return IRQ_RETVAL(1);
3019 return IRQ_RETVAL(0);
3022 static int tg3_init_hw(struct tg3 *);
3023 static int tg3_halt(struct tg3 *);
3025 #ifdef CONFIG_NET_POLL_CONTROLLER
3026 static void tg3_poll_controller(struct net_device *dev)
3028 struct tg3 *tp = netdev_priv(dev);
3030 tg3_interrupt(tp->pdev->irq, dev, NULL);
3034 static void tg3_reset_task(void *_data)
3036 struct tg3 *tp = _data;
3037 unsigned int restart_timer;
3041 spin_lock_irq(&tp->lock);
3042 spin_lock(&tp->tx_lock);
3044 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3045 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3050 tg3_netif_start(tp);
3052 spin_unlock(&tp->tx_lock);
3053 spin_unlock_irq(&tp->lock);
3056 mod_timer(&tp->timer, jiffies + 1);
3059 static void tg3_tx_timeout(struct net_device *dev)
3061 struct tg3 *tp = netdev_priv(dev);
3063 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3066 schedule_work(&tp->reset_task);
3069 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3071 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3072 u32 guilty_entry, int guilty_len,
3073 u32 last_plus_one, u32 *start, u32 mss)
3075 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3076 dma_addr_t new_addr;
3085 /* New SKB is guaranteed to be linear. */
3087 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3089 tg3_set_txd(tp, entry, new_addr, new_skb->len,
3090 (skb->ip_summed == CHECKSUM_HW) ?
3091 TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
3092 *start = NEXT_TX(entry);
3094 /* Now clean up the sw ring entries. */
3096 while (entry != last_plus_one) {
3100 len = skb_headlen(skb);
3102 len = skb_shinfo(skb)->frags[i-1].size;
3103 pci_unmap_single(tp->pdev,
3104 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3105 len, PCI_DMA_TODEVICE);
3107 tp->tx_buffers[entry].skb = new_skb;
3108 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3110 tp->tx_buffers[entry].skb = NULL;
3112 entry = NEXT_TX(entry);
3121 static void tg3_set_txd(struct tg3 *tp, int entry,
3122 dma_addr_t mapping, int len, u32 flags,
3125 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3126 int is_end = (mss_and_is_end & 0x1);
3127 u32 mss = (mss_and_is_end >> 1);
3131 flags |= TXD_FLAG_END;
3132 if (flags & TXD_FLAG_VLAN) {
3133 vlan_tag = flags >> 16;
3136 vlan_tag |= (mss << TXD_MSS_SHIFT);
3138 txd->addr_hi = ((u64) mapping >> 32);
3139 txd->addr_lo = ((u64) mapping & 0xffffffff);
3140 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3141 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
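/* Editor's note: an illustrative sketch of packing two 16-bit
 * quantities into one 32-bit descriptor word, in the same style that
 * tg3_set_txd() uses for len_flags above.  The shift value 16 is for
 * the demo only; the driver's real layout is given by TXD_LEN_SHIFT and
 * friends, and demo_pack_len_flags() is a hypothetical helper.
 */
#if 0
static unsigned int demo_pack_len_flags(unsigned int len, unsigned int flags)
{
	/* length in the high half, flag bits in the low half */
	return (len << 16) | (flags & 0xffff);
}

/* demo_pack_len_flags(1514, 0x0004) == 0x05ea0004 */
#endif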
3144 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3146 u32 base = (u32) mapping & 0xffffffff;
3148 return ((base > 0xffffdcc0) &&
3149 (base + len + 8 < base));
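/* Editor's note: a standalone worked example of the 4GB-boundary check
 * above (not part of the driver).  It reproduces the same u32
 * arithmetic so the wrap case can be verified by hand; the helper name
 * is hypothetical.
 */
#if 0
static int demo_crosses_4gb(unsigned int base, int len)
{
	/* same logic as tg3_4g_overflow_test(), on plain u32 math */
	return (base > 0xffffdcc0) && (base + len + 8 < base);
}

/* demo_crosses_4gb(0xffffff00, 0x200) == 1:
 *   0xffffff00 + 0x200 + 8 wraps to 0x00000108, which is < base.
 * demo_crosses_4gb(0x7fffff00, 0x200) == 0: below the window, no wrap.
 */
#endif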
3152 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3154 struct tg3 *tp = netdev_priv(dev);
3157 u32 len, entry, base_flags, mss;
3158 int would_hit_hwbug;
3159 unsigned long flags;
3161 len = skb_headlen(skb);
3163 /* No BH disabling for tx_lock here. We are running in BH disabled
3164 * context and TX reclaim runs via tp->poll inside of a software
3165 * interrupt. Rejoice!
3167 * Actually, things are not so simple. If we are to take a hw
3168 * IRQ here, we can deadlock, consider:
3177 * spin on tp->tx_lock
3179 * So we really do need to disable interrupts when taking tx_lock. */
3182 local_irq_save(flags);
3183 if (!spin_trylock(&tp->tx_lock)) {
3184 local_irq_restore(flags);
3185 return NETDEV_TX_LOCKED;
3188 /* This is a hard error, log it. */
3189 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3190 netif_stop_queue(dev);
3191 spin_unlock_irqrestore(&tp->tx_lock, flags);
3192 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3194 return NETDEV_TX_BUSY;
3197 entry = tp->tx_prod;
3199 if (skb->ip_summed == CHECKSUM_HW)
3200 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3201 #if TG3_TSO_SUPPORT != 0
3203 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3204 (mss = skb_shinfo(skb)->tso_size) != 0) {
3205 int tcp_opt_len, ip_tcp_len;
3207 if (skb_header_cloned(skb) &&
3208 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3213 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3214 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3216 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3217 TXD_FLAG_CPU_POST_DMA);
3219 skb->nh.iph->check = 0;
3220 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
3221 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3222 skb->h.th->check = 0;
3223 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3227 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3232 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3233 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3234 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3237 tsflags = ((skb->nh.iph->ihl - 5) +
3238 (tcp_opt_len >> 2));
3239 mss |= (tsflags << 11);
3242 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3245 tsflags = ((skb->nh.iph->ihl - 5) +
3246 (tcp_opt_len >> 2));
3247 base_flags |= tsflags << 12;
3254 #if TG3_VLAN_TAG_USED
3255 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3256 base_flags |= (TXD_FLAG_VLAN |
3257 (vlan_tx_tag_get(skb) << 16));
3260 /* Queue skb data, a.k.a. the main skb fragment. */
3261 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3263 tp->tx_buffers[entry].skb = skb;
3264 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3266 would_hit_hwbug = 0;
3268 if (tg3_4g_overflow_test(mapping, len))
3269 would_hit_hwbug = entry + 1;
3271 tg3_set_txd(tp, entry, mapping, len, base_flags,
3272 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3274 entry = NEXT_TX(entry);
3276 /* Now loop through additional data fragments, and queue them. */
3277 if (skb_shinfo(skb)->nr_frags > 0) {
3278 unsigned int i, last;
3280 last = skb_shinfo(skb)->nr_frags - 1;
3281 for (i = 0; i <= last; i++) {
3282 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3285 mapping = pci_map_page(tp->pdev,
3288 len, PCI_DMA_TODEVICE);
3290 tp->tx_buffers[entry].skb = NULL;
3291 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3293 if (tg3_4g_overflow_test(mapping, len)) {
3294 /* Only one should match. */
3295 if (would_hit_hwbug)
3297 would_hit_hwbug = entry + 1;
3300 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3301 tg3_set_txd(tp, entry, mapping, len,
3302 base_flags, (i == last)|(mss << 1));
3304 tg3_set_txd(tp, entry, mapping, len,
3305 base_flags, (i == last));
3307 entry = NEXT_TX(entry);
3311 if (would_hit_hwbug) {
3312 u32 last_plus_one = entry;
3314 unsigned int len = 0;
3316 would_hit_hwbug -= 1;
3317 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3318 entry &= (TG3_TX_RING_SIZE - 1);
3321 while (entry != last_plus_one) {
3323 len = skb_headlen(skb);
3325 len = skb_shinfo(skb)->frags[i-1].size;
3327 if (entry == would_hit_hwbug)
3331 entry = NEXT_TX(entry);
3335 /* If the workaround fails due to memory/mapping
3336 * failure, silently drop this packet. */
3338 if (tigon3_4gb_hwbug_workaround(tp, skb,
3347 /* Packets are ready, update Tx producer idx local and on card. */
3348 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3350 tp->tx_prod = entry;
3351 if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3352 netif_stop_queue(dev);
3356 spin_unlock_irqrestore(&tp->tx_lock, flags);
3358 dev->trans_start = jiffies;
3360 return NETDEV_TX_OK;
3363 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3368 if (new_mtu > ETH_DATA_LEN)
3369 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
3371 tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
3374 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3376 struct tg3 *tp = netdev_priv(dev);
3378 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3381 if (!netif_running(dev)) {
3382 /* We'll just catch it later when the device is up'd. */
3385 tg3_set_mtu(dev, tp, new_mtu);
3390 spin_lock_irq(&tp->lock);
3391 spin_lock(&tp->tx_lock);
3395 tg3_set_mtu(dev, tp, new_mtu);
3399 tg3_netif_start(tp);
3401 spin_unlock(&tp->tx_lock);
3402 spin_unlock_irq(&tp->lock);
3407 /* Free up pending packets in all rx/tx rings.
3409 * The chip has been shut down and the driver detached from
3410 * the networking, so no interrupts or new tx packets will
3411 * end up in the driver. tp->{tx,}lock is not held and we are not
3412 * in an interrupt context and thus may sleep. */
3414 static void tg3_free_rings(struct tg3 *tp)
3416 struct ring_info *rxp;
3419 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3420 rxp = &tp->rx_std_buffers[i];
3422 if (rxp->skb == NULL)
3424 pci_unmap_single(tp->pdev,
3425 pci_unmap_addr(rxp, mapping),
3426 RX_PKT_BUF_SZ - tp->rx_offset,
3427 PCI_DMA_FROMDEVICE);
3428 dev_kfree_skb_any(rxp->skb);
3432 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3433 rxp = &tp->rx_jumbo_buffers[i];
3435 if (rxp->skb == NULL)
3437 pci_unmap_single(tp->pdev,
3438 pci_unmap_addr(rxp, mapping),
3439 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3440 PCI_DMA_FROMDEVICE);
3441 dev_kfree_skb_any(rxp->skb);
3445 for (i = 0; i < TG3_TX_RING_SIZE; ) {
3446 struct tx_ring_info *txp;
3447 struct sk_buff *skb;
3450 txp = &tp->tx_buffers[i];
3458 pci_unmap_single(tp->pdev,
3459 pci_unmap_addr(txp, mapping),
3466 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3467 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3468 pci_unmap_page(tp->pdev,
3469 pci_unmap_addr(txp, mapping),
3470 skb_shinfo(skb)->frags[j].size,
3475 dev_kfree_skb_any(skb);
3479 /* Initialize tx/rx rings for packet processing.
3481 * The chip has been shut down and the driver detached from
3482 * the networking, so no interrupts or new tx packets will
3483 * end up in the driver. tp->{tx,}lock are held and thus we may not sleep. */
3486 static void tg3_init_rings(struct tg3 *tp)
3490 /* Free up all the SKBs. */
3493 /* Zero out all descriptors. */
3494 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3495 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3496 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3497 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3499 /* Initialize invariants of the rings, we only set this
3500 * stuff once. This works because the card does not
3501 * write into the rx buffer posting rings. */
3503 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3504 struct tg3_rx_buffer_desc *rxd;
3506 rxd = &tp->rx_std[i];
3507 rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
3509 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3510 rxd->opaque = (RXD_OPAQUE_RING_STD |
3511 (i << RXD_OPAQUE_INDEX_SHIFT));
3514 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3515 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3516 struct tg3_rx_buffer_desc *rxd;
3518 rxd = &tp->rx_jumbo[i];
3519 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3521 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3523 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3524 (i << RXD_OPAQUE_INDEX_SHIFT));
3528 /* Now allocate fresh SKBs for each rx ring. */
3529 for (i = 0; i < tp->rx_pending; i++) {
3530 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3535 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3536 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3537 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3545 /* Must not be invoked with interrupt sources disabled and
3546 * the hardware shut down. */
3548 static void tg3_free_consistent(struct tg3 *tp)
3550 if (tp->rx_std_buffers) {
3551 kfree(tp->rx_std_buffers);
3552 tp->rx_std_buffers = NULL;
3555 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3556 tp->rx_std, tp->rx_std_mapping);
3560 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3561 tp->rx_jumbo, tp->rx_jumbo_mapping);
3562 tp->rx_jumbo = NULL;
3565 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3566 tp->rx_rcb, tp->rx_rcb_mapping);
3570 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3571 tp->tx_ring, tp->tx_desc_mapping);
3574 if (tp->hw_status) {
3575 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3576 tp->hw_status, tp->status_mapping);
3577 tp->hw_status = NULL;
3580 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3581 tp->hw_stats, tp->stats_mapping);
3582 tp->hw_stats = NULL;
3587 /* Must not be invoked with interrupt sources disabled and
3588 * the hardware shut down. Can sleep. */
3590 static int tg3_alloc_consistent(struct tg3 *tp)
3592 tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3594 TG3_RX_JUMBO_RING_SIZE)) +
3595 (sizeof(struct tx_ring_info) *
3598 if (!tp->rx_std_buffers)
3601 memset(tp->rx_std_buffers, 0,
3602 (sizeof(struct ring_info) *
3604 TG3_RX_JUMBO_RING_SIZE)) +
3605 (sizeof(struct tx_ring_info) *
3608 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3609 tp->tx_buffers = (struct tx_ring_info *)
3610 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3612 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3613 &tp->rx_std_mapping);
3617 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3618 &tp->rx_jumbo_mapping);
3623 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3624 &tp->rx_rcb_mapping);
3628 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3629 &tp->tx_desc_mapping);
3633 tp->hw_status = pci_alloc_consistent(tp->pdev,
3635 &tp->status_mapping);
3639 tp->hw_stats = pci_alloc_consistent(tp->pdev,
3640 sizeof(struct tg3_hw_stats),
3641 &tp->stats_mapping);
3645 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3646 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3651 tg3_free_consistent(tp);
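/* Editor's note: the allocation above carves one kmalloc() block into
 * three consecutive arrays (standard RX, jumbo RX, then TX bookkeeping
 * entries).  The #if 0 sketch below shows the same carving pattern on
 * hypothetical types, independent of the driver structures.
 */
#if 0
#include <stdlib.h>
#include <string.h>

struct demo_rx_info { void *skb; };
struct demo_tx_info { void *skb; };

static int demo_carve(struct demo_rx_info **std, struct demo_rx_info **jumbo,
		      struct demo_tx_info **tx,
		      size_t nstd, size_t njumbo, size_t ntx)
{
	/* one backing allocation, three logical arrays */
	size_t bytes = sizeof(struct demo_rx_info) * (nstd + njumbo) +
		       sizeof(struct demo_tx_info) * ntx;
	void *p = malloc(bytes);

	if (!p)
		return -1;
	memset(p, 0, bytes);

	*std   = p;
	*jumbo = *std + nstd;		/* starts right after the std array */
	*tx    = (struct demo_tx_info *)(*jumbo + njumbo);
	return 0;
}
#endif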
3655 #define MAX_WAIT_CNT 1000
3657 /* To stop a block, clear the enable bit and poll till it
3658 * clears. tp->lock is held. */
3660 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
3665 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
3672 /* We can't enable/disable these bits of the
3673 * 5705/5750, just say success. */
3686 for (i = 0; i < MAX_WAIT_CNT; i++) {
3689 if ((val & enable_bit) == 0)
3693 if (i == MAX_WAIT_CNT) {
3694 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3695 "ofs=%lx enable_bit=%x\n",
3703 /* tp->lock is held. */
3704 static int tg3_abort_hw(struct tg3 *tp)
3708 tg3_disable_ints(tp);
3710 tp->rx_mode &= ~RX_MODE_ENABLE;
3711 tw32_f(MAC_RX_MODE, tp->rx_mode);
3714 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
3715 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
3716 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
3717 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
3718 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
3719 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
3721 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
3722 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
3723 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
3724 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
3725 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
3726 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
3727 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
3731 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3732 tw32_f(MAC_MODE, tp->mac_mode);
3735 tp->tx_mode &= ~TX_MODE_ENABLE;
3736 tw32_f(MAC_TX_MODE, tp->tx_mode);
3738 for (i = 0; i < MAX_WAIT_CNT; i++) {
3740 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3743 if (i >= MAX_WAIT_CNT) {
3744 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3745 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3746 tp->dev->name, tr32(MAC_TX_MODE));
3750 err = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
3751 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
3752 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
3754 tw32(FTQ_RESET, 0xffffffff);
3755 tw32(FTQ_RESET, 0x00000000);
3757 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
3758 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
3763 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3765 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3771 /* tp->lock is held. */
3772 static int tg3_nvram_lock(struct tg3 *tp)
3774 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3777 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3778 for (i = 0; i < 8000; i++) {
3779 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3789 /* tp->lock is held. */
3790 static void tg3_nvram_unlock(struct tg3 *tp)
3792 if (tp->tg3_flags & TG3_FLAG_NVRAM)
3793 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3796 /* tp->lock is held. */
3797 static void tg3_enable_nvram_access(struct tg3 *tp)
3799 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
3800 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
3801 u32 nvaccess = tr32(NVRAM_ACCESS);
3803 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3807 /* tp->lock is held. */
3808 static void tg3_disable_nvram_access(struct tg3 *tp)
3810 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
3811 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
3812 u32 nvaccess = tr32(NVRAM_ACCESS);
3814 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3818 /* tp->lock is held. */
3819 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
3821 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3822 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
3823 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3825 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3827 case RESET_KIND_INIT:
3828 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3832 case RESET_KIND_SHUTDOWN:
3833 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3837 case RESET_KIND_SUSPEND:
3838 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3848 /* tp->lock is held. */
3849 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
3851 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3853 case RESET_KIND_INIT:
3854 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3855 DRV_STATE_START_DONE);
3858 case RESET_KIND_SHUTDOWN:
3859 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3860 DRV_STATE_UNLOAD_DONE);
3869 /* tp->lock is held. */
3870 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
3872 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3874 case RESET_KIND_INIT:
3875 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3879 case RESET_KIND_SHUTDOWN:
3880 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3884 case RESET_KIND_SUSPEND:
3885 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3895 static void tg3_stop_fw(struct tg3 *);
3897 /* tp->lock is held. */
3898 static int tg3_chip_reset(struct tg3 *tp)
3904 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3908 /* We must avoid the readl() that normally takes place.
3909 * It locks machines, causes machine checks, and other
3910 * fun things. So, temporarily disable the 5701
3911 * hardware workaround, while we do the reset. */
3913 flags_save = tp->tg3_flags;
3914 tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
3917 val = GRC_MISC_CFG_CORECLK_RESET;
3919 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3920 if (tr32(0x7e2c) == 0x60) {
3923 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3924 tw32(GRC_MISC_CFG, (1 << 29));
3929 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
3930 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
3931 tw32(GRC_MISC_CFG, val);
3933 /* restore 5701 hardware bug workaround flag */
3934 tp->tg3_flags = flags_save;
3936 /* Unfortunately, we have to delay before the PCI read back.
3937 * Some 575X chips even will not respond to a PCI cfg access
3938 * when the reset command is given to the chip.
3940 * How do these hardware designers expect things to work
3941 * properly if the PCI write is posted for a long period
3942 * of time? It is always necessary to have some method by
3943 * which a register read back can occur to push the write
3944 * out which does the reset.
3946 * For most tg3 variants the trick below was working. */
3951 /* Flush PCI posted writes. The normal MMIO registers
3952 * are inaccessible at this time so this is the only
3953 * way to make this reliable (actually, this is no longer
3954 * the case, see above). I tried to use indirect
3955 * register read/write but this upset some 5701 variants. */
3957 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
3961 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3962 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
3966 /* Wait for link training to complete. */
3967 for (i = 0; i < 5000; i++)
3970 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
3971 pci_write_config_dword(tp->pdev, 0xc4,
3972 cfg_val | (1 << 15));
3974 /* Set PCIE max payload size and clear error status. */
3975 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
3978 /* Re-enable indirect register accesses. */
3979 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
3980 tp->misc_host_ctrl);
3982 /* Set MAX PCI retry to zero. */
3983 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
3984 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
3985 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
3986 val |= PCISTATE_RETRY_SAME_DMA;
3987 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
3989 pci_restore_state(tp->pdev);
3991 /* Make sure PCI-X relaxed ordering bit is clear. */
3992 pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
3993 val &= ~PCIX_CAPS_RELAXED_ORDERING;
3994 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
3996 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
3998 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4000 tw32(0x5000, 0x400);
4003 tw32(GRC_MODE, tp->grc_mode);
4005 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4006 u32 val = tr32(0xc4);
4008 tw32(0xc4, val | (1 << 15));
4011 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4012 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4013 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4014 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4015 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4016 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4019 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4020 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4021 tw32_f(MAC_MODE, tp->mac_mode);
4023 tw32_f(MAC_MODE, 0);
4026 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4027 /* Wait for firmware initialization to complete. */
4028 for (i = 0; i < 100000; i++) {
4029 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4030 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4035 printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4036 "firmware will not restart magic=%08x\n",
4037 tp->dev->name, val);
4042 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4043 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4044 u32 val = tr32(0x7c00);
4046 tw32(0x7c00, val | (1 << 25));
4049 /* Reprobe ASF enable state. */
4050 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4051 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4052 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4053 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4056 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4057 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4058 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4059 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4060 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4067 /* tp->lock is held. */
4068 static void tg3_stop_fw(struct tg3 *tp)
4070 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4074 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4075 val = tr32(GRC_RX_CPU_EVENT);
4077 tw32(GRC_RX_CPU_EVENT, val);
4079 /* Wait for RX cpu to ACK the event. */
4080 for (i = 0; i < 100; i++) {
4081 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4088 /* tp->lock is held. */
4089 static int tg3_halt(struct tg3 *tp)
4095 tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
4098 err = tg3_chip_reset(tp);
4100 tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
4101 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4109 #define TG3_FW_RELEASE_MAJOR 0x0
4110 #define TG3_FW_RELASE_MINOR 0x0
4111 #define TG3_FW_RELEASE_FIX 0x0
4112 #define TG3_FW_START_ADDR 0x08000000
4113 #define TG3_FW_TEXT_ADDR 0x08000000
4114 #define TG3_FW_TEXT_LEN 0x9c0
4115 #define TG3_FW_RODATA_ADDR 0x080009c0
4116 #define TG3_FW_RODATA_LEN 0x60
4117 #define TG3_FW_DATA_ADDR 0x08000a40
4118 #define TG3_FW_DATA_LEN 0x20
4119 #define TG3_FW_SBSS_ADDR 0x08000a60
4120 #define TG3_FW_SBSS_LEN 0xc
4121 #define TG3_FW_BSS_ADDR 0x08000a70
4122 #define TG3_FW_BSS_LEN 0x10
4124 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4125 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4126 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4127 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4128 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4129 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4130 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4131 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4132 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4133 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4134 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4135 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4136 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4137 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4138 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4139 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4140 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4141 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4142 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4143 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4144 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4145 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4146 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4147 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4148 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4149 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4151 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4152 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4153 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4154 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4155 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4156 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4157 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4158 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4159 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4160 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4161 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4162 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4163 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4164 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4165 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4166 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4167 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4168 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4169 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4170 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4171 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4172 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4173 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4174 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4175 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4176 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4177 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4178 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4179 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4180 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4181 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4182 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4183 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4184 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4185 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4186 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4187 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4188 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4189 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4190 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4191 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4192 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4193 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4194 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4195 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4196 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4197 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4198 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4199 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4200 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4201 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4202 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4203 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4204 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4205 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4206 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4207 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4208 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4209 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4210 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4211 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4212 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4213 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4214 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4215 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4218 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4219 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4220 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4221 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4222 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4226 #if 0 /* All zeros, don't eat up space with it. */
4227 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4228 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4229 0x00000000, 0x00000000, 0x00000000, 0x00000000
4233 #define RX_CPU_SCRATCH_BASE 0x30000
4234 #define RX_CPU_SCRATCH_SIZE 0x04000
4235 #define TX_CPU_SCRATCH_BASE 0x34000
4236 #define TX_CPU_SCRATCH_SIZE 0x04000
4238 /* tp->lock is held. */
4239 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4243 if (offset == TX_CPU_BASE &&
4244 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4247 if (offset == RX_CPU_BASE) {
4248 for (i = 0; i < 10000; i++) {
4249 tw32(offset + CPU_STATE, 0xffffffff);
4250 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4251 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4255 tw32(offset + CPU_STATE, 0xffffffff);
4256 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
4259 for (i = 0; i < 10000; i++) {
4260 tw32(offset + CPU_STATE, 0xffffffff);
4261 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4262 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4268 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4271 (offset == RX_CPU_BASE ? "RX" : "TX"));
4278 unsigned int text_base;
4279 unsigned int text_len;
4281 unsigned int rodata_base;
4282 unsigned int rodata_len;
4284 unsigned int data_base;
4285 unsigned int data_len;
4289 /* tp->lock is held. */
4290 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4291 int cpu_scratch_size, struct fw_info *info)
4294 u32 orig_tg3_flags = tp->tg3_flags;
4295 void (*write_op)(struct tg3 *, u32, u32);
4297 if (cpu_base == TX_CPU_BASE &&
4298 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4299 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4300 "TX cpu firmware on %s which is 5705.\n",
4305 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4306 write_op = tg3_write_mem;
4308 write_op = tg3_write_indirect_reg32;
4310 /* Force use of PCI config space for indirect register accesses. */
4313 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4315 err = tg3_halt_cpu(tp, cpu_base);
4319 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4320 write_op(tp, cpu_scratch_base + i, 0);
4321 tw32(cpu_base + CPU_STATE, 0xffffffff);
4322 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4323 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4324 write_op(tp, (cpu_scratch_base +
4325 (info->text_base & 0xffff) +
4328 info->text_data[i] : 0));
4329 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4330 write_op(tp, (cpu_scratch_base +
4331 (info->rodata_base & 0xffff) +
4333 (info->rodata_data ?
4334 info->rodata_data[i] : 0));
4335 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4336 write_op(tp, (cpu_scratch_base +
4337 (info->data_base & 0xffff) +
4340 info->data_data[i] : 0));
4345 tp->tg3_flags = orig_tg3_flags;
4349 /* tp->lock is held. */
4350 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4352 struct fw_info info;
4355 info.text_base = TG3_FW_TEXT_ADDR;
4356 info.text_len = TG3_FW_TEXT_LEN;
4357 info.text_data = &tg3FwText[0];
4358 info.rodata_base = TG3_FW_RODATA_ADDR;
4359 info.rodata_len = TG3_FW_RODATA_LEN;
4360 info.rodata_data = &tg3FwRodata[0];
4361 info.data_base = TG3_FW_DATA_ADDR;
4362 info.data_len = TG3_FW_DATA_LEN;
4363 info.data_data = NULL;
4365 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4366 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4371 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4372 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4377 /* Now startup only the RX cpu. */
4378 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4379 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
4381 for (i = 0; i < 5; i++) {
4382 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4384 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4385 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
4386 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
4390 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
4391 "to set RX CPU PC, is %08x should be %08x\n",
4392 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4396 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4397 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
4402 #if TG3_TSO_SUPPORT != 0
4404 #define TG3_TSO_FW_RELEASE_MAJOR 0x1
4405 #define TG3_TSO_FW_RELASE_MINOR 0x6
4406 #define TG3_TSO_FW_RELEASE_FIX 0x0
4407 #define TG3_TSO_FW_START_ADDR 0x08000000
4408 #define TG3_TSO_FW_TEXT_ADDR 0x08000000
4409 #define TG3_TSO_FW_TEXT_LEN 0x1aa0
4410 #define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
4411 #define TG3_TSO_FW_RODATA_LEN 0x60
4412 #define TG3_TSO_FW_DATA_ADDR 0x08001b20
4413 #define TG3_TSO_FW_DATA_LEN 0x30
4414 #define TG3_TSO_FW_SBSS_ADDR 0x08001b50
4415 #define TG3_TSO_FW_SBSS_LEN 0x2c
4416 #define TG3_TSO_FW_BSS_ADDR 0x08001b80
4417 #define TG3_TSO_FW_BSS_LEN 0x894
4419 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4420 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4421 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4422 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4423 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4424 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4425 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4426 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4427 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4428 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4429 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4430 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4431 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4432 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4433 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4434 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4435 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4436 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4437 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4438 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4439 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4440 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4441 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4442 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4443 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4444 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4445 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4446 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4447 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4448 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4449 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4450 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4451 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4452 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4453 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4454 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4455 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4456 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4457 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4458 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4459 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4460 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4461 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4462 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4463 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4464 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4465 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4466 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4467 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4468 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4469 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4470 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4471 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4472 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4473 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4474 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4475 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4476 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4477 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4478 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4479 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4480 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4481 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4482 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4483 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4484 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4485 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4486 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4487 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4488 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4489 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4490 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4491 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4492 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4493 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4494 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4495 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4496 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4497 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4498 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4499 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4500 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4501 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4502 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4503 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4504 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4505 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4506 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4507 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4508 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4509 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4510 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4511 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4512 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4513 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4514 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4515 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4516 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4517 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4518 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4519 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4520 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4521 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4522 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4523 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4524 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4525 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4526 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4527 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4528 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4529 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4530 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4531 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4532 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4533 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4534 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4535 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4536 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4537 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4538 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4539 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4540 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4541 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4542 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4543 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4544 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4545 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4546 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4547 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4548 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4549 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4550 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4551 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4552 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4553 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4554 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4555 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4556 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4557 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4558 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4559 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4560 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4561 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4562 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4563 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4564 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4565 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4566 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4567 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4568 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4569 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4570 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4571 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4572 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4573 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4574 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4575 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4576 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4577 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4578 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4579 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4580 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4581 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4582 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4583 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4584 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4585 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4586 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4587 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4588 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4589 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4590 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4591 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4592 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4593 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4594 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4595 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4596 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4597 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4598 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4599 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4600 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4601 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4602 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4603 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4604 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4605 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4606 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4607 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4608 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4609 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4610 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4611 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4612 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4613 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4614 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4615 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4616 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4617 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4618 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4619 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4620 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4621 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4622 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4623 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4624 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4625 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4626 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4627 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4628 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4629 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4630 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4631 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4632 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4633 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4634 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4635 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4636 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4637 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4638 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4639 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4640 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4641 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4642 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4643 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4644 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4645 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4646 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4647 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4648 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4649 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4650 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4651 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4652 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4653 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4654 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4655 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4656 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4657 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4658 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
4659 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
4660 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
4661 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
4662 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
4663 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
4664 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
4665 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
4666 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
4667 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
4668 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
4669 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4670 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4671 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
4672 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
4673 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
4674 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
4675 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
4676 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
4677 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
4678 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
4679 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
4680 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
4681 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
4682 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
4683 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
4684 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4685 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
4686 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
4687 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
4688 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
4689 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
4690 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
4691 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
4692 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
4693 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
4694 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
4695 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
4696 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
4697 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
4698 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
4699 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
4700 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
4701 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
4702 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
4703 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
};

static u32 tg3TsoFwRodata[] = {
4707 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4708 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4709 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4710 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
};

static u32 tg3TsoFwData[] = {
4715 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
4716 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* 5705 needs a special version of the TSO firmware. */
#define TG3_TSO5_FW_RELEASE_MAJOR	0x1
#define TG3_TSO5_FW_RELASE_MINOR	0x2
#define TG3_TSO5_FW_RELEASE_FIX		0x0
#define TG3_TSO5_FW_START_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_LEN		0xe90
#define TG3_TSO5_FW_RODATA_ADDR		0x00010e90
#define TG3_TSO5_FW_RODATA_LEN		0x50
#define TG3_TSO5_FW_DATA_ADDR		0x00010f00
#define TG3_TSO5_FW_DATA_LEN		0x20
#define TG3_TSO5_FW_SBSS_ADDR		0x00010f20
#define TG3_TSO5_FW_SBSS_LEN		0x28
#define TG3_TSO5_FW_BSS_ADDR		0x00010f50
#define TG3_TSO5_FW_BSS_LEN		0x88

static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
4737 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
4738 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4739 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4740 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
4741 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
4742 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
4743 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4744 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4745 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4746 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4747 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
4748 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4749 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4750 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
4751 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
4752 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
4753 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
4754 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4755 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4756 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4757 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
4758 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
4759 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4760 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
4761 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4762 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4763 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
4764 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4765 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4766 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4767 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4768 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4769 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
4770 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4771 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4772 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
4773 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4774 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4775 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4776 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4777 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4778 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4779 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
4780 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4781 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
4782 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
4783 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
4784 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
4785 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
4786 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
4787 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
4788 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
4789 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4790 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4791 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
4792 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4793 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4794 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
4795 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4796 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4797 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
4798 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4799 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
4800 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4801 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4802 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
4803 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4804 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4805 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
4806 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
4807 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
4808 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
4809 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
4810 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4811 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
4812 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4813 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
4814 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
4815 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
4816 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
4817 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4818 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
4819 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
4820 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4821 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
4822 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
4823 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4824 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
4825 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4826 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4827 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4828 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
4829 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
4830 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
4831 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4832 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4833 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4834 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4835 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4836 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
4837 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
4838 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
4839 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
4840 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4841 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
4842 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
4843 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4844 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4845 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4846 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
4847 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4848 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4849 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
4850 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
4851 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
4852 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
4853 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
4854 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4855 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4856 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4857 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
4858 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4859 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
4860 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4861 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
4862 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
4863 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
4864 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
4865 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
4866 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
4867 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
4868 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
4869 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
4870 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
4871 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
4872 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
4873 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
4874 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
4875 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
4876 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
4877 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
4878 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
4879 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
4880 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
4881 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
4882 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
4883 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
4884 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4885 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
4886 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
4887 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
4888 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4889 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
4890 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4891 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4892 0x00000000, 0x00000000, 0x00000000,
};

static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
4896 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4897 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
4898 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4899 0x00000000, 0x00000000, 0x00000000,
};

static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
4903 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
4904 0x00000000, 0x00000000, 0x00000000,
};

/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		info.text_base = TG3_TSO5_FW_TEXT_ADDR;
		info.text_len = TG3_TSO5_FW_TEXT_LEN;
		info.text_data = &tg3Tso5FwText[0];
		info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
		info.rodata_data = &tg3Tso5FwRodata[0];
		info.data_base = TG3_TSO5_FW_DATA_ADDR;
		info.data_len = TG3_TSO5_FW_DATA_LEN;
		info.data_data = &tg3Tso5FwData[0];
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
		cpu_scratch_size = (info.text_len +
				    info.rodata_len +
				    info.data_len +
				    TG3_TSO5_FW_SBSS_LEN +
				    TG3_TSO5_FW_BSS_LEN);
	} else {
		info.text_base = TG3_TSO_FW_TEXT_ADDR;
		info.text_len = TG3_TSO_FW_TEXT_LEN;
		info.text_data = &tg3TsoFwText[0];
		info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO_FW_RODATA_LEN;
		info.rodata_data = &tg3TsoFwRodata[0];
		info.data_base = TG3_TSO_FW_DATA_ADDR;
		info.data_len = TG3_TSO_FW_DATA_LEN;
		info.data_data = &tg3TsoFwData[0];
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.text_base);

	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.text_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.text_base);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
		       "to set CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(cpu_base + CPU_PC),
		       info.text_base);
		return -ENODEV;
	}
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE, 0x00000000);

	return 0;
}

#endif /* TG3_TSO_SUPPORT != 0 */
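
/* __tg3_set_mac_addr() below writes the station address into all four
 * MAC_ADDR slots (plus the twelve MAC_EXTADDR slots on 5703/5704) and
 * derives the transmit backoff seed from the sum of the address bytes.
 */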
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp)
{
	u32 addr_high, addr_low;
	int i;

	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] << 8) |
		    (tp->dev->dev_addr[5] << 0));
	for (i = 0; i < 4; i++) {
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}

static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	spin_lock_irq(&tp->lock);
	__tg3_set_mac_addr(tp);
	spin_unlock_irq(&tp->lock);

	return 0;
}
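
/* tg3_set_bdinfo() fills one TG3_BDINFO block in NIC SRAM: the 64-bit host
 * DMA address of a ring, its maxlen/flags word and, on pre-5705 chips, the
 * ring's location inside NIC memory.
 */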
/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}

static void __tg3_set_rx_mode(struct net_device *);
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp)
{
	u32 val, rdmac_mode;
	int i, err, limit;

	tg3_disable_ints(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
		err = tg3_abort_hw(tp);
		if (err)
			return err;
	}

	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
	tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
		/* Enable some hw fixes.  */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	tg3_init_rings(tp);

	/* This value is determined during the probe time DMA
	 * engine test, tg3_test_dma.
	 */
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
	if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
		tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
	if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
		tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;

	tw32(GRC_MODE,
	     tp->grc_mode |
	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));

	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);
	/* Initialize MBUF/DESC pool. */
	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	}
#if TG3_TSO_SUPPORT != 0
	else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
		int fw_len;

		fw_len = (TG3_TSO5_FW_TEXT_LEN +
			  TG3_TSO5_FW_RODATA_LEN +
			  TG3_TSO5_FW_DATA_LEN +
			  TG3_TSO5_FW_SBSS_LEN +
			  TG3_TSO5_FW_BSS_LEN);
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}
#endif

	if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
		       tp->dev->name);
		return -ENODEV;
	}
	/* Setup replenish threshold. */
	tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);

	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *				ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tp->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tp->rx_std_mapping & 0xffffffff));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
	     NIC_SRAM_RX_BUFFER_DESC);

	/* Don't even try to program the JUMBO/MINI buffer descriptor
	 * configs on 5705.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
	} else {
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);

		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

		/* Setup replenish threshold. */
		tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);

		if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tp->rx_jumbo_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tp->rx_jumbo_mapping & 0xffffffff));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
			     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}
	}

	/* There is only one send ring on 5705/5750, no need to explicitly
	 * disable the others.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		/* Clear out send RCB ring in SRAM. */
		for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
			tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
				      BDINFO_FLAGS_DISABLED);
	}

	tp->tx_prod = 0;
	tp->tx_cons = 0;
	tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
	tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);

	tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
		       tp->tx_desc_mapping,
		       (TG3_TX_RING_SIZE <<
			BDINFO_FLAGS_MAXLEN_SHIFT),
		       NIC_SRAM_TX_BUFFER_DESC);

	/* There is only one receive return ring on 5705/5750, no need
	 * to explicitly disable the others.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
		     i += TG3_BDINFO_SIZE) {
			tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
				      BDINFO_FLAGS_DISABLED);
		}
	}

	tp->rx_rcb_ptr = 0;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);

	tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
		       tp->rx_rcb_mapping,
		       (TG3_RX_RCB_RING_SIZE(tp) <<
			BDINFO_FLAGS_MAXLEN_SHIFT),
		       0);

	tp->rx_std_ptr = tp->rx_pending;
	tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
		     tp->rx_std_ptr);

	tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
		tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
		     tp->rx_jumbo_ptr);
	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);

	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	tw32(MAC_TX_LENGTHS,
	     (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	     (6 << TX_LENGTHS_IPG_SHIFT) |
	     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);

	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);
	if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
		rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;

	/* If statement applies to 5705 and 5750 PCI devices only */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
		if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
#if TG3_TSO_SUPPORT != 0
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		rdmac_mode |= (1 << 27);
#endif

	/* Receive/send statistics. */
	if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
	    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));

	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	tw32(HOSTCC_RXCOL_TICKS, 0);
	tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
	tw32(HOSTCC_RXMAX_FRAMES, 1);
	tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		tw32(HOSTCC_RXCOAL_TICK_INT, 0);
		tw32(HOSTCC_TXCOAL_TICK_INT, 0);
	}
	tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
	tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

	/* set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tp->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tp->status_mapping & 0xffffffff));

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STAT_COAL_TICKS,
		     DEFAULT_STAT_COAL_TICKS);
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
	/* Clear statistics/status block in chip, and status block in ram. */
	for (i = NIC_SRAM_STATS_BLK;
	     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
	     i += sizeof(u32)) {
		tg3_write_mem(tp, i, 0);
		udelay(40);
	}
	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);

	tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
		MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);

	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
			    GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);

	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
	tr32(MAILBOX_INTERRUPT_0);

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);
	/* If statement applies to 5705 and 5750 PCI devices only */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
		if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
			/* nothing to do here */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
			   !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

	tw32_f(WDMAC_MODE, val);
	udelay(40);

	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
		val = tr32(TG3PCI_X_CAPS);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
			val &= ~PCIX_CAPS_BURST_MASK;
			val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
			val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
			if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
				val |= (tp->split_mode_max_reqs <<
					PCIX_CAPS_SPLIT_SHIFT);
		}
		tw32(TG3PCI_X_CAPS, val);
	}

	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);

	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
	tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
#if TG3_TSO_SUPPORT != 0
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
#endif
	tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}
#if TG3_TSO_SUPPORT != 0
	if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}
#endif

	tp->tx_mode = TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);

	tp->rx_mode = RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	if (tp->link_config.phy_is_low_power) {
		tp->link_config.phy_is_low_power = 0;
		tp->link_config.speed = tp->link_config.orig_speed;
		tp->link_config.duplex = tp->link_config.orig_duplex;
		tp->link_config.autoneg = tp->link_config.orig_autoneg;
	}

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
		    !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V  */
			/* only if the signal pre-emphasis bit is not set  */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}
	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
	    (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
	}

	err = tg3_setup_phy(tp, 1);
	if (err)
		return err;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		u32 tmp;

		/* Clear CRC stats. */
		if (!tg3_readphy(tp, 0x1e, &tmp)) {
			tg3_writephy(tp, 0x1e, tmp | 0x8000);
			tg3_readphy(tp, 0x14, &tmp);
		}
	}

	__tg3_set_rx_mode(tp->dev);

	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		limit = 8;
	else
		limit = 16;
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
		limit -= 4;
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
	case 15:
		tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
	case 14:
		tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
	case 13:
		tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
	case 12:
		tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
	case 11:
		tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
	case 10:
		tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
	case 9:
		tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
	case 8:
		tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
	case 7:
		tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
	case 6:
		tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
	case 5:
		tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
	case 4:
		/* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
	case 2:
	case 1:
	default:
		break;
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
		tg3_enable_ints(tp);

	return 0;
}
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp)

	/* Force the chip into D0. */
	err = tg3_set_power_state(tp, 0);

	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	err = tg3_reset_hw(tp);
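
/* Note (added for clarity): the per-MAC statistics registers read in
 * tg3_periodic_fetch_stats() are only 32 bits wide.  The helper macro
 * below accumulates each 32-bit read into a 64-bit software counter
 * (tg3_stat64_t); the unsigned wrap test (low < __val) detects overflow
 * of the low word and carries it into the high word.
 */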
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
static void tg3_periodic_fetch_stats(struct tg3 *tp)

	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
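
/* Driver heartbeat (added note): tg3_open() arms this timer with
 * timer_offset = HZ / 10 and timer_multiplier = 10, so the once-per-second
 * work below runs every tenth expiration, while asf_multiplier = 10 * 120
 * paces the ASF "alive" message at roughly 120 second intervals.
 */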
static void tg3_timer(unsigned long __opaque)

	struct tg3 *tp = (struct tg3 *) __opaque;
	unsigned long flags;

	spin_lock_irqsave(&tp->lock, flags);
	spin_lock(&tp->tx_lock);

	/* All of this garbage is because when using non-tagged
	 * IRQ status the mailbox/status_block protocol the chip
	 * uses with the cpu is race prone.
	 */
	if (tp->hw_status->status & SD_STATUS_UPDATED) {
		tw32(GRC_LOCAL_CTRL,
		     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);

		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));

	if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
		spin_unlock(&tp->tx_lock);
		spin_unlock_irqrestore(&tp->lock, flags);
		schedule_work(&tp->reset_task);

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		tg3_periodic_fetch_stats(tp);

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {

			mac_stat = tr32(MAC_STATUS);

			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)

				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);

			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {

			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {

				tw32_f(MAC_MODE, (tp->mac_mode &
						  ~MAC_MODE_PORT_MODE_MASK));

				tw32_f(MAC_MODE, tp->mac_mode);

				tg3_setup_phy(tp, 0);

		tp->timer_counter = tp->timer_multiplier;

	/* Heartbeat is only sent once every 120 seconds. */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
			val = tr32(GRC_RX_CPU_EVENT);

			tw32(GRC_RX_CPU_EVENT, val);

		tp->asf_counter = tp->asf_multiplier;

	spin_unlock(&tp->tx_lock);
	spin_unlock_irqrestore(&tp->lock, flags);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
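
/* Interrupt self-test (added note): tg3_test_interrupt() temporarily
 * installs tg3_test_isr, forces a coalescing "now" event through
 * HOSTCC_MODE, and polls interrupt mailbox 0 to see whether the requested
 * vector (MSI or INTx) actually reaches the host, before re-installing
 * the normal handler.
 */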
static int tg3_test_interrupt(struct tg3 *tp)

	struct net_device *dev = tp->dev;

	tg3_disable_ints(tp);

	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);

	tg3_enable_ints(tp);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	for (i = 0; i < 5; i++) {
		int_mbox = tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);

	tg3_disable_ints(tp);

	free_irq(tp->pdev->irq, dev);

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
		err = request_irq(tp->pdev->irq, tg3_msi,
				  SA_SAMPLE_RANDOM, dev->name, dev);
	else
		err = request_irq(tp->pdev->irq, tg3_interrupt,
				  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)

	struct net_device *dev = tp->dev;

	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* other failures */

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",

	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	err = request_irq(tp->pdev->irq, tg3_interrupt,
			  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	err = tg3_init_hw(tp);

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	free_irq(tp->pdev->irq, dev);
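
/* Note on IRQ setup in tg3_open() below: MSI is attempted only on
 * 5750-class chips that are not the early 5750 AX/BX steppings; even when
 * pci_enable_msi() succeeds, delivery is verified with tg3_test_msi()
 * further down and the driver falls back to shared INTx on failure.
 */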
static int tg3_open(struct net_device *dev)

	struct tg3 *tp = netdev_priv(dev);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);

	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
	    (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
	    (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
		if (pci_enable_msi(tp->pdev) == 0) {

			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
		err = request_irq(tp->pdev->irq, tg3_msi,
				  SA_SAMPLE_RANDOM, dev->name, dev);
	else
		err = request_irq(tp->pdev->irq, tg3_interrupt,
				  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);

		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

		tg3_free_consistent(tp);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	err = tg3_init_hw(tp);

		tp->timer_offset = HZ / 10;
		tp->timer_counter = tp->timer_multiplier = 10;
		tp->asf_counter = tp->asf_multiplier = (10 * 120);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

		tg3_free_consistent(tp);

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		err = tg3_test_msi(tp);

			spin_lock_irq(&tp->lock);
			spin_lock(&tp->tx_lock);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

			tg3_free_consistent(tp);

			spin_unlock(&tp->tx_lock);
			spin_unlock_irq(&tp->lock);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	netif_start_queue(dev);
/*static*/ void tg3_dump_state(struct tg3 *tp)

	u32 val32, val32_2, val32_3, val32_4, val32_5;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));

	for (i = 0; i < 6; i++) {

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
static struct net_device_stats *tg3_get_stats(struct net_device *);
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);

static int tg3_close(struct net_device *dev)

	struct tg3 *tp = netdev_priv(dev);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	tg3_disable_ints(tp);

	tp->tg3_flags &=
		~(TG3_FLAG_INIT_COMPLETE |
		  TG3_FLAG_GOT_SERDES_FLOWCTL);
	netif_carrier_off(tp->dev);

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);
static inline unsigned long get_stat64(tg3_stat64_t *val)

#if (BITS_PER_LONG == 32)

	ret = ((u64)val->high << 32) | ((u64)val->low);

static unsigned long calc_crc_errors(struct tg3 *tp)

	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		unsigned long flags;

		spin_lock_irqsave(&tp->lock, flags);
		if (!tg3_readphy(tp, 0x1e, &val)) {
			tg3_writephy(tp, 0x1e, val | 0x8000);
			tg3_readphy(tp, 0x14, &val);

		spin_unlock_irqrestore(&tp->lock, flags);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;

	return get_stat64(&hw_stats->rx_fcs_errors);
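
/* Note (added): so that counter totals survive an interface down/up
 * cycle, tg3_close() snapshots the current totals into net_stats_prev
 * and estats_prev; the helpers below report the old snapshot plus the
 * live hardware value for each member.
 */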
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
			 get_stat64(&hw_stats->member)

static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)

	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);
static struct net_device_stats *tg3_get_stats(struct net_device *dev)

	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors) +
		get_stat64(&hw_stats->rx_discards);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);
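
/* Note (added): multicast filtering further down is hash based.
 * calc_crc() computes an Ethernet CRC over each multicast address; bits
 * 6:5 of the resulting hash index select one of the four 32-bit
 * MAC_HASH_REG registers (regidx) and the remaining low bits select the
 * bit set within that register.
 */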
static inline u32 calc_crc(unsigned char *buf, int len)

	for (j = 0; j < len; j++) {

		for (k = 0; k < 8; k++) {
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)

	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);

static void __tg3_set_rx_mode(struct net_device *dev)

	struct tg3 *tp = netdev_priv(dev);

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 */
#if TG3_VLAN_TAG_USED
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;

	/* By definition, VLAN is disabled always in this
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);

		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;

		u32 mc_filter[4] = { 0, };

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);

			regidx = (bit & 0x60) >> 5;

			mc_filter[regidx] |= (1 << bit);

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);

static void tg3_set_rx_mode(struct net_device *dev)

	struct tg3 *tp = netdev_priv(dev);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);
	__tg3_set_rx_mode(dev);
	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);
#define TG3_REGDUMP_LEN		(32 * 1024)

static int tg3_get_regs_len(struct net_device *dev)

	return TG3_REGDUMP_LEN;

static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)

	struct tg3 *tp = netdev_priv(dev);

	memset(p, 0, TG3_REGDUMP_LEN);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_LOOP(RX_CPU_BASE, 0x280);
	GET_REG32_LOOP(TX_CPU_BASE, 0x280);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef GET_REG32_LOOP

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);
static int tg3_get_eeprom_len(struct net_device *dev)

	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;

static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);

static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)

	struct tg3 *tp = netdev_priv(dev);

	u32 i, offset, len, val, b_offset, b_count;

	offset = eeprom->offset;

	eeprom->magic = TG3_EEPROM_MAGIC;

		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */

		ret = tg3_nvram_read(tp, offset-b_offset, &val);

		val = cpu_to_le32(val);
		memcpy(data, ((char*)&val) + b_offset, b_count);

		eeprom->len += b_count;

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read(tp, offset + i, &val);

		val = cpu_to_le32(val);
		memcpy(pd + i, &val, 4);

		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];

		b_offset = offset + len - b_count;
		ret = tg3_nvram_read(tp, b_offset, &val);

		val = cpu_to_le32(val);
		memcpy(pd, ((char*)&val), b_count);
		eeprom->len += b_count;
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);

static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)

	struct tg3 *tp = netdev_priv(dev);

	u32 offset, len, b_offset, odd_len, start, end;

	if (eeprom->magic != TG3_EEPROM_MAGIC)

	offset = eeprom->offset;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read(tp, offset-b_offset, &start);

		start = cpu_to_le32(start);

		/* adjustments to end on required 4 byte boundary */

		len = (len + 3) & ~3;
		ret = tg3_nvram_read(tp, offset+len-4, &end);

		end = cpu_to_le32(end);

	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);

			memcpy(buf, &start, 4);

			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);

	ret = tg3_nvram_write_block(tp, offset, len, buf);
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)

	struct tg3 *tp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
		cmd->supported |= (SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |

		cmd->supported |= SUPPORTED_FIBRE;

	cmd->advertising = tp->link_config.advertising;
	if (netif_running(dev)) {
		cmd->speed = tp->link_config.active_speed;
		cmd->duplex = tp->link_config.active_duplex;

	cmd->phy_address = PHY_ADDR;
	cmd->transceiver = 0;
	cmd->autoneg = tp->link_config.autoneg;
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)

	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		/* These are the only valid advertisement bits allowed.  */
		if (cmd->autoneg == AUTONEG_ENABLE &&
		    (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
					  ADVERTISED_1000baseT_Full |
					  ADVERTISED_Autoneg |

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = cmd->advertising;
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;

		tp->link_config.advertising = 0;
		tp->link_config.speed = cmd->speed;
		tp->link_config.duplex = cmd->duplex;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)

	struct tg3 *tp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(tp->pdev));

static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)

	struct tg3 *tp = netdev_priv(dev);

	wol->supported = WAKE_MAGIC;

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
		wol->wolopts = WAKE_MAGIC;
	memset(&wol->sopass, 0, sizeof(wol->sopass));

static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)

	struct tg3 *tp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)

	if ((wol->wolopts & WAKE_MAGIC) &&
	    tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
	    !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))

	spin_lock_irq(&tp->lock);
	if (wol->wolopts & WAKE_MAGIC)
		tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;

		tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
	spin_unlock_irq(&tp->lock);

static u32 tg3_get_msglevel(struct net_device *dev)

	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;

static void tg3_set_msglevel(struct net_device *dev, u32 value)

	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
#if TG3_TSO_SUPPORT != 0
static int tg3_set_tso(struct net_device *dev, u32 value)

	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {

	return ethtool_op_set_tso(dev, value);

static int tg3_nway_reset(struct net_device *dev)

	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))

	spin_lock_irq(&tp->lock);

	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    (bmcr & BMCR_ANENABLE)) {
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);

	spin_unlock_irq(&tp->lock);
static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)

	struct tg3 *tp = netdev_priv(dev);

	ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;

	ering->rx_pending = tp->rx_pending;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	ering->tx_pending = tp->tx_pending;

static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)

	struct tg3 *tp = netdev_priv(dev);

	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1))

	if (netif_running(dev))

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	tp->rx_pending = ering->rx_pending;

	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {

		tg3_netif_start(tp);

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)

	struct tg3 *tp = netdev_priv(dev);

	epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
	epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
	epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;

static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)

	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);
	if (epause->autoneg)
		tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;

		tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
	if (epause->rx_pause)
		tp->tg3_flags |= TG3_FLAG_RX_PAUSE;

		tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		tp->tg3_flags |= TG3_FLAG_TX_PAUSE;

		tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;

	if (netif_running(dev)) {

		tg3_netif_start(tp);

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);
static u32 tg3_get_rx_csum(struct net_device *dev)

	struct tg3 *tp = netdev_priv(dev);
	return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;

static int tg3_set_rx_csum(struct net_device *dev, u32 data)

	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {

	spin_lock_irq(&tp->lock);

		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;

		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
	spin_unlock_irq(&tp->lock);

static int tg3_set_tx_csum(struct net_device *dev, u32 data)

	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {

		dev->features |= NETIF_F_IP_CSUM;

		dev->features &= ~NETIF_F_IP_CSUM;
static int tg3_get_stats_count (struct net_device *dev)

	return TG3_NUM_STATS;

static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)

	switch (stringset) {

		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));

		WARN_ON(1);	/* we need a WARN() */

static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)

	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)

	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);

		data->phy_id = PHY_ADDR;

		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		spin_lock_irq(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_irq(&tp->lock);

		data->val_out = mii_regval;

		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		if (!capable(CAP_NET_ADMIN))

		spin_lock_irq(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_irq(&tp->lock);
#if TG3_VLAN_TAG_USED
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)

	struct tg3 *tp = netdev_priv(dev);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)

	struct tg3 *tp = netdev_priv(dev);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	tp->vlgrp->vlan_devices[vid] = NULL;
	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);
static struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#if TG3_TSO_SUPPORT != 0
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= tg3_set_tso,
	.get_strings		= tg3_get_strings,
	.get_stats_count	= tg3_get_stats_count,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &val) != 0)

	if (swab32(val) != TG3_EEPROM_MAGIC)

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)

		if (swab32(val) == TG3_EEPROM_MAGIC)

	tp->nvram_size = cursize;

static void __devinit tg3_get_nvram_size(struct tg3 *tp)

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {

			tp->nvram_size = (val >> 16) * 1024;

	tp->nvram_size = 0x20000;
static void __devinit tg3_get_nvram_info(struct tg3 *tp)

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
			case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;

			case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;

			case FLASH_VENDOR_ATMEL_EEPROM:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;

			case FLASH_VENDOR_ST:
				tp->nvram_jedecnum = JEDEC_ST;
				tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;

			case FLASH_VENDOR_SAIFUN:
				tp->nvram_jedecnum = JEDEC_SAIFUN;
				tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;

			case FLASH_VENDOR_SST_SMALL:
			case FLASH_VENDOR_SST_LARGE:
				tp->nvram_jedecnum = JEDEC_SST;
				tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;

		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;

		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;

		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;

	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
			case FLASH_5752PAGE_SIZE_256:
				tp->nvram_pagesize = 256;

			case FLASH_5752PAGE_SIZE_512:
				tp->nvram_pagesize = 512;

			case FLASH_5752PAGE_SIZE_1K:
				tp->nvram_pagesize = 1024;

			case FLASH_5752PAGE_SIZE_2K:
				tp->nvram_pagesize = 2048;

			case FLASH_5752PAGE_SIZE_4K:
				tp->nvram_pagesize = 4096;

			case FLASH_5752PAGE_SIZE_264:
				tp->nvram_pagesize = 264;

		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void __devinit tg3_nvram_init(struct tg3 *tp)

	if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)

	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	/* XXX schedule_timeout() ... */
	for (j = 0; j < 100; j++)

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tp->tg3_flags |= TG3_FLAG_NVRAM;

		tg3_enable_nvram_access(tp);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);

			tg3_get_nvram_info(tp);

		tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);

		tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)

	if (offset > EEPROM_ADDR_ADDR_MASK ||

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |

	tw32(GRC_EEPROM_ADDR,
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 10000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)

	if (!(tmp & EEPROM_ADDR_COMPLETE))

	*val = tr32(GRC_EEPROM_DATA);
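
/* Note (added): NVRAM accesses below go through a small command
 * interface: the caller loads NVRAM_ADDR / NVRAM_WRDATA as needed,
 * writes a command word to NVRAM_CMD, and tg3_nvram_exec_cmd()
 * busy-polls NVRAM_CMD_DONE for up to NVRAM_CMD_TIMEOUT iterations.
 */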
#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {

		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {

	if (i == NVRAM_CMD_TIMEOUT) {

static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)

	if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
		printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
	    (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL)) {

		offset = ((offset / tp->nvram_pagesize) <<
			  ATMEL_AT45DB0X1B_PAGE_POS) +
			 (offset % tp->nvram_pagesize);

	if (offset > NVRAM_ADDR_MSK)

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

		*val = swab32(tr32(NVRAM_RDDATA));

	tg3_nvram_unlock(tp);

	tg3_disable_nvram_access(tp);
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
					      u32 offset, u32 len, u8 *buf)

	for (i = 0; i < len; i += 4) {

		memcpy(&data, buf + i, 4);

		tw32(GRC_EEPROM_DATA, cpu_to_le32(data));

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |

		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |

		for (j = 0; j < 10000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)

		if (!(val & EEPROM_ADDR_COMPLETE)) {
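
/* Note (added): unbuffered flash parts cannot be rewritten in place.
 * The routine below reads the whole containing page into a temporary
 * buffer, merges the caller's data, issues a write-enable plus page
 * erase, and then rewrites the page one 32-bit word at a time.
 */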
/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,

	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;

	tmp = kmalloc(pagesize, GFP_KERNEL);

		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read(tp, phy_addr + j,
						  (u32 *) (tmp + j))))

		page_off = offset & pagemask;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))

		for (j = 0; j < pagesize; j += 4) {

			data = *((u32 *) (tmp + j));
			tw32(NVRAM_WRDATA, cpu_to_be32(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |

				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);
/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 data, page_off, phy_addr, nvram_cmd;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, cpu_to_be32(data));

		page_off = offset % tp->nvram_pagesize;

		if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
		    (tp->nvram_jedecnum == JEDEC_ATMEL)) {

			phy_addr = ((offset / tp->nvram_pagesize) <<
				    ATMEL_AT45DB0X1B_PAGE_POS) + page_off;

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

			if ((page_off == 0) || (i == 0))
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (page_off == (tp->nvram_pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			nvram_cmd |= NVRAM_CMD_LAST;

		if ((tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |

		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
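
/* Note (added): tg3_nvram_write_block() below is the common entry point.
 * It appears to drop the EEPROM write-protect GPIO when
 * TG3_FLAG_EEPROM_WRITE_PROT is set, dispatches to the eeprom,
 * buffered-flash or unbuffered-flash write path, and restores
 * GRC_LOCAL_CTRL afterwards.
 */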
/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)

	if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
		printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");

	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);

		tg3_enable_nvram_access(tp);
		if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
			!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {

			ret = tg3_nvram_write_block_buffered(tp, offset, len,

			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
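
/* Note (added): some boards do not report a usable PHY ID.  The table
 * below maps PCI subsystem vendor/device IDs to the expected PHY ID so
 * that tg3_phy_probe() can fall back to it via lookup_by_subsys() when
 * the value read from the PHY does not look sane.
 */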
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },		    /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },		    /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },		/* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },		  /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */

static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
/* Since this function may be called in D3-hot power state during
 * tg3_init_one(), only config cycles are allowed.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	tp->phy_id = PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;

		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
					  SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;
			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;
		}

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
		if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
			tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;

		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
	}
}

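/* Illustrative sketch only (an editor's example, not called anywhere in the
 * driver): how the two 16-bit MII ID halves are folded into the single
 * 32-bit phy_id format used by this file.  It mirrors the assembly done
 * above in tg3_get_eeprom_hw_cfg() and again in tg3_phy_probe(): ID1 lands
 * in bits 10..25, the top 6 bits of ID2 land in bits 26..31, and the low
 * 10 bits of ID2 (which include the PHY revision) stay in bits 0..9.
 * Comparisons against the PHY_ID_BCM* constants then go through PHY_ID_MASK.
 */
static inline u32 tg3_example_pack_phy_id(u32 id1, u32 id2)
{
	u32 phy_id;

	phy_id  = (id1 & 0xffff) << 10;		/* bits 10..25 */
	phy_id |= (id2 & 0xfc00) << 16;		/* bits 26..31 */
	phy_id |= (id2 & 0x03ff) << 0;		/* bits 0..9 (incl. revision) */
	return phy_id;
}
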
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		if (!tg3_copper_is_advertising_all(tp)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}

static void __devinit tg3_read_partno(struct tg3 *tp)
{
	unsigned char vpd_data[256];
	int i;

	if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
		/* Sun decided not to put the necessary bits in the
		 * NVRAM of their onboard tg3 parts :(
		 */
		strcpy(tp->board_part_number, "Sun 570X");
		return;
	}

	for (i = 0; i < 256; i += 4) {
		u32 tmp;

		if (tg3_nvram_read(tp, 0x100 + i, &tmp))
			goto out_not_found;

		vpd_data[i + 0] = ((tmp >>  0) & 0xff);
		vpd_data[i + 1] = ((tmp >>  8) & 0xff);
		vpd_data[i + 2] = ((tmp >> 16) & 0xff);
		vpd_data[i + 3] = ((tmp >> 24) & 0xff);
	}

	/* Now parse and find the part number. */
	for (i = 0; i < 256; ) {
		unsigned char val = vpd_data[i];
		int block_end;

		if (val == 0x82 || val == 0x91) {
			i = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
			continue;
		}

		if (val != 0x90)
			goto out_not_found;

		block_end = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
		i += 3;
		while (i < block_end) {
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];

				if (partno_len > 24)
					goto out_not_found;

				memcpy(tp->board_part_number,
				       &vpd_data[i + 3],
				       partno_len);
				return;
			}
			i += 3 + vpd_data[i + 2];
		}

		/* Part number not found. */
		goto out_not_found;
	}

out_not_found:
	strcpy(tp->board_part_number, "none");
}

#ifdef CONFIG_SPARC64
static int __devinit tg3_is_sun_570X(struct tg3 *tp)
{
	struct pci_dev *pdev = tp->pdev;
	struct pcidev_cookie *pcp = pdev->sysdata;

	if (pcp != NULL) {
		int node = pcp->prom_node;
		u32 venid;
		int err;

		err = prom_getproperty(node, "subsystem-vendor-id",
				       (char *) &venid, sizeof(venid));
		if (err == 0 || err == -1)
			return 0;
		if (venid == PCI_VENDOR_ID_SUN)
			return 1;
	}
	return 0;
}
#endif

static int __devinit tg3_get_invariants(struct tg3 *tp)
{
	static struct pci_device_id write_reorder_chipsets[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_82801AA_8) },
		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_82801AB_8) },
		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_82801BA_11) },
		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_82801BA_6) },
		{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
			     PCI_DEVICE_ID_AMD_FE_GATE_700C) },
		{ },
	};
	u32 misc_ctrl_reg;
	u32 cacheline_sz_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

#ifdef CONFIG_SPARC64
	if (tg3_is_sun_570X(tp))
		tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
#endif

	/* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(write_reorder_chipsets))
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;

	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver has this
	 * workaround but turns MWI off all the time so it never uses
	 * it.  This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
	 * has the register indirect write enable bit set before
	 * we try to access any of the MMIO registers.  It is also
	 * critical that the PCI-X hw workaround situation is decided
	 * before that as well.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);

	tp->pci_chip_rev_id = (misc_ctrl_reg >>
			       MISC_HOST_CTRL_CHIPREV_SHIFT);

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* Initialize misc host control in PCI block. */
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
			      &cacheline_sz_reg);

	tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
	tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
	tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
	tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
	    (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
		tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;

	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
		tp->tg3_flags2 |= TG3_FLG2_HW_TSO;

	if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
		tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;

		cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
		cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
		cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
		cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);

		pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
				       cacheline_sz_reg);
	}

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
		tp->tg3_flags |= TG3_FLAG_PCIX_MODE;

		/* If this is a 5700 BX chipset, and we are in PCI-X
		 * mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
			u32 pm_reg;

			tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	/* Back to back register writes can cause problems on this chip,
	 * the workaround is to read back all reg writes except those to
	 * mailbox regs.  See tg3_write_indirect_reg32().
	 *
	 * PCI Express 5750_A0 rev chips need this workaround too.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	     tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
		tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tp->tg3_flags |= TG3_FLAG_PCI_32BIT;

	/* Chip-specific fixup from Broadcom driver */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	/* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
	 * GPIO1 driven high will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
	    (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	/* Force the chip into D0. */
	err = tg3_set_power_state(tp, 0);
	if (err) {
		printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
		       pci_name(tp->pdev));
		return err;
	}

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
		tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
	tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;

	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN)
		tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;

	/* Determine WakeOnLan speed to use. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
		tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
	} else {
		tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
	}

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
	    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
		tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
		tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
		tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;

	/* Only 5701 and later support tagged irq status mode.
	 * Also, 5788 chips cannot use tagged irq status.
	 *
	 * However, since we are using NAPI avoid tagged irq status
	 * because the interrupt condition is more difficult to
	 * fully clear in that mode.
	 */
	tp->coalesce_mode = 0;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Initialize MAC MI mode, polling disabled. */
	tw32_f(MAC_MI_MODE, tp->mi_mode);

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	val &= GRC_MODE_HOST_STACKUP;
	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);

		if (chiprevid == CHIPREV_ID_5701_A0 ||
		    chiprevid == CHIPREV_ID_5701_B0 ||
		    chiprevid == CHIPREV_ID_5701_B2 ||
		    chiprevid == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly.  If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
		}
	}

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	/* Broadcom's driver says that CIOBE multisplit has a bug */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
	    grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
		tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
		tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tp->tg3_flags2 |= TG3_FLG2_IS_5788;

	/* these are limited to 10/100 only */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
		tp->tg3_flags |= TG3_FLAG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
		       pci_name(tp->pdev), err);
		/* ... but do not return immediately ... */
	}

	tg3_read_partno(tp);

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
		else
			tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
		tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
	else
		tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
				  TG3_FLAG_USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
	else
		tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;

	/* 5700 BX chips need to have their TX producer index mailboxes
	 * written twice to workaround a bug.
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
		tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
	else
		tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;

	/* It seems all chips can get confused if TX buffers
	 * straddle the 4GB address boundary in some cases.
	 */
	tp->dev->hard_start_xmit = tg3_start_xmit;

	tp->rx_offset = 2;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
		tp->rx_offset = 0;

	/* By default, disable wake-on-lan.  User can change this
	 * using ETHTOOL_SWOL.
	 */
	tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;

	return err;
}

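/* Minimal sketch of the "read back to post the write" idea referred to by
 * the TG3_FLAG_MBOX_WRITE_REORDER comment in tg3_get_invariants() above.
 * The function name here is illustrative only; the driver's real mailbox
 * accessors are defined earlier in this file.  The point is simply that a
 * readl() of the same register after the writel() forces the write out of
 * any host-bridge posting buffers before the CPU continues.
 */
static inline void tg3_example_mbox_write(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(tp->regs + off);	/* flush the posted write in order */
}
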
#ifdef CONFIG_SPARC64
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct pcidev_cookie *pcp = pdev->sysdata;

	if (pcp != NULL) {
		int node = pcp->prom_node;

		if (prom_getproplen(node, "local-mac-address") == 6) {
			prom_getproperty(node, "local-mac-address",
					 dev->dev_addr, 6);
			return 0;
		}
	}
	return -ENODEV;
}

static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	return 0;
}
#endif

static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;

#ifdef CONFIG_SPARC64
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
	    !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	}

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;
	}
	/* Next, try NVRAM. */
	else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
		 !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
		 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
		dev->dev_addr[0] = ((hi >> 16) & 0xff);
		dev->dev_addr[1] = ((hi >> 24) & 0xff);
		dev->dev_addr[2] = ((lo >>  0) & 0xff);
		dev->dev_addr[3] = ((lo >>  8) & 0xff);
		dev->dev_addr[4] = ((lo >> 16) & 0xff);
		dev->dev_addr[5] = ((lo >> 24) & 0xff);
	}
	/* Finally just fetch it out of the MAC control regs. */
	else {
		hi = tr32(MAC_ADDR_0_HIGH);
		lo = tr32(MAC_ADDR_0_LOW);

		dev->dev_addr[5] = lo & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[1] = hi & 0xff;
		dev->dev_addr[0] = (hi >> 8) & 0xff;
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC64
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	return 0;
}

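/* Example only (not used by the driver): the inverse of the unpacking done
 * in tg3_get_device_address() above, showing how a 6-byte station address
 * maps onto the MAC_ADDR_0_HIGH/MAC_ADDR_0_LOW register pair.  HIGH holds
 * bytes 0-1 in its low 16 bits; LOW holds bytes 2-5 with byte 2 in the most
 * significant position.
 */
static inline void tg3_example_mac_addr_regs(const u8 *addr, u32 *hi, u32 *lo)
{
	*hi = ((u32) addr[0] << 8) | addr[1];
	*lo = ((u32) addr[2] << 24) | ((u32) addr[3] << 16) |
	      ((u32) addr[4] <<  8) | addr[5];
}
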
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf,
				     dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}

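/* Example only: the access pattern used in the descriptor loop above for
 * poking a word into NIC SRAM from PCI config space.  TG3PCI_MEM_WIN_BASE_ADDR
 * selects the target SRAM offset and TG3PCI_MEM_WIN_DATA carries the value;
 * the base register is cleared again afterwards, as tg3_do_test_dma() does.
 */
static inline void tg3_example_sram_write_cfg(struct tg3 *tp, u32 sram_off, u32 val)
{
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, sram_off);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
}
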
#define TEST_BUFFER_SIZE	0x400

static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf;
	int cacheline_size;
	u8 byte;
	int ret = 0;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	switch (cacheline_size) {
	case 16:
	case 32:
	case 64:
	case 128:
		if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
		    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
			tp->dma_rwctrl |=
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
			break;
		} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
			tp->dma_rwctrl &=
				~(DMA_RWCTRL_PCI_WRITE_CMD);
			tp->dma_rwctrl |=
				DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	case 256:
		if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
		    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
			tp->dma_rwctrl |=
				DMA_RWCTRL_WRITE_BNDRY_256;
		else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
			tp->dma_rwctrl |=
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
		break;
	}

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);

			if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			/* Set bit 23 to re-enable PCIX hw bug fix */
			tp->dma_rwctrl |= 0x009f0000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;

			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
		}

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
			    DMA_RWCTRL_WRITE_BNDRY_DISAB) {
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			break;
		}
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}

static void __devinit tg3_init_link_config(struct tg3 *tp)
{
	tp->link_config.advertising =
		(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
		 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
		 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
		 ADVERTISED_Autoneg | ADVERTISED_MII);
	tp->link_config.speed = SPEED_INVALID;
	tp->link_config.duplex = DUPLEX_INVALID;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	netif_carrier_off(tp->dev);
	tp->link_config.active_speed = SPEED_INVALID;
	tp->link_config.active_duplex = DUPLEX_INVALID;
	tp->link_config.phy_is_low_power = 0;
	tp->link_config.orig_speed = SPEED_INVALID;
	tp->link_config.orig_duplex = DUPLEX_INVALID;
	tp->link_config.orig_autoneg = AUTONEG_INVALID;
}

static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	tp->bufmgr_config.mbuf_read_dma_low_water =
		DEFAULT_MB_RDMA_LOW_WATER;
	tp->bufmgr_config.mbuf_mac_rx_low_water =
		DEFAULT_MB_MACRX_LOW_WATER;
	tp->bufmgr_config.mbuf_high_water =
		DEFAULT_MB_HIGH_WATER;

	tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
		DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
	tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
		DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
	tp->bufmgr_config.mbuf_high_water_jumbo =
		DEFAULT_MB_HIGH_WATER_JUMBO;

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}

static char * __devinit tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & PHY_ID_MASK) {
	case PHY_ID_BCM5400:	return "5400";
	case PHY_ID_BCM5401:	return "5401";
	case PHY_ID_BCM5411:	return "5411";
	case PHY_ID_BCM5701:	return "5701";
	case PHY_ID_BCM5703:	return "5703";
	case PHY_ID_BCM5704:	return "5704";
	case PHY_ID_BCM5705:	return "5705";
	case PHY_ID_BCM5750:	return "5750";
	case PHY_ID_BCM5752:	return "5752";
	case PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}

static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	if (!peer || peer == tp->pdev)
		return NULL;

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}

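/* Illustrative only: the devfn arithmetic behind tg3_find_5704_peer().
 * Masking with ~7 strips the PCI function number so the loop above can
 * probe functions 0-7 of the same slot.  Assuming the two 5704 ports sit
 * on functions 0 and 1 (which is what the probe loop ends up finding),
 * the peer's devfn could also be computed directly as below.
 */
static inline unsigned int tg3_example_peer_devfn(struct pci_dev *pdev)
{
	return PCI_DEVFN(PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn) ^ 1);
}
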
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	unsigned long tg3reg_base, tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pci_using_dac, pm_cap;

	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	/* Configure DMA attributes. */
	err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
	if (!err) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
		if (err < 0) {
			printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
			       "for consistent allocations\n");
			goto err_out_free_res;
		}
	} else {
		err = pci_set_dma_mask(pdev, 0xffffffffULL);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_free_res;
		}
		pci_using_dac = 0;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_LLTX;
#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
	dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
#endif

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->mac_mode = TG3_DEF_MAC_MODE;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->mi_mode = MAC_MI_MODE_BASE;
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->tx_lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task, tp);

	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (tp->regs == 0UL) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tg3_init_bufmgr_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	dev->poll = tg3_poll;
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
	}

#if TG3_TSO_SUPPORT != 0
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}

	/* TSO is off by default, user can enable using ethtool. */
#if 0
	if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
		dev->features |= NETIF_F_TSO;
#endif
#endif

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->pdev_peer = tg3_find_5704_peer(tp);

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		pci_save_state(tp->pdev);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_iounmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		dev->features &= ~NETIF_F_HIGHDMA;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	/* Now that we have fully setup the chip, save away a snapshot
	 * of the PCI config space.  We need to restore this after
	 * GRC_MISC_CFG core clock resets and some resume events.
	 */
	pci_save_state(tp->pdev);

	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
	       ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
		((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
		((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
	       ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
	       (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");

	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
	       "TSOcap[%d]\n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);

	return 0;

err_out_iounmap:
	iounmap(tp->regs);

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		unregister_netdev(dev);
		iounmap(tp->regs);
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	del_timer_sync(&tp->timer);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);
	tg3_disable_ints(tp);
	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	netif_device_detach(dev);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);
	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		spin_lock_irq(&tp->lock);
		spin_lock(&tp->tx_lock);

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

		spin_unlock(&tp->tx_lock);
		spin_unlock_irq(&tp->lock);
	}

	return err;
}

static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	pci_restore_state(tp->pdev);

	err = tg3_set_power_state(tp, 0);
	if (err)
		return err;

	netif_device_attach(dev);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_enable_ints(tp);

	tg3_netif_start(tp);

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	return 0;
}

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};

static int __init tg3_init(void)
{
	return pci_module_init(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);