/* [linux-2.6/next.git] drivers/net/tg3.c */
/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)		\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)		\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		118
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"April 22, 2011"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "mbuf_lwm_thresh_hit" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
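/* Register accessors.  Note (added comment): depending on chip bugs and bus
 * mode, tp->read32/tp->write32 are pointed either at the direct MMIO helpers
 * below or at the indirect PCI config-space variants that follow them.
 */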
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
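/* Indirect access (added comment): when memory-mapped access is unsafe,
 * registers are reached through an address/data window in PCI config space
 * (TG3PCI_REG_BASE_ADDR / TG3PCI_REG_DATA), serialized by tp->indirect_lock.
 */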
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
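/* Indirect mailbox writes (added comment): the two hottest mailboxes (RX
 * return ring and standard producer ring) get dedicated fast paths; the
 * 0x5600 added to other offsets appears to rebase the mailbox into the GRC
 * mailbox region (GRCMBOX_BASE in tg3.h).
 */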
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
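/* On-chip SRAM access (added comment): the NIC memory is reached through a
 * sliding 32-bit window - program the target offset into
 * TG3PCI_MEM_WIN_BASE_ADDR, then transfer the word via TG3PCI_MEM_WIN_DATA.
 * The window base is deliberately parked at zero afterwards.
 */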
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
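/* APE locks (added comment): the APE is a separate management processor on
 * some chips that shares resources with the host driver.  These per-resource
 * hardware semaphores arbitrate access - write a request bit, then poll the
 * grant register until ownership is confirmed.
 */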
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
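/* MII management (added comment): PHY registers are reached through the
 * MAC's MII interface - build a frame in MAC_MI_COM (PHY address, register,
 * command), kick it off, and poll MI_COM_BUSY until the transaction
 * completes.  Hardware auto-polling is paused around manual accesses and
 * restored afterwards.
 */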
#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB);
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
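/* Link update to firmware (added comment): the routine below writes the
 * FWCMD_NICDRV_LINK_UPDATE command, a length word, then MII register pairs
 * (BMCR/BMSR, ADVERTISE/LPA, CTRL1000/STAT1000, PHYADDR) packed two-per-word
 * into the firmware data mailbox before raising the driver event.
 */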
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
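/* Pause advertisement (added comment): the helpers below map the driver's
 * FLOW_CTRL_TX/FLOW_CTRL_RX bits onto the MII pause advertisement bits of
 * IEEE 802.3 Annex 28B - both directions -> PAUSE_CAP, TX-only ->
 * PAUSE_ASYM, RX-only -> PAUSE_CAP | PAUSE_ASYM; the 1000X variant uses the
 * equivalent fiber autoneg bits.
 */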
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
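/* OTP calibration (added comment): tp->phy_otp holds factory calibration
 * material read from the chip's OTP; the routine below unpacks the fields
 * and writes them into the PHY DSP tap/adjustment registers while SMDSP
 * access is enabled.
 */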
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		switch (val) {
		case TG3_CL45_D7_EEERES_STAT_LP_1000T:
			switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
			case ASIC_REV_5717:
			case ASIC_REV_5719:
			case ASIC_REV_57765:
				if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
					tg3_phydsp_write(tp, MII_TG3_DSP_TAP26,
							 0x0000);
					TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
				}
			}
			/* Fallthrough */
		case TG3_CL45_D7_EEERES_STAT_LP_100TX:
			tp->setlpicnt = 2;
		}
	}

	if (!tp->setlpicnt) {
		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
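/* DSP test pattern (added comment): the workaround below writes known test
 * patterns into each of the four PHY DSP channels and reads them back; a
 * mismatch signals, via *resetp, that the PHY needs another reset pass.
 */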
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
		if (err)
			return err;

		/* Block the PHY control access. */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
2021 /* This will reset the tigon3 PHY if there is no valid link;
2022 * whether a reset is forced is decided by the callers.
2023 */
2024 static int tg3_phy_reset(struct tg3 *tp)
2026 u32 val, cpmuctrl;
2027 int err;
2029 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2030 val = tr32(GRC_MISC_CFG);
2031 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2032 udelay(40);
2034 err = tg3_readphy(tp, MII_BMSR, &val);
2035 err |= tg3_readphy(tp, MII_BMSR, &val);
2036 if (err != 0)
2037 return -EBUSY;
2039 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2040 netif_carrier_off(tp->dev);
2041 tg3_link_report(tp);
2044 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2045 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2046 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2047 err = tg3_phy_reset_5703_4_5(tp);
2048 if (err)
2049 return err;
2050 goto out;
2053 cpmuctrl = 0;
2054 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2055 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2056 cpmuctrl = tr32(TG3_CPMU_CTRL);
2057 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2058 tw32(TG3_CPMU_CTRL,
2059 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2062 err = tg3_bmcr_reset(tp);
2063 if (err)
2064 return err;
2066 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2067 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2068 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2070 tw32(TG3_CPMU_CTRL, cpmuctrl);
2073 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2074 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2075 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2076 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2077 CPMU_LSPD_1000MB_MACCLK_12_5) {
2078 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2079 udelay(40);
2080 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2084 if (tg3_flag(tp, 5717_PLUS) &&
2085 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2086 return 0;
2088 tg3_phy_apply_otp(tp);
2090 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2091 tg3_phy_toggle_apd(tp, true);
2092 else
2093 tg3_phy_toggle_apd(tp, false);
2095 out:
2096 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2097 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2098 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2099 tg3_phydsp_write(tp, 0x000a, 0x0323);
2100 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2103 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2104 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2105 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2108 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2109 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2110 tg3_phydsp_write(tp, 0x000a, 0x310b);
2111 tg3_phydsp_write(tp, 0x201f, 0x9506);
2112 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2113 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2115 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2116 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2117 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2118 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2119 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2120 tg3_writephy(tp, MII_TG3_TEST1,
2121 MII_TG3_TEST1_TRIM_EN | 0x4);
2122 } else
2123 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2125 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2129 /* Set the Extended packet length bit (bit 14) on all chips
2130 * that support jumbo frames. */
2131 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2132 /* Cannot do read-modify-write on 5401 */
2133 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2134 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2135 /* Set bit 14 with read-modify-write to preserve other bits */
2136 err = tg3_phy_auxctl_read(tp,
2137 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2138 if (!err)
2139 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2140 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2143 /* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
2144 * jumbo frame transmission.
2145 */
2146 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2147 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2148 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2149 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2152 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2153 /* adjust output voltage */
2154 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2157 tg3_phy_toggle_automdix(tp, 1);
2158 tg3_phy_set_wirespeed(tp);
2159 return 0;
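/* Aux power (Vaux) handling below: on dual-port devices the decision
 * to stay on Vaux must account for the peer function as well, since
 * WOL or ASF on either port keeps the shared supply in use. The GPIO
 * sequences are therefore keyed off both tp and tp_peer flags.
 */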
2162 static void tg3_frob_aux_power(struct tg3 *tp)
2164 bool need_vaux = false;
2166 /* The GPIOs do something completely different on 57765. */
2167 if (!tg3_flag(tp, IS_NIC) ||
2168 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2169 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2170 return;
2172 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2173 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2174 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2175 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
2176 tp->pdev_peer != tp->pdev) {
2177 struct net_device *dev_peer;
2179 dev_peer = pci_get_drvdata(tp->pdev_peer);
2181 /* remove_one() may have been run on the peer. */
2182 if (dev_peer) {
2183 struct tg3 *tp_peer = netdev_priv(dev_peer);
2185 if (tg3_flag(tp_peer, INIT_COMPLETE))
2186 return;
2188 if (tg3_flag(tp_peer, WOL_ENABLE) ||
2189 tg3_flag(tp_peer, ENABLE_ASF))
2190 need_vaux = true;
2194 if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
2195 need_vaux = true;
2197 if (need_vaux) {
2198 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2199 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2200 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2201 (GRC_LCLCTRL_GPIO_OE0 |
2202 GRC_LCLCTRL_GPIO_OE1 |
2203 GRC_LCLCTRL_GPIO_OE2 |
2204 GRC_LCLCTRL_GPIO_OUTPUT0 |
2205 GRC_LCLCTRL_GPIO_OUTPUT1),
2206 100);
2207 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2208 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2209 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2210 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2211 GRC_LCLCTRL_GPIO_OE1 |
2212 GRC_LCLCTRL_GPIO_OE2 |
2213 GRC_LCLCTRL_GPIO_OUTPUT0 |
2214 GRC_LCLCTRL_GPIO_OUTPUT1 |
2215 tp->grc_local_ctrl;
2216 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2218 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2219 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2221 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2222 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2223 } else {
2224 u32 no_gpio2;
2225 u32 grc_local_ctrl = 0;
2227 /* Workaround to prevent drawing too much current. */
2228 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2229 ASIC_REV_5714) {
2230 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2231 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2232 grc_local_ctrl, 100);
2235 /* On 5753 and variants, GPIO2 cannot be used. */
2236 no_gpio2 = tp->nic_sram_data_cfg &
2237 NIC_SRAM_DATA_CFG_NO_GPIO2;
2239 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2240 GRC_LCLCTRL_GPIO_OE1 |
2241 GRC_LCLCTRL_GPIO_OE2 |
2242 GRC_LCLCTRL_GPIO_OUTPUT1 |
2243 GRC_LCLCTRL_GPIO_OUTPUT2;
2244 if (no_gpio2) {
2245 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2246 GRC_LCLCTRL_GPIO_OUTPUT2);
2248 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2249 grc_local_ctrl, 100);
2251 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2253 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2254 grc_local_ctrl, 100);
2256 if (!no_gpio2) {
2257 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2258 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2259 grc_local_ctrl, 100);
2262 } else {
2263 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2264 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2265 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2266 (GRC_LCLCTRL_GPIO_OE1 |
2267 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2269 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2270 GRC_LCLCTRL_GPIO_OE1, 100);
2272 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2273 (GRC_LCLCTRL_GPIO_OE1 |
2274 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2279 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2281 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2282 return 1;
2283 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2284 if (speed != SPEED_10)
2285 return 1;
2286 } else if (speed == SPEED_10)
2287 return 1;
2289 return 0;
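/* The helper above reports whether the 5700-family MAC needs
 * MAC_MODE_LINK_POLARITY for a given link speed; both the WOL
 * power-down path and tg3_setup_copper_phy() below consult it.
 */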
2292 static int tg3_setup_phy(struct tg3 *, int);
2294 #define RESET_KIND_SHUTDOWN 0
2295 #define RESET_KIND_INIT 1
2296 #define RESET_KIND_SUSPEND 2
2298 static void tg3_write_sig_post_reset(struct tg3 *, int);
2299 static int tg3_halt_cpu(struct tg3 *, u32);
2301 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2303 u32 val;
2305 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2306 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2307 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2308 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2310 sg_dig_ctrl |=
2311 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2312 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2313 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2315 return;
2318 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2319 tg3_bmcr_reset(tp);
2320 val = tr32(GRC_MISC_CFG);
2321 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2322 udelay(40);
2323 return;
2324 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2325 u32 phytest;
2326 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2327 u32 phy;
2329 tg3_writephy(tp, MII_ADVERTISE, 0);
2330 tg3_writephy(tp, MII_BMCR,
2331 BMCR_ANENABLE | BMCR_ANRESTART);
2333 tg3_writephy(tp, MII_TG3_FET_TEST,
2334 phytest | MII_TG3_FET_SHADOW_EN);
2335 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2336 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2337 tg3_writephy(tp,
2338 MII_TG3_FET_SHDW_AUXMODE4,
2339 phy);
2341 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2343 return;
2344 } else if (do_low_power) {
2345 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2346 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2348 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2349 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2350 MII_TG3_AUXCTL_PCTL_VREG_11V;
2351 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2354 /* The PHY should not be powered down on some chips because
2355 * of bugs.
2356 */
2357 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2358 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2359 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2360 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2361 return;
2363 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2364 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2365 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2366 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2367 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2368 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2371 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2374 /* tp->lock is held. */
2375 static int tg3_nvram_lock(struct tg3 *tp)
2377 if (tg3_flag(tp, NVRAM)) {
2378 int i;
2380 if (tp->nvram_lock_cnt == 0) {
2381 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2382 for (i = 0; i < 8000; i++) {
2383 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2384 break;
2385 udelay(20);
2387 if (i == 8000) {
2388 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2389 return -ENODEV;
2392 tp->nvram_lock_cnt++;
2394 return 0;
2397 /* tp->lock is held. */
2398 static void tg3_nvram_unlock(struct tg3 *tp)
2400 if (tg3_flag(tp, NVRAM)) {
2401 if (tp->nvram_lock_cnt > 0)
2402 tp->nvram_lock_cnt--;
2403 if (tp->nvram_lock_cnt == 0)
2404 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
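/* Illustrative sketch of the arbitration pair above (tg3_nvram_read()
 * below is the real in-tree user):
 *
 *	ret = tg3_nvram_lock(tp);	(take SWARB_GNT1, bump lock_cnt)
 *	if (ret)
 *		return ret;
 *	... access NVRAM registers ...
 *	tg3_nvram_unlock(tp);		(drop lock_cnt, release at zero)
 *
 * The count permits nested lock/unlock pairs; the hardware arbitration
 * bit is only touched on the 0 -> 1 and 1 -> 0 transitions.
 */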
2408 /* tp->lock is held. */
2409 static void tg3_enable_nvram_access(struct tg3 *tp)
2411 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2412 u32 nvaccess = tr32(NVRAM_ACCESS);
2414 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2418 /* tp->lock is held. */
2419 static void tg3_disable_nvram_access(struct tg3 *tp)
2421 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2422 u32 nvaccess = tr32(NVRAM_ACCESS);
2424 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2428 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2429 u32 offset, u32 *val)
2431 u32 tmp;
2432 int i;
2434 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2435 return -EINVAL;
2437 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2438 EEPROM_ADDR_DEVID_MASK |
2439 EEPROM_ADDR_READ);
2440 tw32(GRC_EEPROM_ADDR,
2441 tmp |
2442 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2443 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2444 EEPROM_ADDR_ADDR_MASK) |
2445 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2447 for (i = 0; i < 1000; i++) {
2448 tmp = tr32(GRC_EEPROM_ADDR);
2450 if (tmp & EEPROM_ADDR_COMPLETE)
2451 break;
2452 msleep(1);
2454 if (!(tmp & EEPROM_ADDR_COMPLETE))
2455 return -EBUSY;
2457 tmp = tr32(GRC_EEPROM_DATA);
2459 /*
2460 * The data will always be opposite the native endian
2461 * format. Perform a blind byteswap to compensate.
2462 */
2463 *val = swab32(tmp);
2465 return 0;
2468 #define NVRAM_CMD_TIMEOUT 10000
2470 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2472 int i;
2474 tw32(NVRAM_CMD, nvram_cmd);
2475 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2476 udelay(10);
2477 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2478 udelay(10);
2479 break;
2483 if (i == NVRAM_CMD_TIMEOUT)
2484 return -EBUSY;
2486 return 0;
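/* Worst-case wait above: NVRAM_CMD_TIMEOUT (10000) iterations of
 * udelay(10), i.e. roughly 100 ms before giving up with -EBUSY.
 */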
2489 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2491 if (tg3_flag(tp, NVRAM) &&
2492 tg3_flag(tp, NVRAM_BUFFERED) &&
2493 tg3_flag(tp, FLASH) &&
2494 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2495 (tp->nvram_jedecnum == JEDEC_ATMEL))
2497 addr = ((addr / tp->nvram_pagesize) <<
2498 ATMEL_AT45DB0X1B_PAGE_POS) +
2499 (addr % tp->nvram_pagesize);
2501 return addr;
2504 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2506 if (tg3_flag(tp, NVRAM) &&
2507 tg3_flag(tp, NVRAM_BUFFERED) &&
2508 tg3_flag(tp, FLASH) &&
2509 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2510 (tp->nvram_jedecnum == JEDEC_ATMEL))
2512 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2513 tp->nvram_pagesize) +
2514 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2516 return addr;
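/* Worked example of the Atmel translation above (illustrative only;
 * assumes the usual 264-byte page and ATMEL_AT45DB0X1B_PAGE_POS == 9):
 * logical address 1000 falls in page 1000 / 264 = 3 at offset 208, so
 * the physical address is (3 << 9) + 208 = 0x6d0. The logical-address
 * routine inverts this: (0x6d0 >> 9) * 264 + (0x6d0 & 0x1ff) = 1000.
 */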
2519 /* NOTE: Data read in from NVRAM is byteswapped according to
2520 * the byteswapping settings for all other register accesses.
2521 * tg3 devices are BE devices, so on a BE machine, the data
2522 * returned will be exactly as it is seen in NVRAM. On a LE
2523 * machine, the 32-bit value will be byteswapped.
2524 */
2525 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2527 int ret;
2529 if (!tg3_flag(tp, NVRAM))
2530 return tg3_nvram_read_using_eeprom(tp, offset, val);
2532 offset = tg3_nvram_phys_addr(tp, offset);
2534 if (offset > NVRAM_ADDR_MSK)
2535 return -EINVAL;
2537 ret = tg3_nvram_lock(tp);
2538 if (ret)
2539 return ret;
2541 tg3_enable_nvram_access(tp);
2543 tw32(NVRAM_ADDR, offset);
2544 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2545 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2547 if (ret == 0)
2548 *val = tr32(NVRAM_RDDATA);
2550 tg3_disable_nvram_access(tp);
2552 tg3_nvram_unlock(tp);
2554 return ret;
2557 /* Ensures NVRAM data is in bytestream format. */
2558 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2560 u32 v;
2561 int res = tg3_nvram_read(tp, offset, &v);
2562 if (!res)
2563 *val = cpu_to_be32(v);
2564 return res;
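/* Minimal usage sketch (hypothetical caller, for illustration only):
 *
 *	__be32 magic;
 *
 *	if (tg3_nvram_read_be32(tp, 0, &magic))
 *		return;
 *
 * On success, magic is in bytestream (big-endian) order regardless of
 * host endianness, per the NOTE above tg3_nvram_read().
 */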
2567 /* tp->lock is held. */
2568 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2570 u32 addr_high, addr_low;
2571 int i;
2573 addr_high = ((tp->dev->dev_addr[0] << 8) |
2574 tp->dev->dev_addr[1]);
2575 addr_low = ((tp->dev->dev_addr[2] << 24) |
2576 (tp->dev->dev_addr[3] << 16) |
2577 (tp->dev->dev_addr[4] << 8) |
2578 (tp->dev->dev_addr[5] << 0));
2579 for (i = 0; i < 4; i++) {
2580 if (i == 1 && skip_mac_1)
2581 continue;
2582 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2583 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2586 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2587 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2588 for (i = 0; i < 12; i++) {
2589 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2590 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2594 addr_high = (tp->dev->dev_addr[0] +
2595 tp->dev->dev_addr[1] +
2596 tp->dev->dev_addr[2] +
2597 tp->dev->dev_addr[3] +
2598 tp->dev->dev_addr[4] +
2599 tp->dev->dev_addr[5]) &
2600 TX_BACKOFF_SEED_MASK;
2601 tw32(MAC_TX_BACKOFF_SEED, addr_high);
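/* Example of the register packing above: for station address
 * 00:10:18:aa:bb:cc, addr_high = 0x00000010 (bytes 0-1) and
 * addr_low = 0x18aabbcc (bytes 2-5), mirrored into all four
 * MAC_ADDR_n slots (slot 1 optionally skipped).
 */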
2604 static void tg3_enable_register_access(struct tg3 *tp)
2606 /*
2607 * Make sure register accesses (indirect or otherwise) will function
2608 * correctly.
2609 */
2610 pci_write_config_dword(tp->pdev,
2611 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2614 static int tg3_power_up(struct tg3 *tp)
2616 tg3_enable_register_access(tp);
2618 pci_set_power_state(tp->pdev, PCI_D0);
2620 /* Switch out of Vaux if it is a NIC */
2621 if (tg3_flag(tp, IS_NIC))
2622 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2624 return 0;
2627 static int tg3_power_down_prepare(struct tg3 *tp)
2629 u32 misc_host_ctrl;
2630 bool device_should_wake, do_low_power;
2632 tg3_enable_register_access(tp);
2634 /* Restore the CLKREQ setting. */
2635 if (tg3_flag(tp, CLKREQ_BUG)) {
2636 u16 lnkctl;
2638 pci_read_config_word(tp->pdev,
2639 tp->pcie_cap + PCI_EXP_LNKCTL,
2640 &lnkctl);
2641 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2642 pci_write_config_word(tp->pdev,
2643 tp->pcie_cap + PCI_EXP_LNKCTL,
2644 lnkctl);
2647 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2648 tw32(TG3PCI_MISC_HOST_CTRL,
2649 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2651 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2652 tg3_flag(tp, WOL_ENABLE);
2654 if (tg3_flag(tp, USE_PHYLIB)) {
2655 do_low_power = false;
2656 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2657 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2658 struct phy_device *phydev;
2659 u32 phyid, advertising;
2661 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2663 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2665 tp->link_config.orig_speed = phydev->speed;
2666 tp->link_config.orig_duplex = phydev->duplex;
2667 tp->link_config.orig_autoneg = phydev->autoneg;
2668 tp->link_config.orig_advertising = phydev->advertising;
2670 advertising = ADVERTISED_TP |
2671 ADVERTISED_Pause |
2672 ADVERTISED_Autoneg |
2673 ADVERTISED_10baseT_Half;
2675 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2676 if (tg3_flag(tp, WOL_SPEED_100MB))
2677 advertising |=
2678 ADVERTISED_100baseT_Half |
2679 ADVERTISED_100baseT_Full |
2680 ADVERTISED_10baseT_Full;
2681 else
2682 advertising |= ADVERTISED_10baseT_Full;
2685 phydev->advertising = advertising;
2687 phy_start_aneg(phydev);
2689 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2690 if (phyid != PHY_ID_BCMAC131) {
2691 phyid &= PHY_BCM_OUI_MASK;
2692 if (phyid == PHY_BCM_OUI_1 ||
2693 phyid == PHY_BCM_OUI_2 ||
2694 phyid == PHY_BCM_OUI_3)
2695 do_low_power = true;
2698 } else {
2699 do_low_power = true;
2701 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2702 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2703 tp->link_config.orig_speed = tp->link_config.speed;
2704 tp->link_config.orig_duplex = tp->link_config.duplex;
2705 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2708 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2709 tp->link_config.speed = SPEED_10;
2710 tp->link_config.duplex = DUPLEX_HALF;
2711 tp->link_config.autoneg = AUTONEG_ENABLE;
2712 tg3_setup_phy(tp, 0);
2716 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2717 u32 val;
2719 val = tr32(GRC_VCPU_EXT_CTRL);
2720 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2721 } else if (!tg3_flag(tp, ENABLE_ASF)) {
2722 int i;
2723 u32 val;
2725 for (i = 0; i < 200; i++) {
2726 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2727 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2728 break;
2729 msleep(1);
2732 if (tg3_flag(tp, WOL_CAP))
2733 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2734 WOL_DRV_STATE_SHUTDOWN |
2735 WOL_DRV_WOL |
2736 WOL_SET_MAGIC_PKT);
2738 if (device_should_wake) {
2739 u32 mac_mode;
2741 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2742 if (do_low_power &&
2743 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2744 tg3_phy_auxctl_write(tp,
2745 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2746 MII_TG3_AUXCTL_PCTL_WOL_EN |
2747 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2748 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2749 udelay(40);
2752 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2753 mac_mode = MAC_MODE_PORT_MODE_GMII;
2754 else
2755 mac_mode = MAC_MODE_PORT_MODE_MII;
2757 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2758 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2759 ASIC_REV_5700) {
2760 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2761 SPEED_100 : SPEED_10;
2762 if (tg3_5700_link_polarity(tp, speed))
2763 mac_mode |= MAC_MODE_LINK_POLARITY;
2764 else
2765 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2767 } else {
2768 mac_mode = MAC_MODE_PORT_MODE_TBI;
2771 if (!tg3_flag(tp, 5750_PLUS))
2772 tw32(MAC_LED_CTRL, tp->led_ctrl);
2774 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2775 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2776 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2777 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2779 if (tg3_flag(tp, ENABLE_APE))
2780 mac_mode |= MAC_MODE_APE_TX_EN |
2781 MAC_MODE_APE_RX_EN |
2782 MAC_MODE_TDE_ENABLE;
2784 tw32_f(MAC_MODE, mac_mode);
2785 udelay(100);
2787 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2788 udelay(10);
2791 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2792 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2793 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2794 u32 base_val;
2796 base_val = tp->pci_clock_ctrl;
2797 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2798 CLOCK_CTRL_TXCLK_DISABLE);
2800 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2801 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2802 } else if (tg3_flag(tp, 5780_CLASS) ||
2803 tg3_flag(tp, CPMU_PRESENT) ||
2804 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2805 /* do nothing */
2806 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2807 u32 newbits1, newbits2;
2809 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2810 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2811 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2812 CLOCK_CTRL_TXCLK_DISABLE |
2813 CLOCK_CTRL_ALTCLK);
2814 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2815 } else if (tg3_flag(tp, 5705_PLUS)) {
2816 newbits1 = CLOCK_CTRL_625_CORE;
2817 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2818 } else {
2819 newbits1 = CLOCK_CTRL_ALTCLK;
2820 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2823 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2824 40);
2826 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2827 40);
2829 if (!tg3_flag(tp, 5705_PLUS)) {
2830 u32 newbits3;
2832 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2833 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2834 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2835 CLOCK_CTRL_TXCLK_DISABLE |
2836 CLOCK_CTRL_44MHZ_CORE);
2837 } else {
2838 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2841 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2842 tp->pci_clock_ctrl | newbits3, 40);
2846 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
2847 tg3_power_down_phy(tp, do_low_power);
2849 tg3_frob_aux_power(tp);
2851 /* Workaround for unstable PLL clock */
2852 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2853 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2854 u32 val = tr32(0x7d00);
2856 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2857 tw32(0x7d00, val);
2858 if (!tg3_flag(tp, ENABLE_ASF)) {
2859 int err;
2861 err = tg3_nvram_lock(tp);
2862 tg3_halt_cpu(tp, RX_CPU_BASE);
2863 if (!err)
2864 tg3_nvram_unlock(tp);
2868 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2870 return 0;
2873 static void tg3_power_down(struct tg3 *tp)
2875 tg3_power_down_prepare(tp);
2877 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
2878 pci_set_power_state(tp->pdev, PCI_D3hot);
2881 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2883 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2884 case MII_TG3_AUX_STAT_10HALF:
2885 *speed = SPEED_10;
2886 *duplex = DUPLEX_HALF;
2887 break;
2889 case MII_TG3_AUX_STAT_10FULL:
2890 *speed = SPEED_10;
2891 *duplex = DUPLEX_FULL;
2892 break;
2894 case MII_TG3_AUX_STAT_100HALF:
2895 *speed = SPEED_100;
2896 *duplex = DUPLEX_HALF;
2897 break;
2899 case MII_TG3_AUX_STAT_100FULL:
2900 *speed = SPEED_100;
2901 *duplex = DUPLEX_FULL;
2902 break;
2904 case MII_TG3_AUX_STAT_1000HALF:
2905 *speed = SPEED_1000;
2906 *duplex = DUPLEX_HALF;
2907 break;
2909 case MII_TG3_AUX_STAT_1000FULL:
2910 *speed = SPEED_1000;
2911 *duplex = DUPLEX_FULL;
2912 break;
2914 default:
2915 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2916 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2917 SPEED_10;
2918 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2919 DUPLEX_HALF;
2920 break;
2922 *speed = SPEED_INVALID;
2923 *duplex = DUPLEX_INVALID;
2924 break;
2928 static void tg3_phy_copper_begin(struct tg3 *tp)
2930 u32 new_adv;
2931 int i;
2933 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2934 /* Entering low power mode. Disable gigabit and
2935 * 100baseT advertisements.
2936 */
2937 tg3_writephy(tp, MII_TG3_CTRL, 0);
2939 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2940 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2941 if (tg3_flag(tp, WOL_SPEED_100MB))
2942 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2944 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2945 } else if (tp->link_config.speed == SPEED_INVALID) {
2946 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2947 tp->link_config.advertising &=
2948 ~(ADVERTISED_1000baseT_Half |
2949 ADVERTISED_1000baseT_Full);
2951 new_adv = ADVERTISE_CSMA;
2952 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2953 new_adv |= ADVERTISE_10HALF;
2954 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2955 new_adv |= ADVERTISE_10FULL;
2956 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2957 new_adv |= ADVERTISE_100HALF;
2958 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2959 new_adv |= ADVERTISE_100FULL;
2961 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2963 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2965 if (tp->link_config.advertising &
2966 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2967 new_adv = 0;
2968 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2969 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2970 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2971 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2972 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY) &&
2973 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2974 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2975 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2976 MII_TG3_CTRL_ENABLE_AS_MASTER);
2977 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2978 } else {
2979 tg3_writephy(tp, MII_TG3_CTRL, 0);
2981 } else {
2982 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2983 new_adv |= ADVERTISE_CSMA;
2985 /* Asking for a specific link mode. */
2986 if (tp->link_config.speed == SPEED_1000) {
2987 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2989 if (tp->link_config.duplex == DUPLEX_FULL)
2990 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2991 else
2992 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2993 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2994 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2995 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2996 MII_TG3_CTRL_ENABLE_AS_MASTER);
2997 } else {
2998 if (tp->link_config.speed == SPEED_100) {
2999 if (tp->link_config.duplex == DUPLEX_FULL)
3000 new_adv |= ADVERTISE_100FULL;
3001 else
3002 new_adv |= ADVERTISE_100HALF;
3003 } else {
3004 if (tp->link_config.duplex == DUPLEX_FULL)
3005 new_adv |= ADVERTISE_10FULL;
3006 else
3007 new_adv |= ADVERTISE_10HALF;
3009 tg3_writephy(tp, MII_ADVERTISE, new_adv);
3011 new_adv = 0;
3014 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
3017 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
3018 u32 val;
3020 tw32(TG3_CPMU_EEE_MODE,
3021 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3023 TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3025 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3026 case ASIC_REV_5717:
3027 case ASIC_REV_57765:
3028 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3029 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3030 MII_TG3_DSP_CH34TP2_HIBW01);
3031 /* Fall through */
3032 case ASIC_REV_5719:
3033 val = MII_TG3_DSP_TAP26_ALNOKO |
3034 MII_TG3_DSP_TAP26_RMRXSTO |
3035 MII_TG3_DSP_TAP26_OPCSINPT;
3036 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3039 val = 0;
3040 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3041 /* Advertise 100-BaseTX EEE ability */
3042 if (tp->link_config.advertising &
3043 ADVERTISED_100baseT_Full)
3044 val |= MDIO_AN_EEE_ADV_100TX;
3045 /* Advertise 1000-BaseT EEE ability */
3046 if (tp->link_config.advertising &
3047 ADVERTISED_1000baseT_Full)
3048 val |= MDIO_AN_EEE_ADV_1000T;
3050 tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3052 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3055 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3056 tp->link_config.speed != SPEED_INVALID) {
3057 u32 bmcr, orig_bmcr;
3059 tp->link_config.active_speed = tp->link_config.speed;
3060 tp->link_config.active_duplex = tp->link_config.duplex;
3062 bmcr = 0;
3063 switch (tp->link_config.speed) {
3064 default:
3065 case SPEED_10:
3066 break;
3068 case SPEED_100:
3069 bmcr |= BMCR_SPEED100;
3070 break;
3072 case SPEED_1000:
3073 bmcr |= TG3_BMCR_SPEED1000;
3074 break;
3077 if (tp->link_config.duplex == DUPLEX_FULL)
3078 bmcr |= BMCR_FULLDPLX;
3080 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3081 (bmcr != orig_bmcr)) {
3082 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3083 for (i = 0; i < 1500; i++) {
3084 u32 tmp;
3086 udelay(10);
3087 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3088 tg3_readphy(tp, MII_BMSR, &tmp))
3089 continue;
3090 if (!(tmp & BMSR_LSTATUS)) {
3091 udelay(40);
3092 break;
3095 tg3_writephy(tp, MII_BMCR, bmcr);
3096 udelay(40);
3098 } else {
3099 tg3_writephy(tp, MII_BMCR,
3100 BMCR_ANENABLE | BMCR_ANRESTART);
3104 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3106 int err;
3108 /* Turn off tap power management. */
3109 /* Set Extended packet length bit */
3110 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3112 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3113 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3114 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3115 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3116 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3118 udelay(40);
3120 return err;
3123 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3125 u32 adv_reg, all_mask = 0;
3127 if (mask & ADVERTISED_10baseT_Half)
3128 all_mask |= ADVERTISE_10HALF;
3129 if (mask & ADVERTISED_10baseT_Full)
3130 all_mask |= ADVERTISE_10FULL;
3131 if (mask & ADVERTISED_100baseT_Half)
3132 all_mask |= ADVERTISE_100HALF;
3133 if (mask & ADVERTISED_100baseT_Full)
3134 all_mask |= ADVERTISE_100FULL;
3136 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3137 return 0;
3139 if ((adv_reg & all_mask) != all_mask)
3140 return 0;
3141 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3142 u32 tg3_ctrl;
3144 all_mask = 0;
3145 if (mask & ADVERTISED_1000baseT_Half)
3146 all_mask |= ADVERTISE_1000HALF;
3147 if (mask & ADVERTISED_1000baseT_Full)
3148 all_mask |= ADVERTISE_1000FULL;
3150 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3151 return 0;
3153 if ((tg3_ctrl & all_mask) != all_mask)
3154 return 0;
3156 return 1;
3159 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3161 u32 curadv, reqadv;
3163 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3164 return 1;
3166 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3167 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3169 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3170 if (curadv != reqadv)
3171 return 0;
3173 if (tg3_flag(tp, PAUSE_AUTONEG))
3174 tg3_readphy(tp, MII_LPA, rmtadv);
3175 } else {
3176 /* Reprogram the advertisement register, even if it
3177 * does not affect the current link. If the link
3178 * gets renegotiated in the future, we can save an
3179 * additional renegotiation cycle by advertising
3180 * it correctly in the first place.
3181 */
3182 if (curadv != reqadv) {
3183 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3184 ADVERTISE_PAUSE_ASYM);
3185 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3189 return 1;
3192 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3194 int current_link_up;
3195 u32 bmsr, val;
3196 u32 lcl_adv, rmt_adv;
3197 u16 current_speed;
3198 u8 current_duplex;
3199 int i, err;
3201 tw32(MAC_EVENT, 0);
3203 tw32_f(MAC_STATUS,
3204 (MAC_STATUS_SYNC_CHANGED |
3205 MAC_STATUS_CFG_CHANGED |
3206 MAC_STATUS_MI_COMPLETION |
3207 MAC_STATUS_LNKSTATE_CHANGED));
3208 udelay(40);
3210 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3211 tw32_f(MAC_MI_MODE,
3212 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3213 udelay(80);
3216 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3218 /* Some third-party PHYs need to be reset on link going
3219 * down.
3220 */
3221 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3222 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3223 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3224 netif_carrier_ok(tp->dev)) {
3225 tg3_readphy(tp, MII_BMSR, &bmsr);
3226 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3227 !(bmsr & BMSR_LSTATUS))
3228 force_reset = 1;
3230 if (force_reset)
3231 tg3_phy_reset(tp);
3233 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3234 tg3_readphy(tp, MII_BMSR, &bmsr);
3235 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3236 !tg3_flag(tp, INIT_COMPLETE))
3237 bmsr = 0;
3239 if (!(bmsr & BMSR_LSTATUS)) {
3240 err = tg3_init_5401phy_dsp(tp);
3241 if (err)
3242 return err;
3244 tg3_readphy(tp, MII_BMSR, &bmsr);
3245 for (i = 0; i < 1000; i++) {
3246 udelay(10);
3247 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3248 (bmsr & BMSR_LSTATUS)) {
3249 udelay(40);
3250 break;
3254 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3255 TG3_PHY_REV_BCM5401_B0 &&
3256 !(bmsr & BMSR_LSTATUS) &&
3257 tp->link_config.active_speed == SPEED_1000) {
3258 err = tg3_phy_reset(tp);
3259 if (!err)
3260 err = tg3_init_5401phy_dsp(tp);
3261 if (err)
3262 return err;
3265 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3266 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3267 /* 5701 {A0,B0} CRC bug workaround */
3268 tg3_writephy(tp, 0x15, 0x0a75);
3269 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3270 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3271 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3274 /* Clear pending interrupts... */
3275 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3276 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3278 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3279 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3280 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3281 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3283 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3284 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3285 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3286 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3287 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3288 else
3289 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3292 current_link_up = 0;
3293 current_speed = SPEED_INVALID;
3294 current_duplex = DUPLEX_INVALID;
3296 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3297 err = tg3_phy_auxctl_read(tp,
3298 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3299 &val);
3300 if (!err && !(val & (1 << 10))) {
3301 tg3_phy_auxctl_write(tp,
3302 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3303 val | (1 << 10));
3304 goto relink;
3308 bmsr = 0;
3309 for (i = 0; i < 100; i++) {
3310 tg3_readphy(tp, MII_BMSR, &bmsr);
3311 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3312 (bmsr & BMSR_LSTATUS))
3313 break;
3314 udelay(40);
3317 if (bmsr & BMSR_LSTATUS) {
3318 u32 aux_stat, bmcr;
3320 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3321 for (i = 0; i < 2000; i++) {
3322 udelay(10);
3323 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3324 aux_stat)
3325 break;
3328 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3329 &current_speed,
3330 &current_duplex);
3332 bmcr = 0;
3333 for (i = 0; i < 200; i++) {
3334 tg3_readphy(tp, MII_BMCR, &bmcr);
3335 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3336 continue;
3337 if (bmcr && bmcr != 0x7fff)
3338 break;
3339 udelay(10);
3342 lcl_adv = 0;
3343 rmt_adv = 0;
3345 tp->link_config.active_speed = current_speed;
3346 tp->link_config.active_duplex = current_duplex;
3348 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3349 if ((bmcr & BMCR_ANENABLE) &&
3350 tg3_copper_is_advertising_all(tp,
3351 tp->link_config.advertising)) {
3352 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3353 &rmt_adv))
3354 current_link_up = 1;
3356 } else {
3357 if (!(bmcr & BMCR_ANENABLE) &&
3358 tp->link_config.speed == current_speed &&
3359 tp->link_config.duplex == current_duplex &&
3360 tp->link_config.flowctrl ==
3361 tp->link_config.active_flowctrl) {
3362 current_link_up = 1;
3366 if (current_link_up == 1 &&
3367 tp->link_config.active_duplex == DUPLEX_FULL)
3368 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3371 relink:
3372 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3373 tg3_phy_copper_begin(tp);
3375 tg3_readphy(tp, MII_BMSR, &bmsr);
3376 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3377 (bmsr & BMSR_LSTATUS))
3378 current_link_up = 1;
3381 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3382 if (current_link_up == 1) {
3383 if (tp->link_config.active_speed == SPEED_100 ||
3384 tp->link_config.active_speed == SPEED_10)
3385 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3386 else
3387 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3388 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3389 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3390 else
3391 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3393 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3394 if (tp->link_config.active_duplex == DUPLEX_HALF)
3395 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3397 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3398 if (current_link_up == 1 &&
3399 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3400 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3401 else
3402 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3405 /* ??? Without this setting Netgear GA302T PHY does not
3406 * ??? send/receive packets...
3407 */
3408 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3409 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3410 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3411 tw32_f(MAC_MI_MODE, tp->mi_mode);
3412 udelay(80);
3415 tw32_f(MAC_MODE, tp->mac_mode);
3416 udelay(40);
3418 tg3_phy_eee_adjust(tp, current_link_up);
3420 if (tg3_flag(tp, USE_LINKCHG_REG)) {
3421 /* Polled via timer. */
3422 tw32_f(MAC_EVENT, 0);
3423 } else {
3424 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3426 udelay(40);
3428 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3429 current_link_up == 1 &&
3430 tp->link_config.active_speed == SPEED_1000 &&
3431 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3432 udelay(120);
3433 tw32_f(MAC_STATUS,
3434 (MAC_STATUS_SYNC_CHANGED |
3435 MAC_STATUS_CFG_CHANGED));
3436 udelay(40);
3437 tg3_write_mem(tp,
3438 NIC_SRAM_FIRMWARE_MBOX,
3439 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3442 /* Prevent send BD (transmit buffer descriptor) corruption. */
3443 if (tg3_flag(tp, CLKREQ_BUG)) {
3444 u16 oldlnkctl, newlnkctl;
3446 pci_read_config_word(tp->pdev,
3447 tp->pcie_cap + PCI_EXP_LNKCTL,
3448 &oldlnkctl);
3449 if (tp->link_config.active_speed == SPEED_100 ||
3450 tp->link_config.active_speed == SPEED_10)
3451 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3452 else
3453 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3454 if (newlnkctl != oldlnkctl)
3455 pci_write_config_word(tp->pdev,
3456 tp->pcie_cap + PCI_EXP_LNKCTL,
3457 newlnkctl);
3460 if (current_link_up != netif_carrier_ok(tp->dev)) {
3461 if (current_link_up)
3462 netif_carrier_on(tp->dev);
3463 else
3464 netif_carrier_off(tp->dev);
3465 tg3_link_report(tp);
3468 return 0;
3471 struct tg3_fiber_aneginfo {
3472 int state;
3473 #define ANEG_STATE_UNKNOWN 0
3474 #define ANEG_STATE_AN_ENABLE 1
3475 #define ANEG_STATE_RESTART_INIT 2
3476 #define ANEG_STATE_RESTART 3
3477 #define ANEG_STATE_DISABLE_LINK_OK 4
3478 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3479 #define ANEG_STATE_ABILITY_DETECT 6
3480 #define ANEG_STATE_ACK_DETECT_INIT 7
3481 #define ANEG_STATE_ACK_DETECT 8
3482 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3483 #define ANEG_STATE_COMPLETE_ACK 10
3484 #define ANEG_STATE_IDLE_DETECT_INIT 11
3485 #define ANEG_STATE_IDLE_DETECT 12
3486 #define ANEG_STATE_LINK_OK 13
3487 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3488 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3490 u32 flags;
3491 #define MR_AN_ENABLE 0x00000001
3492 #define MR_RESTART_AN 0x00000002
3493 #define MR_AN_COMPLETE 0x00000004
3494 #define MR_PAGE_RX 0x00000008
3495 #define MR_NP_LOADED 0x00000010
3496 #define MR_TOGGLE_TX 0x00000020
3497 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3498 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3499 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3500 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3501 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3502 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3503 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3504 #define MR_TOGGLE_RX 0x00002000
3505 #define MR_NP_RX 0x00004000
3507 #define MR_LINK_OK 0x80000000
3509 unsigned long link_time, cur_time;
3511 u32 ability_match_cfg;
3512 int ability_match_count;
3514 char ability_match, idle_match, ack_match;
3516 u32 txconfig, rxconfig;
3517 #define ANEG_CFG_NP 0x00000080
3518 #define ANEG_CFG_ACK 0x00000040
3519 #define ANEG_CFG_RF2 0x00000020
3520 #define ANEG_CFG_RF1 0x00000010
3521 #define ANEG_CFG_PS2 0x00000001
3522 #define ANEG_CFG_PS1 0x00008000
3523 #define ANEG_CFG_HD 0x00004000
3524 #define ANEG_CFG_FD 0x00002000
3525 #define ANEG_CFG_INVAL 0x00001f06
3528 #define ANEG_OK 0
3529 #define ANEG_DONE 1
3530 #define ANEG_TIMER_ENAB 2
3531 #define ANEG_FAILED -1
3533 #define ANEG_STATE_SETTLE_TIME 10000
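/* Normal progression of the software 802.3z autoneg state machine
 * below, as a roadmap through the case arms:
 *
 *	AN_ENABLE -> RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT ->
 *	ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT ->
 *	COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT ->
 *	IDLE_DETECT -> LINK_OK
 *
 * An ability match with rxconfig == 0 drops back to AN_ENABLE;
 * ANEG_TIMER_ENAB tells the caller to keep ticking the machine.
 */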
3535 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3536 struct tg3_fiber_aneginfo *ap)
3538 u16 flowctrl;
3539 unsigned long delta;
3540 u32 rx_cfg_reg;
3541 int ret;
3543 if (ap->state == ANEG_STATE_UNKNOWN) {
3544 ap->rxconfig = 0;
3545 ap->link_time = 0;
3546 ap->cur_time = 0;
3547 ap->ability_match_cfg = 0;
3548 ap->ability_match_count = 0;
3549 ap->ability_match = 0;
3550 ap->idle_match = 0;
3551 ap->ack_match = 0;
3553 ap->cur_time++;
3555 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3556 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3558 if (rx_cfg_reg != ap->ability_match_cfg) {
3559 ap->ability_match_cfg = rx_cfg_reg;
3560 ap->ability_match = 0;
3561 ap->ability_match_count = 0;
3562 } else {
3563 if (++ap->ability_match_count > 1) {
3564 ap->ability_match = 1;
3565 ap->ability_match_cfg = rx_cfg_reg;
3568 if (rx_cfg_reg & ANEG_CFG_ACK)
3569 ap->ack_match = 1;
3570 else
3571 ap->ack_match = 0;
3573 ap->idle_match = 0;
3574 } else {
3575 ap->idle_match = 1;
3576 ap->ability_match_cfg = 0;
3577 ap->ability_match_count = 0;
3578 ap->ability_match = 0;
3579 ap->ack_match = 0;
3581 rx_cfg_reg = 0;
3584 ap->rxconfig = rx_cfg_reg;
3585 ret = ANEG_OK;
3587 switch (ap->state) {
3588 case ANEG_STATE_UNKNOWN:
3589 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3590 ap->state = ANEG_STATE_AN_ENABLE;
3592 /* fallthru */
3593 case ANEG_STATE_AN_ENABLE:
3594 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3595 if (ap->flags & MR_AN_ENABLE) {
3596 ap->link_time = 0;
3597 ap->cur_time = 0;
3598 ap->ability_match_cfg = 0;
3599 ap->ability_match_count = 0;
3600 ap->ability_match = 0;
3601 ap->idle_match = 0;
3602 ap->ack_match = 0;
3604 ap->state = ANEG_STATE_RESTART_INIT;
3605 } else {
3606 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3608 break;
3610 case ANEG_STATE_RESTART_INIT:
3611 ap->link_time = ap->cur_time;
3612 ap->flags &= ~(MR_NP_LOADED);
3613 ap->txconfig = 0;
3614 tw32(MAC_TX_AUTO_NEG, 0);
3615 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3616 tw32_f(MAC_MODE, tp->mac_mode);
3617 udelay(40);
3619 ret = ANEG_TIMER_ENAB;
3620 ap->state = ANEG_STATE_RESTART;
3622 /* fallthru */
3623 case ANEG_STATE_RESTART:
3624 delta = ap->cur_time - ap->link_time;
3625 if (delta > ANEG_STATE_SETTLE_TIME)
3626 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3627 else
3628 ret = ANEG_TIMER_ENAB;
3629 break;
3631 case ANEG_STATE_DISABLE_LINK_OK:
3632 ret = ANEG_DONE;
3633 break;
3635 case ANEG_STATE_ABILITY_DETECT_INIT:
3636 ap->flags &= ~(MR_TOGGLE_TX);
3637 ap->txconfig = ANEG_CFG_FD;
3638 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3639 if (flowctrl & ADVERTISE_1000XPAUSE)
3640 ap->txconfig |= ANEG_CFG_PS1;
3641 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3642 ap->txconfig |= ANEG_CFG_PS2;
3643 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3644 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3645 tw32_f(MAC_MODE, tp->mac_mode);
3646 udelay(40);
3648 ap->state = ANEG_STATE_ABILITY_DETECT;
3649 break;
3651 case ANEG_STATE_ABILITY_DETECT:
3652 if (ap->ability_match != 0 && ap->rxconfig != 0)
3653 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3654 break;
3656 case ANEG_STATE_ACK_DETECT_INIT:
3657 ap->txconfig |= ANEG_CFG_ACK;
3658 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3659 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3660 tw32_f(MAC_MODE, tp->mac_mode);
3661 udelay(40);
3663 ap->state = ANEG_STATE_ACK_DETECT;
3665 /* fallthru */
3666 case ANEG_STATE_ACK_DETECT:
3667 if (ap->ack_match != 0) {
3668 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3669 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3670 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3671 } else {
3672 ap->state = ANEG_STATE_AN_ENABLE;
3674 } else if (ap->ability_match != 0 &&
3675 ap->rxconfig == 0) {
3676 ap->state = ANEG_STATE_AN_ENABLE;
3678 break;
3680 case ANEG_STATE_COMPLETE_ACK_INIT:
3681 if (ap->rxconfig & ANEG_CFG_INVAL) {
3682 ret = ANEG_FAILED;
3683 break;
3685 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3686 MR_LP_ADV_HALF_DUPLEX |
3687 MR_LP_ADV_SYM_PAUSE |
3688 MR_LP_ADV_ASYM_PAUSE |
3689 MR_LP_ADV_REMOTE_FAULT1 |
3690 MR_LP_ADV_REMOTE_FAULT2 |
3691 MR_LP_ADV_NEXT_PAGE |
3692 MR_TOGGLE_RX |
3693 MR_NP_RX);
3694 if (ap->rxconfig & ANEG_CFG_FD)
3695 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3696 if (ap->rxconfig & ANEG_CFG_HD)
3697 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3698 if (ap->rxconfig & ANEG_CFG_PS1)
3699 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3700 if (ap->rxconfig & ANEG_CFG_PS2)
3701 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3702 if (ap->rxconfig & ANEG_CFG_RF1)
3703 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3704 if (ap->rxconfig & ANEG_CFG_RF2)
3705 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3706 if (ap->rxconfig & ANEG_CFG_NP)
3707 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3709 ap->link_time = ap->cur_time;
3711 ap->flags ^= (MR_TOGGLE_TX);
3712 if (ap->rxconfig & 0x0008)
3713 ap->flags |= MR_TOGGLE_RX;
3714 if (ap->rxconfig & ANEG_CFG_NP)
3715 ap->flags |= MR_NP_RX;
3716 ap->flags |= MR_PAGE_RX;
3718 ap->state = ANEG_STATE_COMPLETE_ACK;
3719 ret = ANEG_TIMER_ENAB;
3720 break;
3722 case ANEG_STATE_COMPLETE_ACK:
3723 if (ap->ability_match != 0 &&
3724 ap->rxconfig == 0) {
3725 ap->state = ANEG_STATE_AN_ENABLE;
3726 break;
3728 delta = ap->cur_time - ap->link_time;
3729 if (delta > ANEG_STATE_SETTLE_TIME) {
3730 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3731 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3732 } else {
3733 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3734 !(ap->flags & MR_NP_RX)) {
3735 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3736 } else {
3737 ret = ANEG_FAILED;
3741 break;
3743 case ANEG_STATE_IDLE_DETECT_INIT:
3744 ap->link_time = ap->cur_time;
3745 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3746 tw32_f(MAC_MODE, tp->mac_mode);
3747 udelay(40);
3749 ap->state = ANEG_STATE_IDLE_DETECT;
3750 ret = ANEG_TIMER_ENAB;
3751 break;
3753 case ANEG_STATE_IDLE_DETECT:
3754 if (ap->ability_match != 0 &&
3755 ap->rxconfig == 0) {
3756 ap->state = ANEG_STATE_AN_ENABLE;
3757 break;
3759 delta = ap->cur_time - ap->link_time;
3760 if (delta > ANEG_STATE_SETTLE_TIME) {
3761 /* XXX another gem from the Broadcom driver :( */
3762 ap->state = ANEG_STATE_LINK_OK;
3764 break;
3766 case ANEG_STATE_LINK_OK:
3767 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3768 ret = ANEG_DONE;
3769 break;
3771 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3772 /* ??? unimplemented */
3773 break;
3775 case ANEG_STATE_NEXT_PAGE_WAIT:
3776 /* ??? unimplemented */
3777 break;
3779 default:
3780 ret = ANEG_FAILED;
3781 break;
3784 return ret;
3787 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3789 int res = 0;
3790 struct tg3_fiber_aneginfo aninfo;
3791 int status = ANEG_FAILED;
3792 unsigned int tick;
3793 u32 tmp;
3795 tw32_f(MAC_TX_AUTO_NEG, 0);
3797 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3798 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3799 udelay(40);
3801 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3802 udelay(40);
3804 memset(&aninfo, 0, sizeof(aninfo));
3805 aninfo.flags |= MR_AN_ENABLE;
3806 aninfo.state = ANEG_STATE_UNKNOWN;
3807 aninfo.cur_time = 0;
3808 tick = 0;
3809 while (++tick < 195000) {
3810 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3811 if (status == ANEG_DONE || status == ANEG_FAILED)
3812 break;
3814 udelay(1);
3817 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3818 tw32_f(MAC_MODE, tp->mac_mode);
3819 udelay(40);
3821 *txflags = aninfo.txconfig;
3822 *rxflags = aninfo.flags;
3824 if (status == ANEG_DONE &&
3825 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3826 MR_LP_ADV_FULL_DUPLEX)))
3827 res = 1;
3829 return res;
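/* The polling loop above gives the state machine at most ~195 ms
 * (195000 ticks of udelay(1)) to finish; the result is reported as
 * success only for ANEG_DONE with at least one of the AN-complete,
 * link-OK, or LP-full-duplex flags set.
 */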
3832 static void tg3_init_bcm8002(struct tg3 *tp)
3834 u32 mac_status = tr32(MAC_STATUS);
3835 int i;
3837 /* Reset when initting first time or we have a link. */
3838 if (tg3_flag(tp, INIT_COMPLETE) &&
3839 !(mac_status & MAC_STATUS_PCS_SYNCED))
3840 return;
3842 /* Set PLL lock range. */
3843 tg3_writephy(tp, 0x16, 0x8007);
3845 /* SW reset */
3846 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3848 /* Wait for reset to complete. */
3849 /* XXX schedule_timeout() ... */
3850 for (i = 0; i < 500; i++)
3851 udelay(10);
3853 /* Config mode; select PMA/Ch 1 regs. */
3854 tg3_writephy(tp, 0x10, 0x8411);
3856 /* Enable auto-lock and comdet, select txclk for tx. */
3857 tg3_writephy(tp, 0x11, 0x0a10);
3859 tg3_writephy(tp, 0x18, 0x00a0);
3860 tg3_writephy(tp, 0x16, 0x41ff);
3862 /* Assert and deassert POR. */
3863 tg3_writephy(tp, 0x13, 0x0400);
3864 udelay(40);
3865 tg3_writephy(tp, 0x13, 0x0000);
3867 tg3_writephy(tp, 0x11, 0x0a50);
3868 udelay(40);
3869 tg3_writephy(tp, 0x11, 0x0a10);
3871 /* Wait for signal to stabilize */
3872 /* XXX schedule_timeout() ... */
3873 for (i = 0; i < 15000; i++)
3874 udelay(10);
3876 /* Deselect the channel register so we can read the PHYID
3877 * later.
3878 */
3879 tg3_writephy(tp, 0x10, 0x8011);
3882 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3884 u16 flowctrl;
3885 u32 sg_dig_ctrl, sg_dig_status;
3886 u32 serdes_cfg, expected_sg_dig_ctrl;
3887 int workaround, port_a;
3888 int current_link_up;
3890 serdes_cfg = 0;
3891 expected_sg_dig_ctrl = 0;
3892 workaround = 0;
3893 port_a = 1;
3894 current_link_up = 0;
3896 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3897 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3898 workaround = 1;
3899 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3900 port_a = 0;
3902 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3903 /* preserve bits 20-23 for voltage regulator */
3904 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3907 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3909 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3910 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3911 if (workaround) {
3912 u32 val = serdes_cfg;
3914 if (port_a)
3915 val |= 0xc010000;
3916 else
3917 val |= 0x4010000;
3918 tw32_f(MAC_SERDES_CFG, val);
3921 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3923 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3924 tg3_setup_flow_control(tp, 0, 0);
3925 current_link_up = 1;
3927 goto out;
3930 /* Want auto-negotiation. */
3931 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3933 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3934 if (flowctrl & ADVERTISE_1000XPAUSE)
3935 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3936 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3937 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3939 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3940 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3941 tp->serdes_counter &&
3942 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3943 MAC_STATUS_RCVD_CFG)) ==
3944 MAC_STATUS_PCS_SYNCED)) {
3945 tp->serdes_counter--;
3946 current_link_up = 1;
3947 goto out;
3949 restart_autoneg:
3950 if (workaround)
3951 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3952 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3953 udelay(5);
3954 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3956 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3957 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3958 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3959 MAC_STATUS_SIGNAL_DET)) {
3960 sg_dig_status = tr32(SG_DIG_STATUS);
3961 mac_status = tr32(MAC_STATUS);
3963 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3964 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3965 u32 local_adv = 0, remote_adv = 0;
3967 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3968 local_adv |= ADVERTISE_1000XPAUSE;
3969 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3970 local_adv |= ADVERTISE_1000XPSE_ASYM;
3972 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3973 remote_adv |= LPA_1000XPAUSE;
3974 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3975 remote_adv |= LPA_1000XPAUSE_ASYM;
3977 tg3_setup_flow_control(tp, local_adv, remote_adv);
3978 current_link_up = 1;
3979 tp->serdes_counter = 0;
3980 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3981 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3982 if (tp->serdes_counter)
3983 tp->serdes_counter--;
3984 else {
3985 if (workaround) {
3986 u32 val = serdes_cfg;
3988 if (port_a)
3989 val |= 0xc010000;
3990 else
3991 val |= 0x4010000;
3993 tw32_f(MAC_SERDES_CFG, val);
3996 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3997 udelay(40);
3999 /* Link parallel detection: the link is up only if we
4000 * have PCS_SYNC and are not receiving config code words.
4001 */
4002 mac_status = tr32(MAC_STATUS);
4003 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4004 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4005 tg3_setup_flow_control(tp, 0, 0);
4006 current_link_up = 1;
4007 tp->phy_flags |=
4008 TG3_PHYFLG_PARALLEL_DETECT;
4009 tp->serdes_counter =
4010 SERDES_PARALLEL_DET_TIMEOUT;
4011 } else
4012 goto restart_autoneg;
4015 } else {
4016 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4017 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4020 out:
4021 return current_link_up;
4024 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4026 int current_link_up = 0;
4028 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4029 goto out;
4031 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4032 u32 txflags, rxflags;
4033 int i;
4035 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4036 u32 local_adv = 0, remote_adv = 0;
4038 if (txflags & ANEG_CFG_PS1)
4039 local_adv |= ADVERTISE_1000XPAUSE;
4040 if (txflags & ANEG_CFG_PS2)
4041 local_adv |= ADVERTISE_1000XPSE_ASYM;
4043 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4044 remote_adv |= LPA_1000XPAUSE;
4045 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4046 remote_adv |= LPA_1000XPAUSE_ASYM;
4048 tg3_setup_flow_control(tp, local_adv, remote_adv);
4050 current_link_up = 1;
4052 for (i = 0; i < 30; i++) {
4053 udelay(20);
4054 tw32_f(MAC_STATUS,
4055 (MAC_STATUS_SYNC_CHANGED |
4056 MAC_STATUS_CFG_CHANGED));
4057 udelay(40);
4058 if ((tr32(MAC_STATUS) &
4059 (MAC_STATUS_SYNC_CHANGED |
4060 MAC_STATUS_CFG_CHANGED)) == 0)
4061 break;
4064 mac_status = tr32(MAC_STATUS);
4065 if (current_link_up == 0 &&
4066 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4067 !(mac_status & MAC_STATUS_RCVD_CFG))
4068 current_link_up = 1;
4069 } else {
4070 tg3_setup_flow_control(tp, 0, 0);
4072 /* Forcing 1000FD link up. */
4073 current_link_up = 1;
4075 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4076 udelay(40);
4078 tw32_f(MAC_MODE, tp->mac_mode);
4079 udelay(40);
4082 out:
4083 return current_link_up;
4086 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4088 u32 orig_pause_cfg;
4089 u16 orig_active_speed;
4090 u8 orig_active_duplex;
4091 u32 mac_status;
4092 int current_link_up;
4093 int i;
4095 orig_pause_cfg = tp->link_config.active_flowctrl;
4096 orig_active_speed = tp->link_config.active_speed;
4097 orig_active_duplex = tp->link_config.active_duplex;
4099 if (!tg3_flag(tp, HW_AUTONEG) &&
4100 netif_carrier_ok(tp->dev) &&
4101 tg3_flag(tp, INIT_COMPLETE)) {
4102 mac_status = tr32(MAC_STATUS);
4103 mac_status &= (MAC_STATUS_PCS_SYNCED |
4104 MAC_STATUS_SIGNAL_DET |
4105 MAC_STATUS_CFG_CHANGED |
4106 MAC_STATUS_RCVD_CFG);
4107 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4108 MAC_STATUS_SIGNAL_DET)) {
4109 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4110 MAC_STATUS_CFG_CHANGED));
4111 return 0;
4115 tw32_f(MAC_TX_AUTO_NEG, 0);
4117 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4118 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4119 tw32_f(MAC_MODE, tp->mac_mode);
4120 udelay(40);
4122 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4123 tg3_init_bcm8002(tp);
4125 /* Enable link change event even when serdes polling. */
4126 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4127 udelay(40);
4129 current_link_up = 0;
4130 mac_status = tr32(MAC_STATUS);
4132 if (tg3_flag(tp, HW_AUTONEG))
4133 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4134 else
4135 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4137 tp->napi[0].hw_status->status =
4138 (SD_STATUS_UPDATED |
4139 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4141 for (i = 0; i < 100; i++) {
4142 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4143 MAC_STATUS_CFG_CHANGED));
4144 udelay(5);
4145 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4146 MAC_STATUS_CFG_CHANGED |
4147 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4148 break;
4151 mac_status = tr32(MAC_STATUS);
4152 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4153 current_link_up = 0;
4154 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4155 tp->serdes_counter == 0) {
4156 tw32_f(MAC_MODE, (tp->mac_mode |
4157 MAC_MODE_SEND_CONFIGS));
4158 udelay(1);
4159 tw32_f(MAC_MODE, tp->mac_mode);
4163 if (current_link_up == 1) {
4164 tp->link_config.active_speed = SPEED_1000;
4165 tp->link_config.active_duplex = DUPLEX_FULL;
4166 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4167 LED_CTRL_LNKLED_OVERRIDE |
4168 LED_CTRL_1000MBPS_ON));
4169 } else {
4170 tp->link_config.active_speed = SPEED_INVALID;
4171 tp->link_config.active_duplex = DUPLEX_INVALID;
4172 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4173 LED_CTRL_LNKLED_OVERRIDE |
4174 LED_CTRL_TRAFFIC_OVERRIDE));
4177 if (current_link_up != netif_carrier_ok(tp->dev)) {
4178 if (current_link_up)
4179 netif_carrier_on(tp->dev);
4180 else
4181 netif_carrier_off(tp->dev);
4182 tg3_link_report(tp);
4183 } else {
4184 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4185 if (orig_pause_cfg != now_pause_cfg ||
4186 orig_active_speed != tp->link_config.active_speed ||
4187 orig_active_duplex != tp->link_config.active_duplex)
4188 tg3_link_report(tp);
4191 return 0;
4194 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4196 int current_link_up, err = 0;
4197 u32 bmsr, bmcr;
4198 u16 current_speed;
4199 u8 current_duplex;
4200 u32 local_adv, remote_adv;
4202 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4203 tw32_f(MAC_MODE, tp->mac_mode);
4204 udelay(40);
4206 tw32(MAC_EVENT, 0);
4208 tw32_f(MAC_STATUS,
4209 (MAC_STATUS_SYNC_CHANGED |
4210 MAC_STATUS_CFG_CHANGED |
4211 MAC_STATUS_MI_COMPLETION |
4212 MAC_STATUS_LNKSTATE_CHANGED));
4213 udelay(40);
4215 if (force_reset)
4216 tg3_phy_reset(tp);
4218 current_link_up = 0;
4219 current_speed = SPEED_INVALID;
4220 current_duplex = DUPLEX_INVALID;
4222 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4223 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4224 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4225 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4226 bmsr |= BMSR_LSTATUS;
4227 else
4228 bmsr &= ~BMSR_LSTATUS;
4231 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4233 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4234 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4235 /* do nothing, just check for link up at the end */
4236 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4237 u32 adv, new_adv;
4239 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4240 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4241 ADVERTISE_1000XPAUSE |
4242 ADVERTISE_1000XPSE_ASYM |
4243 ADVERTISE_SLCT);
4245 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4247 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4248 new_adv |= ADVERTISE_1000XHALF;
4249 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4250 new_adv |= ADVERTISE_1000XFULL;
4252 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4253 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4254 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4255 tg3_writephy(tp, MII_BMCR, bmcr);
4257 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4258 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4259 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4261 return err;
4263 } else {
4264 u32 new_bmcr;
4266 bmcr &= ~BMCR_SPEED1000;
4267 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4269 if (tp->link_config.duplex == DUPLEX_FULL)
4270 new_bmcr |= BMCR_FULLDPLX;
4272 if (new_bmcr != bmcr) {
4273 /* BMCR_SPEED1000 is a reserved bit that needs
4274 * to be set on write.
4275 */
4276 new_bmcr |= BMCR_SPEED1000;
4278 /* Force a linkdown */
4279 if (netif_carrier_ok(tp->dev)) {
4280 u32 adv;
4282 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4283 adv &= ~(ADVERTISE_1000XFULL |
4284 ADVERTISE_1000XHALF |
4285 ADVERTISE_SLCT);
4286 tg3_writephy(tp, MII_ADVERTISE, adv);
4287 tg3_writephy(tp, MII_BMCR, bmcr |
4288 BMCR_ANRESTART |
4289 BMCR_ANENABLE);
4290 udelay(10);
4291 netif_carrier_off(tp->dev);
4293 tg3_writephy(tp, MII_BMCR, new_bmcr);
4294 bmcr = new_bmcr;
4295 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4296 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4297 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4298 ASIC_REV_5714) {
4299 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4300 bmsr |= BMSR_LSTATUS;
4301 else
4302 bmsr &= ~BMSR_LSTATUS;
4304 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4308 if (bmsr & BMSR_LSTATUS) {
4309 current_speed = SPEED_1000;
4310 current_link_up = 1;
4311 if (bmcr & BMCR_FULLDPLX)
4312 current_duplex = DUPLEX_FULL;
4313 else
4314 current_duplex = DUPLEX_HALF;
4316 local_adv = 0;
4317 remote_adv = 0;
4319 if (bmcr & BMCR_ANENABLE) {
4320 u32 common;
4322 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4323 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4324 common = local_adv & remote_adv;
4325 if (common & (ADVERTISE_1000XHALF |
4326 ADVERTISE_1000XFULL)) {
4327 if (common & ADVERTISE_1000XFULL)
4328 current_duplex = DUPLEX_FULL;
4329 else
4330 current_duplex = DUPLEX_HALF;
4331 } else if (!tg3_flag(tp, 5780_CLASS)) {
4332 /* Link is up via parallel detect */
4333 } else {
4334 current_link_up = 0;
4339 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4340 tg3_setup_flow_control(tp, local_adv, remote_adv);
4342 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4343 if (tp->link_config.active_duplex == DUPLEX_HALF)
4344 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4346 tw32_f(MAC_MODE, tp->mac_mode);
4347 udelay(40);
4349 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4351 tp->link_config.active_speed = current_speed;
4352 tp->link_config.active_duplex = current_duplex;
4354 if (current_link_up != netif_carrier_ok(tp->dev)) {
4355 if (current_link_up)
4356 netif_carrier_on(tp->dev);
4357 else {
4358 netif_carrier_off(tp->dev);
4359 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4361 tg3_link_report(tp);
4363 return err;
4366 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4368 if (tp->serdes_counter) {
4369 /* Give autoneg time to complete. */
4370 tp->serdes_counter--;
4371 return;
4374 if (!netif_carrier_ok(tp->dev) &&
4375 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4376 u32 bmcr;
4378 tg3_readphy(tp, MII_BMCR, &bmcr);
4379 if (bmcr & BMCR_ANENABLE) {
4380 u32 phy1, phy2;
4382 /* Select shadow register 0x1f */
4383 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4384 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4386 /* Select expansion interrupt status register */
4387 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4388 MII_TG3_DSP_EXP1_INT_STAT);
4389 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4390 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4392 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4393 /* We have signal detect and not receiving
4394 * config code words, link is up by parallel
4395 * detection.
4396 */
4398 bmcr &= ~BMCR_ANENABLE;
4399 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4400 tg3_writephy(tp, MII_BMCR, bmcr);
4401 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4404 } else if (netif_carrier_ok(tp->dev) &&
4405 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4406 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4407 u32 phy2;
4409 /* Select expansion interrupt status register */
4410 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4411 MII_TG3_DSP_EXP1_INT_STAT);
4412 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4413 if (phy2 & 0x20) {
4414 u32 bmcr;
4416 /* Config code words received, turn on autoneg. */
4417 tg3_readphy(tp, MII_BMCR, &bmcr);
4418 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4420 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
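/* Editor's note (a sketch of the mechanism above, with the bit
 * meanings as the code uses them): "parallel detection" is the 802.3
 * fallback for a link partner that does not autonegotiate.  Signal
 * detect without incoming config code words (phy1 bit 0x10 set, phy2
 * bit 0x20 clear) indicates a forced-speed partner, so the driver
 * forces 1000/full itself; if config code words later appear (phy2
 * bit 0x20 set), autoneg is switched back on.
 */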
4426 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4428 u32 val;
4429 int err;
4431 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4432 err = tg3_setup_fiber_phy(tp, force_reset);
4433 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4434 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4435 else
4436 err = tg3_setup_copper_phy(tp, force_reset);
4438 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4439 u32 scale;
4441 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4442 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4443 scale = 65;
4444 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4445 scale = 6;
4446 else
4447 scale = 12;
4449 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4450 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4451 tw32(GRC_MISC_CFG, val);
4454 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4455 (6 << TX_LENGTHS_IPG_SHIFT);
4456 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4457 val |= tr32(MAC_TX_LENGTHS) &
4458 (TX_LENGTHS_JMB_FRM_LEN_MSK |
4459 TX_LENGTHS_CNT_DWN_VAL_MSK);
4461 if (tp->link_config.active_speed == SPEED_1000 &&
4462 tp->link_config.active_duplex == DUPLEX_HALF)
4463 tw32(MAC_TX_LENGTHS, val |
4464 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4465 else
4466 tw32(MAC_TX_LENGTHS, val |
4467 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4469 if (!tg3_flag(tp, 5705_PLUS)) {
4470 if (netif_carrier_ok(tp->dev)) {
4471 tw32(HOSTCC_STAT_COAL_TICKS,
4472 tp->coal.stats_block_coalesce_usecs);
4473 } else {
4474 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4478 if (tg3_flag(tp, ASPM_WORKAROUND)) {
4479 val = tr32(PCIE_PWR_MGMT_THRESH);
4480 if (!netif_carrier_ok(tp->dev))
4481 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4482 tp->pwrmgmt_thresh;
4483 else
4484 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4485 tw32(PCIE_PWR_MGMT_THRESH, val);
4488 return err;
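/* Editor's note (assumption, not from the source): the 0xff slot-time
 * programmed above for 1000/half presumably reflects the extended
 * 512-byte slot time that half-duplex gigabit needs for collision
 * detection, while all other speed/duplex combinations keep the
 * default value of 32.
 */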
4491 static inline int tg3_irq_sync(struct tg3 *tp)
4493 return tp->irq_sync;
4496 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4498 int i;
4500 dst = (u32 *)((u8 *)dst + off);
4501 for (i = 0; i < len; i += sizeof(u32))
4502 *dst++ = tr32(off + i);
4505 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4507 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4508 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4509 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4510 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4511 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4512 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4513 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4514 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4515 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4516 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4517 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4518 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4519 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4520 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4521 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4522 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4523 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4524 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4525 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4527 if (tg3_flag(tp, SUPPORT_MSIX))
4528 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4530 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4531 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4532 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4533 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4534 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4535 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4536 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4537 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4539 if (!tg3_flag(tp, 5705_PLUS)) {
4540 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4541 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4542 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4545 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4546 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4547 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4548 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4549 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4551 if (tg3_flag(tp, NVRAM))
4552 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4555 static void tg3_dump_state(struct tg3 *tp)
4557 int i;
4558 u32 *regs;
4560 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4561 if (!regs) {
4562 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4563 return;
4566 if (tg3_flag(tp, PCI_EXPRESS)) {
4567 /* Read up to but not including private PCI registers */
4568 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4569 regs[i / sizeof(u32)] = tr32(i);
4570 } else
4571 tg3_dump_legacy_regs(tp, regs);
4573 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4574 if (!regs[i + 0] && !regs[i + 1] &&
4575 !regs[i + 2] && !regs[i + 3])
4576 continue;
4578 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4579 i * 4,
4580 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4583 kfree(regs);
4585 for (i = 0; i < tp->irq_cnt; i++) {
4586 struct tg3_napi *tnapi = &tp->napi[i];
4588 /* SW status block */
4589 netdev_err(tp->dev,
4590 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4591 i,
4592 tnapi->hw_status->status,
4593 tnapi->hw_status->status_tag,
4594 tnapi->hw_status->rx_jumbo_consumer,
4595 tnapi->hw_status->rx_consumer,
4596 tnapi->hw_status->rx_mini_consumer,
4597 tnapi->hw_status->idx[0].rx_producer,
4598 tnapi->hw_status->idx[0].tx_consumer);
4600 netdev_err(tp->dev,
4601 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4602 i,
4603 tnapi->last_tag, tnapi->last_irq_tag,
4604 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4605 tnapi->rx_rcb_ptr,
4606 tnapi->prodring.rx_std_prod_idx,
4607 tnapi->prodring.rx_std_cons_idx,
4608 tnapi->prodring.rx_jmb_prod_idx,
4609 tnapi->prodring.rx_jmb_cons_idx);
4613 /* This is called whenever we suspect that the system chipset is re-
4614 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4615 * is bogus tx completions. We try to recover by setting the
4616 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4617 * in the workqueue.
4618 */
4619 static void tg3_tx_recover(struct tg3 *tp)
4621 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4622 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4624 netdev_warn(tp->dev,
4625 "The system may be re-ordering memory-mapped I/O "
4626 "cycles to the network device, attempting to recover. "
4627 "Please report the problem to the driver maintainer "
4628 "and include system chipset information.\n");
4630 spin_lock(&tp->lock);
4631 tg3_flag_set(tp, TX_RECOVERY_PENDING);
4632 spin_unlock(&tp->lock);
4635 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4637 /* Tell compiler to fetch tx indices from memory. */
4638 barrier();
4639 return tnapi->tx_pending -
4640 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
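/* Editor's sketch (hypothetical values): the arithmetic above relies
 * on unsigned wraparound.  With a 512-entry ring, tx_prod = 2 and
 * tx_cons = 508 give (2 - 508) & 511 = 6 descriptors in flight, so
 * tg3_tx_avail() returns tx_pending - 6.
 */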
4643 /* Tigon3 never reports partial packet sends. So we do not
4644 * need special logic to handle SKBs that have not had all
4645 * of their frags sent yet, like SunGEM does.
4646 */
4647 static void tg3_tx(struct tg3_napi *tnapi)
4649 struct tg3 *tp = tnapi->tp;
4650 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4651 u32 sw_idx = tnapi->tx_cons;
4652 struct netdev_queue *txq;
4653 int index = tnapi - tp->napi;
4655 if (tg3_flag(tp, ENABLE_TSS))
4656 index--;
4658 txq = netdev_get_tx_queue(tp->dev, index);
4660 while (sw_idx != hw_idx) {
4661 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4662 struct sk_buff *skb = ri->skb;
4663 int i, tx_bug = 0;
4665 if (unlikely(skb == NULL)) {
4666 tg3_tx_recover(tp);
4667 return;
4670 pci_unmap_single(tp->pdev,
4671 dma_unmap_addr(ri, mapping),
4672 skb_headlen(skb),
4673 PCI_DMA_TODEVICE);
4675 ri->skb = NULL;
4677 sw_idx = NEXT_TX(sw_idx);
4679 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4680 ri = &tnapi->tx_buffers[sw_idx];
4681 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4682 tx_bug = 1;
4684 pci_unmap_page(tp->pdev,
4685 dma_unmap_addr(ri, mapping),
4686 skb_shinfo(skb)->frags[i].size,
4687 PCI_DMA_TODEVICE);
4688 sw_idx = NEXT_TX(sw_idx);
4691 dev_kfree_skb(skb);
4693 if (unlikely(tx_bug)) {
4694 tg3_tx_recover(tp);
4695 return;
4699 tnapi->tx_cons = sw_idx;
4701 /* Need to make the tx_cons update visible to tg3_start_xmit()
4702 * before checking for netif_queue_stopped(). Without the
4703 * memory barrier, there is a small possibility that tg3_start_xmit()
4704 * will miss it and cause the queue to be stopped forever.
4705 */
4706 smp_mb();
4708 if (unlikely(netif_tx_queue_stopped(txq) &&
4709 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4710 __netif_tx_lock(txq, smp_processor_id());
4711 if (netif_tx_queue_stopped(txq) &&
4712 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4713 netif_tx_wake_queue(txq);
4714 __netif_tx_unlock(txq);
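/* Editor's note: the smp_mb() above pairs with the smp_mb() that
 * tg3_start_xmit() issues after netif_tx_stop_queue().  Each side
 * publishes its index update before testing the other side's state,
 * so either the producer sees the new tx_cons or the consumer sees
 * the stopped queue; the wakeup cannot be lost.
 */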
4718 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4720 if (!ri->skb)
4721 return;
4723 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4724 map_sz, PCI_DMA_FROMDEVICE);
4725 dev_kfree_skb_any(ri->skb);
4726 ri->skb = NULL;
4729 /* Returns size of skb allocated or < 0 on error.
4731 * We only need to fill in the address because the other members
4732 * of the RX descriptor are invariant, see tg3_init_rings.
4734 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4735 * posting buffers we only dirty the first cache line of the RX
4736 * descriptor (containing the address). Whereas for the RX status
4737 * buffers the cpu only reads the last cacheline of the RX descriptor
4738 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4739 */
4740 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4741 u32 opaque_key, u32 dest_idx_unmasked)
4743 struct tg3_rx_buffer_desc *desc;
4744 struct ring_info *map;
4745 struct sk_buff *skb;
4746 dma_addr_t mapping;
4747 int skb_size, dest_idx;
4749 switch (opaque_key) {
4750 case RXD_OPAQUE_RING_STD:
4751 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4752 desc = &tpr->rx_std[dest_idx];
4753 map = &tpr->rx_std_buffers[dest_idx];
4754 skb_size = tp->rx_pkt_map_sz;
4755 break;
4757 case RXD_OPAQUE_RING_JUMBO:
4758 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4759 desc = &tpr->rx_jmb[dest_idx].std;
4760 map = &tpr->rx_jmb_buffers[dest_idx];
4761 skb_size = TG3_RX_JMB_MAP_SZ;
4762 break;
4764 default:
4765 return -EINVAL;
4768 /* Do not overwrite any of the map or rp information
4769 * until we are sure we can commit to a new buffer.
4771 * Callers depend upon this behavior and assume that
4772 * we leave everything unchanged if we fail.
4773 */
4774 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4775 if (skb == NULL)
4776 return -ENOMEM;
4778 skb_reserve(skb, tp->rx_offset);
4780 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4781 PCI_DMA_FROMDEVICE);
4782 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4783 dev_kfree_skb(skb);
4784 return -EIO;
4787 map->skb = skb;
4788 dma_unmap_addr_set(map, mapping, mapping);
4790 desc->addr_hi = ((u64)mapping >> 32);
4791 desc->addr_lo = ((u64)mapping & 0xffffffff);
4793 return skb_size;
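/* Editor's sketch (hypothetical mapping): the address split above is a
 * plain 64-bit decomposition.  mapping = 0x0000000123456780 yields
 * addr_hi = 0x00000001 and addr_lo = 0x23456780, which the NIC
 * reassembles into the full DMA address.
 */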
4796 /* We only need to move over in the address because the other
4797 * members of the RX descriptor are invariant. See notes above
4798 * tg3_alloc_rx_skb for full details.
4799 */
4800 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4801 struct tg3_rx_prodring_set *dpr,
4802 u32 opaque_key, int src_idx,
4803 u32 dest_idx_unmasked)
4805 struct tg3 *tp = tnapi->tp;
4806 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4807 struct ring_info *src_map, *dest_map;
4808 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4809 int dest_idx;
4811 switch (opaque_key) {
4812 case RXD_OPAQUE_RING_STD:
4813 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4814 dest_desc = &dpr->rx_std[dest_idx];
4815 dest_map = &dpr->rx_std_buffers[dest_idx];
4816 src_desc = &spr->rx_std[src_idx];
4817 src_map = &spr->rx_std_buffers[src_idx];
4818 break;
4820 case RXD_OPAQUE_RING_JUMBO:
4821 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4822 dest_desc = &dpr->rx_jmb[dest_idx].std;
4823 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4824 src_desc = &spr->rx_jmb[src_idx].std;
4825 src_map = &spr->rx_jmb_buffers[src_idx];
4826 break;
4828 default:
4829 return;
4832 dest_map->skb = src_map->skb;
4833 dma_unmap_addr_set(dest_map, mapping,
4834 dma_unmap_addr(src_map, mapping));
4835 dest_desc->addr_hi = src_desc->addr_hi;
4836 dest_desc->addr_lo = src_desc->addr_lo;
4838 /* Ensure that the update to the skb happens after the physical
4839 * addresses have been transferred to the new BD location.
4840 */
4841 smp_wmb();
4843 src_map->skb = NULL;
4846 /* The RX ring scheme is composed of multiple rings which post fresh
4847 * buffers to the chip, and one special ring the chip uses to report
4848 * status back to the host.
4850 * The special ring reports the status of received packets to the
4851 * host. The chip does not write into the original descriptor the
4852 * RX buffer was obtained from. The chip simply takes the original
4853 * descriptor as provided by the host, updates the status and length
4854 * field, then writes this into the next status ring entry.
4856 * Each ring the host uses to post buffers to the chip is described
4857 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4858 * it is first placed into the on-chip ram. When the packet's length
4859 * is known, it walks down the TG3_BDINFO entries to select the ring.
4860 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4861 * which is within the range of the new packet's length is chosen.
4863 * The "separate ring for rx status" scheme may sound queer, but it makes
4864 * sense from a cache coherency perspective. If only the host writes
4865 * to the buffer post rings, and only the chip writes to the rx status
4866 * rings, then cache lines never move beyond shared-modified state.
4867 * If both the host and chip were to write into the same ring, cache line
4868 * eviction could occur since both entities want it in an exclusive state.
4869 */
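/* Editor's sketch of the scheme described above (illustrative only):
 *
 *   host --(std/jumbo producer rings: fresh buffers)--> chip
 *   chip --(return ring: length/flags + original opaque cookie)--> host
 *
 * The opaque cookie comes back unchanged, which is how tg3_rx() below
 * finds the ring_info entry for the completed buffer.
 */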
4870 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4872 struct tg3 *tp = tnapi->tp;
4873 u32 work_mask, rx_std_posted = 0;
4874 u32 std_prod_idx, jmb_prod_idx;
4875 u32 sw_idx = tnapi->rx_rcb_ptr;
4876 u16 hw_idx;
4877 int received;
4878 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4880 hw_idx = *(tnapi->rx_rcb_prod_idx);
4881 /*
4882 * We need to order the read of hw_idx and the read of
4883 * the opaque cookie.
4884 */
4885 rmb();
4886 work_mask = 0;
4887 received = 0;
4888 std_prod_idx = tpr->rx_std_prod_idx;
4889 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4890 while (sw_idx != hw_idx && budget > 0) {
4891 struct ring_info *ri;
4892 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4893 unsigned int len;
4894 struct sk_buff *skb;
4895 dma_addr_t dma_addr;
4896 u32 opaque_key, desc_idx, *post_ptr;
4898 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4899 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4900 if (opaque_key == RXD_OPAQUE_RING_STD) {
4901 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4902 dma_addr = dma_unmap_addr(ri, mapping);
4903 skb = ri->skb;
4904 post_ptr = &std_prod_idx;
4905 rx_std_posted++;
4906 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4907 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4908 dma_addr = dma_unmap_addr(ri, mapping);
4909 skb = ri->skb;
4910 post_ptr = &jmb_prod_idx;
4911 } else
4912 goto next_pkt_nopost;
4914 work_mask |= opaque_key;
4916 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4917 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4918 drop_it:
4919 tg3_recycle_rx(tnapi, tpr, opaque_key,
4920 desc_idx, *post_ptr);
4921 drop_it_no_recycle:
4922 /* Other statistics kept track of by card. */
4923 tp->rx_dropped++;
4924 goto next_pkt;
4927 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4928 ETH_FCS_LEN;
4930 if (len > TG3_RX_COPY_THRESH(tp)) {
4931 int skb_size;
4933 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4934 *post_ptr);
4935 if (skb_size < 0)
4936 goto drop_it;
4938 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4939 PCI_DMA_FROMDEVICE);
4941 /* Ensure that the update to the skb happens
4942 * after the usage of the old DMA mapping.
4943 */
4944 smp_wmb();
4946 ri->skb = NULL;
4948 skb_put(skb, len);
4949 } else {
4950 struct sk_buff *copy_skb;
4952 tg3_recycle_rx(tnapi, tpr, opaque_key,
4953 desc_idx, *post_ptr);
4955 copy_skb = netdev_alloc_skb(tp->dev, len +
4956 TG3_RAW_IP_ALIGN);
4957 if (copy_skb == NULL)
4958 goto drop_it_no_recycle;
4960 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4961 skb_put(copy_skb, len);
4962 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4963 skb_copy_from_linear_data(skb, copy_skb->data, len);
4964 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4966 /* We'll reuse the original ring buffer. */
4967 skb = copy_skb;
4970 if ((tp->dev->features & NETIF_F_RXCSUM) &&
4971 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4972 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4973 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4974 skb->ip_summed = CHECKSUM_UNNECESSARY;
4975 else
4976 skb_checksum_none_assert(skb);
4978 skb->protocol = eth_type_trans(skb, tp->dev);
4980 if (len > (tp->dev->mtu + ETH_HLEN) &&
4981 skb->protocol != htons(ETH_P_8021Q)) {
4982 dev_kfree_skb(skb);
4983 goto drop_it_no_recycle;
4986 if (desc->type_flags & RXD_FLAG_VLAN &&
4987 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
4988 __vlan_hwaccel_put_tag(skb,
4989 desc->err_vlan & RXD_VLAN_MASK);
4991 napi_gro_receive(&tnapi->napi, skb);
4993 received++;
4994 budget--;
4996 next_pkt:
4997 (*post_ptr)++;
4999 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5000 tpr->rx_std_prod_idx = std_prod_idx &
5001 tp->rx_std_ring_mask;
5002 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5003 tpr->rx_std_prod_idx);
5004 work_mask &= ~RXD_OPAQUE_RING_STD;
5005 rx_std_posted = 0;
5007 next_pkt_nopost:
5008 sw_idx++;
5009 sw_idx &= tp->rx_ret_ring_mask;
5011 /* Refresh hw_idx to see if there is new work */
5012 if (sw_idx == hw_idx) {
5013 hw_idx = *(tnapi->rx_rcb_prod_idx);
5014 rmb();
5018 /* ACK the status ring. */
5019 tnapi->rx_rcb_ptr = sw_idx;
5020 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5022 /* Refill RX ring(s). */
5023 if (!tg3_flag(tp, ENABLE_RSS)) {
5024 if (work_mask & RXD_OPAQUE_RING_STD) {
5025 tpr->rx_std_prod_idx = std_prod_idx &
5026 tp->rx_std_ring_mask;
5027 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5028 tpr->rx_std_prod_idx);
5030 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5031 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5032 tp->rx_jmb_ring_mask;
5033 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5034 tpr->rx_jmb_prod_idx);
5036 mmiowb();
5037 } else if (work_mask) {
5038 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5039 * updated before the producer indices can be updated.
5040 */
5041 smp_wmb();
5043 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5044 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5046 if (tnapi != &tp->napi[1])
5047 napi_schedule(&tp->napi[1].napi);
5050 return received;
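/* Editor's note: in the RSS case above, tg3_rx() does not write the
 * hardware producer mailboxes itself.  Each vector's ring only records
 * its new producer indices, and napi[1] is scheduled to consolidate
 * them into napi[0]'s ring via tg3_rx_prodring_xfer() from
 * tg3_poll_work().
 */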
5053 static void tg3_poll_link(struct tg3 *tp)
5055 /* handle link change and other phy events */
5056 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5057 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5059 if (sblk->status & SD_STATUS_LINK_CHG) {
5060 sblk->status = SD_STATUS_UPDATED |
5061 (sblk->status & ~SD_STATUS_LINK_CHG);
5062 spin_lock(&tp->lock);
5063 if (tg3_flag(tp, USE_PHYLIB)) {
5064 tw32_f(MAC_STATUS,
5065 (MAC_STATUS_SYNC_CHANGED |
5066 MAC_STATUS_CFG_CHANGED |
5067 MAC_STATUS_MI_COMPLETION |
5068 MAC_STATUS_LNKSTATE_CHANGED));
5069 udelay(40);
5070 } else
5071 tg3_setup_phy(tp, 0);
5072 spin_unlock(&tp->lock);
5077 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5078 struct tg3_rx_prodring_set *dpr,
5079 struct tg3_rx_prodring_set *spr)
5081 u32 si, di, cpycnt, src_prod_idx;
5082 int i, err = 0;
5084 while (1) {
5085 src_prod_idx = spr->rx_std_prod_idx;
5087 /* Make sure updates to the rx_std_buffers[] entries and the
5088 * standard producer index are seen in the correct order.
5089 */
5090 smp_rmb();
5092 if (spr->rx_std_cons_idx == src_prod_idx)
5093 break;
5095 if (spr->rx_std_cons_idx < src_prod_idx)
5096 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5097 else
5098 cpycnt = tp->rx_std_ring_mask + 1 -
5099 spr->rx_std_cons_idx;
5101 cpycnt = min(cpycnt,
5102 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5104 si = spr->rx_std_cons_idx;
5105 di = dpr->rx_std_prod_idx;
5107 for (i = di; i < di + cpycnt; i++) {
5108 if (dpr->rx_std_buffers[i].skb) {
5109 cpycnt = i - di;
5110 err = -ENOSPC;
5111 break;
5115 if (!cpycnt)
5116 break;
5118 /* Ensure that updates to the rx_std_buffers ring and the
5119 * shadowed hardware producer ring from tg3_recycle_skb() are
5120 * ordered correctly WRT the skb check above.
5121 */
5122 smp_rmb();
5124 memcpy(&dpr->rx_std_buffers[di],
5125 &spr->rx_std_buffers[si],
5126 cpycnt * sizeof(struct ring_info));
5128 for (i = 0; i < cpycnt; i++, di++, si++) {
5129 struct tg3_rx_buffer_desc *sbd, *dbd;
5130 sbd = &spr->rx_std[si];
5131 dbd = &dpr->rx_std[di];
5132 dbd->addr_hi = sbd->addr_hi;
5133 dbd->addr_lo = sbd->addr_lo;
5136 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5137 tp->rx_std_ring_mask;
5138 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5139 tp->rx_std_ring_mask;
5142 while (1) {
5143 src_prod_idx = spr->rx_jmb_prod_idx;
5145 /* Make sure updates to the rx_jmb_buffers[] entries and
5146 * the jumbo producer index are seen in the correct order.
5147 */
5148 smp_rmb();
5150 if (spr->rx_jmb_cons_idx == src_prod_idx)
5151 break;
5153 if (spr->rx_jmb_cons_idx < src_prod_idx)
5154 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5155 else
5156 cpycnt = tp->rx_jmb_ring_mask + 1 -
5157 spr->rx_jmb_cons_idx;
5159 cpycnt = min(cpycnt,
5160 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5162 si = spr->rx_jmb_cons_idx;
5163 di = dpr->rx_jmb_prod_idx;
5165 for (i = di; i < di + cpycnt; i++) {
5166 if (dpr->rx_jmb_buffers[i].skb) {
5167 cpycnt = i - di;
5168 err = -ENOSPC;
5169 break;
5173 if (!cpycnt)
5174 break;
5176 /* Ensure that updates to the rx_jmb_buffers ring and the
5177 * shadowed hardware producer ring from tg3_recycle_skb() are
5178 * ordered correctly WRT the skb check above.
5179 */
5180 smp_rmb();
5182 memcpy(&dpr->rx_jmb_buffers[di],
5183 &spr->rx_jmb_buffers[si],
5184 cpycnt * sizeof(struct ring_info));
5186 for (i = 0; i < cpycnt; i++, di++, si++) {
5187 struct tg3_rx_buffer_desc *sbd, *dbd;
5188 sbd = &spr->rx_jmb[si].std;
5189 dbd = &dpr->rx_jmb[di].std;
5190 dbd->addr_hi = sbd->addr_hi;
5191 dbd->addr_lo = sbd->addr_lo;
5194 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5195 tp->rx_jmb_ring_mask;
5196 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5197 tp->rx_jmb_ring_mask;
5200 return err;
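/* Editor's note: the transfer above proceeds in chunks that never wrap
 * the ring; a wrap simply ends the current chunk and the while loop
 * takes another pass.  -ENOSPC means a destination slot still holds an
 * skb, in which case the caller nudges the coalescing engine
 * (tw32_f(HOSTCC_MODE, tp->coal_now)) so the backlog drains and the
 * transfer can be retried later.
 */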
5203 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5205 struct tg3 *tp = tnapi->tp;
5207 /* run TX completion thread */
5208 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5209 tg3_tx(tnapi);
5210 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5211 return work_done;
5214 /* run RX thread, within the bounds set by NAPI.
5215 * All RX "locking" is done by ensuring outside
5216 * code synchronizes with tg3->napi.poll()
5217 */
5218 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5219 work_done += tg3_rx(tnapi, budget - work_done);
5221 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5222 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5223 int i, err = 0;
5224 u32 std_prod_idx = dpr->rx_std_prod_idx;
5225 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5227 for (i = 1; i < tp->irq_cnt; i++)
5228 err |= tg3_rx_prodring_xfer(tp, dpr,
5229 &tp->napi[i].prodring);
5231 wmb();
5233 if (std_prod_idx != dpr->rx_std_prod_idx)
5234 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5235 dpr->rx_std_prod_idx);
5237 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5238 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5239 dpr->rx_jmb_prod_idx);
5241 mmiowb();
5243 if (err)
5244 tw32_f(HOSTCC_MODE, tp->coal_now);
5247 return work_done;
5250 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5252 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5253 struct tg3 *tp = tnapi->tp;
5254 int work_done = 0;
5255 struct tg3_hw_status *sblk = tnapi->hw_status;
5257 while (1) {
5258 work_done = tg3_poll_work(tnapi, work_done, budget);
5260 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5261 goto tx_recovery;
5263 if (unlikely(work_done >= budget))
5264 break;
5266 /* tp->last_tag is used in tg3_int_reenable() below
5267 * to tell the hw how much work has been processed,
5268 * so we must read it before checking for more work.
5269 */
5270 tnapi->last_tag = sblk->status_tag;
5271 tnapi->last_irq_tag = tnapi->last_tag;
5272 rmb();
5274 /* check for RX/TX work to do */
5275 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5276 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5277 napi_complete(napi);
5278 /* Reenable interrupts. */
5279 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5280 mmiowb();
5281 break;
5285 return work_done;
5287 tx_recovery:
5288 /* work_done is guaranteed to be less than budget. */
5289 napi_complete(napi);
5290 schedule_work(&tp->reset_task);
5291 return work_done;
5294 static void tg3_process_error(struct tg3 *tp)
5296 u32 val;
5297 bool real_error = false;
5299 if (tg3_flag(tp, ERROR_PROCESSED))
5300 return;
5302 /* Check Flow Attention register */
5303 val = tr32(HOSTCC_FLOW_ATTN);
5304 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5305 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5306 real_error = true;
5309 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5310 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5311 real_error = true;
5314 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5315 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
5316 real_error = true;
5319 if (!real_error)
5320 return;
5322 tg3_dump_state(tp);
5324 tg3_flag_set(tp, ERROR_PROCESSED);
5325 schedule_work(&tp->reset_task);
5328 static int tg3_poll(struct napi_struct *napi, int budget)
5330 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5331 struct tg3 *tp = tnapi->tp;
5332 int work_done = 0;
5333 struct tg3_hw_status *sblk = tnapi->hw_status;
5335 while (1) {
5336 if (sblk->status & SD_STATUS_ERROR)
5337 tg3_process_error(tp);
5339 tg3_poll_link(tp);
5341 work_done = tg3_poll_work(tnapi, work_done, budget);
5343 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5344 goto tx_recovery;
5346 if (unlikely(work_done >= budget))
5347 break;
5349 if (tg3_flag(tp, TAGGED_STATUS)) {
5350 /* tp->last_tag is used in tg3_int_reenable() below
5351 * to tell the hw how much work has been processed,
5352 * so we must read it before checking for more work.
5353 */
5354 tnapi->last_tag = sblk->status_tag;
5355 tnapi->last_irq_tag = tnapi->last_tag;
5356 rmb();
5357 } else
5358 sblk->status &= ~SD_STATUS_UPDATED;
5360 if (likely(!tg3_has_work(tnapi))) {
5361 napi_complete(napi);
5362 tg3_int_reenable(tnapi);
5363 break;
5367 return work_done;
5369 tx_recovery:
5370 /* work_done is guaranteed to be less than budget. */
5371 napi_complete(napi);
5372 schedule_work(&tp->reset_task);
5373 return work_done;
5376 static void tg3_napi_disable(struct tg3 *tp)
5378 int i;
5380 for (i = tp->irq_cnt - 1; i >= 0; i--)
5381 napi_disable(&tp->napi[i].napi);
5384 static void tg3_napi_enable(struct tg3 *tp)
5386 int i;
5388 for (i = 0; i < tp->irq_cnt; i++)
5389 napi_enable(&tp->napi[i].napi);
5392 static void tg3_napi_init(struct tg3 *tp)
5394 int i;
5396 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5397 for (i = 1; i < tp->irq_cnt; i++)
5398 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5401 static void tg3_napi_fini(struct tg3 *tp)
5403 int i;
5405 for (i = 0; i < tp->irq_cnt; i++)
5406 netif_napi_del(&tp->napi[i].napi);
5409 static inline void tg3_netif_stop(struct tg3 *tp)
5411 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5412 tg3_napi_disable(tp);
5413 netif_tx_disable(tp->dev);
5416 static inline void tg3_netif_start(struct tg3 *tp)
5418 /* NOTE: unconditional netif_tx_wake_all_queues is only
5419 * appropriate so long as all callers are assured to
5420 * have free tx slots (such as after tg3_init_hw)
5421 */
5422 netif_tx_wake_all_queues(tp->dev);
5424 tg3_napi_enable(tp);
5425 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5426 tg3_enable_ints(tp);
5429 static void tg3_irq_quiesce(struct tg3 *tp)
5431 int i;
5433 BUG_ON(tp->irq_sync);
5435 tp->irq_sync = 1;
5436 smp_mb();
5438 for (i = 0; i < tp->irq_cnt; i++)
5439 synchronize_irq(tp->napi[i].irq_vec);
5442 /* Fully shut down all tg3 driver activity elsewhere in the system.
5443 * If irq_sync is non-zero, then the IRQ handler must be synchronized
5444 * with as well. Most of the time, this is not necessary except when
5445 * shutting down the device.
5446 */
5447 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5449 spin_lock_bh(&tp->lock);
5450 if (irq_sync)
5451 tg3_irq_quiesce(tp);
5454 static inline void tg3_full_unlock(struct tg3 *tp)
5456 spin_unlock_bh(&tp->lock);
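/* Editor's sketch of typical usage (cf. tg3_reset_task() below,
 * illustrative only):
 *
 *	tg3_full_lock(tp, 1);	(quiesce and sync IRQs, take tp->lock)
 *	... halt or reconfigure the hardware ...
 *	tg3_full_unlock(tp);	(drop tp->lock, re-enable BHs)
 */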
5459 /* One-shot MSI handler - Chip automatically disables interrupt
5460 * after sending MSI so driver doesn't have to do it.
5461 */
5462 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5464 struct tg3_napi *tnapi = dev_id;
5465 struct tg3 *tp = tnapi->tp;
5467 prefetch(tnapi->hw_status);
5468 if (tnapi->rx_rcb)
5469 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5471 if (likely(!tg3_irq_sync(tp)))
5472 napi_schedule(&tnapi->napi);
5474 return IRQ_HANDLED;
5477 /* MSI ISR - No need to check for interrupt sharing and no need to
5478 * flush status block and interrupt mailbox. PCI ordering rules
5479 * guarantee that MSI will arrive after the status block.
5480 */
5481 static irqreturn_t tg3_msi(int irq, void *dev_id)
5483 struct tg3_napi *tnapi = dev_id;
5484 struct tg3 *tp = tnapi->tp;
5486 prefetch(tnapi->hw_status);
5487 if (tnapi->rx_rcb)
5488 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5489 /*
5490 * Writing any value to intr-mbox-0 clears PCI INTA# and
5491 * chip-internal interrupt pending events.
5492 * Writing non-zero to intr-mbox-0 additionally tells the
5493 * NIC to stop sending us irqs, engaging "in-intr-handler"
5494 * event coalescing.
5495 */
5496 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5497 if (likely(!tg3_irq_sync(tp)))
5498 napi_schedule(&tnapi->napi);
5500 return IRQ_RETVAL(1);
5503 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5505 struct tg3_napi *tnapi = dev_id;
5506 struct tg3 *tp = tnapi->tp;
5507 struct tg3_hw_status *sblk = tnapi->hw_status;
5508 unsigned int handled = 1;
5510 /* In INTx mode, it is possible for the interrupt to arrive at
5511 * the CPU before the status block posted prior to the interrupt.
5512 * Reading the PCI State register will confirm whether the
5513 * interrupt is ours and will flush the status block.
5514 */
5515 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5516 if (tg3_flag(tp, CHIP_RESETTING) ||
5517 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5518 handled = 0;
5519 goto out;
5523 /*
5524 * Writing any value to intr-mbox-0 clears PCI INTA# and
5525 * chip-internal interrupt pending events.
5526 * Writing non-zero to intr-mbox-0 additionally tells the
5527 * NIC to stop sending us irqs, engaging "in-intr-handler"
5528 * event coalescing.
5529 *
5530 * Flush the mailbox to de-assert the IRQ immediately to prevent
5531 * spurious interrupts. The flush impacts performance but
5532 * excessive spurious interrupts can be worse in some cases.
5533 */
5534 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5535 if (tg3_irq_sync(tp))
5536 goto out;
5537 sblk->status &= ~SD_STATUS_UPDATED;
5538 if (likely(tg3_has_work(tnapi))) {
5539 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5540 napi_schedule(&tnapi->napi);
5541 } else {
5542 /* No work, shared interrupt perhaps? re-enable
5543 * interrupts, and flush that PCI write
5544 */
5545 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5546 0x00000000);
5548 out:
5549 return IRQ_RETVAL(handled);
5552 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5554 struct tg3_napi *tnapi = dev_id;
5555 struct tg3 *tp = tnapi->tp;
5556 struct tg3_hw_status *sblk = tnapi->hw_status;
5557 unsigned int handled = 1;
5559 /* In INTx mode, it is possible for the interrupt to arrive at
5560 * the CPU before the status block posted prior to the interrupt.
5561 * Reading the PCI State register will confirm whether the
5562 * interrupt is ours and will flush the status block.
5563 */
5564 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5565 if (tg3_flag(tp, CHIP_RESETTING) ||
5566 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5567 handled = 0;
5568 goto out;
5572 /*
5573 * Writing any value to intr-mbox-0 clears PCI INTA# and
5574 * chip-internal interrupt pending events.
5575 * Writing non-zero to intr-mbox-0 additionally tells the
5576 * NIC to stop sending us irqs, engaging "in-intr-handler"
5577 * event coalescing.
5578 *
5579 * Flush the mailbox to de-assert the IRQ immediately to prevent
5580 * spurious interrupts. The flush impacts performance but
5581 * excessive spurious interrupts can be worse in some cases.
5582 */
5583 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5585 /*
5586 * In a shared interrupt configuration, sometimes other devices'
5587 * interrupts will scream. We record the current status tag here
5588 * so that the above check can report that the screaming interrupts
5589 * are unhandled. Eventually they will be silenced.
5590 */
5591 tnapi->last_irq_tag = sblk->status_tag;
5593 if (tg3_irq_sync(tp))
5594 goto out;
5596 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5598 napi_schedule(&tnapi->napi);
5600 out:
5601 return IRQ_RETVAL(handled);
5604 /* ISR for interrupt test */
5605 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5607 struct tg3_napi *tnapi = dev_id;
5608 struct tg3 *tp = tnapi->tp;
5609 struct tg3_hw_status *sblk = tnapi->hw_status;
5611 if ((sblk->status & SD_STATUS_UPDATED) ||
5612 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5613 tg3_disable_ints(tp);
5614 return IRQ_RETVAL(1);
5616 return IRQ_RETVAL(0);
5619 static int tg3_init_hw(struct tg3 *, int);
5620 static int tg3_halt(struct tg3 *, int, int);
5622 /* Restart hardware after configuration changes, self-test, etc.
5623 * Invoked with tp->lock held.
5624 */
5625 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5626 __releases(tp->lock)
5627 __acquires(tp->lock)
5629 int err;
5631 err = tg3_init_hw(tp, reset_phy);
5632 if (err) {
5633 netdev_err(tp->dev,
5634 "Failed to re-initialize device, aborting\n");
5635 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5636 tg3_full_unlock(tp);
5637 del_timer_sync(&tp->timer);
5638 tp->irq_sync = 0;
5639 tg3_napi_enable(tp);
5640 dev_close(tp->dev);
5641 tg3_full_lock(tp, 0);
5643 return err;
5646 #ifdef CONFIG_NET_POLL_CONTROLLER
5647 static void tg3_poll_controller(struct net_device *dev)
5649 int i;
5650 struct tg3 *tp = netdev_priv(dev);
5652 for (i = 0; i < tp->irq_cnt; i++)
5653 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5655 #endif
5657 static void tg3_reset_task(struct work_struct *work)
5659 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5660 int err;
5661 unsigned int restart_timer;
5663 tg3_full_lock(tp, 0);
5665 if (!netif_running(tp->dev)) {
5666 tg3_full_unlock(tp);
5667 return;
5670 tg3_full_unlock(tp);
5672 tg3_phy_stop(tp);
5674 tg3_netif_stop(tp);
5676 tg3_full_lock(tp, 1);
5678 restart_timer = tg3_flag(tp, RESTART_TIMER);
5679 tg3_flag_clear(tp, RESTART_TIMER);
5681 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5682 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5683 tp->write32_rx_mbox = tg3_write_flush_reg32;
5684 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5685 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5688 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5689 err = tg3_init_hw(tp, 1);
5690 if (err)
5691 goto out;
5693 tg3_netif_start(tp);
5695 if (restart_timer)
5696 mod_timer(&tp->timer, jiffies + 1);
5698 out:
5699 tg3_full_unlock(tp);
5701 if (!err)
5702 tg3_phy_start(tp);
5705 static void tg3_tx_timeout(struct net_device *dev)
5707 struct tg3 *tp = netdev_priv(dev);
5709 if (netif_msg_tx_err(tp)) {
5710 netdev_err(dev, "transmit timed out, resetting\n");
5711 tg3_dump_state(tp);
5714 schedule_work(&tp->reset_task);
5717 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5718 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5720 u32 base = (u32) mapping & 0xffffffff;
5722 return (base > 0xffffdcc0) && (base + len + 8 < base);
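/* Editor's sketch (hypothetical values): with base = 0xffffff00 and
 * len = 0x200, base + len + 8 = 0x100000108 truncates to 0x108, which
 * is < base, so the test reports a 4GB-boundary crossing.  The +8
 * slack and the 0xffffdcc0 cutoff are the driver's chosen guard band;
 * their exact derivation is not documented here.
 */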
5725 /* Test for DMA addresses > 40-bit */
5726 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5727 int len)
5729 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5730 if (tg3_flag(tp, 40BIT_DMA_BUG))
5731 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5732 return 0;
5733 #else
5734 return 0;
5735 #endif
5738 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5740 /* Work around 4GB and 40-bit hardware DMA bugs. */
5741 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5742 struct sk_buff *skb, u32 last_plus_one,
5743 u32 *start, u32 base_flags, u32 mss)
5745 struct tg3 *tp = tnapi->tp;
5746 struct sk_buff *new_skb;
5747 dma_addr_t new_addr = 0;
5748 u32 entry = *start;
5749 int i, ret = 0;
5751 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5752 new_skb = skb_copy(skb, GFP_ATOMIC);
5753 else {
5754 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5756 new_skb = skb_copy_expand(skb,
5757 skb_headroom(skb) + more_headroom,
5758 skb_tailroom(skb), GFP_ATOMIC);
5761 if (!new_skb) {
5762 ret = -1;
5763 } else {
5764 /* New SKB is guaranteed to be linear. */
5765 entry = *start;
5766 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5767 PCI_DMA_TODEVICE);
5768 /* Make sure the mapping succeeded */
5769 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5770 ret = -1;
5771 dev_kfree_skb(new_skb);
5772 new_skb = NULL;
5774 /* Make sure new skb does not cross any 4G boundaries.
5775 * Drop the packet if it does.
5776 */
5777 } else if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
5778 tg3_4g_overflow_test(new_addr, new_skb->len)) {
5779 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5780 PCI_DMA_TODEVICE);
5781 ret = -1;
5782 dev_kfree_skb(new_skb);
5783 new_skb = NULL;
5784 } else {
5785 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5786 base_flags, 1 | (mss << 1));
5787 *start = NEXT_TX(entry);
5791 /* Now clean up the sw ring entries. */
5792 i = 0;
5793 while (entry != last_plus_one) {
5794 int len;
5796 if (i == 0)
5797 len = skb_headlen(skb);
5798 else
5799 len = skb_shinfo(skb)->frags[i-1].size;
5801 pci_unmap_single(tp->pdev,
5802 dma_unmap_addr(&tnapi->tx_buffers[entry],
5803 mapping),
5804 len, PCI_DMA_TODEVICE);
5805 if (i == 0) {
5806 tnapi->tx_buffers[entry].skb = new_skb;
5807 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5808 new_addr);
5809 } else {
5810 tnapi->tx_buffers[entry].skb = NULL;
5812 entry = NEXT_TX(entry);
5813 i++;
5816 dev_kfree_skb(skb);
5818 return ret;
5821 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5822 dma_addr_t mapping, int len, u32 flags,
5823 u32 mss_and_is_end)
5825 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5826 int is_end = (mss_and_is_end & 0x1);
5827 u32 mss = (mss_and_is_end >> 1);
5828 u32 vlan_tag = 0;
5830 if (is_end)
5831 flags |= TXD_FLAG_END;
5832 if (flags & TXD_FLAG_VLAN) {
5833 vlan_tag = flags >> 16;
5834 flags &= 0xffff;
5836 vlan_tag |= (mss << TXD_MSS_SHIFT);
5838 txd->addr_hi = ((u64) mapping >> 32);
5839 txd->addr_lo = ((u64) mapping & 0xffffffff);
5840 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5841 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
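/* Editor's sketch (hypothetical values): callers pack the last
 * argument as (mss << 1) | is_end.  For mss = 1448 on a final
 * descriptor they pass (1448 << 1) | 1 = 0xb51, which the code above
 * splits back into mss = 1448 and sets TXD_FLAG_END.
 */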
5844 /* hard_start_xmit for devices that don't have any bugs and
5845 * support TG3_FLAG_HW_TSO_2 and TG3_FLAG_HW_TSO_3 only.
5846 */
5847 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5848 struct net_device *dev)
5850 struct tg3 *tp = netdev_priv(dev);
5851 u32 len, entry, base_flags, mss;
5852 dma_addr_t mapping;
5853 struct tg3_napi *tnapi;
5854 struct netdev_queue *txq;
5855 unsigned int i, last;
5857 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5858 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5859 if (tg3_flag(tp, ENABLE_TSS))
5860 tnapi++;
5862 /* We are running in BH disabled context with netif_tx_lock
5863 * and TX reclaim runs via tp->napi.poll inside of a software
5864 * interrupt. Furthermore, IRQ processing runs lockless so we have
5865 * no IRQ context deadlocks to worry about either. Rejoice!
5866 */
5867 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5868 if (!netif_tx_queue_stopped(txq)) {
5869 netif_tx_stop_queue(txq);
5871 /* This is a hard error, log it. */
5872 netdev_err(dev,
5873 "BUG! Tx Ring full when queue awake!\n");
5875 return NETDEV_TX_BUSY;
5878 entry = tnapi->tx_prod;
5879 base_flags = 0;
5880 mss = skb_shinfo(skb)->gso_size;
5881 if (mss) {
5882 int tcp_opt_len, ip_tcp_len;
5883 u32 hdrlen;
5885 if (skb_header_cloned(skb) &&
5886 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5887 dev_kfree_skb(skb);
5888 goto out_unlock;
5891 if (skb_is_gso_v6(skb)) {
5892 hdrlen = skb_headlen(skb) - ETH_HLEN;
5893 } else {
5894 struct iphdr *iph = ip_hdr(skb);
5896 tcp_opt_len = tcp_optlen(skb);
5897 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5899 iph->check = 0;
5900 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5901 hdrlen = ip_tcp_len + tcp_opt_len;
5904 if (tg3_flag(tp, HW_TSO_3)) {
5905 mss |= (hdrlen & 0xc) << 12;
5906 if (hdrlen & 0x10)
5907 base_flags |= 0x00000010;
5908 base_flags |= (hdrlen & 0x3e0) << 5;
5909 } else
5910 mss |= hdrlen << 9;
5912 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5913 TXD_FLAG_CPU_POST_DMA);
5915 tcp_hdr(skb)->check = 0;
5917 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5918 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5921 if (vlan_tx_tag_present(skb))
5922 base_flags |= (TXD_FLAG_VLAN |
5923 (vlan_tx_tag_get(skb) << 16));
5925 len = skb_headlen(skb);
5927 /* Queue skb data, a.k.a. the main skb fragment. */
5928 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5929 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5930 dev_kfree_skb(skb);
5931 goto out_unlock;
5934 tnapi->tx_buffers[entry].skb = skb;
5935 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5937 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
5938 !mss && skb->len > VLAN_ETH_FRAME_LEN)
5939 base_flags |= TXD_FLAG_JMB_PKT;
5941 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5942 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5944 entry = NEXT_TX(entry);
5946 /* Now loop through additional data fragments, and queue them. */
5947 if (skb_shinfo(skb)->nr_frags > 0) {
5948 last = skb_shinfo(skb)->nr_frags - 1;
5949 for (i = 0; i <= last; i++) {
5950 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5952 len = frag->size;
5953 mapping = pci_map_page(tp->pdev,
5954 frag->page,
5955 frag->page_offset,
5956 len, PCI_DMA_TODEVICE);
5957 if (pci_dma_mapping_error(tp->pdev, mapping))
5958 goto dma_error;
5960 tnapi->tx_buffers[entry].skb = NULL;
5961 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5962 mapping);
5964 tg3_set_txd(tnapi, entry, mapping, len,
5965 base_flags, (i == last) | (mss << 1));
5967 entry = NEXT_TX(entry);
5971 /* Packets are ready, update Tx producer idx local and on card. */
5972 tw32_tx_mbox(tnapi->prodmbox, entry);
5974 tnapi->tx_prod = entry;
5975 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5976 netif_tx_stop_queue(txq);
5978 /* netif_tx_stop_queue() must be done before checking
5979 * tx index in tg3_tx_avail() below, because in
5980 * tg3_tx(), we update tx index before checking for
5981 * netif_tx_queue_stopped().
5982 */
5983 smp_mb();
5984 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5985 netif_tx_wake_queue(txq);
5988 out_unlock:
5989 mmiowb();
5991 return NETDEV_TX_OK;
5993 dma_error:
5994 last = i;
5995 entry = tnapi->tx_prod;
5996 tnapi->tx_buffers[entry].skb = NULL;
5997 pci_unmap_single(tp->pdev,
5998 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5999 skb_headlen(skb),
6000 PCI_DMA_TODEVICE);
6001 for (i = 0; i <= last; i++) {
6002 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6003 entry = NEXT_TX(entry);
6005 pci_unmap_page(tp->pdev,
6006 dma_unmap_addr(&tnapi->tx_buffers[entry],
6007 mapping),
6008 frag->size, PCI_DMA_TODEVICE);
6011 dev_kfree_skb(skb);
6012 return NETDEV_TX_OK;
6015 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
6016 struct net_device *);
6018 /* Use GSO to work around a rare TSO bug that may be triggered when the
6019 * TSO header is greater than 80 bytes.
6020 */
6021 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6023 struct sk_buff *segs, *nskb;
6024 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6026 /* Estimate the number of fragments in the worst case */
6027 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6028 netif_stop_queue(tp->dev);
6030 /* netif_tx_stop_queue() must be done before checking
6031 * tx index in tg3_tx_avail() below, because in
6032 * tg3_tx(), we update tx index before checking for
6033 * netif_tx_queue_stopped().
6034 */
6035 smp_mb();
6036 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6037 return NETDEV_TX_BUSY;
6039 netif_wake_queue(tp->dev);
6042 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6043 if (IS_ERR(segs))
6044 goto tg3_tso_bug_end;
6046 do {
6047 nskb = segs;
6048 segs = segs->next;
6049 nskb->next = NULL;
6050 tg3_start_xmit_dma_bug(nskb, tp->dev);
6051 } while (segs);
6053 tg3_tso_bug_end:
6054 dev_kfree_skb(skb);
6056 return NETDEV_TX_OK;
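/* Editor's note: tg3_tso_bug() sidesteps the long-header TSO erratum
 * by letting the stack segment the packet: skb_gso_segment() is called
 * with TSO masked out of the feature flags, and every resulting
 * MSS-sized skb is resubmitted through tg3_start_xmit_dma_bug() as an
 * ordinary non-TSO packet.
 */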
6059 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6060 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6061 */
6062 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
6063 struct net_device *dev)
6065 struct tg3 *tp = netdev_priv(dev);
6066 u32 len, entry, base_flags, mss;
6067 int would_hit_hwbug;
6068 dma_addr_t mapping;
6069 struct tg3_napi *tnapi;
6070 struct netdev_queue *txq;
6071 unsigned int i, last;
6073 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6074 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6075 if (tg3_flag(tp, ENABLE_TSS))
6076 tnapi++;
6078 /* We are running in BH disabled context with netif_tx_lock
6079 * and TX reclaim runs via tp->napi.poll inside of a software
6080 * interrupt. Furthermore, IRQ processing runs lockless so we have
6081 * no IRQ context deadlocks to worry about either. Rejoice!
6082 */
6083 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
6084 if (!netif_tx_queue_stopped(txq)) {
6085 netif_tx_stop_queue(txq);
6087 /* This is a hard error, log it. */
6088 netdev_err(dev,
6089 "BUG! Tx Ring full when queue awake!\n");
6091 return NETDEV_TX_BUSY;
6094 entry = tnapi->tx_prod;
6095 base_flags = 0;
6096 if (skb->ip_summed == CHECKSUM_PARTIAL)
6097 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6099 mss = skb_shinfo(skb)->gso_size;
6100 if (mss) {
6101 struct iphdr *iph;
6102 u32 tcp_opt_len, hdr_len;
6104 if (skb_header_cloned(skb) &&
6105 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
6106 dev_kfree_skb(skb);
6107 goto out_unlock;
6110 iph = ip_hdr(skb);
6111 tcp_opt_len = tcp_optlen(skb);
6113 if (skb_is_gso_v6(skb)) {
6114 hdr_len = skb_headlen(skb) - ETH_HLEN;
6115 } else {
6116 u32 ip_tcp_len;
6118 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6119 hdr_len = ip_tcp_len + tcp_opt_len;
6121 iph->check = 0;
6122 iph->tot_len = htons(mss + hdr_len);
6125 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6126 tg3_flag(tp, TSO_BUG))
6127 return tg3_tso_bug(tp, skb);
6129 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6130 TXD_FLAG_CPU_POST_DMA);
6132 if (tg3_flag(tp, HW_TSO_1) ||
6133 tg3_flag(tp, HW_TSO_2) ||
6134 tg3_flag(tp, HW_TSO_3)) {
6135 tcp_hdr(skb)->check = 0;
6136 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6137 } else
6138 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6139 iph->daddr, 0,
6140 IPPROTO_TCP,
6141 0);
6143 if (tg3_flag(tp, HW_TSO_3)) {
6144 mss |= (hdr_len & 0xc) << 12;
6145 if (hdr_len & 0x10)
6146 base_flags |= 0x00000010;
6147 base_flags |= (hdr_len & 0x3e0) << 5;
6148 } else if (tg3_flag(tp, HW_TSO_2))
6149 mss |= hdr_len << 9;
6150 else if (tg3_flag(tp, HW_TSO_1) ||
6151 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6152 if (tcp_opt_len || iph->ihl > 5) {
6153 int tsflags;
6155 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6156 mss |= (tsflags << 11);
6158 } else {
6159 if (tcp_opt_len || iph->ihl > 5) {
6160 int tsflags;
6162 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6163 base_flags |= tsflags << 12;
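	/* A summary of the TSO encodings set up above, inferred from the
	 * register usage in this driver rather than from documentation:
	 * HW_TSO_3 parts scatter the IP+TCP header length across spare
	 * bits in both the mss field and base_flags; HW_TSO_2 parts carry
	 * it as hdr_len << 9 inside mss; HW_TSO_1 and 5705 parts encode
	 * only the count of IP/TCP option words in mss bits 11 and up,
	 * while everything else passes that count in base_flags bits 12+.
	 */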
	if (vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		dev_kfree_skb(skb);
		goto out_unlock;
	}

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		would_hit_hwbug = 1;

	if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
	    tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
	    tg3_40bit_overflow_test(tp, mapping, len))
		would_hit_hwbug = 1;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	tg3_set_txd(tnapi, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
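	/* The last argument to tg3_set_txd() above packs two values: bit 0
	 * marks this descriptor as the final one for the frame (true here
	 * only when there are no fragments), and the MSS rides in the bits
	 * above it as mss << 1.
	 */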
	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (pci_dma_mapping_error(tp->pdev, mapping))
				goto dma_error;

			if (tg3_flag(tp, SHORT_DMA_BUG) &&
			    len <= 8)
				would_hit_hwbug = 1;

			if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
			    tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
			    tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tg3_flag(tp, HW_TSO_1) ||
			    tg3_flag(tp, HW_TSO_2) ||
			    tg3_flag(tp, HW_TSO_3))
				tg3_set_txd(tnapi, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tnapi, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}
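	/* tigon3_dma_hwbug_workaround() above re-submits the frame from a
	 * linearized copy of the skb, rewriting the descriptors between
	 * 'start' and 'last_plus_one' so that none of them trips the DMA
	 * errata flagged by would_hit_hwbug.  (Description inferred from
	 * the call site; see the helper for the exact mechanics.)
	 */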
	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}
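	/* The wake threshold is intentionally larger than the stop threshold
	 * of MAX_SKB_FRAGS + 1 used above; the gap acts as hysteresis so a
	 * nearly-full ring does not bounce the queue between stopped and
	 * awake on every reclaimed descriptor.
	 */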
out_unlock:
	mmiowb();

	return NETDEV_TX_OK;

dma_error:
	last = i;
	entry = tnapi->tx_prod;
	tnapi->tx_buffers[entry].skb = NULL;
	pci_unmap_single(tp->pdev,
			 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);
	for (i = 0; i <= last; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		entry = NEXT_TX(entry);

		pci_unmap_page(tp->pdev,
			       dma_unmap_addr(&tnapi->tx_buffers[entry],
					      mapping),
			       frag->size, PCI_DMA_TODEVICE);
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
static u32 tg3_fix_features(struct net_device *dev, u32 features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
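/* On 5780-class parts the driver treats TSO and jumbo MTUs as mutually
 * exclusive: above ETH_DATA_LEN it clears TSO_CAPABLE instead of turning
 * on the jumbo ring.  The usual explanation is a shared on-chip resource
 * (the firmware TSO image and jumbo buffering both draw on MBUF pool
 * space), but only the mutual exclusion itself is encoded here.
 */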
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is brought up.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
					tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
						TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
				tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
					TG3_RX_JMB_MAP_SZ);
	}
}
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}
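	/* The opaque cookie written above is echoed back verbatim by the
	 * hardware in the matching rx completion descriptor, letting the
	 * rx path recover both the source ring (STD vs JUMBO) and the
	 * buffer index without any extra bookkeeping.
	 */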
	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
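/* Note the two-tier DMA strategy above: the descriptor rings themselves
 * live in coherent memory from dma_alloc_coherent(), because both the CPU
 * and the NIC access them continuously, while the packet buffers are
 * mapped individually with the streaming pci_map_single()/pci_map_page()
 * API as they are posted.
 */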
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; ) {
			struct ring_info *txp;
			struct sk_buff *skb;
			unsigned int k;

			txp = &tnapi->tx_buffers[i];
			skb = txp->skb;

			if (skb == NULL) {
				i++;
				continue;
			}

			pci_unmap_single(tp->pdev,
					 dma_unmap_addr(txp, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);
			txp->skb = NULL;

			i++;

			for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
				txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
				pci_unmap_page(tp->pdev,
					       dma_unmap_addr(txp, mapping),
					       skb_shinfo(skb)->frags[k].size,
					       PCI_DMA_TODEVICE);
				i++;
			}

			dev_kfree_skb_any(skb);
		}
	}
}
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
					  tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;

		if (tnapi->rx_rcb) {
			dma_free_coherent(&tp->pdev->dev,
					  TG3_RX_RCB_RING_BYTES(tp),
					  tnapi->rx_rcb,
					  tnapi->rx_rcb_mapping);
			tnapi->rx_rcb = NULL;
		}

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
		sblk = tnapi->hw_status;

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector TSS is enabled, vector 0 does not handle
		 * tx interrupts.  Don't allocate any resources for it.
		 */
		if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
		    (i && tg3_flag(tp, ENABLE_TSS))) {
			tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
						    TG3_TX_RING_SIZE,
						    GFP_KERNEL);
			if (!tnapi->tx_buffers)
				goto err_out;

			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
							    TG3_TX_RING_BYTES,
							    &tnapi->tx_desc_mapping,
							    GFP_KERNEL);
			if (!tnapi->tx_ring)
				goto err_out;
		}

		/*
		 * When RSS is enabled, the status block format changes
		 * slightly.  The "rx_jumbo_consumer", "reserved",
		 * and "rx_mini_consumer" members get mapped to the
		 * other three rx return ring producer indexes.
		 */
		switch (i) {
		default:
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
			break;
		case 2:
			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
			break;
		case 3:
			tnapi->rx_rcb_prod_idx = &sblk->reserved;
			break;
		case 4:
			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
			break;
		}

		/*
		 * If multivector RSS is enabled, vector 0 does not handle
		 * rx or tx interrupts.  Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;

		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	}

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits on the
			 * 5705/5750, so just report success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
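/* Worst case the loop above busy-waits MAX_WAIT_CNT * 100us = 100ms per
 * block before giving up.  The 'silent' flag exists because callers such
 * as tg3_abort_hw() may be tearing down hardware that is already wedged,
 * where a timeout is expected and not worth logging as an error.
 */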
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tg3_flag(tp, APE_HAS_NCSI))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
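/* The polling budgets above are generous: the 5906 VCPU path waits up to
 * 200 * 100us = 20ms, while the firmware mailbox path allows
 * 100000 * 10us = 1 second for bootcode to post the magic value.
 */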
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}

/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
		if (tg3_flag(tp, PCI_EXPRESS))
			pcie_set_readrq(tp->pdev, tp->pcie_readrq);
		else {
			pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
					      tp->pci_cacheline_sz);
			pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
					      tp->pci_lat_timer);
		}
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
static void tg3_stop_fw(struct tg3 *);

/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupts
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips will not even respond to a PCI cfg access
	 * while the reset command is being processed.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to do this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
		u16 val16;

		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete. */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_DEVCTL,
				     &val16);
		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
			   PCI_EXP_DEVCTL_NOSNOOP_EN);
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_DEVCTL,
				      val16);

		pcie_set_readrq(tp->pdev, tp->pcie_readrq);

		/* Clear error status */
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN |
			       MAC_MODE_APE_RX_EN |
			       MAC_MODE_TDE_ENABLE;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state. */
	tg3_flag_clear(tp, ENABLE_ASF);
	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
	}

	return 0;
}
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, 0);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (err)
		return err;

	return 0;
}
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000

/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE, CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE, CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
struct fw_info {
	unsigned int fw_base;
	unsigned int fw_len;
	const __be32 *fw_data;
};

/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
				 int cpu_scratch_size, struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: attempting to load TX cpu firmware on a 5705-class chip\n",
			   __func__);
		return -EINVAL;
	}

	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			 be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
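/* Only the low 16 bits of info->fw_base are used as the offset into the
 * CPU scratch window when copying the image above; the full fw_base is
 * later written to CPU_PC to start execution.  (A reading of the masking,
 * not a statement from firmware documentation.)
 */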
/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and length.  We are setting complete length.
	 * length = end_address_of_bss - start_address_of_text.
	 * Remainder is the blob to be loaded contiguously
	 * from start address.
	 */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);

	return 0;
}
/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and length.  We are setting complete length.
	 * length = end_address_of_bss - start_address_of_text.
	 * Remainder is the blob to be loaded contiguously
	 * from start address.
	 */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE, 0x00000000);
	return 0;
}
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0, skip_mac_1 = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = 1;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	spin_unlock_bh(&tp->lock);

	return err;
}
/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
static void __tg3_set_rx_mode(struct net_device *);
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
	}

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!netif_carrier_ok(tp->dev))
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}

	for (i = 0; i < tp->irq_cnt - 1; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);

		if (tg3_flag(tp, ENABLE_TSS)) {
			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);

		if (tg3_flag(tp, ENABLE_TSS)) {
			tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
		}
	}
}
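/* Each additional MSI-X vector owns an identical block of host coalescing
 * registers, spaced 0x18 bytes apart starting at the *_VEC1 addresses,
 * which is what the i * 0x18 striding above indexes.
 */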
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       ((tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);

		stblk += 8;
		rxrcb += TG3_BDINFO_SIZE;
	}
}
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
	else
		bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
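/* The thresholds programmed above appear to control when the chip's
 * internal buffer-descriptor cache fetches more BDs from the host rings:
 * the value is capped by both the cache capacity (bdcache_maxcnt / 2) and
 * how many buffers the host typically keeps posted (rx_pending / 8), so
 * the NIC never asks for more than either side can supply.
 */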
8022 /* tp->lock is held. */
8023 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8025 u32 val, rdmac_mode;
8026 int i, err, limit;
8027 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8029 tg3_disable_ints(tp);
8031 tg3_stop_fw(tp);
8033 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8035 if (tg3_flag(tp, INIT_COMPLETE))
8036 tg3_abort_hw(tp, 1);
8038 /* Enable MAC control of LPI */
8039 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8040 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8041 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8042 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8044 tw32_f(TG3_CPMU_EEE_CTRL,
8045 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8047 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8048 TG3_CPMU_EEEMD_LPI_IN_TX |
8049 TG3_CPMU_EEEMD_LPI_IN_RX |
8050 TG3_CPMU_EEEMD_EEE_ENABLE;
8052 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8053 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8055 if (tg3_flag(tp, ENABLE_APE))
8056 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8058 tw32_f(TG3_CPMU_EEE_MODE, val);
8060 tw32_f(TG3_CPMU_EEE_DBTMR1,
8061 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8062 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8064 tw32_f(TG3_CPMU_EEE_DBTMR2,
8065 TG3_CPMU_DBTMR2_APE_TX_2047US |
8066 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8069 if (reset_phy)
8070 tg3_phy_reset(tp);
8072 err = tg3_chip_reset(tp);
8073 if (err)
8074 return err;
8076 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8078 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8079 val = tr32(TG3_CPMU_CTRL);
8080 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8081 tw32(TG3_CPMU_CTRL, val);
8083 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8084 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8085 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8086 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8088 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8089 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8090 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8091 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8093 val = tr32(TG3_CPMU_HST_ACC);
8094 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8095 val |= CPMU_HST_ACC_MACCLK_6_25;
8096 tw32(TG3_CPMU_HST_ACC, val);
8099 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8100 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8101 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8102 PCIE_PWR_MGMT_L1_THRESH_4MS;
8103 tw32(PCIE_PWR_MGMT_THRESH, val);
8105 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8106 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8108 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8110 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8111 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8114 if (tg3_flag(tp, L1PLLPD_EN)) {
8115 u32 grc_mode = tr32(GRC_MODE);
8117 /* Access the lower 1K of PL PCIE block registers. */
8118 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8119 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8121 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8122 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8123 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8125 tw32(GRC_MODE, grc_mode);
8128 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8129 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8130 u32 grc_mode = tr32(GRC_MODE);
8132 /* Access the lower 1K of PL PCIE block registers. */
8133 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8134 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8136 val = tr32(TG3_PCIE_TLDLPL_PORT +
8137 TG3_PCIE_PL_LO_PHYCTL5);
8138 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8139 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8141 tw32(GRC_MODE, grc_mode);
8144 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8145 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8146 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8147 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8150 /* This works around an issue with Athlon chipsets on
8151 * B3 tigon3 silicon. This bit has no effect on any
8152 * other revision. But do not set this on PCI Express
8153 * chips and don't even touch the clocks if the CPMU is present.
8155 if (!tg3_flag(tp, CPMU_PRESENT)) {
8156 if (!tg3_flag(tp, PCI_EXPRESS))
8157 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8158 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8161 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8162 tg3_flag(tp, PCIX_MODE)) {
8163 val = tr32(TG3PCI_PCISTATE);
8164 val |= PCISTATE_RETRY_SAME_DMA;
8165 tw32(TG3PCI_PCISTATE, val);
8168 if (tg3_flag(tp, ENABLE_APE)) {
8169 /* Allow reads and writes to the
8170 * APE register and memory space.
8172 val = tr32(TG3PCI_PCISTATE);
8173 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8174 PCISTATE_ALLOW_APE_SHMEM_WR |
8175 PCISTATE_ALLOW_APE_PSPACE_WR;
8176 tw32(TG3PCI_PCISTATE, val);
8179 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8180 /* Enable some hw fixes. */
8181 val = tr32(TG3PCI_MSI_DATA);
8182 val |= (1 << 26) | (1 << 28) | (1 << 29);
8183 tw32(TG3PCI_MSI_DATA, val);
8186 /* Descriptor ring init may make accesses to the
8187 * NIC SRAM area to setup the TX descriptors, so we
8188 * can only do this after the hardware has been
8189 * successfully reset.
8191 err = tg3_init_rings(tp);
8192 if (err)
8193 return err;
8195 if (tg3_flag(tp, 57765_PLUS)) {
8196 val = tr32(TG3PCI_DMA_RW_CTRL) &
8197 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8198 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8199 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8200 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8201 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8202 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8203 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8204 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8205 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8206 /* This value is determined during the probe time DMA
8207 * engine test, tg3_test_dma.
8209 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8212 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8213 GRC_MODE_4X_NIC_SEND_RINGS |
8214 GRC_MODE_NO_TX_PHDR_CSUM |
8215 GRC_MODE_NO_RX_PHDR_CSUM);
8216 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8218 /* Pseudo-header checksum is done by hardware logic and not
8219 * the offload processers, so make the chip do the pseudo-
8220 * header checksums on receive. For transmit it is more
8221 * convenient to do the pseudo-header checksum in software
8222 * as Linux does that on transmit for us in all cases.
8224 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8226 tw32(GRC_MODE,
8227 tp->grc_mode |
8228 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8230 /* Setup the timer prescalar register. Clock is always 66Mhz. */
8231 val = tr32(GRC_MISC_CFG);
8232 val &= ~0xff;
8233 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8234 tw32(GRC_MISC_CFG, val);
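/* Sketch of the arithmetic (assuming the usual divide-by-(N + 1)
 * prescaler convention): a prescaler value of 65 divides the 66 MHz
 * clock by 65 + 1 = 66, so the general-purpose timer ticks at
 * 66 MHz / 66 = 1 MHz, i.e. once per microsecond.
 */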
8236 /* Initialize MBUF/DESC pool. */
8237 if (tg3_flag(tp, 5750_PLUS)) {
8238 /* Do nothing. */
8239 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8240 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8241 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8242 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8243 else
8244 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8245 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8246 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8247 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8248 int fw_len;
8250 fw_len = tp->fw_len;
8251 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
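/* The line above is the usual align-up idiom: round fw_len up to the
 * next multiple of 0x80.  For example, 0x1234 + 0x7f = 0x12b3, and
 * 0x12b3 & ~0x7f = 0x1280.
 */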
8252 tw32(BUFMGR_MB_POOL_ADDR,
8253 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8254 tw32(BUFMGR_MB_POOL_SIZE,
8255 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8258 if (tp->dev->mtu <= ETH_DATA_LEN) {
8259 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8260 tp->bufmgr_config.mbuf_read_dma_low_water);
8261 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8262 tp->bufmgr_config.mbuf_mac_rx_low_water);
8263 tw32(BUFMGR_MB_HIGH_WATER,
8264 tp->bufmgr_config.mbuf_high_water);
8265 } else {
8266 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8267 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8268 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8269 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8270 tw32(BUFMGR_MB_HIGH_WATER,
8271 tp->bufmgr_config.mbuf_high_water_jumbo);
8273 tw32(BUFMGR_DMA_LOW_WATER,
8274 tp->bufmgr_config.dma_low_water);
8275 tw32(BUFMGR_DMA_HIGH_WATER,
8276 tp->bufmgr_config.dma_high_water);
8278 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8279 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8280 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8281 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8282 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8283 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8284 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8285 tw32(BUFMGR_MODE, val);
8286 for (i = 0; i < 2000; i++) {
8287 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8288 break;
8289 udelay(10);
8291 if (i >= 2000) {
8292 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8293 return -ENODEV;
8296 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8297 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8299 tg3_setup_rxbd_thresholds(tp);
8301 /* Initialize TG3_BDINFO's at:
8302 * RCVDBDI_STD_BD: standard eth size rx ring
8303 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8304 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8306 * like so:
8307 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8308 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8309 * ring attribute flags
8310 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8312 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8313 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8315 * The size of each ring is fixed in the firmware, but the location is
8316 * configurable.
8318 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8319 ((u64) tpr->rx_std_mapping >> 32));
8320 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8321 ((u64) tpr->rx_std_mapping & 0xffffffff));
8322 if (!tg3_flag(tp, 5717_PLUS))
8323 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8324 NIC_SRAM_RX_BUFFER_DESC);
8326 /* Disable the mini ring */
8327 if (!tg3_flag(tp, 5705_PLUS))
8328 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8329 BDINFO_FLAGS_DISABLED);
8331 /* Program the jumbo buffer descriptor ring control
8332 * blocks on those devices that have them.
8334 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8335 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8337 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8338 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8339 ((u64) tpr->rx_jmb_mapping >> 32));
8340 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8341 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8342 val = TG3_RX_JMB_RING_SIZE(tp) <<
8343 BDINFO_FLAGS_MAXLEN_SHIFT;
8344 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8345 val | BDINFO_FLAGS_USE_EXT_RECV);
8346 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8347 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8348 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8349 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8350 } else {
8351 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8352 BDINFO_FLAGS_DISABLED);
8355 if (tg3_flag(tp, 57765_PLUS)) {
8356 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8357 val = TG3_RX_STD_MAX_SIZE_5700;
8358 else
8359 val = TG3_RX_STD_MAX_SIZE_5717;
8360 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8361 val |= (TG3_RX_STD_DMA_SZ << 2);
8362 } else
8363 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8364 } else
8365 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8367 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8369 tpr->rx_std_prod_idx = tp->rx_pending;
8370 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8372 tpr->rx_jmb_prod_idx =
8373 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8374 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8376 tg3_rings_reset(tp);
8378 /* Initialize MAC address and backoff seed. */
8379 __tg3_set_mac_addr(tp, 0);
8381 /* MTU + ethernet header + FCS + optional VLAN tag */
8382 tw32(MAC_RX_MTU_SIZE,
8383 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8385 /* The slot time is changed by tg3_setup_phy if we
8386 * run at gigabit with half duplex.
8388 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8389 (6 << TX_LENGTHS_IPG_SHIFT) |
8390 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8392 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8393 val |= tr32(MAC_TX_LENGTHS) &
8394 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8395 TX_LENGTHS_CNT_DWN_VAL_MSK);
8397 tw32(MAC_TX_LENGTHS, val);
8399 /* Receive rules. */
8400 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8401 tw32(RCVLPC_CONFIG, 0x0181);
8403 /* Calculate RDMAC_MODE setting early, we need it to determine
8404 * the RCVLPC_STATE_ENABLE mask.
8406 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8407 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8408 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8409 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8410 RDMAC_MODE_LNGREAD_ENAB);
8412 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8413 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8415 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8416 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8417 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8418 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8419 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8420 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8422 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8423 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8424 if (tg3_flag(tp, TSO_CAPABLE) &&
8425 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8426 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8427 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8428 !tg3_flag(tp, IS_5788)) {
8429 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8433 if (tg3_flag(tp, PCI_EXPRESS))
8434 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8436 if (tg3_flag(tp, HW_TSO_1) ||
8437 tg3_flag(tp, HW_TSO_2) ||
8438 tg3_flag(tp, HW_TSO_3))
8439 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8441 if (tg3_flag(tp, HW_TSO_3) ||
8442 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8443 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8444 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8446 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8447 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8449 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8450 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8451 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8452 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8453 tg3_flag(tp, 57765_PLUS)) {
8454 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8455 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8456 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8457 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8458 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8459 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8460 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8461 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8462 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8464 tw32(TG3_RDMA_RSRVCTRL_REG,
8465 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8468 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8469 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8470 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8471 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8472 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8473 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8476 /* Receive/send statistics. */
8477 if (tg3_flag(tp, 5750_PLUS)) {
8478 val = tr32(RCVLPC_STATS_ENABLE);
8479 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8480 tw32(RCVLPC_STATS_ENABLE, val);
8481 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8482 tg3_flag(tp, TSO_CAPABLE)) {
8483 val = tr32(RCVLPC_STATS_ENABLE);
8484 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8485 tw32(RCVLPC_STATS_ENABLE, val);
8486 } else {
8487 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8489 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8490 tw32(SNDDATAI_STATSENAB, 0xffffff);
8491 tw32(SNDDATAI_STATSCTRL,
8492 (SNDDATAI_SCTRL_ENABLE |
8493 SNDDATAI_SCTRL_FASTUPD));
8495 /* Setup host coalescing engine. */
8496 tw32(HOSTCC_MODE, 0);
8497 for (i = 0; i < 2000; i++) {
8498 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8499 break;
8500 udelay(10);
8503 __tg3_set_coalesce(tp, &tp->coal);
8505 if (!tg3_flag(tp, 5705_PLUS)) {
8506 /* Status/statistics block address. See tg3_timer,
8507 * the tg3_periodic_fetch_stats call there, and
8508 * tg3_get_stats to see how this works for 5705/5750 chips.
8510 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8511 ((u64) tp->stats_mapping >> 32));
8512 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8513 ((u64) tp->stats_mapping & 0xffffffff));
8514 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8516 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8518 /* Clear statistics and status block memory areas */
8519 for (i = NIC_SRAM_STATS_BLK;
8520 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8521 i += sizeof(u32)) {
8522 tg3_write_mem(tp, i, 0);
8523 udelay(40);
8527 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8529 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8530 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8531 if (!tg3_flag(tp, 5705_PLUS))
8532 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8534 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8535 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8536 /* reset to prevent losing 1st rx packet intermittently */
8537 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8538 udelay(10);
8541 if (tg3_flag(tp, ENABLE_APE))
8542 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8543 else
8544 tp->mac_mode = 0;
8545 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8546 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8547 if (!tg3_flag(tp, 5705_PLUS) &&
8548 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8549 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8550 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8551 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8552 udelay(40);
8554 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8555 * If TG3_FLAG_IS_NIC is zero, we should read the
8556 * register to preserve the GPIO settings for LOMs. The GPIOs,
8557 * whether used as inputs or outputs, are set by boot code after
8558 * reset.
8560 if (!tg3_flag(tp, IS_NIC)) {
8561 u32 gpio_mask;
8563 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8564 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8565 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8567 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8568 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8569 GRC_LCLCTRL_GPIO_OUTPUT3;
8571 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8572 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8574 tp->grc_local_ctrl &= ~gpio_mask;
8575 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8577 /* GPIO1 must be driven high for eeprom write protect */
8578 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8579 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8580 GRC_LCLCTRL_GPIO_OUTPUT1);
8582 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8583 udelay(100);
8585 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8586 val = tr32(MSGINT_MODE);
8587 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8588 tw32(MSGINT_MODE, val);
8591 if (!tg3_flag(tp, 5705_PLUS)) {
8592 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8593 udelay(40);
8596 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8597 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8598 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8599 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8600 WDMAC_MODE_LNGREAD_ENAB);
8602 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8603 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8604 if (tg3_flag(tp, TSO_CAPABLE) &&
8605 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8606 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8607 /* nothing */
8608 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8609 !tg3_flag(tp, IS_5788)) {
8610 val |= WDMAC_MODE_RX_ACCEL;
8614 /* Enable host coalescing bug fix */
8615 if (tg3_flag(tp, 5755_PLUS))
8616 val |= WDMAC_MODE_STATUS_TAG_FIX;
8618 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8619 val |= WDMAC_MODE_BURST_ALL_DATA;
8621 tw32_f(WDMAC_MODE, val);
8622 udelay(40);
8624 if (tg3_flag(tp, PCIX_MODE)) {
8625 u16 pcix_cmd;
8627 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8628 &pcix_cmd);
8629 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8630 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8631 pcix_cmd |= PCI_X_CMD_READ_2K;
8632 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8633 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8634 pcix_cmd |= PCI_X_CMD_READ_2K;
8636 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8637 pcix_cmd);
8640 tw32_f(RDMAC_MODE, rdmac_mode);
8641 udelay(40);
8643 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8644 if (!tg3_flag(tp, 5705_PLUS))
8645 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8647 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8648 tw32(SNDDATAC_MODE,
8649 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8650 else
8651 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8653 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8654 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8655 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8656 if (tg3_flag(tp, LRG_PROD_RING_CAP))
8657 val |= RCVDBDI_MODE_LRG_RING_SZ;
8658 tw32(RCVDBDI_MODE, val);
8659 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8660 if (tg3_flag(tp, HW_TSO_1) ||
8661 tg3_flag(tp, HW_TSO_2) ||
8662 tg3_flag(tp, HW_TSO_3))
8663 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8664 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8665 if (tg3_flag(tp, ENABLE_TSS))
8666 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8667 tw32(SNDBDI_MODE, val);
8668 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8670 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8671 err = tg3_load_5701_a0_firmware_fix(tp);
8672 if (err)
8673 return err;
8676 if (tg3_flag(tp, TSO_CAPABLE)) {
8677 err = tg3_load_tso_firmware(tp);
8678 if (err)
8679 return err;
8682 tp->tx_mode = TX_MODE_ENABLE;
8684 if (tg3_flag(tp, 5755_PLUS) ||
8685 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8686 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8688 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8689 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8690 tp->tx_mode &= ~val;
8691 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8694 tw32_f(MAC_TX_MODE, tp->tx_mode);
8695 udelay(100);
8697 if (tg3_flag(tp, ENABLE_RSS)) {
8698 u32 reg = MAC_RSS_INDIR_TBL_0;
8699 u8 *ent = (u8 *)&val;
8701 /* Setup the indirection table */
8702 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8703 int idx = i % sizeof(val);
8705 ent[idx] = i % (tp->irq_cnt - 1);
8706 if (idx == sizeof(val) - 1) {
8707 tw32(reg, val);
8708 reg += 4;
8712 /* Setup the "secret" hash key. */
8713 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8714 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8715 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8716 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8717 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8718 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8719 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8720 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8721 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8722 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
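/* Worked example for the indirection table above (illustrative):
 * the table holds TG3_RSS_INDIR_TBL_SIZE one-byte entries packed
 * four per 32-bit register.  Each entry names an rx ring, cycling
 * through 0 .. irq_cnt - 2 (vector 0 is reserved for link events and
 * the like).  With irq_cnt == 5, the entries run 0, 1, 2, 3, 0, 1,
 * ... so the hash LSBs spread flows evenly over the four rx rings.
 */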
8725 tp->rx_mode = RX_MODE_ENABLE;
8726 if (tg3_flag(tp, 5755_PLUS))
8727 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8729 if (tg3_flag(tp, ENABLE_RSS))
8730 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8731 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8732 RX_MODE_RSS_IPV6_HASH_EN |
8733 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8734 RX_MODE_RSS_IPV4_HASH_EN |
8735 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8737 tw32_f(MAC_RX_MODE, tp->rx_mode);
8738 udelay(10);
8740 tw32(MAC_LED_CTRL, tp->led_ctrl);
8742 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8743 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8744 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8745 udelay(10);
8747 tw32_f(MAC_RX_MODE, tp->rx_mode);
8748 udelay(10);
8750 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8751 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8752 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8753 /* Set drive transmission level to 1.2V */
8754 /* only if the signal pre-emphasis bit is not set */
8755 val = tr32(MAC_SERDES_CFG);
8756 val &= 0xfffff000;
8757 val |= 0x880;
8758 tw32(MAC_SERDES_CFG, val);
8760 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8761 tw32(MAC_SERDES_CFG, 0x616000);
8764 /* Prevent chip from dropping frames when flow control
8765 * is enabled.
8767 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8768 val = 1;
8769 else
8770 val = 2;
8771 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8773 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8774 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8775 /* Use hardware link auto-negotiation */
8776 tg3_flag_set(tp, HW_AUTONEG);
8779 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8780 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
8781 u32 tmp;
8783 tmp = tr32(SERDES_RX_CTRL);
8784 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8785 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8786 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8787 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8790 if (!tg3_flag(tp, USE_PHYLIB)) {
8791 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8792 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8793 tp->link_config.speed = tp->link_config.orig_speed;
8794 tp->link_config.duplex = tp->link_config.orig_duplex;
8795 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8798 err = tg3_setup_phy(tp, 0);
8799 if (err)
8800 return err;
8802 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8803 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8804 u32 tmp;
8806 /* Clear CRC stats. */
8807 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8808 tg3_writephy(tp, MII_TG3_TEST1,
8809 tmp | MII_TG3_TEST1_CRC_EN);
8810 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8815 __tg3_set_rx_mode(tp->dev);
8817 /* Initialize receive rules. */
8818 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8819 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8820 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8821 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8823 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8824 limit = 8;
8825 else
8826 limit = 16;
8827 if (tg3_flag(tp, ENABLE_ASF))
8828 limit -= 4;
8829 switch (limit) {
8830 case 16:
8831 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8832 case 15:
8833 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8834 case 14:
8835 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8836 case 13:
8837 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8838 case 12:
8839 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8840 case 11:
8841 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8842 case 10:
8843 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8844 case 9:
8845 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8846 case 8:
8847 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8848 case 7:
8849 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8850 case 6:
8851 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8852 case 5:
8853 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8854 case 4:
8855 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8856 case 3:
8857 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8858 case 2:
8859 case 1:
8861 default:
8862 break;
8865 if (tg3_flag(tp, ENABLE_APE))
8866 /* Write our heartbeat update interval to APE. */
8867 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8868 APE_HOST_HEARTBEAT_INT_DISABLE);
8870 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8872 return 0;
8875 /* Called at device open time to get the chip ready for
8876 * packet processing. Invoked with tp->lock held.
8878 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8880 tg3_switch_clocks(tp);
8882 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8884 return tg3_reset_hw(tp, reset_phy);
8887 #define TG3_STAT_ADD32(PSTAT, REG) \
8888 do { u32 __val = tr32(REG); \
8889 (PSTAT)->low += __val; \
8890 if ((PSTAT)->low < __val) \
8891 (PSTAT)->high += 1; \
8892 } while (0)
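/* Illustrative note: the hardware counters read via tr32() are only
 * 32 bits wide, so the macro above accumulates them into a 64-bit
 * software counter and detects wraparound by noticing that the
 * unsigned addition overflowed.  E.g. low = 0xffffff00 plus
 * __val = 0x200 yields low = 0x100, which is < __val, so high is
 * incremented by one.
 */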
8894 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8896 struct tg3_hw_stats *sp = tp->hw_stats;
8898 if (!netif_carrier_ok(tp->dev))
8899 return;
8901 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8902 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8903 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8904 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8905 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8906 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8907 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8908 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8909 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8910 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8911 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8912 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8913 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8915 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8916 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8917 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8918 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8919 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8920 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8921 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8922 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8923 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8924 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8925 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8926 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8927 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8928 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8930 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8931 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
8932 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8933 } else {
8934 u32 val = tr32(HOSTCC_FLOW_ATTN);
8935 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8936 if (val) {
8937 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8938 sp->rx_discards.low += val;
8939 if (sp->rx_discards.low < val)
8940 sp->rx_discards.high += 1;
8942 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8944 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8947 static void tg3_timer(unsigned long __opaque)
8949 struct tg3 *tp = (struct tg3 *) __opaque;
8951 if (tp->irq_sync)
8952 goto restart_timer;
8954 spin_lock(&tp->lock);
8956 if (!tg3_flag(tp, TAGGED_STATUS)) {
8957 /* All of this garbage is because, when using non-tagged
8958 * IRQ status, the mailbox/status_block protocol the chip
8959 * uses with the cpu is race prone.
8961 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8962 tw32(GRC_LOCAL_CTRL,
8963 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8964 } else {
8965 tw32(HOSTCC_MODE, tp->coalesce_mode |
8966 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8969 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8970 tg3_flag_set(tp, RESTART_TIMER);
8971 spin_unlock(&tp->lock);
8972 schedule_work(&tp->reset_task);
8973 return;
8977 /* This part only runs once per second. */
8978 if (!--tp->timer_counter) {
8979 if (tg3_flag(tp, 5705_PLUS))
8980 tg3_periodic_fetch_stats(tp);
8982 if (tp->setlpicnt && !--tp->setlpicnt) {
8983 u32 val = tr32(TG3_CPMU_EEE_MODE);
8984 tw32(TG3_CPMU_EEE_MODE,
8985 val | TG3_CPMU_EEEMD_LPI_ENABLE);
8988 if (tg3_flag(tp, USE_LINKCHG_REG)) {
8989 u32 mac_stat;
8990 int phy_event;
8992 mac_stat = tr32(MAC_STATUS);
8994 phy_event = 0;
8995 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
8996 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8997 phy_event = 1;
8998 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8999 phy_event = 1;
9001 if (phy_event)
9002 tg3_setup_phy(tp, 0);
9003 } else if (tg3_flag(tp, POLL_SERDES)) {
9004 u32 mac_stat = tr32(MAC_STATUS);
9005 int need_setup = 0;
9007 if (netif_carrier_ok(tp->dev) &&
9008 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9009 need_setup = 1;
9011 if (!netif_carrier_ok(tp->dev) &&
9012 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9013 MAC_STATUS_SIGNAL_DET))) {
9014 need_setup = 1;
9016 if (need_setup) {
9017 if (!tp->serdes_counter) {
9018 tw32_f(MAC_MODE,
9019 (tp->mac_mode &
9020 ~MAC_MODE_PORT_MODE_MASK));
9021 udelay(40);
9022 tw32_f(MAC_MODE, tp->mac_mode);
9023 udelay(40);
9025 tg3_setup_phy(tp, 0);
9027 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9028 tg3_flag(tp, 5780_CLASS)) {
9029 tg3_serdes_parallel_detect(tp);
9032 tp->timer_counter = tp->timer_multiplier;
9035 /* Heartbeat is only sent once every 2 seconds.
9037 * The heartbeat is to tell the ASF firmware that the host
9038 * driver is still alive. In the event that the OS crashes,
9039 * ASF needs to reset the hardware to free up the FIFO space
9040 * that may be filled with rx packets destined for the host.
9041 * If the FIFO is full, ASF will no longer function properly.
9043 * Unintended resets have been reported on real time kernels
9044 * where the timer doesn't run on time. Netpoll will also have
9045 * the same problem.
9047 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9048 * to check the ring condition when the heartbeat is expiring
9049 * before doing the reset. This will prevent most unintended
9050 * resets.
9052 if (!--tp->asf_counter) {
9053 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9054 tg3_wait_for_event_ack(tp);
9056 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9057 FWCMD_NICDRV_ALIVE3);
9058 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9059 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9060 TG3_FW_UPDATE_TIMEOUT_SEC);
9062 tg3_generate_fw_event(tp);
9064 tp->asf_counter = tp->asf_multiplier;
9067 spin_unlock(&tp->lock);
9069 restart_timer:
9070 tp->timer.expires = jiffies + tp->timer_offset;
9071 add_timer(&tp->timer);
9074 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9076 irq_handler_t fn;
9077 unsigned long flags;
9078 char *name;
9079 struct tg3_napi *tnapi = &tp->napi[irq_num];
9081 if (tp->irq_cnt == 1)
9082 name = tp->dev->name;
9083 else {
9084 name = &tnapi->irq_lbl[0];
9085 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9086 name[IFNAMSIZ-1] = 0;
9089 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9090 fn = tg3_msi;
9091 if (tg3_flag(tp, 1SHOT_MSI))
9092 fn = tg3_msi_1shot;
9093 flags = 0;
9094 } else {
9095 fn = tg3_interrupt;
9096 if (tg3_flag(tp, TAGGED_STATUS))
9097 fn = tg3_interrupt_tagged;
9098 flags = IRQF_SHARED;
9101 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9104 static int tg3_test_interrupt(struct tg3 *tp)
9106 struct tg3_napi *tnapi = &tp->napi[0];
9107 struct net_device *dev = tp->dev;
9108 int err, i, intr_ok = 0;
9109 u32 val;
9111 if (!netif_running(dev))
9112 return -ENODEV;
9114 tg3_disable_ints(tp);
9116 free_irq(tnapi->irq_vec, tnapi);
9119 * Turn off MSI one-shot mode. Otherwise this test has no
9120 * observable way to know whether the interrupt was delivered.
9122 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9123 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9124 tw32(MSGINT_MODE, val);
9127 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9128 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9129 if (err)
9130 return err;
9132 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9133 tg3_enable_ints(tp);
9135 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9136 tnapi->coal_now);
9138 for (i = 0; i < 5; i++) {
9139 u32 int_mbox, misc_host_ctrl;
9141 int_mbox = tr32_mailbox(tnapi->int_mbox);
9142 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9144 if ((int_mbox != 0) ||
9145 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9146 intr_ok = 1;
9147 break;
9150 msleep(10);
9153 tg3_disable_ints(tp);
9155 free_irq(tnapi->irq_vec, tnapi);
9157 err = tg3_request_irq(tp, 0);
9159 if (err)
9160 return err;
9162 if (intr_ok) {
9163 /* Re-enable MSI one-shot mode. */
9164 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9165 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9166 tw32(MSGINT_MODE, val);
9168 return 0;
9171 return -EIO;
9174 /* Returns 0 if the MSI test succeeds, or if the test fails but INTx
9175 * mode is successfully restored
9177 static int tg3_test_msi(struct tg3 *tp)
9179 int err;
9180 u16 pci_cmd;
9182 if (!tg3_flag(tp, USING_MSI))
9183 return 0;
9185 /* Turn off SERR reporting in case MSI terminates with Master
9186 * Abort.
9188 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9189 pci_write_config_word(tp->pdev, PCI_COMMAND,
9190 pci_cmd & ~PCI_COMMAND_SERR);
9192 err = tg3_test_interrupt(tp);
9194 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9196 if (!err)
9197 return 0;
9199 /* other failures */
9200 if (err != -EIO)
9201 return err;
9203 /* MSI test failed, go back to INTx mode */
9204 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9205 "to INTx mode. Please report this failure to the PCI "
9206 "maintainer and include system chipset information\n");
9208 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9210 pci_disable_msi(tp->pdev);
9212 tg3_flag_clear(tp, USING_MSI);
9213 tp->napi[0].irq_vec = tp->pdev->irq;
9215 err = tg3_request_irq(tp, 0);
9216 if (err)
9217 return err;
9219 /* Need to reset the chip because the MSI cycle may have terminated
9220 * with Master Abort.
9222 tg3_full_lock(tp, 1);
9224 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9225 err = tg3_init_hw(tp, 1);
9227 tg3_full_unlock(tp);
9229 if (err)
9230 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9232 return err;
9235 static int tg3_request_firmware(struct tg3 *tp)
9237 const __be32 *fw_data;
9239 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9240 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9241 tp->fw_needed);
9242 return -ENOENT;
9245 fw_data = (void *)tp->fw->data;
9247 /* Firmware blob starts with version numbers, followed by
9248 * start address and _full_ length including BSS sections
9249 * (which must be longer than the actual data, of course).
9252 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
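/* Layout sketch implied by the comment above and the size check
 * below: fw_data[0] holds the version, fw_data[1] the start address
 * and fw_data[2] the full image length, so the 12-byte header is
 * what the (tp->fw->size - 12) comparison accounts for.
 */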
9253 if (tp->fw_len < (tp->fw->size - 12)) {
9254 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9255 tp->fw_len, tp->fw_needed);
9256 release_firmware(tp->fw);
9257 tp->fw = NULL;
9258 return -EINVAL;
9261 /* We no longer need firmware; we have it. */
9262 tp->fw_needed = NULL;
9263 return 0;
9266 static bool tg3_enable_msix(struct tg3 *tp)
9268 int i, rc, cpus = num_online_cpus();
9269 struct msix_entry msix_ent[tp->irq_max];
9271 if (cpus == 1)
9272 /* Just fall back to the simpler MSI mode. */
9273 return false;
9276 * We want as many rx rings enabled as there are cpus.
9277 * The first MSIX vector only deals with link interrupts, etc,
9278 * so we add one to the number of vectors we are requesting.
9280 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9282 for (i = 0; i < tp->irq_max; i++) {
9283 msix_ent[i].entry = i;
9284 msix_ent[i].vector = 0;
9287 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9288 if (rc < 0) {
9289 return false;
9290 } else if (rc != 0) {
9291 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9292 return false;
9293 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9294 tp->irq_cnt, rc);
9295 tp->irq_cnt = rc;
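/* Note on the retry above (semantics of the legacy pci_enable_msix()):
 * it returns 0 on success, a negative errno on failure, and a
 * positive count when fewer vectors are available than requested.
 * In that last case the driver retries with exactly that count and
 * shrinks tp->irq_cnt to match.
 */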
9298 for (i = 0; i < tp->irq_max; i++)
9299 tp->napi[i].irq_vec = msix_ent[i].vector;
9301 netif_set_real_num_tx_queues(tp->dev, 1);
9302 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9303 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9304 pci_disable_msix(tp->pdev);
9305 return false;
9308 if (tp->irq_cnt > 1) {
9309 tg3_flag_set(tp, ENABLE_RSS);
9311 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9312 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9313 tg3_flag_set(tp, ENABLE_TSS);
9314 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9318 return true;
9321 static void tg3_ints_init(struct tg3 *tp)
9323 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9324 !tg3_flag(tp, TAGGED_STATUS)) {
9325 /* All MSI supporting chips should support tagged
9326 * status. Assert that this is the case.
9328 netdev_warn(tp->dev,
9329 "MSI without TAGGED_STATUS? Not using MSI\n");
9330 goto defcfg;
9333 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9334 tg3_flag_set(tp, USING_MSIX);
9335 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9336 tg3_flag_set(tp, USING_MSI);
9338 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9339 u32 msi_mode = tr32(MSGINT_MODE);
9340 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9341 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9342 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9344 defcfg:
9345 if (!tg3_flag(tp, USING_MSIX)) {
9346 tp->irq_cnt = 1;
9347 tp->napi[0].irq_vec = tp->pdev->irq;
9348 netif_set_real_num_tx_queues(tp->dev, 1);
9349 netif_set_real_num_rx_queues(tp->dev, 1);
9353 static void tg3_ints_fini(struct tg3 *tp)
9355 if (tg3_flag(tp, USING_MSIX))
9356 pci_disable_msix(tp->pdev);
9357 else if (tg3_flag(tp, USING_MSI))
9358 pci_disable_msi(tp->pdev);
9359 tg3_flag_clear(tp, USING_MSI);
9360 tg3_flag_clear(tp, USING_MSIX);
9361 tg3_flag_clear(tp, ENABLE_RSS);
9362 tg3_flag_clear(tp, ENABLE_TSS);
9365 static int tg3_open(struct net_device *dev)
9367 struct tg3 *tp = netdev_priv(dev);
9368 int i, err;
9370 if (tp->fw_needed) {
9371 err = tg3_request_firmware(tp);
9372 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9373 if (err)
9374 return err;
9375 } else if (err) {
9376 netdev_warn(tp->dev, "TSO capability disabled\n");
9377 tg3_flag_clear(tp, TSO_CAPABLE);
9378 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9379 netdev_notice(tp->dev, "TSO capability restored\n");
9380 tg3_flag_set(tp, TSO_CAPABLE);
9384 netif_carrier_off(tp->dev);
9386 err = tg3_power_up(tp);
9387 if (err)
9388 return err;
9390 tg3_full_lock(tp, 0);
9392 tg3_disable_ints(tp);
9393 tg3_flag_clear(tp, INIT_COMPLETE);
9395 tg3_full_unlock(tp);
9398 * Setup interrupts first so we know how
9399 * many NAPI resources to allocate
9401 tg3_ints_init(tp);
9403 /* The placement of this call is tied
9404 * to the setup and use of Host TX descriptors.
9406 err = tg3_alloc_consistent(tp);
9407 if (err)
9408 goto err_out1;
9410 tg3_napi_init(tp);
9412 tg3_napi_enable(tp);
9414 for (i = 0; i < tp->irq_cnt; i++) {
9415 struct tg3_napi *tnapi = &tp->napi[i];
9416 err = tg3_request_irq(tp, i);
9417 if (err) {
9418 for (i--; i >= 0; i--)
9419 free_irq(tnapi->irq_vec, tnapi);
9420 break;
9424 if (err)
9425 goto err_out2;
9427 tg3_full_lock(tp, 0);
9429 err = tg3_init_hw(tp, 1);
9430 if (err) {
9431 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9432 tg3_free_rings(tp);
9433 } else {
9434 if (tg3_flag(tp, TAGGED_STATUS))
9435 tp->timer_offset = HZ;
9436 else
9437 tp->timer_offset = HZ / 10;
9439 BUG_ON(tp->timer_offset > HZ);
9440 tp->timer_counter = tp->timer_multiplier =
9441 (HZ / tp->timer_offset);
9442 tp->asf_counter = tp->asf_multiplier =
9443 ((HZ / tp->timer_offset) * 2);
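/* Worked example (following the arithmetic above): with tagged
 * status, timer_offset == HZ, so the timer fires once per second and
 * timer_counter == 1; otherwise timer_offset == HZ / 10, the timer
 * fires ten times per second and timer_counter == 10.  Either way
 * the once-per-second work in tg3_timer() runs at 1 Hz, and the ASF
 * heartbeat, with its doubled multiplier, runs every 2 seconds.
 */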
9445 init_timer(&tp->timer);
9446 tp->timer.expires = jiffies + tp->timer_offset;
9447 tp->timer.data = (unsigned long) tp;
9448 tp->timer.function = tg3_timer;
9451 tg3_full_unlock(tp);
9453 if (err)
9454 goto err_out3;
9456 if (tg3_flag(tp, USING_MSI)) {
9457 err = tg3_test_msi(tp);
9459 if (err) {
9460 tg3_full_lock(tp, 0);
9461 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9462 tg3_free_rings(tp);
9463 tg3_full_unlock(tp);
9465 goto err_out2;
9468 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9469 u32 val = tr32(PCIE_TRANSACTION_CFG);
9471 tw32(PCIE_TRANSACTION_CFG,
9472 val | PCIE_TRANS_CFG_1SHOT_MSI);
9476 tg3_phy_start(tp);
9478 tg3_full_lock(tp, 0);
9480 add_timer(&tp->timer);
9481 tg3_flag_set(tp, INIT_COMPLETE);
9482 tg3_enable_ints(tp);
9484 tg3_full_unlock(tp);
9486 netif_tx_start_all_queues(dev);
9488 return 0;
9490 err_out3:
9491 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9492 struct tg3_napi *tnapi = &tp->napi[i];
9493 free_irq(tnapi->irq_vec, tnapi);
9496 err_out2:
9497 tg3_napi_disable(tp);
9498 tg3_napi_fini(tp);
9499 tg3_free_consistent(tp);
9501 err_out1:
9502 tg3_ints_fini(tp);
9503 return err;
9506 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9507 struct rtnl_link_stats64 *);
9508 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9510 static int tg3_close(struct net_device *dev)
9512 int i;
9513 struct tg3 *tp = netdev_priv(dev);
9515 tg3_napi_disable(tp);
9516 cancel_work_sync(&tp->reset_task);
9518 netif_tx_stop_all_queues(dev);
9520 del_timer_sync(&tp->timer);
9522 tg3_phy_stop(tp);
9524 tg3_full_lock(tp, 1);
9526 tg3_disable_ints(tp);
9528 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9529 tg3_free_rings(tp);
9530 tg3_flag_clear(tp, INIT_COMPLETE);
9532 tg3_full_unlock(tp);
9534 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9535 struct tg3_napi *tnapi = &tp->napi[i];
9536 free_irq(tnapi->irq_vec, tnapi);
9539 tg3_ints_fini(tp);
9541 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9543 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9544 sizeof(tp->estats_prev));
9546 tg3_napi_fini(tp);
9548 tg3_free_consistent(tp);
9550 tg3_power_down(tp);
9552 netif_carrier_off(tp->dev);
9554 return 0;
9557 static inline u64 get_stat64(tg3_stat64_t *val)
9559 return ((u64)val->high << 32) | ((u64)val->low);
9562 static u64 calc_crc_errors(struct tg3 *tp)
9564 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9566 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9567 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9568 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9569 u32 val;
9571 spin_lock_bh(&tp->lock);
9572 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9573 tg3_writephy(tp, MII_TG3_TEST1,
9574 val | MII_TG3_TEST1_CRC_EN);
9575 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9576 } else
9577 val = 0;
9578 spin_unlock_bh(&tp->lock);
9580 tp->phy_crc_errors += val;
9582 return tp->phy_crc_errors;
9585 return get_stat64(&hw_stats->rx_fcs_errors);
9588 #define ESTAT_ADD(member) \
9589 estats->member = old_estats->member + \
9590 get_stat64(&hw_stats->member)
9592 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9594 struct tg3_ethtool_stats *estats = &tp->estats;
9595 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9596 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9598 if (!hw_stats)
9599 return old_estats;
9601 ESTAT_ADD(rx_octets);
9602 ESTAT_ADD(rx_fragments);
9603 ESTAT_ADD(rx_ucast_packets);
9604 ESTAT_ADD(rx_mcast_packets);
9605 ESTAT_ADD(rx_bcast_packets);
9606 ESTAT_ADD(rx_fcs_errors);
9607 ESTAT_ADD(rx_align_errors);
9608 ESTAT_ADD(rx_xon_pause_rcvd);
9609 ESTAT_ADD(rx_xoff_pause_rcvd);
9610 ESTAT_ADD(rx_mac_ctrl_rcvd);
9611 ESTAT_ADD(rx_xoff_entered);
9612 ESTAT_ADD(rx_frame_too_long_errors);
9613 ESTAT_ADD(rx_jabbers);
9614 ESTAT_ADD(rx_undersize_packets);
9615 ESTAT_ADD(rx_in_length_errors);
9616 ESTAT_ADD(rx_out_length_errors);
9617 ESTAT_ADD(rx_64_or_less_octet_packets);
9618 ESTAT_ADD(rx_65_to_127_octet_packets);
9619 ESTAT_ADD(rx_128_to_255_octet_packets);
9620 ESTAT_ADD(rx_256_to_511_octet_packets);
9621 ESTAT_ADD(rx_512_to_1023_octet_packets);
9622 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9623 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9624 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9625 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9626 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9628 ESTAT_ADD(tx_octets);
9629 ESTAT_ADD(tx_collisions);
9630 ESTAT_ADD(tx_xon_sent);
9631 ESTAT_ADD(tx_xoff_sent);
9632 ESTAT_ADD(tx_flow_control);
9633 ESTAT_ADD(tx_mac_errors);
9634 ESTAT_ADD(tx_single_collisions);
9635 ESTAT_ADD(tx_mult_collisions);
9636 ESTAT_ADD(tx_deferred);
9637 ESTAT_ADD(tx_excessive_collisions);
9638 ESTAT_ADD(tx_late_collisions);
9639 ESTAT_ADD(tx_collide_2times);
9640 ESTAT_ADD(tx_collide_3times);
9641 ESTAT_ADD(tx_collide_4times);
9642 ESTAT_ADD(tx_collide_5times);
9643 ESTAT_ADD(tx_collide_6times);
9644 ESTAT_ADD(tx_collide_7times);
9645 ESTAT_ADD(tx_collide_8times);
9646 ESTAT_ADD(tx_collide_9times);
9647 ESTAT_ADD(tx_collide_10times);
9648 ESTAT_ADD(tx_collide_11times);
9649 ESTAT_ADD(tx_collide_12times);
9650 ESTAT_ADD(tx_collide_13times);
9651 ESTAT_ADD(tx_collide_14times);
9652 ESTAT_ADD(tx_collide_15times);
9653 ESTAT_ADD(tx_ucast_packets);
9654 ESTAT_ADD(tx_mcast_packets);
9655 ESTAT_ADD(tx_bcast_packets);
9656 ESTAT_ADD(tx_carrier_sense_errors);
9657 ESTAT_ADD(tx_discards);
9658 ESTAT_ADD(tx_errors);
9660 ESTAT_ADD(dma_writeq_full);
9661 ESTAT_ADD(dma_write_prioq_full);
9662 ESTAT_ADD(rxbds_empty);
9663 ESTAT_ADD(rx_discards);
9664 ESTAT_ADD(rx_errors);
9665 ESTAT_ADD(rx_threshold_hit);
9667 ESTAT_ADD(dma_readq_full);
9668 ESTAT_ADD(dma_read_prioq_full);
9669 ESTAT_ADD(tx_comp_queue_full);
9671 ESTAT_ADD(ring_set_send_prod_index);
9672 ESTAT_ADD(ring_status_update);
9673 ESTAT_ADD(nic_irqs);
9674 ESTAT_ADD(nic_avoided_irqs);
9675 ESTAT_ADD(nic_tx_threshold_hit);
9677 return estats;
9680 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9681 struct rtnl_link_stats64 *stats)
9683 struct tg3 *tp = netdev_priv(dev);
9684 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9685 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9687 if (!hw_stats)
9688 return old_stats;
9690 stats->rx_packets = old_stats->rx_packets +
9691 get_stat64(&hw_stats->rx_ucast_packets) +
9692 get_stat64(&hw_stats->rx_mcast_packets) +
9693 get_stat64(&hw_stats->rx_bcast_packets);
9695 stats->tx_packets = old_stats->tx_packets +
9696 get_stat64(&hw_stats->tx_ucast_packets) +
9697 get_stat64(&hw_stats->tx_mcast_packets) +
9698 get_stat64(&hw_stats->tx_bcast_packets);
9700 stats->rx_bytes = old_stats->rx_bytes +
9701 get_stat64(&hw_stats->rx_octets);
9702 stats->tx_bytes = old_stats->tx_bytes +
9703 get_stat64(&hw_stats->tx_octets);
9705 stats->rx_errors = old_stats->rx_errors +
9706 get_stat64(&hw_stats->rx_errors);
9707 stats->tx_errors = old_stats->tx_errors +
9708 get_stat64(&hw_stats->tx_errors) +
9709 get_stat64(&hw_stats->tx_mac_errors) +
9710 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9711 get_stat64(&hw_stats->tx_discards);
9713 stats->multicast = old_stats->multicast +
9714 get_stat64(&hw_stats->rx_mcast_packets);
9715 stats->collisions = old_stats->collisions +
9716 get_stat64(&hw_stats->tx_collisions);
9718 stats->rx_length_errors = old_stats->rx_length_errors +
9719 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9720 get_stat64(&hw_stats->rx_undersize_packets);
9722 stats->rx_over_errors = old_stats->rx_over_errors +
9723 get_stat64(&hw_stats->rxbds_empty);
9724 stats->rx_frame_errors = old_stats->rx_frame_errors +
9725 get_stat64(&hw_stats->rx_align_errors);
9726 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9727 get_stat64(&hw_stats->tx_discards);
9728 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9729 get_stat64(&hw_stats->tx_carrier_sense_errors);
9731 stats->rx_crc_errors = old_stats->rx_crc_errors +
9732 calc_crc_errors(tp);
9734 stats->rx_missed_errors = old_stats->rx_missed_errors +
9735 get_stat64(&hw_stats->rx_discards);
9737 stats->rx_dropped = tp->rx_dropped;
9739 return stats;
9742 static inline u32 calc_crc(unsigned char *buf, int len)
9744 u32 reg;
9745 u32 tmp;
9746 int j, k;
9748 reg = 0xffffffff;
9750 for (j = 0; j < len; j++) {
9751 reg ^= buf[j];
9753 for (k = 0; k < 8; k++) {
9754 tmp = reg & 0x01;
9756 reg >>= 1;
9758 if (tmp)
9759 reg ^= 0xedb88320;
9763 return ~reg;
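/* Illustrative note: this is the standard bit-reflected CRC-32
 * (polynomial 0xedb88320) computed one bit at a time; it performs
 * the same computation as ether_crc_le() from <linux/crc32.h> apart
 * from the final inversion here, and the open-coded copy keeps the
 * multicast filter below self-contained.
 */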
9766 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9768 /* accept or reject all multicast frames */
9769 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9770 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9771 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9772 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9775 static void __tg3_set_rx_mode(struct net_device *dev)
9777 struct tg3 *tp = netdev_priv(dev);
9778 u32 rx_mode;
9780 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9781 RX_MODE_KEEP_VLAN_TAG);
9783 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9784 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9785 * flag clear.
9787 if (!tg3_flag(tp, ENABLE_ASF))
9788 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9789 #endif
9791 if (dev->flags & IFF_PROMISC) {
9792 /* Promiscuous mode. */
9793 rx_mode |= RX_MODE_PROMISC;
9794 } else if (dev->flags & IFF_ALLMULTI) {
9795 /* Accept all multicast. */
9796 tg3_set_multi(tp, 1);
9797 } else if (netdev_mc_empty(dev)) {
9798 /* Reject all multicast. */
9799 tg3_set_multi(tp, 0);
9800 } else {
9801 /* Accept one or more multicast(s). */
9802 struct netdev_hw_addr *ha;
9803 u32 mc_filter[4] = { 0, };
9804 u32 regidx;
9805 u32 bit;
9806 u32 crc;
9808 netdev_for_each_mc_addr(ha, dev) {
9809 crc = calc_crc(ha->addr, ETH_ALEN);
9810 bit = ~crc & 0x7f;
9811 regidx = (bit & 0x60) >> 5;
9812 bit &= 0x1f;
9813 mc_filter[regidx] |= (1 << bit);
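/* Sketch of the indexing above: ~crc undoes the final inversion in
 * calc_crc(), the low 7 bits select one of 128 hash-filter bits,
 * bits 6:5 pick one of the four 32-bit MAC_HASH registers, and bits
 * 4:0 pick the bit within that register.
 */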
9816 tw32(MAC_HASH_REG_0, mc_filter[0]);
9817 tw32(MAC_HASH_REG_1, mc_filter[1]);
9818 tw32(MAC_HASH_REG_2, mc_filter[2]);
9819 tw32(MAC_HASH_REG_3, mc_filter[3]);
9822 if (rx_mode != tp->rx_mode) {
9823 tp->rx_mode = rx_mode;
9824 tw32_f(MAC_RX_MODE, rx_mode);
9825 udelay(10);
9829 static void tg3_set_rx_mode(struct net_device *dev)
9831 struct tg3 *tp = netdev_priv(dev);
9833 if (!netif_running(dev))
9834 return;
9836 tg3_full_lock(tp, 0);
9837 __tg3_set_rx_mode(dev);
9838 tg3_full_unlock(tp);
9841 static int tg3_get_regs_len(struct net_device *dev)
9843 return TG3_REG_BLK_SIZE;
9846 static void tg3_get_regs(struct net_device *dev,
9847 struct ethtool_regs *regs, void *_p)
9849 struct tg3 *tp = netdev_priv(dev);
9851 regs->version = 0;
9853 memset(_p, 0, TG3_REG_BLK_SIZE);
9855 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9856 return;
9858 tg3_full_lock(tp, 0);
9860 tg3_dump_legacy_regs(tp, (u32 *)_p);
9862 tg3_full_unlock(tp);
9865 static int tg3_get_eeprom_len(struct net_device *dev)
9867 struct tg3 *tp = netdev_priv(dev);
9869 return tp->nvram_size;
9872 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9874 struct tg3 *tp = netdev_priv(dev);
9875 int ret;
9876 u8 *pd;
9877 u32 i, offset, len, b_offset, b_count;
9878 __be32 val;
9880 if (tg3_flag(tp, NO_NVRAM))
9881 return -EINVAL;
9883 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9884 return -EAGAIN;
9886 offset = eeprom->offset;
9887 len = eeprom->len;
9888 eeprom->len = 0;
9890 eeprom->magic = TG3_EEPROM_MAGIC;
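/* Read plan for the unaligned case (summarising the three phases
 * below): first a partial word to reach the next 4-byte boundary,
 * then whole 4-byte words, then a final partial word for any tail
 * bytes.
 */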
9892 if (offset & 3) {
9893 /* adjustments to start on required 4 byte boundary */
9894 b_offset = offset & 3;
9895 b_count = 4 - b_offset;
9896 if (b_count > len) {
9897 /* i.e. offset=1 len=2 */
9898 b_count = len;
9900 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9901 if (ret)
9902 return ret;
9903 memcpy(data, ((char *)&val) + b_offset, b_count);
9904 len -= b_count;
9905 offset += b_count;
9906 eeprom->len += b_count;
9909 /* read bytes up to the last 4 byte boundary */
9910 pd = &data[eeprom->len];
9911 for (i = 0; i < (len - (len & 3)); i += 4) {
9912 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9913 if (ret) {
9914 eeprom->len += i;
9915 return ret;
9917 memcpy(pd + i, &val, 4);
9919 eeprom->len += i;
9921 if (len & 3) {
9922 /* read last bytes not ending on 4 byte boundary */
9923 pd = &data[eeprom->len];
9924 b_count = len & 3;
9925 b_offset = offset + len - b_count;
9926 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9927 if (ret)
9928 return ret;
9929 memcpy(pd, &val, b_count);
9930 eeprom->len += b_count;
9932 return 0;
9935 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9937 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9939 struct tg3 *tp = netdev_priv(dev);
9940 int ret;
9941 u32 offset, len, b_offset, odd_len;
9942 u8 *buf;
9943 __be32 start, end;
9945 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9946 return -EAGAIN;
9948 if (tg3_flag(tp, NO_NVRAM) ||
9949 eeprom->magic != TG3_EEPROM_MAGIC)
9950 return -EINVAL;
9952 offset = eeprom->offset;
9953 len = eeprom->len;
9955 if ((b_offset = (offset & 3))) {
9956 /* adjustments to start on required 4 byte boundary */
9957 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9958 if (ret)
9959 return ret;
9960 len += b_offset;
9961 offset &= ~3;
9962 if (len < 4)
9963 len = 4;
9966 odd_len = 0;
9967 if (len & 3) {
9968 /* adjustments to end on required 4 byte boundary */
9969 odd_len = 1;
9970 len = (len + 3) & ~3;
9971 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9972 if (ret)
9973 return ret;
9976 buf = data;
9977 if (b_offset || odd_len) {
9978 buf = kmalloc(len, GFP_KERNEL);
9979 if (!buf)
9980 return -ENOMEM;
9981 if (b_offset)
9982 memcpy(buf, &start, 4);
9983 if (odd_len)
9984 memcpy(buf+len-4, &end, 4);
9985 memcpy(buf + b_offset, data, eeprom->len);
9988 ret = tg3_nvram_write_block(tp, offset, len, buf);
9990 if (buf != data)
9991 kfree(buf);
9993 return ret;
9996 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9998 struct tg3 *tp = netdev_priv(dev);
10000 if (tg3_flag(tp, USE_PHYLIB)) {
10001 struct phy_device *phydev;
10002 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10003 return -EAGAIN;
10004 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10005 return phy_ethtool_gset(phydev, cmd);
10008 cmd->supported = (SUPPORTED_Autoneg);
10010 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10011 cmd->supported |= (SUPPORTED_1000baseT_Half |
10012 SUPPORTED_1000baseT_Full);
10014 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10015 cmd->supported |= (SUPPORTED_100baseT_Half |
10016 SUPPORTED_100baseT_Full |
10017 SUPPORTED_10baseT_Half |
10018 SUPPORTED_10baseT_Full |
10019 SUPPORTED_TP);
10020 cmd->port = PORT_TP;
10021 } else {
10022 cmd->supported |= SUPPORTED_FIBRE;
10023 cmd->port = PORT_FIBRE;
10026 cmd->advertising = tp->link_config.advertising;
10027 if (netif_running(dev)) {
10028 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10029 cmd->duplex = tp->link_config.active_duplex;
10030 } else {
10031 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10032 cmd->duplex = DUPLEX_INVALID;
10034 cmd->phy_address = tp->phy_addr;
10035 cmd->transceiver = XCVR_INTERNAL;
10036 cmd->autoneg = tp->link_config.autoneg;
10037 cmd->maxtxpkt = 0;
10038 cmd->maxrxpkt = 0;
10039 return 0;
10042 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10044 struct tg3 *tp = netdev_priv(dev);
10045 u32 speed = ethtool_cmd_speed(cmd);
10047 if (tg3_flag(tp, USE_PHYLIB)) {
10048 struct phy_device *phydev;
10049 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10050 return -EAGAIN;
10051 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10052 return phy_ethtool_sset(phydev, cmd);
10055 if (cmd->autoneg != AUTONEG_ENABLE &&
10056 cmd->autoneg != AUTONEG_DISABLE)
10057 return -EINVAL;
10059 if (cmd->autoneg == AUTONEG_DISABLE &&
10060 cmd->duplex != DUPLEX_FULL &&
10061 cmd->duplex != DUPLEX_HALF)
10062 return -EINVAL;
10064 if (cmd->autoneg == AUTONEG_ENABLE) {
10065 u32 mask = ADVERTISED_Autoneg |
10066 ADVERTISED_Pause |
10067 ADVERTISED_Asym_Pause;
10069 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10070 mask |= ADVERTISED_1000baseT_Half |
10071 ADVERTISED_1000baseT_Full;
10073 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10074 mask |= ADVERTISED_100baseT_Half |
10075 ADVERTISED_100baseT_Full |
10076 ADVERTISED_10baseT_Half |
10077 ADVERTISED_10baseT_Full |
10078 ADVERTISED_TP;
10079 else
10080 mask |= ADVERTISED_FIBRE;
10082 if (cmd->advertising & ~mask)
10083 return -EINVAL;
10085 mask &= (ADVERTISED_1000baseT_Half |
10086 ADVERTISED_1000baseT_Full |
10087 ADVERTISED_100baseT_Half |
10088 ADVERTISED_100baseT_Full |
10089 ADVERTISED_10baseT_Half |
10090 ADVERTISED_10baseT_Full);
10092 cmd->advertising &= mask;
10093 } else {
10094 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10095 if (speed != SPEED_1000)
10096 return -EINVAL;
10098 if (cmd->duplex != DUPLEX_FULL)
10099 return -EINVAL;
10100 } else {
10101 if (speed != SPEED_100 &&
10102 speed != SPEED_10)
10103 return -EINVAL;
10107 tg3_full_lock(tp, 0);
10109 tp->link_config.autoneg = cmd->autoneg;
10110 if (cmd->autoneg == AUTONEG_ENABLE) {
10111 tp->link_config.advertising = (cmd->advertising |
10112 ADVERTISED_Autoneg);
10113 tp->link_config.speed = SPEED_INVALID;
10114 tp->link_config.duplex = DUPLEX_INVALID;
10115 } else {
10116 tp->link_config.advertising = 0;
10117 tp->link_config.speed = speed;
10118 tp->link_config.duplex = cmd->duplex;
10121 tp->link_config.orig_speed = tp->link_config.speed;
10122 tp->link_config.orig_duplex = tp->link_config.duplex;
10123 tp->link_config.orig_autoneg = tp->link_config.autoneg;
10125 if (netif_running(dev))
10126 tg3_setup_phy(tp, 1);
10128 tg3_full_unlock(tp);
10130 return 0;
10133 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10135 struct tg3 *tp = netdev_priv(dev);
10137 strcpy(info->driver, DRV_MODULE_NAME);
10138 strcpy(info->version, DRV_MODULE_VERSION);
10139 strcpy(info->fw_version, tp->fw_ver);
10140 strcpy(info->bus_info, pci_name(tp->pdev));
10143 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10145 struct tg3 *tp = netdev_priv(dev);
10147 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10148 wol->supported = WAKE_MAGIC;
10149 else
10150 wol->supported = 0;
10151 wol->wolopts = 0;
10152 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10153 wol->wolopts = WAKE_MAGIC;
10154 memset(&wol->sopass, 0, sizeof(wol->sopass));
10157 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10159 struct tg3 *tp = netdev_priv(dev);
10160 struct device *dp = &tp->pdev->dev;
10162 if (wol->wolopts & ~WAKE_MAGIC)
10163 return -EINVAL;
10164 if ((wol->wolopts & WAKE_MAGIC) &&
10165 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10166 return -EINVAL;
10168 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10170 spin_lock_bh(&tp->lock);
10171 if (device_may_wakeup(dp))
10172 tg3_flag_set(tp, WOL_ENABLE);
10173 else
10174 tg3_flag_clear(tp, WOL_ENABLE);
10175 spin_unlock_bh(&tp->lock);
10177 return 0;
10180 static u32 tg3_get_msglevel(struct net_device *dev)
10182 struct tg3 *tp = netdev_priv(dev);
10183 return tp->msg_enable;
10186 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10188 struct tg3 *tp = netdev_priv(dev);
10189 tp->msg_enable = value;
10192 static int tg3_nway_reset(struct net_device *dev)
10194 struct tg3 *tp = netdev_priv(dev);
10195 int r;
10197 if (!netif_running(dev))
10198 return -EAGAIN;
10200 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10201 return -EINVAL;
10203 if (tg3_flag(tp, USE_PHYLIB)) {
10204 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10205 return -EAGAIN;
10206 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10207 } else {
10208 u32 bmcr;
10210 spin_lock_bh(&tp->lock);
10211 r = -EINVAL;
10212 tg3_readphy(tp, MII_BMCR, &bmcr);
10213 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10214 ((bmcr & BMCR_ANENABLE) ||
10215 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10216 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10217 BMCR_ANENABLE);
10218 r = 0;
10220 spin_unlock_bh(&tp->lock);
10223 return r;
10226 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10228 struct tg3 *tp = netdev_priv(dev);
10230 ering->rx_max_pending = tp->rx_std_ring_mask;
10231 ering->rx_mini_max_pending = 0;
10232 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10233 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10234 else
10235 ering->rx_jumbo_max_pending = 0;
10237 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10239 ering->rx_pending = tp->rx_pending;
10240 ering->rx_mini_pending = 0;
10241 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10242 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10243 else
10244 ering->rx_jumbo_pending = 0;
10246 ering->tx_pending = tp->napi[0].tx_pending;
10249 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10251 struct tg3 *tp = netdev_priv(dev);
10252 int i, irq_sync = 0, err = 0;
10254 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10255 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10256 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10257 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10258 (tg3_flag(tp, TSO_BUG) &&
10259 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10260 return -EINVAL;
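/* Rationale (assumption): a single skb can consume up to
 * MAX_SKB_FRAGS + 1 tx descriptors, so tx_pending must stay above
 * MAX_SKB_FRAGS; chips needing the TSO_BUG workaround appear to
 * require roughly three times that headroom so a segmented frame
 * can never exhaust the ring.
 */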
10262 if (netif_running(dev)) {
10263 tg3_phy_stop(tp);
10264 tg3_netif_stop(tp);
10265 irq_sync = 1;
10268 tg3_full_lock(tp, irq_sync);
10270 tp->rx_pending = ering->rx_pending;
10272 if (tg3_flag(tp, MAX_RXPEND_64) &&
10273 tp->rx_pending > 63)
10274 tp->rx_pending = 63;
10275 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10277 for (i = 0; i < tp->irq_max; i++)
10278 tp->napi[i].tx_pending = ering->tx_pending;
10280 if (netif_running(dev)) {
10281 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10282 err = tg3_restart_hw(tp, 1);
10283 if (!err)
10284 tg3_netif_start(tp);
10287 tg3_full_unlock(tp);
10289 if (irq_sync && !err)
10290 tg3_phy_start(tp);
10292 return err;
10295 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10297 struct tg3 *tp = netdev_priv(dev);
10299 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10301 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10302 epause->rx_pause = 1;
10303 else
10304 epause->rx_pause = 0;
10306 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10307 epause->tx_pause = 1;
10308 else
10309 epause->tx_pause = 0;
10312 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10314 struct tg3 *tp = netdev_priv(dev);
10315 int err = 0;
10317 if (tg3_flag(tp, USE_PHYLIB)) {
10318 u32 newadv;
10319 struct phy_device *phydev;
10321 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10323 if (!(phydev->supported & SUPPORTED_Pause) ||
10324 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10325 (epause->rx_pause != epause->tx_pause)))
10326 return -EINVAL;
10328 tp->link_config.flowctrl = 0;
10329 if (epause->rx_pause) {
10330 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10332 if (epause->tx_pause) {
10333 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10334 newadv = ADVERTISED_Pause;
10335 } else
10336 newadv = ADVERTISED_Pause |
10337 ADVERTISED_Asym_Pause;
10338 } else if (epause->tx_pause) {
10339 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10340 newadv = ADVERTISED_Asym_Pause;
10341 } else
10342 newadv = 0;
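/* The rx/tx pause to advertisement mapping above is the standard
 * IEEE 802.3 Annex 28B encoding:
 *
 *   rx=1 tx=1  ->  Pause                 (symmetric)
 *   rx=1 tx=0  ->  Pause | Asym_Pause    (symmetric or rx-only)
 *   rx=0 tx=1  ->  Asym_Pause            (tx-only)
 *   rx=0 tx=0  ->  nothing advertised
 */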
10344 if (epause->autoneg)
10345 tg3_flag_set(tp, PAUSE_AUTONEG);
10346 else
10347 tg3_flag_clear(tp, PAUSE_AUTONEG);
10349 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10350 u32 oldadv = phydev->advertising &
10351 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10352 if (oldadv != newadv) {
10353 phydev->advertising &=
10354 ~(ADVERTISED_Pause |
10355 ADVERTISED_Asym_Pause);
10356 phydev->advertising |= newadv;
10357 if (phydev->autoneg) {
10358 /*
10359 * Always renegotiate the link to
10360 * inform our link partner of our
10361 * flow control settings, even if the
10362 * flow control is forced. Let
10363 * tg3_adjust_link() do the final
10364 * flow control setup.
10365 */
10366 return phy_start_aneg(phydev);
10367 }
10368 }
10370 if (!epause->autoneg)
10371 tg3_setup_flow_control(tp, 0, 0);
10372 } else {
10373 tp->link_config.orig_advertising &=
10374 ~(ADVERTISED_Pause |
10375 ADVERTISED_Asym_Pause);
10376 tp->link_config.orig_advertising |= newadv;
10378 } else {
10379 int irq_sync = 0;
10381 if (netif_running(dev)) {
10382 tg3_netif_stop(tp);
10383 irq_sync = 1;
10386 tg3_full_lock(tp, irq_sync);
10388 if (epause->autoneg)
10389 tg3_flag_set(tp, PAUSE_AUTONEG);
10390 else
10391 tg3_flag_clear(tp, PAUSE_AUTONEG);
10392 if (epause->rx_pause)
10393 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10394 else
10395 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10396 if (epause->tx_pause)
10397 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10398 else
10399 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10401 if (netif_running(dev)) {
10402 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10403 err = tg3_restart_hw(tp, 1);
10404 if (!err)
10405 tg3_netif_start(tp);
10408 tg3_full_unlock(tp);
10411 return err;
10414 static int tg3_get_sset_count(struct net_device *dev, int sset)
10416 switch (sset) {
10417 case ETH_SS_TEST:
10418 return TG3_NUM_TEST;
10419 case ETH_SS_STATS:
10420 return TG3_NUM_STATS;
10421 default:
10422 return -EOPNOTSUPP;
10426 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10428 switch (stringset) {
10429 case ETH_SS_STATS:
10430 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10431 break;
10432 case ETH_SS_TEST:
10433 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10434 break;
10435 default:
10436 WARN_ON(1); /* we need a WARN() */
10437 break;
10441 static int tg3_set_phys_id(struct net_device *dev,
10442 enum ethtool_phys_id_state state)
10444 struct tg3 *tp = netdev_priv(dev);
10446 if (!netif_running(tp->dev))
10447 return -EAGAIN;
10449 switch (state) {
10450 case ETHTOOL_ID_ACTIVE:
10451 return 1; /* cycle on/off once per second */
10453 case ETHTOOL_ID_ON:
10454 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10455 LED_CTRL_1000MBPS_ON |
10456 LED_CTRL_100MBPS_ON |
10457 LED_CTRL_10MBPS_ON |
10458 LED_CTRL_TRAFFIC_OVERRIDE |
10459 LED_CTRL_TRAFFIC_BLINK |
10460 LED_CTRL_TRAFFIC_LED);
10461 break;
10463 case ETHTOOL_ID_OFF:
10464 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10465 LED_CTRL_TRAFFIC_OVERRIDE);
10466 break;
10468 case ETHTOOL_ID_INACTIVE:
10469 tw32(MAC_LED_CTRL, tp->led_ctrl);
10470 break;
10473 return 0;
10476 static void tg3_get_ethtool_stats(struct net_device *dev,
10477 struct ethtool_stats *estats, u64 *tmp_stats)
10479 struct tg3 *tp = netdev_priv(dev);
10480 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
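/* This single memcpy assumes struct tg3_ethtool_stats is laid out
 * as TG3_NUM_STATS consecutive u64 counters in the same order as
 * ethtool_stats_keys (assumption inferred from the sizeof() use).
 */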
10483 static __be32 *tg3_vpd_readblock(struct tg3 *tp)
10485 int i;
10486 __be32 *buf;
10487 u32 offset = 0, len = 0;
10488 u32 magic, val;
10490 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10491 return NULL;
10493 if (magic == TG3_EEPROM_MAGIC) {
10494 for (offset = TG3_NVM_DIR_START;
10495 offset < TG3_NVM_DIR_END;
10496 offset += TG3_NVM_DIRENT_SIZE) {
10497 if (tg3_nvram_read(tp, offset, &val))
10498 return NULL;
10500 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10501 TG3_NVM_DIRTYPE_EXTVPD)
10502 break;
10505 if (offset != TG3_NVM_DIR_END) {
10506 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10507 if (tg3_nvram_read(tp, offset + 4, &offset))
10508 return NULL;
10510 offset = tg3_nvram_logical_addr(tp, offset);
10514 if (!offset || !len) {
10515 offset = TG3_NVM_VPD_OFF;
10516 len = TG3_NVM_VPD_LEN;
10519 buf = kmalloc(len, GFP_KERNEL);
10520 if (buf == NULL)
10521 return NULL;
10523 if (magic == TG3_EEPROM_MAGIC) {
10524 for (i = 0; i < len; i += 4) {
10525 /* The data is in little-endian format in NVRAM.
10526 * Use the big-endian read routines to preserve
10527 * the byte order as it exists in NVRAM.
10528 */
10529 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10530 goto error;
10532 } else {
10533 u8 *ptr;
10534 ssize_t cnt;
10535 unsigned int pos = 0;
10537 ptr = (u8 *)&buf[0];
10538 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10539 cnt = pci_read_vpd(tp->pdev, pos,
10540 len - pos, ptr);
10541 if (cnt == -ETIMEDOUT || cnt == -EINTR)
10542 cnt = 0;
10543 else if (cnt < 0)
10544 goto error;
10546 if (pos != len)
10547 goto error;
10550 return buf;
10552 error:
10553 kfree(buf);
10554 return NULL;
10557 #define NVRAM_TEST_SIZE 0x100
10558 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10559 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10560 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10561 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10562 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10564 static int tg3_test_nvram(struct tg3 *tp)
10566 u32 csum, magic;
10567 __be32 *buf;
10568 int i, j, k, err = 0, size;
10570 if (tg3_flag(tp, NO_NVRAM))
10571 return 0;
10573 if (tg3_nvram_read(tp, 0, &magic) != 0)
10574 return -EIO;
10576 if (magic == TG3_EEPROM_MAGIC)
10577 size = NVRAM_TEST_SIZE;
10578 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10579 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10580 TG3_EEPROM_SB_FORMAT_1) {
10581 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10582 case TG3_EEPROM_SB_REVISION_0:
10583 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10584 break;
10585 case TG3_EEPROM_SB_REVISION_2:
10586 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10587 break;
10588 case TG3_EEPROM_SB_REVISION_3:
10589 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10590 break;
10591 default:
10592 return 0;
10594 } else
10595 return 0;
10596 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10597 size = NVRAM_SELFBOOT_HW_SIZE;
10598 else
10599 return -EIO;
10601 buf = kmalloc(size, GFP_KERNEL);
10602 if (buf == NULL)
10603 return -ENOMEM;
10605 err = -EIO;
10606 for (i = 0, j = 0; i < size; i += 4, j++) {
10607 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10608 if (err)
10609 break;
10611 if (i < size)
10612 goto out;
10614 /* Selfboot format */
10615 magic = be32_to_cpu(buf[0]);
10616 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10617 TG3_EEPROM_MAGIC_FW) {
10618 u8 *buf8 = (u8 *) buf, csum8 = 0;
10620 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10621 TG3_EEPROM_SB_REVISION_2) {
10622 /* For rev 2, the csum doesn't include the MBA. */
10623 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10624 csum8 += buf8[i];
10625 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10626 csum8 += buf8[i];
10627 } else {
10628 for (i = 0; i < size; i++)
10629 csum8 += buf8[i];
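/* Checksum convention (assumption): the selfboot image stores a
 * checksum byte chosen so that all covered bytes sum to zero
 * modulo 256, hence a running csum8 of zero means the image is
 * intact.
 */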
10632 if (csum8 == 0) {
10633 err = 0;
10634 goto out;
10635 }
10637 err = -EIO;
10638 goto out;
10639 }
10641 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10642 TG3_EEPROM_MAGIC_HW) {
10643 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10644 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10645 u8 *buf8 = (u8 *) buf;
10647 /* Separate the parity bits and the data bytes. */
10648 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10649 if ((i == 0) || (i == 8)) {
10650 int l;
10651 u8 msk;
10653 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10654 parity[k++] = buf8[i] & msk;
10655 i++;
10656 } else if (i == 16) {
10657 int l;
10658 u8 msk;
10660 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10661 parity[k++] = buf8[i] & msk;
10662 i++;
10664 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10665 parity[k++] = buf8[i] & msk;
10666 i++;
10668 data[j++] = buf8[i];
10671 err = -EIO;
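/* The loop below verifies odd parity: a data byte with an odd
 * number of set bits must have its parity bit clear, and one with
 * an even count must have it set, so data plus parity always
 * carries an odd number of ones.
 */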
10672 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10673 u8 hw8 = hweight8(data[i]);
10675 if ((hw8 & 0x1) && parity[i])
10676 goto out;
10677 else if (!(hw8 & 0x1) && !parity[i])
10678 goto out;
10679 }
10680 err = 0;
10681 goto out;
10682 }
10684 err = -EIO;
10686 /* Bootstrap checksum at offset 0x10 */
10687 csum = calc_crc((unsigned char *) buf, 0x10);
10688 if (csum != le32_to_cpu(buf[0x10/4]))
10689 goto out;
10691 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10692 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10693 if (csum != le32_to_cpu(buf[0xfc/4]))
10694 goto out;
10696 kfree(buf);
10698 buf = tg3_vpd_readblock(tp);
10699 if (!buf)
10700 return -ENOMEM;
10702 i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10703 PCI_VPD_LRDT_RO_DATA);
10704 if (i > 0) {
10705 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10706 if (j < 0)
10707 goto out;
10709 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10710 goto out;
10712 i += PCI_VPD_LRDT_TAG_SIZE;
10713 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10714 PCI_VPD_RO_KEYWORD_CHKSUM);
10715 if (j > 0) {
10716 u8 csum8 = 0;
10718 j += PCI_VPD_INFO_FLD_HDR_SIZE;
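/* Per the PCI VPD convention, the RV keyword holds a checksum byte
 * chosen so that every byte from the start of the VPD block through
 * that byte sums to zero mod 256; a nonzero csum8 below therefore
 * indicates corruption.
 */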
10720 for (i = 0; i <= j; i++)
10721 csum8 += ((u8 *)buf)[i];
10723 if (csum8)
10724 goto out;
10728 err = 0;
10730 out:
10731 kfree(buf);
10732 return err;
10735 #define TG3_SERDES_TIMEOUT_SEC 2
10736 #define TG3_COPPER_TIMEOUT_SEC 6
10738 static int tg3_test_link(struct tg3 *tp)
10740 int i, max;
10742 if (!netif_running(tp->dev))
10743 return -ENODEV;
10745 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10746 max = TG3_SERDES_TIMEOUT_SEC;
10747 else
10748 max = TG3_COPPER_TIMEOUT_SEC;
10750 for (i = 0; i < max; i++) {
10751 if (netif_carrier_ok(tp->dev))
10752 return 0;
10754 if (msleep_interruptible(1000))
10755 break;
10758 return -EIO;
10761 /* Only test the commonly used registers */
10762 static int tg3_test_registers(struct tg3 *tp)
10764 int i, is_5705, is_5750;
10765 u32 offset, read_mask, write_mask, val, save_val, read_val;
10766 static struct {
10767 u16 offset;
10768 u16 flags;
10769 #define TG3_FL_5705 0x1
10770 #define TG3_FL_NOT_5705 0x2
10771 #define TG3_FL_NOT_5788 0x4
10772 #define TG3_FL_NOT_5750 0x8
10773 u32 read_mask;
10774 u32 write_mask;
10775 } reg_tbl[] = {
10776 /* MAC Control Registers */
10777 { MAC_MODE, TG3_FL_NOT_5705,
10778 0x00000000, 0x00ef6f8c },
10779 { MAC_MODE, TG3_FL_5705,
10780 0x00000000, 0x01ef6b8c },
10781 { MAC_STATUS, TG3_FL_NOT_5705,
10782 0x03800107, 0x00000000 },
10783 { MAC_STATUS, TG3_FL_5705,
10784 0x03800100, 0x00000000 },
10785 { MAC_ADDR_0_HIGH, 0x0000,
10786 0x00000000, 0x0000ffff },
10787 { MAC_ADDR_0_LOW, 0x0000,
10788 0x00000000, 0xffffffff },
10789 { MAC_RX_MTU_SIZE, 0x0000,
10790 0x00000000, 0x0000ffff },
10791 { MAC_TX_MODE, 0x0000,
10792 0x00000000, 0x00000070 },
10793 { MAC_TX_LENGTHS, 0x0000,
10794 0x00000000, 0x00003fff },
10795 { MAC_RX_MODE, TG3_FL_NOT_5705,
10796 0x00000000, 0x000007fc },
10797 { MAC_RX_MODE, TG3_FL_5705,
10798 0x00000000, 0x000007dc },
10799 { MAC_HASH_REG_0, 0x0000,
10800 0x00000000, 0xffffffff },
10801 { MAC_HASH_REG_1, 0x0000,
10802 0x00000000, 0xffffffff },
10803 { MAC_HASH_REG_2, 0x0000,
10804 0x00000000, 0xffffffff },
10805 { MAC_HASH_REG_3, 0x0000,
10806 0x00000000, 0xffffffff },
10808 /* Receive Data and Receive BD Initiator Control Registers. */
10809 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10810 0x00000000, 0xffffffff },
10811 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10812 0x00000000, 0xffffffff },
10813 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10814 0x00000000, 0x00000003 },
10815 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10816 0x00000000, 0xffffffff },
10817 { RCVDBDI_STD_BD+0, 0x0000,
10818 0x00000000, 0xffffffff },
10819 { RCVDBDI_STD_BD+4, 0x0000,
10820 0x00000000, 0xffffffff },
10821 { RCVDBDI_STD_BD+8, 0x0000,
10822 0x00000000, 0xffff0002 },
10823 { RCVDBDI_STD_BD+0xc, 0x0000,
10824 0x00000000, 0xffffffff },
10826 /* Receive BD Initiator Control Registers. */
10827 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10828 0x00000000, 0xffffffff },
10829 { RCVBDI_STD_THRESH, TG3_FL_5705,
10830 0x00000000, 0x000003ff },
10831 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10832 0x00000000, 0xffffffff },
10834 /* Host Coalescing Control Registers. */
10835 { HOSTCC_MODE, TG3_FL_NOT_5705,
10836 0x00000000, 0x00000004 },
10837 { HOSTCC_MODE, TG3_FL_5705,
10838 0x00000000, 0x000000f6 },
10839 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10840 0x00000000, 0xffffffff },
10841 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10842 0x00000000, 0x000003ff },
10843 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10844 0x00000000, 0xffffffff },
10845 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10846 0x00000000, 0x000003ff },
10847 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10848 0x00000000, 0xffffffff },
10849 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10850 0x00000000, 0x000000ff },
10851 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10852 0x00000000, 0xffffffff },
10853 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10854 0x00000000, 0x000000ff },
10855 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10856 0x00000000, 0xffffffff },
10857 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10858 0x00000000, 0xffffffff },
10859 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10860 0x00000000, 0xffffffff },
10861 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10862 0x00000000, 0x000000ff },
10863 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10864 0x00000000, 0xffffffff },
10865 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10866 0x00000000, 0x000000ff },
10867 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10868 0x00000000, 0xffffffff },
10869 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10870 0x00000000, 0xffffffff },
10871 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10872 0x00000000, 0xffffffff },
10873 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10874 0x00000000, 0xffffffff },
10875 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10876 0x00000000, 0xffffffff },
10877 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10878 0xffffffff, 0x00000000 },
10879 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10880 0xffffffff, 0x00000000 },
10882 /* Buffer Manager Control Registers. */
10883 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10884 0x00000000, 0x007fff80 },
10885 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10886 0x00000000, 0x007fffff },
10887 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10888 0x00000000, 0x0000003f },
10889 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10890 0x00000000, 0x000001ff },
10891 { BUFMGR_MB_HIGH_WATER, 0x0000,
10892 0x00000000, 0x000001ff },
10893 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10894 0xffffffff, 0x00000000 },
10895 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10896 0xffffffff, 0x00000000 },
10898 /* Mailbox Registers */
10899 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10900 0x00000000, 0x000001ff },
10901 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10902 0x00000000, 0x000001ff },
10903 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10904 0x00000000, 0x000007ff },
10905 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10906 0x00000000, 0x000001ff },
10908 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10909 };
10911 is_5705 = is_5750 = 0;
10912 if (tg3_flag(tp, 5705_PLUS)) {
10913 is_5705 = 1;
10914 if (tg3_flag(tp, 5750_PLUS))
10915 is_5750 = 1;
10918 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10919 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10920 continue;
10922 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10923 continue;
10925 if (tg3_flag(tp, IS_5788) &&
10926 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10927 continue;
10929 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10930 continue;
10932 offset = (u32) reg_tbl[i].offset;
10933 read_mask = reg_tbl[i].read_mask;
10934 write_mask = reg_tbl[i].write_mask;
10936 /* Save the original register content */
10937 save_val = tr32(offset);
10939 /* Determine the read-only value. */
10940 read_val = save_val & read_mask;
10942 /* Write zero to the register, then make sure the read-only bits
10943 * are not changed and the read/write bits are all zeros.
10944 */
10945 tw32(offset, 0);
10947 val = tr32(offset);
10949 /* Test the read-only and read/write bits. */
10950 if (((val & read_mask) != read_val) || (val & write_mask))
10951 goto out;
10953 /* Write ones to all the bits defined by RdMask and WrMask, then
10954 * make sure the read-only bits are not changed and the
10955 * read/write bits are all ones.
10956 */
10957 tw32(offset, read_mask | write_mask);
10959 val = tr32(offset);
10961 /* Test the read-only bits. */
10962 if ((val & read_mask) != read_val)
10963 goto out;
10965 /* Test the read/write bits. */
10966 if ((val & write_mask) != write_mask)
10967 goto out;
10969 tw32(offset, save_val);
10972 return 0;
10974 out:
10975 if (netif_msg_hw(tp))
10976 netdev_err(tp->dev,
10977 "Register test failed at offset %x\n", offset);
10978 tw32(offset, save_val);
10979 return -EIO;
10982 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10984 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
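/* All-zeros, all-ones and the alternating 0xaa55a55a pattern are
 * classic memory-test vectors: together they drive every cell both
 * ways and catch stuck-at and simple coupling faults.
 */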
10985 int i;
10986 u32 j;
10988 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10989 for (j = 0; j < len; j += 4) {
10990 u32 val;
10992 tg3_write_mem(tp, offset + j, test_pattern[i]);
10993 tg3_read_mem(tp, offset + j, &val);
10994 if (val != test_pattern[i])
10995 return -EIO;
10998 return 0;
11001 static int tg3_test_memory(struct tg3 *tp)
11003 static struct mem_entry {
11004 u32 offset;
11005 u32 len;
11006 } mem_tbl_570x[] = {
11007 { 0x00000000, 0x00b50},
11008 { 0x00002000, 0x1c000},
11009 { 0xffffffff, 0x00000}
11010 }, mem_tbl_5705[] = {
11011 { 0x00000100, 0x0000c},
11012 { 0x00000200, 0x00008},
11013 { 0x00004000, 0x00800},
11014 { 0x00006000, 0x01000},
11015 { 0x00008000, 0x02000},
11016 { 0x00010000, 0x0e000},
11017 { 0xffffffff, 0x00000}
11018 }, mem_tbl_5755[] = {
11019 { 0x00000200, 0x00008},
11020 { 0x00004000, 0x00800},
11021 { 0x00006000, 0x00800},
11022 { 0x00008000, 0x02000},
11023 { 0x00010000, 0x0c000},
11024 { 0xffffffff, 0x00000}
11025 }, mem_tbl_5906[] = {
11026 { 0x00000200, 0x00008},
11027 { 0x00004000, 0x00400},
11028 { 0x00006000, 0x00400},
11029 { 0x00008000, 0x01000},
11030 { 0x00010000, 0x01000},
11031 { 0xffffffff, 0x00000}
11032 }, mem_tbl_5717[] = {
11033 { 0x00000200, 0x00008},
11034 { 0x00010000, 0x0a000},
11035 { 0x00020000, 0x13c00},
11036 { 0xffffffff, 0x00000}
11037 }, mem_tbl_57765[] = {
11038 { 0x00000200, 0x00008},
11039 { 0x00004000, 0x00800},
11040 { 0x00006000, 0x09800},
11041 { 0x00010000, 0x0a000},
11042 { 0xffffffff, 0x00000}
11043 };
11044 struct mem_entry *mem_tbl;
11045 int err = 0;
11046 int i;
11048 if (tg3_flag(tp, 5717_PLUS))
11049 mem_tbl = mem_tbl_5717;
11050 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11051 mem_tbl = mem_tbl_57765;
11052 else if (tg3_flag(tp, 5755_PLUS))
11053 mem_tbl = mem_tbl_5755;
11054 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11055 mem_tbl = mem_tbl_5906;
11056 else if (tg3_flag(tp, 5705_PLUS))
11057 mem_tbl = mem_tbl_5705;
11058 else
11059 mem_tbl = mem_tbl_570x;
11061 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11062 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11063 if (err)
11064 break;
11067 return err;
11070 #define TG3_MAC_LOOPBACK 0
11071 #define TG3_PHY_LOOPBACK 1
11072 #define TG3_TSO_LOOPBACK 2
11074 #define TG3_TSO_MSS 500
11076 #define TG3_TSO_IP_HDR_LEN 20
11077 #define TG3_TSO_TCP_HDR_LEN 20
11078 #define TG3_TSO_TCP_OPT_LEN 12
11080 static const u8 tg3_tso_header[] = {
11081 0x08, 0x00,
11082 0x45, 0x00, 0x00, 0x00,
11083 0x00, 0x00, 0x40, 0x00,
11084 0x40, 0x06, 0x00, 0x00,
11085 0x0a, 0x00, 0x00, 0x01,
11086 0x0a, 0x00, 0x00, 0x02,
11087 0x0d, 0x00, 0xe0, 0x00,
11088 0x00, 0x00, 0x01, 0x00,
11089 0x00, 0x00, 0x02, 0x00,
11090 0x80, 0x10, 0x10, 0x00,
11091 0x14, 0x09, 0x00, 0x00,
11092 0x01, 0x01, 0x08, 0x0a,
11093 0x11, 0x11, 0x11, 0x11,
11094 0x11, 0x11, 0x11, 0x11,
11095 };
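/* Decoding the template above (read from the raw bytes): 0x08 0x00
 * is ETH_P_IP, followed by an IPv4 header (0x45 = version 4/IHL 5,
 * DF set, TTL 0x40, protocol 0x06 = TCP, 10.0.0.1 -> 10.0.0.2) and
 * a 32-byte TCP header (data offset 8, ACK set) whose 12 option
 * bytes are NOP, NOP and a timestamp option -- matching
 * TG3_TSO_TCP_OPT_LEN above.
 */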
11097 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
11099 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
11100 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11101 struct sk_buff *skb, *rx_skb;
11102 u8 *tx_data;
11103 dma_addr_t map;
11104 int num_pkts, tx_len, rx_len, i, err;
11105 struct tg3_rx_buffer_desc *desc;
11106 struct tg3_napi *tnapi, *rnapi;
11107 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11109 tnapi = &tp->napi[0];
11110 rnapi = &tp->napi[0];
11111 if (tp->irq_cnt > 1) {
11112 if (tg3_flag(tp, ENABLE_RSS))
11113 rnapi = &tp->napi[1];
11114 if (tg3_flag(tp, ENABLE_TSS))
11115 tnapi = &tp->napi[1];
11117 coal_now = tnapi->coal_now | rnapi->coal_now;
11119 if (loopback_mode == TG3_MAC_LOOPBACK) {
11120 /* HW errata - mac loopback fails in some cases on 5780.
11121 * Normal traffic and PHY loopback are not affected by
11122 * errata. Also, the MAC loopback test is deprecated for
11123 * all newer ASIC revisions.
11124 */
11125 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11126 tg3_flag(tp, CPMU_PRESENT))
11127 return 0;
11129 mac_mode = tp->mac_mode &
11130 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11131 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11132 if (!tg3_flag(tp, 5705_PLUS))
11133 mac_mode |= MAC_MODE_LINK_POLARITY;
11134 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11135 mac_mode |= MAC_MODE_PORT_MODE_MII;
11136 else
11137 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11138 tw32(MAC_MODE, mac_mode);
11139 } else {
11140 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11141 tg3_phy_fet_toggle_apd(tp, false);
11142 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11143 } else
11144 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11146 tg3_phy_toggle_automdix(tp, 0);
11148 tg3_writephy(tp, MII_BMCR, val);
11149 udelay(40);
11151 mac_mode = tp->mac_mode &
11152 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11153 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11154 tg3_writephy(tp, MII_TG3_FET_PTEST,
11155 MII_TG3_FET_PTEST_FRC_TX_LINK |
11156 MII_TG3_FET_PTEST_FRC_TX_LOCK);
11157 /* The write needs to be flushed for the AC131 */
11158 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11159 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11160 mac_mode |= MAC_MODE_PORT_MODE_MII;
11161 } else
11162 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11164 /* reset to prevent losing 1st rx packet intermittently */
11165 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11166 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11167 udelay(10);
11168 tw32_f(MAC_RX_MODE, tp->rx_mode);
11170 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11171 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11172 if (masked_phy_id == TG3_PHY_ID_BCM5401)
11173 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11174 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11175 mac_mode |= MAC_MODE_LINK_POLARITY;
11176 tg3_writephy(tp, MII_TG3_EXT_CTRL,
11177 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11179 tw32(MAC_MODE, mac_mode);
11181 /* Wait for link */
11182 for (i = 0; i < 100; i++) {
11183 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11184 break;
11185 mdelay(1);
11189 err = -EIO;
11191 tx_len = pktsz;
11192 skb = netdev_alloc_skb(tp->dev, tx_len);
11193 if (!skb)
11194 return -ENOMEM;
11196 tx_data = skb_put(skb, tx_len);
11197 memcpy(tx_data, tp->dev->dev_addr, 6);
11198 memset(tx_data + 6, 0x0, 8);
11200 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11202 if (loopback_mode == TG3_TSO_LOOPBACK) {
11203 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11205 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11206 TG3_TSO_TCP_OPT_LEN;
11208 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11209 sizeof(tg3_tso_header));
11210 mss = TG3_TSO_MSS;
11212 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11213 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11215 /* Set the total length field in the IP header */
11216 iph->tot_len = htons((u16)(mss + hdr_len));
11218 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11219 TXD_FLAG_CPU_POST_DMA);
11221 if (tg3_flag(tp, HW_TSO_1) ||
11222 tg3_flag(tp, HW_TSO_2) ||
11223 tg3_flag(tp, HW_TSO_3)) {
11224 struct tcphdr *th;
11225 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11226 th = (struct tcphdr *)&tx_data[val];
11227 th->check = 0;
11228 } else
11229 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11231 if (tg3_flag(tp, HW_TSO_3)) {
11232 mss |= (hdr_len & 0xc) << 12;
11233 if (hdr_len & 0x10)
11234 base_flags |= 0x00000010;
11235 base_flags |= (hdr_len & 0x3e0) << 5;
11236 } else if (tg3_flag(tp, HW_TSO_2))
11237 mss |= hdr_len << 9;
11238 else if (tg3_flag(tp, HW_TSO_1) ||
11239 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11240 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11241 } else {
11242 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
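/* Each HW_TSO generation appears to encode the LSO header length
 * differently (assumption from the bit arithmetic): HW_TSO_3
 * scatters hdr_len bits across both the mss field and base_flags,
 * HW_TSO_2 packs hdr_len above bit 9 of mss, and the older engines
 * only need the TCP option length.
 */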
11245 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11246 } else {
11247 num_pkts = 1;
11248 data_off = ETH_HLEN;
11251 for (i = data_off; i < tx_len; i++)
11252 tx_data[i] = (u8) (i & 0xff);
11254 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11255 if (pci_dma_mapping_error(tp->pdev, map)) {
11256 dev_kfree_skb(skb);
11257 return -EIO;
11260 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11261 rnapi->coal_now);
11263 udelay(10);
11265 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11267 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
11268 base_flags, (mss << 1) | 1);
11270 tnapi->tx_prod++;
11272 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11273 tr32_mailbox(tnapi->prodmbox);
11275 udelay(10);
11277 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11278 for (i = 0; i < 35; i++) {
11279 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11280 coal_now);
11282 udelay(10);
11284 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11285 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11286 if ((tx_idx == tnapi->tx_prod) &&
11287 (rx_idx == (rx_start_idx + num_pkts)))
11288 break;
11291 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11292 dev_kfree_skb(skb);
11294 if (tx_idx != tnapi->tx_prod)
11295 goto out;
11297 if (rx_idx != rx_start_idx + num_pkts)
11298 goto out;
11300 val = data_off;
11301 while (rx_idx != rx_start_idx) {
11302 desc = &rnapi->rx_rcb[rx_start_idx++];
11303 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11304 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11306 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11307 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11308 goto out;
11310 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11311 - ETH_FCS_LEN;
11313 if (loopback_mode != TG3_TSO_LOOPBACK) {
11314 if (rx_len != tx_len)
11315 goto out;
11317 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11318 if (opaque_key != RXD_OPAQUE_RING_STD)
11319 goto out;
11320 } else {
11321 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11322 goto out;
11324 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11325 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11326 >> RXD_TCPCSUM_SHIFT == 0xffff) {
11327 goto out;
11330 if (opaque_key == RXD_OPAQUE_RING_STD) {
11331 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11332 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11333 mapping);
11334 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11335 rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11336 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11337 mapping);
11338 } else
11339 goto out;
11341 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11342 PCI_DMA_FROMDEVICE);
11344 for (i = data_off; i < rx_len; i++, val++) {
11345 if (*(rx_skb->data + i) != (u8) (val & 0xff))
11346 goto out;
11350 err = 0;
11352 /* tg3_free_rings will unmap and free the rx_skb */
11353 out:
11354 return err;
11357 #define TG3_STD_LOOPBACK_FAILED 1
11358 #define TG3_JMB_LOOPBACK_FAILED 2
11359 #define TG3_TSO_LOOPBACK_FAILED 4
11361 #define TG3_MAC_LOOPBACK_SHIFT 0
11362 #define TG3_PHY_LOOPBACK_SHIFT 4
11363 #define TG3_LOOPBACK_FAILED 0x00000077
11365 static int tg3_test_loopback(struct tg3 *tp)
11367 int err = 0;
11368 u32 eee_cap, cpmuctrl = 0;
11370 if (!netif_running(tp->dev))
11371 return TG3_LOOPBACK_FAILED;
11373 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11374 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11376 err = tg3_reset_hw(tp, 1);
11377 if (err) {
11378 err = TG3_LOOPBACK_FAILED;
11379 goto done;
11382 if (tg3_flag(tp, ENABLE_RSS)) {
11383 int i;
11385 /* Reroute all rx packets to the 1st queue */
11386 for (i = MAC_RSS_INDIR_TBL_0;
11387 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11388 tw32(i, 0x0);
11391 /* Turn off gphy autopowerdown. */
11392 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11393 tg3_phy_toggle_apd(tp, false);
11395 if (tg3_flag(tp, CPMU_PRESENT)) {
11396 int i;
11397 u32 status;
11399 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11401 /* Wait for up to 40 microseconds to acquire lock. */
11402 for (i = 0; i < 4; i++) {
11403 status = tr32(TG3_CPMU_MUTEX_GNT);
11404 if (status == CPMU_MUTEX_GNT_DRIVER)
11405 break;
11406 udelay(10);
11409 if (status != CPMU_MUTEX_GNT_DRIVER) {
11410 err = TG3_LOOPBACK_FAILED;
11411 goto done;
11414 /* Turn off link-based power management. */
11415 cpmuctrl = tr32(TG3_CPMU_CTRL);
11416 tw32(TG3_CPMU_CTRL,
11417 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11418 CPMU_CTRL_LINK_AWARE_MODE));
11421 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11422 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11424 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11425 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11426 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11428 if (tg3_flag(tp, CPMU_PRESENT)) {
11429 tw32(TG3_CPMU_CTRL, cpmuctrl);
11431 /* Release the mutex */
11432 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11435 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11436 !tg3_flag(tp, USE_PHYLIB)) {
11437 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11438 err |= TG3_STD_LOOPBACK_FAILED <<
11439 TG3_PHY_LOOPBACK_SHIFT;
11440 if (tg3_flag(tp, TSO_CAPABLE) &&
11441 tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11442 err |= TG3_TSO_LOOPBACK_FAILED <<
11443 TG3_PHY_LOOPBACK_SHIFT;
11444 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11445 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11446 err |= TG3_JMB_LOOPBACK_FAILED <<
11447 TG3_PHY_LOOPBACK_SHIFT;
11450 /* Re-enable gphy autopowerdown. */
11451 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11452 tg3_phy_toggle_apd(tp, true);
11454 done:
11455 tp->phy_flags |= eee_cap;
11457 return err;
11460 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11461 u64 *data)
11463 struct tg3 *tp = netdev_priv(dev);
11465 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11466 tg3_power_up(tp);
11468 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11470 if (tg3_test_nvram(tp) != 0) {
11471 etest->flags |= ETH_TEST_FL_FAILED;
11472 data[0] = 1;
11474 if (tg3_test_link(tp) != 0) {
11475 etest->flags |= ETH_TEST_FL_FAILED;
11476 data[1] = 1;
11478 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11479 int err, err2 = 0, irq_sync = 0;
11481 if (netif_running(dev)) {
11482 tg3_phy_stop(tp);
11483 tg3_netif_stop(tp);
11484 irq_sync = 1;
11487 tg3_full_lock(tp, irq_sync);
11489 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11490 err = tg3_nvram_lock(tp);
11491 tg3_halt_cpu(tp, RX_CPU_BASE);
11492 if (!tg3_flag(tp, 5705_PLUS))
11493 tg3_halt_cpu(tp, TX_CPU_BASE);
11494 if (!err)
11495 tg3_nvram_unlock(tp);
11497 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11498 tg3_phy_reset(tp);
11500 if (tg3_test_registers(tp) != 0) {
11501 etest->flags |= ETH_TEST_FL_FAILED;
11502 data[2] = 1;
11504 if (tg3_test_memory(tp) != 0) {
11505 etest->flags |= ETH_TEST_FL_FAILED;
11506 data[3] = 1;
11508 if ((data[4] = tg3_test_loopback(tp)) != 0)
11509 etest->flags |= ETH_TEST_FL_FAILED;
11511 tg3_full_unlock(tp);
11513 if (tg3_test_interrupt(tp) != 0) {
11514 etest->flags |= ETH_TEST_FL_FAILED;
11515 data[5] = 1;
11518 tg3_full_lock(tp, 0);
11520 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11521 if (netif_running(dev)) {
11522 tg3_flag_set(tp, INIT_COMPLETE);
11523 err2 = tg3_restart_hw(tp, 1);
11524 if (!err2)
11525 tg3_netif_start(tp);
11528 tg3_full_unlock(tp);
11530 if (irq_sync && !err2)
11531 tg3_phy_start(tp);
11533 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11534 tg3_power_down(tp);
11538 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11540 struct mii_ioctl_data *data = if_mii(ifr);
11541 struct tg3 *tp = netdev_priv(dev);
11542 int err;
11544 if (tg3_flag(tp, USE_PHYLIB)) {
11545 struct phy_device *phydev;
11546 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11547 return -EAGAIN;
11548 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11549 return phy_mii_ioctl(phydev, ifr, cmd);
11552 switch (cmd) {
11553 case SIOCGMIIPHY:
11554 data->phy_id = tp->phy_addr;
11556 /* fallthru */
11557 case SIOCGMIIREG: {
11558 u32 mii_regval;
11560 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11561 break; /* We have no PHY */
11563 if (!netif_running(dev))
11564 return -EAGAIN;
11566 spin_lock_bh(&tp->lock);
11567 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11568 spin_unlock_bh(&tp->lock);
11570 data->val_out = mii_regval;
11572 return err;
11575 case SIOCSMIIREG:
11576 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11577 break; /* We have no PHY */
11579 if (!netif_running(dev))
11580 return -EAGAIN;
11582 spin_lock_bh(&tp->lock);
11583 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11584 spin_unlock_bh(&tp->lock);
11586 return err;
11588 default:
11589 /* do nothing */
11590 break;
11592 return -EOPNOTSUPP;
11595 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11597 struct tg3 *tp = netdev_priv(dev);
11599 memcpy(ec, &tp->coal, sizeof(*ec));
11600 return 0;
11603 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11605 struct tg3 *tp = netdev_priv(dev);
11606 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11607 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11609 if (!tg3_flag(tp, 5705_PLUS)) {
11610 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11611 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11612 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11613 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11616 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11617 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11618 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11619 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11620 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11621 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11622 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11623 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11624 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11625 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11626 return -EINVAL;
11628 /* No rx interrupts will be generated if both are zero */
11629 if ((ec->rx_coalesce_usecs == 0) &&
11630 (ec->rx_max_coalesced_frames == 0))
11631 return -EINVAL;
11633 /* No tx interrupts will be generated if both are zero */
11634 if ((ec->tx_coalesce_usecs == 0) &&
11635 (ec->tx_max_coalesced_frames == 0))
11636 return -EINVAL;
11638 /* Only copy relevant parameters, ignore all others. */
11639 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11640 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11641 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11642 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11643 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11644 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11645 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11646 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11647 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11649 if (netif_running(dev)) {
11650 tg3_full_lock(tp, 0);
11651 __tg3_set_coalesce(tp, &tp->coal);
11652 tg3_full_unlock(tp);
11654 return 0;
11657 static const struct ethtool_ops tg3_ethtool_ops = {
11658 .get_settings = tg3_get_settings,
11659 .set_settings = tg3_set_settings,
11660 .get_drvinfo = tg3_get_drvinfo,
11661 .get_regs_len = tg3_get_regs_len,
11662 .get_regs = tg3_get_regs,
11663 .get_wol = tg3_get_wol,
11664 .set_wol = tg3_set_wol,
11665 .get_msglevel = tg3_get_msglevel,
11666 .set_msglevel = tg3_set_msglevel,
11667 .nway_reset = tg3_nway_reset,
11668 .get_link = ethtool_op_get_link,
11669 .get_eeprom_len = tg3_get_eeprom_len,
11670 .get_eeprom = tg3_get_eeprom,
11671 .set_eeprom = tg3_set_eeprom,
11672 .get_ringparam = tg3_get_ringparam,
11673 .set_ringparam = tg3_set_ringparam,
11674 .get_pauseparam = tg3_get_pauseparam,
11675 .set_pauseparam = tg3_set_pauseparam,
11676 .self_test = tg3_self_test,
11677 .get_strings = tg3_get_strings,
11678 .set_phys_id = tg3_set_phys_id,
11679 .get_ethtool_stats = tg3_get_ethtool_stats,
11680 .get_coalesce = tg3_get_coalesce,
11681 .set_coalesce = tg3_set_coalesce,
11682 .get_sset_count = tg3_get_sset_count,
11685 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11687 u32 cursize, val, magic;
11689 tp->nvram_size = EEPROM_CHIP_SIZE;
11691 if (tg3_nvram_read(tp, 0, &magic) != 0)
11692 return;
11694 if ((magic != TG3_EEPROM_MAGIC) &&
11695 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11696 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11697 return;
11699 /*
11700 * Size the chip by reading offsets at increasing powers of two.
11701 * When we encounter our validation signature, we know the addressing
11702 * has wrapped around, and thus have our chip size.
11703 */
11704 cursize = 0x10;
11706 while (cursize < tp->nvram_size) {
11707 if (tg3_nvram_read(tp, cursize, &val) != 0)
11708 return;
11710 if (val == magic)
11711 break;
11713 cursize <<= 1;
11716 tp->nvram_size = cursize;
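/* Worked example of the sizing loop above: on a 64 KB part, reads
 * at 0x10, 0x20, ... return ordinary data until cursize reaches
 * 0x10000; that address wraps back to offset 0, the read returns
 * the magic signature, and nvram_size ends up as 0x10000.
 */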
11719 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11721 u32 val;
11723 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11724 return;
11726 /* Selfboot format */
11727 if (val != TG3_EEPROM_MAGIC) {
11728 tg3_get_eeprom_size(tp);
11729 return;
11732 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11733 if (val != 0) {
11734 /* This is confusing. We want to operate on the
11735 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11736 * call will read from NVRAM and byteswap the data
11737 * according to the byteswapping settings for all
11738 * other register accesses. This ensures the data we
11739 * want will always reside in the lower 16-bits.
11740 * However, the data in NVRAM is in LE format, which
11741 * means the data from the NVRAM read will always be
11742 * opposite the endianness of the CPU. The 16-bit
11743 * byteswap then brings the data to CPU endianness.
11744 */
11745 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11746 return;
11749 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11752 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11754 u32 nvcfg1;
11756 nvcfg1 = tr32(NVRAM_CFG1);
11757 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11758 tg3_flag_set(tp, FLASH);
11759 } else {
11760 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11761 tw32(NVRAM_CFG1, nvcfg1);
11764 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
11765 tg3_flag(tp, 5780_CLASS)) {
11766 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11767 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11768 tp->nvram_jedecnum = JEDEC_ATMEL;
11769 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11770 tg3_flag_set(tp, NVRAM_BUFFERED);
11771 break;
11772 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11773 tp->nvram_jedecnum = JEDEC_ATMEL;
11774 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11775 break;
11776 case FLASH_VENDOR_ATMEL_EEPROM:
11777 tp->nvram_jedecnum = JEDEC_ATMEL;
11778 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11779 tg3_flag_set(tp, NVRAM_BUFFERED);
11780 break;
11781 case FLASH_VENDOR_ST:
11782 tp->nvram_jedecnum = JEDEC_ST;
11783 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11784 tg3_flag_set(tp, NVRAM_BUFFERED);
11785 break;
11786 case FLASH_VENDOR_SAIFUN:
11787 tp->nvram_jedecnum = JEDEC_SAIFUN;
11788 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11789 break;
11790 case FLASH_VENDOR_SST_SMALL:
11791 case FLASH_VENDOR_SST_LARGE:
11792 tp->nvram_jedecnum = JEDEC_SST;
11793 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11794 break;
11796 } else {
11797 tp->nvram_jedecnum = JEDEC_ATMEL;
11798 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11799 tg3_flag_set(tp, NVRAM_BUFFERED);
11803 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11805 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11806 case FLASH_5752PAGE_SIZE_256:
11807 tp->nvram_pagesize = 256;
11808 break;
11809 case FLASH_5752PAGE_SIZE_512:
11810 tp->nvram_pagesize = 512;
11811 break;
11812 case FLASH_5752PAGE_SIZE_1K:
11813 tp->nvram_pagesize = 1024;
11814 break;
11815 case FLASH_5752PAGE_SIZE_2K:
11816 tp->nvram_pagesize = 2048;
11817 break;
11818 case FLASH_5752PAGE_SIZE_4K:
11819 tp->nvram_pagesize = 4096;
11820 break;
11821 case FLASH_5752PAGE_SIZE_264:
11822 tp->nvram_pagesize = 264;
11823 break;
11824 case FLASH_5752PAGE_SIZE_528:
11825 tp->nvram_pagesize = 528;
11826 break;
11830 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11832 u32 nvcfg1;
11834 nvcfg1 = tr32(NVRAM_CFG1);
11836 /* NVRAM protection for TPM */
11837 if (nvcfg1 & (1 << 27))
11838 tg3_flag_set(tp, PROTECTED_NVRAM);
11840 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11841 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11842 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11843 tp->nvram_jedecnum = JEDEC_ATMEL;
11844 tg3_flag_set(tp, NVRAM_BUFFERED);
11845 break;
11846 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11847 tp->nvram_jedecnum = JEDEC_ATMEL;
11848 tg3_flag_set(tp, NVRAM_BUFFERED);
11849 tg3_flag_set(tp, FLASH);
11850 break;
11851 case FLASH_5752VENDOR_ST_M45PE10:
11852 case FLASH_5752VENDOR_ST_M45PE20:
11853 case FLASH_5752VENDOR_ST_M45PE40:
11854 tp->nvram_jedecnum = JEDEC_ST;
11855 tg3_flag_set(tp, NVRAM_BUFFERED);
11856 tg3_flag_set(tp, FLASH);
11857 break;
11860 if (tg3_flag(tp, FLASH)) {
11861 tg3_nvram_get_pagesize(tp, nvcfg1);
11862 } else {
11863 /* For eeprom, set pagesize to maximum eeprom size */
11864 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11866 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11867 tw32(NVRAM_CFG1, nvcfg1);
11871 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11873 u32 nvcfg1, protect = 0;
11875 nvcfg1 = tr32(NVRAM_CFG1);
11877 /* NVRAM protection for TPM */
11878 if (nvcfg1 & (1 << 27)) {
11879 tg3_flag_set(tp, PROTECTED_NVRAM);
11880 protect = 1;
11883 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11884 switch (nvcfg1) {
11885 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11886 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11887 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11888 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11889 tp->nvram_jedecnum = JEDEC_ATMEL;
11890 tg3_flag_set(tp, NVRAM_BUFFERED);
11891 tg3_flag_set(tp, FLASH);
11892 tp->nvram_pagesize = 264;
11893 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11894 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11895 tp->nvram_size = (protect ? 0x3e200 :
11896 TG3_NVRAM_SIZE_512KB);
11897 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11898 tp->nvram_size = (protect ? 0x1f200 :
11899 TG3_NVRAM_SIZE_256KB);
11900 else
11901 tp->nvram_size = (protect ? 0x1f200 :
11902 TG3_NVRAM_SIZE_128KB);
11903 break;
11904 case FLASH_5752VENDOR_ST_M45PE10:
11905 case FLASH_5752VENDOR_ST_M45PE20:
11906 case FLASH_5752VENDOR_ST_M45PE40:
11907 tp->nvram_jedecnum = JEDEC_ST;
11908 tg3_flag_set(tp, NVRAM_BUFFERED);
11909 tg3_flag_set(tp, FLASH);
11910 tp->nvram_pagesize = 256;
11911 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11912 tp->nvram_size = (protect ?
11913 TG3_NVRAM_SIZE_64KB :
11914 TG3_NVRAM_SIZE_128KB);
11915 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11916 tp->nvram_size = (protect ?
11917 TG3_NVRAM_SIZE_64KB :
11918 TG3_NVRAM_SIZE_256KB);
11919 else
11920 tp->nvram_size = (protect ?
11921 TG3_NVRAM_SIZE_128KB :
11922 TG3_NVRAM_SIZE_512KB);
11923 break;
11927 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11929 u32 nvcfg1;
11931 nvcfg1 = tr32(NVRAM_CFG1);
11933 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11934 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11935 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11936 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11937 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11938 tp->nvram_jedecnum = JEDEC_ATMEL;
11939 tg3_flag_set(tp, NVRAM_BUFFERED);
11940 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11942 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11943 tw32(NVRAM_CFG1, nvcfg1);
11944 break;
11945 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11946 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11947 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11948 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11949 tp->nvram_jedecnum = JEDEC_ATMEL;
11950 tg3_flag_set(tp, NVRAM_BUFFERED);
11951 tg3_flag_set(tp, FLASH);
11952 tp->nvram_pagesize = 264;
11953 break;
11954 case FLASH_5752VENDOR_ST_M45PE10:
11955 case FLASH_5752VENDOR_ST_M45PE20:
11956 case FLASH_5752VENDOR_ST_M45PE40:
11957 tp->nvram_jedecnum = JEDEC_ST;
11958 tg3_flag_set(tp, NVRAM_BUFFERED);
11959 tg3_flag_set(tp, FLASH);
11960 tp->nvram_pagesize = 256;
11961 break;
11965 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11967 u32 nvcfg1, protect = 0;
11969 nvcfg1 = tr32(NVRAM_CFG1);
11971 /* NVRAM protection for TPM */
11972 if (nvcfg1 & (1 << 27)) {
11973 tg3_flag_set(tp, PROTECTED_NVRAM);
11974 protect = 1;
11977 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11978 switch (nvcfg1) {
11979 case FLASH_5761VENDOR_ATMEL_ADB021D:
11980 case FLASH_5761VENDOR_ATMEL_ADB041D:
11981 case FLASH_5761VENDOR_ATMEL_ADB081D:
11982 case FLASH_5761VENDOR_ATMEL_ADB161D:
11983 case FLASH_5761VENDOR_ATMEL_MDB021D:
11984 case FLASH_5761VENDOR_ATMEL_MDB041D:
11985 case FLASH_5761VENDOR_ATMEL_MDB081D:
11986 case FLASH_5761VENDOR_ATMEL_MDB161D:
11987 tp->nvram_jedecnum = JEDEC_ATMEL;
11988 tg3_flag_set(tp, NVRAM_BUFFERED);
11989 tg3_flag_set(tp, FLASH);
11990 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11991 tp->nvram_pagesize = 256;
11992 break;
11993 case FLASH_5761VENDOR_ST_A_M45PE20:
11994 case FLASH_5761VENDOR_ST_A_M45PE40:
11995 case FLASH_5761VENDOR_ST_A_M45PE80:
11996 case FLASH_5761VENDOR_ST_A_M45PE16:
11997 case FLASH_5761VENDOR_ST_M_M45PE20:
11998 case FLASH_5761VENDOR_ST_M_M45PE40:
11999 case FLASH_5761VENDOR_ST_M_M45PE80:
12000 case FLASH_5761VENDOR_ST_M_M45PE16:
12001 tp->nvram_jedecnum = JEDEC_ST;
12002 tg3_flag_set(tp, NVRAM_BUFFERED);
12003 tg3_flag_set(tp, FLASH);
12004 tp->nvram_pagesize = 256;
12005 break;
12008 if (protect) {
12009 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
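/* On TPM-protected parts the usable size is presumably whatever
 * lies below the protected region, so it is taken from the NVRAM
 * address-lockout register rather than inferred from the flash
 * part (assumption).
 */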
12010 } else {
12011 switch (nvcfg1) {
12012 case FLASH_5761VENDOR_ATMEL_ADB161D:
12013 case FLASH_5761VENDOR_ATMEL_MDB161D:
12014 case FLASH_5761VENDOR_ST_A_M45PE16:
12015 case FLASH_5761VENDOR_ST_M_M45PE16:
12016 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12017 break;
12018 case FLASH_5761VENDOR_ATMEL_ADB081D:
12019 case FLASH_5761VENDOR_ATMEL_MDB081D:
12020 case FLASH_5761VENDOR_ST_A_M45PE80:
12021 case FLASH_5761VENDOR_ST_M_M45PE80:
12022 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12023 break;
12024 case FLASH_5761VENDOR_ATMEL_ADB041D:
12025 case FLASH_5761VENDOR_ATMEL_MDB041D:
12026 case FLASH_5761VENDOR_ST_A_M45PE40:
12027 case FLASH_5761VENDOR_ST_M_M45PE40:
12028 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12029 break;
12030 case FLASH_5761VENDOR_ATMEL_ADB021D:
12031 case FLASH_5761VENDOR_ATMEL_MDB021D:
12032 case FLASH_5761VENDOR_ST_A_M45PE20:
12033 case FLASH_5761VENDOR_ST_M_M45PE20:
12034 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12035 break;
12040 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12042 tp->nvram_jedecnum = JEDEC_ATMEL;
12043 tg3_flag_set(tp, NVRAM_BUFFERED);
12044 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12047 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12049 u32 nvcfg1;
12051 nvcfg1 = tr32(NVRAM_CFG1);
12053 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12054 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12055 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12056 tp->nvram_jedecnum = JEDEC_ATMEL;
12057 tg3_flag_set(tp, NVRAM_BUFFERED);
12058 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12060 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12061 tw32(NVRAM_CFG1, nvcfg1);
12062 return;
12063 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12064 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12065 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12066 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12067 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12068 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12069 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12070 tp->nvram_jedecnum = JEDEC_ATMEL;
12071 tg3_flag_set(tp, NVRAM_BUFFERED);
12072 tg3_flag_set(tp, FLASH);
12074 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12075 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12076 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12077 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12078 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12079 break;
12080 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12081 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12082 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12083 break;
12084 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12085 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12086 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12087 break;
12089 break;
12090 case FLASH_5752VENDOR_ST_M45PE10:
12091 case FLASH_5752VENDOR_ST_M45PE20:
12092 case FLASH_5752VENDOR_ST_M45PE40:
12093 tp->nvram_jedecnum = JEDEC_ST;
12094 tg3_flag_set(tp, NVRAM_BUFFERED);
12095 tg3_flag_set(tp, FLASH);
12097 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12098 case FLASH_5752VENDOR_ST_M45PE10:
12099 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12100 break;
12101 case FLASH_5752VENDOR_ST_M45PE20:
12102 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12103 break;
12104 case FLASH_5752VENDOR_ST_M45PE40:
12105 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12106 break;
12108 break;
12109 default:
12110 tg3_flag_set(tp, NO_NVRAM);
12111 return;
12114 tg3_nvram_get_pagesize(tp, nvcfg1);
12115 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12116 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
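/* 264- and 528-byte pages are the non-power-of-two Atmel DataFlash
 * geometries, which need logical-to-physical address translation;
 * every other page size can be addressed directly, hence
 * NO_NVRAM_ADDR_TRANS (assumption inferred from the page sizes).
 */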
12120 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12122 u32 nvcfg1;
12124 nvcfg1 = tr32(NVRAM_CFG1);
12126 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12127 case FLASH_5717VENDOR_ATMEL_EEPROM:
12128 case FLASH_5717VENDOR_MICRO_EEPROM:
12129 tp->nvram_jedecnum = JEDEC_ATMEL;
12130 tg3_flag_set(tp, NVRAM_BUFFERED);
12131 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12133 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12134 tw32(NVRAM_CFG1, nvcfg1);
12135 return;
12136 case FLASH_5717VENDOR_ATMEL_MDB011D:
12137 case FLASH_5717VENDOR_ATMEL_ADB011B:
12138 case FLASH_5717VENDOR_ATMEL_ADB011D:
12139 case FLASH_5717VENDOR_ATMEL_MDB021D:
12140 case FLASH_5717VENDOR_ATMEL_ADB021B:
12141 case FLASH_5717VENDOR_ATMEL_ADB021D:
12142 case FLASH_5717VENDOR_ATMEL_45USPT:
12143 tp->nvram_jedecnum = JEDEC_ATMEL;
12144 tg3_flag_set(tp, NVRAM_BUFFERED);
12145 tg3_flag_set(tp, FLASH);
12147 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12148 case FLASH_5717VENDOR_ATMEL_MDB021D:
12149 /* Detect size with tg3_nvram_get_size() */
12150 break;
12151 case FLASH_5717VENDOR_ATMEL_ADB021B:
12152 case FLASH_5717VENDOR_ATMEL_ADB021D:
12153 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12154 break;
12155 default:
12156 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12157 break;
12159 break;
12160 case FLASH_5717VENDOR_ST_M_M25PE10:
12161 case FLASH_5717VENDOR_ST_A_M25PE10:
12162 case FLASH_5717VENDOR_ST_M_M45PE10:
12163 case FLASH_5717VENDOR_ST_A_M45PE10:
12164 case FLASH_5717VENDOR_ST_M_M25PE20:
12165 case FLASH_5717VENDOR_ST_A_M25PE20:
12166 case FLASH_5717VENDOR_ST_M_M45PE20:
12167 case FLASH_5717VENDOR_ST_A_M45PE20:
12168 case FLASH_5717VENDOR_ST_25USPT:
12169 case FLASH_5717VENDOR_ST_45USPT:
12170 tp->nvram_jedecnum = JEDEC_ST;
12171 tg3_flag_set(tp, NVRAM_BUFFERED);
12172 tg3_flag_set(tp, FLASH);
12174 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12175 case FLASH_5717VENDOR_ST_M_M25PE20:
12176 case FLASH_5717VENDOR_ST_M_M45PE20:
12177 /* Detect size with tg3_nvram_get_size() */
12178 break;
12179 case FLASH_5717VENDOR_ST_A_M25PE20:
12180 case FLASH_5717VENDOR_ST_A_M45PE20:
12181 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12182 break;
12183 default:
12184 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12185 break;
12187 break;
12188 default:
12189 tg3_flag_set(tp, NO_NVRAM);
12190 return;
12193 tg3_nvram_get_pagesize(tp, nvcfg1);
12194 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12195 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12198 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12200 u32 nvcfg1, nvmpinstrp;
12202 nvcfg1 = tr32(NVRAM_CFG1);
12203 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12205 switch (nvmpinstrp) {
12206 case FLASH_5720_EEPROM_HD:
12207 case FLASH_5720_EEPROM_LD:
12208 tp->nvram_jedecnum = JEDEC_ATMEL;
12209 tg3_flag_set(tp, NVRAM_BUFFERED);
12211 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12212 tw32(NVRAM_CFG1, nvcfg1);
12213 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12214 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12215 else
12216 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12217 return;
12218 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12219 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12220 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12221 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12222 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12223 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12224 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12225 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12226 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12227 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12228 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12229 case FLASH_5720VENDOR_ATMEL_45USPT:
12230 tp->nvram_jedecnum = JEDEC_ATMEL;
12231 tg3_flag_set(tp, NVRAM_BUFFERED);
12232 tg3_flag_set(tp, FLASH);
12234 switch (nvmpinstrp) {
12235 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12236 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12237 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12238 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12239 break;
12240 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12241 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12242 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12243 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12244 break;
12245 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12246 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12247 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12248 break;
12249 default:
12250 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12251 break;
12253 break;
12254 case FLASH_5720VENDOR_M_ST_M25PE10:
12255 case FLASH_5720VENDOR_M_ST_M45PE10:
12256 case FLASH_5720VENDOR_A_ST_M25PE10:
12257 case FLASH_5720VENDOR_A_ST_M45PE10:
12258 case FLASH_5720VENDOR_M_ST_M25PE20:
12259 case FLASH_5720VENDOR_M_ST_M45PE20:
12260 case FLASH_5720VENDOR_A_ST_M25PE20:
12261 case FLASH_5720VENDOR_A_ST_M45PE20:
12262 case FLASH_5720VENDOR_M_ST_M25PE40:
12263 case FLASH_5720VENDOR_M_ST_M45PE40:
12264 case FLASH_5720VENDOR_A_ST_M25PE40:
12265 case FLASH_5720VENDOR_A_ST_M45PE40:
12266 case FLASH_5720VENDOR_M_ST_M25PE80:
12267 case FLASH_5720VENDOR_M_ST_M45PE80:
12268 case FLASH_5720VENDOR_A_ST_M25PE80:
12269 case FLASH_5720VENDOR_A_ST_M45PE80:
12270 case FLASH_5720VENDOR_ST_25USPT:
12271 case FLASH_5720VENDOR_ST_45USPT:
12272 tp->nvram_jedecnum = JEDEC_ST;
12273 tg3_flag_set(tp, NVRAM_BUFFERED);
12274 tg3_flag_set(tp, FLASH);
12276 switch (nvmpinstrp) {
12277 case FLASH_5720VENDOR_M_ST_M25PE20:
12278 case FLASH_5720VENDOR_M_ST_M45PE20:
12279 case FLASH_5720VENDOR_A_ST_M25PE20:
12280 case FLASH_5720VENDOR_A_ST_M45PE20:
12281 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12282 break;
12283 case FLASH_5720VENDOR_M_ST_M25PE40:
12284 case FLASH_5720VENDOR_M_ST_M45PE40:
12285 case FLASH_5720VENDOR_A_ST_M25PE40:
12286 case FLASH_5720VENDOR_A_ST_M45PE40:
12287 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12288 break;
12289 case FLASH_5720VENDOR_M_ST_M25PE80:
12290 case FLASH_5720VENDOR_M_ST_M45PE80:
12291 case FLASH_5720VENDOR_A_ST_M25PE80:
12292 case FLASH_5720VENDOR_A_ST_M45PE80:
12293 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12294 break;
12295 default:
12296 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12297 break;
12299 break;
12300 default:
12301 tg3_flag_set(tp, NO_NVRAM);
12302 return;
12305 tg3_nvram_get_pagesize(tp, nvcfg1);
12306 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12307 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
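/* Illustrative sketch, not driver code: the 264/528 page-size test
 * above exists because Atmel DataFlash parts with those page sizes
 * address each page on a power-of-two frame, so a linear NVRAM offset
 * has to be translated into a (page index, byte-in-page) pair.  The
 * 9-bit page shift here is an assumption matching the 264-byte-page /
 * 512-byte-frame geometry; it is only meant to show why
 * NO_NVRAM_ADDR_TRANS stays clear for those two page sizes.
 */
static inline u32 tg3_dataflash_addr_sketch(u32 offset)
{
	return ((offset / 264) << 9) + (offset % 264);
}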
12310 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12311 static void __devinit tg3_nvram_init(struct tg3 *tp)
12313 tw32_f(GRC_EEPROM_ADDR,
12314 (EEPROM_ADDR_FSM_RESET |
12315 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12316 EEPROM_ADDR_CLKPERD_SHIFT)));
12318 msleep(1);
12320 /* Enable seeprom accesses. */
12321 tw32_f(GRC_LOCAL_CTRL,
12322 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12323 udelay(100);
12325 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12326 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12327 tg3_flag_set(tp, NVRAM);
12329 if (tg3_nvram_lock(tp)) {
12330 netdev_warn(tp->dev,
12331 "Cannot get nvram lock, %s failed\n",
12332 __func__);
12333 return;
12335 tg3_enable_nvram_access(tp);
12337 tp->nvram_size = 0;
12339 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12340 tg3_get_5752_nvram_info(tp);
12341 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12342 tg3_get_5755_nvram_info(tp);
12343 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12344 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12345 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12346 tg3_get_5787_nvram_info(tp);
12347 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12348 tg3_get_5761_nvram_info(tp);
12349 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12350 tg3_get_5906_nvram_info(tp);
12351 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12352 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12353 tg3_get_57780_nvram_info(tp);
12354 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12355 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12356 tg3_get_5717_nvram_info(tp);
12357 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12358 tg3_get_5720_nvram_info(tp);
12359 else
12360 tg3_get_nvram_info(tp);
12362 if (tp->nvram_size == 0)
12363 tg3_get_nvram_size(tp);
12365 tg3_disable_nvram_access(tp);
12366 tg3_nvram_unlock(tp);
12368 } else {
12369 tg3_flag_clear(tp, NVRAM);
12370 tg3_flag_clear(tp, NVRAM_BUFFERED);
12372 tg3_get_eeprom_size(tp);
12376 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12377 u32 offset, u32 len, u8 *buf)
12379 int i, j, rc = 0;
12380 u32 val;
12382 for (i = 0; i < len; i += 4) {
12383 u32 addr;
12384 __be32 data;
12386 addr = offset + i;
12388 memcpy(&data, buf + i, 4);
12391 /* The SEEPROM interface expects the data to always be opposite
12392 * the native endian format. We accomplish this by reversing
12393 * all the operations that would have been performed on the
12394 * data from a call to tg3_nvram_read_be32(). */
12396 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12398 val = tr32(GRC_EEPROM_ADDR);
12399 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12401 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12402 EEPROM_ADDR_READ);
12403 tw32(GRC_EEPROM_ADDR, val |
12404 (0 << EEPROM_ADDR_DEVID_SHIFT) |
12405 (addr & EEPROM_ADDR_ADDR_MASK) |
12406 EEPROM_ADDR_START |
12407 EEPROM_ADDR_WRITE);
12409 for (j = 0; j < 1000; j++) {
12410 val = tr32(GRC_EEPROM_ADDR);
12412 if (val & EEPROM_ADDR_COMPLETE)
12413 break;
12414 msleep(1);
12416 if (!(val & EEPROM_ADDR_COMPLETE)) {
12417 rc = -EBUSY;
12418 break;
12422 return rc;
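/* A minimal sketch (not used by the driver) of the byte-order
 * transform applied above: writing swab32(be32_to_cpu(data)) sends
 * the logical dword in little-endian byte order regardless of host
 * endianness, e.g. logical 0x12345678 reaches GRC_EEPROM_DATA as
 * 0x78563412.
 */
static inline u32 tg3_seeprom_wr_sketch(__be32 data)
{
	return swab32(be32_to_cpu(data));	/* same as the tw32() above */
}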
12425 /* offset and length are dword aligned */
12426 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12427 u8 *buf)
12429 int ret = 0;
12430 u32 pagesize = tp->nvram_pagesize;
12431 u32 pagemask = pagesize - 1;
12432 u32 nvram_cmd;
12433 u8 *tmp;
12435 tmp = kmalloc(pagesize, GFP_KERNEL);
12436 if (tmp == NULL)
12437 return -ENOMEM;
12439 while (len) {
12440 int j;
12441 u32 phy_addr, page_off, size;
12443 phy_addr = offset & ~pagemask;
12445 for (j = 0; j < pagesize; j += 4) {
12446 ret = tg3_nvram_read_be32(tp, phy_addr + j,
12447 (__be32 *) (tmp + j));
12448 if (ret)
12449 break;
12451 if (ret)
12452 break;
12454 page_off = offset & pagemask;
12455 size = pagesize;
12456 if (len < size)
12457 size = len;
12459 len -= size;
12461 memcpy(tmp + page_off, buf, size);
12463 offset = offset + (pagesize - page_off);
12465 tg3_enable_nvram_access(tp);
12468 /* Before we can erase the flash page, we need
12469 * to issue a special "write enable" command. */
12471 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12473 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12474 break;
12476 /* Erase the target page */
12477 tw32(NVRAM_ADDR, phy_addr);
12479 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12480 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12482 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12483 break;
12485 /* Issue another write enable to start the write. */
12486 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12488 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12489 break;
12491 for (j = 0; j < pagesize; j += 4) {
12492 __be32 data;
12494 data = *((__be32 *) (tmp + j));
12496 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12498 tw32(NVRAM_ADDR, phy_addr + j);
12500 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12501 NVRAM_CMD_WR;
12503 if (j == 0)
12504 nvram_cmd |= NVRAM_CMD_FIRST;
12505 else if (j == (pagesize - 4))
12506 nvram_cmd |= NVRAM_CMD_LAST;
12508 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12509 break;
12511 if (ret)
12512 break;
12515 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12516 tg3_nvram_exec_cmd(tp, nvram_cmd);
12518 kfree(tmp);
12520 return ret;
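/* Minimal sketch of the read-modify-write split performed above,
 * assuming (as the pagemask arithmetic does) that nvram_pagesize is
 * a power of two: e.g. with a 256-byte page, offset 0x133 splits
 * into page base 0x100 and in-page offset 0x33, and the whole page
 * is read, patched, erased and rewritten.
 */
static inline void tg3_page_split_sketch(u32 offset, u32 pagesize,
					 u32 *base, u32 *in_page)
{
	u32 pagemask = pagesize - 1;

	*base = offset & ~pagemask;
	*in_page = offset & pagemask;
}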
12523 /* offset and length are dword aligned */
12524 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12525 u8 *buf)
12527 int i, ret = 0;
12529 for (i = 0; i < len; i += 4, offset += 4) {
12530 u32 page_off, phy_addr, nvram_cmd;
12531 __be32 data;
12533 memcpy(&data, buf + i, 4);
12534 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12536 page_off = offset % tp->nvram_pagesize;
12538 phy_addr = tg3_nvram_phys_addr(tp, offset);
12540 tw32(NVRAM_ADDR, phy_addr);
12542 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12544 if (page_off == 0 || i == 0)
12545 nvram_cmd |= NVRAM_CMD_FIRST;
12546 if (page_off == (tp->nvram_pagesize - 4))
12547 nvram_cmd |= NVRAM_CMD_LAST;
12549 if (i == (len - 4))
12550 nvram_cmd |= NVRAM_CMD_LAST;
12552 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12553 !tg3_flag(tp, 5755_PLUS) &&
12554 (tp->nvram_jedecnum == JEDEC_ST) &&
12555 (nvram_cmd & NVRAM_CMD_FIRST)) {
12557 if ((ret = tg3_nvram_exec_cmd(tp,
12558 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12559 NVRAM_CMD_DONE)))
12561 break;
12563 if (!tg3_flag(tp, FLASH)) {
12564 /* We always do complete word writes to eeprom. */
12565 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12568 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12569 break;
12571 return ret;
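/* Sketch of the command-flag selection above: NVRAM_CMD_FIRST marks
 * the first dword of the transfer or of a new flash page, and
 * NVRAM_CMD_LAST the final dword of a page or of the whole transfer,
 * so a write spanning two pages is issued as two FIRST..LAST bursts.
 */
static inline u32 tg3_buffered_wrcmd_sketch(u32 i, u32 len,
					    u32 page_off, u32 pagesize)
{
	u32 cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

	if (page_off == 0 || i == 0)
		cmd |= NVRAM_CMD_FIRST;
	if (page_off == pagesize - 4 || i == len - 4)
		cmd |= NVRAM_CMD_LAST;
	return cmd;
}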
12574 /* offset and length are dword aligned */
12575 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12577 int ret;
12579 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12580 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12581 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12582 udelay(40);
12585 if (!tg3_flag(tp, NVRAM)) {
12586 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12587 } else {
12588 u32 grc_mode;
12590 ret = tg3_nvram_lock(tp);
12591 if (ret)
12592 return ret;
12594 tg3_enable_nvram_access(tp);
12595 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12596 tw32(NVRAM_WRITE1, 0x406);
12598 grc_mode = tr32(GRC_MODE);
12599 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12601 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12602 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12603 buf);
12604 } else {
12605 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12606 buf);
12609 grc_mode = tr32(GRC_MODE);
12610 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12612 tg3_disable_nvram_access(tp);
12613 tg3_nvram_unlock(tp);
12616 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12617 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12618 udelay(40);
12621 return ret;
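/* Hypothetical usage sketch (tg3_nvram_write_example and the 0x80
 * offset are illustrative, not driver entry points): both offset and
 * length handed to tg3_nvram_write_block() must be dword aligned, and
 * the routine selects the eeprom, buffered or unbuffered path itself.
 */
static inline int tg3_nvram_write_example(struct tg3 *tp)
{
	u8 buf[8] = { 0 };	/* dword-aligned length */

	return tg3_nvram_write_block(tp, 0x80, sizeof(buf), buf);
}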
12624 struct subsys_tbl_ent {
12625 u16 subsys_vendor, subsys_devid;
12626 u32 phy_id;
12629 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12630 /* Broadcom boards. */
12631 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12632 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12633 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12634 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12635 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12636 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12637 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12638 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12639 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12640 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12641 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12642 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12643 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12644 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12645 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12646 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12647 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12648 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12649 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12650 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12651 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12652 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12654 /* 3com boards. */
12655 { TG3PCI_SUBVENDOR_ID_3COM,
12656 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12657 { TG3PCI_SUBVENDOR_ID_3COM,
12658 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12659 { TG3PCI_SUBVENDOR_ID_3COM,
12660 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12661 { TG3PCI_SUBVENDOR_ID_3COM,
12662 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12663 { TG3PCI_SUBVENDOR_ID_3COM,
12664 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12666 /* DELL boards. */
12667 { TG3PCI_SUBVENDOR_ID_DELL,
12668 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12669 { TG3PCI_SUBVENDOR_ID_DELL,
12670 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12671 { TG3PCI_SUBVENDOR_ID_DELL,
12672 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12673 { TG3PCI_SUBVENDOR_ID_DELL,
12674 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12676 /* Compaq boards. */
12677 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12678 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12679 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12680 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12681 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12682 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12683 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12684 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12685 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12686 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12688 /* IBM boards. */
12689 { TG3PCI_SUBVENDOR_ID_IBM,
12690 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12693 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12695 int i;
12697 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12698 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12699 tp->pdev->subsystem_vendor) &&
12700 (subsys_id_to_phy_id[i].subsys_devid ==
12701 tp->pdev->subsystem_device))
12702 return &subsys_id_to_phy_id[i];
12704 return NULL;
12707 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12709 u32 val;
12710 u16 pmcsr;
12712 /* On some early chips the SRAM cannot be accessed in D3hot state,
12713 * so we need to make sure we're in D0. */
12715 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12716 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12717 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12718 msleep(1);
12720 /* Make sure register accesses (indirect or otherwise)
12721 * will function correctly. */
12723 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12724 tp->misc_host_ctrl);
12726 /* The memory arbiter has to be enabled in order for SRAM accesses
12727 * to succeed. Normally on powerup the tg3 chip firmware will make
12728 * sure it is enabled, but other entities such as system netboot
12729 * code might disable it. */
12731 val = tr32(MEMARB_MODE);
12732 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12734 tp->phy_id = TG3_PHY_ID_INVALID;
12735 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12737 /* Assume an onboard device and WOL capable by default. */
12738 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12739 tg3_flag_set(tp, WOL_CAP);
12741 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12742 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12743 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12744 tg3_flag_set(tp, IS_NIC);
12746 val = tr32(VCPU_CFGSHDW);
12747 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12748 tg3_flag_set(tp, ASPM_WORKAROUND);
12749 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12750 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12751 tg3_flag_set(tp, WOL_ENABLE);
12752 device_set_wakeup_enable(&tp->pdev->dev, true);
12754 goto done;
12757 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12758 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12759 u32 nic_cfg, led_cfg;
12760 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12761 int eeprom_phy_serdes = 0;
12763 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12764 tp->nic_sram_data_cfg = nic_cfg;
12766 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12767 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12768 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
12769 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
12770 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
12771 (ver > 0) && (ver < 0x100))
12772 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12774 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12775 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12777 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12778 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12779 eeprom_phy_serdes = 1;
12781 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12782 if (nic_phy_id != 0) {
12783 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12784 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12786 eeprom_phy_id = (id1 >> 16) << 10;
12787 eeprom_phy_id |= (id2 & 0xfc00) << 16;
12788 eeprom_phy_id |= (id2 & 0x03ff) << 0;
12789 } else
12790 eeprom_phy_id = 0;
12792 tp->phy_id = eeprom_phy_id;
12793 if (eeprom_phy_serdes) {
12794 if (!tg3_flag(tp, 5705_PLUS))
12795 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12796 else
12797 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12800 if (tg3_flag(tp, 5750_PLUS))
12801 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12802 SHASTA_EXT_LED_MODE_MASK);
12803 else
12804 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12806 switch (led_cfg) {
12807 default:
12808 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12809 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12810 break;
12812 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12813 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12814 break;
12816 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12817 tp->led_ctrl = LED_CTRL_MODE_MAC;
12819 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12820 * read on some older 5700/5701 bootcode. */
12822 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12823 ASIC_REV_5700 ||
12824 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12825 ASIC_REV_5701)
12826 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12828 break;
12830 case SHASTA_EXT_LED_SHARED:
12831 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12832 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12833 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12834 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12835 LED_CTRL_MODE_PHY_2);
12836 break;
12838 case SHASTA_EXT_LED_MAC:
12839 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12840 break;
12842 case SHASTA_EXT_LED_COMBO:
12843 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12844 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12845 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12846 LED_CTRL_MODE_PHY_2);
12847 break;
12851 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12852 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12853 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12854 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12856 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12857 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12859 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12860 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12861 if ((tp->pdev->subsystem_vendor ==
12862 PCI_VENDOR_ID_ARIMA) &&
12863 (tp->pdev->subsystem_device == 0x205a ||
12864 tp->pdev->subsystem_device == 0x2063))
12865 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12866 } else {
12867 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12868 tg3_flag_set(tp, IS_NIC);
12871 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12872 tg3_flag_set(tp, ENABLE_ASF);
12873 if (tg3_flag(tp, 5750_PLUS))
12874 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
12877 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12878 tg3_flag(tp, 5750_PLUS))
12879 tg3_flag_set(tp, ENABLE_APE);
12881 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12882 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12883 tg3_flag_clear(tp, WOL_CAP);
12885 if (tg3_flag(tp, WOL_CAP) &&
12886 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
12887 tg3_flag_set(tp, WOL_ENABLE);
12888 device_set_wakeup_enable(&tp->pdev->dev, true);
12891 if (cfg2 & (1 << 17))
12892 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12894 /* serdes signal pre-emphasis in register 0x590 set by */
12895 /* bootcode if bit 18 is set */
12896 if (cfg2 & (1 << 18))
12897 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12899 if ((tg3_flag(tp, 57765_PLUS) ||
12900 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12901 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12902 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12903 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12905 if (tg3_flag(tp, PCI_EXPRESS) &&
12906 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12907 !tg3_flag(tp, 57765_PLUS)) {
12908 u32 cfg3;
12910 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12911 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12912 tg3_flag_set(tp, ASPM_WORKAROUND);
12915 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12916 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
12917 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12918 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
12919 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12920 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
12922 done:
12923 if (tg3_flag(tp, WOL_CAP))
12924 device_set_wakeup_enable(&tp->pdev->dev,
12925 tg3_flag(tp, WOL_ENABLE));
12926 else
12927 device_set_wakeup_capable(&tp->pdev->dev, false);
12930 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12932 int i;
12933 u32 val;
12935 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12936 tw32(OTP_CTRL, cmd);
12938 /* Wait for up to 1 ms for command to execute. */
12939 for (i = 0; i < 100; i++) {
12940 val = tr32(OTP_STATUS);
12941 if (val & OTP_STATUS_CMD_DONE)
12942 break;
12943 udelay(10);
12946 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12949 /* Read the gphy configuration from the OTP region of the chip. The gphy
12950 * configuration is a 32-bit value that straddles the alignment boundary.
12951 * We do two 32-bit reads and then shift and merge the results. */
12953 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12955 u32 bhalf_otp, thalf_otp;
12957 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12959 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12960 return 0;
12962 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12964 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12965 return 0;
12967 thalf_otp = tr32(OTP_READ_DATA);
12969 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12971 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12972 return 0;
12974 bhalf_otp = tr32(OTP_READ_DATA);
12976 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
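/* Worked sketch of the merge above: if the first (top-half) read
 * returns 0xaaaabbbb and the second (bottom-half) read returns
 * 0xccccdddd, the recovered 32-bit gphy config is 0xbbbbcccc: the
 * low 16 bits of the first read become the high half and the high
 * 16 bits of the second read the low half.
 */
static inline u32 tg3_otp_merge_sketch(u32 thalf, u32 bhalf)
{
	return ((thalf & 0x0000ffff) << 16) | (bhalf >> 16);
}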
12979 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
12981 u32 adv = ADVERTISED_Autoneg |
12982 ADVERTISED_Pause;
12984 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12985 adv |= ADVERTISED_1000baseT_Half |
12986 ADVERTISED_1000baseT_Full;
12988 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12989 adv |= ADVERTISED_100baseT_Half |
12990 ADVERTISED_100baseT_Full |
12991 ADVERTISED_10baseT_Half |
12992 ADVERTISED_10baseT_Full |
12993 ADVERTISED_TP;
12994 else
12995 adv |= ADVERTISED_FIBRE;
12997 tp->link_config.advertising = adv;
12998 tp->link_config.speed = SPEED_INVALID;
12999 tp->link_config.duplex = DUPLEX_INVALID;
13000 tp->link_config.autoneg = AUTONEG_ENABLE;
13001 tp->link_config.active_speed = SPEED_INVALID;
13002 tp->link_config.active_duplex = DUPLEX_INVALID;
13003 tp->link_config.orig_speed = SPEED_INVALID;
13004 tp->link_config.orig_duplex = DUPLEX_INVALID;
13005 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13008 static int __devinit tg3_phy_probe(struct tg3 *tp)
13010 u32 hw_phy_id_1, hw_phy_id_2;
13011 u32 hw_phy_id, hw_phy_id_masked;
13012 int err;
13014 /* flow control autonegotiation is default behavior */
13015 tg3_flag_set(tp, PAUSE_AUTONEG);
13016 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13018 if (tg3_flag(tp, USE_PHYLIB))
13019 return tg3_phy_init(tp);
13021 /* Reading the PHY ID register can conflict with ASF
13022 * firmware access to the PHY hardware. */
13024 err = 0;
13025 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13026 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13027 } else {
13028 /* Now read the physical PHY_ID from the chip and verify
13029 * that it is sane. If it doesn't look good, we fall back
13030 * to either the hard-coded table-based PHY_ID or, failing
13031 * that, the value found in the eeprom area. */
13033 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13034 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13036 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13037 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13038 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13040 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13043 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13044 tp->phy_id = hw_phy_id;
13045 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13046 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13047 else
13048 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13049 } else {
13050 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13051 /* Do nothing, phy ID already set up in
13052 * tg3_get_eeprom_hw_cfg(). */
13054 } else {
13055 struct subsys_tbl_ent *p;
13057 /* No eeprom signature? Try the hardcoded
13058 * subsys device table. */
13060 p = tg3_lookup_by_subsys(tp);
13061 if (!p)
13062 return -ENODEV;
13064 tp->phy_id = p->phy_id;
13065 if (!tp->phy_id ||
13066 tp->phy_id == TG3_PHY_ID_BCM8002)
13067 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13071 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13072 ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13073 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13074 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13075 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13076 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13078 tg3_phy_init_link_config(tp);
13080 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13081 !tg3_flag(tp, ENABLE_APE) &&
13082 !tg3_flag(tp, ENABLE_ASF)) {
13083 u32 bmsr, adv_reg, tg3_ctrl, mask;
13085 tg3_readphy(tp, MII_BMSR, &bmsr);
13086 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13087 (bmsr & BMSR_LSTATUS))
13088 goto skip_phy_reset;
13090 err = tg3_phy_reset(tp);
13091 if (err)
13092 return err;
13094 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
13095 ADVERTISE_100HALF | ADVERTISE_100FULL |
13096 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
13097 tg3_ctrl = 0;
13098 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
13099 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
13100 MII_TG3_CTRL_ADV_1000_FULL);
13101 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13102 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
13103 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
13104 MII_TG3_CTRL_ENABLE_AS_MASTER);
13107 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13108 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13109 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13110 if (!tg3_copper_is_advertising_all(tp, mask)) {
13111 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
13113 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13114 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
13116 tg3_writephy(tp, MII_BMCR,
13117 BMCR_ANENABLE | BMCR_ANRESTART);
13119 tg3_phy_set_wirespeed(tp);
13121 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
13122 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13123 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
13126 skip_phy_reset:
13127 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13128 err = tg3_init_5401phy_dsp(tp);
13129 if (err)
13130 return err;
13132 err = tg3_init_5401phy_dsp(tp);
13135 return err;
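/* Sketch of the PHY id packing used above (the same layout is built
 * from NIC_SRAM_DATA_PHY_ID in tg3_get_eeprom_hw_cfg()): PHYSID1
 * lands in bits 10..25, the top six bits of PHYSID2 fill bits
 * 26..31, and the low ten bits of PHYSID2 (model and revision)
 * occupy bits 0..9.
 */
static inline u32 tg3_phy_id_pack_sketch(u32 physid1, u32 physid2)
{
	u32 id = (physid1 & 0xffff) << 10;

	id |= (physid2 & 0xfc00) << 16;
	id |= physid2 & 0x03ff;
	return id;
}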
13138 static void __devinit tg3_read_vpd(struct tg3 *tp)
13140 u8 *vpd_data;
13141 unsigned int block_end, rosize, len;
13142 int j, i = 0;
13144 vpd_data = (u8 *)tg3_vpd_readblock(tp);
13145 if (!vpd_data)
13146 goto out_no_vpd;
13148 i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
13149 PCI_VPD_LRDT_RO_DATA);
13150 if (i < 0)
13151 goto out_not_found;
13153 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13154 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13155 i += PCI_VPD_LRDT_TAG_SIZE;
13157 if (block_end > TG3_NVM_VPD_LEN)
13158 goto out_not_found;
13160 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13161 PCI_VPD_RO_KEYWORD_MFR_ID);
13162 if (j > 0) {
13163 len = pci_vpd_info_field_size(&vpd_data[j]);
13165 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13166 if (j + len > block_end || len != 4 ||
13167 memcmp(&vpd_data[j], "1028", 4))
13168 goto partno;
13170 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13171 PCI_VPD_RO_KEYWORD_VENDOR0);
13172 if (j < 0)
13173 goto partno;
13175 len = pci_vpd_info_field_size(&vpd_data[j]);
13177 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13178 if (j + len > block_end)
13179 goto partno;
13181 memcpy(tp->fw_ver, &vpd_data[j], len);
13182 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
13185 partno:
13186 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13187 PCI_VPD_RO_KEYWORD_PARTNO);
13188 if (i < 0)
13189 goto out_not_found;
13191 len = pci_vpd_info_field_size(&vpd_data[i]);
13193 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13194 if (len > TG3_BPN_SIZE ||
13195 (len + i) > TG3_NVM_VPD_LEN)
13196 goto out_not_found;
13198 memcpy(tp->board_part_number, &vpd_data[i], len);
13200 out_not_found:
13201 kfree(vpd_data);
13202 if (tp->board_part_number[0])
13203 return;
13205 out_no_vpd:
13206 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13207 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13208 strcpy(tp->board_part_number, "BCM5717");
13209 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13210 strcpy(tp->board_part_number, "BCM5718");
13211 else
13212 goto nomatch;
13213 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13214 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13215 strcpy(tp->board_part_number, "BCM57780");
13216 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13217 strcpy(tp->board_part_number, "BCM57760");
13218 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13219 strcpy(tp->board_part_number, "BCM57790");
13220 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13221 strcpy(tp->board_part_number, "BCM57788");
13222 else
13223 goto nomatch;
13224 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13225 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13226 strcpy(tp->board_part_number, "BCM57761");
13227 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13228 strcpy(tp->board_part_number, "BCM57765");
13229 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13230 strcpy(tp->board_part_number, "BCM57781");
13231 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13232 strcpy(tp->board_part_number, "BCM57785");
13233 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13234 strcpy(tp->board_part_number, "BCM57791");
13235 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13236 strcpy(tp->board_part_number, "BCM57795");
13237 else
13238 goto nomatch;
13239 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13240 strcpy(tp->board_part_number, "BCM95906");
13241 } else {
13242 nomatch:
13243 strcpy(tp->board_part_number, "none");
13247 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13249 u32 val;
13251 if (tg3_nvram_read(tp, offset, &val) ||
13252 (val & 0xfc000000) != 0x0c000000 ||
13253 tg3_nvram_read(tp, offset + 4, &val) ||
13254 val != 0)
13255 return 0;
13257 return 1;
13260 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13262 u32 val, offset, start, ver_offset;
13263 int i, dst_off;
13264 bool newver = false;
13266 if (tg3_nvram_read(tp, 0xc, &offset) ||
13267 tg3_nvram_read(tp, 0x4, &start))
13268 return;
13270 offset = tg3_nvram_logical_addr(tp, offset);
13272 if (tg3_nvram_read(tp, offset, &val))
13273 return;
13275 if ((val & 0xfc000000) == 0x0c000000) {
13276 if (tg3_nvram_read(tp, offset + 4, &val))
13277 return;
13279 if (val == 0)
13280 newver = true;
13283 dst_off = strlen(tp->fw_ver);
13285 if (newver) {
13286 if (TG3_VER_SIZE - dst_off < 16 ||
13287 tg3_nvram_read(tp, offset + 8, &ver_offset))
13288 return;
13290 offset = offset + ver_offset - start;
13291 for (i = 0; i < 16; i += 4) {
13292 __be32 v;
13293 if (tg3_nvram_read_be32(tp, offset + i, &v))
13294 return;
13296 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13298 } else {
13299 u32 major, minor;
13301 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13302 return;
13304 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13305 TG3_NVM_BCVER_MAJSFT;
13306 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13307 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13308 "v%d.%02d", major, minor);
13312 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13314 u32 val, major, minor;
13316 /* Use native endian representation */
13317 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13318 return;
13320 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13321 TG3_NVM_HWSB_CFG1_MAJSFT;
13322 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13323 TG3_NVM_HWSB_CFG1_MINSFT;
13325 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13328 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13330 u32 offset, major, minor, build;
13332 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13334 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13335 return;
13337 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13338 case TG3_EEPROM_SB_REVISION_0:
13339 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13340 break;
13341 case TG3_EEPROM_SB_REVISION_2:
13342 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13343 break;
13344 case TG3_EEPROM_SB_REVISION_3:
13345 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13346 break;
13347 case TG3_EEPROM_SB_REVISION_4:
13348 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13349 break;
13350 case TG3_EEPROM_SB_REVISION_5:
13351 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13352 break;
13353 case TG3_EEPROM_SB_REVISION_6:
13354 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13355 break;
13356 default:
13357 return;
13360 if (tg3_nvram_read(tp, offset, &val))
13361 return;
13363 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13364 TG3_EEPROM_SB_EDH_BLD_SHFT;
13365 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13366 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13367 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13369 if (minor > 99 || build > 26)
13370 return;
13372 offset = strlen(tp->fw_ver);
13373 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13374 " v%d.%02d", major, minor);
13376 if (build > 0) {
13377 offset = strlen(tp->fw_ver);
13378 if (offset < TG3_VER_SIZE - 1)
13379 tp->fw_ver[offset] = 'a' + build - 1;
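	/* Note on the suffix rule above: builds 1..26 map to a trailing
	 * 'a'..'z' (which is why build values above 26 were rejected
	 * earlier), so e.g. major 1, minor 2, build 3 renders as
	 * "sb v1.02c".
	 */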
13383 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13385 u32 val, offset, start;
13386 int i, vlen;
13388 for (offset = TG3_NVM_DIR_START;
13389 offset < TG3_NVM_DIR_END;
13390 offset += TG3_NVM_DIRENT_SIZE) {
13391 if (tg3_nvram_read(tp, offset, &val))
13392 return;
13394 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13395 break;
13398 if (offset == TG3_NVM_DIR_END)
13399 return;
13401 if (!tg3_flag(tp, 5705_PLUS))
13402 start = 0x08000000;
13403 else if (tg3_nvram_read(tp, offset - 4, &start))
13404 return;
13406 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13407 !tg3_fw_img_is_valid(tp, offset) ||
13408 tg3_nvram_read(tp, offset + 8, &val))
13409 return;
13411 offset += val - start;
13413 vlen = strlen(tp->fw_ver);
13415 tp->fw_ver[vlen++] = ',';
13416 tp->fw_ver[vlen++] = ' ';
13418 for (i = 0; i < 4; i++) {
13419 __be32 v;
13420 if (tg3_nvram_read_be32(tp, offset, &v))
13421 return;
13423 offset += sizeof(v);
13425 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13426 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13427 break;
13430 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13431 vlen += sizeof(v);
13435 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13437 int vlen;
13438 u32 apedata;
13439 char *fwtype;
13441 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13442 return;
13444 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13445 if (apedata != APE_SEG_SIG_MAGIC)
13446 return;
13448 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13449 if (!(apedata & APE_FW_STATUS_READY))
13450 return;
13452 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13454 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13455 tg3_flag_set(tp, APE_HAS_NCSI);
13456 fwtype = "NCSI";
13457 } else {
13458 fwtype = "DASH";
13461 vlen = strlen(tp->fw_ver);
13463 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13464 fwtype,
13465 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13466 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13467 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13468 (apedata & APE_FW_VERSION_BLDMSK));
13471 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13473 u32 val;
13474 bool vpd_vers = false;
13476 if (tp->fw_ver[0] != 0)
13477 vpd_vers = true;
13479 if (tg3_flag(tp, NO_NVRAM)) {
13480 strcat(tp->fw_ver, "sb");
13481 return;
13484 if (tg3_nvram_read(tp, 0, &val))
13485 return;
13487 if (val == TG3_EEPROM_MAGIC)
13488 tg3_read_bc_ver(tp);
13489 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13490 tg3_read_sb_ver(tp, val);
13491 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13492 tg3_read_hwsb_ver(tp);
13493 else
13494 return;
13496 if (!tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || vpd_vers)
13497 goto done;
13499 tg3_read_mgmtfw_ver(tp);
13501 done:
13502 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13505 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13507 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13509 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13510 return TG3_RX_RET_MAX_SIZE_5717;
13511 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13512 return TG3_RX_RET_MAX_SIZE_5700;
13513 else
13514 return TG3_RX_RET_MAX_SIZE_5705;
13517 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13518 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13519 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13520 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13521 { },
13524 static int __devinit tg3_get_invariants(struct tg3 *tp)
13526 u32 misc_ctrl_reg;
13527 u32 pci_state_reg, grc_misc_cfg;
13528 u32 val;
13529 u16 pci_cmd;
13530 int err;
13532 /* Force memory write invalidate off. If we leave it on,
13533 * then on 5700_BX chips we have to enable a workaround.
13534 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13535 * to match the cacheline size. The Broadcom driver has this
13536 * workaround but turns MWI off all the time, so it never uses
13537 * it. This seems to suggest that the workaround is insufficient. */
13539 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13540 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13541 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13543 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
13544 * has the register indirect write enable bit set before
13545 * we try to access any of the MMIO registers. It is also
13546 * critical that the PCI-X hw workaround situation is decided
13547 * before that as well. */
13549 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13550 &misc_ctrl_reg);
13552 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13553 MISC_HOST_CTRL_CHIPREV_SHIFT);
13554 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13555 u32 prod_id_asic_rev;
13557 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13558 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13559 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13560 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13561 pci_read_config_dword(tp->pdev,
13562 TG3PCI_GEN2_PRODID_ASICREV,
13563 &prod_id_asic_rev);
13564 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13565 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13566 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13567 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13568 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13569 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13570 pci_read_config_dword(tp->pdev,
13571 TG3PCI_GEN15_PRODID_ASICREV,
13572 &prod_id_asic_rev);
13573 else
13574 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13575 &prod_id_asic_rev);
13577 tp->pci_chip_rev_id = prod_id_asic_rev;
13580 /* Wrong chip ID in 5752 A0. This code can be removed later
13581 * as A0 is not in production. */
13583 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13584 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13586 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13587 * we need to disable memory and use config. cycles
13588 * only to access all registers. The 5702/03 chips
13589 * can mistakenly decode the special cycles from the
13590 * ICH chipsets as memory write cycles, causing corruption
13591 * of register and memory space. Only certain ICH bridges
13592 * will drive special cycles with non-zero data during the
13593 * address phase which can fall within the 5703's address
13594 * range. This is not an ICH bug as the PCI spec allows
13595 * non-zero address during special cycles. However, only
13596 * these ICH bridges are known to drive non-zero addresses
13597 * during special cycles.
13598 *
13599 * Since special cycles do not cross PCI bridges, we only
13600 * enable this workaround if the 5703 is on the secondary
13601 * bus of these ICH bridges. */
13603 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13604 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13605 static struct tg3_dev_id {
13606 u32 vendor;
13607 u32 device;
13608 u32 rev;
13609 } ich_chipsets[] = {
13610 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13611 PCI_ANY_ID },
13612 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13613 PCI_ANY_ID },
13614 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13615 0xa },
13616 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13617 PCI_ANY_ID },
13618 { },
13620 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13621 struct pci_dev *bridge = NULL;
13623 while (pci_id->vendor != 0) {
13624 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13625 bridge);
13626 if (!bridge) {
13627 pci_id++;
13628 continue;
13630 if (pci_id->rev != PCI_ANY_ID) {
13631 if (bridge->revision > pci_id->rev)
13632 continue;
13634 if (bridge->subordinate &&
13635 (bridge->subordinate->number ==
13636 tp->pdev->bus->number)) {
13637 tg3_flag_set(tp, ICH_WORKAROUND);
13638 pci_dev_put(bridge);
13639 break;
13644 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
13645 static struct tg3_dev_id {
13646 u32 vendor;
13647 u32 device;
13648 } bridge_chipsets[] = {
13649 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13650 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13651 { },
13653 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13654 struct pci_dev *bridge = NULL;
13656 while (pci_id->vendor != 0) {
13657 bridge = pci_get_device(pci_id->vendor,
13658 pci_id->device,
13659 bridge);
13660 if (!bridge) {
13661 pci_id++;
13662 continue;
13664 if (bridge->subordinate &&
13665 (bridge->subordinate->number <=
13666 tp->pdev->bus->number) &&
13667 (bridge->subordinate->subordinate >=
13668 tp->pdev->bus->number)) {
13669 tg3_flag_set(tp, 5701_DMA_BUG);
13670 pci_dev_put(bridge);
13671 break;
13676 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13677 * DMA addresses > 40-bit. This bridge may have other additional
13678 * 57xx devices behind it in some 4-port NIC designs for example.
13679 * Any tg3 device found behind the bridge will also need the 40-bit
13680 * DMA workaround. */
13682 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13683 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13684 tg3_flag_set(tp, 5780_CLASS);
13685 tg3_flag_set(tp, 40BIT_DMA_BUG);
13686 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13687 } else {
13688 struct pci_dev *bridge = NULL;
13690 do {
13691 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13692 PCI_DEVICE_ID_SERVERWORKS_EPB,
13693 bridge);
13694 if (bridge && bridge->subordinate &&
13695 (bridge->subordinate->number <=
13696 tp->pdev->bus->number) &&
13697 (bridge->subordinate->subordinate >=
13698 tp->pdev->bus->number)) {
13699 tg3_flag_set(tp, 40BIT_DMA_BUG);
13700 pci_dev_put(bridge);
13701 break;
13703 } while (bridge);
13706 /* Initialize misc host control in PCI block. */
13707 tp->misc_host_ctrl |= (misc_ctrl_reg &
13708 MISC_HOST_CTRL_CHIPREV);
13709 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13710 tp->misc_host_ctrl);
13712 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13713 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13714 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13715 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13716 tp->pdev_peer = tg3_find_peer(tp);
13718 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13719 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13720 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13721 tg3_flag_set(tp, 5717_PLUS);
13723 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13724 tg3_flag(tp, 5717_PLUS))
13725 tg3_flag_set(tp, 57765_PLUS);
13727 /* Intentionally exclude ASIC_REV_5906 */
13728 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13729 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13730 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13731 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13732 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13733 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13734 tg3_flag(tp, 57765_PLUS))
13735 tg3_flag_set(tp, 5755_PLUS);
13737 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13738 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13739 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13740 tg3_flag(tp, 5755_PLUS) ||
13741 tg3_flag(tp, 5780_CLASS))
13742 tg3_flag_set(tp, 5750_PLUS);
13744 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
13745 tg3_flag(tp, 5750_PLUS))
13746 tg3_flag_set(tp, 5705_PLUS);
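	/* The assignments above establish a strict inclusion chain that
	 * later tests rely on: 5717_PLUS implies 57765_PLUS implies
	 * 5755_PLUS implies 5750_PLUS implies 5705_PLUS.
	 */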
13748 /* 5700 B0 chips do not support checksumming correctly due
13749 * to hardware bugs. */
13751 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
13752 u32 features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
13754 if (tg3_flag(tp, 5755_PLUS))
13755 features |= NETIF_F_IPV6_CSUM;
13756 tp->dev->features |= features;
13757 tp->dev->hw_features |= features;
13758 tp->dev->vlan_features |= features;
13761 /* Determine TSO capabilities */
13762 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13763 ; /* Do nothing. HW bug. */
13764 else if (tg3_flag(tp, 57765_PLUS))
13765 tg3_flag_set(tp, HW_TSO_3);
13766 else if (tg3_flag(tp, 5755_PLUS) ||
13767 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13768 tg3_flag_set(tp, HW_TSO_2);
13769 else if (tg3_flag(tp, 5750_PLUS)) {
13770 tg3_flag_set(tp, HW_TSO_1);
13771 tg3_flag_set(tp, TSO_BUG);
13772 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13773 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13774 tg3_flag_clear(tp, TSO_BUG);
13775 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13776 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13777 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13778 tg3_flag_set(tp, TSO_BUG);
13779 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13780 tp->fw_needed = FIRMWARE_TG3TSO5;
13781 else
13782 tp->fw_needed = FIRMWARE_TG3TSO;
13785 tp->irq_max = 1;
13787 if (tg3_flag(tp, 5750_PLUS)) {
13788 tg3_flag_set(tp, SUPPORT_MSI);
13789 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13790 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13791 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13792 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13793 tp->pdev_peer == tp->pdev))
13794 tg3_flag_clear(tp, SUPPORT_MSI);
13796 if (tg3_flag(tp, 5755_PLUS) ||
13797 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13798 tg3_flag_set(tp, 1SHOT_MSI);
13801 if (tg3_flag(tp, 57765_PLUS)) {
13802 tg3_flag_set(tp, SUPPORT_MSIX);
13803 tp->irq_max = TG3_IRQ_MAX_VECS;
13807 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13808 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13809 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13810 tg3_flag_set(tp, SHORT_DMA_BUG);
13811 else if (!tg3_flag(tp, 5755_PLUS)) {
13812 tg3_flag_set(tp, 4G_DMA_BNDRY_BUG);
13813 tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG);
13816 if (tg3_flag(tp, 5717_PLUS))
13817 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13819 if (tg3_flag(tp, 57765_PLUS) &&
13820 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13821 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13823 if (!tg3_flag(tp, 5705_PLUS) ||
13824 tg3_flag(tp, 5780_CLASS) ||
13825 tg3_flag(tp, USE_JUMBO_BDFLAG))
13826 tg3_flag_set(tp, JUMBO_CAPABLE);
13828 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13829 &pci_state_reg);
13831 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13832 if (tp->pcie_cap != 0) {
13833 u16 lnkctl;
13835 tg3_flag_set(tp, PCI_EXPRESS);
13837 tp->pcie_readrq = 4096;
13838 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13839 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13840 tp->pcie_readrq = 2048;
13842 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13844 pci_read_config_word(tp->pdev,
13845 tp->pcie_cap + PCI_EXP_LNKCTL,
13846 &lnkctl);
13847 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13848 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13849 tg3_flag_clear(tp, HW_TSO_2);
13850 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13851 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13852 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13853 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13854 tg3_flag_set(tp, CLKREQ_BUG);
13855 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13856 tg3_flag_set(tp, L1PLLPD_EN);
13858 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13859 tg3_flag_set(tp, PCI_EXPRESS);
13860 } else if (!tg3_flag(tp, 5705_PLUS) ||
13861 tg3_flag(tp, 5780_CLASS)) {
13862 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13863 if (!tp->pcix_cap) {
13864 dev_err(&tp->pdev->dev,
13865 "Cannot find PCI-X capability, aborting\n");
13866 return -EIO;
13869 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13870 tg3_flag_set(tp, PCIX_MODE);
13873 /* If we have an AMD 762 or VIA K8T800 chipset, write
13874 * reordering to the mailbox registers done by the host
13875 * controller can cause major troubles. We read back from
13876 * every mailbox register write to force the writes to be
13877 * posted to the chip in order. */
13879 if (pci_dev_present(tg3_write_reorder_chipsets) &&
13880 !tg3_flag(tp, PCI_EXPRESS))
13881 tg3_flag_set(tp, MBOX_WRITE_REORDER);
13883 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13884 &tp->pci_cacheline_sz);
13885 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13886 &tp->pci_lat_timer);
13887 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13888 tp->pci_lat_timer < 64) {
13889 tp->pci_lat_timer = 64;
13890 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13891 tp->pci_lat_timer);
13894 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13895 /* 5700 BX chips need to have their TX producer index
13896 * mailboxes written twice to work around a bug. */
13898 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13900 /* If we are in PCI-X mode, enable register write workaround.
13901 *
13902 * The workaround is to use indirect register accesses
13903 * for all chip writes not to mailbox registers. */
13905 if (tg3_flag(tp, PCIX_MODE)) {
13906 u32 pm_reg;
13908 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13910 /* The chip can have its power management PCI config
13911 * space registers clobbered due to this bug.
13912 * So explicitly force the chip into D0 here. */
13914 pci_read_config_dword(tp->pdev,
13915 tp->pm_cap + PCI_PM_CTRL,
13916 &pm_reg);
13917 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13918 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13919 pci_write_config_dword(tp->pdev,
13920 tp->pm_cap + PCI_PM_CTRL,
13921 pm_reg);
13923 /* Also, force SERR#/PERR# in PCI command. */
13924 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13925 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13926 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13930 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13931 tg3_flag_set(tp, PCI_HIGH_SPEED);
13932 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13933 tg3_flag_set(tp, PCI_32BIT);
13935 /* Chip-specific fixup from Broadcom driver */
13936 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13937 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13938 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13939 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13942 /* Default fast path register access methods */
13943 tp->read32 = tg3_read32;
13944 tp->write32 = tg3_write32;
13945 tp->read32_mbox = tg3_read32;
13946 tp->write32_mbox = tg3_write32;
13947 tp->write32_tx_mbox = tg3_write32;
13948 tp->write32_rx_mbox = tg3_write32;
13950 /* Various workaround register access methods */
13951 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
13952 tp->write32 = tg3_write_indirect_reg32;
13953 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13954 (tg3_flag(tp, PCI_EXPRESS) &&
13955 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13957 /* Back to back register writes can cause problems on these
13958 * chips; the workaround is to read back all reg writes
13959 * except those to mailbox regs.
13960 *
13961 * See tg3_write_indirect_reg32(). */
13963 tp->write32 = tg3_write_flush_reg32;
13966 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
13967 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13968 if (tg3_flag(tp, MBOX_WRITE_REORDER))
13969 tp->write32_rx_mbox = tg3_write_flush_reg32;
13972 if (tg3_flag(tp, ICH_WORKAROUND)) {
13973 tp->read32 = tg3_read_indirect_reg32;
13974 tp->write32 = tg3_write_indirect_reg32;
13975 tp->read32_mbox = tg3_read_indirect_mbox;
13976 tp->write32_mbox = tg3_write_indirect_mbox;
13977 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13978 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13980 iounmap(tp->regs);
13981 tp->regs = NULL;
13983 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13984 pci_cmd &= ~PCI_COMMAND_MEMORY;
13985 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13987 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13988 tp->read32_mbox = tg3_read32_mbox_5906;
13989 tp->write32_mbox = tg3_write32_mbox_5906;
13990 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13991 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13994 if (tp->write32 == tg3_write_indirect_reg32 ||
13995 (tg3_flag(tp, PCIX_MODE) &&
13996 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13997 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13998 tg3_flag_set(tp, SRAM_USE_CONFIG);
14000 /* Get eeprom hw config before calling tg3_set_power_state().
14001 * In particular, the TG3_FLAG_IS_NIC flag must be
14002 * determined before calling tg3_set_power_state() so that
14003 * we know whether or not to switch out of Vaux power.
14004 * When the flag is set, it means that GPIO1 is used for eeprom
14005 * write protect and also implies that it is a LOM where GPIOs
14006 * are not used to switch power. */
14008 tg3_get_eeprom_hw_cfg(tp);
14010 if (tg3_flag(tp, ENABLE_APE)) {
14011 /* Allow reads and writes to the
14012 * APE register and memory space. */
14014 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14015 PCISTATE_ALLOW_APE_SHMEM_WR |
14016 PCISTATE_ALLOW_APE_PSPACE_WR;
14017 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14018 pci_state_reg);
14021 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14022 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14023 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14024 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14025 tg3_flag(tp, 57765_PLUS))
14026 tg3_flag_set(tp, CPMU_PRESENT);
14028 /* Set up tp->grc_local_ctrl before calling tg3_power_up().
14029 * GPIO1 driven high will bring 5700's external PHY out of reset.
14030 * It is also used as eeprom write protect on LOMs. */
14032 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14033 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
14034 tg3_flag(tp, EEPROM_WRITE_PROT))
14035 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14036 GRC_LCLCTRL_GPIO_OUTPUT1);
14037 /* Unused GPIO3 must be driven as output on 5752 because there
14038 * are no pull-up resistors on unused GPIO pins. */
14040 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14041 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14043 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14044 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14045 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14046 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14048 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14049 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14050 /* Turn off the debug UART. */
14051 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14052 if (tg3_flag(tp, IS_NIC))
14053 /* Keep VMain power. */
14054 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14055 GRC_LCLCTRL_GPIO_OUTPUT0;
14058 /* Force the chip into D0. */
14059 err = tg3_power_up(tp);
14060 if (err) {
14061 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
14062 return err;
14065 /* Derive initial jumbo mode from MTU assigned in
14066 * ether_setup() via the alloc_etherdev() call.
14067 */
14068 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14069 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14071 /* Determine WakeOnLan speed to use. */
14072 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14073 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14074 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14075 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14076 tg3_flag_clear(tp, WOL_SPEED_100MB);
14077 } else {
14078 tg3_flag_set(tp, WOL_SPEED_100MB);
14081 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14082 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14084 /* A few boards don't want Ethernet@WireSpeed phy feature */
14085 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
14086 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
14087 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14088 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14089 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14090 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14091 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14093 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14094 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14095 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14096 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14097 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14099 if (tg3_flag(tp, 5705_PLUS) &&
14100 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14101 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14102 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14103 !tg3_flag(tp, 57765_PLUS)) {
14104 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14105 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14106 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14107 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14108 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14109 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14110 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14111 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14112 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14113 } else
14114 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14117 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14118 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14119 tp->phy_otp = tg3_read_otp_phycfg(tp);
14120 if (tp->phy_otp == 0)
14121 tp->phy_otp = TG3_OTP_DEFAULT;
14124 if (tg3_flag(tp, CPMU_PRESENT))
14125 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14126 else
14127 tp->mi_mode = MAC_MI_MODE_BASE;
14129 tp->coalesce_mode = 0;
14130 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14131 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14132 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14134 /* Set these bits to enable statistics workaround. */
14135 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14136 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14137 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14138 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14139 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14142 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14143 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14144 tg3_flag_set(tp, USE_PHYLIB);
14146 err = tg3_mdio_init(tp);
14147 if (err)
14148 return err;
14150 /* Initialize data/descriptor byte/word swapping. */
14151 val = tr32(GRC_MODE);
14152 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14153 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14154 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14155 GRC_MODE_B2HRX_ENABLE |
14156 GRC_MODE_HTX2B_ENABLE |
14157 GRC_MODE_HOST_STACKUP);
14158 else
14159 val &= GRC_MODE_HOST_STACKUP;
14161 tw32(GRC_MODE, val | tp->grc_mode);
14163 tg3_switch_clocks(tp);
14165 /* Clear this out for sanity. */
14166 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14168 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14169 &pci_state_reg);
14170 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14171 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14172 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14174 if (chiprevid == CHIPREV_ID_5701_A0 ||
14175 chiprevid == CHIPREV_ID_5701_B0 ||
14176 chiprevid == CHIPREV_ID_5701_B2 ||
14177 chiprevid == CHIPREV_ID_5701_B5) {
14178 void __iomem *sram_base;
14180 /* Write some dummy words into the SRAM status block
14181 * area, see if it reads back correctly. If the return
14182 * value is bad, force enable the PCIX workaround.
14183 */
14184 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14186 writel(0x00000000, sram_base);
14187 writel(0x00000000, sram_base + 4);
14188 writel(0xffffffff, sram_base + 4);
14189 if (readl(sram_base) != 0x00000000)
14190 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14194 udelay(50);
14195 tg3_nvram_init(tp);
14197 grc_misc_cfg = tr32(GRC_MISC_CFG);
14198 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14200 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14201 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14202 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14203 tg3_flag_set(tp, IS_5788);
14205 if (!tg3_flag(tp, IS_5788) &&
14206 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
14207 tg3_flag_set(tp, TAGGED_STATUS);
14208 if (tg3_flag(tp, TAGGED_STATUS)) {
14209 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14210 HOSTCC_MODE_CLRTICK_TXBD);
14212 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14213 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14214 tp->misc_host_ctrl);
14217 /* Preserve the APE MAC_MODE bits */
14218 if (tg3_flag(tp, ENABLE_APE))
14219 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14220 else
14221 tp->mac_mode = TG3_DEF_MAC_MODE;
14223 /* these are limited to 10/100 only */
14224 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14225 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14226 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14227 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14228 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14229 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14230 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14231 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14232 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14233 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14234 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14235 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14236 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14237 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14238 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14239 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14241 err = tg3_phy_probe(tp);
14242 if (err) {
14243 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14244 /* ... but do not return immediately ... */
14245 tg3_mdio_fini(tp);
14248 tg3_read_vpd(tp);
14249 tg3_read_fw_ver(tp);
14251 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14252 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14253 } else {
14254 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14255 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14256 else
14257 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14260 /* 5700 {AX,BX} chips have a broken status block link
14261 * change bit implementation, so we must use the
14262 * status register in those cases.
14263 */
14264 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14265 tg3_flag_set(tp, USE_LINKCHG_REG);
14266 else
14267 tg3_flag_clear(tp, USE_LINKCHG_REG);
14269 /* The led_ctrl is set during tg3_phy_probe, here we might
14270 * have to force the link status polling mechanism based
14271 * upon subsystem IDs.
14272 */
14273 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14274 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14275 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14276 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14277 tg3_flag_set(tp, USE_LINKCHG_REG);
14280 /* For all SERDES we poll the MAC status register. */
14281 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14282 tg3_flag_set(tp, POLL_SERDES);
14283 else
14284 tg3_flag_clear(tp, POLL_SERDES);
14286 tp->rx_offset = NET_IP_ALIGN;
14287 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14288 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14289 tg3_flag(tp, PCIX_MODE)) {
14290 tp->rx_offset = 0;
14291 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14292 tp->rx_copy_thresh = ~(u16)0;
14293 #endif
14296 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14297 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14298 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14300 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14302 /* Increment the rx prod index on the rx std ring by at most
14303 * 8 for these chips to work around hw errata.
14304 */
14305 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14306 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14307 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14308 tp->rx_std_max_post = 8;
14310 if (tg3_flag(tp, ASPM_WORKAROUND))
14311 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14312 PCIE_PWR_MGMT_L1_THRESH_MSK;
14314 return err;
14315 }
14317 #ifdef CONFIG_SPARC
14318 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14319 {
14320 struct net_device *dev = tp->dev;
14321 struct pci_dev *pdev = tp->pdev;
14322 struct device_node *dp = pci_device_to_OF_node(pdev);
14323 const unsigned char *addr;
14324 int len;
14326 addr = of_get_property(dp, "local-mac-address", &len);
14327 if (addr && len == 6) {
14328 memcpy(dev->dev_addr, addr, 6);
14329 memcpy(dev->perm_addr, dev->dev_addr, 6);
14330 return 0;
14331 }
14332 return -ENODEV;
14333 }
14335 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14336 {
14337 struct net_device *dev = tp->dev;
14339 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14340 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14341 return 0;
14342 }
14343 #endif
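/* Usage note (sketch): on SPARC the firmware exposes the factory
 * station address as a 6-byte "local-mac-address" property on the
 * device's OF node, which of_get_property() above returns verbatim;
 * the IDPROM address is the machine-wide fallback.
 */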
14345 static int __devinit tg3_get_device_address(struct tg3 *tp)
14346 {
14347 struct net_device *dev = tp->dev;
14348 u32 hi, lo, mac_offset;
14349 int addr_ok = 0;
14351 #ifdef CONFIG_SPARC
14352 if (!tg3_get_macaddr_sparc(tp))
14353 return 0;
14354 #endif
14356 mac_offset = 0x7c;
14357 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
14358 tg3_flag(tp, 5780_CLASS)) {
14359 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14360 mac_offset = 0xcc;
14361 if (tg3_nvram_lock(tp))
14362 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14363 else
14364 tg3_nvram_unlock(tp);
14365 } else if (tg3_flag(tp, 5717_PLUS)) {
14366 if (PCI_FUNC(tp->pdev->devfn) & 1)
14367 mac_offset = 0xcc;
14368 if (PCI_FUNC(tp->pdev->devfn) > 1)
14369 mac_offset += 0x18c;
14370 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14371 mac_offset = 0x10;
14373 /* First try to get it from MAC address mailbox. */
14374 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14375 if ((hi >> 16) == 0x484b) {
14376 dev->dev_addr[0] = (hi >> 8) & 0xff;
14377 dev->dev_addr[1] = (hi >> 0) & 0xff;
14379 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14380 dev->dev_addr[2] = (lo >> 24) & 0xff;
14381 dev->dev_addr[3] = (lo >> 16) & 0xff;
14382 dev->dev_addr[4] = (lo >> 8) & 0xff;
14383 dev->dev_addr[5] = (lo >> 0) & 0xff;
14385 /* Some old bootcode may report a 0 MAC address in SRAM */
14386 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
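/* Sketch, for clarity only: the bootcode tags the upper half of the
 * high word with 0x484b (ASCII "HK") and stores the address
 * big-endian, so the six bytes unpack as in this hypothetical helper
 * mirroring the shifts above:
 */
#if 0
static void tg3_unpack_sram_macaddr(u32 hi, u32 lo, u8 *addr)
{
	addr[0] = (hi >> 8) & 0xff;
	addr[1] = (hi >> 0) & 0xff;
	addr[2] = (lo >> 24) & 0xff;
	addr[3] = (lo >> 16) & 0xff;
	addr[4] = (lo >> 8) & 0xff;
	addr[5] = (lo >> 0) & 0xff;
}
#endif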
14388 if (!addr_ok) {
14389 /* Next, try NVRAM. */
14390 if (!tg3_flag(tp, NO_NVRAM) &&
14391 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14392 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14393 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14394 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14395 }
14396 /* Finally just fetch it out of the MAC control regs. */
14397 else {
14398 hi = tr32(MAC_ADDR_0_HIGH);
14399 lo = tr32(MAC_ADDR_0_LOW);
14401 dev->dev_addr[5] = lo & 0xff;
14402 dev->dev_addr[4] = (lo >> 8) & 0xff;
14403 dev->dev_addr[3] = (lo >> 16) & 0xff;
14404 dev->dev_addr[2] = (lo >> 24) & 0xff;
14405 dev->dev_addr[1] = hi & 0xff;
14406 dev->dev_addr[0] = (hi >> 8) & 0xff;
14407 }
14408 }
14410 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14411 #ifdef CONFIG_SPARC
14412 if (!tg3_get_default_macaddr_sparc(tp))
14413 return 0;
14414 #endif
14415 return -EINVAL;
14416 }
14417 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14418 return 0;
14419 }
14421 #define BOUNDARY_SINGLE_CACHELINE 1
14422 #define BOUNDARY_MULTI_CACHELINE 2
14424 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14425 {
14426 int cacheline_size;
14427 u8 byte;
14428 int goal;
14430 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14431 if (byte == 0)
14432 cacheline_size = 1024;
14433 else
14434 cacheline_size = (int) byte * 4;
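/* Worked example: PCI_CACHE_LINE_SIZE counts 32-bit words, so a
 * register value of 0x10 means 0x10 * 4 = 64-byte cache lines; a
 * value of 0 means firmware never programmed it, and the code above
 * falls back to the 1024-byte worst case.
 */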
14436 /* On 5703 and later chips, the boundary bits have no
14437 * effect.
14438 */
14439 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14440 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14441 !tg3_flag(tp, PCI_EXPRESS))
14442 goto out;
14444 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14445 goal = BOUNDARY_MULTI_CACHELINE;
14446 #else
14447 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14448 goal = BOUNDARY_SINGLE_CACHELINE;
14449 #else
14450 goal = 0;
14451 #endif
14452 #endif
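/* Sketch of the policy above: "goal" decides how DMA bursts may cross
 * cache lines -- BOUNDARY_SINGLE_CACHELINE clamps read/write bursts
 * at one cache line, BOUNDARY_MULTI_CACHELINE allows a few, and 0
 * leaves the chip's default boundary bits untouched.
 */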
14454 if (tg3_flag(tp, 57765_PLUS)) {
14455 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14456 goto out;
14459 if (!goal)
14460 goto out;
14462 /* PCI controllers on most RISC systems tend to disconnect
14463 * when a device tries to burst across a cache-line boundary.
14464 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14465 *
14466 * Unfortunately, for PCI-E there are only limited
14467 * write-side controls for this, and thus for reads
14468 * we will still get the disconnects. We'll also waste
14469 * these PCI cycles for both read and write for chips
14470 * other than 5700 and 5701 which do not implement the
14471 * boundary bits.
14472 */
14473 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14474 switch (cacheline_size) {
14475 case 16:
14476 case 32:
14477 case 64:
14478 case 128:
14479 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14480 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14481 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14482 } else {
14483 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14484 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14486 break;
14488 case 256:
14489 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14490 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14491 break;
14493 default:
14494 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14495 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14496 break;
14498 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14499 switch (cacheline_size) {
14500 case 16:
14501 case 32:
14502 case 64:
14503 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14504 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14505 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14506 break;
14508 /* fallthrough */
14509 case 128:
14510 default:
14511 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14512 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14513 break;
14515 } else {
14516 switch (cacheline_size) {
14517 case 16:
14518 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14519 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14520 DMA_RWCTRL_WRITE_BNDRY_16);
14521 break;
14523 /* fallthrough */
14524 case 32:
14525 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14526 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14527 DMA_RWCTRL_WRITE_BNDRY_32);
14528 break;
14530 /* fallthrough */
14531 case 64:
14532 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14533 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14534 DMA_RWCTRL_WRITE_BNDRY_64);
14535 break;
14537 /* fallthrough */
14538 case 128:
14539 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14540 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14541 DMA_RWCTRL_WRITE_BNDRY_128);
14542 break;
14544 /* fallthrough */
14545 case 256:
14546 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14547 DMA_RWCTRL_WRITE_BNDRY_256);
14548 break;
14549 case 512:
14550 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14551 DMA_RWCTRL_WRITE_BNDRY_512);
14552 break;
14553 case 1024:
14554 default:
14555 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14556 DMA_RWCTRL_WRITE_BNDRY_1024);
14557 break;
14561 out:
14562 return val;
14563 }
14565 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14566 {
14567 struct tg3_internal_buffer_desc test_desc;
14568 u32 sram_dma_descs;
14569 int i, ret;
14571 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14573 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14574 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14575 tw32(RDMAC_STATUS, 0);
14576 tw32(WDMAC_STATUS, 0);
14578 tw32(BUFMGR_MODE, 0);
14579 tw32(FTQ_RESET, 0);
14581 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14582 test_desc.addr_lo = buf_dma & 0xffffffff;
14583 test_desc.nic_mbuf = 0x00002100;
14584 test_desc.len = size;
14586 /*
14587 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14588 * the *second* time the tg3 driver was getting loaded after an
14589 * initial scan.
14591 * Broadcom tells me:
14592 * ...the DMA engine is connected to the GRC block and a DMA
14593 * reset may affect the GRC block in some unpredictable way...
14594 * The behavior of resets to individual blocks has not been tested.
14596 * Broadcom noted the GRC reset will also reset all sub-components.
14597 */
14598 if (to_device) {
14599 test_desc.cqid_sqid = (13 << 8) | 2;
14601 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14602 udelay(40);
14603 } else {
14604 test_desc.cqid_sqid = (16 << 8) | 7;
14606 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14607 udelay(40);
14609 test_desc.flags = 0x00000005;
14611 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14612 u32 val;
14614 val = *(((u32 *)&test_desc) + i);
14615 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14616 sram_dma_descs + (i * sizeof(u32)));
14617 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14619 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14621 if (to_device)
14622 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14623 else
14624 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14626 ret = -ENODEV;
14627 for (i = 0; i < 40; i++) {
14628 u32 val;
14630 if (to_device)
14631 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14632 else
14633 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14634 if ((val & 0xffff) == sram_dma_descs) {
14635 ret = 0;
14636 break;
14639 udelay(100);
14642 return ret;
14643 }
14645 #define TEST_BUFFER_SIZE 0x2000
14647 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14648 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14649 { },
14650 };
14652 static int __devinit tg3_test_dma(struct tg3 *tp)
14653 {
14654 dma_addr_t buf_dma;
14655 u32 *buf, saved_dma_rwctrl;
14656 int ret = 0;
14658 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14659 &buf_dma, GFP_KERNEL);
14660 if (!buf) {
14661 ret = -ENOMEM;
14662 goto out_nofree;
14665 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14666 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14668 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14670 if (tg3_flag(tp, 57765_PLUS))
14671 goto out;
14673 if (tg3_flag(tp, PCI_EXPRESS)) {
14674 /* DMA read watermark not used on PCIE */
14675 tp->dma_rwctrl |= 0x00180000;
14676 } else if (!tg3_flag(tp, PCIX_MODE)) {
14677 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14678 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14679 tp->dma_rwctrl |= 0x003f0000;
14680 else
14681 tp->dma_rwctrl |= 0x003f000f;
14682 } else {
14683 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14684 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14685 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14686 u32 read_water = 0x7;
14688 /* If the 5704 is behind the EPB bridge, we can
14689 * do the less restrictive ONE_DMA workaround for
14690 * better performance.
14691 */
14692 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14693 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14694 tp->dma_rwctrl |= 0x8000;
14695 else if (ccval == 0x6 || ccval == 0x7)
14696 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14698 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14699 read_water = 4;
14700 /* Set bit 23 to enable PCIX hw bug fix */
14701 tp->dma_rwctrl |=
14702 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14703 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14704 (1 << 23);
14705 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14706 /* 5780 always in PCIX mode */
14707 tp->dma_rwctrl |= 0x00144000;
14708 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14709 /* 5714 always in PCIX mode */
14710 tp->dma_rwctrl |= 0x00148000;
14711 } else {
14712 tp->dma_rwctrl |= 0x001b000f;
14716 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14717 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14718 tp->dma_rwctrl &= 0xfffffff0;
14720 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14721 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14722 /* Remove this if it causes problems for some boards. */
14723 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14725 /* On 5700/5701 chips, we need to set this bit.
14726 * Otherwise the chip will issue cacheline transactions
14727 * to streamable DMA memory with not all the byte
14728 * enables turned on. This is an error on several
14729 * RISC PCI controllers, in particular sparc64.
14731 * On 5703/5704 chips, this bit has been reassigned
14732 * a different meaning. In particular, it is used
14733 * on those chips to enable a PCI-X workaround.
14734 */
14735 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14738 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14740 #if 0
14741 /* Unneeded, already done by tg3_get_invariants. */
14742 tg3_switch_clocks(tp);
14743 #endif
14745 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14746 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14747 goto out;
14749 /* It is best to perform DMA test with maximum write burst size
14750 * to expose the 5700/5701 write DMA bug.
14751 */
14752 saved_dma_rwctrl = tp->dma_rwctrl;
14753 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14754 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14756 while (1) {
14757 u32 *p = buf, i;
14759 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14760 p[i] = i;
14762 /* Send the buffer to the chip. */
14763 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14764 if (ret) {
14765 dev_err(&tp->pdev->dev,
14766 "%s: Buffer write failed. err = %d\n",
14767 __func__, ret);
14768 break;
14771 #if 0
14772 /* validate data reached card RAM correctly. */
14773 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14774 u32 val;
14775 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14776 if (le32_to_cpu(val) != p[i]) {
14777 dev_err(&tp->pdev->dev,
14778 "%s: Buffer corrupted on device! "
14779 "(%d != %d)\n", __func__, val, i);
14780 /* ret = -ENODEV here? */
14782 p[i] = 0;
14784 #endif
14785 /* Now read it back. */
14786 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14787 if (ret) {
14788 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14789 "err = %d\n", __func__, ret);
14790 break;
14793 /* Verify it. */
14794 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14795 if (p[i] == i)
14796 continue;
14798 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14799 DMA_RWCTRL_WRITE_BNDRY_16) {
14800 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14801 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14802 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14803 break;
14804 } else {
14805 dev_err(&tp->pdev->dev,
14806 "%s: Buffer corrupted on read back! "
14807 "(%d != %d)\n", __func__, p[i], i);
14808 ret = -ENODEV;
14809 goto out;
14813 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14814 /* Success. */
14815 ret = 0;
14816 break;
14819 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14820 DMA_RWCTRL_WRITE_BNDRY_16) {
14821 /* DMA test passed without adjusting DMA boundary,
14822 * now look for chipsets that are known to expose the
14823 * DMA bug without failing the test.
14824 */
14825 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14826 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14827 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14828 } else {
14829 /* Safe to use the calculated DMA boundary. */
14830 tp->dma_rwctrl = saved_dma_rwctrl;
14833 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14834 }
14836 out:
14837 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14838 out_nofree:
14839 return ret;
14840 }
14842 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14843 {
14844 if (tg3_flag(tp, 57765_PLUS)) {
14845 tp->bufmgr_config.mbuf_read_dma_low_water =
14846 DEFAULT_MB_RDMA_LOW_WATER_5705;
14847 tp->bufmgr_config.mbuf_mac_rx_low_water =
14848 DEFAULT_MB_MACRX_LOW_WATER_57765;
14849 tp->bufmgr_config.mbuf_high_water =
14850 DEFAULT_MB_HIGH_WATER_57765;
14852 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14853 DEFAULT_MB_RDMA_LOW_WATER_5705;
14854 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14855 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14856 tp->bufmgr_config.mbuf_high_water_jumbo =
14857 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14858 } else if (tg3_flag(tp, 5705_PLUS)) {
14859 tp->bufmgr_config.mbuf_read_dma_low_water =
14860 DEFAULT_MB_RDMA_LOW_WATER_5705;
14861 tp->bufmgr_config.mbuf_mac_rx_low_water =
14862 DEFAULT_MB_MACRX_LOW_WATER_5705;
14863 tp->bufmgr_config.mbuf_high_water =
14864 DEFAULT_MB_HIGH_WATER_5705;
14865 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14866 tp->bufmgr_config.mbuf_mac_rx_low_water =
14867 DEFAULT_MB_MACRX_LOW_WATER_5906;
14868 tp->bufmgr_config.mbuf_high_water =
14869 DEFAULT_MB_HIGH_WATER_5906;
14872 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14873 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14874 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14875 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14876 tp->bufmgr_config.mbuf_high_water_jumbo =
14877 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14878 } else {
14879 tp->bufmgr_config.mbuf_read_dma_low_water =
14880 DEFAULT_MB_RDMA_LOW_WATER;
14881 tp->bufmgr_config.mbuf_mac_rx_low_water =
14882 DEFAULT_MB_MACRX_LOW_WATER;
14883 tp->bufmgr_config.mbuf_high_water =
14884 DEFAULT_MB_HIGH_WATER;
14886 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14887 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14888 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14889 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14890 tp->bufmgr_config.mbuf_high_water_jumbo =
14891 DEFAULT_MB_HIGH_WATER_JUMBO;
14894 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14895 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14896 }
14898 static char * __devinit tg3_phy_string(struct tg3 *tp)
14899 {
14900 switch (tp->phy_id & TG3_PHY_ID_MASK) {
14901 case TG3_PHY_ID_BCM5400: return "5400";
14902 case TG3_PHY_ID_BCM5401: return "5401";
14903 case TG3_PHY_ID_BCM5411: return "5411";
14904 case TG3_PHY_ID_BCM5701: return "5701";
14905 case TG3_PHY_ID_BCM5703: return "5703";
14906 case TG3_PHY_ID_BCM5704: return "5704";
14907 case TG3_PHY_ID_BCM5705: return "5705";
14908 case TG3_PHY_ID_BCM5750: return "5750";
14909 case TG3_PHY_ID_BCM5752: return "5752";
14910 case TG3_PHY_ID_BCM5714: return "5714";
14911 case TG3_PHY_ID_BCM5780: return "5780";
14912 case TG3_PHY_ID_BCM5755: return "5755";
14913 case TG3_PHY_ID_BCM5787: return "5787";
14914 case TG3_PHY_ID_BCM5784: return "5784";
14915 case TG3_PHY_ID_BCM5756: return "5722/5756";
14916 case TG3_PHY_ID_BCM5906: return "5906";
14917 case TG3_PHY_ID_BCM5761: return "5761";
14918 case TG3_PHY_ID_BCM5718C: return "5718C";
14919 case TG3_PHY_ID_BCM5718S: return "5718S";
14920 case TG3_PHY_ID_BCM57765: return "57765";
14921 case TG3_PHY_ID_BCM5719C: return "5719C";
14922 case TG3_PHY_ID_BCM5720C: return "5720C";
14923 case TG3_PHY_ID_BCM8002: return "8002/serdes";
14924 case 0: return "serdes";
14925 default: return "unknown";
14926 }
14927 }
14929 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14930 {
14931 if (tg3_flag(tp, PCI_EXPRESS)) {
14932 strcpy(str, "PCI Express");
14933 return str;
14934 } else if (tg3_flag(tp, PCIX_MODE)) {
14935 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14937 strcpy(str, "PCIX:");
14939 if ((clock_ctrl == 7) ||
14940 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14941 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14942 strcat(str, "133MHz");
14943 else if (clock_ctrl == 0)
14944 strcat(str, "33MHz");
14945 else if (clock_ctrl == 2)
14946 strcat(str, "50MHz");
14947 else if (clock_ctrl == 4)
14948 strcat(str, "66MHz");
14949 else if (clock_ctrl == 6)
14950 strcat(str, "100MHz");
14951 } else {
14952 strcpy(str, "PCI:");
14953 if (tg3_flag(tp, PCI_HIGH_SPEED))
14954 strcat(str, "66MHz");
14955 else
14956 strcat(str, "33MHz");
14958 if (tg3_flag(tp, PCI_32BIT))
14959 strcat(str, ":32-bit");
14960 else
14961 strcat(str, ":64-bit");
14962 return str;
14963 }
14965 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14966 {
14967 struct pci_dev *peer;
14968 unsigned int func, devnr = tp->pdev->devfn & ~7;
14970 for (func = 0; func < 8; func++) {
14971 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14972 if (peer && peer != tp->pdev)
14973 break;
14974 pci_dev_put(peer);
14975 }
14976 /* 5704 can be configured in single-port mode, set peer to
14977 * tp->pdev in that case.
14978 */
14979 if (!peer) {
14980 peer = tp->pdev;
14981 return peer;
14982 }
14984 /*
14985 * We don't need to keep the refcount elevated; there's no way
14986 * to remove one half of this device without removing the other.
14987 */
14988 pci_dev_put(peer);
14990 return peer;
14991 }
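/* Sketch: pdev->devfn packs slot and function (devfn = slot << 3 | fn),
 * so "devfn & ~7" above addresses function 0 of the same slot and the
 * loop walks all eight candidate functions looking for the twin port
 * of a dual-port 5704.  The standard accessors make this explicit:
 */
#if 0
unsigned int slot = PCI_SLOT(tp->pdev->devfn);	/* devfn >> 3 */
unsigned int fn = PCI_FUNC(tp->pdev->devfn);	/* devfn & 7 */
#endif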
14993 static void __devinit tg3_init_coal(struct tg3 *tp)
14994 {
14995 struct ethtool_coalesce *ec = &tp->coal;
14997 memset(ec, 0, sizeof(*ec));
14998 ec->cmd = ETHTOOL_GCOALESCE;
14999 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15000 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15001 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15002 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15003 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15004 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15005 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15006 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15007 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15009 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15010 HOSTCC_MODE_CLRTICK_TXBD)) {
15011 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15012 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15013 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15014 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15017 if (tg3_flag(tp, 5705_PLUS)) {
15018 ec->rx_coalesce_usecs_irq = 0;
15019 ec->tx_coalesce_usecs_irq = 0;
15020 ec->stats_block_coalesce_usecs = 0;
15021 }
15022 }
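/* Usage note, illustrative only (values are examples, not the
 * defaults set above): these fields surface through the standard
 * ethtool coalescing interface, e.g.:
 *
 *	ethtool -C eth0 rx-usecs 60 rx-frames 15
 */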
15024 static const struct net_device_ops tg3_netdev_ops = {
15025 .ndo_open = tg3_open,
15026 .ndo_stop = tg3_close,
15027 .ndo_start_xmit = tg3_start_xmit,
15028 .ndo_get_stats64 = tg3_get_stats64,
15029 .ndo_validate_addr = eth_validate_addr,
15030 .ndo_set_multicast_list = tg3_set_rx_mode,
15031 .ndo_set_mac_address = tg3_set_mac_addr,
15032 .ndo_do_ioctl = tg3_ioctl,
15033 .ndo_tx_timeout = tg3_tx_timeout,
15034 .ndo_change_mtu = tg3_change_mtu,
15035 .ndo_fix_features = tg3_fix_features,
15036 #ifdef CONFIG_NET_POLL_CONTROLLER
15037 .ndo_poll_controller = tg3_poll_controller,
15038 #endif
15039 };
15041 static const struct net_device_ops tg3_netdev_ops_dma_bug = {
15042 .ndo_open = tg3_open,
15043 .ndo_stop = tg3_close,
15044 .ndo_start_xmit = tg3_start_xmit_dma_bug,
15045 .ndo_get_stats64 = tg3_get_stats64,
15046 .ndo_validate_addr = eth_validate_addr,
15047 .ndo_set_multicast_list = tg3_set_rx_mode,
15048 .ndo_set_mac_address = tg3_set_mac_addr,
15049 .ndo_do_ioctl = tg3_ioctl,
15050 .ndo_tx_timeout = tg3_tx_timeout,
15051 .ndo_change_mtu = tg3_change_mtu,
15052 #ifdef CONFIG_NET_POLL_CONTROLLER
15053 .ndo_poll_controller = tg3_poll_controller,
15054 #endif
15055 };
15057 static int __devinit tg3_init_one(struct pci_dev *pdev,
15058 const struct pci_device_id *ent)
15059 {
15060 struct net_device *dev;
15061 struct tg3 *tp;
15062 int i, err, pm_cap;
15063 u32 sndmbx, rcvmbx, intmbx;
15064 char str[40];
15065 u64 dma_mask, persist_dma_mask;
15066 u32 hw_features = 0;
15068 printk_once(KERN_INFO "%s\n", version);
15070 err = pci_enable_device(pdev);
15071 if (err) {
15072 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15073 return err;
15076 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15077 if (err) {
15078 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15079 goto err_out_disable_pdev;
15082 pci_set_master(pdev);
15084 /* Find power-management capability. */
15085 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15086 if (pm_cap == 0) {
15087 dev_err(&pdev->dev,
15088 "Cannot find Power Management capability, aborting\n");
15089 err = -EIO;
15090 goto err_out_free_res;
15093 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15094 if (!dev) {
15095 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15096 err = -ENOMEM;
15097 goto err_out_free_res;
15100 SET_NETDEV_DEV(dev, &pdev->dev);
15102 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15104 tp = netdev_priv(dev);
15105 tp->pdev = pdev;
15106 tp->dev = dev;
15107 tp->pm_cap = pm_cap;
15108 tp->rx_mode = TG3_DEF_RX_MODE;
15109 tp->tx_mode = TG3_DEF_TX_MODE;
15111 if (tg3_debug > 0)
15112 tp->msg_enable = tg3_debug;
15113 else
15114 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15116 /* The word/byte swap controls here control register access byte
15117 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15118 * setting below.
15119 */
15120 tp->misc_host_ctrl =
15121 MISC_HOST_CTRL_MASK_PCI_INT |
15122 MISC_HOST_CTRL_WORD_SWAP |
15123 MISC_HOST_CTRL_INDIR_ACCESS |
15124 MISC_HOST_CTRL_PCISTATE_RW;
15126 /* The NONFRM (non-frame) byte/word swap controls take effect
15127 * on descriptor entries, anything which isn't packet data.
15129 * The StrongARM chips on the board (one for tx, one for rx)
15130 * are running in big-endian mode.
15131 */
15132 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15133 GRC_MODE_WSWAP_NONFRM_DATA);
15134 #ifdef __BIG_ENDIAN
15135 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15136 #endif
15137 spin_lock_init(&tp->lock);
15138 spin_lock_init(&tp->indirect_lock);
15139 INIT_WORK(&tp->reset_task, tg3_reset_task);
15141 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15142 if (!tp->regs) {
15143 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15144 err = -ENOMEM;
15145 goto err_out_free_dev;
15148 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15149 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15151 dev->ethtool_ops = &tg3_ethtool_ops;
15152 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15153 dev->irq = pdev->irq;
15155 err = tg3_get_invariants(tp);
15156 if (err) {
15157 dev_err(&pdev->dev,
15158 "Problem fetching invariants of chip, aborting\n");
15159 goto err_out_iounmap;
15162 if (tg3_flag(tp, 5755_PLUS) && !tg3_flag(tp, 5717_PLUS))
15163 dev->netdev_ops = &tg3_netdev_ops;
15164 else
15165 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
15168 /* The EPB bridge inside 5714, 5715, and 5780 and any
15169 * device behind the EPB cannot support DMA addresses > 40-bit.
15170 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15171 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15172 * do DMA address check in tg3_start_xmit().
15173 */
15174 if (tg3_flag(tp, IS_5788))
15175 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15176 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15177 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15178 #ifdef CONFIG_HIGHMEM
15179 dma_mask = DMA_BIT_MASK(64);
15180 #endif
15181 } else
15182 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
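/* Summary of the choice above (sketch): 5788-class parts are limited
 * to 32-bit DMA; EPB-bridged parts persist a 40-bit mask, widening
 * the streaming mask to 64-bit only under CONFIG_HIGHMEM so the
 * address check in tg3_start_xmit() can catch the rest; everything
 * else gets full 64-bit masks.
 */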
15184 /* Configure DMA attributes. */
15185 if (dma_mask > DMA_BIT_MASK(32)) {
15186 err = pci_set_dma_mask(pdev, dma_mask);
15187 if (!err) {
15188 dev->features |= NETIF_F_HIGHDMA;
15189 err = pci_set_consistent_dma_mask(pdev,
15190 persist_dma_mask);
15191 if (err < 0) {
15192 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15193 "DMA for consistent allocations\n");
15194 goto err_out_iounmap;
15198 if (err || dma_mask == DMA_BIT_MASK(32)) {
15199 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15200 if (err) {
15201 dev_err(&pdev->dev,
15202 "No usable DMA configuration, aborting\n");
15203 goto err_out_iounmap;
15207 tg3_init_bufmgr_config(tp);
15209 /* Selectively allow TSO based on operating conditions */
15210 if ((tg3_flag(tp, HW_TSO_1) ||
15211 tg3_flag(tp, HW_TSO_2) ||
15212 tg3_flag(tp, HW_TSO_3)) ||
15213 (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
15214 tg3_flag_set(tp, TSO_CAPABLE);
15215 else {
15216 tg3_flag_clear(tp, TSO_CAPABLE);
15217 tg3_flag_clear(tp, TSO_BUG);
15218 tp->fw_needed = NULL;
15221 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
15222 tp->fw_needed = FIRMWARE_TG3;
15224 /* TSO is on by default on chips that support hardware TSO.
15225 * Firmware TSO on older chips gives lower performance, so it
15226 * is off by default, but can be enabled using ethtool.
15227 */
15228 if ((tg3_flag(tp, HW_TSO_1) ||
15229 tg3_flag(tp, HW_TSO_2) ||
15230 tg3_flag(tp, HW_TSO_3)) &&
15231 (dev->features & NETIF_F_IP_CSUM))
15232 hw_features |= NETIF_F_TSO;
15233 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15234 if (dev->features & NETIF_F_IPV6_CSUM)
15235 hw_features |= NETIF_F_TSO6;
15236 if (tg3_flag(tp, HW_TSO_3) ||
15237 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15238 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15239 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15240 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15241 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15242 hw_features |= NETIF_F_TSO_ECN;
15245 dev->hw_features |= hw_features;
15246 dev->features |= hw_features;
15247 dev->vlan_features |= hw_features;
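/* Illustrative usage note: on firmware-TSO chips the feature defaults
 * to off, as described above, but can be toggled from userspace:
 *
 *	ethtool -K eth0 tso on
 */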
15249 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15250 !tg3_flag(tp, TSO_CAPABLE) &&
15251 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15252 tg3_flag_set(tp, MAX_RXPEND_64);
15253 tp->rx_pending = 63;
15256 err = tg3_get_device_address(tp);
15257 if (err) {
15258 dev_err(&pdev->dev,
15259 "Could not obtain valid ethernet address, aborting\n");
15260 goto err_out_iounmap;
15263 if (tg3_flag(tp, ENABLE_APE)) {
15264 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15265 if (!tp->aperegs) {
15266 dev_err(&pdev->dev,
15267 "Cannot map APE registers, aborting\n");
15268 err = -ENOMEM;
15269 goto err_out_iounmap;
15270 }
15272 tg3_ape_lock_init(tp);
15274 if (tg3_flag(tp, ENABLE_ASF))
15275 tg3_read_dash_ver(tp);
15276 }
15278 /*
15279 * Reset chip in case UNDI or EFI driver did not shut down.
15280 * DMA self test will enable WDMAC and we'll see (spurious)
15281 * pending DMA on the PCI bus at that point.
15282 */
15283 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15284 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15285 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15286 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15289 err = tg3_test_dma(tp);
15290 if (err) {
15291 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15292 goto err_out_apeunmap;
15295 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15296 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15297 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15298 for (i = 0; i < tp->irq_max; i++) {
15299 struct tg3_napi *tnapi = &tp->napi[i];
15301 tnapi->tp = tp;
15302 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15304 tnapi->int_mbox = intmbx;
15305 if (i < 4)
15306 intmbx += 0x8;
15307 else
15308 intmbx += 0x4;
15310 tnapi->consmbox = rcvmbx;
15311 tnapi->prodmbox = sndmbx;
15313 if (i)
15314 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15315 else
15316 tnapi->coal_now = HOSTCC_MODE_NOW;
15318 if (!tg3_flag(tp, SUPPORT_MSIX))
15319 break;
15321 /*
15322 * If we support MSIX, we'll be using RSS. If we're using
15323 * RSS, the first vector only handles link interrupts and the
15324 * remaining vectors handle rx and tx interrupts. Reuse the
15325 * mailbox values for the next iteration. The values we set up
15326 * above are still useful for the single vectored mode.
15327 */
15328 if (!i)
15329 continue;
15331 rcvmbx += 0x8;
15333 if (sndmbx & 0x4)
15334 sndmbx -= 0x4;
15335 else
15336 sndmbx += 0xc;
15337 }
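/* Sketch of the resulting layout, derived from the arithmetic above:
 * interrupt mailboxes step by 0x8 for vectors 0-3 and by 0x4 after
 * that, receive-return mailboxes step by 0x8 per vector, and the
 * +0xc/-0x4 zigzag hands successive vectors the two 32-bit halves of
 * each 64-bit send mailbox slot in turn.
 */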
15339 tg3_init_coal(tp);
15341 pci_set_drvdata(pdev, dev);
15343 err = register_netdev(dev);
15344 if (err) {
15345 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15346 goto err_out_apeunmap;
15349 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15350 tp->board_part_number,
15351 tp->pci_chip_rev_id,
15352 tg3_bus_string(tp, str),
15353 dev->dev_addr);
15355 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15356 struct phy_device *phydev;
15357 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15358 netdev_info(dev,
15359 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15360 phydev->drv->name, dev_name(&phydev->dev));
15361 } else {
15362 char *ethtype;
15364 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15365 ethtype = "10/100Base-TX";
15366 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15367 ethtype = "1000Base-SX";
15368 else
15369 ethtype = "10/100/1000Base-T";
15371 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15372 "(WireSpeed[%d], EEE[%d])\n",
15373 tg3_phy_string(tp), ethtype,
15374 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15375 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15378 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15379 (dev->features & NETIF_F_RXCSUM) != 0,
15380 tg3_flag(tp, USE_LINKCHG_REG) != 0,
15381 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15382 tg3_flag(tp, ENABLE_ASF) != 0,
15383 tg3_flag(tp, TSO_CAPABLE) != 0);
15384 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15385 tp->dma_rwctrl,
15386 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15387 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15389 pci_save_state(pdev);
15391 return 0;
15393 err_out_apeunmap:
15394 if (tp->aperegs) {
15395 iounmap(tp->aperegs);
15396 tp->aperegs = NULL;
15399 err_out_iounmap:
15400 if (tp->regs) {
15401 iounmap(tp->regs);
15402 tp->regs = NULL;
15405 err_out_free_dev:
15406 free_netdev(dev);
15408 err_out_free_res:
15409 pci_release_regions(pdev);
15411 err_out_disable_pdev:
15412 pci_disable_device(pdev);
15413 pci_set_drvdata(pdev, NULL);
15414 return err;
15415 }
15417 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15418 {
15419 struct net_device *dev = pci_get_drvdata(pdev);
15421 if (dev) {
15422 struct tg3 *tp = netdev_priv(dev);
15424 if (tp->fw)
15425 release_firmware(tp->fw);
15427 cancel_work_sync(&tp->reset_task);
15429 if (!tg3_flag(tp, USE_PHYLIB)) {
15430 tg3_phy_fini(tp);
15431 tg3_mdio_fini(tp);
15434 unregister_netdev(dev);
15435 if (tp->aperegs) {
15436 iounmap(tp->aperegs);
15437 tp->aperegs = NULL;
15439 if (tp->regs) {
15440 iounmap(tp->regs);
15441 tp->regs = NULL;
15443 free_netdev(dev);
15444 pci_release_regions(pdev);
15445 pci_disable_device(pdev);
15446 pci_set_drvdata(pdev, NULL);
15447 }
15448 }
15450 #ifdef CONFIG_PM_SLEEP
15451 static int tg3_suspend(struct device *device)
15452 {
15453 struct pci_dev *pdev = to_pci_dev(device);
15454 struct net_device *dev = pci_get_drvdata(pdev);
15455 struct tg3 *tp = netdev_priv(dev);
15456 int err;
15458 if (!netif_running(dev))
15459 return 0;
15461 flush_work_sync(&tp->reset_task);
15462 tg3_phy_stop(tp);
15463 tg3_netif_stop(tp);
15465 del_timer_sync(&tp->timer);
15467 tg3_full_lock(tp, 1);
15468 tg3_disable_ints(tp);
15469 tg3_full_unlock(tp);
15471 netif_device_detach(dev);
15473 tg3_full_lock(tp, 0);
15474 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15475 tg3_flag_clear(tp, INIT_COMPLETE);
15476 tg3_full_unlock(tp);
15478 err = tg3_power_down_prepare(tp);
15479 if (err) {
15480 int err2;
15482 tg3_full_lock(tp, 0);
15484 tg3_flag_set(tp, INIT_COMPLETE);
15485 err2 = tg3_restart_hw(tp, 1);
15486 if (err2)
15487 goto out;
15489 tp->timer.expires = jiffies + tp->timer_offset;
15490 add_timer(&tp->timer);
15492 netif_device_attach(dev);
15493 tg3_netif_start(tp);
15495 out:
15496 tg3_full_unlock(tp);
15498 if (!err2)
15499 tg3_phy_start(tp);
15500 }
15502 return err;
15503 }
15505 static int tg3_resume(struct device *device)
15506 {
15507 struct pci_dev *pdev = to_pci_dev(device);
15508 struct net_device *dev = pci_get_drvdata(pdev);
15509 struct tg3 *tp = netdev_priv(dev);
15510 int err;
15512 if (!netif_running(dev))
15513 return 0;
15515 netif_device_attach(dev);
15517 tg3_full_lock(tp, 0);
15519 tg3_flag_set(tp, INIT_COMPLETE);
15520 err = tg3_restart_hw(tp, 1);
15521 if (err)
15522 goto out;
15524 tp->timer.expires = jiffies + tp->timer_offset;
15525 add_timer(&tp->timer);
15527 tg3_netif_start(tp);
15529 out:
15530 tg3_full_unlock(tp);
15532 if (!err)
15533 tg3_phy_start(tp);
15535 return err;
15536 }
15538 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15539 #define TG3_PM_OPS (&tg3_pm_ops)
15541 #else
15543 #define TG3_PM_OPS NULL
15545 #endif /* CONFIG_PM_SLEEP */
15547 /**
15548 * tg3_io_error_detected - called when PCI error is detected
15549 * @pdev: Pointer to PCI device
15550 * @state: The current pci connection state
15551 *
15552 * This function is called after a PCI bus error affecting
15553 * this device has been detected.
15554 */
15555 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15556 pci_channel_state_t state)
15557 {
15558 struct net_device *netdev = pci_get_drvdata(pdev);
15559 struct tg3 *tp = netdev_priv(netdev);
15560 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15562 netdev_info(netdev, "PCI I/O error detected\n");
15564 rtnl_lock();
15566 if (!netif_running(netdev))
15567 goto done;
15569 tg3_phy_stop(tp);
15571 tg3_netif_stop(tp);
15573 del_timer_sync(&tp->timer);
15574 tg3_flag_clear(tp, RESTART_TIMER);
15576 /* Want to make sure that the reset task doesn't run */
15577 cancel_work_sync(&tp->reset_task);
15578 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15579 tg3_flag_clear(tp, RESTART_TIMER);
15581 netif_device_detach(netdev);
15583 /* Clean up software state, even if MMIO is blocked */
15584 tg3_full_lock(tp, 0);
15585 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15586 tg3_full_unlock(tp);
15588 done:
15589 if (state == pci_channel_io_perm_failure)
15590 err = PCI_ERS_RESULT_DISCONNECT;
15591 else
15592 pci_disable_device(pdev);
15594 rtnl_unlock();
15596 return err;
15597 }
15599 /**
15600 * tg3_io_slot_reset - called after the pci bus has been reset.
15601 * @pdev: Pointer to PCI device
15602 *
15603 * Restart the card from scratch, as if from a cold-boot.
15604 * At this point, the card has experienced a hard reset,
15605 * followed by fixups by BIOS, and has its config space
15606 * set up identically to what it was at cold boot.
15607 */
15608 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15609 {
15610 struct net_device *netdev = pci_get_drvdata(pdev);
15611 struct tg3 *tp = netdev_priv(netdev);
15612 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15613 int err;
15615 rtnl_lock();
15617 if (pci_enable_device(pdev)) {
15618 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15619 goto done;
15622 pci_set_master(pdev);
15623 pci_restore_state(pdev);
15624 pci_save_state(pdev);
15626 if (!netif_running(netdev)) {
15627 rc = PCI_ERS_RESULT_RECOVERED;
15628 goto done;
15631 err = tg3_power_up(tp);
15632 if (err) {
15633 netdev_err(netdev, "Failed to restore register access.\n");
15634 goto done;
15637 rc = PCI_ERS_RESULT_RECOVERED;
15639 done:
15640 rtnl_unlock();
15642 return rc;
15643 }
15645 /**
15646 * tg3_io_resume - called when traffic can start flowing again.
15647 * @pdev: Pointer to PCI device
15648 *
15649 * This callback is called when the error recovery driver tells
15650 * us that it's OK to resume normal operation.
15651 */
15652 static void tg3_io_resume(struct pci_dev *pdev)
15653 {
15654 struct net_device *netdev = pci_get_drvdata(pdev);
15655 struct tg3 *tp = netdev_priv(netdev);
15656 int err;
15658 rtnl_lock();
15660 if (!netif_running(netdev))
15661 goto done;
15663 tg3_full_lock(tp, 0);
15664 tg3_flag_set(tp, INIT_COMPLETE);
15665 err = tg3_restart_hw(tp, 1);
15666 tg3_full_unlock(tp);
15667 if (err) {
15668 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15669 goto done;
15672 netif_device_attach(netdev);
15674 tp->timer.expires = jiffies + tp->timer_offset;
15675 add_timer(&tp->timer);
15677 tg3_netif_start(tp);
15679 tg3_phy_start(tp);
15681 done:
15682 rtnl_unlock();
15683 }
15685 static struct pci_error_handlers tg3_err_handler = {
15686 .error_detected = tg3_io_error_detected,
15687 .slot_reset = tg3_io_slot_reset,
15688 .resume = tg3_io_resume
15689 };
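/* Recovery flow, for orientation: the PCI core invokes these hooks in
 * order -- error_detected() quiesces the device, slot_reset()
 * re-enables it and restores config space after the bus reset, and
 * resume() restarts the data path once traffic may flow again.
 */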
15691 static struct pci_driver tg3_driver = {
15692 .name = DRV_MODULE_NAME,
15693 .id_table = tg3_pci_tbl,
15694 .probe = tg3_init_one,
15695 .remove = __devexit_p(tg3_remove_one),
15696 .err_handler = &tg3_err_handler,
15697 .driver.pm = TG3_PM_OPS,
15698 };
15700 static int __init tg3_init(void)
15701 {
15702 return pci_register_driver(&tg3_driver);
15703 }
15705 static void __exit tg3_cleanup(void)
15706 {
15707 pci_unregister_driver(&tg3_driver);
15708 }
15710 module_init(tg3_init);
15711 module_exit(tg3_cleanup);