/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)	\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)	\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

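/* For example, tg3_flag(tp, TAGGED_STATUS) expands to
 * _tg3_flag(TG3_FLAG_TAGGED_STATUS, (tp)->tg3_flags); routing the test
 * through the inline wrappers means a flag name that is not a TG3_FLAGS
 * enumerator fails to compile instead of silently testing the wrong bit.
 */
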
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			120
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"August 18, 2011"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

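/* NEXT_TX is the '& (foo - 1)' form described in the comment above: since
 * TG3_TX_RING_SIZE is a power of two, ((N) + 1) & (TG3_TX_RING_SIZE - 1)
 * is equivalent to ((N) + 1) % TG3_TX_RING_SIZE but compiles to a single
 * mask instead of a hardware divide.
 */
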
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	0
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

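/* tg3_debug is a mask of the NETIF_MSG_* bits from <linux/netdevice.h>.
 * For instance (illustrative value), loading the module with
 *
 *	modprobe tg3 tg3_debug=0x3
 *
 * enables only NETIF_MSG_DRV | NETIF_MSG_PROBE, while the default of -1
 * keeps the TG3_DEF_MSG_ENABLE set defined above.
 */
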
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "mac loopback test (offline)" },
	{ "phy loopback test (offline)" },
	{ "ext loopback test (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

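/* Posted vs. non-posted above: a posted MMIO write may sit buffered in a
 * bridge, so the tp->read32() read-back pushes it to the chip before the
 * requested delay elapses.  Chips flagged PCIX_TARGET_HWBUG or
 * ICH_WORKAROUND instead go through tp->write32() (typically the indirect
 * config-space path), where no read-back is performed.
 */
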
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

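/* Example of the wait variant, per the comment above _tw32_flush(): toggling
 * a GPIO-controlled power switch through GRC_LOCAL_CTRL would look like
 * (illustrative usage)
 *
 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 *
 * so the write is flushed and then given 100 usec to take effect.
 */
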
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

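/* Both helpers above follow the same memory-window protocol: point
 * TG3PCI_MEM_WIN_BASE_ADDR at the NIC SRAM offset, move the data through
 * TG3PCI_MEM_WIN_DATA, then park the window back at zero.  The
 * indirect_lock serializes the base/data register pair against other
 * contexts using the same window.
 */
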
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++) {
		if (i == TG3_APE_LOCK_GPIO)
			continue;
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
	}

	/* Clear the correct bit of the GPIO lock too. */
	if (!tp->pci_fn)
		bit = APE_LOCK_GRANT_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
		bit = APE_LOCK_REQ_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

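/* Callers pair tg3_ape_lock()/tg3_ape_unlock() around shared APE resources,
 * as tg3_ape_send_event() does below with TG3_APE_LOCK_MEM:
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return;
 *	... touch APE event registers ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *
 * The lock is held once the grant register reflects exactly the requesting
 * function's bit; otherwise the request is revoked after the 1 ms poll.
 */
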
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
		bit = APE_LOCK_GRANT_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tg3_flag(tp, APE_HAS_NCSI))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

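/* 5000 polls at 10 usec each bounds every MDIO transaction below at roughly
 * 50 ms before tg3_readphy()/tg3_writephy() give up and return -EBUSY.
 */
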
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

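/* Both MDIO routines above build the same MI_COM frame: the PHY address and
 * register fields are shifted into place, MI_COM_START kicks off the
 * clause-22 cycle, and MI_COM_BUSY is polled until the MAC releases the
 * bus.  Only the READ/WRITE command bit and the data field differ.
 */
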
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

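/* The two clause-45 helpers above tunnel through the clause-22 registers:
 * MII_TG3_MMD_CTRL selects the MMD device (devad), MII_TG3_MMD_ADDRESS
 * latches the register address, and switching MII_TG3_MMD_CTRL to the
 * no-post-increment data mode turns the same address register into a data
 * window for the actual read or write.
 */
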
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB);

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

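/* BMCR_RESET is self-clearing, so the loop above amounts to a bounded wait
 * of about 50 ms (5000 polls at 10 usec) for the PHY to come out of reset,
 * plus a 40 usec settling delay once the bit drops.
 */
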
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

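/* The arithmetic above: the remaining time is clamped to the 2500 usec
 * event timeout, then divided by 8 (plus one) to yield the number of
 * udelay(8) polls, so the loop never waits much longer than the firmware
 * is allowed to take to ACK the previous event.
 */
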
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}

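/* The resolution above follows the IEEE 802.3 pause negotiation truth table
 * for 1000BASE-X: symmetric pause on both ends enables TX and RX flow
 * control; a symmetric+asymmetric local advertisement against an
 * asymmetric-only partner yields RX-only; and an asymmetric-only local
 * advertisement can yield TX-only when the partner advertises both bits.
 */
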
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}

static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}

2019 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2021 u32 phy;
2023 if (!tg3_flag(tp, 5705_PLUS) ||
2024 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2025 return;
2027 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2028 u32 ephy;
2030 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2031 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2033 tg3_writephy(tp, MII_TG3_FET_TEST,
2034 ephy | MII_TG3_FET_SHADOW_EN);
2035 if (!tg3_readphy(tp, reg, &phy)) {
2036 if (enable)
2037 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2038 else
2039 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2040 tg3_writephy(tp, reg, phy);
2042 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2044 } else {
2045 int ret;
2047 ret = tg3_phy_auxctl_read(tp,
2048 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2049 if (!ret) {
2050 if (enable)
2051 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2052 else
2053 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2054 tg3_phy_auxctl_write(tp,
2055 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2060 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2062 int ret;
2063 u32 val;
2065 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2066 return;
2068 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2069 if (!ret)
2070 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2071 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2074 static void tg3_phy_apply_otp(struct tg3 *tp)
2076 u32 otp, phy;
2078 if (!tp->phy_otp)
2079 return;
2081 otp = tp->phy_otp;
2083 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2084 return;
2086 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2087 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2088 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2090 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2091 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2092 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2094 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2095 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2096 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2098 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2099 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2101 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2102 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2104 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2105 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2106 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2108 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
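/* tg3_phy_apply_otp() above unpacks the one-time-programmable word
 * field by field (AGC target, HPF filter and override, LPF disable,
 * VDAC, 10BT amplitude, and the roff/rcoff trims) and writes each
 * value to its DSP tap register inside an SMDSP enable/disable pair.
 */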
2111 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2113 u32 val;
2115 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2116 return;
2118 tp->setlpicnt = 0;
2120 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2121 current_link_up == 1 &&
2122 tp->link_config.active_duplex == DUPLEX_FULL &&
2123 (tp->link_config.active_speed == SPEED_100 ||
2124 tp->link_config.active_speed == SPEED_1000)) {
2125 u32 eeectl;
2127 if (tp->link_config.active_speed == SPEED_1000)
2128 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2129 else
2130 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2132 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2134 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2135 TG3_CL45_D7_EEERES_STAT, &val);
2137 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2138 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2139 tp->setlpicnt = 2;
2142 if (!tp->setlpicnt) {
2143 if (current_link_up == 1 &&
2144 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2145 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2146 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2149 val = tr32(TG3_CPMU_EEE_MODE);
2150 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2154 static void tg3_phy_eee_enable(struct tg3 *tp)
2156 u32 val;
2158 if (tp->link_config.active_speed == SPEED_1000 &&
2159 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2160 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2161 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
2162 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2163 val = MII_TG3_DSP_TAP26_ALNOKO |
2164 MII_TG3_DSP_TAP26_RMRXSTO;
2165 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2166 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2169 val = tr32(TG3_CPMU_EEE_MODE);
2170 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2173 static int tg3_wait_macro_done(struct tg3 *tp)
2175 int limit = 100;
2177 while (limit--) {
2178 u32 tmp32;
2180 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2181 if ((tmp32 & 0x1000) == 0)
2182 break;
2185 if (limit < 0)
2186 return -EBUSY;
2188 return 0;
2191 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2193 static const u32 test_pat[4][6] = {
2194 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2195 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2196 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2197 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2199 int chan;
2201 for (chan = 0; chan < 4; chan++) {
2202 int i;
2204 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2205 (chan * 0x2000) | 0x0200);
2206 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2208 for (i = 0; i < 6; i++)
2209 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2210 test_pat[chan][i]);
2212 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2213 if (tg3_wait_macro_done(tp)) {
2214 *resetp = 1;
2215 return -EBUSY;
2218 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2219 (chan * 0x2000) | 0x0200);
2220 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2221 if (tg3_wait_macro_done(tp)) {
2222 *resetp = 1;
2223 return -EBUSY;
2226 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2227 if (tg3_wait_macro_done(tp)) {
2228 *resetp = 1;
2229 return -EBUSY;
2232 for (i = 0; i < 6; i += 2) {
2233 u32 low, high;
2235 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2236 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2237 tg3_wait_macro_done(tp)) {
2238 *resetp = 1;
2239 return -EBUSY;
2241 low &= 0x7fff;
2242 high &= 0x000f;
2243 if (low != test_pat[chan][i] ||
2244 high != test_pat[chan][i+1]) {
2245 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2246 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2247 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2249 return -EBUSY;
2254 return 0;
2257 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2259 int chan;
2261 for (chan = 0; chan < 4; chan++) {
2262 int i;
2264 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2265 (chan * 0x2000) | 0x0200);
2266 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2267 for (i = 0; i < 6; i++)
2268 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2269 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2270 if (tg3_wait_macro_done(tp))
2271 return -EBUSY;
2274 return 0;
2277 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2279 u32 reg32, phy9_orig;
2280 int retries, do_phy_reset, err;
2282 retries = 10;
2283 do_phy_reset = 1;
2284 do {
2285 if (do_phy_reset) {
2286 err = tg3_bmcr_reset(tp);
2287 if (err)
2288 return err;
2289 do_phy_reset = 0;
2292 /* Disable transmitter and interrupt. */
2293 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2294 continue;
2296 reg32 |= 0x3000;
2297 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2299 /* Set full-duplex, 1000 Mbps. */
2300 tg3_writephy(tp, MII_BMCR,
2301 BMCR_FULLDPLX | BMCR_SPEED1000);
2303 /* Set to master mode. */
2304 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2305 continue;
2307 tg3_writephy(tp, MII_CTRL1000,
2308 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2310 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2311 if (err)
2312 return err;
2314 /* Block the PHY control access. */
2315 tg3_phydsp_write(tp, 0x8005, 0x0800);
2317 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2318 if (!err)
2319 break;
2320 } while (--retries);
2322 err = tg3_phy_reset_chanpat(tp);
2323 if (err)
2324 return err;
2326 tg3_phydsp_write(tp, 0x8005, 0x0000);
2328 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2329 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2331 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2333 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2335 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2336 reg32 &= ~0x3000;
2337 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2338 } else if (!err)
2339 err = -EBUSY;
2341 return err;
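/* Note on tg3_phy_reset_5703_4_5() above: it forces 1000 Mbps
 * full-duplex master mode, writes and verifies the four-channel DSP
 * test patterns, and retries up to ten times (issuing a fresh BMCR
 * reset on each failure) before restoring the original MII_CTRL1000
 * and MII_TG3_EXT_CTRL settings.
 */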
2344 /* Reset the tigon3 PHY. Callers do this when there is no valid
2345 * link, or when a reset is explicitly forced.
2346 */
2347 static int tg3_phy_reset(struct tg3 *tp)
2349 u32 val, cpmuctrl;
2350 int err;
2352 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2353 val = tr32(GRC_MISC_CFG);
2354 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2355 udelay(40);
2357 err = tg3_readphy(tp, MII_BMSR, &val);
2358 err |= tg3_readphy(tp, MII_BMSR, &val);
2359 if (err != 0)
2360 return -EBUSY;
2362 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2363 netif_carrier_off(tp->dev);
2364 tg3_link_report(tp);
2367 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2368 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2369 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2370 err = tg3_phy_reset_5703_4_5(tp);
2371 if (err)
2372 return err;
2373 goto out;
2376 cpmuctrl = 0;
2377 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2378 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2379 cpmuctrl = tr32(TG3_CPMU_CTRL);
2380 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2381 tw32(TG3_CPMU_CTRL,
2382 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2385 err = tg3_bmcr_reset(tp);
2386 if (err)
2387 return err;
2389 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2390 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2391 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2393 tw32(TG3_CPMU_CTRL, cpmuctrl);
2396 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2397 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2398 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2399 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2400 CPMU_LSPD_1000MB_MACCLK_12_5) {
2401 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2402 udelay(40);
2403 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2407 if (tg3_flag(tp, 5717_PLUS) &&
2408 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2409 return 0;
2411 tg3_phy_apply_otp(tp);
2413 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2414 tg3_phy_toggle_apd(tp, true);
2415 else
2416 tg3_phy_toggle_apd(tp, false);
2418 out:
2419 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2420 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2421 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2422 tg3_phydsp_write(tp, 0x000a, 0x0323);
2423 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2426 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2427 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2428 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2431 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2432 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2433 tg3_phydsp_write(tp, 0x000a, 0x310b);
2434 tg3_phydsp_write(tp, 0x201f, 0x9506);
2435 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2436 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2438 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2439 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2440 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2441 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2442 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2443 tg3_writephy(tp, MII_TG3_TEST1,
2444 MII_TG3_TEST1_TRIM_EN | 0x4);
2445 } else
2446 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2448 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2452 /* Set Extended packet length bit (bit 14) on all chips
2453 * that support jumbo frames. */
2454 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2455 /* Cannot do read-modify-write on 5401 */
2456 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2457 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2458 /* Set bit 14 with read-modify-write to preserve other bits */
2459 err = tg3_phy_auxctl_read(tp,
2460 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2461 if (!err)
2462 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2463 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2466 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2467 * jumbo frames transmission.
2468 */
2469 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2470 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2471 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2472 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2475 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2476 /* adjust output voltage */
2477 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2480 tg3_phy_toggle_automdix(tp, 1);
2481 tg3_phy_set_wirespeed(tp);
2482 return 0;
2485 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2486 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2487 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2488 TG3_GPIO_MSG_NEED_VAUX)
2489 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2490 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2491 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2492 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2493 (TG3_GPIO_MSG_DRVR_PRES << 12))
2495 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2496 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2497 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2498 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2499 (TG3_GPIO_MSG_NEED_VAUX << 12))
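/* Each PCI function owns a 4-bit field in the shared status word:
 * bit 0 of the field is DRVR_PRES, bit 1 is NEED_VAUX, and the field
 * sits at 4 * pci_fn (offset by TG3_APE_GPIO_MSG_SHIFT in the
 * register). For example, function 2 reporting driver presence
 * contributes TG3_GPIO_MSG_DRVR_PRES << 8, i.e. 0x100.
 */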
2501 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2503 u32 status, shift;
2505 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2506 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2507 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2508 else
2509 status = tr32(TG3_CPMU_DRV_STATUS);
2511 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2512 status &= ~(TG3_GPIO_MSG_MASK << shift);
2513 status |= (newstat << shift);
2515 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2516 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2517 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2518 else
2519 tw32(TG3_CPMU_DRV_STATUS, status);
2521 return status >> TG3_APE_GPIO_MSG_SHIFT;
2524 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2526 if (!tg3_flag(tp, IS_NIC))
2527 return 0;
2529 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2530 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2531 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2532 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2533 return -EIO;
2535 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2537 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2538 TG3_GRC_LCLCTL_PWRSW_DELAY);
2540 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2541 } else {
2542 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2543 TG3_GRC_LCLCTL_PWRSW_DELAY);
2546 return 0;
2549 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2551 u32 grc_local_ctrl;
2553 if (!tg3_flag(tp, IS_NIC) ||
2554 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2555 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2556 return;
2558 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2560 tw32_wait_f(GRC_LOCAL_CTRL,
2561 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2562 TG3_GRC_LCLCTL_PWRSW_DELAY);
2564 tw32_wait_f(GRC_LOCAL_CTRL,
2565 grc_local_ctrl,
2566 TG3_GRC_LCLCTL_PWRSW_DELAY);
2568 tw32_wait_f(GRC_LOCAL_CTRL,
2569 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2570 TG3_GRC_LCLCTL_PWRSW_DELAY);
2573 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2575 if (!tg3_flag(tp, IS_NIC))
2576 return;
2578 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2579 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2580 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2581 (GRC_LCLCTRL_GPIO_OE0 |
2582 GRC_LCLCTRL_GPIO_OE1 |
2583 GRC_LCLCTRL_GPIO_OE2 |
2584 GRC_LCLCTRL_GPIO_OUTPUT0 |
2585 GRC_LCLCTRL_GPIO_OUTPUT1),
2586 TG3_GRC_LCLCTL_PWRSW_DELAY);
2587 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2588 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2589 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2590 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2591 GRC_LCLCTRL_GPIO_OE1 |
2592 GRC_LCLCTRL_GPIO_OE2 |
2593 GRC_LCLCTRL_GPIO_OUTPUT0 |
2594 GRC_LCLCTRL_GPIO_OUTPUT1 |
2595 tp->grc_local_ctrl;
2596 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2597 TG3_GRC_LCLCTL_PWRSW_DELAY);
2599 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2600 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2601 TG3_GRC_LCLCTL_PWRSW_DELAY);
2603 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2604 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2605 TG3_GRC_LCLCTL_PWRSW_DELAY);
2606 } else {
2607 u32 no_gpio2;
2608 u32 grc_local_ctrl = 0;
2610 /* Workaround to prevent excessive current draw. */
2611 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2612 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2613 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2614 grc_local_ctrl,
2615 TG3_GRC_LCLCTL_PWRSW_DELAY);
2618 /* On 5753 and variants, GPIO2 cannot be used. */
2619 no_gpio2 = tp->nic_sram_data_cfg &
2620 NIC_SRAM_DATA_CFG_NO_GPIO2;
2622 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2623 GRC_LCLCTRL_GPIO_OE1 |
2624 GRC_LCLCTRL_GPIO_OE2 |
2625 GRC_LCLCTRL_GPIO_OUTPUT1 |
2626 GRC_LCLCTRL_GPIO_OUTPUT2;
2627 if (no_gpio2) {
2628 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2629 GRC_LCLCTRL_GPIO_OUTPUT2);
2631 tw32_wait_f(GRC_LOCAL_CTRL,
2632 tp->grc_local_ctrl | grc_local_ctrl,
2633 TG3_GRC_LCLCTL_PWRSW_DELAY);
2635 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2637 tw32_wait_f(GRC_LOCAL_CTRL,
2638 tp->grc_local_ctrl | grc_local_ctrl,
2639 TG3_GRC_LCLCTL_PWRSW_DELAY);
2641 if (!no_gpio2) {
2642 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2643 tw32_wait_f(GRC_LOCAL_CTRL,
2644 tp->grc_local_ctrl | grc_local_ctrl,
2645 TG3_GRC_LCLCTL_PWRSW_DELAY);
2650 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2652 u32 msg = 0;
2654 /* Serialize power state transitions */
2655 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2656 return;
2658 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2659 msg = TG3_GPIO_MSG_NEED_VAUX;
2661 msg = tg3_set_function_status(tp, msg);
2663 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2664 goto done;
2666 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2667 tg3_pwrsrc_switch_to_vaux(tp);
2668 else
2669 tg3_pwrsrc_die_with_vmain(tp);
2671 done:
2672 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2675 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2677 bool need_vaux = false;
2679 /* The GPIOs do something completely different on 57765. */
2680 if (!tg3_flag(tp, IS_NIC) ||
2681 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2682 return;
2684 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2685 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2686 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2687 tg3_frob_aux_power_5717(tp, include_wol ?
2688 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2689 return;
2692 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2693 struct net_device *dev_peer;
2695 dev_peer = pci_get_drvdata(tp->pdev_peer);
2697 /* remove_one() may have been run on the peer. */
2698 if (dev_peer) {
2699 struct tg3 *tp_peer = netdev_priv(dev_peer);
2701 if (tg3_flag(tp_peer, INIT_COMPLETE))
2702 return;
2704 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2705 tg3_flag(tp_peer, ENABLE_ASF))
2706 need_vaux = true;
2710 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2711 tg3_flag(tp, ENABLE_ASF))
2712 need_vaux = true;
2714 if (need_vaux)
2715 tg3_pwrsrc_switch_to_vaux(tp);
2716 else
2717 tg3_pwrsrc_die_with_vmain(tp);
2720 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2722 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2723 return 1;
2724 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2725 if (speed != SPEED_10)
2726 return 1;
2727 } else if (speed == SPEED_10)
2728 return 1;
2730 return 0;
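/* Summary of tg3_5700_link_polarity() above: LED mode PHY_2 always
 * inverts the link polarity; a BCM5411 PHY inverts at any speed
 * other than 10 Mbps; all other PHYs invert only at 10 Mbps.
 */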
2733 static int tg3_setup_phy(struct tg3 *, int);
2734 static int tg3_halt_cpu(struct tg3 *, u32);
2736 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2738 u32 val;
2740 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2741 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2742 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2743 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2745 sg_dig_ctrl |=
2746 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2747 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2748 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2750 return;
2753 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2754 tg3_bmcr_reset(tp);
2755 val = tr32(GRC_MISC_CFG);
2756 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2757 udelay(40);
2758 return;
2759 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2760 u32 phytest;
2761 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2762 u32 phy;
2764 tg3_writephy(tp, MII_ADVERTISE, 0);
2765 tg3_writephy(tp, MII_BMCR,
2766 BMCR_ANENABLE | BMCR_ANRESTART);
2768 tg3_writephy(tp, MII_TG3_FET_TEST,
2769 phytest | MII_TG3_FET_SHADOW_EN);
2770 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2771 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2772 tg3_writephy(tp,
2773 MII_TG3_FET_SHDW_AUXMODE4,
2774 phy);
2776 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2778 return;
2779 } else if (do_low_power) {
2780 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2781 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2783 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2784 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2785 MII_TG3_AUXCTL_PCTL_VREG_11V;
2786 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2789 /* The PHY should not be powered down on some chips because
2790 * of bugs.
2791 */
2792 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2793 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2794 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2795 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2796 return;
2798 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2799 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2800 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2801 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2802 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2803 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2806 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2809 /* tp->lock is held. */
2810 static int tg3_nvram_lock(struct tg3 *tp)
2812 if (tg3_flag(tp, NVRAM)) {
2813 int i;
2815 if (tp->nvram_lock_cnt == 0) {
2816 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2817 for (i = 0; i < 8000; i++) {
2818 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2819 break;
2820 udelay(20);
2822 if (i == 8000) {
2823 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2824 return -ENODEV;
2827 tp->nvram_lock_cnt++;
2829 return 0;
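/* The arbitration poll in tg3_nvram_lock() above retries for
 * 8000 * 20 usec, i.e. roughly 160 ms; on timeout it drops the
 * request via SWARB_REQ_CLR1 and returns -ENODEV.
 */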
2832 /* tp->lock is held. */
2833 static void tg3_nvram_unlock(struct tg3 *tp)
2835 if (tg3_flag(tp, NVRAM)) {
2836 if (tp->nvram_lock_cnt > 0)
2837 tp->nvram_lock_cnt--;
2838 if (tp->nvram_lock_cnt == 0)
2839 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2843 /* tp->lock is held. */
2844 static void tg3_enable_nvram_access(struct tg3 *tp)
2846 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2847 u32 nvaccess = tr32(NVRAM_ACCESS);
2849 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2853 /* tp->lock is held. */
2854 static void tg3_disable_nvram_access(struct tg3 *tp)
2856 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2857 u32 nvaccess = tr32(NVRAM_ACCESS);
2859 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2863 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2864 u32 offset, u32 *val)
2866 u32 tmp;
2867 int i;
2869 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2870 return -EINVAL;
2872 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2873 EEPROM_ADDR_DEVID_MASK |
2874 EEPROM_ADDR_READ);
2875 tw32(GRC_EEPROM_ADDR,
2876 tmp |
2877 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2878 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2879 EEPROM_ADDR_ADDR_MASK) |
2880 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2882 for (i = 0; i < 1000; i++) {
2883 tmp = tr32(GRC_EEPROM_ADDR);
2885 if (tmp & EEPROM_ADDR_COMPLETE)
2886 break;
2887 msleep(1);
2889 if (!(tmp & EEPROM_ADDR_COMPLETE))
2890 return -EBUSY;
2892 tmp = tr32(GRC_EEPROM_DATA);
2894 /*
2895 * The data will always be opposite the native endian
2896 * format. Perform a blind byteswap to compensate.
2897 */
2898 *val = swab32(tmp);
2900 return 0;
2903 #define NVRAM_CMD_TIMEOUT 10000
2905 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2907 int i;
2909 tw32(NVRAM_CMD, nvram_cmd);
2910 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2911 udelay(10);
2912 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2913 udelay(10);
2914 break;
2918 if (i == NVRAM_CMD_TIMEOUT)
2919 return -EBUSY;
2921 return 0;
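/* tg3_nvram_exec_cmd() above polls NVRAM_CMD_DONE every 10 usec up
 * to NVRAM_CMD_TIMEOUT times, so a command may take roughly 100 ms
 * before the function gives up with -EBUSY.
 */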
2924 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2926 if (tg3_flag(tp, NVRAM) &&
2927 tg3_flag(tp, NVRAM_BUFFERED) &&
2928 tg3_flag(tp, FLASH) &&
2929 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2930 (tp->nvram_jedecnum == JEDEC_ATMEL))
2932 addr = ((addr / tp->nvram_pagesize) <<
2933 ATMEL_AT45DB0X1B_PAGE_POS) +
2934 (addr % tp->nvram_pagesize);
2936 return addr;
2939 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2941 if (tg3_flag(tp, NVRAM) &&
2942 tg3_flag(tp, NVRAM_BUFFERED) &&
2943 tg3_flag(tp, FLASH) &&
2944 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2945 (tp->nvram_jedecnum == JEDEC_ATMEL))
2947 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2948 tp->nvram_pagesize) +
2949 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2951 return addr;
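/* The two helpers above convert between linear NVRAM offsets and the
 * Atmel AT45DB0X1B page/offset form, where pages are
 * tp->nvram_pagesize bytes and the page index is shifted up to
 * ATMEL_AT45DB0X1B_PAGE_POS. For example, with a 264-byte page,
 * linear offset 300 becomes page 1, offset 36, i.e.
 * (1 << ATMEL_AT45DB0X1B_PAGE_POS) + 36; tg3_nvram_logical_addr()
 * inverts that mapping.
 */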
2954 /* NOTE: Data read in from NVRAM is byteswapped according to
2955 * the byteswapping settings for all other register accesses.
2956 * tg3 devices are BE devices, so on a BE machine, the data
2957 * returned will be exactly as it is seen in NVRAM. On a LE
2958 * machine, the 32-bit value will be byteswapped.
2959 */
2960 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2962 int ret;
2964 if (!tg3_flag(tp, NVRAM))
2965 return tg3_nvram_read_using_eeprom(tp, offset, val);
2967 offset = tg3_nvram_phys_addr(tp, offset);
2969 if (offset > NVRAM_ADDR_MSK)
2970 return -EINVAL;
2972 ret = tg3_nvram_lock(tp);
2973 if (ret)
2974 return ret;
2976 tg3_enable_nvram_access(tp);
2978 tw32(NVRAM_ADDR, offset);
2979 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2980 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2982 if (ret == 0)
2983 *val = tr32(NVRAM_RDDATA);
2985 tg3_disable_nvram_access(tp);
2987 tg3_nvram_unlock(tp);
2989 return ret;
2992 /* Ensures NVRAM data is in bytestream format. */
2993 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2995 u32 v;
2996 int res = tg3_nvram_read(tp, offset, &v);
2997 if (!res)
2998 *val = cpu_to_be32(v);
2999 return res;
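/* Byte-order example for the helpers above: if NVRAM holds the bytes
 * aa bb cc dd, tg3_nvram_read() returns the integer 0xaabbccdd,
 * whose in-memory layout is swapped on a little-endian host;
 * tg3_nvram_read_be32() applies cpu_to_be32() so the stored bytes
 * are aa bb cc dd on either endianness, i.e. bytestream order.
 */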
3002 #define RX_CPU_SCRATCH_BASE 0x30000
3003 #define RX_CPU_SCRATCH_SIZE 0x04000
3004 #define TX_CPU_SCRATCH_BASE 0x34000
3005 #define TX_CPU_SCRATCH_SIZE 0x04000
3007 /* tp->lock is held. */
3008 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3010 int i;
3012 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3014 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3015 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3017 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3018 return 0;
3020 if (offset == RX_CPU_BASE) {
3021 for (i = 0; i < 10000; i++) {
3022 tw32(offset + CPU_STATE, 0xffffffff);
3023 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3024 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3025 break;
3028 tw32(offset + CPU_STATE, 0xffffffff);
3029 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3030 udelay(10);
3031 } else {
3032 for (i = 0; i < 10000; i++) {
3033 tw32(offset + CPU_STATE, 0xffffffff);
3034 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3035 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3036 break;
3040 if (i >= 10000) {
3041 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3042 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3043 return -ENODEV;
3046 /* Clear firmware's nvram arbitration. */
3047 if (tg3_flag(tp, NVRAM))
3048 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3049 return 0;
3052 struct fw_info {
3053 unsigned int fw_base;
3054 unsigned int fw_len;
3055 const __be32 *fw_data;
3058 /* tp->lock is held. */
3059 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3060 u32 cpu_scratch_base, int cpu_scratch_size,
3061 struct fw_info *info)
3063 int err, lock_err, i;
3064 void (*write_op)(struct tg3 *, u32, u32);
3066 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3067 netdev_err(tp->dev,
3068 "%s: Trying to load TX cpu firmware which is 5705\n",
3069 __func__);
3070 return -EINVAL;
3073 if (tg3_flag(tp, 5705_PLUS))
3074 write_op = tg3_write_mem;
3075 else
3076 write_op = tg3_write_indirect_reg32;
3078 /* It is possible that bootcode is still loading at this point.
3079 * Get the nvram lock first before halting the cpu.
3080 */
3081 lock_err = tg3_nvram_lock(tp);
3082 err = tg3_halt_cpu(tp, cpu_base);
3083 if (!lock_err)
3084 tg3_nvram_unlock(tp);
3085 if (err)
3086 goto out;
3088 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3089 write_op(tp, cpu_scratch_base + i, 0);
3090 tw32(cpu_base + CPU_STATE, 0xffffffff);
3091 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3092 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3093 write_op(tp, (cpu_scratch_base +
3094 (info->fw_base & 0xffff) +
3095 (i * sizeof(u32))),
3096 be32_to_cpu(info->fw_data[i]));
3098 err = 0;
3100 out:
3101 return err;
3104 /* tp->lock is held. */
3105 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3107 struct fw_info info;
3108 const __be32 *fw_data;
3109 int err, i;
3111 fw_data = (void *)tp->fw->data;
3113 /* The firmware blob starts with version numbers, followed by
3114 * the start address and length. We use the complete length:
3115 * length = end_address_of_bss - start_address_of_text.
3116 * The remainder is the image, loaded contiguously from the
3117 * start address. */
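/* Concretely, as consumed below: fw_data[0] holds the version,
 * fw_data[1] the load address, and fw_data[2] a length word; the
 * driver derives fw_len from the file size instead (tp->fw->size
 * minus the 12-byte header), and the image proper begins at
 * fw_data[3].
 */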
3119 info.fw_base = be32_to_cpu(fw_data[1]);
3120 info.fw_len = tp->fw->size - 12;
3121 info.fw_data = &fw_data[3];
3123 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3124 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3125 &info);
3126 if (err)
3127 return err;
3129 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3130 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3131 &info);
3132 if (err)
3133 return err;
3135 /* Now start up only the RX CPU. */
3136 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3137 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3139 for (i = 0; i < 5; i++) {
3140 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3141 break;
3142 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3143 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3144 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3145 udelay(1000);
3147 if (i >= 5) {
3148 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3149 "should be %08x\n", __func__,
3150 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3151 return -ENODEV;
3153 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3154 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
3156 return 0;
3159 /* tp->lock is held. */
3160 static int tg3_load_tso_firmware(struct tg3 *tp)
3162 struct fw_info info;
3163 const __be32 *fw_data;
3164 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3165 int err, i;
3167 if (tg3_flag(tp, HW_TSO_1) ||
3168 tg3_flag(tp, HW_TSO_2) ||
3169 tg3_flag(tp, HW_TSO_3))
3170 return 0;
3172 fw_data = (void *)tp->fw->data;
3174 /* The firmware blob starts with version numbers, followed by
3175 * the start address and length. We use the complete length:
3176 * length = end_address_of_bss - start_address_of_text.
3177 * The remainder is the image, loaded contiguously from the
3178 * start address. */
3180 info.fw_base = be32_to_cpu(fw_data[1]);
3181 cpu_scratch_size = tp->fw_len;
3182 info.fw_len = tp->fw->size - 12;
3183 info.fw_data = &fw_data[3];
3185 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3186 cpu_base = RX_CPU_BASE;
3187 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3188 } else {
3189 cpu_base = TX_CPU_BASE;
3190 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3191 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3194 err = tg3_load_firmware_cpu(tp, cpu_base,
3195 cpu_scratch_base, cpu_scratch_size,
3196 &info);
3197 if (err)
3198 return err;
3200 /* Now start up the CPU. */
3201 tw32(cpu_base + CPU_STATE, 0xffffffff);
3202 tw32_f(cpu_base + CPU_PC, info.fw_base);
3204 for (i = 0; i < 5; i++) {
3205 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3206 break;
3207 tw32(cpu_base + CPU_STATE, 0xffffffff);
3208 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3209 tw32_f(cpu_base + CPU_PC, info.fw_base);
3210 udelay(1000);
3212 if (i >= 5) {
3213 netdev_err(tp->dev,
3214 "%s fails to set CPU PC, is %08x should be %08x\n",
3215 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3216 return -ENODEV;
3218 tw32(cpu_base + CPU_STATE, 0xffffffff);
3219 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3220 return 0;
3224 /* tp->lock is held. */
3225 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3227 u32 addr_high, addr_low;
3228 int i;
3230 addr_high = ((tp->dev->dev_addr[0] << 8) |
3231 tp->dev->dev_addr[1]);
3232 addr_low = ((tp->dev->dev_addr[2] << 24) |
3233 (tp->dev->dev_addr[3] << 16) |
3234 (tp->dev->dev_addr[4] << 8) |
3235 (tp->dev->dev_addr[5] << 0));
3236 for (i = 0; i < 4; i++) {
3237 if (i == 1 && skip_mac_1)
3238 continue;
3239 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3240 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3243 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3244 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3245 for (i = 0; i < 12; i++) {
3246 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3247 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3251 addr_high = (tp->dev->dev_addr[0] +
3252 tp->dev->dev_addr[1] +
3253 tp->dev->dev_addr[2] +
3254 tp->dev->dev_addr[3] +
3255 tp->dev->dev_addr[4] +
3256 tp->dev->dev_addr[5]) &
3257 TX_BACKOFF_SEED_MASK;
3258 tw32(MAC_TX_BACKOFF_SEED, addr_high);
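/* Example of the register packing in __tg3_set_mac_addr() above:
 * MAC address 00:10:18:aa:bb:cc is written as addr_high = 0x0010
 * and addr_low = 0x18aabbcc, and the sum of all six bytes (masked
 * by TX_BACKOFF_SEED_MASK) seeds the transmit backoff generator.
 */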
3261 static void tg3_enable_register_access(struct tg3 *tp)
3263 /*
3264 * Make sure register accesses (indirect or otherwise) will function
3265 * correctly.
3266 */
3267 pci_write_config_dword(tp->pdev,
3268 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3271 static int tg3_power_up(struct tg3 *tp)
3273 int err;
3275 tg3_enable_register_access(tp);
3277 err = pci_set_power_state(tp->pdev, PCI_D0);
3278 if (!err) {
3279 /* Switch out of Vaux if it is a NIC */
3280 tg3_pwrsrc_switch_to_vmain(tp);
3281 } else {
3282 netdev_err(tp->dev, "Transition to D0 failed\n");
3285 return err;
3288 static int tg3_power_down_prepare(struct tg3 *tp)
3290 u32 misc_host_ctrl;
3291 bool device_should_wake, do_low_power;
3293 tg3_enable_register_access(tp);
3295 /* Restore the CLKREQ setting. */
3296 if (tg3_flag(tp, CLKREQ_BUG)) {
3297 u16 lnkctl;
3299 pci_read_config_word(tp->pdev,
3300 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3301 &lnkctl);
3302 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3303 pci_write_config_word(tp->pdev,
3304 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3305 lnkctl);
3308 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3309 tw32(TG3PCI_MISC_HOST_CTRL,
3310 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3312 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3313 tg3_flag(tp, WOL_ENABLE);
3315 if (tg3_flag(tp, USE_PHYLIB)) {
3316 do_low_power = false;
3317 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3318 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3319 struct phy_device *phydev;
3320 u32 phyid, advertising;
3322 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3324 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3326 tp->link_config.orig_speed = phydev->speed;
3327 tp->link_config.orig_duplex = phydev->duplex;
3328 tp->link_config.orig_autoneg = phydev->autoneg;
3329 tp->link_config.orig_advertising = phydev->advertising;
3331 advertising = ADVERTISED_TP |
3332 ADVERTISED_Pause |
3333 ADVERTISED_Autoneg |
3334 ADVERTISED_10baseT_Half;
3336 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3337 if (tg3_flag(tp, WOL_SPEED_100MB))
3338 advertising |=
3339 ADVERTISED_100baseT_Half |
3340 ADVERTISED_100baseT_Full |
3341 ADVERTISED_10baseT_Full;
3342 else
3343 advertising |= ADVERTISED_10baseT_Full;
3346 phydev->advertising = advertising;
3348 phy_start_aneg(phydev);
3350 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3351 if (phyid != PHY_ID_BCMAC131) {
3352 phyid &= PHY_BCM_OUI_MASK;
3353 if (phyid == PHY_BCM_OUI_1 ||
3354 phyid == PHY_BCM_OUI_2 ||
3355 phyid == PHY_BCM_OUI_3)
3356 do_low_power = true;
3359 } else {
3360 do_low_power = true;
3362 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3363 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3364 tp->link_config.orig_speed = tp->link_config.speed;
3365 tp->link_config.orig_duplex = tp->link_config.duplex;
3366 tp->link_config.orig_autoneg = tp->link_config.autoneg;
3369 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
3370 tp->link_config.speed = SPEED_10;
3371 tp->link_config.duplex = DUPLEX_HALF;
3372 tp->link_config.autoneg = AUTONEG_ENABLE;
3373 tg3_setup_phy(tp, 0);
3377 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3378 u32 val;
3380 val = tr32(GRC_VCPU_EXT_CTRL);
3381 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3382 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3383 int i;
3384 u32 val;
3386 for (i = 0; i < 200; i++) {
3387 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3388 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3389 break;
3390 msleep(1);
3393 if (tg3_flag(tp, WOL_CAP))
3394 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3395 WOL_DRV_STATE_SHUTDOWN |
3396 WOL_DRV_WOL |
3397 WOL_SET_MAGIC_PKT);
3399 if (device_should_wake) {
3400 u32 mac_mode;
3402 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3403 if (do_low_power &&
3404 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3405 tg3_phy_auxctl_write(tp,
3406 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3407 MII_TG3_AUXCTL_PCTL_WOL_EN |
3408 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3409 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3410 udelay(40);
3413 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3414 mac_mode = MAC_MODE_PORT_MODE_GMII;
3415 else
3416 mac_mode = MAC_MODE_PORT_MODE_MII;
3418 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3419 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3420 ASIC_REV_5700) {
3421 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3422 SPEED_100 : SPEED_10;
3423 if (tg3_5700_link_polarity(tp, speed))
3424 mac_mode |= MAC_MODE_LINK_POLARITY;
3425 else
3426 mac_mode &= ~MAC_MODE_LINK_POLARITY;
3428 } else {
3429 mac_mode = MAC_MODE_PORT_MODE_TBI;
3432 if (!tg3_flag(tp, 5750_PLUS))
3433 tw32(MAC_LED_CTRL, tp->led_ctrl);
3435 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3436 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3437 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3438 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3440 if (tg3_flag(tp, ENABLE_APE))
3441 mac_mode |= MAC_MODE_APE_TX_EN |
3442 MAC_MODE_APE_RX_EN |
3443 MAC_MODE_TDE_ENABLE;
3445 tw32_f(MAC_MODE, mac_mode);
3446 udelay(100);
3448 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3449 udelay(10);
3452 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3453 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3454 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3455 u32 base_val;
3457 base_val = tp->pci_clock_ctrl;
3458 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3459 CLOCK_CTRL_TXCLK_DISABLE);
3461 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3462 CLOCK_CTRL_PWRDOWN_PLL133, 40);
3463 } else if (tg3_flag(tp, 5780_CLASS) ||
3464 tg3_flag(tp, CPMU_PRESENT) ||
3465 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3466 /* do nothing */
3467 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3468 u32 newbits1, newbits2;
3470 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3471 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3472 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3473 CLOCK_CTRL_TXCLK_DISABLE |
3474 CLOCK_CTRL_ALTCLK);
3475 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3476 } else if (tg3_flag(tp, 5705_PLUS)) {
3477 newbits1 = CLOCK_CTRL_625_CORE;
3478 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3479 } else {
3480 newbits1 = CLOCK_CTRL_ALTCLK;
3481 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3484 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3485 40);
3487 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3488 40);
3490 if (!tg3_flag(tp, 5705_PLUS)) {
3491 u32 newbits3;
3493 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3494 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3495 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3496 CLOCK_CTRL_TXCLK_DISABLE |
3497 CLOCK_CTRL_44MHZ_CORE);
3498 } else {
3499 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3502 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3503 tp->pci_clock_ctrl | newbits3, 40);
3507 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3508 tg3_power_down_phy(tp, do_low_power);
3510 tg3_frob_aux_power(tp, true);
3512 /* Workaround for unstable PLL clock */
3513 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3514 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3515 u32 val = tr32(0x7d00);
3517 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3518 tw32(0x7d00, val);
3519 if (!tg3_flag(tp, ENABLE_ASF)) {
3520 int err;
3522 err = tg3_nvram_lock(tp);
3523 tg3_halt_cpu(tp, RX_CPU_BASE);
3524 if (!err)
3525 tg3_nvram_unlock(tp);
3529 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3531 return 0;
3534 static void tg3_power_down(struct tg3 *tp)
3536 tg3_power_down_prepare(tp);
3538 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3539 pci_set_power_state(tp->pdev, PCI_D3hot);
3542 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3544 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3545 case MII_TG3_AUX_STAT_10HALF:
3546 *speed = SPEED_10;
3547 *duplex = DUPLEX_HALF;
3548 break;
3550 case MII_TG3_AUX_STAT_10FULL:
3551 *speed = SPEED_10;
3552 *duplex = DUPLEX_FULL;
3553 break;
3555 case MII_TG3_AUX_STAT_100HALF:
3556 *speed = SPEED_100;
3557 *duplex = DUPLEX_HALF;
3558 break;
3560 case MII_TG3_AUX_STAT_100FULL:
3561 *speed = SPEED_100;
3562 *duplex = DUPLEX_FULL;
3563 break;
3565 case MII_TG3_AUX_STAT_1000HALF:
3566 *speed = SPEED_1000;
3567 *duplex = DUPLEX_HALF;
3568 break;
3570 case MII_TG3_AUX_STAT_1000FULL:
3571 *speed = SPEED_1000;
3572 *duplex = DUPLEX_FULL;
3573 break;
3575 default:
3576 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3577 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3578 SPEED_10;
3579 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3580 DUPLEX_HALF;
3581 break;
3583 *speed = SPEED_INVALID;
3584 *duplex = DUPLEX_INVALID;
3585 break;
3589 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3591 int err = 0;
3592 u32 val, new_adv;
3594 new_adv = ADVERTISE_CSMA;
3595 if (advertise & ADVERTISED_10baseT_Half)
3596 new_adv |= ADVERTISE_10HALF;
3597 if (advertise & ADVERTISED_10baseT_Full)
3598 new_adv |= ADVERTISE_10FULL;
3599 if (advertise & ADVERTISED_100baseT_Half)
3600 new_adv |= ADVERTISE_100HALF;
3601 if (advertise & ADVERTISED_100baseT_Full)
3602 new_adv |= ADVERTISE_100FULL;
3604 new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
3606 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3607 if (err)
3608 goto done;
3610 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3611 goto done;
3613 new_adv = 0;
3614 if (advertise & ADVERTISED_1000baseT_Half)
3615 new_adv |= ADVERTISE_1000HALF;
3616 if (advertise & ADVERTISED_1000baseT_Full)
3617 new_adv |= ADVERTISE_1000FULL;
3619 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3620 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3621 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3623 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3624 if (err)
3625 goto done;
3627 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3628 goto done;
3630 tw32(TG3_CPMU_EEE_MODE,
3631 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3633 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3634 if (!err) {
3635 u32 err2;
3637 val = 0;
3638 /* Advertise 100-BaseTX EEE ability */
3639 if (advertise & ADVERTISED_100baseT_Full)
3640 val |= MDIO_AN_EEE_ADV_100TX;
3641 /* Advertise 1000-BaseT EEE ability */
3642 if (advertise & ADVERTISED_1000baseT_Full)
3643 val |= MDIO_AN_EEE_ADV_1000T;
3644 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3645 if (err)
3646 val = 0;
3648 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3649 case ASIC_REV_5717:
3650 case ASIC_REV_57765:
3651 case ASIC_REV_5719:
3652 /* If we advertised any EEE modes above... */
3653 if (val)
3654 val = MII_TG3_DSP_TAP26_ALNOKO |
3655 MII_TG3_DSP_TAP26_RMRXSTO |
3656 MII_TG3_DSP_TAP26_OPCSINPT;
3657 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3658 /* Fall through */
3659 case ASIC_REV_5720:
3660 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3661 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3662 MII_TG3_DSP_CH34TP2_HIBW01);
3665 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3666 if (!err)
3667 err = err2;
3670 done:
3671 return err;
3674 static void tg3_phy_copper_begin(struct tg3 *tp)
3676 u32 new_adv;
3677 int i;
3679 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3680 new_adv = ADVERTISED_10baseT_Half |
3681 ADVERTISED_10baseT_Full;
3682 if (tg3_flag(tp, WOL_SPEED_100MB))
3683 new_adv |= ADVERTISED_100baseT_Half |
3684 ADVERTISED_100baseT_Full;
3686 tg3_phy_autoneg_cfg(tp, new_adv,
3687 FLOW_CTRL_TX | FLOW_CTRL_RX);
3688 } else if (tp->link_config.speed == SPEED_INVALID) {
3689 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3690 tp->link_config.advertising &=
3691 ~(ADVERTISED_1000baseT_Half |
3692 ADVERTISED_1000baseT_Full);
3694 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3695 tp->link_config.flowctrl);
3696 } else {
3697 /* Asking for a specific link mode. */
3698 if (tp->link_config.speed == SPEED_1000) {
3699 if (tp->link_config.duplex == DUPLEX_FULL)
3700 new_adv = ADVERTISED_1000baseT_Full;
3701 else
3702 new_adv = ADVERTISED_1000baseT_Half;
3703 } else if (tp->link_config.speed == SPEED_100) {
3704 if (tp->link_config.duplex == DUPLEX_FULL)
3705 new_adv = ADVERTISED_100baseT_Full;
3706 else
3707 new_adv = ADVERTISED_100baseT_Half;
3708 } else {
3709 if (tp->link_config.duplex == DUPLEX_FULL)
3710 new_adv = ADVERTISED_10baseT_Full;
3711 else
3712 new_adv = ADVERTISED_10baseT_Half;
3715 tg3_phy_autoneg_cfg(tp, new_adv,
3716 tp->link_config.flowctrl);
3719 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3720 tp->link_config.speed != SPEED_INVALID) {
3721 u32 bmcr, orig_bmcr;
3723 tp->link_config.active_speed = tp->link_config.speed;
3724 tp->link_config.active_duplex = tp->link_config.duplex;
3726 bmcr = 0;
3727 switch (tp->link_config.speed) {
3728 default:
3729 case SPEED_10:
3730 break;
3732 case SPEED_100:
3733 bmcr |= BMCR_SPEED100;
3734 break;
3736 case SPEED_1000:
3737 bmcr |= BMCR_SPEED1000;
3738 break;
3741 if (tp->link_config.duplex == DUPLEX_FULL)
3742 bmcr |= BMCR_FULLDPLX;
3744 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3745 (bmcr != orig_bmcr)) {
3746 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3747 for (i = 0; i < 1500; i++) {
3748 u32 tmp;
3750 udelay(10);
3751 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3752 tg3_readphy(tp, MII_BMSR, &tmp))
3753 continue;
3754 if (!(tmp & BMSR_LSTATUS)) {
3755 udelay(40);
3756 break;
3759 tg3_writephy(tp, MII_BMCR, bmcr);
3760 udelay(40);
3762 } else {
3763 tg3_writephy(tp, MII_BMCR,
3764 BMCR_ANENABLE | BMCR_ANRESTART);
3768 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3770 int err;
3772 /* Turn off tap power management. */
3773 /* Set Extended packet length bit */
3774 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3776 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3777 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3778 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3779 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3780 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3782 udelay(40);
3784 return err;
3787 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3789 u32 adv_reg, all_mask = 0;
3791 if (mask & ADVERTISED_10baseT_Half)
3792 all_mask |= ADVERTISE_10HALF;
3793 if (mask & ADVERTISED_10baseT_Full)
3794 all_mask |= ADVERTISE_10FULL;
3795 if (mask & ADVERTISED_100baseT_Half)
3796 all_mask |= ADVERTISE_100HALF;
3797 if (mask & ADVERTISED_100baseT_Full)
3798 all_mask |= ADVERTISE_100FULL;
3800 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3801 return 0;
3803 if ((adv_reg & ADVERTISE_ALL) != all_mask)
3804 return 0;
3806 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3807 u32 tg3_ctrl;
3809 all_mask = 0;
3810 if (mask & ADVERTISED_1000baseT_Half)
3811 all_mask |= ADVERTISE_1000HALF;
3812 if (mask & ADVERTISED_1000baseT_Full)
3813 all_mask |= ADVERTISE_1000FULL;
3815 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3816 return 0;
3818 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
3819 if (tg3_ctrl != all_mask)
3820 return 0;
3823 return 1;
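/* tg3_copper_is_advertising_all() above returns 1 only when the MII
 * advertisement registers cover every mode in @mask, including the
 * 1000BASE-T control bits unless the PHY is 10/100-only; any
 * readback failure or missing mode yields 0.
 */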
3826 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3828 u32 curadv, reqadv;
3830 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3831 return 1;
3833 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3834 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3836 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3837 if (curadv != reqadv)
3838 return 0;
3840 if (tg3_flag(tp, PAUSE_AUTONEG))
3841 tg3_readphy(tp, MII_LPA, rmtadv);
3842 } else {
3843 /* Reprogram the advertisement register, even if it
3844 * does not affect the current link. If the link
3845 * gets renegotiated in the future, we can save an
3846 * additional renegotiation cycle by advertising
3847 * it correctly in the first place.
3848 */
3849 if (curadv != reqadv) {
3850 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3851 ADVERTISE_PAUSE_ASYM);
3852 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3856 return 1;
3859 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3861 int current_link_up;
3862 u32 bmsr, val;
3863 u32 lcl_adv, rmt_adv;
3864 u16 current_speed;
3865 u8 current_duplex;
3866 int i, err;
3868 tw32(MAC_EVENT, 0);
3870 tw32_f(MAC_STATUS,
3871 (MAC_STATUS_SYNC_CHANGED |
3872 MAC_STATUS_CFG_CHANGED |
3873 MAC_STATUS_MI_COMPLETION |
3874 MAC_STATUS_LNKSTATE_CHANGED));
3875 udelay(40);
3877 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3878 tw32_f(MAC_MI_MODE,
3879 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3880 udelay(80);
3883 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3885 /* Some third-party PHYs need to be reset on link going
3886 * down.
3887 */
3888 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3889 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3890 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3891 netif_carrier_ok(tp->dev)) {
3892 tg3_readphy(tp, MII_BMSR, &bmsr);
3893 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3894 !(bmsr & BMSR_LSTATUS))
3895 force_reset = 1;
3897 if (force_reset)
3898 tg3_phy_reset(tp);
3900 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3901 tg3_readphy(tp, MII_BMSR, &bmsr);
3902 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3903 !tg3_flag(tp, INIT_COMPLETE))
3904 bmsr = 0;
3906 if (!(bmsr & BMSR_LSTATUS)) {
3907 err = tg3_init_5401phy_dsp(tp);
3908 if (err)
3909 return err;
3911 tg3_readphy(tp, MII_BMSR, &bmsr);
3912 for (i = 0; i < 1000; i++) {
3913 udelay(10);
3914 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3915 (bmsr & BMSR_LSTATUS)) {
3916 udelay(40);
3917 break;
3921 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3922 TG3_PHY_REV_BCM5401_B0 &&
3923 !(bmsr & BMSR_LSTATUS) &&
3924 tp->link_config.active_speed == SPEED_1000) {
3925 err = tg3_phy_reset(tp);
3926 if (!err)
3927 err = tg3_init_5401phy_dsp(tp);
3928 if (err)
3929 return err;
3932 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3933 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3934 /* 5701 {A0,B0} CRC bug workaround */
3935 tg3_writephy(tp, 0x15, 0x0a75);
3936 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3937 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3938 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3941 /* Clear pending interrupts... */
3942 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3943 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3945 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3946 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3947 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3948 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3950 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3951 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3952 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3953 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3954 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3955 else
3956 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3959 current_link_up = 0;
3960 current_speed = SPEED_INVALID;
3961 current_duplex = DUPLEX_INVALID;
3963 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3964 err = tg3_phy_auxctl_read(tp,
3965 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3966 &val);
3967 if (!err && !(val & (1 << 10))) {
3968 tg3_phy_auxctl_write(tp,
3969 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3970 val | (1 << 10));
3971 goto relink;
3975 bmsr = 0;
3976 for (i = 0; i < 100; i++) {
3977 tg3_readphy(tp, MII_BMSR, &bmsr);
3978 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3979 (bmsr & BMSR_LSTATUS))
3980 break;
3981 udelay(40);
3984 if (bmsr & BMSR_LSTATUS) {
3985 u32 aux_stat, bmcr;
3987 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3988 for (i = 0; i < 2000; i++) {
3989 udelay(10);
3990 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3991 aux_stat)
3992 break;
3995 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3996 &current_speed,
3997 &current_duplex);
3999 bmcr = 0;
4000 for (i = 0; i < 200; i++) {
4001 tg3_readphy(tp, MII_BMCR, &bmcr);
4002 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4003 continue;
4004 if (bmcr && bmcr != 0x7fff)
4005 break;
4006 udelay(10);
4009 lcl_adv = 0;
4010 rmt_adv = 0;
4012 tp->link_config.active_speed = current_speed;
4013 tp->link_config.active_duplex = current_duplex;
4015 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4016 if ((bmcr & BMCR_ANENABLE) &&
4017 tg3_copper_is_advertising_all(tp,
4018 tp->link_config.advertising)) {
4019 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
4020 &rmt_adv))
4021 current_link_up = 1;
4023 } else {
4024 if (!(bmcr & BMCR_ANENABLE) &&
4025 tp->link_config.speed == current_speed &&
4026 tp->link_config.duplex == current_duplex &&
4027 tp->link_config.flowctrl ==
4028 tp->link_config.active_flowctrl) {
4029 current_link_up = 1;
4033 if (current_link_up == 1 &&
4034 tp->link_config.active_duplex == DUPLEX_FULL)
4035 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4038 relink:
4039 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4040 tg3_phy_copper_begin(tp);
4042 tg3_readphy(tp, MII_BMSR, &bmsr);
4043 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4044 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4045 current_link_up = 1;
4048 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4049 if (current_link_up == 1) {
4050 if (tp->link_config.active_speed == SPEED_100 ||
4051 tp->link_config.active_speed == SPEED_10)
4052 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4053 else
4054 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4055 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4056 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4057 else
4058 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4060 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4061 if (tp->link_config.active_duplex == DUPLEX_HALF)
4062 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4064 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4065 if (current_link_up == 1 &&
4066 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4067 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4068 else
4069 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4072 /* ??? Without this setting Netgear GA302T PHY does not
4073 * ??? send/receive packets...
4074 */
4075 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4076 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4077 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4078 tw32_f(MAC_MI_MODE, tp->mi_mode);
4079 udelay(80);
4082 tw32_f(MAC_MODE, tp->mac_mode);
4083 udelay(40);
4085 tg3_phy_eee_adjust(tp, current_link_up);
4087 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4088 /* Polled via timer. */
4089 tw32_f(MAC_EVENT, 0);
4090 } else {
4091 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4093 udelay(40);
4095 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4096 current_link_up == 1 &&
4097 tp->link_config.active_speed == SPEED_1000 &&
4098 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4099 udelay(120);
4100 tw32_f(MAC_STATUS,
4101 (MAC_STATUS_SYNC_CHANGED |
4102 MAC_STATUS_CFG_CHANGED));
4103 udelay(40);
4104 tg3_write_mem(tp,
4105 NIC_SRAM_FIRMWARE_MBOX,
4106 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4109 /* Prevent send BD corruption. */
4110 if (tg3_flag(tp, CLKREQ_BUG)) {
4111 u16 oldlnkctl, newlnkctl;
4113 pci_read_config_word(tp->pdev,
4114 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4115 &oldlnkctl);
4116 if (tp->link_config.active_speed == SPEED_100 ||
4117 tp->link_config.active_speed == SPEED_10)
4118 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4119 else
4120 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4121 if (newlnkctl != oldlnkctl)
4122 pci_write_config_word(tp->pdev,
4123 pci_pcie_cap(tp->pdev) +
4124 PCI_EXP_LNKCTL, newlnkctl);
4127 if (current_link_up != netif_carrier_ok(tp->dev)) {
4128 if (current_link_up)
4129 netif_carrier_on(tp->dev);
4130 else
4131 netif_carrier_off(tp->dev);
4132 tg3_link_report(tp);
4135 return 0;
4138 struct tg3_fiber_aneginfo {
4139 int state;
4140 #define ANEG_STATE_UNKNOWN 0
4141 #define ANEG_STATE_AN_ENABLE 1
4142 #define ANEG_STATE_RESTART_INIT 2
4143 #define ANEG_STATE_RESTART 3
4144 #define ANEG_STATE_DISABLE_LINK_OK 4
4145 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4146 #define ANEG_STATE_ABILITY_DETECT 6
4147 #define ANEG_STATE_ACK_DETECT_INIT 7
4148 #define ANEG_STATE_ACK_DETECT 8
4149 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4150 #define ANEG_STATE_COMPLETE_ACK 10
4151 #define ANEG_STATE_IDLE_DETECT_INIT 11
4152 #define ANEG_STATE_IDLE_DETECT 12
4153 #define ANEG_STATE_LINK_OK 13
4154 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4155 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4157 u32 flags;
4158 #define MR_AN_ENABLE 0x00000001
4159 #define MR_RESTART_AN 0x00000002
4160 #define MR_AN_COMPLETE 0x00000004
4161 #define MR_PAGE_RX 0x00000008
4162 #define MR_NP_LOADED 0x00000010
4163 #define MR_TOGGLE_TX 0x00000020
4164 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4165 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4166 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4167 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4168 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4169 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4170 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4171 #define MR_TOGGLE_RX 0x00002000
4172 #define MR_NP_RX 0x00004000
4174 #define MR_LINK_OK 0x80000000
4176 unsigned long link_time, cur_time;
4178 u32 ability_match_cfg;
4179 int ability_match_count;
4181 char ability_match, idle_match, ack_match;
4183 u32 txconfig, rxconfig;
4184 #define ANEG_CFG_NP 0x00000080
4185 #define ANEG_CFG_ACK 0x00000040
4186 #define ANEG_CFG_RF2 0x00000020
4187 #define ANEG_CFG_RF1 0x00000010
4188 #define ANEG_CFG_PS2 0x00000001
4189 #define ANEG_CFG_PS1 0x00008000
4190 #define ANEG_CFG_HD 0x00004000
4191 #define ANEG_CFG_FD 0x00002000
4192 #define ANEG_CFG_INVAL 0x00001f06
4195 #define ANEG_OK 0
4196 #define ANEG_DONE 1
4197 #define ANEG_TIMER_ENAB 2
4198 #define ANEG_FAILED -1
4200 #define ANEG_STATE_SETTLE_TIME 10000
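/* Note on the timing constants above: tg3_fiber_aneg_smachine() below
 * advances ap->cur_time once per invocation, and fiber_autoneg() calls
 * it roughly once per microsecond (udelay(1) between calls).  So
 * ANEG_STATE_SETTLE_TIME corresponds to roughly 10 ms, and the 195000
 * iteration bound in fiber_autoneg() to roughly 195 ms, ignoring loop
 * overhead.
 */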
4202 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4203 struct tg3_fiber_aneginfo *ap)
4205 u16 flowctrl;
4206 unsigned long delta;
4207 u32 rx_cfg_reg;
4208 int ret;
4210 if (ap->state == ANEG_STATE_UNKNOWN) {
4211 ap->rxconfig = 0;
4212 ap->link_time = 0;
4213 ap->cur_time = 0;
4214 ap->ability_match_cfg = 0;
4215 ap->ability_match_count = 0;
4216 ap->ability_match = 0;
4217 ap->idle_match = 0;
4218 ap->ack_match = 0;
4220 ap->cur_time++;
4222 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4223 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4225 if (rx_cfg_reg != ap->ability_match_cfg) {
4226 ap->ability_match_cfg = rx_cfg_reg;
4227 ap->ability_match = 0;
4228 ap->ability_match_count = 0;
4229 } else {
4230 if (++ap->ability_match_count > 1) {
4231 ap->ability_match = 1;
4232 ap->ability_match_cfg = rx_cfg_reg;
4235 if (rx_cfg_reg & ANEG_CFG_ACK)
4236 ap->ack_match = 1;
4237 else
4238 ap->ack_match = 0;
4240 ap->idle_match = 0;
4241 } else {
4242 ap->idle_match = 1;
4243 ap->ability_match_cfg = 0;
4244 ap->ability_match_count = 0;
4245 ap->ability_match = 0;
4246 ap->ack_match = 0;
4248 rx_cfg_reg = 0;
4251 ap->rxconfig = rx_cfg_reg;
4252 ret = ANEG_OK;
4254 switch (ap->state) {
4255 case ANEG_STATE_UNKNOWN:
4256 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4257 ap->state = ANEG_STATE_AN_ENABLE;
4259 /* fallthru */
4260 case ANEG_STATE_AN_ENABLE:
4261 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4262 if (ap->flags & MR_AN_ENABLE) {
4263 ap->link_time = 0;
4264 ap->cur_time = 0;
4265 ap->ability_match_cfg = 0;
4266 ap->ability_match_count = 0;
4267 ap->ability_match = 0;
4268 ap->idle_match = 0;
4269 ap->ack_match = 0;
4271 ap->state = ANEG_STATE_RESTART_INIT;
4272 } else {
4273 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4275 break;
4277 case ANEG_STATE_RESTART_INIT:
4278 ap->link_time = ap->cur_time;
4279 ap->flags &= ~(MR_NP_LOADED);
4280 ap->txconfig = 0;
4281 tw32(MAC_TX_AUTO_NEG, 0);
4282 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4283 tw32_f(MAC_MODE, tp->mac_mode);
4284 udelay(40);
4286 ret = ANEG_TIMER_ENAB;
4287 ap->state = ANEG_STATE_RESTART;
4289 /* fallthru */
4290 case ANEG_STATE_RESTART:
4291 delta = ap->cur_time - ap->link_time;
4292 if (delta > ANEG_STATE_SETTLE_TIME)
4293 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4294 else
4295 ret = ANEG_TIMER_ENAB;
4296 break;
4298 case ANEG_STATE_DISABLE_LINK_OK:
4299 ret = ANEG_DONE;
4300 break;
4302 case ANEG_STATE_ABILITY_DETECT_INIT:
4303 ap->flags &= ~(MR_TOGGLE_TX);
4304 ap->txconfig = ANEG_CFG_FD;
4305 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4306 if (flowctrl & ADVERTISE_1000XPAUSE)
4307 ap->txconfig |= ANEG_CFG_PS1;
4308 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4309 ap->txconfig |= ANEG_CFG_PS2;
4310 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4311 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4312 tw32_f(MAC_MODE, tp->mac_mode);
4313 udelay(40);
4315 ap->state = ANEG_STATE_ABILITY_DETECT;
4316 break;
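/* Worked example, not driver code.  Assuming
 * tg3_advert_flowctrl_1000X() (defined earlier in this file) uses the
 * conventional mapping
 *
 *	FLOW_CTRL_TX | FLOW_CTRL_RX -> ADVERTISE_1000XPAUSE
 *	FLOW_CTRL_RX alone          -> ADVERTISE_1000XPAUSE |
 *	                               ADVERTISE_1000XPSE_ASYM
 *	FLOW_CTRL_TX alone          -> ADVERTISE_1000XPSE_ASYM
 *
 * then flowctrl == FLOW_CTRL_RX makes the case above build
 * ap->txconfig = ANEG_CFG_FD | ANEG_CFG_PS1 | ANEG_CFG_PS2.
 */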
4318 case ANEG_STATE_ABILITY_DETECT:
4319 if (ap->ability_match != 0 && ap->rxconfig != 0)
4320 ap->state = ANEG_STATE_ACK_DETECT_INIT;
4321 break;
4323 case ANEG_STATE_ACK_DETECT_INIT:
4324 ap->txconfig |= ANEG_CFG_ACK;
4325 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4326 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4327 tw32_f(MAC_MODE, tp->mac_mode);
4328 udelay(40);
4330 ap->state = ANEG_STATE_ACK_DETECT;
4332 /* fallthru */
4333 case ANEG_STATE_ACK_DETECT:
4334 if (ap->ack_match != 0) {
4335 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4336 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4337 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4338 } else {
4339 ap->state = ANEG_STATE_AN_ENABLE;
4341 } else if (ap->ability_match != 0 &&
4342 ap->rxconfig == 0) {
4343 ap->state = ANEG_STATE_AN_ENABLE;
4345 break;
4347 case ANEG_STATE_COMPLETE_ACK_INIT:
4348 if (ap->rxconfig & ANEG_CFG_INVAL) {
4349 ret = ANEG_FAILED;
4350 break;
4352 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4353 MR_LP_ADV_HALF_DUPLEX |
4354 MR_LP_ADV_SYM_PAUSE |
4355 MR_LP_ADV_ASYM_PAUSE |
4356 MR_LP_ADV_REMOTE_FAULT1 |
4357 MR_LP_ADV_REMOTE_FAULT2 |
4358 MR_LP_ADV_NEXT_PAGE |
4359 MR_TOGGLE_RX |
4360 MR_NP_RX);
4361 if (ap->rxconfig & ANEG_CFG_FD)
4362 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4363 if (ap->rxconfig & ANEG_CFG_HD)
4364 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4365 if (ap->rxconfig & ANEG_CFG_PS1)
4366 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4367 if (ap->rxconfig & ANEG_CFG_PS2)
4368 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4369 if (ap->rxconfig & ANEG_CFG_RF1)
4370 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4371 if (ap->rxconfig & ANEG_CFG_RF2)
4372 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4373 if (ap->rxconfig & ANEG_CFG_NP)
4374 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4376 ap->link_time = ap->cur_time;
4378 ap->flags ^= (MR_TOGGLE_TX);
4379 if (ap->rxconfig & 0x0008)
4380 ap->flags |= MR_TOGGLE_RX;
4381 if (ap->rxconfig & ANEG_CFG_NP)
4382 ap->flags |= MR_NP_RX;
4383 ap->flags |= MR_PAGE_RX;
4385 ap->state = ANEG_STATE_COMPLETE_ACK;
4386 ret = ANEG_TIMER_ENAB;
4387 break;
4389 case ANEG_STATE_COMPLETE_ACK:
4390 if (ap->ability_match != 0 &&
4391 ap->rxconfig == 0) {
4392 ap->state = ANEG_STATE_AN_ENABLE;
4393 break;
4395 delta = ap->cur_time - ap->link_time;
4396 if (delta > ANEG_STATE_SETTLE_TIME) {
4397 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4398 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4399 } else {
4400 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4401 !(ap->flags & MR_NP_RX)) {
4402 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4403 } else {
4404 ret = ANEG_FAILED;
4408 break;
4410 case ANEG_STATE_IDLE_DETECT_INIT:
4411 ap->link_time = ap->cur_time;
4412 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4413 tw32_f(MAC_MODE, tp->mac_mode);
4414 udelay(40);
4416 ap->state = ANEG_STATE_IDLE_DETECT;
4417 ret = ANEG_TIMER_ENAB;
4418 break;
4420 case ANEG_STATE_IDLE_DETECT:
4421 if (ap->ability_match != 0 &&
4422 ap->rxconfig == 0) {
4423 ap->state = ANEG_STATE_AN_ENABLE;
4424 break;
4426 delta = ap->cur_time - ap->link_time;
4427 if (delta > ANEG_STATE_SETTLE_TIME) {
4428 /* XXX another gem from the Broadcom driver :( */
4429 ap->state = ANEG_STATE_LINK_OK;
4431 break;
4433 case ANEG_STATE_LINK_OK:
4434 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4435 ret = ANEG_DONE;
4436 break;
4438 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4439 /* ??? unimplemented */
4440 break;
4442 case ANEG_STATE_NEXT_PAGE_WAIT:
4443 /* ??? unimplemented */
4444 break;
4446 default:
4447 ret = ANEG_FAILED;
4448 break;
4451 return ret;
4454 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4456 int res = 0;
4457 struct tg3_fiber_aneginfo aninfo;
4458 int status = ANEG_FAILED;
4459 unsigned int tick;
4460 u32 tmp;
4462 tw32_f(MAC_TX_AUTO_NEG, 0);
4464 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4465 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4466 udelay(40);
4468 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4469 udelay(40);
4471 memset(&aninfo, 0, sizeof(aninfo));
4472 aninfo.flags |= MR_AN_ENABLE;
4473 aninfo.state = ANEG_STATE_UNKNOWN;
4474 aninfo.cur_time = 0;
4475 tick = 0;
4476 while (++tick < 195000) {
4477 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4478 if (status == ANEG_DONE || status == ANEG_FAILED)
4479 break;
4481 udelay(1);
4484 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4485 tw32_f(MAC_MODE, tp->mac_mode);
4486 udelay(40);
4488 *txflags = aninfo.txconfig;
4489 *rxflags = aninfo.flags;
4491 if (status == ANEG_DONE &&
4492 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4493 MR_LP_ADV_FULL_DUPLEX)))
4494 res = 1;
4496 return res;
4499 static void tg3_init_bcm8002(struct tg3 *tp)
4501 u32 mac_status = tr32(MAC_STATUS);
4502 int i;
4504 /* Reset when initializing for the first time, or when we have a link. */
4505 if (tg3_flag(tp, INIT_COMPLETE) &&
4506 !(mac_status & MAC_STATUS_PCS_SYNCED))
4507 return;
4509 /* Set PLL lock range. */
4510 tg3_writephy(tp, 0x16, 0x8007);
4512 /* SW reset */
4513 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4515 /* Wait for reset to complete. */
4516 /* XXX schedule_timeout() ... */
4517 for (i = 0; i < 500; i++)
4518 udelay(10);
4520 /* Config mode; select PMA/Ch 1 regs. */
4521 tg3_writephy(tp, 0x10, 0x8411);
4523 /* Enable auto-lock and comdet, select txclk for tx. */
4524 tg3_writephy(tp, 0x11, 0x0a10);
4526 tg3_writephy(tp, 0x18, 0x00a0);
4527 tg3_writephy(tp, 0x16, 0x41ff);
4529 /* Assert and deassert POR. */
4530 tg3_writephy(tp, 0x13, 0x0400);
4531 udelay(40);
4532 tg3_writephy(tp, 0x13, 0x0000);
4534 tg3_writephy(tp, 0x11, 0x0a50);
4535 udelay(40);
4536 tg3_writephy(tp, 0x11, 0x0a10);
4538 /* Wait for signal to stabilize */
4539 /* XXX schedule_timeout() ... */
4540 for (i = 0; i < 15000; i++)
4541 udelay(10);
4543 /* Deselect the channel register so we can read the PHYID
4544 * later.
4545 */
4546 tg3_writephy(tp, 0x10, 0x8011);
4549 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4551 u16 flowctrl;
4552 u32 sg_dig_ctrl, sg_dig_status;
4553 u32 serdes_cfg, expected_sg_dig_ctrl;
4554 int workaround, port_a;
4555 int current_link_up;
4557 serdes_cfg = 0;
4558 expected_sg_dig_ctrl = 0;
4559 workaround = 0;
4560 port_a = 1;
4561 current_link_up = 0;
4563 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4564 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4565 workaround = 1;
4566 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4567 port_a = 0;
4569 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4570 /* preserve bits 20-23 for voltage regulator */
4571 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4574 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4576 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4577 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4578 if (workaround) {
4579 u32 val = serdes_cfg;
4581 if (port_a)
4582 val |= 0xc010000;
4583 else
4584 val |= 0x4010000;
4585 tw32_f(MAC_SERDES_CFG, val);
4588 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4590 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4591 tg3_setup_flow_control(tp, 0, 0);
4592 current_link_up = 1;
4594 goto out;
4597 /* Want auto-negotiation. */
4598 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4600 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4601 if (flowctrl & ADVERTISE_1000XPAUSE)
4602 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4603 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4604 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4606 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4607 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4608 tp->serdes_counter &&
4609 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4610 MAC_STATUS_RCVD_CFG)) ==
4611 MAC_STATUS_PCS_SYNCED)) {
4612 tp->serdes_counter--;
4613 current_link_up = 1;
4614 goto out;
4616 restart_autoneg:
4617 if (workaround)
4618 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4619 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4620 udelay(5);
4621 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4623 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4624 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4625 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4626 MAC_STATUS_SIGNAL_DET)) {
4627 sg_dig_status = tr32(SG_DIG_STATUS);
4628 mac_status = tr32(MAC_STATUS);
4630 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4631 (mac_status & MAC_STATUS_PCS_SYNCED)) {
4632 u32 local_adv = 0, remote_adv = 0;
4634 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4635 local_adv |= ADVERTISE_1000XPAUSE;
4636 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4637 local_adv |= ADVERTISE_1000XPSE_ASYM;
4639 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4640 remote_adv |= LPA_1000XPAUSE;
4641 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4642 remote_adv |= LPA_1000XPAUSE_ASYM;
4644 tg3_setup_flow_control(tp, local_adv, remote_adv);
4645 current_link_up = 1;
4646 tp->serdes_counter = 0;
4647 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4648 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4649 if (tp->serdes_counter)
4650 tp->serdes_counter--;
4651 else {
4652 if (workaround) {
4653 u32 val = serdes_cfg;
4655 if (port_a)
4656 val |= 0xc010000;
4657 else
4658 val |= 0x4010000;
4660 tw32_f(MAC_SERDES_CFG, val);
4663 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4664 udelay(40);
4666 /* Link parallel detection - link is up only
4667 * if we have PCS_SYNC and are not receiving
4668 * config code words. */
4669 mac_status = tr32(MAC_STATUS);
4670 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4671 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4672 tg3_setup_flow_control(tp, 0, 0);
4673 current_link_up = 1;
4674 tp->phy_flags |=
4675 TG3_PHYFLG_PARALLEL_DETECT;
4676 tp->serdes_counter =
4677 SERDES_PARALLEL_DET_TIMEOUT;
4678 } else
4679 goto restart_autoneg;
4682 } else {
4683 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4684 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4687 out:
4688 return current_link_up;
4691 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4693 int current_link_up = 0;
4695 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4696 goto out;
4698 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4699 u32 txflags, rxflags;
4700 int i;
4702 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4703 u32 local_adv = 0, remote_adv = 0;
4705 if (txflags & ANEG_CFG_PS1)
4706 local_adv |= ADVERTISE_1000XPAUSE;
4707 if (txflags & ANEG_CFG_PS2)
4708 local_adv |= ADVERTISE_1000XPSE_ASYM;
4710 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4711 remote_adv |= LPA_1000XPAUSE;
4712 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4713 remote_adv |= LPA_1000XPAUSE_ASYM;
4715 tg3_setup_flow_control(tp, local_adv, remote_adv);
4717 current_link_up = 1;
4719 for (i = 0; i < 30; i++) {
4720 udelay(20);
4721 tw32_f(MAC_STATUS,
4722 (MAC_STATUS_SYNC_CHANGED |
4723 MAC_STATUS_CFG_CHANGED));
4724 udelay(40);
4725 if ((tr32(MAC_STATUS) &
4726 (MAC_STATUS_SYNC_CHANGED |
4727 MAC_STATUS_CFG_CHANGED)) == 0)
4728 break;
4731 mac_status = tr32(MAC_STATUS);
4732 if (current_link_up == 0 &&
4733 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4734 !(mac_status & MAC_STATUS_RCVD_CFG))
4735 current_link_up = 1;
4736 } else {
4737 tg3_setup_flow_control(tp, 0, 0);
4739 /* Forcing 1000FD link up. */
4740 current_link_up = 1;
4742 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4743 udelay(40);
4745 tw32_f(MAC_MODE, tp->mac_mode);
4746 udelay(40);
4749 out:
4750 return current_link_up;
4753 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4755 u32 orig_pause_cfg;
4756 u16 orig_active_speed;
4757 u8 orig_active_duplex;
4758 u32 mac_status;
4759 int current_link_up;
4760 int i;
4762 orig_pause_cfg = tp->link_config.active_flowctrl;
4763 orig_active_speed = tp->link_config.active_speed;
4764 orig_active_duplex = tp->link_config.active_duplex;
4766 if (!tg3_flag(tp, HW_AUTONEG) &&
4767 netif_carrier_ok(tp->dev) &&
4768 tg3_flag(tp, INIT_COMPLETE)) {
4769 mac_status = tr32(MAC_STATUS);
4770 mac_status &= (MAC_STATUS_PCS_SYNCED |
4771 MAC_STATUS_SIGNAL_DET |
4772 MAC_STATUS_CFG_CHANGED |
4773 MAC_STATUS_RCVD_CFG);
4774 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4775 MAC_STATUS_SIGNAL_DET)) {
4776 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4777 MAC_STATUS_CFG_CHANGED));
4778 return 0;
4782 tw32_f(MAC_TX_AUTO_NEG, 0);
4784 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4785 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4786 tw32_f(MAC_MODE, tp->mac_mode);
4787 udelay(40);
4789 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4790 tg3_init_bcm8002(tp);
4792 /* Enable link change event even when serdes polling. */
4793 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4794 udelay(40);
4796 current_link_up = 0;
4797 mac_status = tr32(MAC_STATUS);
4799 if (tg3_flag(tp, HW_AUTONEG))
4800 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4801 else
4802 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4804 tp->napi[0].hw_status->status =
4805 (SD_STATUS_UPDATED |
4806 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4808 for (i = 0; i < 100; i++) {
4809 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4810 MAC_STATUS_CFG_CHANGED));
4811 udelay(5);
4812 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4813 MAC_STATUS_CFG_CHANGED |
4814 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4815 break;
4818 mac_status = tr32(MAC_STATUS);
4819 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4820 current_link_up = 0;
4821 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4822 tp->serdes_counter == 0) {
4823 tw32_f(MAC_MODE, (tp->mac_mode |
4824 MAC_MODE_SEND_CONFIGS));
4825 udelay(1);
4826 tw32_f(MAC_MODE, tp->mac_mode);
4830 if (current_link_up == 1) {
4831 tp->link_config.active_speed = SPEED_1000;
4832 tp->link_config.active_duplex = DUPLEX_FULL;
4833 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4834 LED_CTRL_LNKLED_OVERRIDE |
4835 LED_CTRL_1000MBPS_ON));
4836 } else {
4837 tp->link_config.active_speed = SPEED_INVALID;
4838 tp->link_config.active_duplex = DUPLEX_INVALID;
4839 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4840 LED_CTRL_LNKLED_OVERRIDE |
4841 LED_CTRL_TRAFFIC_OVERRIDE));
4844 if (current_link_up != netif_carrier_ok(tp->dev)) {
4845 if (current_link_up)
4846 netif_carrier_on(tp->dev);
4847 else
4848 netif_carrier_off(tp->dev);
4849 tg3_link_report(tp);
4850 } else {
4851 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4852 if (orig_pause_cfg != now_pause_cfg ||
4853 orig_active_speed != tp->link_config.active_speed ||
4854 orig_active_duplex != tp->link_config.active_duplex)
4855 tg3_link_report(tp);
4858 return 0;
4861 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4863 int current_link_up, err = 0;
4864 u32 bmsr, bmcr;
4865 u16 current_speed;
4866 u8 current_duplex;
4867 u32 local_adv, remote_adv;
4869 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4870 tw32_f(MAC_MODE, tp->mac_mode);
4871 udelay(40);
4873 tw32(MAC_EVENT, 0);
4875 tw32_f(MAC_STATUS,
4876 (MAC_STATUS_SYNC_CHANGED |
4877 MAC_STATUS_CFG_CHANGED |
4878 MAC_STATUS_MI_COMPLETION |
4879 MAC_STATUS_LNKSTATE_CHANGED));
4880 udelay(40);
4882 if (force_reset)
4883 tg3_phy_reset(tp);
4885 current_link_up = 0;
4886 current_speed = SPEED_INVALID;
4887 current_duplex = DUPLEX_INVALID;
4889 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4890 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
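/* The back-to-back BMSR reads above are deliberate: the link-status
 * bit in BMSR is latched-low per IEEE 802.3 clause 22, so the first
 * read returns the latched (historical) value and the second read
 * reflects the current link state.
 */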
4891 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4892 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4893 bmsr |= BMSR_LSTATUS;
4894 else
4895 bmsr &= ~BMSR_LSTATUS;
4898 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4900 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4901 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4902 /* do nothing, just check for link up at the end */
4903 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4904 u32 adv, new_adv;
4906 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4907 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4908 ADVERTISE_1000XPAUSE |
4909 ADVERTISE_1000XPSE_ASYM |
4910 ADVERTISE_SLCT);
4912 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4914 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4915 new_adv |= ADVERTISE_1000XHALF;
4916 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4917 new_adv |= ADVERTISE_1000XFULL;
4919 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4920 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4921 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4922 tg3_writephy(tp, MII_BMCR, bmcr);
4924 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4925 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4926 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4928 return err;
4930 } else {
4931 u32 new_bmcr;
4933 bmcr &= ~BMCR_SPEED1000;
4934 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4936 if (tp->link_config.duplex == DUPLEX_FULL)
4937 new_bmcr |= BMCR_FULLDPLX;
4939 if (new_bmcr != bmcr) {
4940 /* BMCR_SPEED1000 is a reserved bit that needs
4941 * to be set on write.
4942 */
4943 new_bmcr |= BMCR_SPEED1000;
4945 /* Force a linkdown */
4946 if (netif_carrier_ok(tp->dev)) {
4947 u32 adv;
4949 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4950 adv &= ~(ADVERTISE_1000XFULL |
4951 ADVERTISE_1000XHALF |
4952 ADVERTISE_SLCT);
4953 tg3_writephy(tp, MII_ADVERTISE, adv);
4954 tg3_writephy(tp, MII_BMCR, bmcr |
4955 BMCR_ANRESTART |
4956 BMCR_ANENABLE);
4957 udelay(10);
4958 netif_carrier_off(tp->dev);
4960 tg3_writephy(tp, MII_BMCR, new_bmcr);
4961 bmcr = new_bmcr;
4962 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4963 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4964 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4965 ASIC_REV_5714) {
4966 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4967 bmsr |= BMSR_LSTATUS;
4968 else
4969 bmsr &= ~BMSR_LSTATUS;
4971 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4975 if (bmsr & BMSR_LSTATUS) {
4976 current_speed = SPEED_1000;
4977 current_link_up = 1;
4978 if (bmcr & BMCR_FULLDPLX)
4979 current_duplex = DUPLEX_FULL;
4980 else
4981 current_duplex = DUPLEX_HALF;
4983 local_adv = 0;
4984 remote_adv = 0;
4986 if (bmcr & BMCR_ANENABLE) {
4987 u32 common;
4989 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4990 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4991 common = local_adv & remote_adv;
4992 if (common & (ADVERTISE_1000XHALF |
4993 ADVERTISE_1000XFULL)) {
4994 if (common & ADVERTISE_1000XFULL)
4995 current_duplex = DUPLEX_FULL;
4996 else
4997 current_duplex = DUPLEX_HALF;
4998 } else if (!tg3_flag(tp, 5780_CLASS)) {
4999 /* Link is up via parallel detect */
5000 } else {
5001 current_link_up = 0;
5006 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5007 tg3_setup_flow_control(tp, local_adv, remote_adv);
5009 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5010 if (tp->link_config.active_duplex == DUPLEX_HALF)
5011 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5013 tw32_f(MAC_MODE, tp->mac_mode);
5014 udelay(40);
5016 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5018 tp->link_config.active_speed = current_speed;
5019 tp->link_config.active_duplex = current_duplex;
5021 if (current_link_up != netif_carrier_ok(tp->dev)) {
5022 if (current_link_up)
5023 netif_carrier_on(tp->dev);
5024 else {
5025 netif_carrier_off(tp->dev);
5026 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5028 tg3_link_report(tp);
5030 return err;
5033 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5035 if (tp->serdes_counter) {
5036 /* Give autoneg time to complete. */
5037 tp->serdes_counter--;
5038 return;
5041 if (!netif_carrier_ok(tp->dev) &&
5042 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5043 u32 bmcr;
5045 tg3_readphy(tp, MII_BMCR, &bmcr);
5046 if (bmcr & BMCR_ANENABLE) {
5047 u32 phy1, phy2;
5049 /* Select shadow register 0x1f */
5050 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5051 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5053 /* Select expansion interrupt status register */
5054 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5055 MII_TG3_DSP_EXP1_INT_STAT);
5056 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5057 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5059 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5060 /* We have signal detect and not receiving
5061 * config code words, link is up by parallel
5062 * detection.
5063 */
5065 bmcr &= ~BMCR_ANENABLE;
5066 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5067 tg3_writephy(tp, MII_BMCR, bmcr);
5068 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5071 } else if (netif_carrier_ok(tp->dev) &&
5072 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5073 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5074 u32 phy2;
5076 /* Select expansion interrupt status register */
5077 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5078 MII_TG3_DSP_EXP1_INT_STAT);
5079 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5080 if (phy2 & 0x20) {
5081 u32 bmcr;
5083 /* Config code words received, turn on autoneg. */
5084 tg3_readphy(tp, MII_BMCR, &bmcr);
5085 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5087 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5093 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5095 u32 val;
5096 int err;
5098 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5099 err = tg3_setup_fiber_phy(tp, force_reset);
5100 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5101 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5102 else
5103 err = tg3_setup_copper_phy(tp, force_reset);
5105 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5106 u32 scale;
5108 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5109 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5110 scale = 65;
5111 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5112 scale = 6;
5113 else
5114 scale = 12;
5116 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5117 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5118 tw32(GRC_MISC_CFG, val);
5121 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5122 (6 << TX_LENGTHS_IPG_SHIFT);
5123 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5124 val |= tr32(MAC_TX_LENGTHS) &
5125 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5126 TX_LENGTHS_CNT_DWN_VAL_MSK);
5128 if (tp->link_config.active_speed == SPEED_1000 &&
5129 tp->link_config.active_duplex == DUPLEX_HALF)
5130 tw32(MAC_TX_LENGTHS, val |
5131 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5132 else
5133 tw32(MAC_TX_LENGTHS, val |
5134 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5136 if (!tg3_flag(tp, 5705_PLUS)) {
5137 if (netif_carrier_ok(tp->dev)) {
5138 tw32(HOSTCC_STAT_COAL_TICKS,
5139 tp->coal.stats_block_coalesce_usecs);
5140 } else {
5141 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5145 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5146 val = tr32(PCIE_PWR_MGMT_THRESH);
5147 if (!netif_carrier_ok(tp->dev))
5148 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5149 tp->pwrmgmt_thresh;
5150 else
5151 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5152 tw32(PCIE_PWR_MGMT_THRESH, val);
5155 return err;
5158 static inline int tg3_irq_sync(struct tg3 *tp)
5160 return tp->irq_sync;
5163 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5165 int i;
5167 dst = (u32 *)((u8 *)dst + off);
5168 for (i = 0; i < len; i += sizeof(u32))
5169 *dst++ = tr32(off + i);
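/* Illustrative usage, not driver code: the "dst + off" adjustment
 * above makes the destination buffer indexable by absolute register
 * offset, e.g.
 *
 *	u32 regs[TG3_REG_BLK_SIZE / sizeof(u32)];
 *	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
 *	// regs[(MAC_MODE + 4) / 4] now holds tr32(MAC_MODE + 4)
 *
 * which is what lets tg3_dump_state() below print "i * 4" as the
 * register address.
 */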
5172 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5174 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5175 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5176 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5177 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5178 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5179 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5180 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5181 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5182 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5183 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5184 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5185 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5186 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5187 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5188 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5189 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5190 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5191 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5192 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5194 if (tg3_flag(tp, SUPPORT_MSIX))
5195 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5197 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5198 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5199 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5200 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5201 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5202 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5203 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5204 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5206 if (!tg3_flag(tp, 5705_PLUS)) {
5207 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5208 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5209 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5212 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5213 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5214 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5215 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5216 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5218 if (tg3_flag(tp, NVRAM))
5219 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5222 static void tg3_dump_state(struct tg3 *tp)
5224 int i;
5225 u32 *regs;
5227 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5228 if (!regs) {
5229 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5230 return;
5233 if (tg3_flag(tp, PCI_EXPRESS)) {
5234 /* Read up to but not including private PCI registers */
5235 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5236 regs[i / sizeof(u32)] = tr32(i);
5237 } else
5238 tg3_dump_legacy_regs(tp, regs);
5240 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5241 if (!regs[i + 0] && !regs[i + 1] &&
5242 !regs[i + 2] && !regs[i + 3])
5243 continue;
5245 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5246 i * 4,
5247 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5250 kfree(regs);
5252 for (i = 0; i < tp->irq_cnt; i++) {
5253 struct tg3_napi *tnapi = &tp->napi[i];
5255 /* SW status block */
5256 netdev_err(tp->dev,
5257 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5259 tnapi->hw_status->status,
5260 tnapi->hw_status->status_tag,
5261 tnapi->hw_status->rx_jumbo_consumer,
5262 tnapi->hw_status->rx_consumer,
5263 tnapi->hw_status->rx_mini_consumer,
5264 tnapi->hw_status->idx[0].rx_producer,
5265 tnapi->hw_status->idx[0].tx_consumer);
5267 netdev_err(tp->dev,
5268 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5270 tnapi->last_tag, tnapi->last_irq_tag,
5271 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5272 tnapi->rx_rcb_ptr,
5273 tnapi->prodring.rx_std_prod_idx,
5274 tnapi->prodring.rx_std_cons_idx,
5275 tnapi->prodring.rx_jmb_prod_idx,
5276 tnapi->prodring.rx_jmb_cons_idx);
5280 /* This is called whenever we suspect that the system chipset is re-
5281 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5282 * is bogus tx completions. We try to recover by setting the
5283 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5284 * in the workqueue.
5285 */
5286 static void tg3_tx_recover(struct tg3 *tp)
5288 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5289 tp->write32_tx_mbox == tg3_write_indirect_mbox);
5291 netdev_warn(tp->dev,
5292 "The system may be re-ordering memory-mapped I/O "
5293 "cycles to the network device, attempting to recover. "
5294 "Please report the problem to the driver maintainer "
5295 "and include system chipset information.\n");
5297 spin_lock(&tp->lock);
5298 tg3_flag_set(tp, TX_RECOVERY_PENDING);
5299 spin_unlock(&tp->lock);
5302 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5304 /* Tell compiler to fetch tx indices from memory. */
5305 barrier();
5306 return tnapi->tx_pending -
5307 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
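/* Worked example of the arithmetic above, assuming
 * TG3_TX_RING_SIZE == 512: with tx_prod == 10 and tx_cons == 500,
 *
 *	(10 - 500) & 511 == (u32)-490 & 511 == 22
 *
 * descriptors are in flight, so e.g. tx_pending == 511 leaves 489
 * free slots.  The barrier() only keeps the compiler from using
 * stale cached indices; it is not an SMP memory barrier.
 */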
5310 /* Tigon3 never reports partial packet sends. So we do not
5311 * need special logic to handle SKBs that have not had all
5312 * of their frags sent yet, like SunGEM does.
5313 */
5314 static void tg3_tx(struct tg3_napi *tnapi)
5316 struct tg3 *tp = tnapi->tp;
5317 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5318 u32 sw_idx = tnapi->tx_cons;
5319 struct netdev_queue *txq;
5320 int index = tnapi - tp->napi;
5322 if (tg3_flag(tp, ENABLE_TSS))
5323 index--;
5325 txq = netdev_get_tx_queue(tp->dev, index);
5327 while (sw_idx != hw_idx) {
5328 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5329 struct sk_buff *skb = ri->skb;
5330 int i, tx_bug = 0;
5332 if (unlikely(skb == NULL)) {
5333 tg3_tx_recover(tp);
5334 return;
5337 pci_unmap_single(tp->pdev,
5338 dma_unmap_addr(ri, mapping),
5339 skb_headlen(skb),
5340 PCI_DMA_TODEVICE);
5342 ri->skb = NULL;
5344 while (ri->fragmented) {
5345 ri->fragmented = false;
5346 sw_idx = NEXT_TX(sw_idx);
5347 ri = &tnapi->tx_buffers[sw_idx];
5350 sw_idx = NEXT_TX(sw_idx);
5352 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5353 ri = &tnapi->tx_buffers[sw_idx];
5354 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5355 tx_bug = 1;
5357 pci_unmap_page(tp->pdev,
5358 dma_unmap_addr(ri, mapping),
5359 skb_frag_size(&skb_shinfo(skb)->frags[i]),
5360 PCI_DMA_TODEVICE);
5362 while (ri->fragmented) {
5363 ri->fragmented = false;
5364 sw_idx = NEXT_TX(sw_idx);
5365 ri = &tnapi->tx_buffers[sw_idx];
5368 sw_idx = NEXT_TX(sw_idx);
5371 dev_kfree_skb(skb);
5373 if (unlikely(tx_bug)) {
5374 tg3_tx_recover(tp);
5375 return;
5379 tnapi->tx_cons = sw_idx;
5381 /* Need to make the tx_cons update visible to tg3_start_xmit()
5382 * before checking for netif_queue_stopped(). Without the
5383 * memory barrier, there is a small possibility that tg3_start_xmit()
5384 * will miss it and cause the queue to be stopped forever.
5386 smp_mb();
5388 if (unlikely(netif_tx_queue_stopped(txq) &&
5389 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5390 __netif_tx_lock(txq, smp_processor_id());
5391 if (netif_tx_queue_stopped(txq) &&
5392 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5393 netif_tx_wake_queue(txq);
5394 __netif_tx_unlock(txq);
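/* Illustrative interleaving, not driver code: the race the smp_mb()
 * above closes.
 *
 *	tg3_start_xmit()		tg3_tx()
 *	----------------		--------
 *	ring looks full
 *	netif_tx_stop_queue()		tnapi->tx_cons = sw_idx;
 *					smp_mb();
 *					stopped && avail > thresh?
 *						-> wake queue
 *	re-checks tg3_tx_avail()
 *
 * Without the barrier, the tx_cons store could become visible after
 * the stopped-queue test and both sides could miss the wakeup,
 * leaving the queue stopped forever.
 */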
5398 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5400 if (!ri->skb)
5401 return;
5403 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5404 map_sz, PCI_DMA_FROMDEVICE);
5405 dev_kfree_skb_any(ri->skb);
5406 ri->skb = NULL;
5409 /* Returns size of skb allocated or < 0 on error.
5411 * We only need to fill in the address because the other members
5412 * of the RX descriptor are invariant, see tg3_init_rings.
5414 * Note the purposeful asymmetry of cpu vs. chip accesses. For
5415 * posting buffers we only dirty the first cache line of the RX
5416 * descriptor (containing the address). Whereas for the RX status
5417 * buffers the cpu only reads the last cacheline of the RX descriptor
5418 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5419 */
5420 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5421 u32 opaque_key, u32 dest_idx_unmasked)
5423 struct tg3_rx_buffer_desc *desc;
5424 struct ring_info *map;
5425 struct sk_buff *skb;
5426 dma_addr_t mapping;
5427 int skb_size, dest_idx;
5429 switch (opaque_key) {
5430 case RXD_OPAQUE_RING_STD:
5431 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5432 desc = &tpr->rx_std[dest_idx];
5433 map = &tpr->rx_std_buffers[dest_idx];
5434 skb_size = tp->rx_pkt_map_sz;
5435 break;
5437 case RXD_OPAQUE_RING_JUMBO:
5438 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5439 desc = &tpr->rx_jmb[dest_idx].std;
5440 map = &tpr->rx_jmb_buffers[dest_idx];
5441 skb_size = TG3_RX_JMB_MAP_SZ;
5442 break;
5444 default:
5445 return -EINVAL;
5448 /* Do not overwrite any of the map or rp information
5449 * until we are sure we can commit to a new buffer.
5451 * Callers depend upon this behavior and assume that
5452 * we leave everything unchanged if we fail.
5453 */
5454 skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
5455 if (skb == NULL)
5456 return -ENOMEM;
5458 skb_reserve(skb, TG3_RX_OFFSET(tp));
5460 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
5461 PCI_DMA_FROMDEVICE);
5462 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5463 dev_kfree_skb(skb);
5464 return -EIO;
5467 map->skb = skb;
5468 dma_unmap_addr_set(map, mapping, mapping);
5470 desc->addr_hi = ((u64)mapping >> 32);
5471 desc->addr_lo = ((u64)mapping & 0xffffffff);
5473 return skb_size;
5476 /* We only need to move the address over because the other
5477 * members of the RX descriptor are invariant. See notes above
5478 * tg3_alloc_rx_skb for full details.
5479 */
5480 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5481 struct tg3_rx_prodring_set *dpr,
5482 u32 opaque_key, int src_idx,
5483 u32 dest_idx_unmasked)
5485 struct tg3 *tp = tnapi->tp;
5486 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5487 struct ring_info *src_map, *dest_map;
5488 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5489 int dest_idx;
5491 switch (opaque_key) {
5492 case RXD_OPAQUE_RING_STD:
5493 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5494 dest_desc = &dpr->rx_std[dest_idx];
5495 dest_map = &dpr->rx_std_buffers[dest_idx];
5496 src_desc = &spr->rx_std[src_idx];
5497 src_map = &spr->rx_std_buffers[src_idx];
5498 break;
5500 case RXD_OPAQUE_RING_JUMBO:
5501 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5502 dest_desc = &dpr->rx_jmb[dest_idx].std;
5503 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5504 src_desc = &spr->rx_jmb[src_idx].std;
5505 src_map = &spr->rx_jmb_buffers[src_idx];
5506 break;
5508 default:
5509 return;
5512 dest_map->skb = src_map->skb;
5513 dma_unmap_addr_set(dest_map, mapping,
5514 dma_unmap_addr(src_map, mapping));
5515 dest_desc->addr_hi = src_desc->addr_hi;
5516 dest_desc->addr_lo = src_desc->addr_lo;
5518 /* Ensure that the update to the skb happens after the physical
5519 * addresses have been transferred to the new BD location.
5520 */
5521 smp_wmb();
5523 src_map->skb = NULL;
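/* The smp_wmb() above pairs with the smp_rmb() in
 * tg3_rx_prodring_xfer() below: that function inspects ring_info.skb
 * before copying descriptor addresses, so clearing src_map->skb only
 * after the addresses have been copied guarantees a consistent view
 * of slot ownership and contents.
 */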
5526 /* The RX ring scheme is composed of multiple rings which post fresh
5527 * buffers to the chip, and one special ring the chip uses to report
5528 * status back to the host.
5530 * The special ring reports the status of received packets to the
5531 * host. The chip does not write into the original descriptor the
5532 * RX buffer was obtained from. The chip simply takes the original
5533 * descriptor as provided by the host, updates the status and length
5534 * field, then writes this into the next status ring entry.
5536 * Each ring the host uses to post buffers to the chip is described
5537 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
5538 * it is first placed into the on-chip ram. When the packet's length
5539 * is known, it walks down the TG3_BDINFO entries to select the ring.
5540 * Each TG3_BDINFO specifies a MAXLEN field; the first TG3_BDINFO
5541 * whose MAXLEN covers the new packet's length is chosen.
5543 * The "separate ring for rx status" scheme may sound queer, but it makes
5544 * sense from a cache coherency perspective. If only the host writes
5545 * to the buffer post rings, and only the chip writes to the rx status
5546 * rings, then cache lines never move beyond shared-modified state.
5547 * If both the host and chip were to write into the same ring, cache line
5548 * eviction could occur since both entities want it in an exclusive state.
5549 */
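/* Illustrative sketch, not driver code, of the writer split described
 * above:
 *
 *	host CPU --writes--> std/jumbo producer rings --reads--> chip
 *	host CPU <--reads--  rx return (status) ring  <--writes-- chip
 *
 * Each side only writes rings the other side only reads, so cache
 * lines avoid ping-ponging between exclusive owners.
 */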
5550 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5552 struct tg3 *tp = tnapi->tp;
5553 u32 work_mask, rx_std_posted = 0;
5554 u32 std_prod_idx, jmb_prod_idx;
5555 u32 sw_idx = tnapi->rx_rcb_ptr;
5556 u16 hw_idx;
5557 int received;
5558 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5560 hw_idx = *(tnapi->rx_rcb_prod_idx);
5561 /*
5562 * We need to order the read of hw_idx and the read of
5563 * the opaque cookie.
5564 */
5565 rmb();
5566 work_mask = 0;
5567 received = 0;
5568 std_prod_idx = tpr->rx_std_prod_idx;
5569 jmb_prod_idx = tpr->rx_jmb_prod_idx;
5570 while (sw_idx != hw_idx && budget > 0) {
5571 struct ring_info *ri;
5572 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5573 unsigned int len;
5574 struct sk_buff *skb;
5575 dma_addr_t dma_addr;
5576 u32 opaque_key, desc_idx, *post_ptr;
5578 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5579 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5580 if (opaque_key == RXD_OPAQUE_RING_STD) {
5581 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5582 dma_addr = dma_unmap_addr(ri, mapping);
5583 skb = ri->skb;
5584 post_ptr = &std_prod_idx;
5585 rx_std_posted++;
5586 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5587 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5588 dma_addr = dma_unmap_addr(ri, mapping);
5589 skb = ri->skb;
5590 post_ptr = &jmb_prod_idx;
5591 } else
5592 goto next_pkt_nopost;
5594 work_mask |= opaque_key;
5596 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5597 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5598 drop_it:
5599 tg3_recycle_rx(tnapi, tpr, opaque_key,
5600 desc_idx, *post_ptr);
5601 drop_it_no_recycle:
5602 /* Other statistics kept track of by card. */
5603 tp->rx_dropped++;
5604 goto next_pkt;
5607 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5608 ETH_FCS_LEN;
5610 if (len > TG3_RX_COPY_THRESH(tp)) {
5611 int skb_size;
5613 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
5614 *post_ptr);
5615 if (skb_size < 0)
5616 goto drop_it;
5618 pci_unmap_single(tp->pdev, dma_addr, skb_size,
5619 PCI_DMA_FROMDEVICE);
5621 /* Ensure that the update to the skb happens
5622 * after the usage of the old DMA mapping.
5623 */
5624 smp_wmb();
5626 ri->skb = NULL;
5628 skb_put(skb, len);
5629 } else {
5630 struct sk_buff *copy_skb;
5632 tg3_recycle_rx(tnapi, tpr, opaque_key,
5633 desc_idx, *post_ptr);
5635 copy_skb = netdev_alloc_skb(tp->dev, len +
5636 TG3_RAW_IP_ALIGN);
5637 if (copy_skb == NULL)
5638 goto drop_it_no_recycle;
5640 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
5641 skb_put(copy_skb, len);
5642 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5643 skb_copy_from_linear_data(skb, copy_skb->data, len);
5644 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5646 /* We'll reuse the original ring buffer. */
5647 skb = copy_skb;
5650 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5651 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5652 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5653 >> RXD_TCPCSUM_SHIFT) == 0xffff))
5654 skb->ip_summed = CHECKSUM_UNNECESSARY;
5655 else
5656 skb_checksum_none_assert(skb);
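/* The 0xffff test above relies on one's-complement arithmetic:
 * summing every 16-bit word of a correctly checksummed TCP/UDP
 * segment, including the checksum field itself, yields 0xffff, so a
 * hardware-reported sum of 0xffff means the checksum verified.
 */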
5658 skb->protocol = eth_type_trans(skb, tp->dev);
5660 if (len > (tp->dev->mtu + ETH_HLEN) &&
5661 skb->protocol != htons(ETH_P_8021Q)) {
5662 dev_kfree_skb(skb);
5663 goto drop_it_no_recycle;
5666 if (desc->type_flags & RXD_FLAG_VLAN &&
5667 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5668 __vlan_hwaccel_put_tag(skb,
5669 desc->err_vlan & RXD_VLAN_MASK);
5671 napi_gro_receive(&tnapi->napi, skb);
5673 received++;
5674 budget--;
5676 next_pkt:
5677 (*post_ptr)++;
5679 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5680 tpr->rx_std_prod_idx = std_prod_idx &
5681 tp->rx_std_ring_mask;
5682 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5683 tpr->rx_std_prod_idx);
5684 work_mask &= ~RXD_OPAQUE_RING_STD;
5685 rx_std_posted = 0;
5687 next_pkt_nopost:
5688 sw_idx++;
5689 sw_idx &= tp->rx_ret_ring_mask;
5691 /* Refresh hw_idx to see if there is new work */
5692 if (sw_idx == hw_idx) {
5693 hw_idx = *(tnapi->rx_rcb_prod_idx);
5694 rmb();
5698 /* ACK the status ring. */
5699 tnapi->rx_rcb_ptr = sw_idx;
5700 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5702 /* Refill RX ring(s). */
5703 if (!tg3_flag(tp, ENABLE_RSS)) {
5704 if (work_mask & RXD_OPAQUE_RING_STD) {
5705 tpr->rx_std_prod_idx = std_prod_idx &
5706 tp->rx_std_ring_mask;
5707 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5708 tpr->rx_std_prod_idx);
5710 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5711 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5712 tp->rx_jmb_ring_mask;
5713 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5714 tpr->rx_jmb_prod_idx);
5716 mmiowb();
5717 } else if (work_mask) {
5718 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5719 * updated before the producer indices can be updated.
5720 */
5721 smp_wmb();
5723 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5724 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5726 if (tnapi != &tp->napi[1])
5727 napi_schedule(&tp->napi[1].napi);
5730 return received;
5733 static void tg3_poll_link(struct tg3 *tp)
5735 /* handle link change and other phy events */
5736 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5737 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5739 if (sblk->status & SD_STATUS_LINK_CHG) {
5740 sblk->status = SD_STATUS_UPDATED |
5741 (sblk->status & ~SD_STATUS_LINK_CHG);
5742 spin_lock(&tp->lock);
5743 if (tg3_flag(tp, USE_PHYLIB)) {
5744 tw32_f(MAC_STATUS,
5745 (MAC_STATUS_SYNC_CHANGED |
5746 MAC_STATUS_CFG_CHANGED |
5747 MAC_STATUS_MI_COMPLETION |
5748 MAC_STATUS_LNKSTATE_CHANGED));
5749 udelay(40);
5750 } else
5751 tg3_setup_phy(tp, 0);
5752 spin_unlock(&tp->lock);
5757 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5758 struct tg3_rx_prodring_set *dpr,
5759 struct tg3_rx_prodring_set *spr)
5761 u32 si, di, cpycnt, src_prod_idx;
5762 int i, err = 0;
5764 while (1) {
5765 src_prod_idx = spr->rx_std_prod_idx;
5767 /* Make sure updates to the rx_std_buffers[] entries and the
5768 * standard producer index are seen in the correct order.
5769 */
5770 smp_rmb();
5772 if (spr->rx_std_cons_idx == src_prod_idx)
5773 break;
5775 if (spr->rx_std_cons_idx < src_prod_idx)
5776 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5777 else
5778 cpycnt = tp->rx_std_ring_mask + 1 -
5779 spr->rx_std_cons_idx;
5781 cpycnt = min(cpycnt,
5782 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5784 si = spr->rx_std_cons_idx;
5785 di = dpr->rx_std_prod_idx;
5787 for (i = di; i < di + cpycnt; i++) {
5788 if (dpr->rx_std_buffers[i].skb) {
5789 cpycnt = i - di;
5790 err = -ENOSPC;
5791 break;
5795 if (!cpycnt)
5796 break;
5798 /* Ensure that updates to the rx_std_buffers ring and the
5799 * shadowed hardware producer ring from tg3_recycle_rx() are
5800 * ordered correctly WRT the skb check above.
5801 */
5802 smp_rmb();
5804 memcpy(&dpr->rx_std_buffers[di],
5805 &spr->rx_std_buffers[si],
5806 cpycnt * sizeof(struct ring_info));
5808 for (i = 0; i < cpycnt; i++, di++, si++) {
5809 struct tg3_rx_buffer_desc *sbd, *dbd;
5810 sbd = &spr->rx_std[si];
5811 dbd = &dpr->rx_std[di];
5812 dbd->addr_hi = sbd->addr_hi;
5813 dbd->addr_lo = sbd->addr_lo;
5816 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5817 tp->rx_std_ring_mask;
5818 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5819 tp->rx_std_ring_mask;
5822 while (1) {
5823 src_prod_idx = spr->rx_jmb_prod_idx;
5825 /* Make sure updates to the rx_jmb_buffers[] entries and
5826 * the jumbo producer index are seen in the correct order.
5827 */
5828 smp_rmb();
5830 if (spr->rx_jmb_cons_idx == src_prod_idx)
5831 break;
5833 if (spr->rx_jmb_cons_idx < src_prod_idx)
5834 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5835 else
5836 cpycnt = tp->rx_jmb_ring_mask + 1 -
5837 spr->rx_jmb_cons_idx;
5839 cpycnt = min(cpycnt,
5840 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5842 si = spr->rx_jmb_cons_idx;
5843 di = dpr->rx_jmb_prod_idx;
5845 for (i = di; i < di + cpycnt; i++) {
5846 if (dpr->rx_jmb_buffers[i].skb) {
5847 cpycnt = i - di;
5848 err = -ENOSPC;
5849 break;
5853 if (!cpycnt)
5854 break;
5856 /* Ensure that updates to the rx_jmb_buffers ring and the
5857 * shadowed hardware producer ring from tg3_recycle_rx() are
5858 * ordered correctly WRT the skb check above.
5859 */
5860 smp_rmb();
5862 memcpy(&dpr->rx_jmb_buffers[di],
5863 &spr->rx_jmb_buffers[si],
5864 cpycnt * sizeof(struct ring_info));
5866 for (i = 0; i < cpycnt; i++, di++, si++) {
5867 struct tg3_rx_buffer_desc *sbd, *dbd;
5868 sbd = &spr->rx_jmb[si].std;
5869 dbd = &dpr->rx_jmb[di].std;
5870 dbd->addr_hi = sbd->addr_hi;
5871 dbd->addr_lo = sbd->addr_lo;
5874 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5875 tp->rx_jmb_ring_mask;
5876 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5877 tp->rx_jmb_ring_mask;
5880 return err;
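/* Usage note: on -ENOSPC (a destination slot was still occupied) the
 * caller, tg3_poll_work() below, writes tp->coal_now to HOSTCC_MODE to
 * force an immediate coalescence event so the transfer is retried on
 * a subsequent poll.
 */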
5883 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5885 struct tg3 *tp = tnapi->tp;
5887 /* run TX completion thread */
5888 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5889 tg3_tx(tnapi);
5890 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5891 return work_done;
5894 /* run RX thread, within the bounds set by NAPI.
5895 * All RX "locking" is done by ensuring outside
5896 * code synchronizes with tg3->napi.poll()
5897 */
5898 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5899 work_done += tg3_rx(tnapi, budget - work_done);
5901 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5902 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5903 int i, err = 0;
5904 u32 std_prod_idx = dpr->rx_std_prod_idx;
5905 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5907 for (i = 1; i < tp->irq_cnt; i++)
5908 err |= tg3_rx_prodring_xfer(tp, dpr,
5909 &tp->napi[i].prodring);
5911 wmb();
5913 if (std_prod_idx != dpr->rx_std_prod_idx)
5914 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5915 dpr->rx_std_prod_idx);
5917 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5918 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5919 dpr->rx_jmb_prod_idx);
5921 mmiowb();
5923 if (err)
5924 tw32_f(HOSTCC_MODE, tp->coal_now);
5927 return work_done;
5930 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5932 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5933 struct tg3 *tp = tnapi->tp;
5934 int work_done = 0;
5935 struct tg3_hw_status *sblk = tnapi->hw_status;
5937 while (1) {
5938 work_done = tg3_poll_work(tnapi, work_done, budget);
5940 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5941 goto tx_recovery;
5943 if (unlikely(work_done >= budget))
5944 break;
5946 /* tp->last_tag is used in tg3_int_reenable() below
5947 * to tell the hw how much work has been processed,
5948 * so we must read it before checking for more work.
5949 */
5950 tnapi->last_tag = sblk->status_tag;
5951 tnapi->last_irq_tag = tnapi->last_tag;
5952 rmb();
5954 /* check for RX/TX work to do */
5955 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5956 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5957 napi_complete(napi);
5958 /* Reenable interrupts. */
5959 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5960 mmiowb();
5961 break;
5965 return work_done;
5967 tx_recovery:
5968 /* work_done is guaranteed to be less than budget. */
5969 napi_complete(napi);
5970 schedule_work(&tp->reset_task);
5971 return work_done;
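/* Note on the re-enable write in tg3_poll_msix() above: the low bits
 * of the mailbox value are zero, which re-enables the vector's
 * interrupt, while the status tag in bits 31-24 (last_tag << 24)
 * tells the chip how far the driver has processed, so a new interrupt
 * is raised only for work bearing a newer tag.
 */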
5974 static void tg3_process_error(struct tg3 *tp)
5976 u32 val;
5977 bool real_error = false;
5979 if (tg3_flag(tp, ERROR_PROCESSED))
5980 return;
5982 /* Check Flow Attention register */
5983 val = tr32(HOSTCC_FLOW_ATTN);
5984 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5985 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5986 real_error = true;
5989 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5990 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5991 real_error = true;
5994 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5995 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
5996 real_error = true;
5999 if (!real_error)
6000 return;
6002 tg3_dump_state(tp);
6004 tg3_flag_set(tp, ERROR_PROCESSED);
6005 schedule_work(&tp->reset_task);
6008 static int tg3_poll(struct napi_struct *napi, int budget)
6010 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6011 struct tg3 *tp = tnapi->tp;
6012 int work_done = 0;
6013 struct tg3_hw_status *sblk = tnapi->hw_status;
6015 while (1) {
6016 if (sblk->status & SD_STATUS_ERROR)
6017 tg3_process_error(tp);
6019 tg3_poll_link(tp);
6021 work_done = tg3_poll_work(tnapi, work_done, budget);
6023 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6024 goto tx_recovery;
6026 if (unlikely(work_done >= budget))
6027 break;
6029 if (tg3_flag(tp, TAGGED_STATUS)) {
6030 /* tp->last_tag is used in tg3_int_reenable() below
6031 * to tell the hw how much work has been processed,
6032 * so we must read it before checking for more work.
6033 */
6034 tnapi->last_tag = sblk->status_tag;
6035 tnapi->last_irq_tag = tnapi->last_tag;
6036 rmb();
6037 } else
6038 sblk->status &= ~SD_STATUS_UPDATED;
6040 if (likely(!tg3_has_work(tnapi))) {
6041 napi_complete(napi);
6042 tg3_int_reenable(tnapi);
6043 break;
6047 return work_done;
6049 tx_recovery:
6050 /* work_done is guaranteed to be less than budget. */
6051 napi_complete(napi);
6052 schedule_work(&tp->reset_task);
6053 return work_done;
6056 static void tg3_napi_disable(struct tg3 *tp)
6058 int i;
6060 for (i = tp->irq_cnt - 1; i >= 0; i--)
6061 napi_disable(&tp->napi[i].napi);
6064 static void tg3_napi_enable(struct tg3 *tp)
6066 int i;
6068 for (i = 0; i < tp->irq_cnt; i++)
6069 napi_enable(&tp->napi[i].napi);
6072 static void tg3_napi_init(struct tg3 *tp)
6074 int i;
6076 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6077 for (i = 1; i < tp->irq_cnt; i++)
6078 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6081 static void tg3_napi_fini(struct tg3 *tp)
6083 int i;
6085 for (i = 0; i < tp->irq_cnt; i++)
6086 netif_napi_del(&tp->napi[i].napi);
6089 static inline void tg3_netif_stop(struct tg3 *tp)
6091 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6092 tg3_napi_disable(tp);
6093 netif_tx_disable(tp->dev);
6096 static inline void tg3_netif_start(struct tg3 *tp)
6098 /* NOTE: unconditional netif_tx_wake_all_queues is only
6099 * appropriate so long as all callers are assured to
6100 * have free tx slots (such as after tg3_init_hw)
6101 */
6102 netif_tx_wake_all_queues(tp->dev);
6104 tg3_napi_enable(tp);
6105 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6106 tg3_enable_ints(tp);
6109 static void tg3_irq_quiesce(struct tg3 *tp)
6111 int i;
6113 BUG_ON(tp->irq_sync);
6115 tp->irq_sync = 1;
6116 smp_mb();
6118 for (i = 0; i < tp->irq_cnt; i++)
6119 synchronize_irq(tp->napi[i].irq_vec);
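/* Illustrative pairing, not driver code: how tg3_irq_quiesce() above
 * cooperates with the tg3_irq_sync() test in the interrupt handlers.
 *
 *	CPU0 (quiesce)			CPU1 (ISR)
 *	tp->irq_sync = 1;		if (tg3_irq_sync(tp))
 *	smp_mb();				goto out;  (no NAPI)
 *	synchronize_irq(vec);	<- waits for handlers already running
 *
 * After synchronize_irq() returns, no handler can still schedule NAPI
 * against the quiesced device.
 */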
6122 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6123 * If irq_sync is non-zero, then the IRQ handler must be synchronized
6124 * with as well. Most of the time, this is not necessary except when
6125 * shutting down the device.
6126 */
6127 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6129 spin_lock_bh(&tp->lock);
6130 if (irq_sync)
6131 tg3_irq_quiesce(tp);
6134 static inline void tg3_full_unlock(struct tg3 *tp)
6136 spin_unlock_bh(&tp->lock);
6139 /* One-shot MSI handler - Chip automatically disables interrupt
6140 * after sending MSI so driver doesn't have to do it.
6141 */
6142 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6144 struct tg3_napi *tnapi = dev_id;
6145 struct tg3 *tp = tnapi->tp;
6147 prefetch(tnapi->hw_status);
6148 if (tnapi->rx_rcb)
6149 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6151 if (likely(!tg3_irq_sync(tp)))
6152 napi_schedule(&tnapi->napi);
6154 return IRQ_HANDLED;
6157 /* MSI ISR - No need to check for interrupt sharing and no need to
6158 * flush status block and interrupt mailbox. PCI ordering rules
6159 * guarantee that MSI will arrive after the status block.
6160 */
6161 static irqreturn_t tg3_msi(int irq, void *dev_id)
6163 struct tg3_napi *tnapi = dev_id;
6164 struct tg3 *tp = tnapi->tp;
6166 prefetch(tnapi->hw_status);
6167 if (tnapi->rx_rcb)
6168 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6169 /*
6170 * Writing any value to intr-mbox-0 clears PCI INTA# and
6171 * chip-internal interrupt pending events.
6172 * Writing non-zero to intr-mbox-0 additionally tells the
6173 * NIC to stop sending us irqs, engaging "in-intr-handler"
6174 * event coalescing.
6175 */
6176 tw32_mailbox(tnapi->int_mbox, 0x00000001);
6177 if (likely(!tg3_irq_sync(tp)))
6178 napi_schedule(&tnapi->napi);
6180 return IRQ_RETVAL(1);
6183 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6185 struct tg3_napi *tnapi = dev_id;
6186 struct tg3 *tp = tnapi->tp;
6187 struct tg3_hw_status *sblk = tnapi->hw_status;
6188 unsigned int handled = 1;
6190 /* In INTx mode, it is possible for the interrupt to arrive at
6191 * the CPU before the status block posted prior to the interrupt.
6192 * Reading the PCI State register will confirm whether the
6193 * interrupt is ours and will flush the status block.
6195 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6196 if (tg3_flag(tp, CHIP_RESETTING) ||
6197 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6198 handled = 0;
6199 goto out;
6204 * Writing any value to intr-mbox-0 clears PCI INTA# and
6205 * chip-internal interrupt pending events.
6206 * Writing non-zero to intr-mbox-0 additionally tells the
6207 * NIC to stop sending us irqs, engaging "in-intr-handler"
6208 * event coalescing.
6210 * Flush the mailbox to de-assert the IRQ immediately to prevent
6211 * spurious interrupts. The flush impacts performance but
6212 * excessive spurious interrupts can be worse in some cases.
6214 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6215 if (tg3_irq_sync(tp))
6216 goto out;
6217 sblk->status &= ~SD_STATUS_UPDATED;
6218 if (likely(tg3_has_work(tnapi))) {
6219 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6220 napi_schedule(&tnapi->napi);
6221 } else {
6222 /* No work, shared interrupt perhaps? Re-enable
6223 * interrupts, and flush that PCI write
6225 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6226 0x00000000);
6228 out:
6229 return IRQ_RETVAL(handled);
6232 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6234 struct tg3_napi *tnapi = dev_id;
6235 struct tg3 *tp = tnapi->tp;
6236 struct tg3_hw_status *sblk = tnapi->hw_status;
6237 unsigned int handled = 1;
6239 /* In INTx mode, it is possible for the interrupt to arrive at
6240 * the CPU before the status block posted prior to the interrupt is visible.
6241 * Reading the PCI State register will confirm whether the
6242 * interrupt is ours and will flush the status block.
6244 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6245 if (tg3_flag(tp, CHIP_RESETTING) ||
6246 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6247 handled = 0;
6248 goto out;
6253 * Writing any value to intr-mbox-0 clears PCI INTA# and
6254 * chip-internal interrupt pending events.
6255 * Writing non-zero to intr-mbox-0 additionally tells the
6256 * NIC to stop sending us irqs, engaging "in-intr-handler"
6257 * event coalescing.
6259 * Flush the mailbox to de-assert the IRQ immediately to prevent
6260 * spurious interrupts. The flush impacts performance but
6261 * excessive spurious interrupts can be worse in some cases.
6263 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6266 * In a shared interrupt configuration, sometimes other devices'
6267 * interrupts will scream. We record the current status tag here
6268 * so that the above check can report that the screaming interrupts
6269 * are unhandled. Eventually they will be silenced.
6271 tnapi->last_irq_tag = sblk->status_tag;
6273 if (tg3_irq_sync(tp))
6274 goto out;
6276 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6278 napi_schedule(&tnapi->napi);
6280 out:
6281 return IRQ_RETVAL(handled);
6284 /* ISR for interrupt test */
6285 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6287 struct tg3_napi *tnapi = dev_id;
6288 struct tg3 *tp = tnapi->tp;
6289 struct tg3_hw_status *sblk = tnapi->hw_status;
6291 if ((sblk->status & SD_STATUS_UPDATED) ||
6292 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6293 tg3_disable_ints(tp);
6294 return IRQ_RETVAL(1);
6296 return IRQ_RETVAL(0);
6299 static int tg3_init_hw(struct tg3 *, int);
6300 static int tg3_halt(struct tg3 *, int, int);
6302 /* Restart hardware after configuration changes, self-test, etc.
6303 * Invoked with tp->lock held.
6305 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
6306 __releases(tp->lock)
6307 __acquires(tp->lock)
6309 int err;
6311 err = tg3_init_hw(tp, reset_phy);
6312 if (err) {
6313 netdev_err(tp->dev,
6314 "Failed to re-initialize device, aborting\n");
6315 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6316 tg3_full_unlock(tp);
6317 del_timer_sync(&tp->timer);
6318 tp->irq_sync = 0;
6319 tg3_napi_enable(tp);
6320 dev_close(tp->dev);
6321 tg3_full_lock(tp, 0);
6323 return err;
6326 #ifdef CONFIG_NET_POLL_CONTROLLER
6327 static void tg3_poll_controller(struct net_device *dev)
6329 int i;
6330 struct tg3 *tp = netdev_priv(dev);
6332 for (i = 0; i < tp->irq_cnt; i++)
6333 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6335 #endif
6337 static void tg3_reset_task(struct work_struct *work)
6339 struct tg3 *tp = container_of(work, struct tg3, reset_task);
6340 int err;
6341 unsigned int restart_timer;
6343 tg3_full_lock(tp, 0);
6345 if (!netif_running(tp->dev)) {
6346 tg3_full_unlock(tp);
6347 return;
6350 tg3_full_unlock(tp);
6352 tg3_phy_stop(tp);
6354 tg3_netif_stop(tp);
6356 tg3_full_lock(tp, 1);
6358 restart_timer = tg3_flag(tp, RESTART_TIMER);
6359 tg3_flag_clear(tp, RESTART_TIMER);
6361 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
6362 tp->write32_tx_mbox = tg3_write32_tx_mbox;
6363 tp->write32_rx_mbox = tg3_write_flush_reg32;
6364 tg3_flag_set(tp, MBOX_WRITE_REORDER);
6365 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6368 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
6369 err = tg3_init_hw(tp, 1);
6370 if (err)
6371 goto out;
6373 tg3_netif_start(tp);
6375 if (restart_timer)
6376 mod_timer(&tp->timer, jiffies + 1);
6378 out:
6379 tg3_full_unlock(tp);
6381 if (!err)
6382 tg3_phy_start(tp);
6385 static void tg3_tx_timeout(struct net_device *dev)
6387 struct tg3 *tp = netdev_priv(dev);
6389 if (netif_msg_tx_err(tp)) {
6390 netdev_err(dev, "transmit timed out, resetting\n");
6391 tg3_dump_state(tp);
6394 schedule_work(&tp->reset_task);
6397 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6398 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6400 u32 base = (u32) mapping & 0xffffffff;
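/* If base + len + 8 wraps past 32 bits, the buffer straddles a 4GB
 * boundary that the DMA engine cannot cross in a single transfer.
 */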
6402 return (base > 0xffffdcc0) && (base + len + 8 < base);
6405 /* Test for DMA addresses > 40 bits */
6406 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6407 int len)
6409 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6410 if (tg3_flag(tp, 40BIT_DMA_BUG))
6411 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6412 return 0;
6413 #else
6414 return 0;
6415 #endif
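/* Fill one hardware TX buffer descriptor: the 64-bit DMA address is
 * split across addr_hi/addr_lo, and len, flags, mss and vlan are
 * packed into the remaining two words.
 */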
6418 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6419 dma_addr_t mapping, u32 len, u32 flags,
6420 u32 mss, u32 vlan)
6422 txbd->addr_hi = ((u64) mapping >> 32);
6423 txbd->addr_lo = ((u64) mapping & 0xffffffff);
6424 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6425 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6428 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6429 dma_addr_t map, u32 len, u32 flags,
6430 u32 mss, u32 vlan)
6432 struct tg3 *tp = tnapi->tp;
6433 bool hwbug = false;
6435 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6436 hwbug = true;
6438 if (tg3_4g_overflow_test(map, len))
6439 hwbug = true;
6441 if (tg3_40bit_overflow_test(tp, map, len))
6442 hwbug = true;
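/* Chips with a 4K TX FIFO cannot take a single BD larger than
 * TG3_TX_BD_DMA_MAX, so carve oversized buffers into chunks and
 * rebalance the split so no leftover piece is 8 bytes or less.
 */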
6444 if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
6445 u32 tmp_flag = flags & ~TXD_FLAG_END;
6446 while (len > TG3_TX_BD_DMA_MAX) {
6447 u32 frag_len = TG3_TX_BD_DMA_MAX;
6448 len -= TG3_TX_BD_DMA_MAX;
6450 if (len) {
6451 tnapi->tx_buffers[*entry].fragmented = true;
6452 /* Avoid the 8byte DMA problem */
6453 if (len <= 8) {
6454 len += TG3_TX_BD_DMA_MAX / 2;
6455 frag_len = TG3_TX_BD_DMA_MAX / 2;
6457 } else
6458 tmp_flag = flags;
6460 if (*budget) {
6461 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6462 frag_len, tmp_flag, mss, vlan);
6463 (*budget)--;
6464 *entry = NEXT_TX(*entry);
6465 } else {
6466 hwbug = true;
6467 break;
6470 map += frag_len;
6473 if (len) {
6474 if (*budget) {
6475 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6476 len, flags, mss, vlan);
6477 (*budget)--;
6478 *entry = NEXT_TX(*entry);
6479 } else {
6480 hwbug = true;
6483 } else {
6484 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6485 len, flags, mss, vlan);
6486 *entry = NEXT_TX(*entry);
6489 return hwbug;
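/* Unmap the DMA mappings of one transmitted skb, walking the extra
 * descriptor slots consumed when tg3_tx_frag_set() fragmented a
 * buffer for the 4K FIFO workaround.
 */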
6492 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6494 int i;
6495 struct sk_buff *skb;
6496 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6498 skb = txb->skb;
6499 txb->skb = NULL;
6501 pci_unmap_single(tnapi->tp->pdev,
6502 dma_unmap_addr(txb, mapping),
6503 skb_headlen(skb),
6504 PCI_DMA_TODEVICE);
6506 while (txb->fragmented) {
6507 txb->fragmented = false;
6508 entry = NEXT_TX(entry);
6509 txb = &tnapi->tx_buffers[entry];
6512 for (i = 0; i < last; i++) {
6513 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6515 entry = NEXT_TX(entry);
6516 txb = &tnapi->tx_buffers[entry];
6518 pci_unmap_page(tnapi->tp->pdev,
6519 dma_unmap_addr(txb, mapping),
6520 skb_frag_size(frag), PCI_DMA_TODEVICE);
6522 while (txb->fragmented) {
6523 txb->fragmented = false;
6524 entry = NEXT_TX(entry);
6525 txb = &tnapi->tx_buffers[entry];
6530 /* Work around the 4GB and 40-bit hardware DMA bugs. */
6531 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6532 struct sk_buff **pskb,
6533 u32 *entry, u32 *budget,
6534 u32 base_flags, u32 mss, u32 vlan)
6536 struct tg3 *tp = tnapi->tp;
6537 struct sk_buff *new_skb, *skb = *pskb;
6538 dma_addr_t new_addr = 0;
6539 int ret = 0;
6541 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6542 new_skb = skb_copy(skb, GFP_ATOMIC);
6543 else {
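/* Copy with enough extra headroom that the new data pointer
 * can start on a 4-byte aligned address.
 */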
6544 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6546 new_skb = skb_copy_expand(skb,
6547 skb_headroom(skb) + more_headroom,
6548 skb_tailroom(skb), GFP_ATOMIC);
6551 if (!new_skb) {
6552 ret = -1;
6553 } else {
6554 /* New SKB is guaranteed to be linear. */
6555 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6556 PCI_DMA_TODEVICE);
6557 /* Make sure the mapping succeeded */
6558 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6559 dev_kfree_skb(new_skb);
6560 ret = -1;
6561 } else {
6562 base_flags |= TXD_FLAG_END;
6564 tnapi->tx_buffers[*entry].skb = new_skb;
6565 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6566 mapping, new_addr);
6568 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6569 new_skb->len, base_flags,
6570 mss, vlan)) {
6571 tg3_tx_skb_unmap(tnapi, *entry, 0);
6572 dev_kfree_skb(new_skb);
6573 ret = -1;
6578 dev_kfree_skb(skb);
6579 *pskb = new_skb;
6580 return ret;
6583 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6585 /* Use GSO to work around a rare TSO bug that may be triggered when the
6586 * TSO header is greater than 80 bytes.
6588 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6590 struct sk_buff *segs, *nskb;
6591 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6593 /* Estimate the number of fragments in the worst case */
6594 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6595 netif_stop_queue(tp->dev);
6597 /* netif_stop_queue() must be done before checking
6598 * tx index in tg3_tx_avail() below, because in
6599 * tg3_tx(), we update tx index before checking for
6600 * netif_tx_queue_stopped().
6602 smp_mb();
6603 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6604 return NETDEV_TX_BUSY;
6606 netif_wake_queue(tp->dev);
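/* Segment the packet in software with GSO, then send each resulting
 * frame through the normal transmit path.
 */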
6609 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6610 if (IS_ERR(segs))
6611 goto tg3_tso_bug_end;
6613 do {
6614 nskb = segs;
6615 segs = segs->next;
6616 nskb->next = NULL;
6617 tg3_start_xmit(nskb, tp->dev);
6618 } while (segs);
6620 tg3_tso_bug_end:
6621 dev_kfree_skb(skb);
6623 return NETDEV_TX_OK;
6626 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6627 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6629 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6631 struct tg3 *tp = netdev_priv(dev);
6632 u32 len, entry, base_flags, mss, vlan = 0;
6633 u32 budget;
6634 int i = -1, would_hit_hwbug;
6635 dma_addr_t mapping;
6636 struct tg3_napi *tnapi;
6637 struct netdev_queue *txq;
6638 unsigned int last;
6640 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6641 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6642 if (tg3_flag(tp, ENABLE_TSS))
6643 tnapi++;
6645 budget = tg3_tx_avail(tnapi);
6647 /* We are running in BH disabled context with netif_tx_lock
6648 * and TX reclaim runs via tp->napi.poll inside of a software
6649 * interrupt. Furthermore, IRQ processing runs lockless so we have
6650 * no IRQ context deadlocks to worry about either. Rejoice!
6652 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6653 if (!netif_tx_queue_stopped(txq)) {
6654 netif_tx_stop_queue(txq);
6656 /* This is a hard error, log it. */
6657 netdev_err(dev,
6658 "BUG! Tx Ring full when queue awake!\n");
6660 return NETDEV_TX_BUSY;
6663 entry = tnapi->tx_prod;
6664 base_flags = 0;
6665 if (skb->ip_summed == CHECKSUM_PARTIAL)
6666 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6668 mss = skb_shinfo(skb)->gso_size;
6669 if (mss) {
6670 struct iphdr *iph;
6671 u32 tcp_opt_len, hdr_len;
6673 if (skb_header_cloned(skb) &&
6674 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6675 goto drop;
6677 iph = ip_hdr(skb);
6678 tcp_opt_len = tcp_optlen(skb);
6680 if (skb_is_gso_v6(skb)) {
6681 hdr_len = skb_headlen(skb) - ETH_HLEN;
6682 } else {
6683 u32 ip_tcp_len;
6685 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6686 hdr_len = ip_tcp_len + tcp_opt_len;
6688 iph->check = 0;
6689 iph->tot_len = htons(mss + hdr_len);
6692 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6693 tg3_flag(tp, TSO_BUG))
6694 return tg3_tso_bug(tp, skb);
6696 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6697 TXD_FLAG_CPU_POST_DMA);
6699 if (tg3_flag(tp, HW_TSO_1) ||
6700 tg3_flag(tp, HW_TSO_2) ||
6701 tg3_flag(tp, HW_TSO_3)) {
6702 tcp_hdr(skb)->check = 0;
6703 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6704 } else
6705 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6706 iph->daddr, 0,
6707 IPPROTO_TCP,
6708 0);
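/* HW_TSO_3 chips expect the TSO header length scattered across
 * spare bits of the mss and base_flags words; the masks below
 * re-encode hdr_len into that layout.
 */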
6710 if (tg3_flag(tp, HW_TSO_3)) {
6711 mss |= (hdr_len & 0xc) << 12;
6712 if (hdr_len & 0x10)
6713 base_flags |= 0x00000010;
6714 base_flags |= (hdr_len & 0x3e0) << 5;
6715 } else if (tg3_flag(tp, HW_TSO_2))
6716 mss |= hdr_len << 9;
6717 else if (tg3_flag(tp, HW_TSO_1) ||
6718 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6719 if (tcp_opt_len || iph->ihl > 5) {
6720 int tsflags;
6722 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6723 mss |= (tsflags << 11);
6725 } else {
6726 if (tcp_opt_len || iph->ihl > 5) {
6727 int tsflags;
6729 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6730 base_flags |= tsflags << 12;
6735 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6736 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6737 base_flags |= TXD_FLAG_JMB_PKT;
6739 if (vlan_tx_tag_present(skb)) {
6740 base_flags |= TXD_FLAG_VLAN;
6741 vlan = vlan_tx_tag_get(skb);
6744 len = skb_headlen(skb);
6746 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6747 if (pci_dma_mapping_error(tp->pdev, mapping))
6748 goto drop;
6751 tnapi->tx_buffers[entry].skb = skb;
6752 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6754 would_hit_hwbug = 0;
6756 if (tg3_flag(tp, 5701_DMA_BUG))
6757 would_hit_hwbug = 1;
6759 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6760 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6761 mss, vlan))
6762 would_hit_hwbug = 1;
6764 /* Now loop through additional data fragments, and queue them. */
6765 if (skb_shinfo(skb)->nr_frags > 0) {
6766 u32 tmp_mss = mss;
6768 if (!tg3_flag(tp, HW_TSO_1) &&
6769 !tg3_flag(tp, HW_TSO_2) &&
6770 !tg3_flag(tp, HW_TSO_3))
6771 tmp_mss = 0;
6773 last = skb_shinfo(skb)->nr_frags - 1;
6774 for (i = 0; i <= last; i++) {
6775 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6777 len = skb_frag_size(frag);
6778 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6779 len, DMA_TO_DEVICE);
6781 tnapi->tx_buffers[entry].skb = NULL;
6782 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6783 mapping);
6784 if (dma_mapping_error(&tp->pdev->dev, mapping))
6785 goto dma_error;
6787 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6788 len, base_flags |
6789 ((i == last) ? TXD_FLAG_END : 0),
6790 tmp_mss, vlan))
6791 would_hit_hwbug = 1;
6795 if (would_hit_hwbug) {
6796 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6798 /* If the workaround fails due to memory/mapping
6799 * failure, silently drop this packet.
6801 entry = tnapi->tx_prod;
6802 budget = tg3_tx_avail(tnapi);
6803 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
6804 base_flags, mss, vlan))
6805 goto drop_nofree;
6808 skb_tx_timestamp(skb);
6810 /* Packets are ready, update Tx producer idx local and on card. */
6811 tw32_tx_mbox(tnapi->prodmbox, entry);
6813 tnapi->tx_prod = entry;
6814 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6815 netif_tx_stop_queue(txq);
6817 /* netif_tx_stop_queue() must be done before checking
6818 * tx index in tg3_tx_avail() below, because in
6819 * tg3_tx(), we update tx index before checking for
6820 * netif_tx_queue_stopped().
6822 smp_mb();
6823 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6824 netif_tx_wake_queue(txq);
6827 mmiowb();
6828 return NETDEV_TX_OK;
6830 dma_error:
6831 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6832 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6833 drop:
6834 dev_kfree_skb(skb);
6835 drop_nofree:
6836 tp->tx_dropped++;
6837 return NETDEV_TX_OK;
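/* Enable or disable internal MAC loopback, in which the MAC's
 * transmit output is routed straight back into its receive path.
 */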
6840 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
6842 if (enable) {
6843 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
6844 MAC_MODE_PORT_MODE_MASK);
6846 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6848 if (!tg3_flag(tp, 5705_PLUS))
6849 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6851 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
6852 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
6853 else
6854 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
6855 } else {
6856 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6858 if (tg3_flag(tp, 5705_PLUS) ||
6859 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
6860 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
6861 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
6864 tw32(MAC_MODE, tp->mac_mode);
6865 udelay(40);
6868 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
6870 u32 val, bmcr, mac_mode, ptest = 0;
6872 tg3_phy_toggle_apd(tp, false);
6873 tg3_phy_toggle_automdix(tp, 0);
6875 if (extlpbk && tg3_phy_set_extloopbk(tp))
6876 return -EIO;
6878 bmcr = BMCR_FULLDPLX;
6879 switch (speed) {
6880 case SPEED_10:
6881 break;
6882 case SPEED_100:
6883 bmcr |= BMCR_SPEED100;
6884 break;
6885 case SPEED_1000:
6886 default:
6887 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
6888 speed = SPEED_100;
6889 bmcr |= BMCR_SPEED100;
6890 } else {
6891 speed = SPEED_1000;
6892 bmcr |= BMCR_SPEED1000;
6896 if (extlpbk) {
6897 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
6898 tg3_readphy(tp, MII_CTRL1000, &val);
6899 val |= CTL1000_AS_MASTER |
6900 CTL1000_ENABLE_MASTER;
6901 tg3_writephy(tp, MII_CTRL1000, val);
6902 } else {
6903 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
6904 MII_TG3_FET_PTEST_TRIM_2;
6905 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
6907 } else
6908 bmcr |= BMCR_LOOPBACK;
6910 tg3_writephy(tp, MII_BMCR, bmcr);
6912 /* The write needs to be flushed for the FETs */
6913 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
6914 tg3_readphy(tp, MII_BMCR, &bmcr);
6916 udelay(40);
6918 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
6919 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
6920 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
6921 MII_TG3_FET_PTEST_FRC_TX_LINK |
6922 MII_TG3_FET_PTEST_FRC_TX_LOCK);
6924 /* The write needs to be flushed for the AC131 */
6925 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
6928 /* Reset to prevent losing 1st rx packet intermittently */
6929 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
6930 tg3_flag(tp, 5780_CLASS)) {
6931 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6932 udelay(10);
6933 tw32_f(MAC_RX_MODE, tp->rx_mode);
6936 mac_mode = tp->mac_mode &
6937 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
6938 if (speed == SPEED_1000)
6939 mac_mode |= MAC_MODE_PORT_MODE_GMII;
6940 else
6941 mac_mode |= MAC_MODE_PORT_MODE_MII;
6943 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
6944 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
6946 if (masked_phy_id == TG3_PHY_ID_BCM5401)
6947 mac_mode &= ~MAC_MODE_LINK_POLARITY;
6948 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
6949 mac_mode |= MAC_MODE_LINK_POLARITY;
6951 tg3_writephy(tp, MII_TG3_EXT_CTRL,
6952 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
6955 tw32(MAC_MODE, mac_mode);
6956 udelay(40);
6958 return 0;
6961 static void tg3_set_loopback(struct net_device *dev, u32 features)
6963 struct tg3 *tp = netdev_priv(dev);
6965 if (features & NETIF_F_LOOPBACK) {
6966 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6967 return;
6969 spin_lock_bh(&tp->lock);
6970 tg3_mac_loopback(tp, true);
6971 netif_carrier_on(tp->dev);
6972 spin_unlock_bh(&tp->lock);
6973 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6974 } else {
6975 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6976 return;
6978 spin_lock_bh(&tp->lock);
6979 tg3_mac_loopback(tp, false);
6980 /* Force link status check */
6981 tg3_setup_phy(tp, 1);
6982 spin_unlock_bh(&tp->lock);
6983 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6987 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6989 struct tg3 *tp = netdev_priv(dev);
6991 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6992 features &= ~NETIF_F_ALL_TSO;
6994 return features;
6997 static int tg3_set_features(struct net_device *dev, u32 features)
6999 u32 changed = dev->features ^ features;
7001 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7002 tg3_set_loopback(dev, features);
7004 return 0;
7007 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
7008 int new_mtu)
7010 dev->mtu = new_mtu;
7012 if (new_mtu > ETH_DATA_LEN) {
7013 if (tg3_flag(tp, 5780_CLASS)) {
7014 netdev_update_features(dev);
7015 tg3_flag_clear(tp, TSO_CAPABLE);
7016 } else {
7017 tg3_flag_set(tp, JUMBO_RING_ENABLE);
7019 } else {
7020 if (tg3_flag(tp, 5780_CLASS)) {
7021 tg3_flag_set(tp, TSO_CAPABLE);
7022 netdev_update_features(dev);
7024 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
7028 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
7030 struct tg3 *tp = netdev_priv(dev);
7031 int err;
7033 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
7034 return -EINVAL;
7036 if (!netif_running(dev)) {
7037 /* We'll just catch it later when the
7038 * device is brought up.
7040 tg3_set_mtu(dev, tp, new_mtu);
7041 return 0;
7044 tg3_phy_stop(tp);
7046 tg3_netif_stop(tp);
7048 tg3_full_lock(tp, 1);
7050 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7052 tg3_set_mtu(dev, tp, new_mtu);
7054 err = tg3_restart_hw(tp, 0);
7056 if (!err)
7057 tg3_netif_start(tp);
7059 tg3_full_unlock(tp);
7061 if (!err)
7062 tg3_phy_start(tp);
7064 return err;
7067 static void tg3_rx_prodring_free(struct tg3 *tp,
7068 struct tg3_rx_prodring_set *tpr)
7070 int i;
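/* Per-vector rings only own the buffers sitting between their
 * consumer and producer indexes; the default ring frees every slot.
 */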
7072 if (tpr != &tp->napi[0].prodring) {
7073 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7074 i = (i + 1) & tp->rx_std_ring_mask)
7075 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
7076 tp->rx_pkt_map_sz);
7078 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7079 for (i = tpr->rx_jmb_cons_idx;
7080 i != tpr->rx_jmb_prod_idx;
7081 i = (i + 1) & tp->rx_jmb_ring_mask) {
7082 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
7083 TG3_RX_JMB_MAP_SZ);
7087 return;
7090 for (i = 0; i <= tp->rx_std_ring_mask; i++)
7091 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
7092 tp->rx_pkt_map_sz);
7094 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7095 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7096 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
7097 TG3_RX_JMB_MAP_SZ);
7101 /* Initialize rx rings for packet processing.
7103 * The chip has been shut down and the driver detached from
7104 * the networking stack, so no interrupts or new tx packets will
7105 * end up in the driver. tp->{tx,}lock are held and thus
7106 * we may not sleep.
7108 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7109 struct tg3_rx_prodring_set *tpr)
7111 u32 i, rx_pkt_dma_sz;
7113 tpr->rx_std_cons_idx = 0;
7114 tpr->rx_std_prod_idx = 0;
7115 tpr->rx_jmb_cons_idx = 0;
7116 tpr->rx_jmb_prod_idx = 0;
7118 if (tpr != &tp->napi[0].prodring) {
7119 memset(&tpr->rx_std_buffers[0], 0,
7120 TG3_RX_STD_BUFF_RING_SIZE(tp));
7121 if (tpr->rx_jmb_buffers)
7122 memset(&tpr->rx_jmb_buffers[0], 0,
7123 TG3_RX_JMB_BUFF_RING_SIZE(tp));
7124 goto done;
7127 /* Zero out all descriptors. */
7128 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7130 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7131 if (tg3_flag(tp, 5780_CLASS) &&
7132 tp->dev->mtu > ETH_DATA_LEN)
7133 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7134 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7136 /* Initialize invariants of the rings; we only set this
7137 * stuff once. This works because the card does not
7138 * write into the rx buffer posting rings.
7140 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7141 struct tg3_rx_buffer_desc *rxd;
7143 rxd = &tpr->rx_std[i];
7144 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7145 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7146 rxd->opaque = (RXD_OPAQUE_RING_STD |
7147 (i << RXD_OPAQUE_INDEX_SHIFT));
7150 /* Now allocate fresh SKBs for each rx ring. */
7151 for (i = 0; i < tp->rx_pending; i++) {
7152 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7153 netdev_warn(tp->dev,
7154 "Using a smaller RX standard ring. Only "
7155 "%d out of %d buffers were allocated "
7156 "successfully\n", i, tp->rx_pending);
7157 if (i == 0)
7158 goto initfail;
7159 tp->rx_pending = i;
7160 break;
7164 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7165 goto done;
7167 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7169 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7170 goto done;
7172 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7173 struct tg3_rx_buffer_desc *rxd;
7175 rxd = &tpr->rx_jmb[i].std;
7176 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7177 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7178 RXD_FLAG_JUMBO;
7179 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7180 (i << RXD_OPAQUE_INDEX_SHIFT));
7183 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7184 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7185 netdev_warn(tp->dev,
7186 "Using a smaller RX jumbo ring. Only %d "
7187 "out of %d buffers were allocated "
7188 "successfully\n", i, tp->rx_jumbo_pending);
7189 if (i == 0)
7190 goto initfail;
7191 tp->rx_jumbo_pending = i;
7192 break;
7196 done:
7197 return 0;
7199 initfail:
7200 tg3_rx_prodring_free(tp, tpr);
7201 return -ENOMEM;
7204 static void tg3_rx_prodring_fini(struct tg3 *tp,
7205 struct tg3_rx_prodring_set *tpr)
7207 kfree(tpr->rx_std_buffers);
7208 tpr->rx_std_buffers = NULL;
7209 kfree(tpr->rx_jmb_buffers);
7210 tpr->rx_jmb_buffers = NULL;
7211 if (tpr->rx_std) {
7212 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7213 tpr->rx_std, tpr->rx_std_mapping);
7214 tpr->rx_std = NULL;
7216 if (tpr->rx_jmb) {
7217 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7218 tpr->rx_jmb, tpr->rx_jmb_mapping);
7219 tpr->rx_jmb = NULL;
7223 static int tg3_rx_prodring_init(struct tg3 *tp,
7224 struct tg3_rx_prodring_set *tpr)
7226 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7227 GFP_KERNEL);
7228 if (!tpr->rx_std_buffers)
7229 return -ENOMEM;
7231 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7232 TG3_RX_STD_RING_BYTES(tp),
7233 &tpr->rx_std_mapping,
7234 GFP_KERNEL);
7235 if (!tpr->rx_std)
7236 goto err_out;
7238 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7239 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7240 GFP_KERNEL);
7241 if (!tpr->rx_jmb_buffers)
7242 goto err_out;
7244 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7245 TG3_RX_JMB_RING_BYTES(tp),
7246 &tpr->rx_jmb_mapping,
7247 GFP_KERNEL);
7248 if (!tpr->rx_jmb)
7249 goto err_out;
7252 return 0;
7254 err_out:
7255 tg3_rx_prodring_fini(tp, tpr);
7256 return -ENOMEM;
7259 /* Free up pending packets in all rx/tx rings.
7261 * The chip has been shut down and the driver detached from
7262 * the networking stack, so no interrupts or new tx packets will
7263 * end up in the driver. tp->{tx,}lock is not held and we are not
7264 * in an interrupt context and thus may sleep.
7266 static void tg3_free_rings(struct tg3 *tp)
7268 int i, j;
7270 for (j = 0; j < tp->irq_cnt; j++) {
7271 struct tg3_napi *tnapi = &tp->napi[j];
7273 tg3_rx_prodring_free(tp, &tnapi->prodring);
7275 if (!tnapi->tx_buffers)
7276 continue;
7278 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7279 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7281 if (!skb)
7282 continue;
7284 tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags);
7286 dev_kfree_skb_any(skb);
7291 /* Initialize tx/rx rings for packet processing.
7293 * The chip has been shut down and the driver detached from
7294 * the networking stack, so no interrupts or new tx packets will
7295 * end up in the driver. tp->{tx,}lock are held and thus
7296 * we may not sleep.
7298 static int tg3_init_rings(struct tg3 *tp)
7300 int i;
7302 /* Free up all the SKBs. */
7303 tg3_free_rings(tp);
7305 for (i = 0; i < tp->irq_cnt; i++) {
7306 struct tg3_napi *tnapi = &tp->napi[i];
7308 tnapi->last_tag = 0;
7309 tnapi->last_irq_tag = 0;
7310 tnapi->hw_status->status = 0;
7311 tnapi->hw_status->status_tag = 0;
7312 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7314 tnapi->tx_prod = 0;
7315 tnapi->tx_cons = 0;
7316 if (tnapi->tx_ring)
7317 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7319 tnapi->rx_rcb_ptr = 0;
7320 if (tnapi->rx_rcb)
7321 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7323 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7324 tg3_free_rings(tp);
7325 return -ENOMEM;
7329 return 0;
7333 * Must not be invoked with interrupt sources disabled and
7334 * the hardware shut down.
7336 static void tg3_free_consistent(struct tg3 *tp)
7338 int i;
7340 for (i = 0; i < tp->irq_cnt; i++) {
7341 struct tg3_napi *tnapi = &tp->napi[i];
7343 if (tnapi->tx_ring) {
7344 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7345 tnapi->tx_ring, tnapi->tx_desc_mapping);
7346 tnapi->tx_ring = NULL;
7349 kfree(tnapi->tx_buffers);
7350 tnapi->tx_buffers = NULL;
7352 if (tnapi->rx_rcb) {
7353 dma_free_coherent(&tp->pdev->dev,
7354 TG3_RX_RCB_RING_BYTES(tp),
7355 tnapi->rx_rcb,
7356 tnapi->rx_rcb_mapping);
7357 tnapi->rx_rcb = NULL;
7360 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7362 if (tnapi->hw_status) {
7363 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7364 tnapi->hw_status,
7365 tnapi->status_mapping);
7366 tnapi->hw_status = NULL;
7370 if (tp->hw_stats) {
7371 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7372 tp->hw_stats, tp->stats_mapping);
7373 tp->hw_stats = NULL;
7378 * Must not be invoked with interrupt sources disabled and
7379 * the hardware shut down. Can sleep.
7381 static int tg3_alloc_consistent(struct tg3 *tp)
7383 int i;
7385 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7386 sizeof(struct tg3_hw_stats),
7387 &tp->stats_mapping,
7388 GFP_KERNEL);
7389 if (!tp->hw_stats)
7390 goto err_out;
7392 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7394 for (i = 0; i < tp->irq_cnt; i++) {
7395 struct tg3_napi *tnapi = &tp->napi[i];
7396 struct tg3_hw_status *sblk;
7398 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7399 TG3_HW_STATUS_SIZE,
7400 &tnapi->status_mapping,
7401 GFP_KERNEL);
7402 if (!tnapi->hw_status)
7403 goto err_out;
7405 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7406 sblk = tnapi->hw_status;
7408 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7409 goto err_out;
7411 /* If multivector TSS is enabled, vector 0 does not handle
7412 * tx interrupts. Don't allocate any resources for it.
7414 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7415 (i && tg3_flag(tp, ENABLE_TSS))) {
7416 tnapi->tx_buffers = kzalloc(
7417 sizeof(struct tg3_tx_ring_info) *
7418 TG3_TX_RING_SIZE, GFP_KERNEL);
7419 if (!tnapi->tx_buffers)
7420 goto err_out;
7422 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7423 TG3_TX_RING_BYTES,
7424 &tnapi->tx_desc_mapping,
7425 GFP_KERNEL);
7426 if (!tnapi->tx_ring)
7427 goto err_out;
7431 * When RSS is enabled, the status block format changes
7432 * slightly. The "rx_jumbo_consumer", "reserved",
7433 * and "rx_mini_consumer" members get mapped to the
7434 * other three rx return ring producer indexes.
7436 switch (i) {
7437 default:
7438 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7439 break;
7440 case 2:
7441 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7442 break;
7443 case 3:
7444 tnapi->rx_rcb_prod_idx = &sblk->reserved;
7445 break;
7446 case 4:
7447 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7448 break;
7452 * If multivector RSS is enabled, vector 0 does not handle
7453 * rx or tx interrupts. Don't allocate any resources for it.
7455 if (!i && tg3_flag(tp, ENABLE_RSS))
7456 continue;
7458 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7459 TG3_RX_RCB_RING_BYTES(tp),
7460 &tnapi->rx_rcb_mapping,
7461 GFP_KERNEL);
7462 if (!tnapi->rx_rcb)
7463 goto err_out;
7465 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7468 return 0;
7470 err_out:
7471 tg3_free_consistent(tp);
7472 return -ENOMEM;
7475 #define MAX_WAIT_CNT 1000
7477 /* To stop a block, clear the enable bit and poll till it
7478 * clears. tp->lock is held.
7480 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7482 unsigned int i;
7483 u32 val;
7485 if (tg3_flag(tp, 5705_PLUS)) {
7486 switch (ofs) {
7487 case RCVLSC_MODE:
7488 case DMAC_MODE:
7489 case MBFREE_MODE:
7490 case BUFMGR_MODE:
7491 case MEMARB_MODE:
7492 /* We can't enable/disable these bits of the
7493 * 5705/5750, so just report success.
7495 return 0;
7497 default:
7498 break;
7502 val = tr32(ofs);
7503 val &= ~enable_bit;
7504 tw32_f(ofs, val);
7506 for (i = 0; i < MAX_WAIT_CNT; i++) {
7507 udelay(100);
7508 val = tr32(ofs);
7509 if ((val & enable_bit) == 0)
7510 break;
7513 if (i == MAX_WAIT_CNT && !silent) {
7514 dev_err(&tp->pdev->dev,
7515 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7516 ofs, enable_bit);
7517 return -ENODEV;
7520 return 0;
7523 /* tp->lock is held. */
7524 static int tg3_abort_hw(struct tg3 *tp, int silent)
7526 int i, err;
7528 tg3_disable_ints(tp);
7530 tp->rx_mode &= ~RX_MODE_ENABLE;
7531 tw32_f(MAC_RX_MODE, tp->rx_mode);
7532 udelay(10);
7534 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7535 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7536 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7537 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7538 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7539 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7541 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7542 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7543 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7544 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7545 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7546 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7547 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7549 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7550 tw32_f(MAC_MODE, tp->mac_mode);
7551 udelay(40);
7553 tp->tx_mode &= ~TX_MODE_ENABLE;
7554 tw32_f(MAC_TX_MODE, tp->tx_mode);
7556 for (i = 0; i < MAX_WAIT_CNT; i++) {
7557 udelay(100);
7558 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7559 break;
7561 if (i >= MAX_WAIT_CNT) {
7562 dev_err(&tp->pdev->dev,
7563 "%s timed out, TX_MODE_ENABLE will not clear "
7564 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7565 err |= -ENODEV;
7568 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7569 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7570 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7572 tw32(FTQ_RESET, 0xffffffff);
7573 tw32(FTQ_RESET, 0x00000000);
7575 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7576 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7578 for (i = 0; i < tp->irq_cnt; i++) {
7579 struct tg3_napi *tnapi = &tp->napi[i];
7580 if (tnapi->hw_status)
7581 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7583 if (tp->hw_stats)
7584 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7586 return err;
7589 /* Save PCI command register before chip reset */
7590 static void tg3_save_pci_state(struct tg3 *tp)
7592 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7595 /* Restore PCI state after chip reset */
7596 static void tg3_restore_pci_state(struct tg3 *tp)
7598 u32 val;
7600 /* Re-enable indirect register accesses. */
7601 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7602 tp->misc_host_ctrl);
7604 /* Set MAX PCI retry to zero. */
7605 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7606 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7607 tg3_flag(tp, PCIX_MODE))
7608 val |= PCISTATE_RETRY_SAME_DMA;
7609 /* Allow reads and writes to the APE register and memory space. */
7610 if (tg3_flag(tp, ENABLE_APE))
7611 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7612 PCISTATE_ALLOW_APE_SHMEM_WR |
7613 PCISTATE_ALLOW_APE_PSPACE_WR;
7614 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7616 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7618 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7619 if (tg3_flag(tp, PCI_EXPRESS))
7620 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7621 else {
7622 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7623 tp->pci_cacheline_sz);
7624 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7625 tp->pci_lat_timer);
7629 /* Make sure PCI-X relaxed ordering bit is clear. */
7630 if (tg3_flag(tp, PCIX_MODE)) {
7631 u16 pcix_cmd;
7633 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7634 &pcix_cmd);
7635 pcix_cmd &= ~PCI_X_CMD_ERO;
7636 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7637 pcix_cmd);
7640 if (tg3_flag(tp, 5780_CLASS)) {
7642 /* Chip reset on 5780 will reset MSI enable bit,
7643 * so we need to restore it.
7645 if (tg3_flag(tp, USING_MSI)) {
7646 u16 ctrl;
7648 pci_read_config_word(tp->pdev,
7649 tp->msi_cap + PCI_MSI_FLAGS,
7650 &ctrl);
7651 pci_write_config_word(tp->pdev,
7652 tp->msi_cap + PCI_MSI_FLAGS,
7653 ctrl | PCI_MSI_FLAGS_ENABLE);
7654 val = tr32(MSGINT_MODE);
7655 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7660 /* tp->lock is held. */
7661 static int tg3_chip_reset(struct tg3 *tp)
7663 u32 val;
7664 void (*write_op)(struct tg3 *, u32, u32);
7665 int i, err;
7667 tg3_nvram_lock(tp);
7669 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7671 /* No matching tg3_nvram_unlock() after this because
7672 * chip reset below will undo the nvram lock.
7674 tp->nvram_lock_cnt = 0;
7676 /* GRC_MISC_CFG core clock reset will clear the memory
7677 * enable bit in PCI register 4 and the MSI enable bit
7678 * on some chips, so we save relevant registers here.
7680 tg3_save_pci_state(tp);
7682 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7683 tg3_flag(tp, 5755_PLUS))
7684 tw32(GRC_FASTBOOT_PC, 0);
7687 * We must avoid the readl() that normally takes place.
7688 * It locks machines, causes machine checks, and other
7689 * fun things. So, temporarily disable the 5701
7690 * hardware workaround, while we do the reset.
7692 write_op = tp->write32;
7693 if (write_op == tg3_write_flush_reg32)
7694 tp->write32 = tg3_write32;
7696 /* Prevent the irq handler from reading or writing PCI registers
7697 * during chip reset when the memory enable bit in the PCI command
7698 * register may be cleared. The chip does not generate interrupt
7699 * at this time, but the irq handler may still be called due to irq
7700 * sharing or irqpoll.
7702 tg3_flag_set(tp, CHIP_RESETTING);
7703 for (i = 0; i < tp->irq_cnt; i++) {
7704 struct tg3_napi *tnapi = &tp->napi[i];
7705 if (tnapi->hw_status) {
7706 tnapi->hw_status->status = 0;
7707 tnapi->hw_status->status_tag = 0;
7709 tnapi->last_tag = 0;
7710 tnapi->last_irq_tag = 0;
7712 smp_mb();
7714 for (i = 0; i < tp->irq_cnt; i++)
7715 synchronize_irq(tp->napi[i].irq_vec);
7717 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7718 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7719 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7722 /* do the reset */
7723 val = GRC_MISC_CFG_CORECLK_RESET;
7725 if (tg3_flag(tp, PCI_EXPRESS)) {
7726 /* Force PCIe 1.0a mode */
7727 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7728 !tg3_flag(tp, 57765_PLUS) &&
7729 tr32(TG3_PCIE_PHY_TSTCTL) ==
7730 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7731 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7733 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7734 tw32(GRC_MISC_CFG, (1 << 29));
7735 val |= (1 << 29);
7739 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7740 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7741 tw32(GRC_VCPU_EXT_CTRL,
7742 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7745 /* Manage gphy power for all CPMU-absent PCIe devices. */
7746 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7747 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7749 tw32(GRC_MISC_CFG, val);
7751 /* restore 5701 hardware bug workaround write method */
7752 tp->write32 = write_op;
7754 /* Unfortunately, we have to delay before the PCI read back.
7755 * Some 575X chips will not even respond to a PCI cfg access
7756 * when the reset command is given to the chip.
7758 * How do these hardware designers expect things to work
7759 * properly if the PCI write is posted for a long period
7760 * of time? It is always necessary to have some method by
7761 * which a register read back can occur to push the write
7762 * out which does the reset.
7764 * For most tg3 variants the trick below was working.
7765 * Ho hum...
7767 udelay(120);
7769 /* Flush PCI posted writes. The normal MMIO registers
7770 * are inaccessible at this time so this is the only
7771 * way to do this reliably (actually, this is no longer
7772 * the case, see above). I tried to use indirect
7773 * register read/write but this upset some 5701 variants.
7775 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7777 udelay(120);
7779 if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7780 u16 val16;
7782 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7783 int i;
7784 u32 cfg_val;
7786 /* Wait for link training to complete. */
7787 for (i = 0; i < 5000; i++)
7788 udelay(100);
7790 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7791 pci_write_config_dword(tp->pdev, 0xc4,
7792 cfg_val | (1 << 15));
7795 /* Clear the "no snoop" and "relaxed ordering" bits. */
7796 pci_read_config_word(tp->pdev,
7797 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7798 &val16);
7799 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7800 PCI_EXP_DEVCTL_NOSNOOP_EN);
7802 * Older PCIe devices only support the 128 byte
7803 * MPS setting. Enforce the restriction.
7805 if (!tg3_flag(tp, CPMU_PRESENT))
7806 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7807 pci_write_config_word(tp->pdev,
7808 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7809 val16);
7811 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7813 /* Clear error status */
7814 pci_write_config_word(tp->pdev,
7815 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7816 PCI_EXP_DEVSTA_CED |
7817 PCI_EXP_DEVSTA_NFED |
7818 PCI_EXP_DEVSTA_FED |
7819 PCI_EXP_DEVSTA_URD);
7822 tg3_restore_pci_state(tp);
7824 tg3_flag_clear(tp, CHIP_RESETTING);
7825 tg3_flag_clear(tp, ERROR_PROCESSED);
7827 val = 0;
7828 if (tg3_flag(tp, 5780_CLASS))
7829 val = tr32(MEMARB_MODE);
7830 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7832 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7833 tg3_stop_fw(tp);
7834 tw32(0x5000, 0x400);
7837 tw32(GRC_MODE, tp->grc_mode);
7839 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7840 val = tr32(0xc4);
7842 tw32(0xc4, val | (1 << 15));
7845 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7846 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7847 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7848 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7849 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7850 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7853 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7854 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7855 val = tp->mac_mode;
7856 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7857 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7858 val = tp->mac_mode;
7859 } else
7860 val = 0;
7862 tw32_f(MAC_MODE, val);
7863 udelay(40);
7865 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7867 err = tg3_poll_fw(tp);
7868 if (err)
7869 return err;
7871 tg3_mdio_start(tp);
7873 if (tg3_flag(tp, PCI_EXPRESS) &&
7874 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7875 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7876 !tg3_flag(tp, 57765_PLUS)) {
7877 val = tr32(0x7c00);
7879 tw32(0x7c00, val | (1 << 25));
7882 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7883 val = tr32(TG3_CPMU_CLCK_ORIDE);
7884 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7887 /* Reprobe ASF enable state. */
7888 tg3_flag_clear(tp, ENABLE_ASF);
7889 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7890 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7891 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7892 u32 nic_cfg;
7894 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7895 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7896 tg3_flag_set(tp, ENABLE_ASF);
7897 tp->last_event_jiffies = jiffies;
7898 if (tg3_flag(tp, 5750_PLUS))
7899 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7903 return 0;
7906 /* tp->lock is held. */
7907 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7909 int err;
7911 tg3_stop_fw(tp);
7913 tg3_write_sig_pre_reset(tp, kind);
7915 tg3_abort_hw(tp, silent);
7916 err = tg3_chip_reset(tp);
7918 __tg3_set_mac_addr(tp, 0);
7920 tg3_write_sig_legacy(tp, kind);
7921 tg3_write_sig_post_reset(tp, kind);
7923 if (err)
7924 return err;
7926 return 0;
7929 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7931 struct tg3 *tp = netdev_priv(dev);
7932 struct sockaddr *addr = p;
7933 int err = 0, skip_mac_1 = 0;
7935 if (!is_valid_ether_addr(addr->sa_data))
7936 return -EINVAL;
7938 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7940 if (!netif_running(dev))
7941 return 0;
7943 if (tg3_flag(tp, ENABLE_ASF)) {
7944 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7946 addr0_high = tr32(MAC_ADDR_0_HIGH);
7947 addr0_low = tr32(MAC_ADDR_0_LOW);
7948 addr1_high = tr32(MAC_ADDR_1_HIGH);
7949 addr1_low = tr32(MAC_ADDR_1_LOW);
7951 /* Skip MAC addr 1 if ASF is using it. */
7952 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7953 !(addr1_high == 0 && addr1_low == 0))
7954 skip_mac_1 = 1;
7956 spin_lock_bh(&tp->lock);
7957 __tg3_set_mac_addr(tp, skip_mac_1);
7958 spin_unlock_bh(&tp->lock);
7960 return err;
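/* Write one ring's buffer descriptor info block (host DMA address,
 * maxlen/flags word and, on older chips, the NIC-memory ring
 * address) into NIC SRAM.
 */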
7963 /* tp->lock is held. */
7964 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7965 dma_addr_t mapping, u32 maxlen_flags,
7966 u32 nic_addr)
7968 tg3_write_mem(tp,
7969 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7970 ((u64) mapping >> 32));
7971 tg3_write_mem(tp,
7972 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7973 ((u64) mapping & 0xffffffff));
7974 tg3_write_mem(tp,
7975 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7976 maxlen_flags);
7978 if (!tg3_flag(tp, 5705_PLUS))
7979 tg3_write_mem(tp,
7980 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7981 nic_addr);
7984 static void __tg3_set_rx_mode(struct net_device *);
7985 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7987 int i;
7989 if (!tg3_flag(tp, ENABLE_TSS)) {
7990 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7991 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7992 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7993 } else {
7994 tw32(HOSTCC_TXCOL_TICKS, 0);
7995 tw32(HOSTCC_TXMAX_FRAMES, 0);
7996 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7999 if (!tg3_flag(tp, ENABLE_RSS)) {
8000 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8001 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8002 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8003 } else {
8004 tw32(HOSTCC_RXCOL_TICKS, 0);
8005 tw32(HOSTCC_RXMAX_FRAMES, 0);
8006 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8009 if (!tg3_flag(tp, 5705_PLUS)) {
8010 u32 val = ec->stats_block_coalesce_usecs;
8012 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8013 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8015 if (!netif_carrier_ok(tp->dev))
8016 val = 0;
8018 tw32(HOSTCC_STAT_COAL_TICKS, val);
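/* Each extra host coalescing vector has its own register block,
 * laid out 0x18 bytes apart starting at the _VEC1 offsets.
 */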
8021 for (i = 0; i < tp->irq_cnt - 1; i++) {
8022 u32 reg;
8024 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8025 tw32(reg, ec->rx_coalesce_usecs);
8026 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8027 tw32(reg, ec->rx_max_coalesced_frames);
8028 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8029 tw32(reg, ec->rx_max_coalesced_frames_irq);
8031 if (tg3_flag(tp, ENABLE_TSS)) {
8032 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8033 tw32(reg, ec->tx_coalesce_usecs);
8034 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8035 tw32(reg, ec->tx_max_coalesced_frames);
8036 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8037 tw32(reg, ec->tx_max_coalesced_frames_irq);
8041 for (; i < tp->irq_max - 1; i++) {
8042 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8043 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8044 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8046 if (tg3_flag(tp, ENABLE_TSS)) {
8047 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8048 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8049 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8054 /* tp->lock is held. */
8055 static void tg3_rings_reset(struct tg3 *tp)
8057 int i;
8058 u32 stblk, txrcb, rxrcb, limit;
8059 struct tg3_napi *tnapi = &tp->napi[0];
8061 /* Disable all transmit rings but the first. */
8062 if (!tg3_flag(tp, 5705_PLUS))
8063 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8064 else if (tg3_flag(tp, 5717_PLUS))
8065 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8066 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8067 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8068 else
8069 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8071 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8072 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8073 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8074 BDINFO_FLAGS_DISABLED);
8077 /* Disable all receive return rings but the first. */
8078 if (tg3_flag(tp, 5717_PLUS))
8079 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8080 else if (!tg3_flag(tp, 5705_PLUS))
8081 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8082 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8083 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8084 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8085 else
8086 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8088 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8089 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8090 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8091 BDINFO_FLAGS_DISABLED);
8093 /* Disable interrupts */
8094 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8095 tp->napi[0].chk_msi_cnt = 0;
8096 tp->napi[0].last_rx_cons = 0;
8097 tp->napi[0].last_tx_cons = 0;
8099 /* Zero mailbox registers. */
8100 if (tg3_flag(tp, SUPPORT_MSIX)) {
8101 for (i = 1; i < tp->irq_max; i++) {
8102 tp->napi[i].tx_prod = 0;
8103 tp->napi[i].tx_cons = 0;
8104 if (tg3_flag(tp, ENABLE_TSS))
8105 tw32_mailbox(tp->napi[i].prodmbox, 0);
8106 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8107 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8108 tp->napi[i].chk_msi_cnt = 0;
8109 tp->napi[i].last_rx_cons = 0;
8110 tp->napi[i].last_tx_cons = 0;
8112 if (!tg3_flag(tp, ENABLE_TSS))
8113 tw32_mailbox(tp->napi[0].prodmbox, 0);
8114 } else {
8115 tp->napi[0].tx_prod = 0;
8116 tp->napi[0].tx_cons = 0;
8117 tw32_mailbox(tp->napi[0].prodmbox, 0);
8118 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8121 /* Make sure the NIC-based send BD rings are disabled. */
8122 if (!tg3_flag(tp, 5705_PLUS)) {
8123 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8124 for (i = 0; i < 16; i++)
8125 tw32_tx_mbox(mbox + i * 8, 0);
8128 txrcb = NIC_SRAM_SEND_RCB;
8129 rxrcb = NIC_SRAM_RCV_RET_RCB;
8131 /* Clear status block in ram. */
8132 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8134 /* Set status block DMA address */
8135 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8136 ((u64) tnapi->status_mapping >> 32));
8137 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8138 ((u64) tnapi->status_mapping & 0xffffffff));
8140 if (tnapi->tx_ring) {
8141 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8142 (TG3_TX_RING_SIZE <<
8143 BDINFO_FLAGS_MAXLEN_SHIFT),
8144 NIC_SRAM_TX_BUFFER_DESC);
8145 txrcb += TG3_BDINFO_SIZE;
8148 if (tnapi->rx_rcb) {
8149 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8150 (tp->rx_ret_ring_mask + 1) <<
8151 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8152 rxrcb += TG3_BDINFO_SIZE;
8155 stblk = HOSTCC_STATBLCK_RING1;
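/* Program the status block DMA address for each remaining vector;
 * the register pairs sit 8 bytes apart.
 */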
8157 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8158 u64 mapping = (u64)tnapi->status_mapping;
8159 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8160 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8162 /* Clear status block in ram. */
8163 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8165 if (tnapi->tx_ring) {
8166 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8167 (TG3_TX_RING_SIZE <<
8168 BDINFO_FLAGS_MAXLEN_SHIFT),
8169 NIC_SRAM_TX_BUFFER_DESC);
8170 txrcb += TG3_BDINFO_SIZE;
8173 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8174 ((tp->rx_ret_ring_mask + 1) <<
8175 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8177 stblk += 8;
8178 rxrcb += TG3_BDINFO_SIZE;
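/* Pick RX BD replenish thresholds: replenish once the chip's BD
 * cache is half drained, clamped by how many buffers the host ring
 * actually keeps posted.
 */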
8182 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8184 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8186 if (!tg3_flag(tp, 5750_PLUS) ||
8187 tg3_flag(tp, 5780_CLASS) ||
8188 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8189 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8190 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8191 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8192 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8193 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8194 else
8195 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8197 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8198 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8200 val = min(nic_rep_thresh, host_rep_thresh);
8201 tw32(RCVBDI_STD_THRESH, val);
8203 if (tg3_flag(tp, 57765_PLUS))
8204 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8206 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8207 return;
8209 if (!tg3_flag(tp, 5705_PLUS))
8210 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8211 else
8212 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8214 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8216 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8217 tw32(RCVBDI_JUMBO_THRESH, val);
8219 if (tg3_flag(tp, 57765_PLUS))
8220 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8223 /* tp->lock is held. */
8224 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8226 u32 val, rdmac_mode;
8227 int i, err, limit;
8228 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8230 tg3_disable_ints(tp);
8232 tg3_stop_fw(tp);
8234 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8236 if (tg3_flag(tp, INIT_COMPLETE))
8237 tg3_abort_hw(tp, 1);
8239 /* Enable MAC control of LPI */
8240 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8241 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8242 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8243 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8245 tw32_f(TG3_CPMU_EEE_CTRL,
8246 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8248 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8249 TG3_CPMU_EEEMD_LPI_IN_TX |
8250 TG3_CPMU_EEEMD_LPI_IN_RX |
8251 TG3_CPMU_EEEMD_EEE_ENABLE;
8253 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8254 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8256 if (tg3_flag(tp, ENABLE_APE))
8257 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8259 tw32_f(TG3_CPMU_EEE_MODE, val);
8261 tw32_f(TG3_CPMU_EEE_DBTMR1,
8262 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8263 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8265 tw32_f(TG3_CPMU_EEE_DBTMR2,
8266 TG3_CPMU_DBTMR2_APE_TX_2047US |
8267 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8270 if (reset_phy)
8271 tg3_phy_reset(tp);
8273 err = tg3_chip_reset(tp);
8274 if (err)
8275 return err;
8277 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8279 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8280 val = tr32(TG3_CPMU_CTRL);
8281 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8282 tw32(TG3_CPMU_CTRL, val);
8284 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8285 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8286 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8287 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8289 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8290 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8291 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8292 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8294 val = tr32(TG3_CPMU_HST_ACC);
8295 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8296 val |= CPMU_HST_ACC_MACCLK_6_25;
8297 tw32(TG3_CPMU_HST_ACC, val);
8300 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8301 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8302 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8303 PCIE_PWR_MGMT_L1_THRESH_4MS;
8304 tw32(PCIE_PWR_MGMT_THRESH, val);
8306 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8307 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8309 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8311 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8312 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8315 if (tg3_flag(tp, L1PLLPD_EN)) {
8316 u32 grc_mode = tr32(GRC_MODE);
8318 /* Access the lower 1K of PL PCIE block registers. */
8319 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8320 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8322 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8323 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8324 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8326 tw32(GRC_MODE, grc_mode);
8329 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8330 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8331 u32 grc_mode = tr32(GRC_MODE);
8333 /* Access the lower 1K of PL PCIE block registers. */
8334 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8335 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8337 val = tr32(TG3_PCIE_TLDLPL_PORT +
8338 TG3_PCIE_PL_LO_PHYCTL5);
8339 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8340 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8342 tw32(GRC_MODE, grc_mode);
8345 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8346 u32 grc_mode = tr32(GRC_MODE);
8348 /* Access the lower 1K of DL PCIE block registers. */
8349 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8350 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8352 val = tr32(TG3_PCIE_TLDLPL_PORT +
8353 TG3_PCIE_DL_LO_FTSMAX);
8354 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8355 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8356 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8358 tw32(GRC_MODE, grc_mode);
8361 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8362 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8363 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8364 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8367 /* This works around an issue with Athlon chipsets on
8368 * B3 tigon3 silicon. This bit has no effect on any
8369 * other revision. But do not set this on PCI Express
8370 * chips and don't even touch the clocks if the CPMU is present.
8371 */
8372 if (!tg3_flag(tp, CPMU_PRESENT)) {
8373 if (!tg3_flag(tp, PCI_EXPRESS))
8374 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8375 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8378 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8379 tg3_flag(tp, PCIX_MODE)) {
8380 val = tr32(TG3PCI_PCISTATE);
8381 val |= PCISTATE_RETRY_SAME_DMA;
8382 tw32(TG3PCI_PCISTATE, val);
8385 if (tg3_flag(tp, ENABLE_APE)) {
8386 /* Allow reads and writes to the
8387 * APE register and memory space.
8388 */
8389 val = tr32(TG3PCI_PCISTATE);
8390 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8391 PCISTATE_ALLOW_APE_SHMEM_WR |
8392 PCISTATE_ALLOW_APE_PSPACE_WR;
8393 tw32(TG3PCI_PCISTATE, val);
8396 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8397 /* Enable some hw fixes. */
8398 val = tr32(TG3PCI_MSI_DATA);
8399 val |= (1 << 26) | (1 << 28) | (1 << 29);
8400 tw32(TG3PCI_MSI_DATA, val);
8403 /* Descriptor ring init may make accesses to the
8404 * NIC SRAM area to setup the TX descriptors, so we
8405 * can only do this after the hardware has been
8406 * successfully reset.
8407 */
8408 err = tg3_init_rings(tp);
8409 if (err)
8410 return err;
8412 if (tg3_flag(tp, 57765_PLUS)) {
8413 val = tr32(TG3PCI_DMA_RW_CTRL) &
8414 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8415 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8416 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8417 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8418 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8419 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8420 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8421 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8422 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8423 /* This value is determined during the probe time DMA
8424 * engine test, tg3_test_dma.
8425 */
8426 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8429 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8430 GRC_MODE_4X_NIC_SEND_RINGS |
8431 GRC_MODE_NO_TX_PHDR_CSUM |
8432 GRC_MODE_NO_RX_PHDR_CSUM);
8433 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8435 /* Pseudo-header checksum is done by hardware logic and not
8436 * the offload processors, so make the chip do the pseudo-
8437 * header checksums on receive. For transmit it is more
8438 * convenient to do the pseudo-header checksum in software
8439 * as Linux does that on transmit for us in all cases.
8440 */
8441 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8443 tw32(GRC_MODE,
8444 tp->grc_mode |
8445 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8447 /* Setup the timer prescaler register. Clock is always 66 MHz. */
8448 val = tr32(GRC_MISC_CFG);
8449 val &= ~0xff;
8450 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8451 tw32(GRC_MISC_CFG, val);
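/* Assuming the conventional N+1 divider (an assumption; the exact
 * datasheet behavior is not spelled out here), a prescaler value of
 * 65 divides the fixed 66 MHz clock to roughly a 1 MHz timer tick:
 * 66 MHz / (65 + 1) = 1 MHz.
 */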
8453 /* Initialize MBUF/DESC pool. */
8454 if (tg3_flag(tp, 5750_PLUS)) {
8455 /* Do nothing. */
8456 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8457 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8458 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8459 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8460 else
8461 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8462 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8463 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8464 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8465 int fw_len;
8467 fw_len = tp->fw_len;
8468 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8469 tw32(BUFMGR_MB_POOL_ADDR,
8470 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8471 tw32(BUFMGR_MB_POOL_SIZE,
8472 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
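/* The firmware length above is first rounded up to a 128-byte
 * boundary: (fw_len + 0x7f) & ~0x7f. E.g. fw_len == 0x1234 becomes
 * 0x1280, so the mbuf pool starts on an aligned address just past
 * the TSO firmware image.
 */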
8475 if (tp->dev->mtu <= ETH_DATA_LEN) {
8476 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8477 tp->bufmgr_config.mbuf_read_dma_low_water);
8478 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8479 tp->bufmgr_config.mbuf_mac_rx_low_water);
8480 tw32(BUFMGR_MB_HIGH_WATER,
8481 tp->bufmgr_config.mbuf_high_water);
8482 } else {
8483 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8484 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8485 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8486 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8487 tw32(BUFMGR_MB_HIGH_WATER,
8488 tp->bufmgr_config.mbuf_high_water_jumbo);
8490 tw32(BUFMGR_DMA_LOW_WATER,
8491 tp->bufmgr_config.dma_low_water);
8492 tw32(BUFMGR_DMA_HIGH_WATER,
8493 tp->bufmgr_config.dma_high_water);
8495 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8496 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8497 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8498 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8499 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8500 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8501 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8502 tw32(BUFMGR_MODE, val);
8503 for (i = 0; i < 2000; i++) {
8504 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8505 break;
8506 udelay(10);
8508 if (i >= 2000) {
8509 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8510 return -ENODEV;
8513 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8514 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8516 tg3_setup_rxbd_thresholds(tp);
8518 /* Initialize TG3_BDINFO's at:
8519 * RCVDBDI_STD_BD: standard eth size rx ring
8520 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8521 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8522 *
8523 * like so:
8524 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8525 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8526 * ring attribute flags
8527 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8528 *
8529 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8530 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8531 *
8532 * The size of each ring is fixed in the firmware, but the location is
8533 * configurable.
8534 */
8535 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8536 ((u64) tpr->rx_std_mapping >> 32));
8537 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8538 ((u64) tpr->rx_std_mapping & 0xffffffff));
8539 if (!tg3_flag(tp, 5717_PLUS))
8540 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8541 NIC_SRAM_RX_BUFFER_DESC);
8543 /* Disable the mini ring */
8544 if (!tg3_flag(tp, 5705_PLUS))
8545 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8546 BDINFO_FLAGS_DISABLED);
8548 /* Program the jumbo buffer descriptor ring control
8549 * blocks on those devices that have them.
8550 */
8551 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8552 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8554 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8555 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8556 ((u64) tpr->rx_jmb_mapping >> 32));
8557 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8558 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8559 val = TG3_RX_JMB_RING_SIZE(tp) <<
8560 BDINFO_FLAGS_MAXLEN_SHIFT;
8561 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8562 val | BDINFO_FLAGS_USE_EXT_RECV);
8563 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8564 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8565 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8566 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8567 } else {
8568 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8569 BDINFO_FLAGS_DISABLED);
8572 if (tg3_flag(tp, 57765_PLUS)) {
8573 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8574 val = TG3_RX_STD_MAX_SIZE_5700;
8575 else
8576 val = TG3_RX_STD_MAX_SIZE_5717;
8577 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8578 val |= (TG3_RX_STD_DMA_SZ << 2);
8579 } else
8580 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8581 } else
8582 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8584 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8586 tpr->rx_std_prod_idx = tp->rx_pending;
8587 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8589 tpr->rx_jmb_prod_idx =
8590 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8591 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8593 tg3_rings_reset(tp);
8595 /* Initialize MAC address and backoff seed. */
8596 __tg3_set_mac_addr(tp, 0);
8598 /* MTU + ethernet header + FCS + optional VLAN tag */
8599 tw32(MAC_RX_MTU_SIZE,
8600 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
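/* E.g. for the default MTU of 1500: 1500 + 14 (ETH_HLEN) +
 * 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes, the classic
 * maximum VLAN-tagged Ethernet frame size.
 */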
8602 /* The slot time is changed by tg3_setup_phy if we
8603 * run at gigabit with half duplex.
8604 */
8605 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8606 (6 << TX_LENGTHS_IPG_SHIFT) |
8607 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8609 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8610 val |= tr32(MAC_TX_LENGTHS) &
8611 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8612 TX_LENGTHS_CNT_DWN_VAL_MSK);
8614 tw32(MAC_TX_LENGTHS, val);
8616 /* Receive rules. */
8617 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8618 tw32(RCVLPC_CONFIG, 0x0181);
8620 /* Calculate RDMAC_MODE setting early, we need it to determine
8621 * the RCVLPC_STATE_ENABLE mask.
8622 */
8623 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8624 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8625 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8626 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8627 RDMAC_MODE_LNGREAD_ENAB);
8629 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8630 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8632 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8633 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8634 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8635 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8636 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8637 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8639 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8640 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8641 if (tg3_flag(tp, TSO_CAPABLE) &&
8642 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8643 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8644 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8645 !tg3_flag(tp, IS_5788)) {
8646 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8650 if (tg3_flag(tp, PCI_EXPRESS))
8651 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8653 if (tg3_flag(tp, HW_TSO_1) ||
8654 tg3_flag(tp, HW_TSO_2) ||
8655 tg3_flag(tp, HW_TSO_3))
8656 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8658 if (tg3_flag(tp, 57765_PLUS) ||
8659 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8660 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8661 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8663 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8664 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8666 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8667 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8668 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8669 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8670 tg3_flag(tp, 57765_PLUS)) {
8671 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8672 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8673 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8674 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8675 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8676 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8677 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8678 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8679 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8681 tw32(TG3_RDMA_RSRVCTRL_REG,
8682 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8685 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8686 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8687 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8688 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8689 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8690 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8693 /* Receive/send statistics. */
8694 if (tg3_flag(tp, 5750_PLUS)) {
8695 val = tr32(RCVLPC_STATS_ENABLE);
8696 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8697 tw32(RCVLPC_STATS_ENABLE, val);
8698 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8699 tg3_flag(tp, TSO_CAPABLE)) {
8700 val = tr32(RCVLPC_STATS_ENABLE);
8701 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8702 tw32(RCVLPC_STATS_ENABLE, val);
8703 } else {
8704 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8706 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8707 tw32(SNDDATAI_STATSENAB, 0xffffff);
8708 tw32(SNDDATAI_STATSCTRL,
8709 (SNDDATAI_SCTRL_ENABLE |
8710 SNDDATAI_SCTRL_FASTUPD));
8712 /* Setup host coalescing engine. */
8713 tw32(HOSTCC_MODE, 0);
8714 for (i = 0; i < 2000; i++) {
8715 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8716 break;
8717 udelay(10);
8720 __tg3_set_coalesce(tp, &tp->coal);
8722 if (!tg3_flag(tp, 5705_PLUS)) {
8723 /* Status/statistics block address. See tg3_timer,
8724 * the tg3_periodic_fetch_stats call there, and
8725 * tg3_get_stats to see how this works for 5705/5750 chips.
8726 */
8727 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8728 ((u64) tp->stats_mapping >> 32));
8729 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8730 ((u64) tp->stats_mapping & 0xffffffff));
8731 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8733 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8735 /* Clear statistics and status block memory areas */
8736 for (i = NIC_SRAM_STATS_BLK;
8737 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8738 i += sizeof(u32)) {
8739 tg3_write_mem(tp, i, 0);
8740 udelay(40);
8744 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8746 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8747 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8748 if (!tg3_flag(tp, 5705_PLUS))
8749 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8751 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8752 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8753 /* reset to prevent losing 1st rx packet intermittently */
8754 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8755 udelay(10);
8758 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8759 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8760 MAC_MODE_FHDE_ENABLE;
8761 if (tg3_flag(tp, ENABLE_APE))
8762 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8763 if (!tg3_flag(tp, 5705_PLUS) &&
8764 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8765 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8766 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8767 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8768 udelay(40);
8770 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8771 * If TG3_FLAG_IS_NIC is zero, we should read the
8772 * register to preserve the GPIO settings for LOMs. The GPIOs,
8773 * whether used as inputs or outputs, are set by boot code after
8774 * reset.
8775 */
8776 if (!tg3_flag(tp, IS_NIC)) {
8777 u32 gpio_mask;
8779 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8780 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8781 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8783 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8784 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8785 GRC_LCLCTRL_GPIO_OUTPUT3;
8787 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8788 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8790 tp->grc_local_ctrl &= ~gpio_mask;
8791 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8793 /* GPIO1 must be driven high for eeprom write protect */
8794 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8795 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8796 GRC_LCLCTRL_GPIO_OUTPUT1);
8798 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8799 udelay(100);
8801 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8802 val = tr32(MSGINT_MODE);
8803 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8804 if (!tg3_flag(tp, 1SHOT_MSI))
8805 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
8806 tw32(MSGINT_MODE, val);
8809 if (!tg3_flag(tp, 5705_PLUS)) {
8810 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8811 udelay(40);
8814 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8815 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8816 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8817 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8818 WDMAC_MODE_LNGREAD_ENAB);
8820 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8821 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8822 if (tg3_flag(tp, TSO_CAPABLE) &&
8823 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8824 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8825 /* nothing */
8826 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8827 !tg3_flag(tp, IS_5788)) {
8828 val |= WDMAC_MODE_RX_ACCEL;
8832 /* Enable host coalescing bug fix */
8833 if (tg3_flag(tp, 5755_PLUS))
8834 val |= WDMAC_MODE_STATUS_TAG_FIX;
8836 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8837 val |= WDMAC_MODE_BURST_ALL_DATA;
8839 tw32_f(WDMAC_MODE, val);
8840 udelay(40);
8842 if (tg3_flag(tp, PCIX_MODE)) {
8843 u16 pcix_cmd;
8845 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8846 &pcix_cmd);
8847 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8848 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8849 pcix_cmd |= PCI_X_CMD_READ_2K;
8850 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8851 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8852 pcix_cmd |= PCI_X_CMD_READ_2K;
8854 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8855 pcix_cmd);
8858 tw32_f(RDMAC_MODE, rdmac_mode);
8859 udelay(40);
8861 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8862 if (!tg3_flag(tp, 5705_PLUS))
8863 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8865 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8866 tw32(SNDDATAC_MODE,
8867 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8868 else
8869 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8871 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8872 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8873 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8874 if (tg3_flag(tp, LRG_PROD_RING_CAP))
8875 val |= RCVDBDI_MODE_LRG_RING_SZ;
8876 tw32(RCVDBDI_MODE, val);
8877 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8878 if (tg3_flag(tp, HW_TSO_1) ||
8879 tg3_flag(tp, HW_TSO_2) ||
8880 tg3_flag(tp, HW_TSO_3))
8881 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8882 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8883 if (tg3_flag(tp, ENABLE_TSS))
8884 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8885 tw32(SNDBDI_MODE, val);
8886 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8888 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8889 err = tg3_load_5701_a0_firmware_fix(tp);
8890 if (err)
8891 return err;
8894 if (tg3_flag(tp, TSO_CAPABLE)) {
8895 err = tg3_load_tso_firmware(tp);
8896 if (err)
8897 return err;
8900 tp->tx_mode = TX_MODE_ENABLE;
8902 if (tg3_flag(tp, 5755_PLUS) ||
8903 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8904 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8906 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8907 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8908 tp->tx_mode &= ~val;
8909 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8912 tw32_f(MAC_TX_MODE, tp->tx_mode);
8913 udelay(100);
8915 if (tg3_flag(tp, ENABLE_RSS)) {
8916 int i = 0;
8917 u32 reg = MAC_RSS_INDIR_TBL_0;
8919 if (tp->irq_cnt == 2) {
8920 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
8921 tw32(reg, 0x0);
8922 reg += 4;
8924 } else {
8925 u32 val;
8927 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8928 val = i % (tp->irq_cnt - 1);
8929 i++;
8930 for (; i % 8; i++) {
8931 val <<= 4;
8932 val |= (i % (tp->irq_cnt - 1));
8934 tw32(reg, val);
8935 reg += 4;
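/* Each 32-bit indirection register packs eight 4-bit ring indices.
 * Worked example (illustrative): with irq_cnt == 4 there are three
 * rx rings, so the entries cycle 0,1,2,0,1,2,... and the first
 * register ends up holding 0x01201201, the entry for table slot 0
 * sitting in the highest nibble after the shifts above.
 */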
8939 /* Setup the "secret" hash key. */
8940 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8941 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8942 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8943 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8944 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8945 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8946 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8947 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8948 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8949 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8952 tp->rx_mode = RX_MODE_ENABLE;
8953 if (tg3_flag(tp, 5755_PLUS))
8954 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8956 if (tg3_flag(tp, ENABLE_RSS))
8957 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8958 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8959 RX_MODE_RSS_IPV6_HASH_EN |
8960 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8961 RX_MODE_RSS_IPV4_HASH_EN |
8962 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8964 tw32_f(MAC_RX_MODE, tp->rx_mode);
8965 udelay(10);
8967 tw32(MAC_LED_CTRL, tp->led_ctrl);
8969 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8970 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8971 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8972 udelay(10);
8974 tw32_f(MAC_RX_MODE, tp->rx_mode);
8975 udelay(10);
8977 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8978 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8979 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8980 /* Set drive transmission level to 1.2V */
8981 /* only if the signal pre-emphasis bit is not set */
8982 val = tr32(MAC_SERDES_CFG);
8983 val &= 0xfffff000;
8984 val |= 0x880;
8985 tw32(MAC_SERDES_CFG, val);
8987 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8988 tw32(MAC_SERDES_CFG, 0x616000);
8991 /* Prevent chip from dropping frames when flow control
8992 * is enabled.
8993 */
8994 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8995 val = 1;
8996 else
8997 val = 2;
8998 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9000 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9001 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9002 /* Use hardware link auto-negotiation */
9003 tg3_flag_set(tp, HW_AUTONEG);
9006 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9007 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9008 u32 tmp;
9010 tmp = tr32(SERDES_RX_CTRL);
9011 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9012 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9013 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9014 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9017 if (!tg3_flag(tp, USE_PHYLIB)) {
9018 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9019 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9020 tp->link_config.speed = tp->link_config.orig_speed;
9021 tp->link_config.duplex = tp->link_config.orig_duplex;
9022 tp->link_config.autoneg = tp->link_config.orig_autoneg;
9025 err = tg3_setup_phy(tp, 0);
9026 if (err)
9027 return err;
9029 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9030 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9031 u32 tmp;
9033 /* Clear CRC stats. */
9034 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9035 tg3_writephy(tp, MII_TG3_TEST1,
9036 tmp | MII_TG3_TEST1_CRC_EN);
9037 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9042 __tg3_set_rx_mode(tp->dev);
9044 /* Initialize receive rules. */
9045 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9046 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9047 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9048 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9050 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9051 limit = 8;
9052 else
9053 limit = 16;
9054 if (tg3_flag(tp, ENABLE_ASF))
9055 limit -= 4;
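/* The switch below relies on fall-through: starting at the entry for
 * the computed limit it zeroes every rule from (limit - 1) down to 4.
 * Rules 0 and 1 were programmed above, rules 2 and 3 are deliberately
 * left alone (their clears are commented out), and subtracting 4 for
 * ASF keeps the driver away from the top four rules the firmware owns.
 */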
9056 switch (limit) {
9057 case 16:
9058 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9059 case 15:
9060 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9061 case 14:
9062 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9063 case 13:
9064 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9065 case 12:
9066 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9067 case 11:
9068 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9069 case 10:
9070 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9071 case 9:
9072 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9073 case 8:
9074 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9075 case 7:
9076 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9077 case 6:
9078 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9079 case 5:
9080 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9081 case 4:
9082 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9083 case 3:
9084 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9085 case 2:
9086 case 1:
9088 default:
9089 break;
9092 if (tg3_flag(tp, ENABLE_APE))
9093 /* Write our heartbeat update interval to APE. */
9094 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9095 APE_HOST_HEARTBEAT_INT_DISABLE);
9097 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9099 return 0;
9102 /* Called at device open time to get the chip ready for
9103 * packet processing. Invoked with tp->lock held.
9104 */
9105 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9107 tg3_switch_clocks(tp);
9109 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9111 return tg3_reset_hw(tp, reset_phy);
9114 #define TG3_STAT_ADD32(PSTAT, REG) \
9115 do { u32 __val = tr32(REG); \
9116 (PSTAT)->low += __val; \
9117 if ((PSTAT)->low < __val) \
9118 (PSTAT)->high += 1; \
9119 } while (0)
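/* TG3_STAT_ADD32 folds a 32-bit hardware counter into a 64-bit
 * software accumulator. Example: low == 0xfffffff0 plus a register
 * read of 0x20 leaves low == 0x10; since 0x10 < 0x20 the addition
 * wrapped, so high is bumped and the 64-bit total stays correct.
 */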
9121 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9123 struct tg3_hw_stats *sp = tp->hw_stats;
9125 if (!netif_carrier_ok(tp->dev))
9126 return;
9128 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9129 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9130 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9131 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9132 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9133 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9134 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9135 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9136 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9137 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9138 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9139 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9140 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9142 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9143 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9144 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9145 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9146 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9147 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9148 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9149 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9150 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9151 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9152 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9153 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9154 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9155 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9157 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9158 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9159 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9160 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9161 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9162 } else {
9163 u32 val = tr32(HOSTCC_FLOW_ATTN);
9164 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9165 if (val) {
9166 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9167 sp->rx_discards.low += val;
9168 if (sp->rx_discards.low < val)
9169 sp->rx_discards.high += 1;
9171 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9173 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
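/* tg3_chk_missed_msi below works around occasionally lost MSIs: if a
 * vector still has work pending and neither its rx nor tx consumer
 * index has moved since the previous timer pass, the handler is
 * invoked by hand (tg3_msi) after one grace pass of the timer.
 */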
9176 static void tg3_chk_missed_msi(struct tg3 *tp)
9178 u32 i;
9180 for (i = 0; i < tp->irq_cnt; i++) {
9181 struct tg3_napi *tnapi = &tp->napi[i];
9183 if (tg3_has_work(tnapi)) {
9184 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9185 tnapi->last_tx_cons == tnapi->tx_cons) {
9186 if (tnapi->chk_msi_cnt < 1) {
9187 tnapi->chk_msi_cnt++;
9188 return;
9190 tg3_msi(0, tnapi);
9193 tnapi->chk_msi_cnt = 0;
9194 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9195 tnapi->last_tx_cons = tnapi->tx_cons;
9199 static void tg3_timer(unsigned long __opaque)
9201 struct tg3 *tp = (struct tg3 *) __opaque;
9203 if (tp->irq_sync)
9204 goto restart_timer;
9206 spin_lock(&tp->lock);
9208 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9209 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9210 tg3_chk_missed_msi(tp);
9212 if (!tg3_flag(tp, TAGGED_STATUS)) {
9213 /* All of this garbage is because when using non-tagged
9214 * IRQ status the mailbox/status_block protocol the chip
9215 * uses with the cpu is race prone.
9216 */
9217 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9218 tw32(GRC_LOCAL_CTRL,
9219 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9220 } else {
9221 tw32(HOSTCC_MODE, tp->coalesce_mode |
9222 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9225 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9226 tg3_flag_set(tp, RESTART_TIMER);
9227 spin_unlock(&tp->lock);
9228 schedule_work(&tp->reset_task);
9229 return;
9233 /* This part only runs once per second. */
9234 if (!--tp->timer_counter) {
9235 if (tg3_flag(tp, 5705_PLUS))
9236 tg3_periodic_fetch_stats(tp);
9238 if (tp->setlpicnt && !--tp->setlpicnt)
9239 tg3_phy_eee_enable(tp);
9241 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9242 u32 mac_stat;
9243 int phy_event;
9245 mac_stat = tr32(MAC_STATUS);
9247 phy_event = 0;
9248 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9249 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9250 phy_event = 1;
9251 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9252 phy_event = 1;
9254 if (phy_event)
9255 tg3_setup_phy(tp, 0);
9256 } else if (tg3_flag(tp, POLL_SERDES)) {
9257 u32 mac_stat = tr32(MAC_STATUS);
9258 int need_setup = 0;
9260 if (netif_carrier_ok(tp->dev) &&
9261 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9262 need_setup = 1;
9264 if (!netif_carrier_ok(tp->dev) &&
9265 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9266 MAC_STATUS_SIGNAL_DET))) {
9267 need_setup = 1;
9269 if (need_setup) {
9270 if (!tp->serdes_counter) {
9271 tw32_f(MAC_MODE,
9272 (tp->mac_mode &
9273 ~MAC_MODE_PORT_MODE_MASK));
9274 udelay(40);
9275 tw32_f(MAC_MODE, tp->mac_mode);
9276 udelay(40);
9278 tg3_setup_phy(tp, 0);
9280 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9281 tg3_flag(tp, 5780_CLASS)) {
9282 tg3_serdes_parallel_detect(tp);
9285 tp->timer_counter = tp->timer_multiplier;
9288 /* Heartbeat is only sent once every 2 seconds.
9289 *
9290 * The heartbeat is to tell the ASF firmware that the host
9291 * driver is still alive. In the event that the OS crashes,
9292 * ASF needs to reset the hardware to free up the FIFO space
9293 * that may be filled with rx packets destined for the host.
9294 * If the FIFO is full, ASF will no longer function properly.
9295 *
9296 * Unintended resets have been reported on real time kernels
9297 * where the timer doesn't run on time. Netpoll will also have
9298 * the same problem.
9299 *
9300 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9301 * to check the ring condition when the heartbeat is expiring
9302 * before doing the reset. This will prevent most unintended
9303 * resets.
9304 */
9305 if (!--tp->asf_counter) {
9306 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9307 tg3_wait_for_event_ack(tp);
9309 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9310 FWCMD_NICDRV_ALIVE3);
9311 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9312 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9313 TG3_FW_UPDATE_TIMEOUT_SEC);
9315 tg3_generate_fw_event(tp);
9317 tp->asf_counter = tp->asf_multiplier;
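/* Cadence check (illustrative): tg3_open sets asf_multiplier to
 * 2 * (HZ / timer_offset), and asf_counter decrements once per
 * timer_offset jiffies, so the ALIVE3 event above fires roughly
 * every two seconds, matching the comment at the top of this block.
 */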
9320 spin_unlock(&tp->lock);
9322 restart_timer:
9323 tp->timer.expires = jiffies + tp->timer_offset;
9324 add_timer(&tp->timer);
9327 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9329 irq_handler_t fn;
9330 unsigned long flags;
9331 char *name;
9332 struct tg3_napi *tnapi = &tp->napi[irq_num];
9334 if (tp->irq_cnt == 1)
9335 name = tp->dev->name;
9336 else {
9337 name = &tnapi->irq_lbl[0];
9338 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9339 name[IFNAMSIZ-1] = 0;
9342 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9343 fn = tg3_msi;
9344 if (tg3_flag(tp, 1SHOT_MSI))
9345 fn = tg3_msi_1shot;
9346 flags = 0;
9347 } else {
9348 fn = tg3_interrupt;
9349 if (tg3_flag(tp, TAGGED_STATUS))
9350 fn = tg3_interrupt_tagged;
9351 flags = IRQF_SHARED;
9354 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9357 static int tg3_test_interrupt(struct tg3 *tp)
9359 struct tg3_napi *tnapi = &tp->napi[0];
9360 struct net_device *dev = tp->dev;
9361 int err, i, intr_ok = 0;
9362 u32 val;
9364 if (!netif_running(dev))
9365 return -ENODEV;
9367 tg3_disable_ints(tp);
9369 free_irq(tnapi->irq_vec, tnapi);
9371 /*
9372 * Turn off MSI one shot mode. Otherwise this test has no
9373 * observable way to know whether the interrupt was delivered.
9374 */
9375 if (tg3_flag(tp, 57765_PLUS)) {
9376 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9377 tw32(MSGINT_MODE, val);
9380 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9381 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9382 if (err)
9383 return err;
9385 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9386 tg3_enable_ints(tp);
9388 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9389 tnapi->coal_now);
9391 for (i = 0; i < 5; i++) {
9392 u32 int_mbox, misc_host_ctrl;
9394 int_mbox = tr32_mailbox(tnapi->int_mbox);
9395 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9397 if ((int_mbox != 0) ||
9398 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9399 intr_ok = 1;
9400 break;
9403 if (tg3_flag(tp, 57765_PLUS) &&
9404 tnapi->hw_status->status_tag != tnapi->last_tag)
9405 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9407 msleep(10);
9410 tg3_disable_ints(tp);
9412 free_irq(tnapi->irq_vec, tnapi);
9414 err = tg3_request_irq(tp, 0);
9416 if (err)
9417 return err;
9419 if (intr_ok) {
9420 /* Reenable MSI one shot mode. */
9421 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9422 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9423 tw32(MSGINT_MODE, val);
9425 return 0;
9428 return -EIO;
9431 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
9432 * successfully restored.
9433 */
9434 static int tg3_test_msi(struct tg3 *tp)
9436 int err;
9437 u16 pci_cmd;
9439 if (!tg3_flag(tp, USING_MSI))
9440 return 0;
9442 /* Turn off SERR reporting in case MSI terminates with Master
9443 * Abort.
9444 */
9445 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9446 pci_write_config_word(tp->pdev, PCI_COMMAND,
9447 pci_cmd & ~PCI_COMMAND_SERR);
9449 err = tg3_test_interrupt(tp);
9451 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9453 if (!err)
9454 return 0;
9456 /* other failures */
9457 if (err != -EIO)
9458 return err;
9460 /* MSI test failed, go back to INTx mode */
9461 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9462 "to INTx mode. Please report this failure to the PCI "
9463 "maintainer and include system chipset information\n");
9465 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9467 pci_disable_msi(tp->pdev);
9469 tg3_flag_clear(tp, USING_MSI);
9470 tp->napi[0].irq_vec = tp->pdev->irq;
9472 err = tg3_request_irq(tp, 0);
9473 if (err)
9474 return err;
9476 /* Need to reset the chip because the MSI cycle may have terminated
9477 * with Master Abort.
9478 */
9479 tg3_full_lock(tp, 1);
9481 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9482 err = tg3_init_hw(tp, 1);
9484 tg3_full_unlock(tp);
9486 if (err)
9487 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9489 return err;
9492 static int tg3_request_firmware(struct tg3 *tp)
9494 const __be32 *fw_data;
9496 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9497 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9498 tp->fw_needed);
9499 return -ENOENT;
9502 fw_data = (void *)tp->fw->data;
9504 /* Firmware blob starts with version numbers, followed by
9505 * start address and _full_ length including BSS sections
9506 * (which must be longer than the actual data, of course).
9507 */
9509 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9510 if (tp->fw_len < (tp->fw->size - 12)) {
9511 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9512 tp->fw_len, tp->fw_needed);
9513 release_firmware(tp->fw);
9514 tp->fw = NULL;
9515 return -EINVAL;
9518 /* We no longer need firmware; we have it. */
9519 tp->fw_needed = NULL;
9520 return 0;
9523 static bool tg3_enable_msix(struct tg3 *tp)
9525 int i, rc, cpus = num_online_cpus();
9526 struct msix_entry msix_ent[tp->irq_max];
9528 if (cpus == 1)
9529 /* Just fall back to the simpler MSI mode. */
9530 return false;
9532 /*
9533 * We want as many rx rings enabled as there are cpus.
9534 * The first MSIX vector only deals with link interrupts, etc,
9535 * so we add one to the number of vectors we are requesting.
9536 */
9537 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
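/* E.g. on an 8-cpu box with irq_max == 5 this requests five vectors:
 * one for link/setup interrupts plus four rx rings. On a 2-cpu box
 * it asks for three (one link vector, two rx rings).
 */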
9539 for (i = 0; i < tp->irq_max; i++) {
9540 msix_ent[i].entry = i;
9541 msix_ent[i].vector = 0;
9544 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9545 if (rc < 0) {
9546 return false;
9547 } else if (rc != 0) {
9548 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9549 return false;
9550 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9551 tp->irq_cnt, rc);
9552 tp->irq_cnt = rc;
9555 for (i = 0; i < tp->irq_max; i++)
9556 tp->napi[i].irq_vec = msix_ent[i].vector;
9558 netif_set_real_num_tx_queues(tp->dev, 1);
9559 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9560 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9561 pci_disable_msix(tp->pdev);
9562 return false;
9565 if (tp->irq_cnt > 1) {
9566 tg3_flag_set(tp, ENABLE_RSS);
9568 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9569 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9570 tg3_flag_set(tp, ENABLE_TSS);
9571 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9575 return true;
9578 static void tg3_ints_init(struct tg3 *tp)
9580 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9581 !tg3_flag(tp, TAGGED_STATUS)) {
9582 /* All MSI supporting chips should support tagged
9583 * status. Assert that this is the case.
9584 */
9585 netdev_warn(tp->dev,
9586 "MSI without TAGGED_STATUS? Not using MSI\n");
9587 goto defcfg;
9590 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9591 tg3_flag_set(tp, USING_MSIX);
9592 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9593 tg3_flag_set(tp, USING_MSI);
9595 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9596 u32 msi_mode = tr32(MSGINT_MODE);
9597 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9598 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9599 if (!tg3_flag(tp, 1SHOT_MSI))
9600 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9601 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9603 defcfg:
9604 if (!tg3_flag(tp, USING_MSIX)) {
9605 tp->irq_cnt = 1;
9606 tp->napi[0].irq_vec = tp->pdev->irq;
9607 netif_set_real_num_tx_queues(tp->dev, 1);
9608 netif_set_real_num_rx_queues(tp->dev, 1);
9612 static void tg3_ints_fini(struct tg3 *tp)
9614 if (tg3_flag(tp, USING_MSIX))
9615 pci_disable_msix(tp->pdev);
9616 else if (tg3_flag(tp, USING_MSI))
9617 pci_disable_msi(tp->pdev);
9618 tg3_flag_clear(tp, USING_MSI);
9619 tg3_flag_clear(tp, USING_MSIX);
9620 tg3_flag_clear(tp, ENABLE_RSS);
9621 tg3_flag_clear(tp, ENABLE_TSS);
9624 static int tg3_open(struct net_device *dev)
9626 struct tg3 *tp = netdev_priv(dev);
9627 int i, err;
9629 if (tp->fw_needed) {
9630 err = tg3_request_firmware(tp);
9631 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9632 if (err)
9633 return err;
9634 } else if (err) {
9635 netdev_warn(tp->dev, "TSO capability disabled\n");
9636 tg3_flag_clear(tp, TSO_CAPABLE);
9637 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9638 netdev_notice(tp->dev, "TSO capability restored\n");
9639 tg3_flag_set(tp, TSO_CAPABLE);
9643 netif_carrier_off(tp->dev);
9645 err = tg3_power_up(tp);
9646 if (err)
9647 return err;
9649 tg3_full_lock(tp, 0);
9651 tg3_disable_ints(tp);
9652 tg3_flag_clear(tp, INIT_COMPLETE);
9654 tg3_full_unlock(tp);
9656 /*
9657 * Setup interrupts first so we know how
9658 * many NAPI resources to allocate
9659 */
9660 tg3_ints_init(tp);
9662 /* The placement of this call is tied
9663 * to the setup and use of Host TX descriptors.
9664 */
9665 err = tg3_alloc_consistent(tp);
9666 if (err)
9667 goto err_out1;
9669 tg3_napi_init(tp);
9671 tg3_napi_enable(tp);
9673 for (i = 0; i < tp->irq_cnt; i++) {
9674 struct tg3_napi *tnapi = &tp->napi[i];
9675 err = tg3_request_irq(tp, i);
9676 if (err) {
9677 for (i--; i >= 0; i--)
9678 free_irq(tnapi->irq_vec, tnapi);
9679 break;
9683 if (err)
9684 goto err_out2;
9686 tg3_full_lock(tp, 0);
9688 err = tg3_init_hw(tp, 1);
9689 if (err) {
9690 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9691 tg3_free_rings(tp);
9692 } else {
9693 if (tg3_flag(tp, TAGGED_STATUS) &&
9694 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9695 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9696 tp->timer_offset = HZ;
9697 else
9698 tp->timer_offset = HZ / 10;
9700 BUG_ON(tp->timer_offset > HZ);
9701 tp->timer_counter = tp->timer_multiplier =
9702 (HZ / tp->timer_offset);
9703 tp->asf_counter = tp->asf_multiplier =
9704 ((HZ / tp->timer_offset) * 2);
9706 init_timer(&tp->timer);
9707 tp->timer.expires = jiffies + tp->timer_offset;
9708 tp->timer.data = (unsigned long) tp;
9709 tp->timer.function = tg3_timer;
9712 tg3_full_unlock(tp);
9714 if (err)
9715 goto err_out3;
9717 if (tg3_flag(tp, USING_MSI)) {
9718 err = tg3_test_msi(tp);
9720 if (err) {
9721 tg3_full_lock(tp, 0);
9722 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9723 tg3_free_rings(tp);
9724 tg3_full_unlock(tp);
9726 goto err_out2;
9729 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9730 u32 val = tr32(PCIE_TRANSACTION_CFG);
9732 tw32(PCIE_TRANSACTION_CFG,
9733 val | PCIE_TRANS_CFG_1SHOT_MSI);
9737 tg3_phy_start(tp);
9739 tg3_full_lock(tp, 0);
9741 add_timer(&tp->timer);
9742 tg3_flag_set(tp, INIT_COMPLETE);
9743 tg3_enable_ints(tp);
9745 tg3_full_unlock(tp);
9747 netif_tx_start_all_queues(dev);
9749 /*
9750 * Reset the loopback feature if it was turned on while the device
9751 * was down; make sure that it is installed properly now.
9752 */
9753 if (dev->features & NETIF_F_LOOPBACK)
9754 tg3_set_loopback(dev, dev->features);
9756 return 0;
9758 err_out3:
9759 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9760 struct tg3_napi *tnapi = &tp->napi[i];
9761 free_irq(tnapi->irq_vec, tnapi);
9764 err_out2:
9765 tg3_napi_disable(tp);
9766 tg3_napi_fini(tp);
9767 tg3_free_consistent(tp);
9769 err_out1:
9770 tg3_ints_fini(tp);
9771 tg3_frob_aux_power(tp, false);
9772 pci_set_power_state(tp->pdev, PCI_D3hot);
9773 return err;
9776 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9777 struct rtnl_link_stats64 *);
9778 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9780 static int tg3_close(struct net_device *dev)
9782 int i;
9783 struct tg3 *tp = netdev_priv(dev);
9785 tg3_napi_disable(tp);
9786 cancel_work_sync(&tp->reset_task);
9788 netif_tx_stop_all_queues(dev);
9790 del_timer_sync(&tp->timer);
9792 tg3_phy_stop(tp);
9794 tg3_full_lock(tp, 1);
9796 tg3_disable_ints(tp);
9798 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9799 tg3_free_rings(tp);
9800 tg3_flag_clear(tp, INIT_COMPLETE);
9802 tg3_full_unlock(tp);
9804 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9805 struct tg3_napi *tnapi = &tp->napi[i];
9806 free_irq(tnapi->irq_vec, tnapi);
9809 tg3_ints_fini(tp);
9811 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9813 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9814 sizeof(tp->estats_prev));
9816 tg3_napi_fini(tp);
9818 tg3_free_consistent(tp);
9820 tg3_power_down(tp);
9822 netif_carrier_off(tp->dev);
9824 return 0;
9827 static inline u64 get_stat64(tg3_stat64_t *val)
9829 return ((u64)val->high << 32) | ((u64)val->low);
9832 static u64 calc_crc_errors(struct tg3 *tp)
9834 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9836 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9837 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9838 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9839 u32 val;
9841 spin_lock_bh(&tp->lock);
9842 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9843 tg3_writephy(tp, MII_TG3_TEST1,
9844 val | MII_TG3_TEST1_CRC_EN);
9845 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9846 } else
9847 val = 0;
9848 spin_unlock_bh(&tp->lock);
9850 tp->phy_crc_errors += val;
9852 return tp->phy_crc_errors;
9855 return get_stat64(&hw_stats->rx_fcs_errors);
9858 #define ESTAT_ADD(member) \
9859 estats->member = old_estats->member + \
9860 get_stat64(&hw_stats->member)
9862 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9864 struct tg3_ethtool_stats *estats = &tp->estats;
9865 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9866 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9868 if (!hw_stats)
9869 return old_estats;
9871 ESTAT_ADD(rx_octets);
9872 ESTAT_ADD(rx_fragments);
9873 ESTAT_ADD(rx_ucast_packets);
9874 ESTAT_ADD(rx_mcast_packets);
9875 ESTAT_ADD(rx_bcast_packets);
9876 ESTAT_ADD(rx_fcs_errors);
9877 ESTAT_ADD(rx_align_errors);
9878 ESTAT_ADD(rx_xon_pause_rcvd);
9879 ESTAT_ADD(rx_xoff_pause_rcvd);
9880 ESTAT_ADD(rx_mac_ctrl_rcvd);
9881 ESTAT_ADD(rx_xoff_entered);
9882 ESTAT_ADD(rx_frame_too_long_errors);
9883 ESTAT_ADD(rx_jabbers);
9884 ESTAT_ADD(rx_undersize_packets);
9885 ESTAT_ADD(rx_in_length_errors);
9886 ESTAT_ADD(rx_out_length_errors);
9887 ESTAT_ADD(rx_64_or_less_octet_packets);
9888 ESTAT_ADD(rx_65_to_127_octet_packets);
9889 ESTAT_ADD(rx_128_to_255_octet_packets);
9890 ESTAT_ADD(rx_256_to_511_octet_packets);
9891 ESTAT_ADD(rx_512_to_1023_octet_packets);
9892 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9893 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9894 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9895 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9896 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9898 ESTAT_ADD(tx_octets);
9899 ESTAT_ADD(tx_collisions);
9900 ESTAT_ADD(tx_xon_sent);
9901 ESTAT_ADD(tx_xoff_sent);
9902 ESTAT_ADD(tx_flow_control);
9903 ESTAT_ADD(tx_mac_errors);
9904 ESTAT_ADD(tx_single_collisions);
9905 ESTAT_ADD(tx_mult_collisions);
9906 ESTAT_ADD(tx_deferred);
9907 ESTAT_ADD(tx_excessive_collisions);
9908 ESTAT_ADD(tx_late_collisions);
9909 ESTAT_ADD(tx_collide_2times);
9910 ESTAT_ADD(tx_collide_3times);
9911 ESTAT_ADD(tx_collide_4times);
9912 ESTAT_ADD(tx_collide_5times);
9913 ESTAT_ADD(tx_collide_6times);
9914 ESTAT_ADD(tx_collide_7times);
9915 ESTAT_ADD(tx_collide_8times);
9916 ESTAT_ADD(tx_collide_9times);
9917 ESTAT_ADD(tx_collide_10times);
9918 ESTAT_ADD(tx_collide_11times);
9919 ESTAT_ADD(tx_collide_12times);
9920 ESTAT_ADD(tx_collide_13times);
9921 ESTAT_ADD(tx_collide_14times);
9922 ESTAT_ADD(tx_collide_15times);
9923 ESTAT_ADD(tx_ucast_packets);
9924 ESTAT_ADD(tx_mcast_packets);
9925 ESTAT_ADD(tx_bcast_packets);
9926 ESTAT_ADD(tx_carrier_sense_errors);
9927 ESTAT_ADD(tx_discards);
9928 ESTAT_ADD(tx_errors);
9930 ESTAT_ADD(dma_writeq_full);
9931 ESTAT_ADD(dma_write_prioq_full);
9932 ESTAT_ADD(rxbds_empty);
9933 ESTAT_ADD(rx_discards);
9934 ESTAT_ADD(rx_errors);
9935 ESTAT_ADD(rx_threshold_hit);
9937 ESTAT_ADD(dma_readq_full);
9938 ESTAT_ADD(dma_read_prioq_full);
9939 ESTAT_ADD(tx_comp_queue_full);
9941 ESTAT_ADD(ring_set_send_prod_index);
9942 ESTAT_ADD(ring_status_update);
9943 ESTAT_ADD(nic_irqs);
9944 ESTAT_ADD(nic_avoided_irqs);
9945 ESTAT_ADD(nic_tx_threshold_hit);
9947 ESTAT_ADD(mbuf_lwm_thresh_hit);
9949 return estats;
9952 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9953 struct rtnl_link_stats64 *stats)
9955 struct tg3 *tp = netdev_priv(dev);
9956 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9957 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9959 if (!hw_stats)
9960 return old_stats;
9962 stats->rx_packets = old_stats->rx_packets +
9963 get_stat64(&hw_stats->rx_ucast_packets) +
9964 get_stat64(&hw_stats->rx_mcast_packets) +
9965 get_stat64(&hw_stats->rx_bcast_packets);
9967 stats->tx_packets = old_stats->tx_packets +
9968 get_stat64(&hw_stats->tx_ucast_packets) +
9969 get_stat64(&hw_stats->tx_mcast_packets) +
9970 get_stat64(&hw_stats->tx_bcast_packets);
9972 stats->rx_bytes = old_stats->rx_bytes +
9973 get_stat64(&hw_stats->rx_octets);
9974 stats->tx_bytes = old_stats->tx_bytes +
9975 get_stat64(&hw_stats->tx_octets);
9977 stats->rx_errors = old_stats->rx_errors +
9978 get_stat64(&hw_stats->rx_errors);
9979 stats->tx_errors = old_stats->tx_errors +
9980 get_stat64(&hw_stats->tx_errors) +
9981 get_stat64(&hw_stats->tx_mac_errors) +
9982 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9983 get_stat64(&hw_stats->tx_discards);
9985 stats->multicast = old_stats->multicast +
9986 get_stat64(&hw_stats->rx_mcast_packets);
9987 stats->collisions = old_stats->collisions +
9988 get_stat64(&hw_stats->tx_collisions);
9990 stats->rx_length_errors = old_stats->rx_length_errors +
9991 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9992 get_stat64(&hw_stats->rx_undersize_packets);
9994 stats->rx_over_errors = old_stats->rx_over_errors +
9995 get_stat64(&hw_stats->rxbds_empty);
9996 stats->rx_frame_errors = old_stats->rx_frame_errors +
9997 get_stat64(&hw_stats->rx_align_errors);
9998 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9999 get_stat64(&hw_stats->tx_discards);
10000 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10001 get_stat64(&hw_stats->tx_carrier_sense_errors);
10003 stats->rx_crc_errors = old_stats->rx_crc_errors +
10004 calc_crc_errors(tp);
10006 stats->rx_missed_errors = old_stats->rx_missed_errors +
10007 get_stat64(&hw_stats->rx_discards);
10009 stats->rx_dropped = tp->rx_dropped;
10010 stats->tx_dropped = tp->tx_dropped;
10012 return stats;
10015 static inline u32 calc_crc(unsigned char *buf, int len)
10017 u32 reg;
10018 u32 tmp;
10019 int j, k;
10021 reg = 0xffffffff;
10023 for (j = 0; j < len; j++) {
10024 reg ^= buf[j];
10026 for (k = 0; k < 8; k++) {
10027 tmp = reg & 0x01;
10029 reg >>= 1;
10031 if (tmp)
10032 reg ^= 0xedb88320;
10036 return ~reg;
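/* calc_crc is the standard bit-reflected CRC-32 (polynomial
 * 0xedb88320) used by Ethernet. The multicast filter code below keeps
 * only the low 7 bits of the inverted CRC: bits 5 and 6 select one of
 * the four 32-bit MAC_HASH_REG_* registers and bits 0-4 select the
 * bit lane, giving a 128-bit imperfect hash filter.
 */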
10039 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
10041 /* accept or reject all multicast frames */
10042 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
10043 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
10044 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
10045 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
10048 static void __tg3_set_rx_mode(struct net_device *dev)
10050 struct tg3 *tp = netdev_priv(dev);
10051 u32 rx_mode;
10053 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
10054 RX_MODE_KEEP_VLAN_TAG);
10056 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
10057 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
10058 * flag clear.
10059 */
10060 if (!tg3_flag(tp, ENABLE_ASF))
10061 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
10062 #endif
10064 if (dev->flags & IFF_PROMISC) {
10065 /* Promiscuous mode. */
10066 rx_mode |= RX_MODE_PROMISC;
10067 } else if (dev->flags & IFF_ALLMULTI) {
10068 /* Accept all multicast. */
10069 tg3_set_multi(tp, 1);
10070 } else if (netdev_mc_empty(dev)) {
10071 /* Reject all multicast. */
10072 tg3_set_multi(tp, 0);
10073 } else {
10074 /* Accept one or more multicast(s). */
10075 struct netdev_hw_addr *ha;
10076 u32 mc_filter[4] = { 0, };
10077 u32 regidx;
10078 u32 bit;
10079 u32 crc;
10081 netdev_for_each_mc_addr(ha, dev) {
10082 crc = calc_crc(ha->addr, ETH_ALEN);
10083 bit = ~crc & 0x7f;
10084 regidx = (bit & 0x60) >> 5;
10085 bit &= 0x1f;
10086 mc_filter[regidx] |= (1 << bit);
10089 tw32(MAC_HASH_REG_0, mc_filter[0]);
10090 tw32(MAC_HASH_REG_1, mc_filter[1]);
10091 tw32(MAC_HASH_REG_2, mc_filter[2]);
10092 tw32(MAC_HASH_REG_3, mc_filter[3]);
10095 if (rx_mode != tp->rx_mode) {
10096 tp->rx_mode = rx_mode;
10097 tw32_f(MAC_RX_MODE, rx_mode);
10098 udelay(10);
10102 static void tg3_set_rx_mode(struct net_device *dev)
10104 struct tg3 *tp = netdev_priv(dev);
10106 if (!netif_running(dev))
10107 return;
10109 tg3_full_lock(tp, 0);
10110 __tg3_set_rx_mode(dev);
10111 tg3_full_unlock(tp);
10114 static int tg3_get_regs_len(struct net_device *dev)
10116 return TG3_REG_BLK_SIZE;
10119 static void tg3_get_regs(struct net_device *dev,
10120 struct ethtool_regs *regs, void *_p)
10122 struct tg3 *tp = netdev_priv(dev);
10124 regs->version = 0;
10126 memset(_p, 0, TG3_REG_BLK_SIZE);
10128 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10129 return;
10131 tg3_full_lock(tp, 0);
10133 tg3_dump_legacy_regs(tp, (u32 *)_p);
10135 tg3_full_unlock(tp);
10138 static int tg3_get_eeprom_len(struct net_device *dev)
10140 struct tg3 *tp = netdev_priv(dev);
10142 return tp->nvram_size;
10145 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10147 struct tg3 *tp = netdev_priv(dev);
10148 int ret;
10149 u8 *pd;
10150 u32 i, offset, len, b_offset, b_count;
10151 __be32 val;
10153 if (tg3_flag(tp, NO_NVRAM))
10154 return -EINVAL;
10156 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10157 return -EAGAIN;
10159 offset = eeprom->offset;
10160 len = eeprom->len;
10161 eeprom->len = 0;
10163 eeprom->magic = TG3_EEPROM_MAGIC;
10165 if (offset & 3) {
10166 /* adjustments to start on required 4 byte boundary */
10167 b_offset = offset & 3;
10168 b_count = 4 - b_offset;
10169 if (b_count > len) {
10170 /* i.e. offset=1 len=2 */
10171 b_count = len;
10173 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10174 if (ret)
10175 return ret;
10176 memcpy(data, ((char *)&val) + b_offset, b_count);
10177 len -= b_count;
10178 offset += b_count;
10179 eeprom->len += b_count;
10182 /* read bytes up to the last 4 byte boundary */
10183 pd = &data[eeprom->len];
10184 for (i = 0; i < (len - (len & 3)); i += 4) {
10185 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10186 if (ret) {
10187 eeprom->len += i;
10188 return ret;
10190 memcpy(pd + i, &val, 4);
10192 eeprom->len += i;
10194 if (len & 3) {
10195 /* read last bytes not ending on 4 byte boundary */
10196 pd = &data[eeprom->len];
10197 b_count = len & 3;
10198 b_offset = offset + len - b_count;
10199 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10200 if (ret)
10201 return ret;
10202 memcpy(pd, &val, b_count);
10203 eeprom->len += b_count;
10205 return 0;
10208 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
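/* tg3_set_eeprom below widens unaligned writes to whole 4-byte words
 * with a read-modify-write. Worked example (illustrative): offset 7,
 * len 6 reads the word at offset 4 into 'start' and the word at 12
 * into 'end', allocates a 12-byte bounce buffer, lays the user data
 * at buf + 3, and writes bytes 4..15 back in a single block.
 */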
10210 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10212 struct tg3 *tp = netdev_priv(dev);
10213 int ret;
10214 u32 offset, len, b_offset, odd_len;
10215 u8 *buf;
10216 __be32 start, end;
10218 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10219 return -EAGAIN;
10221 if (tg3_flag(tp, NO_NVRAM) ||
10222 eeprom->magic != TG3_EEPROM_MAGIC)
10223 return -EINVAL;
10225 offset = eeprom->offset;
10226 len = eeprom->len;
10228 if ((b_offset = (offset & 3))) {
10229 /* adjustments to start on required 4 byte boundary */
10230 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10231 if (ret)
10232 return ret;
10233 len += b_offset;
10234 offset &= ~3;
10235 if (len < 4)
10236 len = 4;
10239 odd_len = 0;
10240 if (len & 3) {
10241 /* adjustments to end on required 4 byte boundary */
10242 odd_len = 1;
10243 len = (len + 3) & ~3;
10244 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10245 if (ret)
10246 return ret;
10249 buf = data;
10250 if (b_offset || odd_len) {
10251 buf = kmalloc(len, GFP_KERNEL);
10252 if (!buf)
10253 return -ENOMEM;
10254 if (b_offset)
10255 memcpy(buf, &start, 4);
10256 if (odd_len)
10257 memcpy(buf+len-4, &end, 4);
10258 memcpy(buf + b_offset, data, eeprom->len);
10261 ret = tg3_nvram_write_block(tp, offset, len, buf);
10263 if (buf != data)
10264 kfree(buf);
10266 return ret;
10269 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10271 struct tg3 *tp = netdev_priv(dev);
10273 if (tg3_flag(tp, USE_PHYLIB)) {
10274 struct phy_device *phydev;
10275 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10276 return -EAGAIN;
10277 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10278 return phy_ethtool_gset(phydev, cmd);
10281 cmd->supported = (SUPPORTED_Autoneg);
10283 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10284 cmd->supported |= (SUPPORTED_1000baseT_Half |
10285 SUPPORTED_1000baseT_Full);
10287 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10288 cmd->supported |= (SUPPORTED_100baseT_Half |
10289 SUPPORTED_100baseT_Full |
10290 SUPPORTED_10baseT_Half |
10291 SUPPORTED_10baseT_Full |
10292 SUPPORTED_TP);
10293 cmd->port = PORT_TP;
10294 } else {
10295 cmd->supported |= SUPPORTED_FIBRE;
10296 cmd->port = PORT_FIBRE;
10299 cmd->advertising = tp->link_config.advertising;
10300 if (tg3_flag(tp, PAUSE_AUTONEG)) {
10301 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10302 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10303 cmd->advertising |= ADVERTISED_Pause;
10304 } else {
10305 cmd->advertising |= ADVERTISED_Pause |
10306 ADVERTISED_Asym_Pause;
10308 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10309 cmd->advertising |= ADVERTISED_Asym_Pause;
10312 if (netif_running(dev)) {
10313 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10314 cmd->duplex = tp->link_config.active_duplex;
10315 } else {
10316 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10317 cmd->duplex = DUPLEX_INVALID;
10319 cmd->phy_address = tp->phy_addr;
10320 cmd->transceiver = XCVR_INTERNAL;
10321 cmd->autoneg = tp->link_config.autoneg;
10322 cmd->maxtxpkt = 0;
10323 cmd->maxrxpkt = 0;
10324 return 0;
10327 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10329 struct tg3 *tp = netdev_priv(dev);
10330 u32 speed = ethtool_cmd_speed(cmd);
10332 if (tg3_flag(tp, USE_PHYLIB)) {
10333 struct phy_device *phydev;
10334 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10335 return -EAGAIN;
10336 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10337 return phy_ethtool_sset(phydev, cmd);
10340 if (cmd->autoneg != AUTONEG_ENABLE &&
10341 cmd->autoneg != AUTONEG_DISABLE)
10342 return -EINVAL;
10344 if (cmd->autoneg == AUTONEG_DISABLE &&
10345 cmd->duplex != DUPLEX_FULL &&
10346 cmd->duplex != DUPLEX_HALF)
10347 return -EINVAL;
10349 if (cmd->autoneg == AUTONEG_ENABLE) {
10350 u32 mask = ADVERTISED_Autoneg |
10351 ADVERTISED_Pause |
10352 ADVERTISED_Asym_Pause;
10354 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10355 mask |= ADVERTISED_1000baseT_Half |
10356 ADVERTISED_1000baseT_Full;
10358 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10359 mask |= ADVERTISED_100baseT_Half |
10360 ADVERTISED_100baseT_Full |
10361 ADVERTISED_10baseT_Half |
10362 ADVERTISED_10baseT_Full |
10363 ADVERTISED_TP;
10364 else
10365 mask |= ADVERTISED_FIBRE;
10367 if (cmd->advertising & ~mask)
10368 return -EINVAL;
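/* Past validation the mask is narrowed to the speed/duplex modes only,
 * so port and pause bits are stripped from the stored advertisement
 * (ADVERTISED_Autoneg is added back when it is written to link_config).
 */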
10370 mask &= (ADVERTISED_1000baseT_Half |
10371 ADVERTISED_1000baseT_Full |
10372 ADVERTISED_100baseT_Half |
10373 ADVERTISED_100baseT_Full |
10374 ADVERTISED_10baseT_Half |
10375 ADVERTISED_10baseT_Full);
10377 cmd->advertising &= mask;
10378 } else {
10379 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10380 if (speed != SPEED_1000)
10381 return -EINVAL;
10383 if (cmd->duplex != DUPLEX_FULL)
10384 return -EINVAL;
10385 } else {
10386 if (speed != SPEED_100 &&
10387 speed != SPEED_10)
10388 return -EINVAL;
10392 tg3_full_lock(tp, 0);
10394 tp->link_config.autoneg = cmd->autoneg;
10395 if (cmd->autoneg == AUTONEG_ENABLE) {
10396 tp->link_config.advertising = (cmd->advertising |
10397 ADVERTISED_Autoneg);
10398 tp->link_config.speed = SPEED_INVALID;
10399 tp->link_config.duplex = DUPLEX_INVALID;
10400 } else {
10401 tp->link_config.advertising = 0;
10402 tp->link_config.speed = speed;
10403 tp->link_config.duplex = cmd->duplex;
10406 tp->link_config.orig_speed = tp->link_config.speed;
10407 tp->link_config.orig_duplex = tp->link_config.duplex;
10408 tp->link_config.orig_autoneg = tp->link_config.autoneg;
10410 if (netif_running(dev))
10411 tg3_setup_phy(tp, 1);
10413 tg3_full_unlock(tp);
10415 return 0;
10418 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10420 struct tg3 *tp = netdev_priv(dev);
10422 strcpy(info->driver, DRV_MODULE_NAME);
10423 strcpy(info->version, DRV_MODULE_VERSION);
10424 strcpy(info->fw_version, tp->fw_ver);
10425 strcpy(info->bus_info, pci_name(tp->pdev));
10428 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10430 struct tg3 *tp = netdev_priv(dev);
10432 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10433 wol->supported = WAKE_MAGIC;
10434 else
10435 wol->supported = 0;
10436 wol->wolopts = 0;
10437 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10438 wol->wolopts = WAKE_MAGIC;
10439 memset(&wol->sopass, 0, sizeof(wol->sopass));
10442 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10444 struct tg3 *tp = netdev_priv(dev);
10445 struct device *dp = &tp->pdev->dev;
10447 if (wol->wolopts & ~WAKE_MAGIC)
10448 return -EINVAL;
10449 if ((wol->wolopts & WAKE_MAGIC) &&
10450 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10451 return -EINVAL;
10453 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10455 spin_lock_bh(&tp->lock);
10456 if (device_may_wakeup(dp))
10457 tg3_flag_set(tp, WOL_ENABLE);
10458 else
10459 tg3_flag_clear(tp, WOL_ENABLE);
10460 spin_unlock_bh(&tp->lock);
10462 return 0;
10465 static u32 tg3_get_msglevel(struct net_device *dev)
10467 struct tg3 *tp = netdev_priv(dev);
10468 return tp->msg_enable;
10471 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10473 struct tg3 *tp = netdev_priv(dev);
10474 tp->msg_enable = value;
10477 static int tg3_nway_reset(struct net_device *dev)
10479 struct tg3 *tp = netdev_priv(dev);
10480 int r;
10482 if (!netif_running(dev))
10483 return -EAGAIN;
10485 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10486 return -EINVAL;
10488 if (tg3_flag(tp, USE_PHYLIB)) {
10489 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10490 return -EAGAIN;
10491 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10492 } else {
10493 u32 bmcr;
10495 spin_lock_bh(&tp->lock);
10496 r = -EINVAL;
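/* The first read is a dummy whose result is discarded; only the second
 * read's return status gates the autoneg restart below.
 */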
10497 tg3_readphy(tp, MII_BMCR, &bmcr);
10498 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10499 ((bmcr & BMCR_ANENABLE) ||
10500 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10501 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10502 BMCR_ANENABLE);
10503 r = 0;
10505 spin_unlock_bh(&tp->lock);
10508 return r;
10511 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10513 struct tg3 *tp = netdev_priv(dev);
10515 ering->rx_max_pending = tp->rx_std_ring_mask;
10516 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10517 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10518 else
10519 ering->rx_jumbo_max_pending = 0;
10521 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10523 ering->rx_pending = tp->rx_pending;
10524 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10525 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10526 else
10527 ering->rx_jumbo_pending = 0;
10529 ering->tx_pending = tp->napi[0].tx_pending;
10532 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10534 struct tg3 *tp = netdev_priv(dev);
10535 int i, irq_sync = 0, err = 0;
10537 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10538 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10539 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10540 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10541 (tg3_flag(tp, TSO_BUG) &&
10542 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10543 return -EINVAL;
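/* Rationale for the tx bounds above: a maximally fragmented packet can
 * occupy MAX_SKB_FRAGS + 1 descriptors, and parts with the TSO_BUG
 * workaround appear to need roughly a 3x margin on top of that.
 */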
10545 if (netif_running(dev)) {
10546 tg3_phy_stop(tp);
10547 tg3_netif_stop(tp);
10548 irq_sync = 1;
10551 tg3_full_lock(tp, irq_sync);
10553 tp->rx_pending = ering->rx_pending;
10555 if (tg3_flag(tp, MAX_RXPEND_64) &&
10556 tp->rx_pending > 63)
10557 tp->rx_pending = 63;
10558 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10560 for (i = 0; i < tp->irq_max; i++)
10561 tp->napi[i].tx_pending = ering->tx_pending;
10563 if (netif_running(dev)) {
10564 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10565 err = tg3_restart_hw(tp, 1);
10566 if (!err)
10567 tg3_netif_start(tp);
10570 tg3_full_unlock(tp);
10572 if (irq_sync && !err)
10573 tg3_phy_start(tp);
10575 return err;
10578 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10580 struct tg3 *tp = netdev_priv(dev);
10582 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10584 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10585 epause->rx_pause = 1;
10586 else
10587 epause->rx_pause = 0;
10589 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10590 epause->tx_pause = 1;
10591 else
10592 epause->tx_pause = 0;
10595 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10597 struct tg3 *tp = netdev_priv(dev);
10598 int err = 0;
10600 if (tg3_flag(tp, USE_PHYLIB)) {
10601 u32 newadv;
10602 struct phy_device *phydev;
10604 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10606 if (!(phydev->supported & SUPPORTED_Pause) ||
10607 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10608 (epause->rx_pause != epause->tx_pause)))
10609 return -EINVAL;
10611 tp->link_config.flowctrl = 0;
10612 if (epause->rx_pause) {
10613 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10615 if (epause->tx_pause) {
10616 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10617 newadv = ADVERTISED_Pause;
10618 } else
10619 newadv = ADVERTISED_Pause |
10620 ADVERTISED_Asym_Pause;
10621 } else if (epause->tx_pause) {
10622 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10623 newadv = ADVERTISED_Asym_Pause;
10624 } else
10625 newadv = 0;
10627 if (epause->autoneg)
10628 tg3_flag_set(tp, PAUSE_AUTONEG);
10629 else
10630 tg3_flag_clear(tp, PAUSE_AUTONEG);
10632 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10633 u32 oldadv = phydev->advertising &
10634 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10635 if (oldadv != newadv) {
10636 phydev->advertising &=
10637 ~(ADVERTISED_Pause |
10638 ADVERTISED_Asym_Pause);
10639 phydev->advertising |= newadv;
10640 if (phydev->autoneg) {
10641 /*
10642 * Always renegotiate the link to
10643 * inform our link partner of our
10644 * flow control settings, even if the
10645 * flow control is forced. Let
10646 * tg3_adjust_link() do the final
10647 * flow control setup.
10648 */
10649 return phy_start_aneg(phydev);
10653 if (!epause->autoneg)
10654 tg3_setup_flow_control(tp, 0, 0);
10655 } else {
10656 tp->link_config.orig_advertising &=
10657 ~(ADVERTISED_Pause |
10658 ADVERTISED_Asym_Pause);
10659 tp->link_config.orig_advertising |= newadv;
10661 } else {
10662 int irq_sync = 0;
10664 if (netif_running(dev)) {
10665 tg3_netif_stop(tp);
10666 irq_sync = 1;
10669 tg3_full_lock(tp, irq_sync);
10671 if (epause->autoneg)
10672 tg3_flag_set(tp, PAUSE_AUTONEG);
10673 else
10674 tg3_flag_clear(tp, PAUSE_AUTONEG);
10675 if (epause->rx_pause)
10676 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10677 else
10678 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10679 if (epause->tx_pause)
10680 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10681 else
10682 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10684 if (netif_running(dev)) {
10685 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10686 err = tg3_restart_hw(tp, 1);
10687 if (!err)
10688 tg3_netif_start(tp);
10691 tg3_full_unlock(tp);
10694 return err;
10697 static int tg3_get_sset_count(struct net_device *dev, int sset)
10699 switch (sset) {
10700 case ETH_SS_TEST:
10701 return TG3_NUM_TEST;
10702 case ETH_SS_STATS:
10703 return TG3_NUM_STATS;
10704 default:
10705 return -EOPNOTSUPP;
10709 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10711 switch (stringset) {
10712 case ETH_SS_STATS:
10713 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10714 break;
10715 case ETH_SS_TEST:
10716 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10717 break;
10718 default:
10719 WARN_ON(1); /* we need a WARN() */
10720 break;
10724 static int tg3_set_phys_id(struct net_device *dev,
10725 enum ethtool_phys_id_state state)
10727 struct tg3 *tp = netdev_priv(dev);
10729 if (!netif_running(tp->dev))
10730 return -EAGAIN;
10732 switch (state) {
10733 case ETHTOOL_ID_ACTIVE:
10734 return 1; /* cycle on/off once per second */
10736 case ETHTOOL_ID_ON:
10737 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10738 LED_CTRL_1000MBPS_ON |
10739 LED_CTRL_100MBPS_ON |
10740 LED_CTRL_10MBPS_ON |
10741 LED_CTRL_TRAFFIC_OVERRIDE |
10742 LED_CTRL_TRAFFIC_BLINK |
10743 LED_CTRL_TRAFFIC_LED);
10744 break;
10746 case ETHTOOL_ID_OFF:
10747 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10748 LED_CTRL_TRAFFIC_OVERRIDE);
10749 break;
10751 case ETHTOOL_ID_INACTIVE:
10752 tw32(MAC_LED_CTRL, tp->led_ctrl);
10753 break;
10756 return 0;
10759 static void tg3_get_ethtool_stats(struct net_device *dev,
10760 struct ethtool_stats *estats, u64 *tmp_stats)
10762 struct tg3 *tp = netdev_priv(dev);
10763 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10766 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
10768 int i;
10769 __be32 *buf;
10770 u32 offset = 0, len = 0;
10771 u32 magic, val;
10773 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10774 return NULL;
10776 if (magic == TG3_EEPROM_MAGIC) {
10777 for (offset = TG3_NVM_DIR_START;
10778 offset < TG3_NVM_DIR_END;
10779 offset += TG3_NVM_DIRENT_SIZE) {
10780 if (tg3_nvram_read(tp, offset, &val))
10781 return NULL;
10783 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10784 TG3_NVM_DIRTYPE_EXTVPD)
10785 break;
10788 if (offset != TG3_NVM_DIR_END) {
10789 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10790 if (tg3_nvram_read(tp, offset + 4, &offset))
10791 return NULL;
10793 offset = tg3_nvram_logical_addr(tp, offset);
10797 if (!offset || !len) {
10798 offset = TG3_NVM_VPD_OFF;
10799 len = TG3_NVM_VPD_LEN;
10802 buf = kmalloc(len, GFP_KERNEL);
10803 if (buf == NULL)
10804 return NULL;
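/* Two read paths follow: parts with the standard EEPROM magic read the
 * VPD words straight out of NVRAM, everything else goes through the PCI
 * VPD capability in up to three pci_read_vpd() passes.
 */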
10806 if (magic == TG3_EEPROM_MAGIC) {
10807 for (i = 0; i < len; i += 4) {
10808 /* The data is in little-endian format in NVRAM.
10809 * Use the big-endian read routines to preserve
10810 * the byte order as it exists in NVRAM.
10811 */
10812 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10813 goto error;
10815 } else {
10816 u8 *ptr;
10817 ssize_t cnt;
10818 unsigned int pos = 0;
10820 ptr = (u8 *)&buf[0];
10821 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10822 cnt = pci_read_vpd(tp->pdev, pos,
10823 len - pos, ptr);
10824 if (cnt == -ETIMEDOUT || cnt == -EINTR)
10825 cnt = 0;
10826 else if (cnt < 0)
10827 goto error;
10829 if (pos != len)
10830 goto error;
10833 *vpdlen = len;
10835 return buf;
10837 error:
10838 kfree(buf);
10839 return NULL;
10840 }
10842 #define NVRAM_TEST_SIZE 0x100
10843 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10844 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10845 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10846 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
10847 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
10848 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
10849 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10850 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10852 static int tg3_test_nvram(struct tg3 *tp)
10854 u32 csum, magic, len;
10855 __be32 *buf;
10856 int i, j, k, err = 0, size;
10858 if (tg3_flag(tp, NO_NVRAM))
10859 return 0;
10861 if (tg3_nvram_read(tp, 0, &magic) != 0)
10862 return -EIO;
10864 if (magic == TG3_EEPROM_MAGIC)
10865 size = NVRAM_TEST_SIZE;
10866 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10867 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10868 TG3_EEPROM_SB_FORMAT_1) {
10869 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10870 case TG3_EEPROM_SB_REVISION_0:
10871 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10872 break;
10873 case TG3_EEPROM_SB_REVISION_2:
10874 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10875 break;
10876 case TG3_EEPROM_SB_REVISION_3:
10877 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10878 break;
10879 case TG3_EEPROM_SB_REVISION_4:
10880 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10881 break;
10882 case TG3_EEPROM_SB_REVISION_5:
10883 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10884 break;
10885 case TG3_EEPROM_SB_REVISION_6:
10886 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10887 break;
10888 default:
10889 return -EIO;
10891 } else
10892 return 0;
10893 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10894 size = NVRAM_SELFBOOT_HW_SIZE;
10895 else
10896 return -EIO;
10898 buf = kmalloc(size, GFP_KERNEL);
10899 if (buf == NULL)
10900 return -ENOMEM;
10902 err = -EIO;
10903 for (i = 0, j = 0; i < size; i += 4, j++) {
10904 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10905 if (err)
10906 break;
10908 if (i < size)
10909 goto out;
10911 /* Selfboot format */
10912 magic = be32_to_cpu(buf[0]);
10913 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10914 TG3_EEPROM_MAGIC_FW) {
10915 u8 *buf8 = (u8 *) buf, csum8 = 0;
10917 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10918 TG3_EEPROM_SB_REVISION_2) {
10919 /* For rev 2, the csum doesn't include the MBA. */
10920 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10921 csum8 += buf8[i];
10922 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10923 csum8 += buf8[i];
10924 } else {
10925 for (i = 0; i < size; i++)
10926 csum8 += buf8[i];
10929 if (csum8 == 0) {
10930 err = 0;
10931 goto out;
10934 err = -EIO;
10935 goto out;
10938 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10939 TG3_EEPROM_MAGIC_HW) {
10940 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10941 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10942 u8 *buf8 = (u8 *) buf;
10944 /* Separate the parity bits and the data bytes. */
10945 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10946 if ((i == 0) || (i == 8)) {
10947 int l;
10948 u8 msk;
10950 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10951 parity[k++] = buf8[i] & msk;
10952 i++;
10953 } else if (i == 16) {
10954 int l;
10955 u8 msk;
10957 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10958 parity[k++] = buf8[i] & msk;
10959 i++;
10961 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10962 parity[k++] = buf8[i] & msk;
10963 i++;
10964 }
10965 data[j++] = buf8[i];
10966 }
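/* The check below enforces odd per-byte parity: an even-weight data byte
 * must have its parity bit set, an odd-weight byte must have it clear;
 * any other combination fails the test.
 */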
10968 err = -EIO;
10969 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10970 u8 hw8 = hweight8(data[i]);
10972 if ((hw8 & 0x1) && parity[i])
10973 goto out;
10974 else if (!(hw8 & 0x1) && !parity[i])
10975 goto out;
10977 err = 0;
10978 goto out;
10981 err = -EIO;
10983 /* Bootstrap checksum at offset 0x10 */
10984 csum = calc_crc((unsigned char *) buf, 0x10);
10985 if (csum != le32_to_cpu(buf[0x10/4]))
10986 goto out;
10988 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10989 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10990 if (csum != le32_to_cpu(buf[0xfc/4]))
10991 goto out;
10993 kfree(buf);
10995 buf = tg3_vpd_readblock(tp, &len);
10996 if (!buf)
10997 return -ENOMEM;
10999 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11000 if (i > 0) {
11001 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11002 if (j < 0)
11003 goto out;
11005 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11006 goto out;
11008 i += PCI_VPD_LRDT_TAG_SIZE;
11009 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11010 PCI_VPD_RO_KEYWORD_CHKSUM);
11011 if (j > 0) {
11012 u8 csum8 = 0;
11014 j += PCI_VPD_INFO_FLD_HDR_SIZE;
11016 for (i = 0; i <= j; i++)
11017 csum8 += ((u8 *)buf)[i];
11019 if (csum8)
11020 goto out;
11024 err = 0;
11026 out:
11027 kfree(buf);
11028 return err;
11031 #define TG3_SERDES_TIMEOUT_SEC 2
11032 #define TG3_COPPER_TIMEOUT_SEC 6
11034 static int tg3_test_link(struct tg3 *tp)
11036 int i, max;
11038 if (!netif_running(tp->dev))
11039 return -ENODEV;
11041 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11042 max = TG3_SERDES_TIMEOUT_SEC;
11043 else
11044 max = TG3_COPPER_TIMEOUT_SEC;
11046 for (i = 0; i < max; i++) {
11047 if (netif_carrier_ok(tp->dev))
11048 return 0;
11050 if (msleep_interruptible(1000))
11051 break;
11054 return -EIO;
11057 /* Only test the commonly used registers */
11058 static int tg3_test_registers(struct tg3 *tp)
11060 int i, is_5705, is_5750;
11061 u32 offset, read_mask, write_mask, val, save_val, read_val;
11062 static struct {
11063 u16 offset;
11064 u16 flags;
11065 #define TG3_FL_5705 0x1
11066 #define TG3_FL_NOT_5705 0x2
11067 #define TG3_FL_NOT_5788 0x4
11068 #define TG3_FL_NOT_5750 0x8
11069 u32 read_mask;
11070 u32 write_mask;
11071 } reg_tbl[] = {
11072 /* MAC Control Registers */
11073 { MAC_MODE, TG3_FL_NOT_5705,
11074 0x00000000, 0x00ef6f8c },
11075 { MAC_MODE, TG3_FL_5705,
11076 0x00000000, 0x01ef6b8c },
11077 { MAC_STATUS, TG3_FL_NOT_5705,
11078 0x03800107, 0x00000000 },
11079 { MAC_STATUS, TG3_FL_5705,
11080 0x03800100, 0x00000000 },
11081 { MAC_ADDR_0_HIGH, 0x0000,
11082 0x00000000, 0x0000ffff },
11083 { MAC_ADDR_0_LOW, 0x0000,
11084 0x00000000, 0xffffffff },
11085 { MAC_RX_MTU_SIZE, 0x0000,
11086 0x00000000, 0x0000ffff },
11087 { MAC_TX_MODE, 0x0000,
11088 0x00000000, 0x00000070 },
11089 { MAC_TX_LENGTHS, 0x0000,
11090 0x00000000, 0x00003fff },
11091 { MAC_RX_MODE, TG3_FL_NOT_5705,
11092 0x00000000, 0x000007fc },
11093 { MAC_RX_MODE, TG3_FL_5705,
11094 0x00000000, 0x000007dc },
11095 { MAC_HASH_REG_0, 0x0000,
11096 0x00000000, 0xffffffff },
11097 { MAC_HASH_REG_1, 0x0000,
11098 0x00000000, 0xffffffff },
11099 { MAC_HASH_REG_2, 0x0000,
11100 0x00000000, 0xffffffff },
11101 { MAC_HASH_REG_3, 0x0000,
11102 0x00000000, 0xffffffff },
11104 /* Receive Data and Receive BD Initiator Control Registers. */
11105 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11106 0x00000000, 0xffffffff },
11107 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11108 0x00000000, 0xffffffff },
11109 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11110 0x00000000, 0x00000003 },
11111 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11112 0x00000000, 0xffffffff },
11113 { RCVDBDI_STD_BD+0, 0x0000,
11114 0x00000000, 0xffffffff },
11115 { RCVDBDI_STD_BD+4, 0x0000,
11116 0x00000000, 0xffffffff },
11117 { RCVDBDI_STD_BD+8, 0x0000,
11118 0x00000000, 0xffff0002 },
11119 { RCVDBDI_STD_BD+0xc, 0x0000,
11120 0x00000000, 0xffffffff },
11122 /* Receive BD Initiator Control Registers. */
11123 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11124 0x00000000, 0xffffffff },
11125 { RCVBDI_STD_THRESH, TG3_FL_5705,
11126 0x00000000, 0x000003ff },
11127 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11128 0x00000000, 0xffffffff },
11130 /* Host Coalescing Control Registers. */
11131 { HOSTCC_MODE, TG3_FL_NOT_5705,
11132 0x00000000, 0x00000004 },
11133 { HOSTCC_MODE, TG3_FL_5705,
11134 0x00000000, 0x000000f6 },
11135 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11136 0x00000000, 0xffffffff },
11137 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11138 0x00000000, 0x000003ff },
11139 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11140 0x00000000, 0xffffffff },
11141 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11142 0x00000000, 0x000003ff },
11143 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11144 0x00000000, 0xffffffff },
11145 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11146 0x00000000, 0x000000ff },
11147 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11148 0x00000000, 0xffffffff },
11149 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11150 0x00000000, 0x000000ff },
11151 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11152 0x00000000, 0xffffffff },
11153 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11154 0x00000000, 0xffffffff },
11155 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11156 0x00000000, 0xffffffff },
11157 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11158 0x00000000, 0x000000ff },
11159 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11160 0x00000000, 0xffffffff },
11161 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11162 0x00000000, 0x000000ff },
11163 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11164 0x00000000, 0xffffffff },
11165 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11166 0x00000000, 0xffffffff },
11167 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11168 0x00000000, 0xffffffff },
11169 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11170 0x00000000, 0xffffffff },
11171 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11172 0x00000000, 0xffffffff },
11173 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11174 0xffffffff, 0x00000000 },
11175 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11176 0xffffffff, 0x00000000 },
11178 /* Buffer Manager Control Registers. */
11179 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11180 0x00000000, 0x007fff80 },
11181 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11182 0x00000000, 0x007fffff },
11183 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11184 0x00000000, 0x0000003f },
11185 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11186 0x00000000, 0x000001ff },
11187 { BUFMGR_MB_HIGH_WATER, 0x0000,
11188 0x00000000, 0x000001ff },
11189 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11190 0xffffffff, 0x00000000 },
11191 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11192 0xffffffff, 0x00000000 },
11194 /* Mailbox Registers */
11195 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11196 0x00000000, 0x000001ff },
11197 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11198 0x00000000, 0x000001ff },
11199 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11200 0x00000000, 0x000007ff },
11201 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11202 0x00000000, 0x000001ff },
11204 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11207 is_5705 = is_5750 = 0;
11208 if (tg3_flag(tp, 5705_PLUS)) {
11209 is_5705 = 1;
11210 if (tg3_flag(tp, 5750_PLUS))
11211 is_5750 = 1;
11214 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11215 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11216 continue;
11218 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11219 continue;
11221 if (tg3_flag(tp, IS_5788) &&
11222 (reg_tbl[i].flags & TG3_FL_NOT_5788))
11223 continue;
11225 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11226 continue;
11228 offset = (u32) reg_tbl[i].offset;
11229 read_mask = reg_tbl[i].read_mask;
11230 write_mask = reg_tbl[i].write_mask;
11232 /* Save the original register content */
11233 save_val = tr32(offset);
11235 /* Determine the read-only value. */
11236 read_val = save_val & read_mask;
11238 /* Write zero to the register, then make sure the read-only bits
11239 * are not changed and the read/write bits are all zeros.
11240 */
11241 tw32(offset, 0);
11243 val = tr32(offset);
11245 /* Test the read-only and read/write bits. */
11246 if (((val & read_mask) != read_val) || (val & write_mask))
11247 goto out;
11249 /* Write ones to all the bits defined by RdMask and WrMask, then
11250 * make sure the read-only bits are not changed and the
11251 * read/write bits are all ones.
11252 */
11253 tw32(offset, read_mask | write_mask);
11255 val = tr32(offset);
11257 /* Test the read-only bits. */
11258 if ((val & read_mask) != read_val)
11259 goto out;
11261 /* Test the read/write bits. */
11262 if ((val & write_mask) != write_mask)
11263 goto out;
11265 tw32(offset, save_val);
11268 return 0;
11270 out:
11271 if (netif_msg_hw(tp))
11272 netdev_err(tp->dev,
11273 "Register test failed at offset %x\n", offset);
11274 tw32(offset, save_val);
11275 return -EIO;
11278 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11280 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
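/* All-zeros and all-ones catch stuck-at bits; the alternating 0xaa55a55a
 * pattern flips neighbouring bits to catch simple coupling faults.
 */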
11281 int i;
11282 u32 j;
11284 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11285 for (j = 0; j < len; j += 4) {
11286 u32 val;
11288 tg3_write_mem(tp, offset + j, test_pattern[i]);
11289 tg3_read_mem(tp, offset + j, &val);
11290 if (val != test_pattern[i])
11291 return -EIO;
11294 return 0;
11297 static int tg3_test_memory(struct tg3 *tp)
11299 static struct mem_entry {
11300 u32 offset;
11301 u32 len;
11302 } mem_tbl_570x[] = {
11303 { 0x00000000, 0x00b50},
11304 { 0x00002000, 0x1c000},
11305 { 0xffffffff, 0x00000}
11306 }, mem_tbl_5705[] = {
11307 { 0x00000100, 0x0000c},
11308 { 0x00000200, 0x00008},
11309 { 0x00004000, 0x00800},
11310 { 0x00006000, 0x01000},
11311 { 0x00008000, 0x02000},
11312 { 0x00010000, 0x0e000},
11313 { 0xffffffff, 0x00000}
11314 }, mem_tbl_5755[] = {
11315 { 0x00000200, 0x00008},
11316 { 0x00004000, 0x00800},
11317 { 0x00006000, 0x00800},
11318 { 0x00008000, 0x02000},
11319 { 0x00010000, 0x0c000},
11320 { 0xffffffff, 0x00000}
11321 }, mem_tbl_5906[] = {
11322 { 0x00000200, 0x00008},
11323 { 0x00004000, 0x00400},
11324 { 0x00006000, 0x00400},
11325 { 0x00008000, 0x01000},
11326 { 0x00010000, 0x01000},
11327 { 0xffffffff, 0x00000}
11328 }, mem_tbl_5717[] = {
11329 { 0x00000200, 0x00008},
11330 { 0x00010000, 0x0a000},
11331 { 0x00020000, 0x13c00},
11332 { 0xffffffff, 0x00000}
11333 }, mem_tbl_57765[] = {
11334 { 0x00000200, 0x00008},
11335 { 0x00004000, 0x00800},
11336 { 0x00006000, 0x09800},
11337 { 0x00010000, 0x0a000},
11338 { 0xffffffff, 0x00000}
11340 struct mem_entry *mem_tbl;
11341 int err = 0;
11342 int i;
11344 if (tg3_flag(tp, 5717_PLUS))
11345 mem_tbl = mem_tbl_5717;
11346 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11347 mem_tbl = mem_tbl_57765;
11348 else if (tg3_flag(tp, 5755_PLUS))
11349 mem_tbl = mem_tbl_5755;
11350 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11351 mem_tbl = mem_tbl_5906;
11352 else if (tg3_flag(tp, 5705_PLUS))
11353 mem_tbl = mem_tbl_5705;
11354 else
11355 mem_tbl = mem_tbl_570x;
11357 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11358 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11359 if (err)
11360 break;
11363 return err;
11366 #define TG3_TSO_MSS 500
11368 #define TG3_TSO_IP_HDR_LEN 20
11369 #define TG3_TSO_TCP_HDR_LEN 20
11370 #define TG3_TSO_TCP_OPT_LEN 12
11372 static const u8 tg3_tso_header[] = {
11373 0x08, 0x00,
11374 0x45, 0x00, 0x00, 0x00,
11375 0x00, 0x00, 0x40, 0x00,
11376 0x40, 0x06, 0x00, 0x00,
11377 0x0a, 0x00, 0x00, 0x01,
11378 0x0a, 0x00, 0x00, 0x02,
11379 0x0d, 0x00, 0xe0, 0x00,
11380 0x00, 0x00, 0x01, 0x00,
11381 0x00, 0x00, 0x02, 0x00,
11382 0x80, 0x10, 0x10, 0x00,
11383 0x14, 0x09, 0x00, 0x00,
11384 0x01, 0x01, 0x08, 0x0a,
11385 0x11, 0x11, 0x11, 0x11,
11386 0x11, 0x11, 0x11, 0x11,
11387 };
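/* Illustrative decode of the canned header above: a 2-byte Ethertype
 * (0x0800, IPv4), a 20-byte IPv4 header (protocol TCP, 10.0.0.1 ->
 * 10.0.0.2) and a 32-byte TCP header (20 fixed bytes plus 12 bytes of
 * timestamp options), matching the TG3_TSO_*_LEN constants above.
 */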
11389 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11391 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11392 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11393 u32 budget;
11394 struct sk_buff *skb, *rx_skb;
11395 u8 *tx_data;
11396 dma_addr_t map;
11397 int num_pkts, tx_len, rx_len, i, err;
11398 struct tg3_rx_buffer_desc *desc;
11399 struct tg3_napi *tnapi, *rnapi;
11400 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11402 tnapi = &tp->napi[0];
11403 rnapi = &tp->napi[0];
11404 if (tp->irq_cnt > 1) {
11405 if (tg3_flag(tp, ENABLE_RSS))
11406 rnapi = &tp->napi[1];
11407 if (tg3_flag(tp, ENABLE_TSS))
11408 tnapi = &tp->napi[1];
11410 coal_now = tnapi->coal_now | rnapi->coal_now;
11412 err = -EIO;
11414 tx_len = pktsz;
11415 skb = netdev_alloc_skb(tp->dev, tx_len);
11416 if (!skb)
11417 return -ENOMEM;
11419 tx_data = skb_put(skb, tx_len);
11420 memcpy(tx_data, tp->dev->dev_addr, 6);
11421 memset(tx_data + 6, 0x0, 8);
11423 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11425 if (tso_loopback) {
11426 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11428 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11429 TG3_TSO_TCP_OPT_LEN;
11431 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11432 sizeof(tg3_tso_header));
11433 mss = TG3_TSO_MSS;
11435 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11436 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11438 /* Set the total length field in the IP header */
11439 iph->tot_len = htons((u16)(mss + hdr_len));
11441 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11442 TXD_FLAG_CPU_POST_DMA);
11444 if (tg3_flag(tp, HW_TSO_1) ||
11445 tg3_flag(tp, HW_TSO_2) ||
11446 tg3_flag(tp, HW_TSO_3)) {
11447 struct tcphdr *th;
11448 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11449 th = (struct tcphdr *)&tx_data[val];
11450 th->check = 0;
11451 } else
11452 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11454 if (tg3_flag(tp, HW_TSO_3)) {
11455 mss |= (hdr_len & 0xc) << 12;
11456 if (hdr_len & 0x10)
11457 base_flags |= 0x00000010;
11458 base_flags |= (hdr_len & 0x3e0) << 5;
11459 } else if (tg3_flag(tp, HW_TSO_2))
11460 mss |= hdr_len << 9;
11461 else if (tg3_flag(tp, HW_TSO_1) ||
11462 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11463 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11464 } else {
11465 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11468 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11469 } else {
11470 num_pkts = 1;
11471 data_off = ETH_HLEN;
11474 for (i = data_off; i < tx_len; i++)
11475 tx_data[i] = (u8) (i & 0xff);
11477 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11478 if (pci_dma_mapping_error(tp->pdev, map)) {
11479 dev_kfree_skb(skb);
11480 return -EIO;
11483 val = tnapi->tx_prod;
11484 tnapi->tx_buffers[val].skb = skb;
11485 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11487 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11488 rnapi->coal_now);
11490 udelay(10);
11492 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11494 budget = tg3_tx_avail(tnapi);
11495 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11496 base_flags | TXD_FLAG_END, mss, 0)) {
11497 tnapi->tx_buffers[val].skb = NULL;
11498 dev_kfree_skb(skb);
11499 return -EIO;
11502 tnapi->tx_prod++;
11504 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11505 tr32_mailbox(tnapi->prodmbox);
11507 udelay(10);
11509 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11510 for (i = 0; i < 35; i++) {
11511 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11512 coal_now);
11514 udelay(10);
11516 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11517 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11518 if ((tx_idx == tnapi->tx_prod) &&
11519 (rx_idx == (rx_start_idx + num_pkts)))
11520 break;
11523 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0);
11524 dev_kfree_skb(skb);
11526 if (tx_idx != tnapi->tx_prod)
11527 goto out;
11529 if (rx_idx != rx_start_idx + num_pkts)
11530 goto out;
11532 val = data_off;
11533 while (rx_idx != rx_start_idx) {
11534 desc = &rnapi->rx_rcb[rx_start_idx++];
11535 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11536 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11538 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11539 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11540 goto out;
11542 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11543 - ETH_FCS_LEN;
11545 if (!tso_loopback) {
11546 if (rx_len != tx_len)
11547 goto out;
11549 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11550 if (opaque_key != RXD_OPAQUE_RING_STD)
11551 goto out;
11552 } else {
11553 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11554 goto out;
11556 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11557 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11558 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11559 goto out;
11562 if (opaque_key == RXD_OPAQUE_RING_STD) {
11563 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11564 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11565 mapping);
11566 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11567 rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11568 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11569 mapping);
11570 } else
11571 goto out;
11573 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11574 PCI_DMA_FROMDEVICE);
11576 for (i = data_off; i < rx_len; i++, val++) {
11577 if (*(rx_skb->data + i) != (u8) (val & 0xff))
11578 goto out;
11582 err = 0;
11584 /* tg3_free_rings will unmap and free the rx_skb */
11585 out:
11586 return err;
11589 #define TG3_STD_LOOPBACK_FAILED 1
11590 #define TG3_JMB_LOOPBACK_FAILED 2
11591 #define TG3_TSO_LOOPBACK_FAILED 4
11592 #define TG3_LOOPBACK_FAILED \
11593 (TG3_STD_LOOPBACK_FAILED | \
11594 TG3_JMB_LOOPBACK_FAILED | \
11595 TG3_TSO_LOOPBACK_FAILED)
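/* tg3_test_loopback() fills one failure mask per mode: data[0] for MAC
 * loopback, data[1] for internal PHY loopback and data[2] for external
 * loopback, each OR-ing together the STD/JMB/TSO bits above.
 */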
11597 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11599 int err = -EIO;
11600 u32 eee_cap;
11602 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11603 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11605 if (!netif_running(tp->dev)) {
11606 data[0] = TG3_LOOPBACK_FAILED;
11607 data[1] = TG3_LOOPBACK_FAILED;
11608 if (do_extlpbk)
11609 data[2] = TG3_LOOPBACK_FAILED;
11610 goto done;
11613 err = tg3_reset_hw(tp, 1);
11614 if (err) {
11615 data[0] = TG3_LOOPBACK_FAILED;
11616 data[1] = TG3_LOOPBACK_FAILED;
11617 if (do_extlpbk)
11618 data[2] = TG3_LOOPBACK_FAILED;
11619 goto done;
11622 if (tg3_flag(tp, ENABLE_RSS)) {
11623 int i;
11625 /* Reroute all rx packets to the 1st queue */
11626 for (i = MAC_RSS_INDIR_TBL_0;
11627 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11628 tw32(i, 0x0);
11631 /* HW errata - mac loopback fails in some cases on 5780.
11632 * Normal traffic and PHY loopback are not affected by
11633 * errata. Also, the MAC loopback test is deprecated for
11634 * all newer ASIC revisions.
11635 */
11636 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11637 !tg3_flag(tp, CPMU_PRESENT)) {
11638 tg3_mac_loopback(tp, true);
11640 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11641 data[0] |= TG3_STD_LOOPBACK_FAILED;
11643 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11644 tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11645 data[0] |= TG3_JMB_LOOPBACK_FAILED;
11647 tg3_mac_loopback(tp, false);
11650 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11651 !tg3_flag(tp, USE_PHYLIB)) {
11652 int i;
11654 tg3_phy_lpbk_set(tp, 0, false);
11656 /* Wait for link */
11657 for (i = 0; i < 100; i++) {
11658 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11659 break;
11660 mdelay(1);
11663 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11664 data[1] |= TG3_STD_LOOPBACK_FAILED;
11665 if (tg3_flag(tp, TSO_CAPABLE) &&
11666 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11667 data[1] |= TG3_TSO_LOOPBACK_FAILED;
11668 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11669 tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11670 data[1] |= TG3_JMB_LOOPBACK_FAILED;
11672 if (do_extlpbk) {
11673 tg3_phy_lpbk_set(tp, 0, true);
11675 /* All link indications report up, but the hardware
11676 * isn't really ready for about 20 msec. Double it
11677 * to be sure.
11678 */
11679 mdelay(40);
11681 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11682 data[2] |= TG3_STD_LOOPBACK_FAILED;
11683 if (tg3_flag(tp, TSO_CAPABLE) &&
11684 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11685 data[2] |= TG3_TSO_LOOPBACK_FAILED;
11686 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11687 tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11688 data[2] |= TG3_JMB_LOOPBACK_FAILED;
11691 /* Re-enable gphy autopowerdown. */
11692 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11693 tg3_phy_toggle_apd(tp, true);
11696 err = (data[0] | data[1] | data[2]) ? -EIO : 0;
11698 done:
11699 tp->phy_flags |= eee_cap;
11701 return err;
11704 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11705 u64 *data)
11707 struct tg3 *tp = netdev_priv(dev);
11708 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
11710 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11711 tg3_power_up(tp)) {
11712 etest->flags |= ETH_TEST_FL_FAILED;
11713 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11714 return;
11717 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11719 if (tg3_test_nvram(tp) != 0) {
11720 etest->flags |= ETH_TEST_FL_FAILED;
11721 data[0] = 1;
11723 if (!doextlpbk && tg3_test_link(tp)) {
11724 etest->flags |= ETH_TEST_FL_FAILED;
11725 data[1] = 1;
11727 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11728 int err, err2 = 0, irq_sync = 0;
11730 if (netif_running(dev)) {
11731 tg3_phy_stop(tp);
11732 tg3_netif_stop(tp);
11733 irq_sync = 1;
11736 tg3_full_lock(tp, irq_sync);
11738 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11739 err = tg3_nvram_lock(tp);
11740 tg3_halt_cpu(tp, RX_CPU_BASE);
11741 if (!tg3_flag(tp, 5705_PLUS))
11742 tg3_halt_cpu(tp, TX_CPU_BASE);
11743 if (!err)
11744 tg3_nvram_unlock(tp);
11746 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11747 tg3_phy_reset(tp);
11749 if (tg3_test_registers(tp) != 0) {
11750 etest->flags |= ETH_TEST_FL_FAILED;
11751 data[2] = 1;
11754 if (tg3_test_memory(tp) != 0) {
11755 etest->flags |= ETH_TEST_FL_FAILED;
11756 data[3] = 1;
11759 if (doextlpbk)
11760 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
11762 if (tg3_test_loopback(tp, &data[4], doextlpbk))
11763 etest->flags |= ETH_TEST_FL_FAILED;
11765 tg3_full_unlock(tp);
11767 if (tg3_test_interrupt(tp) != 0) {
11768 etest->flags |= ETH_TEST_FL_FAILED;
11769 data[7] = 1;
11772 tg3_full_lock(tp, 0);
11774 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11775 if (netif_running(dev)) {
11776 tg3_flag_set(tp, INIT_COMPLETE);
11777 err2 = tg3_restart_hw(tp, 1);
11778 if (!err2)
11779 tg3_netif_start(tp);
11782 tg3_full_unlock(tp);
11784 if (irq_sync && !err2)
11785 tg3_phy_start(tp);
11787 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11788 tg3_power_down(tp);
11792 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11794 struct mii_ioctl_data *data = if_mii(ifr);
11795 struct tg3 *tp = netdev_priv(dev);
11796 int err;
11798 if (tg3_flag(tp, USE_PHYLIB)) {
11799 struct phy_device *phydev;
11800 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11801 return -EAGAIN;
11802 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11803 return phy_mii_ioctl(phydev, ifr, cmd);
11806 switch (cmd) {
11807 case SIOCGMIIPHY:
11808 data->phy_id = tp->phy_addr;
11810 /* fallthru */
11811 case SIOCGMIIREG: {
11812 u32 mii_regval;
11814 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11815 break; /* We have no PHY */
11817 if (!netif_running(dev))
11818 return -EAGAIN;
11820 spin_lock_bh(&tp->lock);
11821 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11822 spin_unlock_bh(&tp->lock);
11824 data->val_out = mii_regval;
11826 return err;
11829 case SIOCSMIIREG:
11830 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11831 break; /* We have no PHY */
11833 if (!netif_running(dev))
11834 return -EAGAIN;
11836 spin_lock_bh(&tp->lock);
11837 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11838 spin_unlock_bh(&tp->lock);
11840 return err;
11842 default:
11843 /* do nothing */
11844 break;
11846 return -EOPNOTSUPP;
11849 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11851 struct tg3 *tp = netdev_priv(dev);
11853 memcpy(ec, &tp->coal, sizeof(*ec));
11854 return 0;
11857 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11859 struct tg3 *tp = netdev_priv(dev);
11860 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11861 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11863 if (!tg3_flag(tp, 5705_PLUS)) {
11864 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11865 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11866 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11867 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11870 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11871 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11872 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11873 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11874 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11875 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11876 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11877 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11878 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11879 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11880 return -EINVAL;
11882 /* No rx interrupts will be generated if both are zero */
11883 if ((ec->rx_coalesce_usecs == 0) &&
11884 (ec->rx_max_coalesced_frames == 0))
11885 return -EINVAL;
11887 /* No tx interrupts will be generated if both are zero */
11888 if ((ec->tx_coalesce_usecs == 0) &&
11889 (ec->tx_max_coalesced_frames == 0))
11890 return -EINVAL;
11892 /* Only copy relevant parameters, ignore all others. */
11893 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11894 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11895 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11896 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11897 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11898 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11899 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11900 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11901 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11903 if (netif_running(dev)) {
11904 tg3_full_lock(tp, 0);
11905 __tg3_set_coalesce(tp, &tp->coal);
11906 tg3_full_unlock(tp);
11908 return 0;
11911 static const struct ethtool_ops tg3_ethtool_ops = {
11912 .get_settings = tg3_get_settings,
11913 .set_settings = tg3_set_settings,
11914 .get_drvinfo = tg3_get_drvinfo,
11915 .get_regs_len = tg3_get_regs_len,
11916 .get_regs = tg3_get_regs,
11917 .get_wol = tg3_get_wol,
11918 .set_wol = tg3_set_wol,
11919 .get_msglevel = tg3_get_msglevel,
11920 .set_msglevel = tg3_set_msglevel,
11921 .nway_reset = tg3_nway_reset,
11922 .get_link = ethtool_op_get_link,
11923 .get_eeprom_len = tg3_get_eeprom_len,
11924 .get_eeprom = tg3_get_eeprom,
11925 .set_eeprom = tg3_set_eeprom,
11926 .get_ringparam = tg3_get_ringparam,
11927 .set_ringparam = tg3_set_ringparam,
11928 .get_pauseparam = tg3_get_pauseparam,
11929 .set_pauseparam = tg3_set_pauseparam,
11930 .self_test = tg3_self_test,
11931 .get_strings = tg3_get_strings,
11932 .set_phys_id = tg3_set_phys_id,
11933 .get_ethtool_stats = tg3_get_ethtool_stats,
11934 .get_coalesce = tg3_get_coalesce,
11935 .set_coalesce = tg3_set_coalesce,
11936 .get_sset_count = tg3_get_sset_count,
11939 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11941 u32 cursize, val, magic;
11943 tp->nvram_size = EEPROM_CHIP_SIZE;
11945 if (tg3_nvram_read(tp, 0, &magic) != 0)
11946 return;
11948 if ((magic != TG3_EEPROM_MAGIC) &&
11949 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11950 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11951 return;
11953 /*
11954 * Size the chip by reading offsets at increasing powers of two.
11955 * When we encounter our validation signature, we know the addressing
11956 * has wrapped around, and thus have our chip size.
11957 */
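/* Worked example: a 64 KB part answers the probes at 0x10, 0x20, ...
 * normally, but the read at 0x10000 wraps to offset 0 and returns the
 * magic value again, so the loop below exits with cursize == 0x10000.
 */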
11958 cursize = 0x10;
11960 while (cursize < tp->nvram_size) {
11961 if (tg3_nvram_read(tp, cursize, &val) != 0)
11962 return;
11964 if (val == magic)
11965 break;
11967 cursize <<= 1;
11970 tp->nvram_size = cursize;
11971 }
11973 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11975 u32 val;
11977 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11978 return;
11980 /* Selfboot format */
11981 if (val != TG3_EEPROM_MAGIC) {
11982 tg3_get_eeprom_size(tp);
11983 return;
11986 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11987 if (val != 0) {
11988 /* This is confusing. We want to operate on the
11989 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11990 * call will read from NVRAM and byteswap the data
11991 * according to the byteswapping settings for all
11992 * other register accesses. This ensures the data we
11993 * want will always reside in the lower 16-bits.
11994 * However, the data in NVRAM is in LE format, which
11995 * means the data from the NVRAM read will always be
11996 * opposite the endianness of the CPU. The 16-bit
11997 * byteswap then brings the data to CPU endianness.
11998 */
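/* Illustrative numbers: if the 16-bit field holds 512, the line below
 * yields tp->nvram_size = 512 * 1024 = 0x80000, i.e. a 512 KB part.
 */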
11999 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12000 return;
12003 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12006 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12008 u32 nvcfg1;
12010 nvcfg1 = tr32(NVRAM_CFG1);
12011 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12012 tg3_flag_set(tp, FLASH);
12013 } else {
12014 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12015 tw32(NVRAM_CFG1, nvcfg1);
12018 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12019 tg3_flag(tp, 5780_CLASS)) {
12020 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12021 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12022 tp->nvram_jedecnum = JEDEC_ATMEL;
12023 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12024 tg3_flag_set(tp, NVRAM_BUFFERED);
12025 break;
12026 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12027 tp->nvram_jedecnum = JEDEC_ATMEL;
12028 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12029 break;
12030 case FLASH_VENDOR_ATMEL_EEPROM:
12031 tp->nvram_jedecnum = JEDEC_ATMEL;
12032 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12033 tg3_flag_set(tp, NVRAM_BUFFERED);
12034 break;
12035 case FLASH_VENDOR_ST:
12036 tp->nvram_jedecnum = JEDEC_ST;
12037 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12038 tg3_flag_set(tp, NVRAM_BUFFERED);
12039 break;
12040 case FLASH_VENDOR_SAIFUN:
12041 tp->nvram_jedecnum = JEDEC_SAIFUN;
12042 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12043 break;
12044 case FLASH_VENDOR_SST_SMALL:
12045 case FLASH_VENDOR_SST_LARGE:
12046 tp->nvram_jedecnum = JEDEC_SST;
12047 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12048 break;
12050 } else {
12051 tp->nvram_jedecnum = JEDEC_ATMEL;
12052 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12053 tg3_flag_set(tp, NVRAM_BUFFERED);
12057 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12059 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12060 case FLASH_5752PAGE_SIZE_256:
12061 tp->nvram_pagesize = 256;
12062 break;
12063 case FLASH_5752PAGE_SIZE_512:
12064 tp->nvram_pagesize = 512;
12065 break;
12066 case FLASH_5752PAGE_SIZE_1K:
12067 tp->nvram_pagesize = 1024;
12068 break;
12069 case FLASH_5752PAGE_SIZE_2K:
12070 tp->nvram_pagesize = 2048;
12071 break;
12072 case FLASH_5752PAGE_SIZE_4K:
12073 tp->nvram_pagesize = 4096;
12074 break;
12075 case FLASH_5752PAGE_SIZE_264:
12076 tp->nvram_pagesize = 264;
12077 break;
12078 case FLASH_5752PAGE_SIZE_528:
12079 tp->nvram_pagesize = 528;
12080 break;
12084 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12086 u32 nvcfg1;
12088 nvcfg1 = tr32(NVRAM_CFG1);
12090 /* NVRAM protection for TPM */
12091 if (nvcfg1 & (1 << 27))
12092 tg3_flag_set(tp, PROTECTED_NVRAM);
12094 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12095 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12096 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12097 tp->nvram_jedecnum = JEDEC_ATMEL;
12098 tg3_flag_set(tp, NVRAM_BUFFERED);
12099 break;
12100 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12101 tp->nvram_jedecnum = JEDEC_ATMEL;
12102 tg3_flag_set(tp, NVRAM_BUFFERED);
12103 tg3_flag_set(tp, FLASH);
12104 break;
12105 case FLASH_5752VENDOR_ST_M45PE10:
12106 case FLASH_5752VENDOR_ST_M45PE20:
12107 case FLASH_5752VENDOR_ST_M45PE40:
12108 tp->nvram_jedecnum = JEDEC_ST;
12109 tg3_flag_set(tp, NVRAM_BUFFERED);
12110 tg3_flag_set(tp, FLASH);
12111 break;
12114 if (tg3_flag(tp, FLASH)) {
12115 tg3_nvram_get_pagesize(tp, nvcfg1);
12116 } else {
12117 /* For eeprom, set pagesize to maximum eeprom size */
12118 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12120 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12121 tw32(NVRAM_CFG1, nvcfg1);
12125 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12127 u32 nvcfg1, protect = 0;
12129 nvcfg1 = tr32(NVRAM_CFG1);
12131 /* NVRAM protection for TPM */
12132 if (nvcfg1 & (1 << 27)) {
12133 tg3_flag_set(tp, PROTECTED_NVRAM);
12134 protect = 1;
12137 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12138 switch (nvcfg1) {
12139 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12140 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12141 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12142 case FLASH_5755VENDOR_ATMEL_FLASH_5:
12143 tp->nvram_jedecnum = JEDEC_ATMEL;
12144 tg3_flag_set(tp, NVRAM_BUFFERED);
12145 tg3_flag_set(tp, FLASH);
12146 tp->nvram_pagesize = 264;
12147 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12148 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12149 tp->nvram_size = (protect ? 0x3e200 :
12150 TG3_NVRAM_SIZE_512KB);
12151 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12152 tp->nvram_size = (protect ? 0x1f200 :
12153 TG3_NVRAM_SIZE_256KB);
12154 else
12155 tp->nvram_size = (protect ? 0x1f200 :
12156 TG3_NVRAM_SIZE_128KB);
12157 break;
12158 case FLASH_5752VENDOR_ST_M45PE10:
12159 case FLASH_5752VENDOR_ST_M45PE20:
12160 case FLASH_5752VENDOR_ST_M45PE40:
12161 tp->nvram_jedecnum = JEDEC_ST;
12162 tg3_flag_set(tp, NVRAM_BUFFERED);
12163 tg3_flag_set(tp, FLASH);
12164 tp->nvram_pagesize = 256;
12165 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12166 tp->nvram_size = (protect ?
12167 TG3_NVRAM_SIZE_64KB :
12168 TG3_NVRAM_SIZE_128KB);
12169 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12170 tp->nvram_size = (protect ?
12171 TG3_NVRAM_SIZE_64KB :
12172 TG3_NVRAM_SIZE_256KB);
12173 else
12174 tp->nvram_size = (protect ?
12175 TG3_NVRAM_SIZE_128KB :
12176 TG3_NVRAM_SIZE_512KB);
12177 break;
12181 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12183 u32 nvcfg1;
12185 nvcfg1 = tr32(NVRAM_CFG1);
12187 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12188 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12189 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12190 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12191 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12192 tp->nvram_jedecnum = JEDEC_ATMEL;
12193 tg3_flag_set(tp, NVRAM_BUFFERED);
12194 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12196 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12197 tw32(NVRAM_CFG1, nvcfg1);
12198 break;
12199 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12200 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12201 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12202 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12203 tp->nvram_jedecnum = JEDEC_ATMEL;
12204 tg3_flag_set(tp, NVRAM_BUFFERED);
12205 tg3_flag_set(tp, FLASH);
12206 tp->nvram_pagesize = 264;
12207 break;
12208 case FLASH_5752VENDOR_ST_M45PE10:
12209 case FLASH_5752VENDOR_ST_M45PE20:
12210 case FLASH_5752VENDOR_ST_M45PE40:
12211 tp->nvram_jedecnum = JEDEC_ST;
12212 tg3_flag_set(tp, NVRAM_BUFFERED);
12213 tg3_flag_set(tp, FLASH);
12214 tp->nvram_pagesize = 256;
12215 break;
12219 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12221 u32 nvcfg1, protect = 0;
12223 nvcfg1 = tr32(NVRAM_CFG1);
12225 /* NVRAM protection for TPM */
12226 if (nvcfg1 & (1 << 27)) {
12227 tg3_flag_set(tp, PROTECTED_NVRAM);
12228 protect = 1;
12231 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12232 switch (nvcfg1) {
12233 case FLASH_5761VENDOR_ATMEL_ADB021D:
12234 case FLASH_5761VENDOR_ATMEL_ADB041D:
12235 case FLASH_5761VENDOR_ATMEL_ADB081D:
12236 case FLASH_5761VENDOR_ATMEL_ADB161D:
12237 case FLASH_5761VENDOR_ATMEL_MDB021D:
12238 case FLASH_5761VENDOR_ATMEL_MDB041D:
12239 case FLASH_5761VENDOR_ATMEL_MDB081D:
12240 case FLASH_5761VENDOR_ATMEL_MDB161D:
12241 tp->nvram_jedecnum = JEDEC_ATMEL;
12242 tg3_flag_set(tp, NVRAM_BUFFERED);
12243 tg3_flag_set(tp, FLASH);
12244 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12245 tp->nvram_pagesize = 256;
12246 break;
12247 case FLASH_5761VENDOR_ST_A_M45PE20:
12248 case FLASH_5761VENDOR_ST_A_M45PE40:
12249 case FLASH_5761VENDOR_ST_A_M45PE80:
12250 case FLASH_5761VENDOR_ST_A_M45PE16:
12251 case FLASH_5761VENDOR_ST_M_M45PE20:
12252 case FLASH_5761VENDOR_ST_M_M45PE40:
12253 case FLASH_5761VENDOR_ST_M_M45PE80:
12254 case FLASH_5761VENDOR_ST_M_M45PE16:
12255 tp->nvram_jedecnum = JEDEC_ST;
12256 tg3_flag_set(tp, NVRAM_BUFFERED);
12257 tg3_flag_set(tp, FLASH);
12258 tp->nvram_pagesize = 256;
12259 break;
12262 if (protect) {
12263 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12264 } else {
12265 switch (nvcfg1) {
12266 case FLASH_5761VENDOR_ATMEL_ADB161D:
12267 case FLASH_5761VENDOR_ATMEL_MDB161D:
12268 case FLASH_5761VENDOR_ST_A_M45PE16:
12269 case FLASH_5761VENDOR_ST_M_M45PE16:
12270 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12271 break;
12272 case FLASH_5761VENDOR_ATMEL_ADB081D:
12273 case FLASH_5761VENDOR_ATMEL_MDB081D:
12274 case FLASH_5761VENDOR_ST_A_M45PE80:
12275 case FLASH_5761VENDOR_ST_M_M45PE80:
12276 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12277 break;
12278 case FLASH_5761VENDOR_ATMEL_ADB041D:
12279 case FLASH_5761VENDOR_ATMEL_MDB041D:
12280 case FLASH_5761VENDOR_ST_A_M45PE40:
12281 case FLASH_5761VENDOR_ST_M_M45PE40:
12282 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12283 break;
12284 case FLASH_5761VENDOR_ATMEL_ADB021D:
12285 case FLASH_5761VENDOR_ATMEL_MDB021D:
12286 case FLASH_5761VENDOR_ST_A_M45PE20:
12287 case FLASH_5761VENDOR_ST_M_M45PE20:
12288 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12289 break;
12294 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12296 tp->nvram_jedecnum = JEDEC_ATMEL;
12297 tg3_flag_set(tp, NVRAM_BUFFERED);
12298 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12299 }
12301 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12302 {
12303 u32 nvcfg1;
12305 nvcfg1 = tr32(NVRAM_CFG1);
12307 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12308 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12309 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12310 tp->nvram_jedecnum = JEDEC_ATMEL;
12311 tg3_flag_set(tp, NVRAM_BUFFERED);
12312 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12314 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12315 tw32(NVRAM_CFG1, nvcfg1);
12316 return;
12317 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12318 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12319 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12320 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12321 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12322 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12323 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12324 tp->nvram_jedecnum = JEDEC_ATMEL;
12325 tg3_flag_set(tp, NVRAM_BUFFERED);
12326 tg3_flag_set(tp, FLASH);
12328 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12329 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12330 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12331 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12332 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12333 break;
12334 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12335 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12336 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12337 break;
12338 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12339 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12340 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12341 break;
12342 }
12343 break;
12344 case FLASH_5752VENDOR_ST_M45PE10:
12345 case FLASH_5752VENDOR_ST_M45PE20:
12346 case FLASH_5752VENDOR_ST_M45PE40:
12347 tp->nvram_jedecnum = JEDEC_ST;
12348 tg3_flag_set(tp, NVRAM_BUFFERED);
12349 tg3_flag_set(tp, FLASH);
12351 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12352 case FLASH_5752VENDOR_ST_M45PE10:
12353 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12354 break;
12355 case FLASH_5752VENDOR_ST_M45PE20:
12356 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12357 break;
12358 case FLASH_5752VENDOR_ST_M45PE40:
12359 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12360 break;
12361 }
12362 break;
12363 default:
12364 tg3_flag_set(tp, NO_NVRAM);
12365 return;
12366 }
12368 tg3_nvram_get_pagesize(tp, nvcfg1);
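/* 264- and 528-byte pages are Atmel DataFlash geometries that need the
 * controller's page-shift address translation; anything else is
 * addressed linearly, hence NO_NVRAM_ADDR_TRANS below.
 */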
12369 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12370 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12371 }
12374 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12375 {
12376 u32 nvcfg1;
12378 nvcfg1 = tr32(NVRAM_CFG1);
12380 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12381 case FLASH_5717VENDOR_ATMEL_EEPROM:
12382 case FLASH_5717VENDOR_MICRO_EEPROM:
12383 tp->nvram_jedecnum = JEDEC_ATMEL;
12384 tg3_flag_set(tp, NVRAM_BUFFERED);
12385 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12387 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12388 tw32(NVRAM_CFG1, nvcfg1);
12389 return;
12390 case FLASH_5717VENDOR_ATMEL_MDB011D:
12391 case FLASH_5717VENDOR_ATMEL_ADB011B:
12392 case FLASH_5717VENDOR_ATMEL_ADB011D:
12393 case FLASH_5717VENDOR_ATMEL_MDB021D:
12394 case FLASH_5717VENDOR_ATMEL_ADB021B:
12395 case FLASH_5717VENDOR_ATMEL_ADB021D:
12396 case FLASH_5717VENDOR_ATMEL_45USPT:
12397 tp->nvram_jedecnum = JEDEC_ATMEL;
12398 tg3_flag_set(tp, NVRAM_BUFFERED);
12399 tg3_flag_set(tp, FLASH);
12401 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12402 case FLASH_5717VENDOR_ATMEL_MDB021D:
12403 /* Detect size with tg3_nvram_get_size() */
12404 break;
12405 case FLASH_5717VENDOR_ATMEL_ADB021B:
12406 case FLASH_5717VENDOR_ATMEL_ADB021D:
12407 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12408 break;
12409 default:
12410 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12411 break;
12412 }
12413 break;
12414 case FLASH_5717VENDOR_ST_M_M25PE10:
12415 case FLASH_5717VENDOR_ST_A_M25PE10:
12416 case FLASH_5717VENDOR_ST_M_M45PE10:
12417 case FLASH_5717VENDOR_ST_A_M45PE10:
12418 case FLASH_5717VENDOR_ST_M_M25PE20:
12419 case FLASH_5717VENDOR_ST_A_M25PE20:
12420 case FLASH_5717VENDOR_ST_M_M45PE20:
12421 case FLASH_5717VENDOR_ST_A_M45PE20:
12422 case FLASH_5717VENDOR_ST_25USPT:
12423 case FLASH_5717VENDOR_ST_45USPT:
12424 tp->nvram_jedecnum = JEDEC_ST;
12425 tg3_flag_set(tp, NVRAM_BUFFERED);
12426 tg3_flag_set(tp, FLASH);
12428 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12429 case FLASH_5717VENDOR_ST_M_M25PE20:
12430 case FLASH_5717VENDOR_ST_M_M45PE20:
12431 /* Detect size with tg3_nvram_get_size() */
12432 break;
12433 case FLASH_5717VENDOR_ST_A_M25PE20:
12434 case FLASH_5717VENDOR_ST_A_M45PE20:
12435 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12436 break;
12437 default:
12438 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12439 break;
12440 }
12441 break;
12442 default:
12443 tg3_flag_set(tp, NO_NVRAM);
12444 return;
12445 }
12447 tg3_nvram_get_pagesize(tp, nvcfg1);
12448 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12449 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12450 }
12452 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12453 {
12454 u32 nvcfg1, nvmpinstrp;
12456 nvcfg1 = tr32(NVRAM_CFG1);
12457 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12459 switch (nvmpinstrp) {
12460 case FLASH_5720_EEPROM_HD:
12461 case FLASH_5720_EEPROM_LD:
12462 tp->nvram_jedecnum = JEDEC_ATMEL;
12463 tg3_flag_set(tp, NVRAM_BUFFERED);
12465 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12466 tw32(NVRAM_CFG1, nvcfg1);
12467 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12468 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12469 else
12470 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12471 return;
12472 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12473 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12474 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12475 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12476 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12477 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12478 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12479 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12480 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12481 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12482 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12483 case FLASH_5720VENDOR_ATMEL_45USPT:
12484 tp->nvram_jedecnum = JEDEC_ATMEL;
12485 tg3_flag_set(tp, NVRAM_BUFFERED);
12486 tg3_flag_set(tp, FLASH);
12488 switch (nvmpinstrp) {
12489 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12490 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12491 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12492 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12493 break;
12494 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12495 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12496 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12497 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12498 break;
12499 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12500 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12501 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12502 break;
12503 default:
12504 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12505 break;
12506 }
12507 break;
12508 case FLASH_5720VENDOR_M_ST_M25PE10:
12509 case FLASH_5720VENDOR_M_ST_M45PE10:
12510 case FLASH_5720VENDOR_A_ST_M25PE10:
12511 case FLASH_5720VENDOR_A_ST_M45PE10:
12512 case FLASH_5720VENDOR_M_ST_M25PE20:
12513 case FLASH_5720VENDOR_M_ST_M45PE20:
12514 case FLASH_5720VENDOR_A_ST_M25PE20:
12515 case FLASH_5720VENDOR_A_ST_M45PE20:
12516 case FLASH_5720VENDOR_M_ST_M25PE40:
12517 case FLASH_5720VENDOR_M_ST_M45PE40:
12518 case FLASH_5720VENDOR_A_ST_M25PE40:
12519 case FLASH_5720VENDOR_A_ST_M45PE40:
12520 case FLASH_5720VENDOR_M_ST_M25PE80:
12521 case FLASH_5720VENDOR_M_ST_M45PE80:
12522 case FLASH_5720VENDOR_A_ST_M25PE80:
12523 case FLASH_5720VENDOR_A_ST_M45PE80:
12524 case FLASH_5720VENDOR_ST_25USPT:
12525 case FLASH_5720VENDOR_ST_45USPT:
12526 tp->nvram_jedecnum = JEDEC_ST;
12527 tg3_flag_set(tp, NVRAM_BUFFERED);
12528 tg3_flag_set(tp, FLASH);
12530 switch (nvmpinstrp) {
12531 case FLASH_5720VENDOR_M_ST_M25PE20:
12532 case FLASH_5720VENDOR_M_ST_M45PE20:
12533 case FLASH_5720VENDOR_A_ST_M25PE20:
12534 case FLASH_5720VENDOR_A_ST_M45PE20:
12535 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12536 break;
12537 case FLASH_5720VENDOR_M_ST_M25PE40:
12538 case FLASH_5720VENDOR_M_ST_M45PE40:
12539 case FLASH_5720VENDOR_A_ST_M25PE40:
12540 case FLASH_5720VENDOR_A_ST_M45PE40:
12541 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12542 break;
12543 case FLASH_5720VENDOR_M_ST_M25PE80:
12544 case FLASH_5720VENDOR_M_ST_M45PE80:
12545 case FLASH_5720VENDOR_A_ST_M25PE80:
12546 case FLASH_5720VENDOR_A_ST_M45PE80:
12547 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12548 break;
12549 default:
12550 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12551 break;
12552 }
12553 break;
12554 default:
12555 tg3_flag_set(tp, NO_NVRAM);
12556 return;
12557 }
12559 tg3_nvram_get_pagesize(tp, nvcfg1);
12560 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12561 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12562 }
12564 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12565 static void __devinit tg3_nvram_init(struct tg3 *tp)
12566 {
12567 tw32_f(GRC_EEPROM_ADDR,
12568 (EEPROM_ADDR_FSM_RESET |
12569 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12570 EEPROM_ADDR_CLKPERD_SHIFT)));
12572 msleep(1);
12574 /* Enable seeprom accesses. */
12575 tw32_f(GRC_LOCAL_CTRL,
12576 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12577 udelay(100);
12579 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12580 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12581 tg3_flag_set(tp, NVRAM);
12583 if (tg3_nvram_lock(tp)) {
12584 netdev_warn(tp->dev,
12585 "Cannot get nvram lock, %s failed\n",
12586 __func__);
12587 return;
12588 }
12589 tg3_enable_nvram_access(tp);
12591 tp->nvram_size = 0;
12593 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12594 tg3_get_5752_nvram_info(tp);
12595 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12596 tg3_get_5755_nvram_info(tp);
12597 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12598 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12599 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12600 tg3_get_5787_nvram_info(tp);
12601 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12602 tg3_get_5761_nvram_info(tp);
12603 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12604 tg3_get_5906_nvram_info(tp);
12605 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12606 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12607 tg3_get_57780_nvram_info(tp);
12608 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12609 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12610 tg3_get_5717_nvram_info(tp);
12611 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12612 tg3_get_5720_nvram_info(tp);
12613 else
12614 tg3_get_nvram_info(tp);
12616 if (tp->nvram_size == 0)
12617 tg3_get_nvram_size(tp);
12619 tg3_disable_nvram_access(tp);
12620 tg3_nvram_unlock(tp);
12622 } else {
12623 tg3_flag_clear(tp, NVRAM);
12624 tg3_flag_clear(tp, NVRAM_BUFFERED);
12626 tg3_get_eeprom_size(tp);
12627 }
12628 }
12630 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12631 u32 offset, u32 len, u8 *buf)
12632 {
12633 int i, j, rc = 0;
12634 u32 val;
12636 for (i = 0; i < len; i += 4) {
12637 u32 addr;
12638 __be32 data;
12640 addr = offset + i;
12642 memcpy(&data, buf + i, 4);
12644 /*
12645 * The SEEPROM interface expects the data to always be opposite
12646 * the native endian format. We accomplish this by reversing
12647 * all the operations that would have been performed on the
12648 * data from a call to tg3_nvram_read_be32().
12649 */
12650 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12652 val = tr32(GRC_EEPROM_ADDR);
12653 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12655 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12656 EEPROM_ADDR_READ);
12657 tw32(GRC_EEPROM_ADDR, val |
12658 (0 << EEPROM_ADDR_DEVID_SHIFT) |
12659 (addr & EEPROM_ADDR_ADDR_MASK) |
12660 EEPROM_ADDR_START |
12661 EEPROM_ADDR_WRITE);
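/* Poll up to ~1 second (1000 iterations of msleep(1)) for this
 * one-word EEPROM write to complete.
 */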
12663 for (j = 0; j < 1000; j++) {
12664 val = tr32(GRC_EEPROM_ADDR);
12666 if (val & EEPROM_ADDR_COMPLETE)
12667 break;
12668 msleep(1);
12669 }
12670 if (!(val & EEPROM_ADDR_COMPLETE)) {
12671 rc = -EBUSY;
12672 break;
12673 }
12674 }
12676 return rc;
12677 }
12679 /* offset and length are dword aligned */
12680 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12681 u8 *buf)
12682 {
12683 int ret = 0;
12684 u32 pagesize = tp->nvram_pagesize;
12685 u32 pagemask = pagesize - 1;
12686 u32 nvram_cmd;
12687 u8 *tmp;
12689 tmp = kmalloc(pagesize, GFP_KERNEL);
12690 if (tmp == NULL)
12691 return -ENOMEM;
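/* Unbuffered flash must be erased a whole page at a time, so each
 * pass below reads the page into tmp, merges in the caller's data,
 * erases the page, and programs the merged page back word by word.
 */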
12693 while (len) {
12694 int j;
12695 u32 phy_addr, page_off, size;
12697 phy_addr = offset & ~pagemask;
12699 for (j = 0; j < pagesize; j += 4) {
12700 ret = tg3_nvram_read_be32(tp, phy_addr + j,
12701 (__be32 *) (tmp + j));
12702 if (ret)
12703 break;
12704 }
12705 if (ret)
12706 break;
12708 page_off = offset & pagemask;
12709 size = pagesize;
12710 if (len < size)
12711 size = len;
12713 len -= size;
12715 memcpy(tmp + page_off, buf, size);
12717 offset = offset + (pagesize - page_off);
12719 tg3_enable_nvram_access(tp);
12721 /*
12722 * Before we can erase the flash page, we need
12723 * to issue a special "write enable" command.
12724 */
12725 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12727 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12728 break;
12730 /* Erase the target page */
12731 tw32(NVRAM_ADDR, phy_addr);
12733 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12734 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12736 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12737 break;
12739 /* Issue another write enable to start the write. */
12740 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12742 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12743 break;
12745 for (j = 0; j < pagesize; j += 4) {
12746 __be32 data;
12748 data = *((__be32 *) (tmp + j));
12750 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12752 tw32(NVRAM_ADDR, phy_addr + j);
12754 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12755 NVRAM_CMD_WR;
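/* The first and last words of each page must carry the
 * NVRAM_CMD_FIRST/NVRAM_CMD_LAST framing bits.
 */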
12757 if (j == 0)
12758 nvram_cmd |= NVRAM_CMD_FIRST;
12759 else if (j == (pagesize - 4))
12760 nvram_cmd |= NVRAM_CMD_LAST;
12762 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12763 break;
12764 }
12765 if (ret)
12766 break;
12767 }
12769 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12770 tg3_nvram_exec_cmd(tp, nvram_cmd);
12772 kfree(tmp);
12774 return ret;
12775 }
12777 /* offset and length are dword aligned */
12778 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12779 u8 *buf)
12780 {
12781 int i, ret = 0;
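/* Buffered parts accept one dword per command; page and transfer
 * boundaries are tagged with the FIRST/LAST bits below.
 */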
12783 for (i = 0; i < len; i += 4, offset += 4) {
12784 u32 page_off, phy_addr, nvram_cmd;
12785 __be32 data;
12787 memcpy(&data, buf + i, 4);
12788 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12790 page_off = offset % tp->nvram_pagesize;
12792 phy_addr = tg3_nvram_phys_addr(tp, offset);
12794 tw32(NVRAM_ADDR, phy_addr);
12796 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12798 if (page_off == 0 || i == 0)
12799 nvram_cmd |= NVRAM_CMD_FIRST;
12800 if (page_off == (tp->nvram_pagesize - 4))
12801 nvram_cmd |= NVRAM_CMD_LAST;
12803 if (i == (len - 4))
12804 nvram_cmd |= NVRAM_CMD_LAST;
12806 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12807 !tg3_flag(tp, 5755_PLUS) &&
12808 (tp->nvram_jedecnum == JEDEC_ST) &&
12809 (nvram_cmd & NVRAM_CMD_FIRST)) {
12811 if ((ret = tg3_nvram_exec_cmd(tp,
12812 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12813 NVRAM_CMD_DONE)))
12815 break;
12816 }
12817 if (!tg3_flag(tp, FLASH)) {
12818 /* We always do complete word writes to eeprom. */
12819 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12820 }
12822 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12823 break;
12824 }
12825 return ret;
12826 }
12828 /* offset and length are dword aligned */
12829 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12830 {
12831 int ret;
12833 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12834 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12835 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12836 udelay(40);
12837 }
12839 if (!tg3_flag(tp, NVRAM)) {
12840 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12841 } else {
12842 u32 grc_mode;
12844 ret = tg3_nvram_lock(tp);
12845 if (ret)
12846 return ret;
12848 tg3_enable_nvram_access(tp);
12849 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12850 tw32(NVRAM_WRITE1, 0x406);
12852 grc_mode = tr32(GRC_MODE);
12853 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12855 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12856 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12857 buf);
12858 } else {
12859 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12860 buf);
12861 }
12863 grc_mode = tr32(GRC_MODE);
12864 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12866 tg3_disable_nvram_access(tp);
12867 tg3_nvram_unlock(tp);
12868 }
12870 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12871 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12872 udelay(40);
12873 }
12875 return ret;
12876 }
12878 struct subsys_tbl_ent {
12879 u16 subsys_vendor, subsys_devid;
12880 u32 phy_id;
12881 };
12883 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12884 /* Broadcom boards. */
12885 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12886 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12887 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12888 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12889 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12890 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12891 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12892 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12893 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12894 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12895 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12896 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12897 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12898 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12899 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12900 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12901 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12902 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12903 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12904 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12905 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12906 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12908 /* 3com boards. */
12909 { TG3PCI_SUBVENDOR_ID_3COM,
12910 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12911 { TG3PCI_SUBVENDOR_ID_3COM,
12912 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12913 { TG3PCI_SUBVENDOR_ID_3COM,
12914 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12915 { TG3PCI_SUBVENDOR_ID_3COM,
12916 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12917 { TG3PCI_SUBVENDOR_ID_3COM,
12918 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12920 /* DELL boards. */
12921 { TG3PCI_SUBVENDOR_ID_DELL,
12922 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12923 { TG3PCI_SUBVENDOR_ID_DELL,
12924 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12925 { TG3PCI_SUBVENDOR_ID_DELL,
12926 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12927 { TG3PCI_SUBVENDOR_ID_DELL,
12928 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12930 /* Compaq boards. */
12931 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12932 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12933 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12934 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12935 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12936 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12937 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12938 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12939 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12940 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12942 /* IBM boards. */
12943 { TG3PCI_SUBVENDOR_ID_IBM,
12944 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12945 };
12947 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12948 {
12949 int i;
12951 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12952 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12953 tp->pdev->subsystem_vendor) &&
12954 (subsys_id_to_phy_id[i].subsys_devid ==
12955 tp->pdev->subsystem_device))
12956 return &subsys_id_to_phy_id[i];
12957 }
12958 return NULL;
12959 }
12961 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12962 {
12963 u32 val;
12965 tp->phy_id = TG3_PHY_ID_INVALID;
12966 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12968 /* Assume an onboard device and WOL capable by default. */
12969 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12970 tg3_flag_set(tp, WOL_CAP);
12972 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12973 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12974 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12975 tg3_flag_set(tp, IS_NIC);
12976 }
12977 val = tr32(VCPU_CFGSHDW);
12978 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12979 tg3_flag_set(tp, ASPM_WORKAROUND);
12980 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12981 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12982 tg3_flag_set(tp, WOL_ENABLE);
12983 device_set_wakeup_enable(&tp->pdev->dev, true);
12984 }
12985 goto done;
12986 }
12988 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12989 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12990 u32 nic_cfg, led_cfg;
12991 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12992 int eeprom_phy_serdes = 0;
12994 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12995 tp->nic_sram_data_cfg = nic_cfg;
12997 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12998 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12999 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13000 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13001 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13002 (ver > 0) && (ver < 0x100))
13003 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13005 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13006 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13008 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13009 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13010 eeprom_phy_serdes = 1;
13012 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13013 if (nic_phy_id != 0) {
13014 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13015 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
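/* Reassemble the two SRAM halves into the packed PHY ID layout
 * that tg3_phy_probe() also builds from MII_PHYSID1/MII_PHYSID2.
 */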
13017 eeprom_phy_id = (id1 >> 16) << 10;
13018 eeprom_phy_id |= (id2 & 0xfc00) << 16;
13019 eeprom_phy_id |= (id2 & 0x03ff) << 0;
13020 } else
13021 eeprom_phy_id = 0;
13023 tp->phy_id = eeprom_phy_id;
13024 if (eeprom_phy_serdes) {
13025 if (!tg3_flag(tp, 5705_PLUS))
13026 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13027 else
13028 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13029 }
13031 if (tg3_flag(tp, 5750_PLUS))
13032 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13033 SHASTA_EXT_LED_MODE_MASK);
13034 else
13035 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13037 switch (led_cfg) {
13038 default:
13039 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13040 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13041 break;
13043 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13044 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13045 break;
13047 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13048 tp->led_ctrl = LED_CTRL_MODE_MAC;
13050 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13051 * read on some older 5700/5701 bootcode.
13052 */
13053 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13054 ASIC_REV_5700 ||
13055 GET_ASIC_REV(tp->pci_chip_rev_id) ==
13056 ASIC_REV_5701)
13057 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13059 break;
13061 case SHASTA_EXT_LED_SHARED:
13062 tp->led_ctrl = LED_CTRL_MODE_SHARED;
13063 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13064 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13065 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13066 LED_CTRL_MODE_PHY_2);
13067 break;
13069 case SHASTA_EXT_LED_MAC:
13070 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13071 break;
13073 case SHASTA_EXT_LED_COMBO:
13074 tp->led_ctrl = LED_CTRL_MODE_COMBO;
13075 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13076 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13077 LED_CTRL_MODE_PHY_2);
13078 break;
13079 }
13082 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13083 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13084 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13085 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13087 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13088 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13090 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13091 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13092 if ((tp->pdev->subsystem_vendor ==
13093 PCI_VENDOR_ID_ARIMA) &&
13094 (tp->pdev->subsystem_device == 0x205a ||
13095 tp->pdev->subsystem_device == 0x2063))
13096 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13097 } else {
13098 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13099 tg3_flag_set(tp, IS_NIC);
13100 }
13102 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13103 tg3_flag_set(tp, ENABLE_ASF);
13104 if (tg3_flag(tp, 5750_PLUS))
13105 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13106 }
13108 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13109 tg3_flag(tp, 5750_PLUS))
13110 tg3_flag_set(tp, ENABLE_APE);
13112 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13113 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13114 tg3_flag_clear(tp, WOL_CAP);
13116 if (tg3_flag(tp, WOL_CAP) &&
13117 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13118 tg3_flag_set(tp, WOL_ENABLE);
13119 device_set_wakeup_enable(&tp->pdev->dev, true);
13120 }
13122 if (cfg2 & (1 << 17))
13123 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13125 /* serdes signal pre-emphasis in register 0x590 set by */
13126 /* bootcode if bit 18 is set */
13127 if (cfg2 & (1 << 18))
13128 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13130 if ((tg3_flag(tp, 57765_PLUS) ||
13131 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13132 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13133 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13134 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13136 if (tg3_flag(tp, PCI_EXPRESS) &&
13137 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13138 !tg3_flag(tp, 57765_PLUS)) {
13139 u32 cfg3;
13141 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13142 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13143 tg3_flag_set(tp, ASPM_WORKAROUND);
13144 }
13146 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13147 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13148 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13149 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13150 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13151 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13152 }
13153 done:
13154 if (tg3_flag(tp, WOL_CAP))
13155 device_set_wakeup_enable(&tp->pdev->dev,
13156 tg3_flag(tp, WOL_ENABLE));
13157 else
13158 device_set_wakeup_capable(&tp->pdev->dev, false);
13159 }
13161 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13162 {
13163 int i;
13164 u32 val;
13166 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13167 tw32(OTP_CTRL, cmd);
13169 /* Wait for up to 1 ms for command to execute. */
13170 for (i = 0; i < 100; i++) {
13171 val = tr32(OTP_STATUS);
13172 if (val & OTP_STATUS_CMD_DONE)
13173 break;
13174 udelay(10);
13175 }
13177 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13178 }
13180 /* Read the gphy configuration from the OTP region of the chip. The gphy
13181 * configuration is a 32-bit value that straddles the alignment boundary.
13182 * We do two 32-bit reads and then shift and merge the results.
13183 */
13184 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13185 {
13186 u32 bhalf_otp, thalf_otp;
13188 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13190 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13191 return 0;
13193 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13195 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13196 return 0;
13198 thalf_otp = tr32(OTP_READ_DATA);
13200 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13202 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13203 return 0;
13205 bhalf_otp = tr32(OTP_READ_DATA);
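/* Merge the two reads: the low half of the first word becomes the
 * top 16 bits, the high half of the second word the bottom 16.
 */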
13207 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13208 }
13210 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13211 {
13212 u32 adv = ADVERTISED_Autoneg |
13213 ADVERTISED_Pause;
13215 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13216 adv |= ADVERTISED_1000baseT_Half |
13217 ADVERTISED_1000baseT_Full;
13219 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13220 adv |= ADVERTISED_100baseT_Half |
13221 ADVERTISED_100baseT_Full |
13222 ADVERTISED_10baseT_Half |
13223 ADVERTISED_10baseT_Full |
13224 ADVERTISED_TP;
13225 else
13226 adv |= ADVERTISED_FIBRE;
13228 tp->link_config.advertising = adv;
13229 tp->link_config.speed = SPEED_INVALID;
13230 tp->link_config.duplex = DUPLEX_INVALID;
13231 tp->link_config.autoneg = AUTONEG_ENABLE;
13232 tp->link_config.active_speed = SPEED_INVALID;
13233 tp->link_config.active_duplex = DUPLEX_INVALID;
13234 tp->link_config.orig_speed = SPEED_INVALID;
13235 tp->link_config.orig_duplex = DUPLEX_INVALID;
13236 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13237 }
13239 static int __devinit tg3_phy_probe(struct tg3 *tp)
13240 {
13241 u32 hw_phy_id_1, hw_phy_id_2;
13242 u32 hw_phy_id, hw_phy_id_masked;
13243 int err;
13245 /* flow control autonegotiation is default behavior */
13246 tg3_flag_set(tp, PAUSE_AUTONEG);
13247 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13249 if (tg3_flag(tp, USE_PHYLIB))
13250 return tg3_phy_init(tp);
13252 /* Reading the PHY ID register can conflict with ASF
13253 * firmware access to the PHY hardware.
13254 */
13255 err = 0;
13256 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13257 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13258 } else {
13259 /* Now read the physical PHY_ID from the chip and verify
13260 * that it is sane. If it doesn't look good, we fall back
13261 * to the hard-coded table based PHY_ID and, failing
13262 * that, to the value found in the eeprom area.
13263 */
13264 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13265 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
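/* Pack PHYSID1/2 into tg3's internal PHY ID layout, the same
 * format the TG3_PHY_ID_* constants and the NVRAM path use.
 */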
13267 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13268 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13269 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13271 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13272 }
13274 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13275 tp->phy_id = hw_phy_id;
13276 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13277 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13278 else
13279 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13280 } else {
13281 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13282 /* Do nothing, phy ID already set up in
13283 * tg3_get_eeprom_hw_cfg().
13284 */
13285 } else {
13286 struct subsys_tbl_ent *p;
13288 /* No eeprom signature? Try the hardcoded
13289 * subsys device table.
13290 */
13291 p = tg3_lookup_by_subsys(tp);
13292 if (!p)
13293 return -ENODEV;
13295 tp->phy_id = p->phy_id;
13296 if (!tp->phy_id ||
13297 tp->phy_id == TG3_PHY_ID_BCM8002)
13298 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13299 }
13300 }
13302 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13303 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13304 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13305 (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13306 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13307 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13308 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13309 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13311 tg3_phy_init_link_config(tp);
13313 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13314 !tg3_flag(tp, ENABLE_APE) &&
13315 !tg3_flag(tp, ENABLE_ASF)) {
13316 u32 bmsr, mask;
13318 tg3_readphy(tp, MII_BMSR, &bmsr);
13319 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13320 (bmsr & BMSR_LSTATUS))
13321 goto skip_phy_reset;
13323 err = tg3_phy_reset(tp);
13324 if (err)
13325 return err;
13327 tg3_phy_set_wirespeed(tp);
13329 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13330 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13331 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13332 if (!tg3_copper_is_advertising_all(tp, mask)) {
13333 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13334 tp->link_config.flowctrl);
13336 tg3_writephy(tp, MII_BMCR,
13337 BMCR_ANENABLE | BMCR_ANRESTART);
13338 }
13339 }
13341 skip_phy_reset:
13342 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13343 err = tg3_init_5401phy_dsp(tp);
13344 if (err)
13345 return err;
13347 err = tg3_init_5401phy_dsp(tp);
13348 }
13350 return err;
13351 }
13353 static void __devinit tg3_read_vpd(struct tg3 *tp)
13354 {
13355 u8 *vpd_data;
13356 unsigned int block_end, rosize, len;
13357 u32 vpdlen;
13358 int j, i = 0;
13360 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13361 if (!vpd_data)
13362 goto out_no_vpd;
13364 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13365 if (i < 0)
13366 goto out_not_found;
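/* The read-only VPD block was found; bound every keyword search
 * below by its advertised size.
 */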
13368 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13369 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13370 i += PCI_VPD_LRDT_TAG_SIZE;
13372 if (block_end > vpdlen)
13373 goto out_not_found;
13375 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13376 PCI_VPD_RO_KEYWORD_MFR_ID);
13377 if (j > 0) {
13378 len = pci_vpd_info_field_size(&vpd_data[j]);
13380 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13381 if (j + len > block_end || len != 4 ||
13382 memcmp(&vpd_data[j], "1028", 4))
13383 goto partno;
13385 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13386 PCI_VPD_RO_KEYWORD_VENDOR0);
13387 if (j < 0)
13388 goto partno;
13390 len = pci_vpd_info_field_size(&vpd_data[j]);
13392 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13393 if (j + len > block_end)
13394 goto partno;
13396 memcpy(tp->fw_ver, &vpd_data[j], len);
13397 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13398 }
13400 partno:
13401 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13402 PCI_VPD_RO_KEYWORD_PARTNO);
13403 if (i < 0)
13404 goto out_not_found;
13406 len = pci_vpd_info_field_size(&vpd_data[i]);
13408 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13409 if (len > TG3_BPN_SIZE ||
13410 (len + i) > vpdlen)
13411 goto out_not_found;
13413 memcpy(tp->board_part_number, &vpd_data[i], len);
13415 out_not_found:
13416 kfree(vpd_data);
13417 if (tp->board_part_number[0])
13418 return;
13420 out_no_vpd:
13421 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13422 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13423 strcpy(tp->board_part_number, "BCM5717");
13424 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13425 strcpy(tp->board_part_number, "BCM5718");
13426 else
13427 goto nomatch;
13428 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13429 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13430 strcpy(tp->board_part_number, "BCM57780");
13431 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13432 strcpy(tp->board_part_number, "BCM57760");
13433 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13434 strcpy(tp->board_part_number, "BCM57790");
13435 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13436 strcpy(tp->board_part_number, "BCM57788");
13437 else
13438 goto nomatch;
13439 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13440 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13441 strcpy(tp->board_part_number, "BCM57761");
13442 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13443 strcpy(tp->board_part_number, "BCM57765");
13444 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13445 strcpy(tp->board_part_number, "BCM57781");
13446 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13447 strcpy(tp->board_part_number, "BCM57785");
13448 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13449 strcpy(tp->board_part_number, "BCM57791");
13450 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13451 strcpy(tp->board_part_number, "BCM57795");
13452 else
13453 goto nomatch;
13454 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13455 strcpy(tp->board_part_number, "BCM95906");
13456 } else {
13457 nomatch:
13458 strcpy(tp->board_part_number, "none");
13459 }
13460 }
13462 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13463 {
13464 u32 val;
13466 if (tg3_nvram_read(tp, offset, &val) ||
13467 (val & 0xfc000000) != 0x0c000000 ||
13468 tg3_nvram_read(tp, offset + 4, &val) ||
13469 val != 0)
13470 return 0;
13472 return 1;
13473 }
13475 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13476 {
13477 u32 val, offset, start, ver_offset;
13478 int i, dst_off;
13479 bool newver = false;
13481 if (tg3_nvram_read(tp, 0xc, &offset) ||
13482 tg3_nvram_read(tp, 0x4, &start))
13483 return;
13485 offset = tg3_nvram_logical_addr(tp, offset);
13487 if (tg3_nvram_read(tp, offset, &val))
13488 return;
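/* A first image word with 0x0c in the top bits marks the newer
 * bootcode format (the same signature tg3_fw_img_is_valid() checks).
 */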
13490 if ((val & 0xfc000000) == 0x0c000000) {
13491 if (tg3_nvram_read(tp, offset + 4, &val))
13492 return;
13494 if (val == 0)
13495 newver = true;
13496 }
13498 dst_off = strlen(tp->fw_ver);
13500 if (newver) {
13501 if (TG3_VER_SIZE - dst_off < 16 ||
13502 tg3_nvram_read(tp, offset + 8, &ver_offset))
13503 return;
13505 offset = offset + ver_offset - start;
13506 for (i = 0; i < 16; i += 4) {
13507 __be32 v;
13508 if (tg3_nvram_read_be32(tp, offset + i, &v))
13509 return;
13511 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13512 }
13513 } else {
13514 u32 major, minor;
13516 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13517 return;
13519 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13520 TG3_NVM_BCVER_MAJSFT;
13521 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13522 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13523 "v%d.%02d", major, minor);
13524 }
13525 }
13527 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13528 {
13529 u32 val, major, minor;
13531 /* Use native endian representation */
13532 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13533 return;
13535 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13536 TG3_NVM_HWSB_CFG1_MAJSFT;
13537 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13538 TG3_NVM_HWSB_CFG1_MINSFT;
13540 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13541 }
13543 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13544 {
13545 u32 offset, major, minor, build;
13547 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13549 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13550 return;
13552 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13553 case TG3_EEPROM_SB_REVISION_0:
13554 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13555 break;
13556 case TG3_EEPROM_SB_REVISION_2:
13557 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13558 break;
13559 case TG3_EEPROM_SB_REVISION_3:
13560 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13561 break;
13562 case TG3_EEPROM_SB_REVISION_4:
13563 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13564 break;
13565 case TG3_EEPROM_SB_REVISION_5:
13566 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13567 break;
13568 case TG3_EEPROM_SB_REVISION_6:
13569 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13570 break;
13571 default:
13572 return;
13573 }
13575 if (tg3_nvram_read(tp, offset, &val))
13576 return;
13578 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13579 TG3_EEPROM_SB_EDH_BLD_SHFT;
13580 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13581 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13582 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13584 if (minor > 99 || build > 26)
13585 return;
13587 offset = strlen(tp->fw_ver);
13588 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13589 " v%d.%02d", major, minor);
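/* A non-zero build number is appended as a letter: build 1 becomes
 * 'a', build 2 'b', and so on (build is capped at 26 above).
 */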
13591 if (build > 0) {
13592 offset = strlen(tp->fw_ver);
13593 if (offset < TG3_VER_SIZE - 1)
13594 tp->fw_ver[offset] = 'a' + build - 1;
13595 }
13596 }
13598 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13599 {
13600 u32 val, offset, start;
13601 int i, vlen;
13603 for (offset = TG3_NVM_DIR_START;
13604 offset < TG3_NVM_DIR_END;
13605 offset += TG3_NVM_DIRENT_SIZE) {
13606 if (tg3_nvram_read(tp, offset, &val))
13607 return;
13609 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13610 break;
13611 }
13613 if (offset == TG3_NVM_DIR_END)
13614 return;
13616 if (!tg3_flag(tp, 5705_PLUS))
13617 start = 0x08000000;
13618 else if (tg3_nvram_read(tp, offset - 4, &start))
13619 return;
13621 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13622 !tg3_fw_img_is_valid(tp, offset) ||
13623 tg3_nvram_read(tp, offset + 8, &val))
13624 return;
13626 offset += val - start;
13628 vlen = strlen(tp->fw_ver);
13630 tp->fw_ver[vlen++] = ',';
13631 tp->fw_ver[vlen++] = ' ';
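/* Append up to 16 bytes of the ASF image's version string,
 * truncating if the fw_ver buffer is nearly full.
 */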
13633 for (i = 0; i < 4; i++) {
13634 __be32 v;
13635 if (tg3_nvram_read_be32(tp, offset, &v))
13636 return;
13638 offset += sizeof(v);
13640 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13641 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13642 break;
13643 }
13645 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13646 vlen += sizeof(v);
13647 }
13648 }
13650 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13651 {
13652 int vlen;
13653 u32 apedata;
13654 char *fwtype;
13656 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13657 return;
13659 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13660 if (apedata != APE_SEG_SIG_MAGIC)
13661 return;
13663 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13664 if (!(apedata & APE_FW_STATUS_READY))
13665 return;
13667 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13669 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13670 tg3_flag_set(tp, APE_HAS_NCSI);
13671 fwtype = "NCSI";
13672 } else {
13673 fwtype = "DASH";
13674 }
13676 vlen = strlen(tp->fw_ver);
13678 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13679 fwtype,
13680 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13681 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13682 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13683 (apedata & APE_FW_VERSION_BLDMSK));
13684 }
13686 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13687 {
13688 u32 val;
13689 bool vpd_vers = false;
13691 if (tp->fw_ver[0] != 0)
13692 vpd_vers = true;
13694 if (tg3_flag(tp, NO_NVRAM)) {
13695 strcat(tp->fw_ver, "sb");
13696 return;
13697 }
13699 if (tg3_nvram_read(tp, 0, &val))
13700 return;
13702 if (val == TG3_EEPROM_MAGIC)
13703 tg3_read_bc_ver(tp);
13704 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13705 tg3_read_sb_ver(tp, val);
13706 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13707 tg3_read_hwsb_ver(tp);
13708 else
13709 return;
13711 if (vpd_vers)
13712 goto done;
13714 if (tg3_flag(tp, ENABLE_APE)) {
13715 if (tg3_flag(tp, ENABLE_ASF))
13716 tg3_read_dash_ver(tp);
13717 } else if (tg3_flag(tp, ENABLE_ASF)) {
13718 tg3_read_mgmtfw_ver(tp);
13719 }
13721 done:
13722 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13723 }
13725 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13727 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13728 {
13729 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13730 return TG3_RX_RET_MAX_SIZE_5717;
13731 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13732 return TG3_RX_RET_MAX_SIZE_5700;
13733 else
13734 return TG3_RX_RET_MAX_SIZE_5705;
13735 }
13737 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13738 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13739 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13740 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13741 { },
13742 };
13744 static int __devinit tg3_get_invariants(struct tg3 *tp)
13745 {
13746 u32 misc_ctrl_reg;
13747 u32 pci_state_reg, grc_misc_cfg;
13748 u32 val;
13749 u16 pci_cmd;
13750 int err;
13752 /* Force memory write invalidate off. If we leave it on,
13753 * then on 5700_BX chips we have to enable a workaround.
13754 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13755 * to match the cacheline size. The Broadcom driver has this
13756 * workaround but turns MWI off all the time so it never uses
13757 * it. This seems to suggest that the workaround is insufficient.
13758 */
13759 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13760 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13761 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13763 /* Important! -- Make sure register accesses are byteswapped
13764 * correctly. Also, for those chips that require it, make
13765 * sure that indirect register accesses are enabled before
13766 * the first operation.
13767 */
13768 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13769 &misc_ctrl_reg);
13770 tp->misc_host_ctrl |= (misc_ctrl_reg &
13771 MISC_HOST_CTRL_CHIPREV);
13772 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13773 tp->misc_host_ctrl);
13775 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13776 MISC_HOST_CTRL_CHIPREV_SHIFT);
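/* Chips that report ASIC_REV_USE_PROD_ID_REG keep their real
 * revision in a product ID register whose offset varies by family.
 */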
13777 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13778 u32 prod_id_asic_rev;
13780 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13781 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13782 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13783 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13784 pci_read_config_dword(tp->pdev,
13785 TG3PCI_GEN2_PRODID_ASICREV,
13786 &prod_id_asic_rev);
13787 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13788 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13789 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13790 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13791 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13792 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13793 pci_read_config_dword(tp->pdev,
13794 TG3PCI_GEN15_PRODID_ASICREV,
13795 &prod_id_asic_rev);
13796 else
13797 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13798 &prod_id_asic_rev);
13800 tp->pci_chip_rev_id = prod_id_asic_rev;
13801 }
13803 /* Wrong chip ID in 5752 A0. This code can be removed later
13804 * as A0 is not in production.
13805 */
13806 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13807 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13809 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13810 * we need to disable memory and use config. cycles
13811 * only to access all registers. The 5702/03 chips
13812 * can mistakenly decode the special cycles from the
13813 * ICH chipsets as memory write cycles, causing corruption
13814 * of register and memory space. Only certain ICH bridges
13815 * will drive special cycles with non-zero data during the
13816 * address phase which can fall within the 5703's address
13817 * range. This is not an ICH bug as the PCI spec allows
13818 * non-zero address during special cycles. However, only
13819 * these ICH bridges are known to drive non-zero addresses
13820 * during special cycles.
13822 * Since special cycles do not cross PCI bridges, we only
13823 * enable this workaround if the 5703 is on the secondary
13824 * bus of these ICH bridges.
13825 */
13826 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13827 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13828 static struct tg3_dev_id {
13829 u32 vendor;
13830 u32 device;
13831 u32 rev;
13832 } ich_chipsets[] = {
13833 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13834 PCI_ANY_ID },
13835 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13836 PCI_ANY_ID },
13837 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13838 0xa },
13839 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13840 PCI_ANY_ID },
13841 { },
13842 };
13843 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13844 struct pci_dev *bridge = NULL;
13846 while (pci_id->vendor != 0) {
13847 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13848 bridge);
13849 if (!bridge) {
13850 pci_id++;
13851 continue;
13852 }
13853 if (pci_id->rev != PCI_ANY_ID) {
13854 if (bridge->revision > pci_id->rev)
13855 continue;
13856 }
13857 if (bridge->subordinate &&
13858 (bridge->subordinate->number ==
13859 tp->pdev->bus->number)) {
13860 tg3_flag_set(tp, ICH_WORKAROUND);
13861 pci_dev_put(bridge);
13862 break;
13863 }
13864 }
13865 }
13867 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13868 static struct tg3_dev_id {
13869 u32 vendor;
13870 u32 device;
13871 } bridge_chipsets[] = {
13872 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13873 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13874 { },
13875 };
13876 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13877 struct pci_dev *bridge = NULL;
13879 while (pci_id->vendor != 0) {
13880 bridge = pci_get_device(pci_id->vendor,
13881 pci_id->device,
13882 bridge);
13883 if (!bridge) {
13884 pci_id++;
13885 continue;
13886 }
13887 if (bridge->subordinate &&
13888 (bridge->subordinate->number <=
13889 tp->pdev->bus->number) &&
13890 (bridge->subordinate->subordinate >=
13891 tp->pdev->bus->number)) {
13892 tg3_flag_set(tp, 5701_DMA_BUG);
13893 pci_dev_put(bridge);
13894 break;
13895 }
13896 }
13897 }
13899 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13900 * DMA addresses > 40-bit. This bridge may have additional
13901 * 57xx devices behind it, in some 4-port NIC designs for example.
13902 * Any tg3 device found behind the bridge will also need the 40-bit
13903 * DMA workaround.
13904 */
13905 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13906 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13907 tg3_flag_set(tp, 5780_CLASS);
13908 tg3_flag_set(tp, 40BIT_DMA_BUG);
13909 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13910 } else {
13911 struct pci_dev *bridge = NULL;
13913 do {
13914 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13915 PCI_DEVICE_ID_SERVERWORKS_EPB,
13916 bridge);
13917 if (bridge && bridge->subordinate &&
13918 (bridge->subordinate->number <=
13919 tp->pdev->bus->number) &&
13920 (bridge->subordinate->subordinate >=
13921 tp->pdev->bus->number)) {
13922 tg3_flag_set(tp, 40BIT_DMA_BUG);
13923 pci_dev_put(bridge);
13924 break;
13925 }
13926 } while (bridge);
13927 }
13929 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13930 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13931 tp->pdev_peer = tg3_find_peer(tp);
13933 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13934 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13935 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13936 tg3_flag_set(tp, 5717_PLUS);
13938 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13939 tg3_flag(tp, 5717_PLUS))
13940 tg3_flag_set(tp, 57765_PLUS);
13942 /* Intentionally exclude ASIC_REV_5906 */
13943 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13944 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13945 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13946 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13947 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13948 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13949 tg3_flag(tp, 57765_PLUS))
13950 tg3_flag_set(tp, 5755_PLUS);
13952 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13953 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13954 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13955 tg3_flag(tp, 5755_PLUS) ||
13956 tg3_flag(tp, 5780_CLASS))
13957 tg3_flag_set(tp, 5750_PLUS);
13959 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13960 tg3_flag(tp, 5750_PLUS))
13961 tg3_flag_set(tp, 5705_PLUS);
13963 /* Determine TSO capabilities */
13964 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
13965 ; /* Do nothing. HW bug. */
13966 else if (tg3_flag(tp, 57765_PLUS))
13967 tg3_flag_set(tp, HW_TSO_3);
13968 else if (tg3_flag(tp, 5755_PLUS) ||
13969 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13970 tg3_flag_set(tp, HW_TSO_2);
13971 else if (tg3_flag(tp, 5750_PLUS)) {
13972 tg3_flag_set(tp, HW_TSO_1);
13973 tg3_flag_set(tp, TSO_BUG);
13974 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13975 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13976 tg3_flag_clear(tp, TSO_BUG);
13977 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13978 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13979 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13980 tg3_flag_set(tp, TSO_BUG);
13981 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13982 tp->fw_needed = FIRMWARE_TG3TSO5;
13983 else
13984 tp->fw_needed = FIRMWARE_TG3TSO;
13985 }
13987 /* Selectively allow TSO based on operating conditions */
13988 if (tg3_flag(tp, HW_TSO_1) ||
13989 tg3_flag(tp, HW_TSO_2) ||
13990 tg3_flag(tp, HW_TSO_3) ||
13991 (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13992 tg3_flag_set(tp, TSO_CAPABLE);
13993 else {
13994 tg3_flag_clear(tp, TSO_CAPABLE);
13995 tg3_flag_clear(tp, TSO_BUG);
13996 tp->fw_needed = NULL;
13997 }
13999 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14000 tp->fw_needed = FIRMWARE_TG3;
14002 tp->irq_max = 1;
14004 if (tg3_flag(tp, 5750_PLUS)) {
14005 tg3_flag_set(tp, SUPPORT_MSI);
14006 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14007 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14008 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14009 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14010 tp->pdev_peer == tp->pdev))
14011 tg3_flag_clear(tp, SUPPORT_MSI);
14013 if (tg3_flag(tp, 5755_PLUS) ||
14014 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14015 tg3_flag_set(tp, 1SHOT_MSI);
14016 }
14018 if (tg3_flag(tp, 57765_PLUS)) {
14019 tg3_flag_set(tp, SUPPORT_MSIX);
14020 tp->irq_max = TG3_IRQ_MAX_VECS;
14021 }
14022 }
14024 if (tg3_flag(tp, 5755_PLUS))
14025 tg3_flag_set(tp, SHORT_DMA_BUG);
14027 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14028 tg3_flag_set(tp, 4K_FIFO_LIMIT);
14030 if (tg3_flag(tp, 5717_PLUS))
14031 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14033 if (tg3_flag(tp, 57765_PLUS) &&
14034 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14035 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14037 if (!tg3_flag(tp, 5705_PLUS) ||
14038 tg3_flag(tp, 5780_CLASS) ||
14039 tg3_flag(tp, USE_JUMBO_BDFLAG))
14040 tg3_flag_set(tp, JUMBO_CAPABLE);
14042 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14043 &pci_state_reg);
14045 if (pci_is_pcie(tp->pdev)) {
14046 u16 lnkctl;
14048 tg3_flag_set(tp, PCI_EXPRESS);
14050 tp->pcie_readrq = 4096;
14051 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14052 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14053 tp->pcie_readrq = 2048;
14055 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
14057 pci_read_config_word(tp->pdev,
14058 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14059 &lnkctl);
14060 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14061 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14062 ASIC_REV_5906) {
14063 tg3_flag_clear(tp, HW_TSO_2);
14064 tg3_flag_clear(tp, TSO_CAPABLE);
14065 }
14066 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14067 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14068 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14069 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14070 tg3_flag_set(tp, CLKREQ_BUG);
14071 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14072 tg3_flag_set(tp, L1PLLPD_EN);
14073 }
14074 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14075 /* BCM5785 devices are effectively PCIe devices, and should
14076 * follow PCIe codepaths, but do not have a PCIe capabilities
14077 * section.
14078 */
14079 tg3_flag_set(tp, PCI_EXPRESS);
14080 } else if (!tg3_flag(tp, 5705_PLUS) ||
14081 tg3_flag(tp, 5780_CLASS)) {
14082 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14083 if (!tp->pcix_cap) {
14084 dev_err(&tp->pdev->dev,
14085 "Cannot find PCI-X capability, aborting\n");
14086 return -EIO;
14087 }
14089 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14090 tg3_flag_set(tp, PCIX_MODE);
14091 }
14093 /* If we have an AMD 762 or VIA K8T800 chipset, write
14094 * reordering to the mailbox registers done by the host
14095 * controller can cause major problems. We read back from
14096 * every mailbox register write to force the writes to be
14097 * posted to the chip in order.
14098 */
14099 if (pci_dev_present(tg3_write_reorder_chipsets) &&
14100 !tg3_flag(tp, PCI_EXPRESS))
14101 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14103 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14104 &tp->pci_cacheline_sz);
14105 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14106 &tp->pci_lat_timer);
14107 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14108 tp->pci_lat_timer < 64) {
14109 tp->pci_lat_timer = 64;
14110 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14111 tp->pci_lat_timer);
14112 }
14114 /* Important! -- It is critical that the PCI-X hw workaround
14115 * situation is decided before the first MMIO register access.
14116 */
14117 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14118 /* 5700 BX chips need to have their TX producer index
14119 * mailboxes written twice to workaround a bug.
14121 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14123 /* If we are in PCI-X mode, enable register write workaround.
14125 * The workaround is to use indirect register accesses
14126 * for all chip writes not to mailbox registers.
14127 */
14128 if (tg3_flag(tp, PCIX_MODE)) {
14129 u32 pm_reg;
14131 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14133 /* The chip can have its power management PCI config
14134 * space registers clobbered due to this bug.
14135 * So explicitly force the chip into D0 here.
14136 */
14137 pci_read_config_dword(tp->pdev,
14138 tp->pm_cap + PCI_PM_CTRL,
14139 &pm_reg);
14140 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14141 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14142 pci_write_config_dword(tp->pdev,
14143 tp->pm_cap + PCI_PM_CTRL,
14144 pm_reg);
14146 /* Also, force SERR#/PERR# in PCI command. */
14147 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14148 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14149 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14150 }
14151 }
14153 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14154 tg3_flag_set(tp, PCI_HIGH_SPEED);
14155 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14156 tg3_flag_set(tp, PCI_32BIT);
14158 /* Chip-specific fixup from Broadcom driver */
14159 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14160 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14161 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14162 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14163 }
14165 /* Default fast path register access methods */
14166 tp->read32 = tg3_read32;
14167 tp->write32 = tg3_write32;
14168 tp->read32_mbox = tg3_read32;
14169 tp->write32_mbox = tg3_write32;
14170 tp->write32_tx_mbox = tg3_write32;
14171 tp->write32_rx_mbox = tg3_write32;
14173 /* Various workaround register access methods */
14174 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14175 tp->write32 = tg3_write_indirect_reg32;
14176 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14177 (tg3_flag(tp, PCI_EXPRESS) &&
14178 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14179 /*
14180 * Back to back register writes can cause problems on these
14181 * chips; the workaround is to read back all reg writes
14182 * except those to mailbox regs.
14183 *
14184 * See tg3_write_indirect_reg32().
14185 */
14186 tp->write32 = tg3_write_flush_reg32;
14189 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14190 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14191 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14192 tp->write32_rx_mbox = tg3_write_flush_reg32;
14195 if (tg3_flag(tp, ICH_WORKAROUND)) {
14196 tp->read32 = tg3_read_indirect_reg32;
14197 tp->write32 = tg3_write_indirect_reg32;
14198 tp->read32_mbox = tg3_read_indirect_mbox;
14199 tp->write32_mbox = tg3_write_indirect_mbox;
14200 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14201 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14203 iounmap(tp->regs);
14204 tp->regs = NULL;
14206 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14207 pci_cmd &= ~PCI_COMMAND_MEMORY;
14208 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14210 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14211 tp->read32_mbox = tg3_read32_mbox_5906;
14212 tp->write32_mbox = tg3_write32_mbox_5906;
14213 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14214 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14217 if (tp->write32 == tg3_write_indirect_reg32 ||
14218 (tg3_flag(tp, PCIX_MODE) &&
14219 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14220 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14221 tg3_flag_set(tp, SRAM_USE_CONFIG);
14223 /* The memory arbiter has to be enabled in order for SRAM accesses
14224 * to succeed. Normally on powerup the tg3 chip firmware will make
14225 * sure it is enabled, but other entities such as system netboot
14226 * code might disable it.
14227 */
14228 val = tr32(MEMARB_MODE);
14229 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
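/* In PCI-X mode, read the function number the chip itself reports:
 * per the PCI-X spec, bits 2:0 of the PCI-X status register hold the
 * function number, which need not match the devfn the OS sees.
 */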
14231 if (tg3_flag(tp, PCIX_MODE)) {
14232 pci_read_config_dword(tp->pdev,
14233 tp->pcix_cap + PCI_X_STATUS, &val);
14234 tp->pci_fn = val & 0x7;
14235 } else {
14236 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14239 /* Get eeprom hw config before calling tg3_set_power_state().
14240 * In particular, the TG3_FLAG_IS_NIC flag must be
14241 * determined before calling tg3_set_power_state() so that
14242 * we know whether or not to switch out of Vaux power.
14243 * When the flag is set, it means that GPIO1 is used for eeprom
14244 * write protect and also implies that it is a LOM where GPIOs
14245 * are not used to switch power.
14246 */
14247 tg3_get_eeprom_hw_cfg(tp);
14249 if (tg3_flag(tp, ENABLE_APE)) {
14250 /* Allow reads and writes to the
14251 * APE register and memory space.
14252 */
14253 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14254 PCISTATE_ALLOW_APE_SHMEM_WR |
14255 PCISTATE_ALLOW_APE_PSPACE_WR;
14256 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14257 pci_state_reg);
14259 tg3_ape_lock_init(tp);
14262 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14263 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14264 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14265 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14266 tg3_flag(tp, 57765_PLUS))
14267 tg3_flag_set(tp, CPMU_PRESENT);
14269 /* Set up tp->grc_local_ctrl before calling
14270 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
14271 * will bring 5700's external PHY out of reset.
14272 * It is also used as eeprom write protect on LOMs.
14273 */
14274 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14275 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14276 tg3_flag(tp, EEPROM_WRITE_PROT))
14277 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14278 GRC_LCLCTRL_GPIO_OUTPUT1);
14279 /* Unused GPIO3 must be driven as output on 5752 because there
14280 * are no pull-up resistors on unused GPIO pins.
14281 */
14282 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14283 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14285 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14286 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14287 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14288 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14290 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14291 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14292 /* Turn off the debug UART. */
14293 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14294 if (tg3_flag(tp, IS_NIC))
14295 /* Keep VMain power. */
14296 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14297 GRC_LCLCTRL_GPIO_OUTPUT0;
14300 /* Switch out of Vaux if it is a NIC */
14301 tg3_pwrsrc_switch_to_vmain(tp);
14303 /* Derive initial jumbo mode from MTU assigned in
14304 * ether_setup() via the alloc_etherdev() call
14305 */
14306 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14307 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14309 /* Determine WakeOnLan speed to use. */
14310 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14311 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14312 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14313 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14314 tg3_flag_clear(tp, WOL_SPEED_100MB);
14315 } else {
14316 tg3_flag_set(tp, WOL_SPEED_100MB);
14319 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14320 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14322 /* A few boards don't want Ethernet@WireSpeed phy feature */
14323 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14324 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14325 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14326 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14327 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14328 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14329 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14331 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14332 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14333 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14334 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14335 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14337 if (tg3_flag(tp, 5705_PLUS) &&
14338 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14339 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14340 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14341 !tg3_flag(tp, 57765_PLUS)) {
14342 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14343 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14344 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14345 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14346 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14347 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14348 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14349 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14350 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14351 } else
14352 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14355 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14356 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14357 tp->phy_otp = tg3_read_otp_phycfg(tp);
14358 if (tp->phy_otp == 0)
14359 tp->phy_otp = TG3_OTP_DEFAULT;
14362 if (tg3_flag(tp, CPMU_PRESENT))
14363 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14364 else
14365 tp->mi_mode = MAC_MI_MODE_BASE;
14367 tp->coalesce_mode = 0;
14368 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14369 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14370 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14372 /* Set these bits to enable statistics workaround. */
14373 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14374 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14375 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14376 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14377 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14380 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14381 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14382 tg3_flag_set(tp, USE_PHYLIB);
14384 err = tg3_mdio_init(tp);
14385 if (err)
14386 return err;
14388 /* Initialize data/descriptor byte/word swapping. */
14389 val = tr32(GRC_MODE);
14390 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14391 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14392 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14393 GRC_MODE_B2HRX_ENABLE |
14394 GRC_MODE_HTX2B_ENABLE |
14395 GRC_MODE_HOST_STACKUP);
14396 else
14397 val &= GRC_MODE_HOST_STACKUP;
14399 tw32(GRC_MODE, val | tp->grc_mode);
14401 tg3_switch_clocks(tp);
14403 /* Clear this out for sanity. */
14404 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14406 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14407 &pci_state_reg);
14408 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14409 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14410 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14412 if (chiprevid == CHIPREV_ID_5701_A0 ||
14413 chiprevid == CHIPREV_ID_5701_B0 ||
14414 chiprevid == CHIPREV_ID_5701_B2 ||
14415 chiprevid == CHIPREV_ID_5701_B5) {
14416 void __iomem *sram_base;
14418 /* Write some dummy words into the SRAM status block
14419 * area and see if they read back correctly. If the return
14420 * value is bad, force enable the PCIX workaround.
14421 */
14422 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14424 writel(0x00000000, sram_base);
14425 writel(0x00000000, sram_base + 4);
14426 writel(0xffffffff, sram_base + 4);
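/* If the 0xffffffff write to sram_base + 4 bleeds into the word at
 * sram_base, the readback below returns nonzero and the target
 * workaround is forced on.
 */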
14427 if (readl(sram_base) != 0x00000000)
14428 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14432 udelay(50);
14433 tg3_nvram_init(tp);
14435 grc_misc_cfg = tr32(GRC_MISC_CFG);
14436 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14438 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14439 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14440 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14441 tg3_flag_set(tp, IS_5788);
14443 if (!tg3_flag(tp, IS_5788) &&
14444 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14445 tg3_flag_set(tp, TAGGED_STATUS);
14446 if (tg3_flag(tp, TAGGED_STATUS)) {
14447 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14448 HOSTCC_MODE_CLRTICK_TXBD);
14450 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14451 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14452 tp->misc_host_ctrl);
14455 /* Preserve the APE MAC_MODE bits */
14456 if (tg3_flag(tp, ENABLE_APE))
14457 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14458 else
14459 tp->mac_mode = 0;
14461 /* these are limited to 10/100 only */
14462 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14463 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14464 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14465 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14466 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14467 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14468 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14469 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14470 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14471 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14472 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14473 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14474 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14475 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14476 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14477 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14479 err = tg3_phy_probe(tp);
14480 if (err) {
14481 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14482 /* ... but do not return immediately ... */
14483 tg3_mdio_fini(tp);
14486 tg3_read_vpd(tp);
14487 tg3_read_fw_ver(tp);
14489 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14490 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14491 } else {
14492 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14493 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14494 else
14495 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14498 /* 5700 {AX,BX} chips have a broken status block link
14499 * change bit implementation, so we must use the
14500 * status register in those cases.
14501 */
14502 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14503 tg3_flag_set(tp, USE_LINKCHG_REG);
14504 else
14505 tg3_flag_clear(tp, USE_LINKCHG_REG);
14507 /* The led_ctrl is set during tg3_phy_probe; here we might
14508 * have to force the link status polling mechanism based
14509 * upon subsystem IDs.
14510 */
14511 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14512 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14513 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14514 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14515 tg3_flag_set(tp, USE_LINKCHG_REG);
14518 /* For all SERDES we poll the MAC status register. */
14519 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14520 tg3_flag_set(tp, POLL_SERDES);
14521 else
14522 tg3_flag_clear(tp, POLL_SERDES);
14524 tp->rx_offset = NET_IP_ALIGN;
14525 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14526 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14527 tg3_flag(tp, PCIX_MODE)) {
14528 tp->rx_offset = 0;
14529 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14530 tp->rx_copy_thresh = ~(u16)0;
14531 #endif
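/* With rx_offset forced to 0, frames are no longer NET_IP_ALIGN'ed;
 * on arches lacking efficient unaligned access, maxing the copy
 * threshold makes every packet get copied into an aligned buffer.
 */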
14534 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14535 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14536 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14538 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14540 /* Increment the rx prod index on the rx std ring by at most
14541 * 8 for these chips to work around hw errata.
14542 */
14543 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14544 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14545 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14546 tp->rx_std_max_post = 8;
14548 if (tg3_flag(tp, ASPM_WORKAROUND))
14549 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14550 PCIE_PWR_MGMT_L1_THRESH_MSK;
14552 return err;
14555 #ifdef CONFIG_SPARC
14556 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14558 struct net_device *dev = tp->dev;
14559 struct pci_dev *pdev = tp->pdev;
14560 struct device_node *dp = pci_device_to_OF_node(pdev);
14561 const unsigned char *addr;
14562 int len;
14564 addr = of_get_property(dp, "local-mac-address", &len);
14565 if (addr && len == 6) {
14566 memcpy(dev->dev_addr, addr, 6);
14567 memcpy(dev->perm_addr, dev->dev_addr, 6);
14568 return 0;
14570 return -ENODEV;
14573 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14575 struct net_device *dev = tp->dev;
14577 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14578 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14579 return 0;
14581 #endif
14583 static int __devinit tg3_get_device_address(struct tg3 *tp)
14585 struct net_device *dev = tp->dev;
14586 u32 hi, lo, mac_offset;
14587 int addr_ok = 0;
14589 #ifdef CONFIG_SPARC
14590 if (!tg3_get_macaddr_sparc(tp))
14591 return 0;
14592 #endif
14594 mac_offset = 0x7c;
14595 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14596 tg3_flag(tp, 5780_CLASS)) {
14597 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14598 mac_offset = 0xcc;
14599 if (tg3_nvram_lock(tp))
14600 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14601 else
14602 tg3_nvram_unlock(tp);
14603 } else if (tg3_flag(tp, 5717_PLUS)) {
14604 if (tp->pci_fn & 1)
14605 mac_offset = 0xcc;
14606 if (tp->pci_fn > 1)
14607 mac_offset += 0x18c;
14608 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14609 mac_offset = 0x10;
14611 /* First try to get it from MAC address mailbox. */
14612 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
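/* 0x484b is ASCII "HK", apparently the signature bootcode uses to
 * mark a valid MAC address in the SRAM mailbox.
 */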
14613 if ((hi >> 16) == 0x484b) {
14614 dev->dev_addr[0] = (hi >> 8) & 0xff;
14615 dev->dev_addr[1] = (hi >> 0) & 0xff;
14617 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14618 dev->dev_addr[2] = (lo >> 24) & 0xff;
14619 dev->dev_addr[3] = (lo >> 16) & 0xff;
14620 dev->dev_addr[4] = (lo >> 8) & 0xff;
14621 dev->dev_addr[5] = (lo >> 0) & 0xff;
14623 /* Some old bootcode may report a 0 MAC address in SRAM */
14624 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14626 if (!addr_ok) {
14627 /* Next, try NVRAM. */
14628 if (!tg3_flag(tp, NO_NVRAM) &&
14629 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14630 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14631 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14632 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14634 /* Finally just fetch it out of the MAC control regs. */
14635 else {
14636 hi = tr32(MAC_ADDR_0_HIGH);
14637 lo = tr32(MAC_ADDR_0_LOW);
14639 dev->dev_addr[5] = lo & 0xff;
14640 dev->dev_addr[4] = (lo >> 8) & 0xff;
14641 dev->dev_addr[3] = (lo >> 16) & 0xff;
14642 dev->dev_addr[2] = (lo >> 24) & 0xff;
14643 dev->dev_addr[1] = hi & 0xff;
14644 dev->dev_addr[0] = (hi >> 8) & 0xff;
14648 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14649 #ifdef CONFIG_SPARC
14650 if (!tg3_get_default_macaddr_sparc(tp))
14651 return 0;
14652 #endif
14653 return -EINVAL;
14655 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14656 return 0;
14659 #define BOUNDARY_SINGLE_CACHELINE 1
14660 #define BOUNDARY_MULTI_CACHELINE 2
14662 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14664 int cacheline_size;
14665 u8 byte;
14666 int goal;
14668 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14669 if (byte == 0)
14670 cacheline_size = 1024;
14671 else
14672 cacheline_size = (int) byte * 4;
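/* PCI_CACHE_LINE_SIZE is in 32-bit words, hence the multiply by 4
 * to get bytes (e.g. 0x10 -> 64-byte lines); 0 means unspecified,
 * so assume the 1024-byte worst case.
 */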
14674 /* On 5703 and later chips, the boundary bits have no
14675 * effect.
14676 */
14677 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14678 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14679 !tg3_flag(tp, PCI_EXPRESS))
14680 goto out;
14682 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14683 goal = BOUNDARY_MULTI_CACHELINE;
14684 #else
14685 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14686 goal = BOUNDARY_SINGLE_CACHELINE;
14687 #else
14688 goal = 0;
14689 #endif
14690 #endif
14692 if (tg3_flag(tp, 57765_PLUS)) {
14693 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14694 goto out;
14697 if (!goal)
14698 goto out;
14700 /* PCI controllers on most RISC systems tend to disconnect
14701 * when a device tries to burst across a cache-line boundary.
14702 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14704 * Unfortunately, for PCI-E there are only limited
14705 * write-side controls for this, and thus for reads
14706 * we will still get the disconnects. We'll also waste
14707 * these PCI cycles for both read and write for chips
14708 * other than 5700 and 5701 which do not implement the
14709 * boundary bits.
14710 */
14711 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14712 switch (cacheline_size) {
14713 case 16:
14714 case 32:
14715 case 64:
14716 case 128:
14717 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14718 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14719 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14720 } else {
14721 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14722 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14724 break;
14726 case 256:
14727 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14728 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14729 break;
14731 default:
14732 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14733 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14734 break;
14736 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14737 switch (cacheline_size) {
14738 case 16:
14739 case 32:
14740 case 64:
14741 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14742 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14743 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14744 break;
14746 /* fallthrough */
14747 case 128:
14748 default:
14749 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14750 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14751 break;
14753 } else {
14754 switch (cacheline_size) {
14755 case 16:
14756 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14757 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14758 DMA_RWCTRL_WRITE_BNDRY_16);
14759 break;
14761 /* fallthrough */
14762 case 32:
14763 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14764 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14765 DMA_RWCTRL_WRITE_BNDRY_32);
14766 break;
14768 /* fallthrough */
14769 case 64:
14770 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14771 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14772 DMA_RWCTRL_WRITE_BNDRY_64);
14773 break;
14775 /* fallthrough */
14776 case 128:
14777 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14778 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14779 DMA_RWCTRL_WRITE_BNDRY_128);
14780 break;
14782 /* fallthrough */
14783 case 256:
14784 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14785 DMA_RWCTRL_WRITE_BNDRY_256);
14786 break;
14787 case 512:
14788 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14789 DMA_RWCTRL_WRITE_BNDRY_512);
14790 break;
14791 case 1024:
14792 default:
14793 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14794 DMA_RWCTRL_WRITE_BNDRY_1024);
14795 break;
14799 out:
14800 return val;
14803 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14805 struct tg3_internal_buffer_desc test_desc;
14806 u32 sram_dma_descs;
14807 int i, ret;
14809 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14811 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14812 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14813 tw32(RDMAC_STATUS, 0);
14814 tw32(WDMAC_STATUS, 0);
14816 tw32(BUFMGR_MODE, 0);
14817 tw32(FTQ_RESET, 0);
14819 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14820 test_desc.addr_lo = buf_dma & 0xffffffff;
14821 test_desc.nic_mbuf = 0x00002100;
14822 test_desc.len = size;
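/* 0x00002100 selects the staging mbuf in NIC SRAM; judging by the
 * disabled verification loop in tg3_test_dma(), this is also where
 * the test data lands on-chip.
 */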
14824 /*
14825 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14826 * the *second* time the tg3 driver was getting loaded after an
14827 * initial scan.
14828 *
14829 * Broadcom tells me:
14830 * ...the DMA engine is connected to the GRC block and a DMA
14831 * reset may affect the GRC block in some unpredictable way...
14832 * The behavior of resets to individual blocks has not been tested.
14833 *
14834 * Broadcom noted the GRC reset will also reset all sub-components.
14835 */
14836 if (to_device) {
14837 test_desc.cqid_sqid = (13 << 8) | 2;
14839 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14840 udelay(40);
14841 } else {
14842 test_desc.cqid_sqid = (16 << 8) | 7;
14844 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14845 udelay(40);
14847 test_desc.flags = 0x00000005;
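/* Copy the descriptor into NIC SRAM one word at a time through the
 * indirect memory window: TG3PCI_MEM_WIN_BASE_ADDR selects the
 * target address and TG3PCI_MEM_WIN_DATA carries the data. The
 * window is closed again before the FTQ enqueue kicks the engine.
 */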
14849 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14850 u32 val;
14852 val = *(((u32 *)&test_desc) + i);
14853 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14854 sram_dma_descs + (i * sizeof(u32)));
14855 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14857 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14859 if (to_device)
14860 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14861 else
14862 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14864 ret = -ENODEV;
14865 for (i = 0; i < 40; i++) {
14866 u32 val;
14868 if (to_device)
14869 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14870 else
14871 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14872 if ((val & 0xffff) == sram_dma_descs) {
14873 ret = 0;
14874 break;
14877 udelay(100);
14880 return ret;
14883 #define TEST_BUFFER_SIZE 0x2000
14885 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14886 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14887 { },
14890 static int __devinit tg3_test_dma(struct tg3 *tp)
14892 dma_addr_t buf_dma;
14893 u32 *buf, saved_dma_rwctrl;
14894 int ret = 0;
14896 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14897 &buf_dma, GFP_KERNEL);
14898 if (!buf) {
14899 ret = -ENOMEM;
14900 goto out_nofree;
14903 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14904 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14906 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14908 if (tg3_flag(tp, 57765_PLUS))
14909 goto out;
14911 if (tg3_flag(tp, PCI_EXPRESS)) {
14912 /* DMA read watermark not used on PCIE */
14913 tp->dma_rwctrl |= 0x00180000;
14914 } else if (!tg3_flag(tp, PCIX_MODE)) {
14915 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14916 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14917 tp->dma_rwctrl |= 0x003f0000;
14918 else
14919 tp->dma_rwctrl |= 0x003f000f;
14920 } else {
14921 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14922 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14923 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14924 u32 read_water = 0x7;
14926 /* If the 5704 is behind the EPB bridge, we can
14927 * do the less restrictive ONE_DMA workaround for
14928 * better performance.
14929 */
14930 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14931 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14932 tp->dma_rwctrl |= 0x8000;
14933 else if (ccval == 0x6 || ccval == 0x7)
14934 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14936 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14937 read_water = 4;
14938 /* Set bit 23 to enable PCIX hw bug fix */
14939 tp->dma_rwctrl |=
14940 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14941 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14942 (1 << 23);
14943 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14944 /* 5780 always in PCIX mode */
14945 tp->dma_rwctrl |= 0x00144000;
14946 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14947 /* 5714 always in PCIX mode */
14948 tp->dma_rwctrl |= 0x00148000;
14949 } else {
14950 tp->dma_rwctrl |= 0x001b000f;
14954 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14955 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14956 tp->dma_rwctrl &= 0xfffffff0;
14958 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14959 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14960 /* Remove this if it causes problems for some boards. */
14961 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14963 /* On 5700/5701 chips, we need to set this bit.
14964 * Otherwise the chip will issue cacheline transactions
14965 * to streamable DMA memory with not all the byte
14966 * enables turned on. This is an error on several
14967 * RISC PCI controllers, in particular sparc64.
14969 * On 5703/5704 chips, this bit has been reassigned
14970 * a different meaning. In particular, it is used
14971 * on those chips to enable a PCI-X workaround.
14972 */
14973 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14976 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14978 #if 0
14979 /* Unneeded, already done by tg3_get_invariants. */
14980 tg3_switch_clocks(tp);
14981 #endif
14983 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14984 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14985 goto out;
14987 /* It is best to perform the DMA test with maximum write burst
14988 * size to expose the 5700/5701 write DMA bug.
14989 */
14990 saved_dma_rwctrl = tp->dma_rwctrl;
14991 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14992 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14994 while (1) {
14995 u32 *p = buf, i;
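/* Fill the buffer with an incrementing pattern so that any word
 * dropped or reordered by the DMA engine shows up as p[i] != i
 * in the verification pass below.
 */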
14997 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14998 p[i] = i;
15000 /* Send the buffer to the chip. */
15001 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15002 if (ret) {
15003 dev_err(&tp->pdev->dev,
15004 "%s: Buffer write failed. err = %d\n",
15005 __func__, ret);
15006 break;
15009 #if 0
15010 /* validate data reached card RAM correctly. */
15011 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15012 u32 val;
15013 tg3_read_mem(tp, 0x2100 + (i*4), &val);
15014 if (le32_to_cpu(val) != p[i]) {
15015 dev_err(&tp->pdev->dev,
15016 "%s: Buffer corrupted on device! "
15017 "(%d != %d)\n", __func__, val, i);
15018 /* ret = -ENODEV here? */
15020 p[i] = 0;
15022 #endif
15023 /* Now read it back. */
15024 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15025 if (ret) {
15026 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15027 "err = %d\n", __func__, ret);
15028 break;
15031 /* Verify it. */
15032 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15033 if (p[i] == i)
15034 continue;
15036 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15037 DMA_RWCTRL_WRITE_BNDRY_16) {
15038 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15039 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15040 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15041 break;
15042 } else {
15043 dev_err(&tp->pdev->dev,
15044 "%s: Buffer corrupted on read back! "
15045 "(%d != %d)\n", __func__, p[i], i);
15046 ret = -ENODEV;
15047 goto out;
15051 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15052 /* Success. */
15053 ret = 0;
15054 break;
15057 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15058 DMA_RWCTRL_WRITE_BNDRY_16) {
15059 /* DMA test passed without adjusting DMA boundary,
15060 * now look for chipsets that are known to expose the
15061 * DMA bug without failing the test.
15062 */
15063 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15064 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15065 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15066 } else {
15067 /* Safe to use the calculated DMA boundary. */
15068 tp->dma_rwctrl = saved_dma_rwctrl;
15071 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15074 out:
15075 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15076 out_nofree:
15077 return ret;
15080 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15082 if (tg3_flag(tp, 57765_PLUS)) {
15083 tp->bufmgr_config.mbuf_read_dma_low_water =
15084 DEFAULT_MB_RDMA_LOW_WATER_5705;
15085 tp->bufmgr_config.mbuf_mac_rx_low_water =
15086 DEFAULT_MB_MACRX_LOW_WATER_57765;
15087 tp->bufmgr_config.mbuf_high_water =
15088 DEFAULT_MB_HIGH_WATER_57765;
15090 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15091 DEFAULT_MB_RDMA_LOW_WATER_5705;
15092 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15093 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15094 tp->bufmgr_config.mbuf_high_water_jumbo =
15095 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15096 } else if (tg3_flag(tp, 5705_PLUS)) {
15097 tp->bufmgr_config.mbuf_read_dma_low_water =
15098 DEFAULT_MB_RDMA_LOW_WATER_5705;
15099 tp->bufmgr_config.mbuf_mac_rx_low_water =
15100 DEFAULT_MB_MACRX_LOW_WATER_5705;
15101 tp->bufmgr_config.mbuf_high_water =
15102 DEFAULT_MB_HIGH_WATER_5705;
15103 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15104 tp->bufmgr_config.mbuf_mac_rx_low_water =
15105 DEFAULT_MB_MACRX_LOW_WATER_5906;
15106 tp->bufmgr_config.mbuf_high_water =
15107 DEFAULT_MB_HIGH_WATER_5906;
15110 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15111 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15112 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15113 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15114 tp->bufmgr_config.mbuf_high_water_jumbo =
15115 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15116 } else {
15117 tp->bufmgr_config.mbuf_read_dma_low_water =
15118 DEFAULT_MB_RDMA_LOW_WATER;
15119 tp->bufmgr_config.mbuf_mac_rx_low_water =
15120 DEFAULT_MB_MACRX_LOW_WATER;
15121 tp->bufmgr_config.mbuf_high_water =
15122 DEFAULT_MB_HIGH_WATER;
15124 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15125 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15126 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15127 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15128 tp->bufmgr_config.mbuf_high_water_jumbo =
15129 DEFAULT_MB_HIGH_WATER_JUMBO;
15132 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15133 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15136 static char * __devinit tg3_phy_string(struct tg3 *tp)
15138 switch (tp->phy_id & TG3_PHY_ID_MASK) {
15139 case TG3_PHY_ID_BCM5400: return "5400";
15140 case TG3_PHY_ID_BCM5401: return "5401";
15141 case TG3_PHY_ID_BCM5411: return "5411";
15142 case TG3_PHY_ID_BCM5701: return "5701";
15143 case TG3_PHY_ID_BCM5703: return "5703";
15144 case TG3_PHY_ID_BCM5704: return "5704";
15145 case TG3_PHY_ID_BCM5705: return "5705";
15146 case TG3_PHY_ID_BCM5750: return "5750";
15147 case TG3_PHY_ID_BCM5752: return "5752";
15148 case TG3_PHY_ID_BCM5714: return "5714";
15149 case TG3_PHY_ID_BCM5780: return "5780";
15150 case TG3_PHY_ID_BCM5755: return "5755";
15151 case TG3_PHY_ID_BCM5787: return "5787";
15152 case TG3_PHY_ID_BCM5784: return "5784";
15153 case TG3_PHY_ID_BCM5756: return "5722/5756";
15154 case TG3_PHY_ID_BCM5906: return "5906";
15155 case TG3_PHY_ID_BCM5761: return "5761";
15156 case TG3_PHY_ID_BCM5718C: return "5718C";
15157 case TG3_PHY_ID_BCM5718S: return "5718S";
15158 case TG3_PHY_ID_BCM57765: return "57765";
15159 case TG3_PHY_ID_BCM5719C: return "5719C";
15160 case TG3_PHY_ID_BCM5720C: return "5720C";
15161 case TG3_PHY_ID_BCM8002: return "8002/serdes";
15162 case 0: return "serdes";
15163 default: return "unknown";
15167 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15169 if (tg3_flag(tp, PCI_EXPRESS)) {
15170 strcpy(str, "PCI Express");
15171 return str;
15172 } else if (tg3_flag(tp, PCIX_MODE)) {
15173 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15175 strcpy(str, "PCIX:");
15177 if ((clock_ctrl == 7) ||
15178 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15179 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15180 strcat(str, "133MHz");
15181 else if (clock_ctrl == 0)
15182 strcat(str, "33MHz");
15183 else if (clock_ctrl == 2)
15184 strcat(str, "50MHz");
15185 else if (clock_ctrl == 4)
15186 strcat(str, "66MHz");
15187 else if (clock_ctrl == 6)
15188 strcat(str, "100MHz");
15189 } else {
15190 strcpy(str, "PCI:");
15191 if (tg3_flag(tp, PCI_HIGH_SPEED))
15192 strcat(str, "66MHz");
15193 else
15194 strcat(str, "33MHz");
15196 if (tg3_flag(tp, PCI_32BIT))
15197 strcat(str, ":32-bit");
15198 else
15199 strcat(str, ":64-bit");
15200 return str;
15203 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
15205 struct pci_dev *peer;
15206 unsigned int func, devnr = tp->pdev->devfn & ~7;
15208 for (func = 0; func < 8; func++) {
15209 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15210 if (peer && peer != tp->pdev)
15211 break;
15212 pci_dev_put(peer);
15214 /* 5704 can be configured in single-port mode; set peer to
15215 * tp->pdev in that case.
15216 */
15217 if (!peer) {
15218 peer = tp->pdev;
15219 return peer;
15222 /*
15223 * We don't need to keep the refcount elevated; there's no way
15224 * to remove one half of this device without removing the other.
15225 */
15226 pci_dev_put(peer);
15228 return peer;
15231 static void __devinit tg3_init_coal(struct tg3 *tp)
15233 struct ethtool_coalesce *ec = &tp->coal;
15235 memset(ec, 0, sizeof(*ec));
15236 ec->cmd = ETHTOOL_GCOALESCE;
15237 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15238 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15239 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15240 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15241 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15242 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15243 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15244 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15245 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15247 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15248 HOSTCC_MODE_CLRTICK_TXBD)) {
15249 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15250 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15251 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15252 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15255 if (tg3_flag(tp, 5705_PLUS)) {
15256 ec->rx_coalesce_usecs_irq = 0;
15257 ec->tx_coalesce_usecs_irq = 0;
15258 ec->stats_block_coalesce_usecs = 0;
15262 static const struct net_device_ops tg3_netdev_ops = {
15263 .ndo_open = tg3_open,
15264 .ndo_stop = tg3_close,
15265 .ndo_start_xmit = tg3_start_xmit,
15266 .ndo_get_stats64 = tg3_get_stats64,
15267 .ndo_validate_addr = eth_validate_addr,
15268 .ndo_set_rx_mode = tg3_set_rx_mode,
15269 .ndo_set_mac_address = tg3_set_mac_addr,
15270 .ndo_do_ioctl = tg3_ioctl,
15271 .ndo_tx_timeout = tg3_tx_timeout,
15272 .ndo_change_mtu = tg3_change_mtu,
15273 .ndo_fix_features = tg3_fix_features,
15274 .ndo_set_features = tg3_set_features,
15275 #ifdef CONFIG_NET_POLL_CONTROLLER
15276 .ndo_poll_controller = tg3_poll_controller,
15277 #endif
15280 static int __devinit tg3_init_one(struct pci_dev *pdev,
15281 const struct pci_device_id *ent)
15283 struct net_device *dev;
15284 struct tg3 *tp;
15285 int i, err, pm_cap;
15286 u32 sndmbx, rcvmbx, intmbx;
15287 char str[40];
15288 u64 dma_mask, persist_dma_mask;
15289 u32 features = 0;
15291 printk_once(KERN_INFO "%s\n", version);
15293 err = pci_enable_device(pdev);
15294 if (err) {
15295 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15296 return err;
15299 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15300 if (err) {
15301 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15302 goto err_out_disable_pdev;
15305 pci_set_master(pdev);
15307 /* Find power-management capability. */
15308 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15309 if (pm_cap == 0) {
15310 dev_err(&pdev->dev,
15311 "Cannot find Power Management capability, aborting\n");
15312 err = -EIO;
15313 goto err_out_free_res;
15316 err = pci_set_power_state(pdev, PCI_D0);
15317 if (err) {
15318 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15319 goto err_out_free_res;
15322 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15323 if (!dev) {
15324 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15325 err = -ENOMEM;
15326 goto err_out_power_down;
15329 SET_NETDEV_DEV(dev, &pdev->dev);
15331 tp = netdev_priv(dev);
15332 tp->pdev = pdev;
15333 tp->dev = dev;
15334 tp->pm_cap = pm_cap;
15335 tp->rx_mode = TG3_DEF_RX_MODE;
15336 tp->tx_mode = TG3_DEF_TX_MODE;
15338 if (tg3_debug > 0)
15339 tp->msg_enable = tg3_debug;
15340 else
15341 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15343 /* The word/byte swap controls here control register access byte
15344 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15345 * setting below.
15346 */
15347 tp->misc_host_ctrl =
15348 MISC_HOST_CTRL_MASK_PCI_INT |
15349 MISC_HOST_CTRL_WORD_SWAP |
15350 MISC_HOST_CTRL_INDIR_ACCESS |
15351 MISC_HOST_CTRL_PCISTATE_RW;
15353 /* The NONFRM (non-frame) byte/word swap controls take effect
15354 * on descriptor entries, anything which isn't packet data.
15355 *
15356 * The StrongARM chips on the board (one for tx, one for rx)
15357 * are running in big-endian mode.
15358 */
15359 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15360 GRC_MODE_WSWAP_NONFRM_DATA);
15361 #ifdef __BIG_ENDIAN
15362 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15363 #endif
15364 spin_lock_init(&tp->lock);
15365 spin_lock_init(&tp->indirect_lock);
15366 INIT_WORK(&tp->reset_task, tg3_reset_task);
15368 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15369 if (!tp->regs) {
15370 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15371 err = -ENOMEM;
15372 goto err_out_free_dev;
15375 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15376 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15377 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15378 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15379 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15380 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15381 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15382 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15383 tg3_flag_set(tp, ENABLE_APE);
15384 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15385 if (!tp->aperegs) {
15386 dev_err(&pdev->dev,
15387 "Cannot map APE registers, aborting\n");
15388 err = -ENOMEM;
15389 goto err_out_iounmap;
15393 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15394 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15396 dev->ethtool_ops = &tg3_ethtool_ops;
15397 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15398 dev->netdev_ops = &tg3_netdev_ops;
15399 dev->irq = pdev->irq;
15401 err = tg3_get_invariants(tp);
15402 if (err) {
15403 dev_err(&pdev->dev,
15404 "Problem fetching invariants of chip, aborting\n");
15405 goto err_out_apeunmap;
15408 /* The EPB bridge inside 5714, 5715, and 5780 and any
15409 * device behind the EPB cannot support DMA addresses > 40-bit.
15410 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15411 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15412 * do DMA address check in tg3_start_xmit().
15413 */
15414 if (tg3_flag(tp, IS_5788))
15415 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15416 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15417 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15418 #ifdef CONFIG_HIGHMEM
15419 dma_mask = DMA_BIT_MASK(64);
15420 #endif
15421 } else
15422 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15424 /* Configure DMA attributes. */
15425 if (dma_mask > DMA_BIT_MASK(32)) {
15426 err = pci_set_dma_mask(pdev, dma_mask);
15427 if (!err) {
15428 features |= NETIF_F_HIGHDMA;
15429 err = pci_set_consistent_dma_mask(pdev,
15430 persist_dma_mask);
15431 if (err < 0) {
15432 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15433 "DMA for consistent allocations\n");
15434 goto err_out_apeunmap;
15438 if (err || dma_mask == DMA_BIT_MASK(32)) {
15439 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15440 if (err) {
15441 dev_err(&pdev->dev,
15442 "No usable DMA configuration, aborting\n");
15443 goto err_out_apeunmap;
15447 tg3_init_bufmgr_config(tp);
15449 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15451 /* 5700 B0 chips do not support checksumming correctly due
15452 * to hardware bugs.
15453 */
15454 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15455 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15457 if (tg3_flag(tp, 5755_PLUS))
15458 features |= NETIF_F_IPV6_CSUM;
15461 /* TSO is on by default on chips that support hardware TSO.
15462 * Firmware TSO on older chips gives lower performance, so it
15463 * is off by default, but can be enabled using ethtool.
15464 */
15465 if ((tg3_flag(tp, HW_TSO_1) ||
15466 tg3_flag(tp, HW_TSO_2) ||
15467 tg3_flag(tp, HW_TSO_3)) &&
15468 (features & NETIF_F_IP_CSUM))
15469 features |= NETIF_F_TSO;
15470 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15471 if (features & NETIF_F_IPV6_CSUM)
15472 features |= NETIF_F_TSO6;
15473 if (tg3_flag(tp, HW_TSO_3) ||
15474 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15475 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15476 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15477 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15478 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15479 features |= NETIF_F_TSO_ECN;
15482 dev->features |= features;
15483 dev->vlan_features |= features;
15485 /*
15486 * Add loopback capability only for a subset of devices that support
15487 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15488 * loopback for the remaining devices.
15489 */
15490 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15491 !tg3_flag(tp, CPMU_PRESENT))
15492 /* Add the loopback capability */
15493 features |= NETIF_F_LOOPBACK;
15495 dev->hw_features |= features;
15497 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15498 !tg3_flag(tp, TSO_CAPABLE) &&
15499 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15500 tg3_flag_set(tp, MAX_RXPEND_64);
15501 tp->rx_pending = 63;
15504 err = tg3_get_device_address(tp);
15505 if (err) {
15506 dev_err(&pdev->dev,
15507 "Could not obtain valid ethernet address, aborting\n");
15508 goto err_out_apeunmap;
15511 /*
15512 * Reset chip in case UNDI or EFI driver did not shut it down.
15513 * The DMA self test will enable WDMAC and we'll see (spurious)
15514 * pending DMA on the PCI bus at that point.
15515 */
15516 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15517 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15518 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15519 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15522 err = tg3_test_dma(tp);
15523 if (err) {
15524 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15525 goto err_out_apeunmap;
15528 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15529 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15530 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15531 for (i = 0; i < tp->irq_max; i++) {
15532 struct tg3_napi *tnapi = &tp->napi[i];
15534 tnapi->tp = tp;
15535 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15537 tnapi->int_mbox = intmbx;
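/* The first five interrupt mailboxes are spaced 8 bytes apart;
 * any further vectors apparently use a packed 4-byte stride.
 */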
15538 if (i <= 4)
15539 intmbx += 0x8;
15540 else
15541 intmbx += 0x4;
15543 tnapi->consmbox = rcvmbx;
15544 tnapi->prodmbox = sndmbx;
15546 if (i)
15547 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15548 else
15549 tnapi->coal_now = HOSTCC_MODE_NOW;
15551 if (!tg3_flag(tp, SUPPORT_MSIX))
15552 break;
15554 /*
15555 * If we support MSIX, we'll be using RSS. If we're using
15556 * RSS, the first vector only handles link interrupts and the
15557 * remaining vectors handle rx and tx interrupts. Reuse the
15558 * mailbox values for the next iteration. The values we set up
15559 * above are still useful for the single vectored mode.
15560 */
15561 if (!i)
15562 continue;
15564 rcvmbx += 0x8;
15566 if (sndmbx & 0x4)
15567 sndmbx -= 0x4;
15568 else
15569 sndmbx += 0xc;
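/* Return rings advance one 8-byte mailbox per vector. The send
 * side zig-zags (+0xc then -0x4), hitting the +4 word and then the
 * +0 word of each successive 64-bit producer mailbox; presumably
 * this mirrors the hardware layout of the send mailboxes.
 */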
15572 tg3_init_coal(tp);
15574 pci_set_drvdata(pdev, dev);
15576 if (tg3_flag(tp, 5717_PLUS)) {
15577 /* Resume a low-power mode */
15578 tg3_frob_aux_power(tp, false);
15581 err = register_netdev(dev);
15582 if (err) {
15583 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15584 goto err_out_apeunmap;
15587 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15588 tp->board_part_number,
15589 tp->pci_chip_rev_id,
15590 tg3_bus_string(tp, str),
15591 dev->dev_addr);
15593 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15594 struct phy_device *phydev;
15595 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15596 netdev_info(dev,
15597 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15598 phydev->drv->name, dev_name(&phydev->dev));
15599 } else {
15600 char *ethtype;
15602 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15603 ethtype = "10/100Base-TX";
15604 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15605 ethtype = "1000Base-SX";
15606 else
15607 ethtype = "10/100/1000Base-T";
15609 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15610 "(WireSpeed[%d], EEE[%d])\n",
15611 tg3_phy_string(tp), ethtype,
15612 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15613 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15616 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15617 (dev->features & NETIF_F_RXCSUM) != 0,
15618 tg3_flag(tp, USE_LINKCHG_REG) != 0,
15619 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15620 tg3_flag(tp, ENABLE_ASF) != 0,
15621 tg3_flag(tp, TSO_CAPABLE) != 0);
15622 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15623 tp->dma_rwctrl,
15624 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15625 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15627 pci_save_state(pdev);
15629 return 0;
15631 err_out_apeunmap:
15632 if (tp->aperegs) {
15633 iounmap(tp->aperegs);
15634 tp->aperegs = NULL;
15637 err_out_iounmap:
15638 if (tp->regs) {
15639 iounmap(tp->regs);
15640 tp->regs = NULL;
15643 err_out_free_dev:
15644 free_netdev(dev);
15646 err_out_power_down:
15647 pci_set_power_state(pdev, PCI_D3hot);
15649 err_out_free_res:
15650 pci_release_regions(pdev);
15652 err_out_disable_pdev:
15653 pci_disable_device(pdev);
15654 pci_set_drvdata(pdev, NULL);
15655 return err;
15658 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15660 struct net_device *dev = pci_get_drvdata(pdev);
15662 if (dev) {
15663 struct tg3 *tp = netdev_priv(dev);
15665 if (tp->fw)
15666 release_firmware(tp->fw);
15668 cancel_work_sync(&tp->reset_task);
15670 if (tg3_flag(tp, USE_PHYLIB)) {
15671 tg3_phy_fini(tp);
15672 tg3_mdio_fini(tp);
15675 unregister_netdev(dev);
15676 if (tp->aperegs) {
15677 iounmap(tp->aperegs);
15678 tp->aperegs = NULL;
15680 if (tp->regs) {
15681 iounmap(tp->regs);
15682 tp->regs = NULL;
15684 free_netdev(dev);
15685 pci_release_regions(pdev);
15686 pci_disable_device(pdev);
15687 pci_set_drvdata(pdev, NULL);
15691 #ifdef CONFIG_PM_SLEEP
15692 static int tg3_suspend(struct device *device)
15694 struct pci_dev *pdev = to_pci_dev(device);
15695 struct net_device *dev = pci_get_drvdata(pdev);
15696 struct tg3 *tp = netdev_priv(dev);
15697 int err;
15699 if (!netif_running(dev))
15700 return 0;
15702 flush_work_sync(&tp->reset_task);
15703 tg3_phy_stop(tp);
15704 tg3_netif_stop(tp);
15706 del_timer_sync(&tp->timer);
15708 tg3_full_lock(tp, 1);
15709 tg3_disable_ints(tp);
15710 tg3_full_unlock(tp);
15712 netif_device_detach(dev);
15714 tg3_full_lock(tp, 0);
15715 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15716 tg3_flag_clear(tp, INIT_COMPLETE);
15717 tg3_full_unlock(tp);
15719 err = tg3_power_down_prepare(tp);
15720 if (err) {
15721 int err2;
15723 tg3_full_lock(tp, 0);
15725 tg3_flag_set(tp, INIT_COMPLETE);
15726 err2 = tg3_restart_hw(tp, 1);
15727 if (err2)
15728 goto out;
15730 tp->timer.expires = jiffies + tp->timer_offset;
15731 add_timer(&tp->timer);
15733 netif_device_attach(dev);
15734 tg3_netif_start(tp);
15736 out:
15737 tg3_full_unlock(tp);
15739 if (!err2)
15740 tg3_phy_start(tp);
15743 return err;
15746 static int tg3_resume(struct device *device)
15748 struct pci_dev *pdev = to_pci_dev(device);
15749 struct net_device *dev = pci_get_drvdata(pdev);
15750 struct tg3 *tp = netdev_priv(dev);
15751 int err;
15753 if (!netif_running(dev))
15754 return 0;
15756 netif_device_attach(dev);
15758 tg3_full_lock(tp, 0);
15760 tg3_flag_set(tp, INIT_COMPLETE);
15761 err = tg3_restart_hw(tp, 1);
15762 if (err)
15763 goto out;
15765 tp->timer.expires = jiffies + tp->timer_offset;
15766 add_timer(&tp->timer);
15768 tg3_netif_start(tp);
15770 out:
15771 tg3_full_unlock(tp);
15773 if (!err)
15774 tg3_phy_start(tp);
15776 return err;
15779 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15780 #define TG3_PM_OPS (&tg3_pm_ops)
15782 #else
15784 #define TG3_PM_OPS NULL
15786 #endif /* CONFIG_PM_SLEEP */
15788 /**
15789 * tg3_io_error_detected - called when PCI error is detected
15790 * @pdev: Pointer to PCI device
15791 * @state: The current pci connection state
15792 *
15793 * This function is called after a PCI bus error affecting
15794 * this device has been detected.
15795 */
15796 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15797 pci_channel_state_t state)
15799 struct net_device *netdev = pci_get_drvdata(pdev);
15800 struct tg3 *tp = netdev_priv(netdev);
15801 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15803 netdev_info(netdev, "PCI I/O error detected\n");
15805 rtnl_lock();
15807 if (!netif_running(netdev))
15808 goto done;
15810 tg3_phy_stop(tp);
15812 tg3_netif_stop(tp);
15814 del_timer_sync(&tp->timer);
15815 tg3_flag_clear(tp, RESTART_TIMER);
15817 /* Want to make sure that the reset task doesn't run */
15818 cancel_work_sync(&tp->reset_task);
15819 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15820 tg3_flag_clear(tp, RESTART_TIMER);
15822 netif_device_detach(netdev);
15824 /* Clean up software state, even if MMIO is blocked */
15825 tg3_full_lock(tp, 0);
15826 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15827 tg3_full_unlock(tp);
15829 done:
15830 if (state == pci_channel_io_perm_failure)
15831 err = PCI_ERS_RESULT_DISCONNECT;
15832 else
15833 pci_disable_device(pdev);
15835 rtnl_unlock();
15837 return err;
15840 /**
15841 * tg3_io_slot_reset - called after the pci bus has been reset.
15842 * @pdev: Pointer to PCI device
15843 *
15844 * Restart the card from scratch, as if from a cold-boot.
15845 * At this point, the card has experienced a hard reset,
15846 * followed by fixups by BIOS, and has its config space
15847 * set up identically to what it was at cold boot.
15848 */
15849 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15851 struct net_device *netdev = pci_get_drvdata(pdev);
15852 struct tg3 *tp = netdev_priv(netdev);
15853 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15854 int err;
15856 rtnl_lock();
15858 if (pci_enable_device(pdev)) {
15859 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15860 goto done;
15863 pci_set_master(pdev);
15864 pci_restore_state(pdev);
15865 pci_save_state(pdev);
15867 if (!netif_running(netdev)) {
15868 rc = PCI_ERS_RESULT_RECOVERED;
15869 goto done;
15872 err = tg3_power_up(tp);
15873 if (err)
15874 goto done;
15876 rc = PCI_ERS_RESULT_RECOVERED;
15878 done:
15879 rtnl_unlock();
15881 return rc;
15884 /**
15885 * tg3_io_resume - called when traffic can start flowing again.
15886 * @pdev: Pointer to PCI device
15887 *
15888 * This callback is called when the error recovery driver tells
15889 * us that it's OK to resume normal operation.
15890 */
15891 static void tg3_io_resume(struct pci_dev *pdev)
15893 struct net_device *netdev = pci_get_drvdata(pdev);
15894 struct tg3 *tp = netdev_priv(netdev);
15895 int err;
15897 rtnl_lock();
15899 if (!netif_running(netdev))
15900 goto done;
15902 tg3_full_lock(tp, 0);
15903 tg3_flag_set(tp, INIT_COMPLETE);
15904 err = tg3_restart_hw(tp, 1);
15905 tg3_full_unlock(tp);
15906 if (err) {
15907 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15908 goto done;
15911 netif_device_attach(netdev);
15913 tp->timer.expires = jiffies + tp->timer_offset;
15914 add_timer(&tp->timer);
15916 tg3_netif_start(tp);
15918 tg3_phy_start(tp);
15920 done:
15921 rtnl_unlock();
15924 static struct pci_error_handlers tg3_err_handler = {
15925 .error_detected = tg3_io_error_detected,
15926 .slot_reset = tg3_io_slot_reset,
15927 .resume = tg3_io_resume
15930 static struct pci_driver tg3_driver = {
15931 .name = DRV_MODULE_NAME,
15932 .id_table = tg3_pci_tbl,
15933 .probe = tg3_init_one,
15934 .remove = __devexit_p(tg3_remove_one),
15935 .err_handler = &tg3_err_handler,
15936 .driver.pm = TG3_PM_OPS,
15939 static int __init tg3_init(void)
15941 return pci_register_driver(&tg3_driver);
15944 static void __exit tg3_cleanup(void)
15946 pci_unregister_driver(&tg3_driver);
15949 module_init(tg3_init);
15950 module_exit(tg3_cleanup);