/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2012 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)	\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)	\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
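
/* Illustrative sketch (not from the original source): the accessors above
 * let feature tests elsewhere in the driver read as plain predicates,
 * e.g. assuming a valid tp:
 *
 *	if (tg3_flag(tp, JUMBO_CAPABLE))
 *		max_mtu = 9000;
 *	tg3_flag_set(tp, TAGGED_STATUS);
 *	tg3_flag_clear(tp, TAGGED_STATUS);
 *
 * The token pasting (TG3_FLAG_##flag) keeps call sites short, while the
 * inline helpers give type checking against enum TG3_FLAGS.
 */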
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			123
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"March 21, 2012"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
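
/* A minimal sketch (not part of the driver) of why the ring size must stay
 * a power of two: the NEXT_TX() mask form is only equivalent to a modulo
 * when TG3_TX_RING_SIZE has a single bit set.  For an index of 511:
 *
 *	(511 + 1) % 512         == 0	// may compile to a hw divide
 *	(511 + 1) & (512 - 1)   == 0	// a single AND instruction
 *
 * A hypothetical compile-time guard for that assumption could read:
 *
 *	BUILD_BUG_ON(TG3_TX_RING_SIZE & (TG3_TX_RING_SIZE - 1));
 */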
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
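
/* Illustrative usage (not part of the source): the debug bitmap can be set
 * at module load time, e.g. to enable only driver and link messages
 * (NETIF_MSG_DRV | NETIF_MSG_LINK == 0x0005):
 *
 *	modprobe tg3 tg3_debug=0x0005
 */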
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test        (online) " },
	{ "link test         (online) " },
	{ "register test     (offline)" },
	{ "memory test       (offline)" },
	{ "mac loopback test (offline)" },
	{ "phy loopback test (offline)" },
	{ "ext loopback test (offline)" },
	{ "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
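
/* Illustrative note (not from the original source): these shorthands expand
 * against a local variable named tp, so they only compile inside functions
 * that declare one, e.g.:
 *
 *	u32 mode = tr32(MAC_MODE);
 *	tw32_f(MAC_MODE, mode | MAC_MODE_PORT_MODE_GMII);  // write + flush
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, val, 40);           // write + 40 usec wait
 */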
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
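
/* Illustrative sketch (not part of the driver): NIC SRAM is reached through
 * a sliding memory window; callers pass an SRAM offset and the helpers do
 * the window setup and teardown under indirect_lock, e.g.:
 *
 *	u32 val;
 *	tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
 *	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
 *
 * The window base is always restored to zero afterwards so that unrelated
 * code never sees a stale mapping.
 */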
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't have any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
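
/* Illustrative usage (not from the original source): APE lock and unlock
 * calls bracket accesses shared with the management firmware, e.g.:
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return;		// lock not granted within 1 ms
 *	// ... touch shared APE state ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */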
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tg3_flag(tp, APE_HAS_NCSI))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
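
/* Illustrative sketch (not from the original source): the MAC_MI_COM
 * register packs one MDIO transaction into a single word.  For phy_addr 1
 * and reg MII_BMSR (0x01) the read frame assembled above would look like:
 *
 *	frame_val = (1    << MI_COM_PHY_ADDR_SHIFT)	// which PHY
 *		  | (0x01 << MI_COM_REG_ADDR_SHIFT)	// which register
 *		  | MI_COM_CMD_READ | MI_COM_START;	// start the cycle
 *
 * The busy-wait then polls MI_COM_BUSY for up to PHY_BUSY_LOOPS iterations
 * at 10 usec apiece, i.e. roughly 50 ms worst case.
 */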
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500
/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
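
/* Worked example (not from the original source): with the full
 * TG3_FW_EVENT_TIMEOUT_USEC budget of 2500 usec remaining, the loop above
 * runs (2500 >> 3) + 1 = 313 iterations of udelay(8), i.e. it polls the
 * event bit roughly every 8 usec for about 2.5 ms before giving up.
 */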
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
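
/* Worked example (not from the original source) of the 1000BASE-X pause
 * resolution above: if both sides advertise symmetric pause
 * (lcladv & rmtadv & ADVERTISE_1000XPAUSE is non-zero), the first branch
 * matches and the result is FLOW_CTRL_TX | FLOW_CTRL_RX.  When only the
 * asymmetric bit is common, the second branch grants a single direction,
 * keyed off which side also advertised the symmetric PAUSE bit.  Any other
 * combination resolves to no flow control (cap == 0).
 */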
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
2030 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2032 u32 phy;
2034 if (!tg3_flag(tp, 5705_PLUS) ||
2035 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2036 return;
2038 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2039 u32 ephy;
2041 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2042 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2044 tg3_writephy(tp, MII_TG3_FET_TEST,
2045 ephy | MII_TG3_FET_SHADOW_EN);
2046 if (!tg3_readphy(tp, reg, &phy)) {
2047 if (enable)
2048 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2049 else
2050 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2051 tg3_writephy(tp, reg, phy);
2053 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2055 } else {
2056 int ret;
2058 ret = tg3_phy_auxctl_read(tp,
2059 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2060 if (!ret) {
2061 if (enable)
2062 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2063 else
2064 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2065 tg3_phy_auxctl_write(tp,
2066 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2071 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2073 int ret;
2074 u32 val;
2076 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2077 return;
2079 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2080 if (!ret)
2081 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2082 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2085 static void tg3_phy_apply_otp(struct tg3 *tp)
2087 u32 otp, phy;
2089 if (!tp->phy_otp)
2090 return;
2092 otp = tp->phy_otp;
2094 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2095 return;
2097 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2098 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2099 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2101 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2102 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2103 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2105 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2106 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2107 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2109 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2110 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2112 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2113 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2115 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2116 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2117 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2119 tg3_phy_toggle_auxctl_smdsp(tp, false);
2122 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2124 u32 val;
2126 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2127 return;
2129 tp->setlpicnt = 0;
2131 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2132 current_link_up == 1 &&
2133 tp->link_config.active_duplex == DUPLEX_FULL &&
2134 (tp->link_config.active_speed == SPEED_100 ||
2135 tp->link_config.active_speed == SPEED_1000)) {
2136 u32 eeectl;
2138 if (tp->link_config.active_speed == SPEED_1000)
2139 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2140 else
2141 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2143 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2145 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2146 TG3_CL45_D7_EEERES_STAT, &val);
2148 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2149 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2150 tp->setlpicnt = 2;
2153 if (!tp->setlpicnt) {
2154 if (current_link_up == 1 &&
2155 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2156 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2157 tg3_phy_toggle_auxctl_smdsp(tp, false);
2160 val = tr32(TG3_CPMU_EEE_MODE);
2161 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2165 static void tg3_phy_eee_enable(struct tg3 *tp)
2167 u32 val;
2169 if (tp->link_config.active_speed == SPEED_1000 &&
2170 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2171 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2172 tg3_flag(tp, 57765_CLASS)) &&
2173 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2174 val = MII_TG3_DSP_TAP26_ALNOKO |
2175 MII_TG3_DSP_TAP26_RMRXSTO;
2176 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2177 tg3_phy_toggle_auxctl_smdsp(tp, false);
2180 val = tr32(TG3_CPMU_EEE_MODE);
2181 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2184 static int tg3_wait_macro_done(struct tg3 *tp)
2186 int limit = 100;
2188 while (limit--) {
2189 u32 tmp32;
2191 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2192 if ((tmp32 & 0x1000) == 0)
2193 break;
2196 if (limit < 0)
2197 return -EBUSY;
2199 return 0;
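/* tg3_wait_macro_done() above is a bounded busy-wait: each pass is one
 * MDIO read of MII_TG3_DSP_CONTROL, polling until bit 0x1000 clears.
 * There is no delay in the loop, so the bound is simply the latency of
 * up to 100 MDIO transactions.
 */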
2202 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2204 static const u32 test_pat[4][6] = {
2205 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2206 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2207 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2208 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2210 int chan;
2212 for (chan = 0; chan < 4; chan++) {
2213 int i;
2215 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2216 (chan * 0x2000) | 0x0200);
2217 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2219 for (i = 0; i < 6; i++)
2220 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2221 test_pat[chan][i]);
2223 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2224 if (tg3_wait_macro_done(tp)) {
2225 *resetp = 1;
2226 return -EBUSY;
2229 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2230 (chan * 0x2000) | 0x0200);
2231 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2232 if (tg3_wait_macro_done(tp)) {
2233 *resetp = 1;
2234 return -EBUSY;
2237 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2238 if (tg3_wait_macro_done(tp)) {
2239 *resetp = 1;
2240 return -EBUSY;
2243 for (i = 0; i < 6; i += 2) {
2244 u32 low, high;
2246 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2247 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2248 tg3_wait_macro_done(tp)) {
2249 *resetp = 1;
2250 return -EBUSY;
2252 low &= 0x7fff;
2253 high &= 0x000f;
2254 if (low != test_pat[chan][i] ||
2255 high != test_pat[chan][i+1]) {
2256 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2257 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2258 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2260 return -EBUSY;
2265 return 0;
2268 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2270 int chan;
2272 for (chan = 0; chan < 4; chan++) {
2273 int i;
2275 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2276 (chan * 0x2000) | 0x0200);
2277 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2278 for (i = 0; i < 6; i++)
2279 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2280 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2281 if (tg3_wait_macro_done(tp))
2282 return -EBUSY;
2285 return 0;
2288 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2290 u32 reg32, phy9_orig;
2291 int retries, do_phy_reset, err;
2293 retries = 10;
2294 do_phy_reset = 1;
2295 do {
2296 if (do_phy_reset) {
2297 err = tg3_bmcr_reset(tp);
2298 if (err)
2299 return err;
2300 do_phy_reset = 0;
2303 /* Disable transmitter and interrupt. */
2304 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2305 continue;
2307 reg32 |= 0x3000;
2308 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2310 /* Set full-duplex, 1000 Mbps. */
2311 tg3_writephy(tp, MII_BMCR,
2312 BMCR_FULLDPLX | BMCR_SPEED1000);
2314 /* Set to master mode. */
2315 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2316 continue;
2318 tg3_writephy(tp, MII_CTRL1000,
2319 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2321 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2322 if (err)
2323 return err;
2325 /* Block the PHY control access. */
2326 tg3_phydsp_write(tp, 0x8005, 0x0800);
2328 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2329 if (!err)
2330 break;
2331 } while (--retries);
2333 err = tg3_phy_reset_chanpat(tp);
2334 if (err)
2335 return err;
2337 tg3_phydsp_write(tp, 0x8005, 0x0000);
2339 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2340 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2342 tg3_phy_toggle_auxctl_smdsp(tp, false);
2344 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2346 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2347 reg32 &= ~0x3000;
2348 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2349 } else if (!err)
2350 err = -EBUSY;
2352 return err;
2355 /* This will reset the tigon3 PHY. */
2358 static int tg3_phy_reset(struct tg3 *tp)
2360 u32 val, cpmuctrl;
2361 int err;
2363 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2364 val = tr32(GRC_MISC_CFG);
2365 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2366 udelay(40);
2368 err = tg3_readphy(tp, MII_BMSR, &val);
2369 err |= tg3_readphy(tp, MII_BMSR, &val);
2370 if (err != 0)
2371 return -EBUSY;
2373 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2374 netif_carrier_off(tp->dev);
2375 tg3_link_report(tp);
2378 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2379 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2380 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2381 err = tg3_phy_reset_5703_4_5(tp);
2382 if (err)
2383 return err;
2384 goto out;
2387 cpmuctrl = 0;
2388 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2389 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2390 cpmuctrl = tr32(TG3_CPMU_CTRL);
2391 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2392 tw32(TG3_CPMU_CTRL,
2393 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2396 err = tg3_bmcr_reset(tp);
2397 if (err)
2398 return err;
2400 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2401 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2402 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2404 tw32(TG3_CPMU_CTRL, cpmuctrl);
2407 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2408 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2409 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2410 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2411 CPMU_LSPD_1000MB_MACCLK_12_5) {
2412 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2413 udelay(40);
2414 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2418 if (tg3_flag(tp, 5717_PLUS) &&
2419 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2420 return 0;
2422 tg3_phy_apply_otp(tp);
2424 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2425 tg3_phy_toggle_apd(tp, true);
2426 else
2427 tg3_phy_toggle_apd(tp, false);
2429 out:
2430 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2431 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2432 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2433 tg3_phydsp_write(tp, 0x000a, 0x0323);
2434 tg3_phy_toggle_auxctl_smdsp(tp, false);
2437 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2438 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2439 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2442 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2443 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2444 tg3_phydsp_write(tp, 0x000a, 0x310b);
2445 tg3_phydsp_write(tp, 0x201f, 0x9506);
2446 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2447 tg3_phy_toggle_auxctl_smdsp(tp, false);
2449 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2450 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2451 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2452 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2453 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2454 tg3_writephy(tp, MII_TG3_TEST1,
2455 MII_TG3_TEST1_TRIM_EN | 0x4);
2456 } else
2457 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2459 tg3_phy_toggle_auxctl_smdsp(tp, false);
2463 /* Set Extended packet length bit (bit 14) on all chips
2464 * that support jumbo frames. */
2465 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2466 /* Cannot do read-modify-write on 5401 */
2467 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2468 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2469 /* Set bit 14 with read-modify-write to preserve other bits */
2470 err = tg3_phy_auxctl_read(tp,
2471 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2472 if (!err)
2473 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2474 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2477 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2478 * jumbo frame transmission.
2480 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2481 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2482 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2483 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2486 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2487 /* adjust output voltage */
2488 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2491 tg3_phy_toggle_automdix(tp, 1);
2492 tg3_phy_set_wirespeed(tp);
2493 return 0;
2496 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2497 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2498 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2499 TG3_GPIO_MSG_NEED_VAUX)
2500 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2501 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2502 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2503 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2504 (TG3_GPIO_MSG_DRVR_PRES << 12))
2506 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2507 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2508 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2509 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2510 (TG3_GPIO_MSG_NEED_VAUX << 12))
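/* In the per-function status word (after shifting out
 * TG3_APE_GPIO_MSG_SHIFT, see tg3_set_function_status() below), each
 * PCI function owns a 4-bit field: function 0 uses bits 3:0, function 1
 * bits 7:4, and so on; the ALL_* masks above just replicate each flag
 * at shifts 0/4/8/12.  E.g. if only function 2 has posted
 * TG3_GPIO_MSG_NEED_VAUX, the shifted status reads 0x00000200.
 */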
2512 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2514 u32 status, shift;
2516 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2517 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2518 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2519 else
2520 status = tr32(TG3_CPMU_DRV_STATUS);
2522 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2523 status &= ~(TG3_GPIO_MSG_MASK << shift);
2524 status |= (newstat << shift);
2526 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2527 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2528 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2529 else
2530 tw32(TG3_CPMU_DRV_STATUS, status);
2532 return status >> TG3_APE_GPIO_MSG_SHIFT;
2535 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2537 if (!tg3_flag(tp, IS_NIC))
2538 return 0;
2540 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2541 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2542 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2543 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2544 return -EIO;
2546 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2548 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2549 TG3_GRC_LCLCTL_PWRSW_DELAY);
2551 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2552 } else {
2553 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2554 TG3_GRC_LCLCTL_PWRSW_DELAY);
2557 return 0;
2560 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2562 u32 grc_local_ctrl;
2564 if (!tg3_flag(tp, IS_NIC) ||
2565 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2566 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2567 return;
2569 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2571 tw32_wait_f(GRC_LOCAL_CTRL,
2572 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2573 TG3_GRC_LCLCTL_PWRSW_DELAY);
2575 tw32_wait_f(GRC_LOCAL_CTRL,
2576 grc_local_ctrl,
2577 TG3_GRC_LCLCTL_PWRSW_DELAY);
2579 tw32_wait_f(GRC_LOCAL_CTRL,
2580 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2581 TG3_GRC_LCLCTL_PWRSW_DELAY);
2584 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2586 if (!tg3_flag(tp, IS_NIC))
2587 return;
2589 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2590 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2591 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2592 (GRC_LCLCTRL_GPIO_OE0 |
2593 GRC_LCLCTRL_GPIO_OE1 |
2594 GRC_LCLCTRL_GPIO_OE2 |
2595 GRC_LCLCTRL_GPIO_OUTPUT0 |
2596 GRC_LCLCTRL_GPIO_OUTPUT1),
2597 TG3_GRC_LCLCTL_PWRSW_DELAY);
2598 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2599 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2600 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2601 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2602 GRC_LCLCTRL_GPIO_OE1 |
2603 GRC_LCLCTRL_GPIO_OE2 |
2604 GRC_LCLCTRL_GPIO_OUTPUT0 |
2605 GRC_LCLCTRL_GPIO_OUTPUT1 |
2606 tp->grc_local_ctrl;
2607 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2608 TG3_GRC_LCLCTL_PWRSW_DELAY);
2610 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2611 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2612 TG3_GRC_LCLCTL_PWRSW_DELAY);
2614 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2615 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2616 TG3_GRC_LCLCTL_PWRSW_DELAY);
2617 } else {
2618 u32 no_gpio2;
2619 u32 grc_local_ctrl = 0;
2621 /* Workaround to prevent drawing excess current. */
2622 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2623 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2624 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2625 grc_local_ctrl,
2626 TG3_GRC_LCLCTL_PWRSW_DELAY);
2629 /* On 5753 and variants, GPIO2 cannot be used. */
2630 no_gpio2 = tp->nic_sram_data_cfg &
2631 NIC_SRAM_DATA_CFG_NO_GPIO2;
2633 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2634 GRC_LCLCTRL_GPIO_OE1 |
2635 GRC_LCLCTRL_GPIO_OE2 |
2636 GRC_LCLCTRL_GPIO_OUTPUT1 |
2637 GRC_LCLCTRL_GPIO_OUTPUT2;
2638 if (no_gpio2) {
2639 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2640 GRC_LCLCTRL_GPIO_OUTPUT2);
2642 tw32_wait_f(GRC_LOCAL_CTRL,
2643 tp->grc_local_ctrl | grc_local_ctrl,
2644 TG3_GRC_LCLCTL_PWRSW_DELAY);
2646 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2648 tw32_wait_f(GRC_LOCAL_CTRL,
2649 tp->grc_local_ctrl | grc_local_ctrl,
2650 TG3_GRC_LCLCTL_PWRSW_DELAY);
2652 if (!no_gpio2) {
2653 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2654 tw32_wait_f(GRC_LOCAL_CTRL,
2655 tp->grc_local_ctrl | grc_local_ctrl,
2656 TG3_GRC_LCLCTL_PWRSW_DELAY);
2661 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2663 u32 msg = 0;
2665 /* Serialize power state transitions */
2666 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2667 return;
2669 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2670 msg = TG3_GPIO_MSG_NEED_VAUX;
2672 msg = tg3_set_function_status(tp, msg);
2674 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2675 goto done;
2677 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2678 tg3_pwrsrc_switch_to_vaux(tp);
2679 else
2680 tg3_pwrsrc_die_with_vmain(tp);
2682 done:
2683 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2686 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2688 bool need_vaux = false;
2690 /* The GPIOs do something completely different on 57765. */
2691 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2692 return;
2694 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2695 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2696 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2697 tg3_frob_aux_power_5717(tp, include_wol ?
2698 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2699 return;
2702 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2703 struct net_device *dev_peer;
2705 dev_peer = pci_get_drvdata(tp->pdev_peer);
2707 /* remove_one() may have been run on the peer. */
2708 if (dev_peer) {
2709 struct tg3 *tp_peer = netdev_priv(dev_peer);
2711 if (tg3_flag(tp_peer, INIT_COMPLETE))
2712 return;
2714 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2715 tg3_flag(tp_peer, ENABLE_ASF))
2716 need_vaux = true;
2720 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2721 tg3_flag(tp, ENABLE_ASF))
2722 need_vaux = true;
2724 if (need_vaux)
2725 tg3_pwrsrc_switch_to_vaux(tp);
2726 else
2727 tg3_pwrsrc_die_with_vmain(tp);
2730 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2732 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2733 return 1;
2734 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2735 if (speed != SPEED_10)
2736 return 1;
2737 } else if (speed == SPEED_10)
2738 return 1;
2740 return 0;
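/* Summary of the rule above: LED_CTRL_MODE_PHY_2 always flips the link
 * polarity; otherwise a BCM5411 PHY flips it at 100/1000 Mbps, while
 * any other PHY flips it only at 10 Mbps.
 */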
2743 static bool tg3_phy_power_bug(struct tg3 *tp)
2745 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
2746 case ASIC_REV_5700:
2747 case ASIC_REV_5704:
2748 return true;
2749 case ASIC_REV_5780:
2750 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2751 return true;
2752 return false;
2753 case ASIC_REV_5717:
2754 if (!tp->pci_fn)
2755 return true;
2756 return false;
2757 case ASIC_REV_5719:
2758 case ASIC_REV_5720:
2759 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
2760 !tp->pci_fn)
2761 return true;
2762 return false;
2765 return false;
2768 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2770 u32 val;
2772 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2773 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2774 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2775 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2777 sg_dig_ctrl |=
2778 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2779 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2780 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2782 return;
2785 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2786 tg3_bmcr_reset(tp);
2787 val = tr32(GRC_MISC_CFG);
2788 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2789 udelay(40);
2790 return;
2791 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2792 u32 phytest;
2793 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2794 u32 phy;
2796 tg3_writephy(tp, MII_ADVERTISE, 0);
2797 tg3_writephy(tp, MII_BMCR,
2798 BMCR_ANENABLE | BMCR_ANRESTART);
2800 tg3_writephy(tp, MII_TG3_FET_TEST,
2801 phytest | MII_TG3_FET_SHADOW_EN);
2802 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2803 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2804 tg3_writephy(tp,
2805 MII_TG3_FET_SHDW_AUXMODE4,
2806 phy);
2808 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2810 return;
2811 } else if (do_low_power) {
2812 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2813 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2815 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2816 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2817 MII_TG3_AUXCTL_PCTL_VREG_11V;
2818 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2821 /* The PHY should not be powered down on some chips because
2822 * of bugs.
2824 if (tg3_phy_power_bug(tp))
2825 return;
2827 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2828 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2829 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2830 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2831 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2832 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2835 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2838 /* tp->lock is held. */
2839 static int tg3_nvram_lock(struct tg3 *tp)
2841 if (tg3_flag(tp, NVRAM)) {
2842 int i;
2844 if (tp->nvram_lock_cnt == 0) {
2845 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2846 for (i = 0; i < 8000; i++) {
2847 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2848 break;
2849 udelay(20);
2851 if (i == 8000) {
2852 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2853 return -ENODEV;
2856 tp->nvram_lock_cnt++;
2858 return 0;
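/* The NVRAM lock nests: only the outermost tg3_nvram_lock() arbitrates
 * via NVRAM_SWARB, later calls just bump nvram_lock_cnt, and
 * tg3_nvram_unlock() below drops the hardware semaphore only once the
 * count returns to zero.  Worst-case arbitration wait is roughly
 * 8000 * 20us = 160ms.
 */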
2861 /* tp->lock is held. */
2862 static void tg3_nvram_unlock(struct tg3 *tp)
2864 if (tg3_flag(tp, NVRAM)) {
2865 if (tp->nvram_lock_cnt > 0)
2866 tp->nvram_lock_cnt--;
2867 if (tp->nvram_lock_cnt == 0)
2868 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2872 /* tp->lock is held. */
2873 static void tg3_enable_nvram_access(struct tg3 *tp)
2875 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2876 u32 nvaccess = tr32(NVRAM_ACCESS);
2878 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2882 /* tp->lock is held. */
2883 static void tg3_disable_nvram_access(struct tg3 *tp)
2885 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2886 u32 nvaccess = tr32(NVRAM_ACCESS);
2888 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2892 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2893 u32 offset, u32 *val)
2895 u32 tmp;
2896 int i;
2898 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2899 return -EINVAL;
2901 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2902 EEPROM_ADDR_DEVID_MASK |
2903 EEPROM_ADDR_READ);
2904 tw32(GRC_EEPROM_ADDR,
2905 tmp |
2906 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2907 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2908 EEPROM_ADDR_ADDR_MASK) |
2909 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2911 for (i = 0; i < 1000; i++) {
2912 tmp = tr32(GRC_EEPROM_ADDR);
2914 if (tmp & EEPROM_ADDR_COMPLETE)
2915 break;
2916 msleep(1);
2918 if (!(tmp & EEPROM_ADDR_COMPLETE))
2919 return -EBUSY;
2921 tmp = tr32(GRC_EEPROM_DATA);
2924 * The data will always be opposite the native endian
2925 * format. Perform a blind byteswap to compensate.
2927 *val = swab32(tmp);
2929 return 0;
2932 #define NVRAM_CMD_TIMEOUT 10000
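/* Combined with the udelay(10) per iteration below, this bounds a
 * single NVRAM command at roughly 10000 * 10us = 100ms of polling.
 */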
2934 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2936 int i;
2938 tw32(NVRAM_CMD, nvram_cmd);
2939 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2940 udelay(10);
2941 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2942 udelay(10);
2943 break;
2947 if (i == NVRAM_CMD_TIMEOUT)
2948 return -EBUSY;
2950 return 0;
2953 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2955 if (tg3_flag(tp, NVRAM) &&
2956 tg3_flag(tp, NVRAM_BUFFERED) &&
2957 tg3_flag(tp, FLASH) &&
2958 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2959 (tp->nvram_jedecnum == JEDEC_ATMEL))
2961 addr = ((addr / tp->nvram_pagesize) <<
2962 ATMEL_AT45DB0X1B_PAGE_POS) +
2963 (addr % tp->nvram_pagesize);
2965 return addr;
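/* Worked example for the Atmel translation above, assuming the
 * 264-byte AT45DB0X1B page size this driver configures for such parts:
 * logical address 1000 sits at page 1000 / 264 = 3, byte 1000 % 264 =
 * 208, so the physical address becomes
 * (3 << ATMEL_AT45DB0X1B_PAGE_POS) + 208.
 */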
2968 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2970 if (tg3_flag(tp, NVRAM) &&
2971 tg3_flag(tp, NVRAM_BUFFERED) &&
2972 tg3_flag(tp, FLASH) &&
2973 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2974 (tp->nvram_jedecnum == JEDEC_ATMEL))
2976 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2977 tp->nvram_pagesize) +
2978 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2980 return addr;
2983 /* NOTE: Data read in from NVRAM is byteswapped according to
2984 * the byteswapping settings for all other register accesses.
2985 * tg3 devices are BE devices, so on a BE machine, the data
2986 * returned will be exactly as it is seen in NVRAM. On a LE
2987 * machine, the 32-bit value will be byteswapped.
2989 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2991 int ret;
2993 if (!tg3_flag(tp, NVRAM))
2994 return tg3_nvram_read_using_eeprom(tp, offset, val);
2996 offset = tg3_nvram_phys_addr(tp, offset);
2998 if (offset > NVRAM_ADDR_MSK)
2999 return -EINVAL;
3001 ret = tg3_nvram_lock(tp);
3002 if (ret)
3003 return ret;
3005 tg3_enable_nvram_access(tp);
3007 tw32(NVRAM_ADDR, offset);
3008 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3009 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3011 if (ret == 0)
3012 *val = tr32(NVRAM_RDDATA);
3014 tg3_disable_nvram_access(tp);
3016 tg3_nvram_unlock(tp);
3018 return ret;
3021 /* Ensures NVRAM data is in bytestream format. */
3022 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3024 u32 v;
3025 int res = tg3_nvram_read(tp, offset, &v);
3026 if (!res)
3027 *val = cpu_to_be32(v);
3028 return res;
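/* Illustration: for the NVRAM bytes aa bb cc dd,
 * tg3_nvram_read_be32() leaves aa bb cc dd in *val's memory on either
 * host endianness (the "bytestream format"), whereas the raw
 * tg3_nvram_read() result matches the NVRAM byte layout only on a BE
 * host, per the NOTE above.
 */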
3031 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3032 u32 offset, u32 len, u8 *buf)
3034 int i, j, rc = 0;
3035 u32 val;
3037 for (i = 0; i < len; i += 4) {
3038 u32 addr;
3039 __be32 data;
3041 addr = offset + i;
3043 memcpy(&data, buf + i, 4);
3046 * The SEEPROM interface expects the data to always be opposite
3047 * the native endian format. We accomplish this by reversing
3048 * all the operations that would have been performed on the
3049 * data from a call to tg3_nvram_read_be32().
3051 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3053 val = tr32(GRC_EEPROM_ADDR);
3054 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3056 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3057 EEPROM_ADDR_READ);
3058 tw32(GRC_EEPROM_ADDR, val |
3059 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3060 (addr & EEPROM_ADDR_ADDR_MASK) |
3061 EEPROM_ADDR_START |
3062 EEPROM_ADDR_WRITE);
3064 for (j = 0; j < 1000; j++) {
3065 val = tr32(GRC_EEPROM_ADDR);
3067 if (val & EEPROM_ADDR_COMPLETE)
3068 break;
3069 msleep(1);
3071 if (!(val & EEPROM_ADDR_COMPLETE)) {
3072 rc = -EBUSY;
3073 break;
3077 return rc;
3080 /* offset and length are dword aligned */
3081 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3082 u8 *buf)
3084 int ret = 0;
3085 u32 pagesize = tp->nvram_pagesize;
3086 u32 pagemask = pagesize - 1;
3087 u32 nvram_cmd;
3088 u8 *tmp;
3090 tmp = kmalloc(pagesize, GFP_KERNEL);
3091 if (tmp == NULL)
3092 return -ENOMEM;
3094 while (len) {
3095 int j;
3096 u32 phy_addr, page_off, size;
3098 phy_addr = offset & ~pagemask;
3100 for (j = 0; j < pagesize; j += 4) {
3101 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3102 (__be32 *) (tmp + j));
3103 if (ret)
3104 break;
3106 if (ret)
3107 break;
3109 page_off = offset & pagemask;
3110 size = pagesize;
3111 if (len < size)
3112 size = len;
3114 len -= size;
3116 memcpy(tmp + page_off, buf, size);
3118 offset = offset + (pagesize - page_off);
3120 tg3_enable_nvram_access(tp);
3123 * Before we can erase the flash page, we need
3124 * to issue a special "write enable" command.
3126 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3128 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3129 break;
3131 /* Erase the target page */
3132 tw32(NVRAM_ADDR, phy_addr);
3134 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3135 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3137 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3138 break;
3140 /* Issue another write enable to start the write. */
3141 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3143 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3144 break;
3146 for (j = 0; j < pagesize; j += 4) {
3147 __be32 data;
3149 data = *((__be32 *) (tmp + j));
3151 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3153 tw32(NVRAM_ADDR, phy_addr + j);
3155 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3156 NVRAM_CMD_WR;
3158 if (j == 0)
3159 nvram_cmd |= NVRAM_CMD_FIRST;
3160 else if (j == (pagesize - 4))
3161 nvram_cmd |= NVRAM_CMD_LAST;
3163 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3164 if (ret)
3165 break;
3167 if (ret)
3168 break;
3171 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3172 tg3_nvram_exec_cmd(tp, nvram_cmd);
3174 kfree(tmp);
3176 return ret;
3179 /* offset and length are dword aligned */
3180 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3181 u8 *buf)
3183 int i, ret = 0;
3185 for (i = 0; i < len; i += 4, offset += 4) {
3186 u32 page_off, phy_addr, nvram_cmd;
3187 __be32 data;
3189 memcpy(&data, buf + i, 4);
3190 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3192 page_off = offset % tp->nvram_pagesize;
3194 phy_addr = tg3_nvram_phys_addr(tp, offset);
3196 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3198 if (page_off == 0 || i == 0)
3199 nvram_cmd |= NVRAM_CMD_FIRST;
3200 if (page_off == (tp->nvram_pagesize - 4))
3201 nvram_cmd |= NVRAM_CMD_LAST;
3203 if (i == (len - 4))
3204 nvram_cmd |= NVRAM_CMD_LAST;
3206 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3207 !tg3_flag(tp, FLASH) ||
3208 !tg3_flag(tp, 57765_PLUS))
3209 tw32(NVRAM_ADDR, phy_addr);
3211 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3212 !tg3_flag(tp, 5755_PLUS) &&
3213 (tp->nvram_jedecnum == JEDEC_ST) &&
3214 (nvram_cmd & NVRAM_CMD_FIRST)) {
3215 u32 cmd;
3217 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3218 ret = tg3_nvram_exec_cmd(tp, cmd);
3219 if (ret)
3220 break;
3222 if (!tg3_flag(tp, FLASH)) {
3223 /* We always do complete word writes to the EEPROM. */
3224 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3227 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3228 if (ret)
3229 break;
3231 return ret;
3234 /* offset and length are dword aligned */
3235 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3237 int ret;
3239 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3240 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3241 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3242 udelay(40);
3245 if (!tg3_flag(tp, NVRAM)) {
3246 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3247 } else {
3248 u32 grc_mode;
3250 ret = tg3_nvram_lock(tp);
3251 if (ret)
3252 return ret;
3254 tg3_enable_nvram_access(tp);
3255 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3256 tw32(NVRAM_WRITE1, 0x406);
3258 grc_mode = tr32(GRC_MODE);
3259 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3261 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3262 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3263 buf);
3264 } else {
3265 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3266 buf);
3269 grc_mode = tr32(GRC_MODE);
3270 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3272 tg3_disable_nvram_access(tp);
3273 tg3_nvram_unlock(tp);
3276 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3277 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3278 udelay(40);
3281 return ret;
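/* Sketch of a caller (hypothetical values; offset and length must be
 * dword aligned, and tp->lock must be held as for the read path):
 *
 *	u8 buf[8] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };
 *	int err = tg3_nvram_write_block(tp, 0x80, sizeof(buf), buf);
 *
 * The helper then picks the EEPROM, buffered-flash or unbuffered-flash
 * strategy above based on the NVRAM, NVRAM_BUFFERED and FLASH flags.
 */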
3284 #define RX_CPU_SCRATCH_BASE 0x30000
3285 #define RX_CPU_SCRATCH_SIZE 0x04000
3286 #define TX_CPU_SCRATCH_BASE 0x34000
3287 #define TX_CPU_SCRATCH_SIZE 0x04000
3289 /* tp->lock is held. */
3290 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3292 int i;
3294 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3296 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3297 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3299 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3300 return 0;
3302 if (offset == RX_CPU_BASE) {
3303 for (i = 0; i < 10000; i++) {
3304 tw32(offset + CPU_STATE, 0xffffffff);
3305 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3306 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3307 break;
3310 tw32(offset + CPU_STATE, 0xffffffff);
3311 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3312 udelay(10);
3313 } else {
3314 for (i = 0; i < 10000; i++) {
3315 tw32(offset + CPU_STATE, 0xffffffff);
3316 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3317 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3318 break;
3322 if (i >= 10000) {
3323 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3324 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3325 return -ENODEV;
3328 /* Clear firmware's nvram arbitration. */
3329 if (tg3_flag(tp, NVRAM))
3330 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3331 return 0;
3334 struct fw_info {
3335 unsigned int fw_base;
3336 unsigned int fw_len;
3337 const __be32 *fw_data;
3340 /* tp->lock is held. */
3341 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3342 u32 cpu_scratch_base, int cpu_scratch_size,
3343 struct fw_info *info)
3345 int err, lock_err, i;
3346 void (*write_op)(struct tg3 *, u32, u32);
3348 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3349 netdev_err(tp->dev,
3350 "%s: Trying to load TX cpu firmware which is 5705\n",
3351 __func__);
3352 return -EINVAL;
3355 if (tg3_flag(tp, 5705_PLUS))
3356 write_op = tg3_write_mem;
3357 else
3358 write_op = tg3_write_indirect_reg32;
3360 /* It is possible that bootcode is still loading at this point.
3361 * Get the nvram lock first before halting the cpu.
3363 lock_err = tg3_nvram_lock(tp);
3364 err = tg3_halt_cpu(tp, cpu_base);
3365 if (!lock_err)
3366 tg3_nvram_unlock(tp);
3367 if (err)
3368 goto out;
3370 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3371 write_op(tp, cpu_scratch_base + i, 0);
3372 tw32(cpu_base + CPU_STATE, 0xffffffff);
3373 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3374 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3375 write_op(tp, (cpu_scratch_base +
3376 (info->fw_base & 0xffff) +
3377 (i * sizeof(u32))),
3378 be32_to_cpu(info->fw_data[i]));
3380 err = 0;
3382 out:
3383 return err;
3386 /* tp->lock is held. */
3387 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3389 struct fw_info info;
3390 const __be32 *fw_data;
3391 int err, i;
3393 fw_data = (void *)tp->fw->data;
3395 /* The firmware blob starts with version numbers, followed by
3396 * the start address and length.  We set the complete length:
3397 * length = end_address_of_bss - start_address_of_text.
3398 * The remainder is the blob, to be loaded contiguously
3399 * from the start address. */
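/* Concretely, as consumed below: fw_data[0] holds the version word,
 * fw_data[1] the load address, fw_data[2] the advertised length (the
 * driver recomputes the length from the file size instead), and
 * fw_data[3] onward is the image itself; the 12 bytes subtracted from
 * tp->fw->size are those three header words.
 */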
3401 info.fw_base = be32_to_cpu(fw_data[1]);
3402 info.fw_len = tp->fw->size - 12;
3403 info.fw_data = &fw_data[3];
3405 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3406 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3407 &info);
3408 if (err)
3409 return err;
3411 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3412 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3413 &info);
3414 if (err)
3415 return err;
3417 /* Now start up only the RX CPU. */
3418 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3419 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3421 for (i = 0; i < 5; i++) {
3422 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3423 break;
3424 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3425 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3426 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3427 udelay(1000);
3429 if (i >= 5) {
3430 netdev_err(tp->dev, "%s failed to set RX CPU PC, is %08x "
3431 "should be %08x\n", __func__,
3432 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3433 return -ENODEV;
3435 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3436 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
3438 return 0;
3441 /* tp->lock is held. */
3442 static int tg3_load_tso_firmware(struct tg3 *tp)
3444 struct fw_info info;
3445 const __be32 *fw_data;
3446 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3447 int err, i;
3449 if (tg3_flag(tp, HW_TSO_1) ||
3450 tg3_flag(tp, HW_TSO_2) ||
3451 tg3_flag(tp, HW_TSO_3))
3452 return 0;
3454 fw_data = (void *)tp->fw->data;
3456 /* The firmware blob starts with version numbers, followed by
3457 * the start address and length.  We set the complete length:
3458 * length = end_address_of_bss - start_address_of_text.
3459 * The remainder is the blob, to be loaded contiguously
3460 * from the start address. */
3462 info.fw_base = be32_to_cpu(fw_data[1]);
3463 cpu_scratch_size = tp->fw_len;
3464 info.fw_len = tp->fw->size - 12;
3465 info.fw_data = &fw_data[3];
3467 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3468 cpu_base = RX_CPU_BASE;
3469 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3470 } else {
3471 cpu_base = TX_CPU_BASE;
3472 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3473 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3476 err = tg3_load_firmware_cpu(tp, cpu_base,
3477 cpu_scratch_base, cpu_scratch_size,
3478 &info);
3479 if (err)
3480 return err;
3482 /* Now start up the CPU. */
3483 tw32(cpu_base + CPU_STATE, 0xffffffff);
3484 tw32_f(cpu_base + CPU_PC, info.fw_base);
3486 for (i = 0; i < 5; i++) {
3487 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3488 break;
3489 tw32(cpu_base + CPU_STATE, 0xffffffff);
3490 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3491 tw32_f(cpu_base + CPU_PC, info.fw_base);
3492 udelay(1000);
3494 if (i >= 5) {
3495 netdev_err(tp->dev,
3496 "%s fails to set CPU PC, is %08x should be %08x\n",
3497 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3498 return -ENODEV;
3500 tw32(cpu_base + CPU_STATE, 0xffffffff);
3501 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3502 return 0;
3506 /* tp->lock is held. */
3507 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3509 u32 addr_high, addr_low;
3510 int i;
3512 addr_high = ((tp->dev->dev_addr[0] << 8) |
3513 tp->dev->dev_addr[1]);
3514 addr_low = ((tp->dev->dev_addr[2] << 24) |
3515 (tp->dev->dev_addr[3] << 16) |
3516 (tp->dev->dev_addr[4] << 8) |
3517 (tp->dev->dev_addr[5] << 0));
3518 for (i = 0; i < 4; i++) {
3519 if (i == 1 && skip_mac_1)
3520 continue;
3521 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3522 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3525 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3526 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3527 for (i = 0; i < 12; i++) {
3528 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3529 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3533 addr_high = (tp->dev->dev_addr[0] +
3534 tp->dev->dev_addr[1] +
3535 tp->dev->dev_addr[2] +
3536 tp->dev->dev_addr[3] +
3537 tp->dev->dev_addr[4] +
3538 tp->dev->dev_addr[5]) &
3539 TX_BACKOFF_SEED_MASK;
3540 tw32(MAC_TX_BACKOFF_SEED, addr_high);
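/* Packing example for the code above: for dev_addr 00:10:18:aa:bb:cc,
 * each MAC_ADDR_x_HIGH register gets 0x00000010 (bytes 0-1) and each
 * MAC_ADDR_x_LOW gets 0x18aabbcc (bytes 2-5); the TX backoff seed is
 * just the sum of all six bytes masked by TX_BACKOFF_SEED_MASK.
 */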
3543 static void tg3_enable_register_access(struct tg3 *tp)
3546 * Make sure register accesses (indirect or otherwise) will function
3547 * correctly.
3549 pci_write_config_dword(tp->pdev,
3550 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3553 static int tg3_power_up(struct tg3 *tp)
3555 int err;
3557 tg3_enable_register_access(tp);
3559 err = pci_set_power_state(tp->pdev, PCI_D0);
3560 if (!err) {
3561 /* Switch out of Vaux if it is a NIC */
3562 tg3_pwrsrc_switch_to_vmain(tp);
3563 } else {
3564 netdev_err(tp->dev, "Transition to D0 failed\n");
3567 return err;
3570 static int tg3_setup_phy(struct tg3 *, int);
3572 static int tg3_power_down_prepare(struct tg3 *tp)
3574 u32 misc_host_ctrl;
3575 bool device_should_wake, do_low_power;
3577 tg3_enable_register_access(tp);
3579 /* Restore the CLKREQ setting. */
3580 if (tg3_flag(tp, CLKREQ_BUG)) {
3581 u16 lnkctl;
3583 pci_read_config_word(tp->pdev,
3584 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3585 &lnkctl);
3586 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3587 pci_write_config_word(tp->pdev,
3588 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3589 lnkctl);
3592 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3593 tw32(TG3PCI_MISC_HOST_CTRL,
3594 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3596 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3597 tg3_flag(tp, WOL_ENABLE);
3599 if (tg3_flag(tp, USE_PHYLIB)) {
3600 do_low_power = false;
3601 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3602 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3603 struct phy_device *phydev;
3604 u32 phyid, advertising;
3606 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3608 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3610 tp->link_config.speed = phydev->speed;
3611 tp->link_config.duplex = phydev->duplex;
3612 tp->link_config.autoneg = phydev->autoneg;
3613 tp->link_config.advertising = phydev->advertising;
3615 advertising = ADVERTISED_TP |
3616 ADVERTISED_Pause |
3617 ADVERTISED_Autoneg |
3618 ADVERTISED_10baseT_Half;
3620 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3621 if (tg3_flag(tp, WOL_SPEED_100MB))
3622 advertising |=
3623 ADVERTISED_100baseT_Half |
3624 ADVERTISED_100baseT_Full |
3625 ADVERTISED_10baseT_Full;
3626 else
3627 advertising |= ADVERTISED_10baseT_Full;
3630 phydev->advertising = advertising;
3632 phy_start_aneg(phydev);
3634 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3635 if (phyid != PHY_ID_BCMAC131) {
3636 phyid &= PHY_BCM_OUI_MASK;
3637 if (phyid == PHY_BCM_OUI_1 ||
3638 phyid == PHY_BCM_OUI_2 ||
3639 phyid == PHY_BCM_OUI_3)
3640 do_low_power = true;
3643 } else {
3644 do_low_power = true;
3646 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3647 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3649 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3650 tg3_setup_phy(tp, 0);
3653 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3654 u32 val;
3656 val = tr32(GRC_VCPU_EXT_CTRL);
3657 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3658 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3659 int i;
3660 u32 val;
3662 for (i = 0; i < 200; i++) {
3663 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3664 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3665 break;
3666 msleep(1);
3669 if (tg3_flag(tp, WOL_CAP))
3670 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3671 WOL_DRV_STATE_SHUTDOWN |
3672 WOL_DRV_WOL |
3673 WOL_SET_MAGIC_PKT);
3675 if (device_should_wake) {
3676 u32 mac_mode;
3678 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3679 if (do_low_power &&
3680 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3681 tg3_phy_auxctl_write(tp,
3682 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3683 MII_TG3_AUXCTL_PCTL_WOL_EN |
3684 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3685 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3686 udelay(40);
3689 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3690 mac_mode = MAC_MODE_PORT_MODE_GMII;
3691 else
3692 mac_mode = MAC_MODE_PORT_MODE_MII;
3694 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3695 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3696 ASIC_REV_5700) {
3697 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3698 SPEED_100 : SPEED_10;
3699 if (tg3_5700_link_polarity(tp, speed))
3700 mac_mode |= MAC_MODE_LINK_POLARITY;
3701 else
3702 mac_mode &= ~MAC_MODE_LINK_POLARITY;
3704 } else {
3705 mac_mode = MAC_MODE_PORT_MODE_TBI;
3708 if (!tg3_flag(tp, 5750_PLUS))
3709 tw32(MAC_LED_CTRL, tp->led_ctrl);
3711 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3712 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3713 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3714 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3716 if (tg3_flag(tp, ENABLE_APE))
3717 mac_mode |= MAC_MODE_APE_TX_EN |
3718 MAC_MODE_APE_RX_EN |
3719 MAC_MODE_TDE_ENABLE;
3721 tw32_f(MAC_MODE, mac_mode);
3722 udelay(100);
3724 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3725 udelay(10);
3728 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3729 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3730 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3731 u32 base_val;
3733 base_val = tp->pci_clock_ctrl;
3734 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3735 CLOCK_CTRL_TXCLK_DISABLE);
3737 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3738 CLOCK_CTRL_PWRDOWN_PLL133, 40);
3739 } else if (tg3_flag(tp, 5780_CLASS) ||
3740 tg3_flag(tp, CPMU_PRESENT) ||
3741 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3742 /* do nothing */
3743 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3744 u32 newbits1, newbits2;
3746 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3747 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3748 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3749 CLOCK_CTRL_TXCLK_DISABLE |
3750 CLOCK_CTRL_ALTCLK);
3751 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3752 } else if (tg3_flag(tp, 5705_PLUS)) {
3753 newbits1 = CLOCK_CTRL_625_CORE;
3754 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3755 } else {
3756 newbits1 = CLOCK_CTRL_ALTCLK;
3757 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3760 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3761 40);
3763 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3764 40);
3766 if (!tg3_flag(tp, 5705_PLUS)) {
3767 u32 newbits3;
3769 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3770 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3771 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3772 CLOCK_CTRL_TXCLK_DISABLE |
3773 CLOCK_CTRL_44MHZ_CORE);
3774 } else {
3775 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3778 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3779 tp->pci_clock_ctrl | newbits3, 40);
3783 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3784 tg3_power_down_phy(tp, do_low_power);
3786 tg3_frob_aux_power(tp, true);
3788 /* Workaround for unstable PLL clock */
3789 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3790 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3791 u32 val = tr32(0x7d00);
3793 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3794 tw32(0x7d00, val);
3795 if (!tg3_flag(tp, ENABLE_ASF)) {
3796 int err;
3798 err = tg3_nvram_lock(tp);
3799 tg3_halt_cpu(tp, RX_CPU_BASE);
3800 if (!err)
3801 tg3_nvram_unlock(tp);
3805 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3807 return 0;
3810 static void tg3_power_down(struct tg3 *tp)
3812 tg3_power_down_prepare(tp);
3814 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3815 pci_set_power_state(tp->pdev, PCI_D3hot);
3818 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3820 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3821 case MII_TG3_AUX_STAT_10HALF:
3822 *speed = SPEED_10;
3823 *duplex = DUPLEX_HALF;
3824 break;
3826 case MII_TG3_AUX_STAT_10FULL:
3827 *speed = SPEED_10;
3828 *duplex = DUPLEX_FULL;
3829 break;
3831 case MII_TG3_AUX_STAT_100HALF:
3832 *speed = SPEED_100;
3833 *duplex = DUPLEX_HALF;
3834 break;
3836 case MII_TG3_AUX_STAT_100FULL:
3837 *speed = SPEED_100;
3838 *duplex = DUPLEX_FULL;
3839 break;
3841 case MII_TG3_AUX_STAT_1000HALF:
3842 *speed = SPEED_1000;
3843 *duplex = DUPLEX_HALF;
3844 break;
3846 case MII_TG3_AUX_STAT_1000FULL:
3847 *speed = SPEED_1000;
3848 *duplex = DUPLEX_FULL;
3849 break;
3851 default:
3852 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3853 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3854 SPEED_10;
3855 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3856 DUPLEX_HALF;
3857 break;
3859 *speed = SPEED_UNKNOWN;
3860 *duplex = DUPLEX_UNKNOWN;
3861 break;
3865 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3867 int err = 0;
3868 u32 val, new_adv;
3870 new_adv = ADVERTISE_CSMA;
3871 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3872 new_adv |= mii_advertise_flowctrl(flowctrl);
3874 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3875 if (err)
3876 goto done;
3878 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3879 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3881 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3882 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3883 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3885 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3886 if (err)
3887 goto done;
3890 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3891 goto done;
3893 tw32(TG3_CPMU_EEE_MODE,
3894 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3896 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
3897 if (!err) {
3898 u32 err2;
3900 val = 0;
3901 /* Advertise 100-BaseTX EEE ability */
3902 if (advertise & ADVERTISED_100baseT_Full)
3903 val |= MDIO_AN_EEE_ADV_100TX;
3904 /* Advertise 1000-BaseT EEE ability */
3905 if (advertise & ADVERTISED_1000baseT_Full)
3906 val |= MDIO_AN_EEE_ADV_1000T;
3907 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3908 if (err)
3909 val = 0;
3911 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3912 case ASIC_REV_5717:
3913 case ASIC_REV_57765:
3914 case ASIC_REV_57766:
3915 case ASIC_REV_5719:
3916 /* If we advertised any EEE abilities above... */
3917 if (val)
3918 val = MII_TG3_DSP_TAP26_ALNOKO |
3919 MII_TG3_DSP_TAP26_RMRXSTO |
3920 MII_TG3_DSP_TAP26_OPCSINPT;
3921 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3922 /* Fall through */
3923 case ASIC_REV_5720:
3924 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3925 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3926 MII_TG3_DSP_CH34TP2_HIBW01);
3929 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
3930 if (!err)
3931 err = err2;
3934 done:
3935 return err;
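/* The ethtool<->MII conversions used above are helpers from
 * linux/mii.h, e.g. ethtool_adv_to_mii_adv_t() turns
 * ADVERTISED_100baseT_Full into ADVERTISE_100FULL, and
 * mii_advertise_flowctrl() folds FLOW_CTRL_TX/FLOW_CTRL_RX into the
 * ADVERTISE_PAUSE_CAP/ADVERTISE_PAUSE_ASYM bits.
 */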
3938 static void tg3_phy_copper_begin(struct tg3 *tp)
3940 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
3941 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3942 u32 adv, fc;
3944 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3945 adv = ADVERTISED_10baseT_Half |
3946 ADVERTISED_10baseT_Full;
3947 if (tg3_flag(tp, WOL_SPEED_100MB))
3948 adv |= ADVERTISED_100baseT_Half |
3949 ADVERTISED_100baseT_Full;
3951 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
3952 } else {
3953 adv = tp->link_config.advertising;
3954 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3955 adv &= ~(ADVERTISED_1000baseT_Half |
3956 ADVERTISED_1000baseT_Full);
3958 fc = tp->link_config.flowctrl;
3961 tg3_phy_autoneg_cfg(tp, adv, fc);
3963 tg3_writephy(tp, MII_BMCR,
3964 BMCR_ANENABLE | BMCR_ANRESTART);
3965 } else {
3966 int i;
3967 u32 bmcr, orig_bmcr;
3969 tp->link_config.active_speed = tp->link_config.speed;
3970 tp->link_config.active_duplex = tp->link_config.duplex;
3972 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3973 /* With autoneg disabled, 5715 only links up when the
3974 * advertisement register has the configured speed
3975 * enabled.
3977 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
3980 bmcr = 0;
3981 switch (tp->link_config.speed) {
3982 default:
3983 case SPEED_10:
3984 break;
3986 case SPEED_100:
3987 bmcr |= BMCR_SPEED100;
3988 break;
3990 case SPEED_1000:
3991 bmcr |= BMCR_SPEED1000;
3992 break;
3995 if (tp->link_config.duplex == DUPLEX_FULL)
3996 bmcr |= BMCR_FULLDPLX;
3998 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3999 (bmcr != orig_bmcr)) {
4000 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4001 for (i = 0; i < 1500; i++) {
4002 u32 tmp;
4004 udelay(10);
4005 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4006 tg3_readphy(tp, MII_BMSR, &tmp))
4007 continue;
4008 if (!(tmp & BMSR_LSTATUS)) {
4009 udelay(40);
4010 break;
4013 tg3_writephy(tp, MII_BMCR, bmcr);
4014 udelay(40);
4019 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4021 int err;
4023 /* Turn off tap power management and set the
4024 * Extended packet length bit. */
4025 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4027 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4028 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4029 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4030 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4031 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4033 udelay(40);
4035 return err;
4038 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4040 u32 advmsk, tgtadv, advertising;
4042 advertising = tp->link_config.advertising;
4043 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4045 advmsk = ADVERTISE_ALL;
4046 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4047 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4048 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4051 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4052 return false;
4054 if ((*lcladv & advmsk) != tgtadv)
4055 return false;
4057 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4058 u32 tg3_ctrl;
4060 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4062 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4063 return false;
4065 if (tgtadv &&
4066 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4067 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4068 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4069 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4070 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4071 } else {
4072 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4075 if (tg3_ctrl != tgtadv)
4076 return false;
4079 return true;
4082 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4084 u32 lpeth = 0;
4086 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4087 u32 val;
4089 if (tg3_readphy(tp, MII_STAT1000, &val))
4090 return false;
4092 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4095 if (tg3_readphy(tp, MII_LPA, rmtadv))
4096 return false;
4098 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4099 tp->link_config.rmt_adv = lpeth;
4101 return true;
4104 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4106 int current_link_up;
4107 u32 bmsr, val;
4108 u32 lcl_adv, rmt_adv;
4109 u16 current_speed;
4110 u8 current_duplex;
4111 int i, err;
4113 tw32(MAC_EVENT, 0);
4115 tw32_f(MAC_STATUS,
4116 (MAC_STATUS_SYNC_CHANGED |
4117 MAC_STATUS_CFG_CHANGED |
4118 MAC_STATUS_MI_COMPLETION |
4119 MAC_STATUS_LNKSTATE_CHANGED));
4120 udelay(40);
4122 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4123 tw32_f(MAC_MI_MODE,
4124 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4125 udelay(80);
4128 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4130 /* Some third-party PHYs need to be reset on link going
4131 * down.
4133 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4134 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4135 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4136 netif_carrier_ok(tp->dev)) {
4137 tg3_readphy(tp, MII_BMSR, &bmsr);
4138 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4139 !(bmsr & BMSR_LSTATUS))
4140 force_reset = 1;
4142 if (force_reset)
4143 tg3_phy_reset(tp);
4145 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4146 tg3_readphy(tp, MII_BMSR, &bmsr);
4147 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4148 !tg3_flag(tp, INIT_COMPLETE))
4149 bmsr = 0;
4151 if (!(bmsr & BMSR_LSTATUS)) {
4152 err = tg3_init_5401phy_dsp(tp);
4153 if (err)
4154 return err;
4156 tg3_readphy(tp, MII_BMSR, &bmsr);
4157 for (i = 0; i < 1000; i++) {
4158 udelay(10);
4159 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4160 (bmsr & BMSR_LSTATUS)) {
4161 udelay(40);
4162 break;
4166 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4167 TG3_PHY_REV_BCM5401_B0 &&
4168 !(bmsr & BMSR_LSTATUS) &&
4169 tp->link_config.active_speed == SPEED_1000) {
4170 err = tg3_phy_reset(tp);
4171 if (!err)
4172 err = tg3_init_5401phy_dsp(tp);
4173 if (err)
4174 return err;
4177 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4178 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4179 /* 5701 {A0,B0} CRC bug workaround */
4180 tg3_writephy(tp, 0x15, 0x0a75);
4181 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4182 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4183 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4186 /* Clear pending interrupts... */
4187 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4188 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4190 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4191 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4192 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4193 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4195 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4196 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4197 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4198 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4199 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4200 else
4201 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4204 current_link_up = 0;
4205 current_speed = SPEED_UNKNOWN;
4206 current_duplex = DUPLEX_UNKNOWN;
4207 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4208 tp->link_config.rmt_adv = 0;
4210 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4211 err = tg3_phy_auxctl_read(tp,
4212 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4213 &val);
4214 if (!err && !(val & (1 << 10))) {
4215 tg3_phy_auxctl_write(tp,
4216 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4217 val | (1 << 10));
4218 goto relink;
4222 bmsr = 0;
4223 for (i = 0; i < 100; i++) {
4224 tg3_readphy(tp, MII_BMSR, &bmsr);
4225 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4226 (bmsr & BMSR_LSTATUS))
4227 break;
4228 udelay(40);
4231 if (bmsr & BMSR_LSTATUS) {
4232 u32 aux_stat, bmcr;
4234 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4235 for (i = 0; i < 2000; i++) {
4236 udelay(10);
4237 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4238 aux_stat)
4239 break;
4242 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4243 &current_speed,
4244 &current_duplex);
4246 bmcr = 0;
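/* The loop below polls BMCR until a plausible value appears.  A value
 * of 0x7fff (all data bits set) is what a failed or floating MDIO read
 * tends to return, so it is treated as "no data yet" and retried.
 */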
4247 for (i = 0; i < 200; i++) {
4248 tg3_readphy(tp, MII_BMCR, &bmcr);
4249 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4250 continue;
4251 if (bmcr && bmcr != 0x7fff)
4252 break;
4253 udelay(10);
4256 lcl_adv = 0;
4257 rmt_adv = 0;
4259 tp->link_config.active_speed = current_speed;
4260 tp->link_config.active_duplex = current_duplex;
4262 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4263 if ((bmcr & BMCR_ANENABLE) &&
4264 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4265 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4266 current_link_up = 1;
4267 } else {
4268 if (!(bmcr & BMCR_ANENABLE) &&
4269 tp->link_config.speed == current_speed &&
4270 tp->link_config.duplex == current_duplex &&
4271 tp->link_config.flowctrl ==
4272 tp->link_config.active_flowctrl) {
4273 current_link_up = 1;
4277 if (current_link_up == 1 &&
4278 tp->link_config.active_duplex == DUPLEX_FULL) {
4279 u32 reg, bit;
4281 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4282 reg = MII_TG3_FET_GEN_STAT;
4283 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4284 } else {
4285 reg = MII_TG3_EXT_STAT;
4286 bit = MII_TG3_EXT_STAT_MDIX;
4289 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4290 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4292 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4296 relink:
4297 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4298 tg3_phy_copper_begin(tp);
4300 tg3_readphy(tp, MII_BMSR, &bmsr);
4301 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4302 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4303 current_link_up = 1;
4306 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4307 if (current_link_up == 1) {
4308 if (tp->link_config.active_speed == SPEED_100 ||
4309 tp->link_config.active_speed == SPEED_10)
4310 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4311 else
4312 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4313 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4314 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4315 else
4316 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4318 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4319 if (tp->link_config.active_duplex == DUPLEX_HALF)
4320 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4322 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4323 if (current_link_up == 1 &&
4324 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4325 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4326 else
4327 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4330 /* ??? Without this setting Netgear GA302T PHY does not
4331 * ??? send/receive packets... */
4333 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4334 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4335 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4336 tw32_f(MAC_MI_MODE, tp->mi_mode);
4337 udelay(80);
4340 tw32_f(MAC_MODE, tp->mac_mode);
4341 udelay(40);
4343 tg3_phy_eee_adjust(tp, current_link_up);
4345 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4346 /* Polled via timer. */
4347 tw32_f(MAC_EVENT, 0);
4348 } else {
4349 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4351 udelay(40);
4353 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4354 current_link_up == 1 &&
4355 tp->link_config.active_speed == SPEED_1000 &&
4356 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4357 udelay(120);
4358 tw32_f(MAC_STATUS,
4359 (MAC_STATUS_SYNC_CHANGED |
4360 MAC_STATUS_CFG_CHANGED));
4361 udelay(40);
4362 tg3_write_mem(tp,
4363 NIC_SRAM_FIRMWARE_MBOX,
4364 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4367 /* Prevent send BD corruption. */
4368 if (tg3_flag(tp, CLKREQ_BUG)) {
4369 u16 oldlnkctl, newlnkctl;
4371 pci_read_config_word(tp->pdev,
4372 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4373 &oldlnkctl);
4374 if (tp->link_config.active_speed == SPEED_100 ||
4375 tp->link_config.active_speed == SPEED_10)
4376 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4377 else
4378 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4379 if (newlnkctl != oldlnkctl)
4380 pci_write_config_word(tp->pdev,
4381 pci_pcie_cap(tp->pdev) +
4382 PCI_EXP_LNKCTL, newlnkctl);
4385 if (current_link_up != netif_carrier_ok(tp->dev)) {
4386 if (current_link_up)
4387 netif_carrier_on(tp->dev);
4388 else
4389 netif_carrier_off(tp->dev);
4390 tg3_link_report(tp);
4393 return 0;
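/* The structures and state machine below implement IEEE 802.3
 * clause 37 (1000BASE-X) auto-negotiation in software, for fiber
 * devices whose MAC cannot run the negotiation in hardware.
 */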
4396 struct tg3_fiber_aneginfo {
4397 int state;
4398 #define ANEG_STATE_UNKNOWN 0
4399 #define ANEG_STATE_AN_ENABLE 1
4400 #define ANEG_STATE_RESTART_INIT 2
4401 #define ANEG_STATE_RESTART 3
4402 #define ANEG_STATE_DISABLE_LINK_OK 4
4403 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4404 #define ANEG_STATE_ABILITY_DETECT 6
4405 #define ANEG_STATE_ACK_DETECT_INIT 7
4406 #define ANEG_STATE_ACK_DETECT 8
4407 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4408 #define ANEG_STATE_COMPLETE_ACK 10
4409 #define ANEG_STATE_IDLE_DETECT_INIT 11
4410 #define ANEG_STATE_IDLE_DETECT 12
4411 #define ANEG_STATE_LINK_OK 13
4412 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4413 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4415 u32 flags;
4416 #define MR_AN_ENABLE 0x00000001
4417 #define MR_RESTART_AN 0x00000002
4418 #define MR_AN_COMPLETE 0x00000004
4419 #define MR_PAGE_RX 0x00000008
4420 #define MR_NP_LOADED 0x00000010
4421 #define MR_TOGGLE_TX 0x00000020
4422 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4423 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4424 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4425 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4426 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4427 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4428 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4429 #define MR_TOGGLE_RX 0x00002000
4430 #define MR_NP_RX 0x00004000
4432 #define MR_LINK_OK 0x80000000
4434 unsigned long link_time, cur_time;
4436 u32 ability_match_cfg;
4437 int ability_match_count;
4439 char ability_match, idle_match, ack_match;
4441 u32 txconfig, rxconfig;
4442 #define ANEG_CFG_NP 0x00000080
4443 #define ANEG_CFG_ACK 0x00000040
4444 #define ANEG_CFG_RF2 0x00000020
4445 #define ANEG_CFG_RF1 0x00000010
4446 #define ANEG_CFG_PS2 0x00000001
4447 #define ANEG_CFG_PS1 0x00008000
4448 #define ANEG_CFG_HD 0x00004000
4449 #define ANEG_CFG_FD 0x00002000
4450 #define ANEG_CFG_INVAL 0x00001f06
4453 #define ANEG_OK 0
4454 #define ANEG_DONE 1
4455 #define ANEG_TIMER_ENAB 2
4456 #define ANEG_FAILED -1
4458 #define ANEG_STATE_SETTLE_TIME 10000
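/* ANEG_STATE_SETTLE_TIME is measured in cur_time ticks; the caller
 * (fiber_autoneg() below) advances the state machine once per
 * microsecond, so 10000 ticks is roughly 10 ms of settle time.
 */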
4460 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4461 struct tg3_fiber_aneginfo *ap)
4463 u16 flowctrl;
4464 unsigned long delta;
4465 u32 rx_cfg_reg;
4466 int ret;
4468 if (ap->state == ANEG_STATE_UNKNOWN) {
4469 ap->rxconfig = 0;
4470 ap->link_time = 0;
4471 ap->cur_time = 0;
4472 ap->ability_match_cfg = 0;
4473 ap->ability_match_count = 0;
4474 ap->ability_match = 0;
4475 ap->idle_match = 0;
4476 ap->ack_match = 0;
4478 ap->cur_time++;
4480 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4481 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4483 if (rx_cfg_reg != ap->ability_match_cfg) {
4484 ap->ability_match_cfg = rx_cfg_reg;
4485 ap->ability_match = 0;
4486 ap->ability_match_count = 0;
4487 } else {
4488 if (++ap->ability_match_count > 1) {
4489 ap->ability_match = 1;
4490 ap->ability_match_cfg = rx_cfg_reg;
4493 if (rx_cfg_reg & ANEG_CFG_ACK)
4494 ap->ack_match = 1;
4495 else
4496 ap->ack_match = 0;
4498 ap->idle_match = 0;
4499 } else {
4500 ap->idle_match = 1;
4501 ap->ability_match_cfg = 0;
4502 ap->ability_match_count = 0;
4503 ap->ability_match = 0;
4504 ap->ack_match = 0;
4506 rx_cfg_reg = 0;
4509 ap->rxconfig = rx_cfg_reg;
4510 ret = ANEG_OK;
4512 switch (ap->state) {
4513 case ANEG_STATE_UNKNOWN:
4514 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4515 ap->state = ANEG_STATE_AN_ENABLE;
4517 /* fallthru */
4518 case ANEG_STATE_AN_ENABLE:
4519 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4520 if (ap->flags & MR_AN_ENABLE) {
4521 ap->link_time = 0;
4522 ap->cur_time = 0;
4523 ap->ability_match_cfg = 0;
4524 ap->ability_match_count = 0;
4525 ap->ability_match = 0;
4526 ap->idle_match = 0;
4527 ap->ack_match = 0;
4529 ap->state = ANEG_STATE_RESTART_INIT;
4530 } else {
4531 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4533 break;
4535 case ANEG_STATE_RESTART_INIT:
4536 ap->link_time = ap->cur_time;
4537 ap->flags &= ~(MR_NP_LOADED);
4538 ap->txconfig = 0;
4539 tw32(MAC_TX_AUTO_NEG, 0);
4540 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4541 tw32_f(MAC_MODE, tp->mac_mode);
4542 udelay(40);
4544 ret = ANEG_TIMER_ENAB;
4545 ap->state = ANEG_STATE_RESTART;
4547 /* fallthru */
4548 case ANEG_STATE_RESTART:
4549 delta = ap->cur_time - ap->link_time;
4550 if (delta > ANEG_STATE_SETTLE_TIME)
4551 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4552 else
4553 ret = ANEG_TIMER_ENAB;
4554 break;
4556 case ANEG_STATE_DISABLE_LINK_OK:
4557 ret = ANEG_DONE;
4558 break;
4560 case ANEG_STATE_ABILITY_DETECT_INIT:
4561 ap->flags &= ~(MR_TOGGLE_TX);
4562 ap->txconfig = ANEG_CFG_FD;
4563 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4564 if (flowctrl & ADVERTISE_1000XPAUSE)
4565 ap->txconfig |= ANEG_CFG_PS1;
4566 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4567 ap->txconfig |= ANEG_CFG_PS2;
4568 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4569 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4570 tw32_f(MAC_MODE, tp->mac_mode);
4571 udelay(40);
4573 ap->state = ANEG_STATE_ABILITY_DETECT;
4574 break;
4576 case ANEG_STATE_ABILITY_DETECT:
4577 if (ap->ability_match != 0 && ap->rxconfig != 0)
4578 ap->state = ANEG_STATE_ACK_DETECT_INIT;
4579 break;
4581 case ANEG_STATE_ACK_DETECT_INIT:
4582 ap->txconfig |= ANEG_CFG_ACK;
4583 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4584 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4585 tw32_f(MAC_MODE, tp->mac_mode);
4586 udelay(40);
4588 ap->state = ANEG_STATE_ACK_DETECT;
4590 /* fallthru */
4591 case ANEG_STATE_ACK_DETECT:
4592 if (ap->ack_match != 0) {
4593 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4594 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4595 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4596 } else {
4597 ap->state = ANEG_STATE_AN_ENABLE;
4599 } else if (ap->ability_match != 0 &&
4600 ap->rxconfig == 0) {
4601 ap->state = ANEG_STATE_AN_ENABLE;
4603 break;
4605 case ANEG_STATE_COMPLETE_ACK_INIT:
4606 if (ap->rxconfig & ANEG_CFG_INVAL) {
4607 ret = ANEG_FAILED;
4608 break;
4610 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4611 MR_LP_ADV_HALF_DUPLEX |
4612 MR_LP_ADV_SYM_PAUSE |
4613 MR_LP_ADV_ASYM_PAUSE |
4614 MR_LP_ADV_REMOTE_FAULT1 |
4615 MR_LP_ADV_REMOTE_FAULT2 |
4616 MR_LP_ADV_NEXT_PAGE |
4617 MR_TOGGLE_RX |
4618 MR_NP_RX);
4619 if (ap->rxconfig & ANEG_CFG_FD)
4620 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4621 if (ap->rxconfig & ANEG_CFG_HD)
4622 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4623 if (ap->rxconfig & ANEG_CFG_PS1)
4624 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4625 if (ap->rxconfig & ANEG_CFG_PS2)
4626 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4627 if (ap->rxconfig & ANEG_CFG_RF1)
4628 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4629 if (ap->rxconfig & ANEG_CFG_RF2)
4630 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4631 if (ap->rxconfig & ANEG_CFG_NP)
4632 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4634 ap->link_time = ap->cur_time;
4636 ap->flags ^= (MR_TOGGLE_TX);
4637 if (ap->rxconfig & 0x0008)
4638 ap->flags |= MR_TOGGLE_RX;
4639 if (ap->rxconfig & ANEG_CFG_NP)
4640 ap->flags |= MR_NP_RX;
4641 ap->flags |= MR_PAGE_RX;
4643 ap->state = ANEG_STATE_COMPLETE_ACK;
4644 ret = ANEG_TIMER_ENAB;
4645 break;
4647 case ANEG_STATE_COMPLETE_ACK:
4648 if (ap->ability_match != 0 &&
4649 ap->rxconfig == 0) {
4650 ap->state = ANEG_STATE_AN_ENABLE;
4651 break;
4653 delta = ap->cur_time - ap->link_time;
4654 if (delta > ANEG_STATE_SETTLE_TIME) {
4655 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4656 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4657 } else {
4658 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4659 !(ap->flags & MR_NP_RX)) {
4660 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4661 } else {
4662 ret = ANEG_FAILED;
4666 break;
4668 case ANEG_STATE_IDLE_DETECT_INIT:
4669 ap->link_time = ap->cur_time;
4670 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4671 tw32_f(MAC_MODE, tp->mac_mode);
4672 udelay(40);
4674 ap->state = ANEG_STATE_IDLE_DETECT;
4675 ret = ANEG_TIMER_ENAB;
4676 break;
4678 case ANEG_STATE_IDLE_DETECT:
4679 if (ap->ability_match != 0 &&
4680 ap->rxconfig == 0) {
4681 ap->state = ANEG_STATE_AN_ENABLE;
4682 break;
4684 delta = ap->cur_time - ap->link_time;
4685 if (delta > ANEG_STATE_SETTLE_TIME) {
4686 /* XXX another gem from the Broadcom driver :( */
4687 ap->state = ANEG_STATE_LINK_OK;
4689 break;
4691 case ANEG_STATE_LINK_OK:
4692 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4693 ret = ANEG_DONE;
4694 break;
4696 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4697 /* ??? unimplemented */
4698 break;
4700 case ANEG_STATE_NEXT_PAGE_WAIT:
4701 /* ??? unimplemented */
4702 break;
4704 default:
4705 ret = ANEG_FAILED;
4706 break;
4709 return ret;
4712 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4714 int res = 0;
4715 struct tg3_fiber_aneginfo aninfo;
4716 int status = ANEG_FAILED;
4717 unsigned int tick;
4718 u32 tmp;
4720 tw32_f(MAC_TX_AUTO_NEG, 0);
4722 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4723 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4724 udelay(40);
4726 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4727 udelay(40);
4729 memset(&aninfo, 0, sizeof(aninfo));
4730 aninfo.flags |= MR_AN_ENABLE;
4731 aninfo.state = ANEG_STATE_UNKNOWN;
4732 aninfo.cur_time = 0;
4733 tick = 0;
4734 while (++tick < 195000) {
4735 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4736 if (status == ANEG_DONE || status == ANEG_FAILED)
4737 break;
4739 udelay(1);
4742 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4743 tw32_f(MAC_MODE, tp->mac_mode);
4744 udelay(40);
4746 *txflags = aninfo.txconfig;
4747 *rxflags = aninfo.flags;
4749 if (status == ANEG_DONE &&
4750 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4751 MR_LP_ADV_FULL_DUPLEX)))
4752 res = 1;
4754 return res;
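/* fiber_autoneg() gives the state machine up to ~195000 iterations
 * spaced 1 us apart, i.e. roughly 195 ms, to reach ANEG_DONE before
 * the attempt is abandoned.
 */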
4757 static void tg3_init_bcm8002(struct tg3 *tp)
4759 u32 mac_status = tr32(MAC_STATUS);
4760 int i;
4762 /* Reset when initting first time or we have a link. */
4763 if (tg3_flag(tp, INIT_COMPLETE) &&
4764 !(mac_status & MAC_STATUS_PCS_SYNCED))
4765 return;
4767 /* Set PLL lock range. */
4768 tg3_writephy(tp, 0x16, 0x8007);
4770 /* SW reset */
4771 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4773 /* Wait for reset to complete. */
4774 /* XXX schedule_timeout() ... */
4775 for (i = 0; i < 500; i++)
4776 udelay(10);
4778 /* Config mode; select PMA/Ch 1 regs. */
4779 tg3_writephy(tp, 0x10, 0x8411);
4781 /* Enable auto-lock and comdet, select txclk for tx. */
4782 tg3_writephy(tp, 0x11, 0x0a10);
4784 tg3_writephy(tp, 0x18, 0x00a0);
4785 tg3_writephy(tp, 0x16, 0x41ff);
4787 /* Assert and deassert POR. */
4788 tg3_writephy(tp, 0x13, 0x0400);
4789 udelay(40);
4790 tg3_writephy(tp, 0x13, 0x0000);
4792 tg3_writephy(tp, 0x11, 0x0a50);
4793 udelay(40);
4794 tg3_writephy(tp, 0x11, 0x0a10);
4796 /* Wait for signal to stabilize */
4797 /* XXX schedule_timeout() ... */
4798 for (i = 0; i < 15000; i++)
4799 udelay(10);
4801 /* Deselect the channel register so we can read the PHYID
4802 * later. */
4804 tg3_writephy(tp, 0x10, 0x8011);
4807 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4809 u16 flowctrl;
4810 u32 sg_dig_ctrl, sg_dig_status;
4811 u32 serdes_cfg, expected_sg_dig_ctrl;
4812 int workaround, port_a;
4813 int current_link_up;
4815 serdes_cfg = 0;
4816 expected_sg_dig_ctrl = 0;
4817 workaround = 0;
4818 port_a = 1;
4819 current_link_up = 0;
4821 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4822 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4823 workaround = 1;
4824 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4825 port_a = 0;
4827 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4828 /* preserve bits 20-23 for voltage regulator */
4829 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4832 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4834 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4835 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4836 if (workaround) {
4837 u32 val = serdes_cfg;
4839 if (port_a)
4840 val |= 0xc010000;
4841 else
4842 val |= 0x4010000;
4843 tw32_f(MAC_SERDES_CFG, val);
4846 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4848 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4849 tg3_setup_flow_control(tp, 0, 0);
4850 current_link_up = 1;
4852 goto out;
4855 /* Want auto-negotiation. */
4856 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4858 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4859 if (flowctrl & ADVERTISE_1000XPAUSE)
4860 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4861 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4862 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4864 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4865 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4866 tp->serdes_counter &&
4867 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4868 MAC_STATUS_RCVD_CFG)) ==
4869 MAC_STATUS_PCS_SYNCED)) {
4870 tp->serdes_counter--;
4871 current_link_up = 1;
4872 goto out;
4874 restart_autoneg:
4875 if (workaround)
4876 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4877 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4878 udelay(5);
4879 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4881 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4882 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4883 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4884 MAC_STATUS_SIGNAL_DET)) {
4885 sg_dig_status = tr32(SG_DIG_STATUS);
4886 mac_status = tr32(MAC_STATUS);
4888 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4889 (mac_status & MAC_STATUS_PCS_SYNCED)) {
4890 u32 local_adv = 0, remote_adv = 0;
4892 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4893 local_adv |= ADVERTISE_1000XPAUSE;
4894 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4895 local_adv |= ADVERTISE_1000XPSE_ASYM;
4897 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4898 remote_adv |= LPA_1000XPAUSE;
4899 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4900 remote_adv |= LPA_1000XPAUSE_ASYM;
4902 tp->link_config.rmt_adv =
4903 mii_adv_to_ethtool_adv_x(remote_adv);
4905 tg3_setup_flow_control(tp, local_adv, remote_adv);
4906 current_link_up = 1;
4907 tp->serdes_counter = 0;
4908 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4909 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4910 if (tp->serdes_counter)
4911 tp->serdes_counter--;
4912 else {
4913 if (workaround) {
4914 u32 val = serdes_cfg;
4916 if (port_a)
4917 val |= 0xc010000;
4918 else
4919 val |= 0x4010000;
4921 tw32_f(MAC_SERDES_CFG, val);
4924 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4925 udelay(40);
4927 /* Link parallel detection - link is up */
4928 /* only if we have PCS_SYNC and not */
4929 /* receiving config code words */
4930 mac_status = tr32(MAC_STATUS);
4931 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4932 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4933 tg3_setup_flow_control(tp, 0, 0);
4934 current_link_up = 1;
4935 tp->phy_flags |=
4936 TG3_PHYFLG_PARALLEL_DETECT;
4937 tp->serdes_counter =
4938 SERDES_PARALLEL_DET_TIMEOUT;
4939 } else
4940 goto restart_autoneg;
4943 } else {
4944 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4945 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4948 out:
4949 return current_link_up;
4952 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4954 int current_link_up = 0;
4956 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4957 goto out;
4959 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4960 u32 txflags, rxflags;
4961 int i;
4963 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4964 u32 local_adv = 0, remote_adv = 0;
4966 if (txflags & ANEG_CFG_PS1)
4967 local_adv |= ADVERTISE_1000XPAUSE;
4968 if (txflags & ANEG_CFG_PS2)
4969 local_adv |= ADVERTISE_1000XPSE_ASYM;
4971 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4972 remote_adv |= LPA_1000XPAUSE;
4973 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4974 remote_adv |= LPA_1000XPAUSE_ASYM;
4976 tp->link_config.rmt_adv =
4977 mii_adv_to_ethtool_adv_x(remote_adv);
4979 tg3_setup_flow_control(tp, local_adv, remote_adv);
4981 current_link_up = 1;
4983 for (i = 0; i < 30; i++) {
4984 udelay(20);
4985 tw32_f(MAC_STATUS,
4986 (MAC_STATUS_SYNC_CHANGED |
4987 MAC_STATUS_CFG_CHANGED));
4988 udelay(40);
4989 if ((tr32(MAC_STATUS) &
4990 (MAC_STATUS_SYNC_CHANGED |
4991 MAC_STATUS_CFG_CHANGED)) == 0)
4992 break;
4995 mac_status = tr32(MAC_STATUS);
4996 if (current_link_up == 0 &&
4997 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4998 !(mac_status & MAC_STATUS_RCVD_CFG))
4999 current_link_up = 1;
5000 } else {
5001 tg3_setup_flow_control(tp, 0, 0);
5003 /* Forcing 1000FD link up. */
5004 current_link_up = 1;
5006 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5007 udelay(40);
5009 tw32_f(MAC_MODE, tp->mac_mode);
5010 udelay(40);
5013 out:
5014 return current_link_up;
5017 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5019 u32 orig_pause_cfg;
5020 u16 orig_active_speed;
5021 u8 orig_active_duplex;
5022 u32 mac_status;
5023 int current_link_up;
5024 int i;
5026 orig_pause_cfg = tp->link_config.active_flowctrl;
5027 orig_active_speed = tp->link_config.active_speed;
5028 orig_active_duplex = tp->link_config.active_duplex;
5030 if (!tg3_flag(tp, HW_AUTONEG) &&
5031 netif_carrier_ok(tp->dev) &&
5032 tg3_flag(tp, INIT_COMPLETE)) {
5033 mac_status = tr32(MAC_STATUS);
5034 mac_status &= (MAC_STATUS_PCS_SYNCED |
5035 MAC_STATUS_SIGNAL_DET |
5036 MAC_STATUS_CFG_CHANGED |
5037 MAC_STATUS_RCVD_CFG);
5038 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5039 MAC_STATUS_SIGNAL_DET)) {
5040 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5041 MAC_STATUS_CFG_CHANGED));
5042 return 0;
5046 tw32_f(MAC_TX_AUTO_NEG, 0);
5048 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5049 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5050 tw32_f(MAC_MODE, tp->mac_mode);
5051 udelay(40);
5053 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5054 tg3_init_bcm8002(tp);
5056 /* Enable link change event even when serdes polling. */
5057 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5058 udelay(40);
5060 current_link_up = 0;
5061 tp->link_config.rmt_adv = 0;
5062 mac_status = tr32(MAC_STATUS);
5064 if (tg3_flag(tp, HW_AUTONEG))
5065 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5066 else
5067 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5069 tp->napi[0].hw_status->status =
5070 (SD_STATUS_UPDATED |
5071 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5073 for (i = 0; i < 100; i++) {
5074 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5075 MAC_STATUS_CFG_CHANGED));
5076 udelay(5);
5077 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5078 MAC_STATUS_CFG_CHANGED |
5079 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5080 break;
5083 mac_status = tr32(MAC_STATUS);
5084 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5085 current_link_up = 0;
5086 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5087 tp->serdes_counter == 0) {
5088 tw32_f(MAC_MODE, (tp->mac_mode |
5089 MAC_MODE_SEND_CONFIGS));
5090 udelay(1);
5091 tw32_f(MAC_MODE, tp->mac_mode);
5095 if (current_link_up == 1) {
5096 tp->link_config.active_speed = SPEED_1000;
5097 tp->link_config.active_duplex = DUPLEX_FULL;
5098 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5099 LED_CTRL_LNKLED_OVERRIDE |
5100 LED_CTRL_1000MBPS_ON));
5101 } else {
5102 tp->link_config.active_speed = SPEED_UNKNOWN;
5103 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5104 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5105 LED_CTRL_LNKLED_OVERRIDE |
5106 LED_CTRL_TRAFFIC_OVERRIDE));
5109 if (current_link_up != netif_carrier_ok(tp->dev)) {
5110 if (current_link_up)
5111 netif_carrier_on(tp->dev);
5112 else
5113 netif_carrier_off(tp->dev);
5114 tg3_link_report(tp);
5115 } else {
5116 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5117 if (orig_pause_cfg != now_pause_cfg ||
5118 orig_active_speed != tp->link_config.active_speed ||
5119 orig_active_duplex != tp->link_config.active_duplex)
5120 tg3_link_report(tp);
5123 return 0;
5126 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5128 int current_link_up, err = 0;
5129 u32 bmsr, bmcr;
5130 u16 current_speed;
5131 u8 current_duplex;
5132 u32 local_adv, remote_adv;
5134 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5135 tw32_f(MAC_MODE, tp->mac_mode);
5136 udelay(40);
5138 tw32(MAC_EVENT, 0);
5140 tw32_f(MAC_STATUS,
5141 (MAC_STATUS_SYNC_CHANGED |
5142 MAC_STATUS_CFG_CHANGED |
5143 MAC_STATUS_MI_COMPLETION |
5144 MAC_STATUS_LNKSTATE_CHANGED));
5145 udelay(40);
5147 if (force_reset)
5148 tg3_phy_reset(tp);
5150 current_link_up = 0;
5151 current_speed = SPEED_UNKNOWN;
5152 current_duplex = DUPLEX_UNKNOWN;
5153 tp->link_config.rmt_adv = 0;
5155 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5156 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5157 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5158 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5159 bmsr |= BMSR_LSTATUS;
5160 else
5161 bmsr &= ~BMSR_LSTATUS;
5164 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5166 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5167 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5168 /* do nothing, just check for link up at the end */
5169 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5170 u32 adv, newadv;
5172 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5173 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5174 ADVERTISE_1000XPAUSE |
5175 ADVERTISE_1000XPSE_ASYM |
5176 ADVERTISE_SLCT);
5178 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5179 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5181 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5182 tg3_writephy(tp, MII_ADVERTISE, newadv);
5183 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5184 tg3_writephy(tp, MII_BMCR, bmcr);
5186 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5187 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5188 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5190 return err;
5192 } else {
5193 u32 new_bmcr;
5195 bmcr &= ~BMCR_SPEED1000;
5196 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5198 if (tp->link_config.duplex == DUPLEX_FULL)
5199 new_bmcr |= BMCR_FULLDPLX;
5201 if (new_bmcr != bmcr) {
5202 /* BMCR_SPEED1000 is a reserved bit that needs
5203 * to be set on write. */
5205 new_bmcr |= BMCR_SPEED1000;
5207 /* Force a linkdown */
5208 if (netif_carrier_ok(tp->dev)) {
5209 u32 adv;
5211 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5212 adv &= ~(ADVERTISE_1000XFULL |
5213 ADVERTISE_1000XHALF |
5214 ADVERTISE_SLCT);
5215 tg3_writephy(tp, MII_ADVERTISE, adv);
5216 tg3_writephy(tp, MII_BMCR, bmcr |
5217 BMCR_ANRESTART |
5218 BMCR_ANENABLE);
5219 udelay(10);
5220 netif_carrier_off(tp->dev);
5222 tg3_writephy(tp, MII_BMCR, new_bmcr);
5223 bmcr = new_bmcr;
5224 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5225 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5226 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5227 ASIC_REV_5714) {
5228 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5229 bmsr |= BMSR_LSTATUS;
5230 else
5231 bmsr &= ~BMSR_LSTATUS;
5233 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5237 if (bmsr & BMSR_LSTATUS) {
5238 current_speed = SPEED_1000;
5239 current_link_up = 1;
5240 if (bmcr & BMCR_FULLDPLX)
5241 current_duplex = DUPLEX_FULL;
5242 else
5243 current_duplex = DUPLEX_HALF;
5245 local_adv = 0;
5246 remote_adv = 0;
5248 if (bmcr & BMCR_ANENABLE) {
5249 u32 common;
5251 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5252 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5253 common = local_adv & remote_adv;
5254 if (common & (ADVERTISE_1000XHALF |
5255 ADVERTISE_1000XFULL)) {
5256 if (common & ADVERTISE_1000XFULL)
5257 current_duplex = DUPLEX_FULL;
5258 else
5259 current_duplex = DUPLEX_HALF;
5261 tp->link_config.rmt_adv =
5262 mii_adv_to_ethtool_adv_x(remote_adv);
5263 } else if (!tg3_flag(tp, 5780_CLASS)) {
5264 /* Link is up via parallel detect */
5265 } else {
5266 current_link_up = 0;
5271 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5272 tg3_setup_flow_control(tp, local_adv, remote_adv);
5274 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5275 if (tp->link_config.active_duplex == DUPLEX_HALF)
5276 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5278 tw32_f(MAC_MODE, tp->mac_mode);
5279 udelay(40);
5281 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5283 tp->link_config.active_speed = current_speed;
5284 tp->link_config.active_duplex = current_duplex;
5286 if (current_link_up != netif_carrier_ok(tp->dev)) {
5287 if (current_link_up)
5288 netif_carrier_on(tp->dev);
5289 else {
5290 netif_carrier_off(tp->dev);
5291 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5293 tg3_link_report(tp);
5295 return err;
5298 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5300 if (tp->serdes_counter) {
5301 /* Give autoneg time to complete. */
5302 tp->serdes_counter--;
5303 return;
5306 if (!netif_carrier_ok(tp->dev) &&
5307 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5308 u32 bmcr;
5310 tg3_readphy(tp, MII_BMCR, &bmcr);
5311 if (bmcr & BMCR_ANENABLE) {
5312 u32 phy1, phy2;
5314 /* Select shadow register 0x1f */
5315 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5316 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5318 /* Select expansion interrupt status register */
5319 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5320 MII_TG3_DSP_EXP1_INT_STAT);
5321 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5322 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5324 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5325 /* We have signal detect and not receiving
5326 * config code words, link is up by parallel
5327 * detection. */
5330 bmcr &= ~BMCR_ANENABLE;
5331 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5332 tg3_writephy(tp, MII_BMCR, bmcr);
5333 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5336 } else if (netif_carrier_ok(tp->dev) &&
5337 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5338 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5339 u32 phy2;
5341 /* Select expansion interrupt status register */
5342 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5343 MII_TG3_DSP_EXP1_INT_STAT);
5344 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5345 if (phy2 & 0x20) {
5346 u32 bmcr;
5348 /* Config code words received, turn on autoneg. */
5349 tg3_readphy(tp, MII_BMCR, &bmcr);
5350 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5352 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5358 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5360 u32 val;
5361 int err;
5363 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5364 err = tg3_setup_fiber_phy(tp, force_reset);
5365 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5366 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5367 else
5368 err = tg3_setup_copper_phy(tp, force_reset);
5370 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5371 u32 scale;
5373 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5374 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5375 scale = 65;
5376 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5377 scale = 6;
5378 else
5379 scale = 12;
5381 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5382 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5383 tw32(GRC_MISC_CFG, val);
5386 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5387 (6 << TX_LENGTHS_IPG_SHIFT);
5388 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5389 val |= tr32(MAC_TX_LENGTHS) &
5390 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5391 TX_LENGTHS_CNT_DWN_VAL_MSK);
5393 if (tp->link_config.active_speed == SPEED_1000 &&
5394 tp->link_config.active_duplex == DUPLEX_HALF)
5395 tw32(MAC_TX_LENGTHS, val |
5396 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5397 else
5398 tw32(MAC_TX_LENGTHS, val |
5399 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5401 if (!tg3_flag(tp, 5705_PLUS)) {
5402 if (netif_carrier_ok(tp->dev)) {
5403 tw32(HOSTCC_STAT_COAL_TICKS,
5404 tp->coal.stats_block_coalesce_usecs);
5405 } else {
5406 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5410 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5411 val = tr32(PCIE_PWR_MGMT_THRESH);
5412 if (!netif_carrier_ok(tp->dev))
5413 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5414 tp->pwrmgmt_thresh;
5415 else
5416 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5417 tw32(PCIE_PWR_MGMT_THRESH, val);
5420 return err;
5423 static inline int tg3_irq_sync(struct tg3 *tp)
5425 return tp->irq_sync;
5428 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5430 int i;
5432 dst = (u32 *)((u8 *)dst + off);
5433 for (i = 0; i < len; i += sizeof(u32))
5434 *dst++ = tr32(off + i);
5437 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5439 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5440 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5441 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5442 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5443 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5444 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5445 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5446 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5447 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5448 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5449 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5450 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5451 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5452 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5453 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5454 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5455 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5456 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5457 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5459 if (tg3_flag(tp, SUPPORT_MSIX))
5460 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5462 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5463 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5464 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5465 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5466 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5467 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5468 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5469 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5471 if (!tg3_flag(tp, 5705_PLUS)) {
5472 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5473 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5474 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5477 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5478 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5479 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5480 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5481 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5483 if (tg3_flag(tp, NVRAM))
5484 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5487 static void tg3_dump_state(struct tg3 *tp)
5489 int i;
5490 u32 *regs;
5492 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5493 if (!regs) {
5494 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5495 return;
5498 if (tg3_flag(tp, PCI_EXPRESS)) {
5499 /* Read up to but not including private PCI registers */
5500 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5501 regs[i / sizeof(u32)] = tr32(i);
5502 } else
5503 tg3_dump_legacy_regs(tp, regs);
5505 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5506 if (!regs[i + 0] && !regs[i + 1] &&
5507 !regs[i + 2] && !regs[i + 3])
5508 continue;
5510 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5511 i * 4,
5512 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5515 kfree(regs);
5517 for (i = 0; i < tp->irq_cnt; i++) {
5518 struct tg3_napi *tnapi = &tp->napi[i];
5520 /* SW status block */
5521 netdev_err(tp->dev,
5522 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5524 tnapi->hw_status->status,
5525 tnapi->hw_status->status_tag,
5526 tnapi->hw_status->rx_jumbo_consumer,
5527 tnapi->hw_status->rx_consumer,
5528 tnapi->hw_status->rx_mini_consumer,
5529 tnapi->hw_status->idx[0].rx_producer,
5530 tnapi->hw_status->idx[0].tx_consumer);
5532 netdev_err(tp->dev,
5533 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5535 tnapi->last_tag, tnapi->last_irq_tag,
5536 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5537 tnapi->rx_rcb_ptr,
5538 tnapi->prodring.rx_std_prod_idx,
5539 tnapi->prodring.rx_std_cons_idx,
5540 tnapi->prodring.rx_jmb_prod_idx,
5541 tnapi->prodring.rx_jmb_cons_idx);
5545 /* This is called whenever we suspect that the system chipset is re-
5546 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5547 * is bogus tx completions. We try to recover by setting the
5548 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5549 * in the workqueue. */
5551 static void tg3_tx_recover(struct tg3 *tp)
5553 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5554 tp->write32_tx_mbox == tg3_write_indirect_mbox);
5556 netdev_warn(tp->dev,
5557 "The system may be re-ordering memory-mapped I/O "
5558 "cycles to the network device, attempting to recover. "
5559 "Please report the problem to the driver maintainer "
5560 "and include system chipset information.\n");
5562 spin_lock(&tp->lock);
5563 tg3_flag_set(tp, TX_RECOVERY_PENDING);
5564 spin_unlock(&tp->lock);
5567 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5569 /* Tell compiler to fetch tx indices from memory. */
5570 barrier();
5571 return tnapi->tx_pending -
5572 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
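/* Ring occupancy is (tx_prod - tx_cons) masked to the power-of-two
 * ring size; unsigned wraparound keeps the subtraction correct even
 * after the indices wrap, so no explicit wrap handling is needed.
 */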
5575 /* Tigon3 never reports partial packet sends. So we do not
5576 * need special logic to handle SKBs that have not had all
5577 * of their frags sent yet, like SunGEM does. */
5579 static void tg3_tx(struct tg3_napi *tnapi)
5581 struct tg3 *tp = tnapi->tp;
5582 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5583 u32 sw_idx = tnapi->tx_cons;
5584 struct netdev_queue *txq;
5585 int index = tnapi - tp->napi;
5586 unsigned int pkts_compl = 0, bytes_compl = 0;
5588 if (tg3_flag(tp, ENABLE_TSS))
5589 index--;
5591 txq = netdev_get_tx_queue(tp->dev, index);
5593 while (sw_idx != hw_idx) {
5594 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5595 struct sk_buff *skb = ri->skb;
5596 int i, tx_bug = 0;
5598 if (unlikely(skb == NULL)) {
5599 tg3_tx_recover(tp);
5600 return;
5603 pci_unmap_single(tp->pdev,
5604 dma_unmap_addr(ri, mapping),
5605 skb_headlen(skb),
5606 PCI_DMA_TODEVICE);
5608 ri->skb = NULL;
5610 while (ri->fragmented) {
5611 ri->fragmented = false;
5612 sw_idx = NEXT_TX(sw_idx);
5613 ri = &tnapi->tx_buffers[sw_idx];
5616 sw_idx = NEXT_TX(sw_idx);
5618 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5619 ri = &tnapi->tx_buffers[sw_idx];
5620 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5621 tx_bug = 1;
5623 pci_unmap_page(tp->pdev,
5624 dma_unmap_addr(ri, mapping),
5625 skb_frag_size(&skb_shinfo(skb)->frags[i]),
5626 PCI_DMA_TODEVICE);
5628 while (ri->fragmented) {
5629 ri->fragmented = false;
5630 sw_idx = NEXT_TX(sw_idx);
5631 ri = &tnapi->tx_buffers[sw_idx];
5634 sw_idx = NEXT_TX(sw_idx);
5637 pkts_compl++;
5638 bytes_compl += skb->len;
5640 dev_kfree_skb(skb);
5642 if (unlikely(tx_bug)) {
5643 tg3_tx_recover(tp);
5644 return;
5648 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5650 tnapi->tx_cons = sw_idx;
5652 /* Need to make the tx_cons update visible to tg3_start_xmit()
5653 * before checking for netif_queue_stopped(). Without the
5654 * memory barrier, there is a small possibility that tg3_start_xmit()
5655 * will miss it and cause the queue to be stopped forever. */
5657 smp_mb();
5659 if (unlikely(netif_tx_queue_stopped(txq) &&
5660 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5661 __netif_tx_lock(txq, smp_processor_id());
5662 if (netif_tx_queue_stopped(txq) &&
5663 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5664 netif_tx_wake_queue(txq);
5665 __netif_tx_unlock(txq);
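/* Classic stop/wake protocol above: test the stopped flag locklessly
 * first, then re-test under the tx queue lock, so we never wake a
 * queue that tg3_start_xmit() is concurrently stopping.
 */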
5669 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5671 if (!ri->data)
5672 return;
5674 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5675 map_sz, PCI_DMA_FROMDEVICE);
5676 kfree(ri->data);
5677 ri->data = NULL;
5680 /* Returns size of skb allocated or < 0 on error.
5682 * We only need to fill in the address because the other members
5683 * of the RX descriptor are invariant, see tg3_init_rings.
5685 * Note the purposeful asymmetry of cpu vs. chip accesses. For
5686 * posting buffers we only dirty the first cache line of the RX
5687 * descriptor (containing the address). Whereas for the RX status
5688 * buffers the cpu only reads the last cacheline of the RX descriptor
5689 * (to fetch the error flags, vlan tag, checksum, and opaque cookie). */
5691 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5692 u32 opaque_key, u32 dest_idx_unmasked)
5694 struct tg3_rx_buffer_desc *desc;
5695 struct ring_info *map;
5696 u8 *data;
5697 dma_addr_t mapping;
5698 int skb_size, data_size, dest_idx;
5700 switch (opaque_key) {
5701 case RXD_OPAQUE_RING_STD:
5702 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5703 desc = &tpr->rx_std[dest_idx];
5704 map = &tpr->rx_std_buffers[dest_idx];
5705 data_size = tp->rx_pkt_map_sz;
5706 break;
5708 case RXD_OPAQUE_RING_JUMBO:
5709 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5710 desc = &tpr->rx_jmb[dest_idx].std;
5711 map = &tpr->rx_jmb_buffers[dest_idx];
5712 data_size = TG3_RX_JMB_MAP_SZ;
5713 break;
5715 default:
5716 return -EINVAL;
5719 /* Do not overwrite any of the map or rp information
5720 * until we are sure we can commit to a new buffer.
5722 * Callers depend upon this behavior and assume that
5723 * we leave everything unchanged if we fail. */
5725 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5726 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5727 data = kmalloc(skb_size, GFP_ATOMIC);
5728 if (!data)
5729 return -ENOMEM;
5731 mapping = pci_map_single(tp->pdev,
5732 data + TG3_RX_OFFSET(tp),
5733 data_size,
5734 PCI_DMA_FROMDEVICE);
5735 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5736 kfree(data);
5737 return -EIO;
5740 map->data = data;
5741 dma_unmap_addr_set(map, mapping, mapping);
5743 desc->addr_hi = ((u64)mapping >> 32);
5744 desc->addr_lo = ((u64)mapping & 0xffffffff);
5746 return data_size;
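/* skb_size above reserves room for the payload plus a trailing
 * struct skb_shared_info, so the buffer can later be handed to
 * build_skb() in tg3_rx() without any copy.
 */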
5749 /* We only need to move over in the address because the other
5750 * members of the RX descriptor are invariant. See notes above
5751 * tg3_alloc_rx_data for full details. */
5753 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5754 struct tg3_rx_prodring_set *dpr,
5755 u32 opaque_key, int src_idx,
5756 u32 dest_idx_unmasked)
5758 struct tg3 *tp = tnapi->tp;
5759 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5760 struct ring_info *src_map, *dest_map;
5761 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5762 int dest_idx;
5764 switch (opaque_key) {
5765 case RXD_OPAQUE_RING_STD:
5766 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5767 dest_desc = &dpr->rx_std[dest_idx];
5768 dest_map = &dpr->rx_std_buffers[dest_idx];
5769 src_desc = &spr->rx_std[src_idx];
5770 src_map = &spr->rx_std_buffers[src_idx];
5771 break;
5773 case RXD_OPAQUE_RING_JUMBO:
5774 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5775 dest_desc = &dpr->rx_jmb[dest_idx].std;
5776 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5777 src_desc = &spr->rx_jmb[src_idx].std;
5778 src_map = &spr->rx_jmb_buffers[src_idx];
5779 break;
5781 default:
5782 return;
5785 dest_map->data = src_map->data;
5786 dma_unmap_addr_set(dest_map, mapping,
5787 dma_unmap_addr(src_map, mapping));
5788 dest_desc->addr_hi = src_desc->addr_hi;
5789 dest_desc->addr_lo = src_desc->addr_lo;
5791 /* Ensure that the update to the skb happens after the physical
5792 * addresses have been transferred to the new BD location. */
5794 smp_wmb();
5796 src_map->data = NULL;
5799 /* The RX ring scheme is composed of multiple rings which post fresh
5800 * buffers to the chip, and one special ring the chip uses to report
5801 * status back to the host.
5803 * The special ring reports the status of received packets to the
5804 * host. The chip does not write into the original descriptor the
5805 * RX buffer was obtained from. The chip simply takes the original
5806 * descriptor as provided by the host, updates the status and length
5807 * field, then writes this into the next status ring entry.
5809 * Each ring the host uses to post buffers to the chip is described
5810 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
5811 * it is first placed into the on-chip ram. When the packet's length
5812 * is known, it walks down the TG3_BDINFO entries to select the ring.
5813 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5814 * which is within the range of the new packet's length is chosen.
5816 * The "separate ring for rx status" scheme may sound queer, but it makes
5817 * sense from a cache coherency perspective. If only the host writes
5818 * to the buffer post rings, and only the chip writes to the rx status
5819 * rings, then cache lines never move beyond shared-modified state.
5820 * If both the host and chip were to write into the same ring, cache line
5821 * eviction could occur since both entities want it in an exclusive state. */
5823 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5825 struct tg3 *tp = tnapi->tp;
5826 u32 work_mask, rx_std_posted = 0;
5827 u32 std_prod_idx, jmb_prod_idx;
5828 u32 sw_idx = tnapi->rx_rcb_ptr;
5829 u16 hw_idx;
5830 int received;
5831 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5833 hw_idx = *(tnapi->rx_rcb_prod_idx);
5835 /* We need to order the read of hw_idx and the read of
5836 * the opaque cookie. */
5838 rmb();
5839 work_mask = 0;
5840 received = 0;
5841 std_prod_idx = tpr->rx_std_prod_idx;
5842 jmb_prod_idx = tpr->rx_jmb_prod_idx;
5843 while (sw_idx != hw_idx && budget > 0) {
5844 struct ring_info *ri;
5845 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5846 unsigned int len;
5847 struct sk_buff *skb;
5848 dma_addr_t dma_addr;
5849 u32 opaque_key, desc_idx, *post_ptr;
5850 u8 *data;
5852 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5853 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5854 if (opaque_key == RXD_OPAQUE_RING_STD) {
5855 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5856 dma_addr = dma_unmap_addr(ri, mapping);
5857 data = ri->data;
5858 post_ptr = &std_prod_idx;
5859 rx_std_posted++;
5860 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5861 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5862 dma_addr = dma_unmap_addr(ri, mapping);
5863 data = ri->data;
5864 post_ptr = &jmb_prod_idx;
5865 } else
5866 goto next_pkt_nopost;
5868 work_mask |= opaque_key;
5870 if (desc->err_vlan & RXD_ERR_MASK) {
5871 drop_it:
5872 tg3_recycle_rx(tnapi, tpr, opaque_key,
5873 desc_idx, *post_ptr);
5874 drop_it_no_recycle:
5875 /* Other statistics kept track of by card. */
5876 tp->rx_dropped++;
5877 goto next_pkt;
5880 prefetch(data + TG3_RX_OFFSET(tp));
5881 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5882 ETH_FCS_LEN;
5884 if (len > TG3_RX_COPY_THRESH(tp)) {
5885 int skb_size;
5887 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5888 *post_ptr);
5889 if (skb_size < 0)
5890 goto drop_it;
5892 pci_unmap_single(tp->pdev, dma_addr, skb_size,
5893 PCI_DMA_FROMDEVICE);
5895 skb = build_skb(data);
5896 if (!skb) {
5897 kfree(data);
5898 goto drop_it_no_recycle;
5900 skb_reserve(skb, TG3_RX_OFFSET(tp));
5901 /* Ensure that the update to the data happens
5902 * after the usage of the old DMA mapping. */
5904 smp_wmb();
5906 ri->data = NULL;
5908 } else {
5909 tg3_recycle_rx(tnapi, tpr, opaque_key,
5910 desc_idx, *post_ptr);
5912 skb = netdev_alloc_skb(tp->dev,
5913 len + TG3_RAW_IP_ALIGN);
5914 if (skb == NULL)
5915 goto drop_it_no_recycle;
5917 skb_reserve(skb, TG3_RAW_IP_ALIGN);
5918 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5919 memcpy(skb->data,
5920 data + TG3_RX_OFFSET(tp),
5921 len);
5922 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5925 skb_put(skb, len);
5926 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5927 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5928 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5929 >> RXD_TCPCSUM_SHIFT) == 0xffff))
5930 skb->ip_summed = CHECKSUM_UNNECESSARY;
5931 else
5932 skb_checksum_none_assert(skb);
5934 skb->protocol = eth_type_trans(skb, tp->dev);
5936 if (len > (tp->dev->mtu + ETH_HLEN) &&
5937 skb->protocol != htons(ETH_P_8021Q)) {
5938 dev_kfree_skb(skb);
5939 goto drop_it_no_recycle;
5942 if (desc->type_flags & RXD_FLAG_VLAN &&
5943 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5944 __vlan_hwaccel_put_tag(skb,
5945 desc->err_vlan & RXD_VLAN_MASK);
5947 napi_gro_receive(&tnapi->napi, skb);
5949 received++;
5950 budget--;
5952 next_pkt:
5953 (*post_ptr)++;
5955 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5956 tpr->rx_std_prod_idx = std_prod_idx &
5957 tp->rx_std_ring_mask;
5958 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5959 tpr->rx_std_prod_idx);
5960 work_mask &= ~RXD_OPAQUE_RING_STD;
5961 rx_std_posted = 0;
5963 next_pkt_nopost:
5964 sw_idx++;
5965 sw_idx &= tp->rx_ret_ring_mask;
5967 /* Refresh hw_idx to see if there is new work */
5968 if (sw_idx == hw_idx) {
5969 hw_idx = *(tnapi->rx_rcb_prod_idx);
5970 rmb();
5974 /* ACK the status ring. */
5975 tnapi->rx_rcb_ptr = sw_idx;
5976 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5978 /* Refill RX ring(s). */
5979 if (!tg3_flag(tp, ENABLE_RSS)) {
5980 /* Sync BD data before updating mailbox */
5981 wmb();
5983 if (work_mask & RXD_OPAQUE_RING_STD) {
5984 tpr->rx_std_prod_idx = std_prod_idx &
5985 tp->rx_std_ring_mask;
5986 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5987 tpr->rx_std_prod_idx);
5989 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5990 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5991 tp->rx_jmb_ring_mask;
5992 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5993 tpr->rx_jmb_prod_idx);
5995 mmiowb();
5996 } else if (work_mask) {
5997 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5998 * updated before the producer indices can be updated. */
6000 smp_wmb();
6002 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6003 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6005 if (tnapi != &tp->napi[1]) {
6006 tp->rx_refill = true;
6007 napi_schedule(&tp->napi[1].napi);
6011 return received;
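/* Copy-break strategy: frames longer than TG3_RX_COPY_THRESH() keep
 * their DMA buffer (wrapped via build_skb()) and a fresh buffer is
 * posted in their place; short frames are memcpy'd into a small skb
 * and the original buffer is recycled back onto the producer ring.
 */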
6014 static void tg3_poll_link(struct tg3 *tp)
6016 /* handle link change and other phy events */
6017 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6018 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6020 if (sblk->status & SD_STATUS_LINK_CHG) {
6021 sblk->status = SD_STATUS_UPDATED |
6022 (sblk->status & ~SD_STATUS_LINK_CHG);
6023 spin_lock(&tp->lock);
6024 if (tg3_flag(tp, USE_PHYLIB)) {
6025 tw32_f(MAC_STATUS,
6026 (MAC_STATUS_SYNC_CHANGED |
6027 MAC_STATUS_CFG_CHANGED |
6028 MAC_STATUS_MI_COMPLETION |
6029 MAC_STATUS_LNKSTATE_CHANGED));
6030 udelay(40);
6031 } else
6032 tg3_setup_phy(tp, 0);
6033 spin_unlock(&tp->lock);
6038 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6039 struct tg3_rx_prodring_set *dpr,
6040 struct tg3_rx_prodring_set *spr)
6042 u32 si, di, cpycnt, src_prod_idx;
6043 int i, err = 0;
6045 while (1) {
6046 src_prod_idx = spr->rx_std_prod_idx;
6048 /* Make sure updates to the rx_std_buffers[] entries and the
6049 * standard producer index are seen in the correct order. */
6051 smp_rmb();
6053 if (spr->rx_std_cons_idx == src_prod_idx)
6054 break;
6056 if (spr->rx_std_cons_idx < src_prod_idx)
6057 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6058 else
6059 cpycnt = tp->rx_std_ring_mask + 1 -
6060 spr->rx_std_cons_idx;
6062 cpycnt = min(cpycnt,
6063 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6065 si = spr->rx_std_cons_idx;
6066 di = dpr->rx_std_prod_idx;
6068 for (i = di; i < di + cpycnt; i++) {
6069 if (dpr->rx_std_buffers[i].data) {
6070 cpycnt = i - di;
6071 err = -ENOSPC;
6072 break;
6076 if (!cpycnt)
6077 break;
6079 /* Ensure that updates to the rx_std_buffers ring and the
6080 * shadowed hardware producer ring from tg3_recycle_skb() are
6081 * ordered correctly WRT the skb check above. */
6083 smp_rmb();
6085 memcpy(&dpr->rx_std_buffers[di],
6086 &spr->rx_std_buffers[si],
6087 cpycnt * sizeof(struct ring_info));
6089 for (i = 0; i < cpycnt; i++, di++, si++) {
6090 struct tg3_rx_buffer_desc *sbd, *dbd;
6091 sbd = &spr->rx_std[si];
6092 dbd = &dpr->rx_std[di];
6093 dbd->addr_hi = sbd->addr_hi;
6094 dbd->addr_lo = sbd->addr_lo;
6097 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6098 tp->rx_std_ring_mask;
6099 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6100 tp->rx_std_ring_mask;
6103 while (1) {
6104 src_prod_idx = spr->rx_jmb_prod_idx;
6106 /* Make sure updates to the rx_jmb_buffers[] entries and
6107 * the jumbo producer index are seen in the correct order. */
6109 smp_rmb();
6111 if (spr->rx_jmb_cons_idx == src_prod_idx)
6112 break;
6114 if (spr->rx_jmb_cons_idx < src_prod_idx)
6115 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6116 else
6117 cpycnt = tp->rx_jmb_ring_mask + 1 -
6118 spr->rx_jmb_cons_idx;
6120 cpycnt = min(cpycnt,
6121 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6123 si = spr->rx_jmb_cons_idx;
6124 di = dpr->rx_jmb_prod_idx;
6126 for (i = di; i < di + cpycnt; i++) {
6127 if (dpr->rx_jmb_buffers[i].data) {
6128 cpycnt = i - di;
6129 err = -ENOSPC;
6130 break;
6134 if (!cpycnt)
6135 break;
6137 /* Ensure that updates to the rx_jmb_buffers ring and the
6138 * shadowed hardware producer ring from tg3_recycle_skb() are
6139 * ordered correctly WRT the skb check above. */
6141 smp_rmb();
6143 memcpy(&dpr->rx_jmb_buffers[di],
6144 &spr->rx_jmb_buffers[si],
6145 cpycnt * sizeof(struct ring_info));
6147 for (i = 0; i < cpycnt; i++, di++, si++) {
6148 struct tg3_rx_buffer_desc *sbd, *dbd;
6149 sbd = &spr->rx_jmb[si].std;
6150 dbd = &dpr->rx_jmb[di].std;
6151 dbd->addr_hi = sbd->addr_hi;
6152 dbd->addr_lo = sbd->addr_lo;
6155 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6156 tp->rx_jmb_ring_mask;
6157 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6158 tp->rx_jmb_ring_mask;
6161 return err;
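/* With RSS, only napi[0]'s producer rings are visible to the chip.
 * The other vectors refill into private shadow rings, and the routine
 * above migrates those fresh buffers into the hardware-visible ring,
 * returning -ENOSPC when destination slots are still occupied.
 */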
6164 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6166 struct tg3 *tp = tnapi->tp;
6168 /* run TX completion thread */
6169 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6170 tg3_tx(tnapi);
6171 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6172 return work_done;
6175 if (!tnapi->rx_rcb_prod_idx)
6176 return work_done;
6178 /* run RX thread, within the bounds set by NAPI.
6179 * All RX "locking" is done by ensuring outside
6180 * code synchronizes with tg3->napi.poll() */
6182 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6183 work_done += tg3_rx(tnapi, budget - work_done);
6185 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6186 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6187 int i, err = 0;
6188 u32 std_prod_idx = dpr->rx_std_prod_idx;
6189 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6191 tp->rx_refill = false;
6192 for (i = 1; i < tp->irq_cnt; i++)
6193 err |= tg3_rx_prodring_xfer(tp, dpr,
6194 &tp->napi[i].prodring);
6196 wmb();
6198 if (std_prod_idx != dpr->rx_std_prod_idx)
6199 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6200 dpr->rx_std_prod_idx);
6202 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6203 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6204 dpr->rx_jmb_prod_idx);
6206 mmiowb();
6208 if (err)
6209 tw32_f(HOSTCC_MODE, tp->coal_now);
6212 return work_done;
6215 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6217 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6218 schedule_work(&tp->reset_task);
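/* test_and_set_bit() makes scheduling idempotent: the reset task is
 * queued at most once until tg3_reset_task_cancel() clears the flag.
 */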
6221 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6223 cancel_work_sync(&tp->reset_task);
6224 tg3_flag_clear(tp, RESET_TASK_PENDING);
6225 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6228 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6230 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6231 struct tg3 *tp = tnapi->tp;
6232 int work_done = 0;
6233 struct tg3_hw_status *sblk = tnapi->hw_status;
6235 while (1) {
6236 work_done = tg3_poll_work(tnapi, work_done, budget);
6238 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6239 goto tx_recovery;
6241 if (unlikely(work_done >= budget))
6242 break;
6244 /* tp->last_tag is used in tg3_int_reenable() below
6245 * to tell the hw how much work has been processed,
6246 * so we must read it before checking for more work. */
6248 tnapi->last_tag = sblk->status_tag;
6249 tnapi->last_irq_tag = tnapi->last_tag;
6250 rmb();
6252 /* check for RX/TX work to do */
6253 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6254 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6256 /* This test here is not race free, but will reduce
6257 * the number of interrupts by looping again.
6258 */
6259 if (tnapi == &tp->napi[1] && tp->rx_refill)
6260 continue;
6262 napi_complete(napi);
6263 /* Reenable interrupts. */
6264 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6266 /* This test here is synchronized by napi_schedule()
6267 * and napi_complete() to close the race condition.
6268 */
6269 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6270 tw32(HOSTCC_MODE, tp->coalesce_mode |
6271 HOSTCC_MODE_ENABLE |
6272 tnapi->coal_now);
6274 mmiowb();
6275 break;
6279 return work_done;
6281 tx_recovery:
6282 /* work_done is guaranteed to be less than budget. */
6283 napi_complete(napi);
6284 tg3_reset_task_schedule(tp);
6285 return work_done;
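/* The two rx_refill tests above form a check/complete/re-check
 * sequence against a lost wakeup, in outline:
 *
 *   if (tp->rx_refill)        loop again instead of sleeping
 *           continue;
 *   napi_complete(napi);      stop polling, re-arm interrupt
 *   if (tp->rx_refill)        refill raced in after complete;
 *           ...               force an irq via HOSTCC_MODE
 *
 * The second check is what closes the window between the first
 * test and napi_complete().
 */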
6288 static void tg3_process_error(struct tg3 *tp)
6290 u32 val;
6291 bool real_error = false;
6293 if (tg3_flag(tp, ERROR_PROCESSED))
6294 return;
6296 /* Check Flow Attention register */
6297 val = tr32(HOSTCC_FLOW_ATTN);
6298 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6299 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6300 real_error = true;
6303 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6304 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6305 real_error = true;
6308 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6309 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6310 real_error = true;
6313 if (!real_error)
6314 return;
6316 tg3_dump_state(tp);
6318 tg3_flag_set(tp, ERROR_PROCESSED);
6319 tg3_reset_task_schedule(tp);
6322 static int tg3_poll(struct napi_struct *napi, int budget)
6324 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6325 struct tg3 *tp = tnapi->tp;
6326 int work_done = 0;
6327 struct tg3_hw_status *sblk = tnapi->hw_status;
6329 while (1) {
6330 if (sblk->status & SD_STATUS_ERROR)
6331 tg3_process_error(tp);
6333 tg3_poll_link(tp);
6335 work_done = tg3_poll_work(tnapi, work_done, budget);
6337 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6338 goto tx_recovery;
6340 if (unlikely(work_done >= budget))
6341 break;
6343 if (tg3_flag(tp, TAGGED_STATUS)) {
6344 /* tp->last_tag is used in tg3_int_reenable() below
6345 * to tell the hw how much work has been processed,
6346 * so we must read it before checking for more work.
6347 */
6348 tnapi->last_tag = sblk->status_tag;
6349 tnapi->last_irq_tag = tnapi->last_tag;
6350 rmb();
6351 } else
6352 sblk->status &= ~SD_STATUS_UPDATED;
6354 if (likely(!tg3_has_work(tnapi))) {
6355 napi_complete(napi);
6356 tg3_int_reenable(tnapi);
6357 break;
6361 return work_done;
6363 tx_recovery:
6364 /* work_done is guaranteed to be less than budget. */
6365 napi_complete(napi);
6366 tg3_reset_task_schedule(tp);
6367 return work_done;
6370 static void tg3_napi_disable(struct tg3 *tp)
6372 int i;
6374 for (i = tp->irq_cnt - 1; i >= 0; i--)
6375 napi_disable(&tp->napi[i].napi);
6378 static void tg3_napi_enable(struct tg3 *tp)
6380 int i;
6382 for (i = 0; i < tp->irq_cnt; i++)
6383 napi_enable(&tp->napi[i].napi);
6386 static void tg3_napi_init(struct tg3 *tp)
6388 int i;
6390 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6391 for (i = 1; i < tp->irq_cnt; i++)
6392 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6395 static void tg3_napi_fini(struct tg3 *tp)
6397 int i;
6399 for (i = 0; i < tp->irq_cnt; i++)
6400 netif_napi_del(&tp->napi[i].napi);
6403 static inline void tg3_netif_stop(struct tg3 *tp)
6405 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6406 tg3_napi_disable(tp);
6407 netif_tx_disable(tp->dev);
6410 static inline void tg3_netif_start(struct tg3 *tp)
6412 /* NOTE: unconditional netif_tx_wake_all_queues is only
6413 * appropriate so long as all callers are assured to
6414 * have free tx slots (such as after tg3_init_hw)
6416 netif_tx_wake_all_queues(tp->dev);
6418 tg3_napi_enable(tp);
6419 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6420 tg3_enable_ints(tp);
6423 static void tg3_irq_quiesce(struct tg3 *tp)
6425 int i;
6427 BUG_ON(tp->irq_sync);
6429 tp->irq_sync = 1;
6430 smp_mb();
6432 for (i = 0; i < tp->irq_cnt; i++)
6433 synchronize_irq(tp->napi[i].irq_vec);
6436 /* Fully shut down all tg3 driver activity elsewhere in the system.
6437 * If irq_sync is non-zero, the IRQ handlers must be synchronized
6438 * with as well. Most of the time this is not necessary, except when
6439 * shutting down the device.
6440 */
6441 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6443 spin_lock_bh(&tp->lock);
6444 if (irq_sync)
6445 tg3_irq_quiesce(tp);
6448 static inline void tg3_full_unlock(struct tg3 *tp)
6450 spin_unlock_bh(&tp->lock);
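/* Typical usage on a slow-path reconfiguration, as a sketch
 * (tg3_restart_hw named for illustration):
 *
 *   tg3_full_lock(tp, 1);        // take tp->lock + quiesce IRQs
 *   err = tg3_restart_hw(tp, 1); // reprogram the chip
 *   tg3_full_unlock(tp);
 *
 * Passing irq_sync = 0 skips synchronize_irq() and suffices when
 * the handlers may keep running but must see consistent state.
 */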
6453 /* One-shot MSI handler - Chip automatically disables interrupt
6454 * after sending MSI so driver doesn't have to do it.
6456 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6458 struct tg3_napi *tnapi = dev_id;
6459 struct tg3 *tp = tnapi->tp;
6461 prefetch(tnapi->hw_status);
6462 if (tnapi->rx_rcb)
6463 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6465 if (likely(!tg3_irq_sync(tp)))
6466 napi_schedule(&tnapi->napi);
6468 return IRQ_HANDLED;
6471 /* MSI ISR - No need to check for interrupt sharing and no need to
6472 * flush status block and interrupt mailbox. PCI ordering rules
6473 * guarantee that MSI will arrive after the status block.
6475 static irqreturn_t tg3_msi(int irq, void *dev_id)
6477 struct tg3_napi *tnapi = dev_id;
6478 struct tg3 *tp = tnapi->tp;
6480 prefetch(tnapi->hw_status);
6481 if (tnapi->rx_rcb)
6482 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6483 /*
6484 * Writing any value to intr-mbox-0 clears PCI INTA# and
6485 * chip-internal interrupt pending events.
6486 * Writing non-zero to intr-mbox-0 additionally tells the
6487 * NIC to stop sending us irqs, engaging "in-intr-handler"
6488 * event coalescing.
6489 */
6490 tw32_mailbox(tnapi->int_mbox, 0x00000001);
6491 if (likely(!tg3_irq_sync(tp)))
6492 napi_schedule(&tnapi->napi);
6494 return IRQ_RETVAL(1);
6497 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6499 struct tg3_napi *tnapi = dev_id;
6500 struct tg3 *tp = tnapi->tp;
6501 struct tg3_hw_status *sblk = tnapi->hw_status;
6502 unsigned int handled = 1;
6504 /* In INTx mode, it is possible for the interrupt to arrive at
6505 * the CPU before the status block posted prior to the interrupt
6506 * has landed in host memory. Reading the PCI State register will
6507 * confirm whether the interrupt is ours and will flush the status block.
6508 */
6509 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6510 if (tg3_flag(tp, CHIP_RESETTING) ||
6511 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6512 handled = 0;
6513 goto out;
6517 /*
6518 * Writing any value to intr-mbox-0 clears PCI INTA# and
6519 * chip-internal interrupt pending events.
6520 * Writing non-zero to intr-mbox-0 additionally tells the
6521 * NIC to stop sending us irqs, engaging "in-intr-handler"
6522 * event coalescing.
6523 *
6524 * Flush the mailbox to de-assert the IRQ immediately to prevent
6525 * spurious interrupts. The flush impacts performance but
6526 * excessive spurious interrupts can be worse in some cases.
6527 */
6528 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6529 if (tg3_irq_sync(tp))
6530 goto out;
6531 sblk->status &= ~SD_STATUS_UPDATED;
6532 if (likely(tg3_has_work(tnapi))) {
6533 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6534 napi_schedule(&tnapi->napi);
6535 } else {
6536 /* No work, shared interrupt perhaps? re-enable
6537 * interrupts, and flush that PCI write.
6538 */
6539 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6540 0x00000000);
6542 out:
6543 return IRQ_RETVAL(handled);
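/* De-assert pattern for the level-triggered INTA# line, as used
 * above: the mailbox write is posted, so the _f (flush) variant
 * reads the register back to force the write out before the
 * handler returns:
 *
 *   tw32_mailbox_f(mbox, 1);   // ack + mask, then read back
 *   ...                        // napi_schedule() or re-enable
 *
 * Returning with the line still asserted would re-enter the
 * handler immediately and look like a screaming interrupt.
 */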
6546 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6548 struct tg3_napi *tnapi = dev_id;
6549 struct tg3 *tp = tnapi->tp;
6550 struct tg3_hw_status *sblk = tnapi->hw_status;
6551 unsigned int handled = 1;
6553 /* In INTx mode, it is possible for the interrupt to arrive at
6554 * the CPU before the status block posted prior to the interrupt
6555 * has landed in host memory. Reading the PCI State register will
6556 * confirm whether the interrupt is ours and will flush the status block.
6557 */
6558 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6559 if (tg3_flag(tp, CHIP_RESETTING) ||
6560 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6561 handled = 0;
6562 goto out;
6566 /*
6567 * Writing any value to intr-mbox-0 clears PCI INTA# and
6568 * chip-internal interrupt pending events.
6569 * Writing non-zero to intr-mbox-0 additionally tells the
6570 * NIC to stop sending us irqs, engaging "in-intr-handler"
6571 * event coalescing.
6572 *
6573 * Flush the mailbox to de-assert the IRQ immediately to prevent
6574 * spurious interrupts. The flush impacts performance but
6575 * excessive spurious interrupts can be worse in some cases.
6576 */
6577 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6579 /*
6580 * In a shared interrupt configuration, sometimes other devices'
6581 * interrupts will scream. We record the current status tag here
6582 * so that the above check can report that the screaming interrupts
6583 * are unhandled. Eventually they will be silenced.
6584 */
6585 tnapi->last_irq_tag = sblk->status_tag;
6587 if (tg3_irq_sync(tp))
6588 goto out;
6590 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6592 napi_schedule(&tnapi->napi);
6594 out:
6595 return IRQ_RETVAL(handled);
6598 /* ISR for interrupt test */
6599 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6601 struct tg3_napi *tnapi = dev_id;
6602 struct tg3 *tp = tnapi->tp;
6603 struct tg3_hw_status *sblk = tnapi->hw_status;
6605 if ((sblk->status & SD_STATUS_UPDATED) ||
6606 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6607 tg3_disable_ints(tp);
6608 return IRQ_RETVAL(1);
6610 return IRQ_RETVAL(0);
6613 #ifdef CONFIG_NET_POLL_CONTROLLER
6614 static void tg3_poll_controller(struct net_device *dev)
6616 int i;
6617 struct tg3 *tp = netdev_priv(dev);
6619 if (tg3_irq_sync(tp))
6620 return;
6622 for (i = 0; i < tp->irq_cnt; i++)
6623 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6625 #endif
6627 static void tg3_tx_timeout(struct net_device *dev)
6629 struct tg3 *tp = netdev_priv(dev);
6631 if (netif_msg_tx_err(tp)) {
6632 netdev_err(dev, "transmit timed out, resetting\n");
6633 tg3_dump_state(tp);
6636 tg3_reset_task_schedule(tp);
6639 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6640 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6642 u32 base = (u32) mapping & 0xffffffff;
6644 return (base > 0xffffdcc0) && (base + len + 8 < base);
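/* Worked example of the test above: len = 4 at mapping
 * 0xfffffff8 gives base + len + 8 = 0x100000004, which truncates
 * to 0x4 as a u32 -- smaller than base, so the sum wrapped and
 * the buffer straddles a 4GB boundary. The 8-byte slop appears
 * intended to cover the chip reading slightly past the buffer;
 * the base > 0xffffdcc0 pre-test cheaply skips buffers starting
 * more than 0x2340 (9024) bytes below the boundary, which can
 * never wrap at jumbo-frame sizes.
 */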
6647 /* Test for DMA addresses > 40-bit */
6648 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6649 int len)
6651 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6652 if (tg3_flag(tp, 40BIT_DMA_BUG))
6653 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6654 return 0;
6655 #else
6656 return 0;
6657 #endif
6660 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6661 dma_addr_t mapping, u32 len, u32 flags,
6662 u32 mss, u32 vlan)
6664 txbd->addr_hi = ((u64) mapping >> 32);
6665 txbd->addr_lo = ((u64) mapping & 0xffffffff);
6666 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6667 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
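/* Layout produced above, with illustrative values for a
 * 1514-byte frame at DMA address 0x123456000, TXD_FLAG_END set,
 * and no TSO or VLAN:
 *
 *   addr_hi   = 0x00000001
 *   addr_lo   = 0x23456000
 *   len_flags = (1514 << TXD_LEN_SHIFT) | TXD_FLAG_END
 *   vlan_tag  = (0 << TXD_MSS_SHIFT) | (0 << TXD_VLAN_TAG_SHIFT)
 */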
6670 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6671 dma_addr_t map, u32 len, u32 flags,
6672 u32 mss, u32 vlan)
6674 struct tg3 *tp = tnapi->tp;
6675 bool hwbug = false;
6677 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6678 hwbug = true;
6680 if (tg3_4g_overflow_test(map, len))
6681 hwbug = true;
6683 if (tg3_40bit_overflow_test(tp, map, len))
6684 hwbug = true;
6686 if (tp->dma_limit) {
6687 u32 prvidx = *entry;
6688 u32 tmp_flag = flags & ~TXD_FLAG_END;
6689 while (len > tp->dma_limit && *budget) {
6690 u32 frag_len = tp->dma_limit;
6691 len -= tp->dma_limit;
6693 /* Avoid the 8byte DMA problem */
6694 if (len <= 8) {
6695 len += tp->dma_limit / 2;
6696 frag_len = tp->dma_limit / 2;
6699 tnapi->tx_buffers[*entry].fragmented = true;
6701 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6702 frag_len, tmp_flag, mss, vlan);
6703 *budget -= 1;
6704 prvidx = *entry;
6705 *entry = NEXT_TX(*entry);
6707 map += frag_len;
6710 if (len) {
6711 if (*budget) {
6712 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6713 len, flags, mss, vlan);
6714 *budget -= 1;
6715 *entry = NEXT_TX(*entry);
6716 } else {
6717 hwbug = true;
6718 tnapi->tx_buffers[prvidx].fragmented = false;
6721 } else {
6722 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6723 len, flags, mss, vlan);
6724 *entry = NEXT_TX(*entry);
6727 return hwbug;
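/* Example of the split loop above, assuming tp->dma_limit = 4096:
 * a 9000-byte fragment becomes BDs of 4096 + 4096 + 808. If the
 * tail would end up <= 8 bytes (say 8196 bytes total), the last
 * full chunk is halved to 2048 so the tail grows to 2052,
 * sidestepping the 8-byte SHORT_DMA_BUG limit.
 */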
6730 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6732 int i;
6733 struct sk_buff *skb;
6734 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6736 skb = txb->skb;
6737 txb->skb = NULL;
6739 pci_unmap_single(tnapi->tp->pdev,
6740 dma_unmap_addr(txb, mapping),
6741 skb_headlen(skb),
6742 PCI_DMA_TODEVICE);
6744 while (txb->fragmented) {
6745 txb->fragmented = false;
6746 entry = NEXT_TX(entry);
6747 txb = &tnapi->tx_buffers[entry];
6750 for (i = 0; i <= last; i++) {
6751 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6753 entry = NEXT_TX(entry);
6754 txb = &tnapi->tx_buffers[entry];
6756 pci_unmap_page(tnapi->tp->pdev,
6757 dma_unmap_addr(txb, mapping),
6758 skb_frag_size(frag), PCI_DMA_TODEVICE);
6760 while (txb->fragmented) {
6761 txb->fragmented = false;
6762 entry = NEXT_TX(entry);
6763 txb = &tnapi->tx_buffers[entry];
6768 /* Workaround 4GB and 40-bit hardware DMA bugs. */
6769 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6770 struct sk_buff **pskb,
6771 u32 *entry, u32 *budget,
6772 u32 base_flags, u32 mss, u32 vlan)
6774 struct tg3 *tp = tnapi->tp;
6775 struct sk_buff *new_skb, *skb = *pskb;
6776 dma_addr_t new_addr = 0;
6777 int ret = 0;
6779 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6780 new_skb = skb_copy(skb, GFP_ATOMIC);
6781 else {
6782 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6784 new_skb = skb_copy_expand(skb,
6785 skb_headroom(skb) + more_headroom,
6786 skb_tailroom(skb), GFP_ATOMIC);
6789 if (!new_skb) {
6790 ret = -1;
6791 } else {
6792 /* New SKB is guaranteed to be linear. */
6793 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6794 PCI_DMA_TODEVICE);
6795 /* Make sure the mapping succeeded */
6796 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6797 dev_kfree_skb(new_skb);
6798 ret = -1;
6799 } else {
6800 u32 save_entry = *entry;
6802 base_flags |= TXD_FLAG_END;
6804 tnapi->tx_buffers[*entry].skb = new_skb;
6805 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6806 mapping, new_addr);
6808 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6809 new_skb->len, base_flags,
6810 mss, vlan)) {
6811 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6812 dev_kfree_skb(new_skb);
6813 ret = -1;
6818 dev_kfree_skb(skb);
6819 *pskb = new_skb;
6820 return ret;
6823 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6825 /* Use GSO to work around a rare TSO bug that may be triggered when the
6826 * TSO header is greater than 80 bytes.
6827 */
6828 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6830 struct sk_buff *segs, *nskb;
6831 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6833 /* Estimate the number of fragments in the worst case */
6834 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6835 netif_stop_queue(tp->dev);
6837 /* netif_tx_stop_queue() must be done before checking
6838 * tx index in tg3_tx_avail() below, because in
6839 * tg3_tx(), we update tx index before checking for
6840 * netif_tx_queue_stopped().
6841 */
6842 smp_mb();
6843 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6844 return NETDEV_TX_BUSY;
6846 netif_wake_queue(tp->dev);
6849 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6850 if (IS_ERR(segs))
6851 goto tg3_tso_bug_end;
6853 do {
6854 nskb = segs;
6855 segs = segs->next;
6856 nskb->next = NULL;
6857 tg3_start_xmit(nskb, tp->dev);
6858 } while (segs);
6860 tg3_tso_bug_end:
6861 dev_kfree_skb(skb);
6863 return NETDEV_TX_OK;
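/* This is the standard software-GSO fallback: skb_gso_segment()
 * splits the oversized TSO skb into already-segmented packets,
 * each resubmitted through tg3_start_xmit() with TSO stripped
 * from the feature mask. The frag_cnt_est of gso_segs * 3 above
 * is a worst-case guess of three descriptors per resulting
 * segment, used only to decide whether to stop the queue first.
 */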
6866 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6867 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6869 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6871 struct tg3 *tp = netdev_priv(dev);
6872 u32 len, entry, base_flags, mss, vlan = 0;
6873 u32 budget;
6874 int i = -1, would_hit_hwbug;
6875 dma_addr_t mapping;
6876 struct tg3_napi *tnapi;
6877 struct netdev_queue *txq;
6878 unsigned int last;
6880 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6881 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6882 if (tg3_flag(tp, ENABLE_TSS))
6883 tnapi++;
6885 budget = tg3_tx_avail(tnapi);
6887 /* We are running in BH disabled context with netif_tx_lock
6888 * and TX reclaim runs via tp->napi.poll inside of a software
6889 * interrupt. Furthermore, IRQ processing runs lockless so we have
6890 * no IRQ context deadlocks to worry about either. Rejoice!
6892 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6893 if (!netif_tx_queue_stopped(txq)) {
6894 netif_tx_stop_queue(txq);
6896 /* This is a hard error, log it. */
6897 netdev_err(dev,
6898 "BUG! Tx Ring full when queue awake!\n");
6900 return NETDEV_TX_BUSY;
6903 entry = tnapi->tx_prod;
6904 base_flags = 0;
6905 if (skb->ip_summed == CHECKSUM_PARTIAL)
6906 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6908 mss = skb_shinfo(skb)->gso_size;
6909 if (mss) {
6910 struct iphdr *iph;
6911 u32 tcp_opt_len, hdr_len;
6913 if (skb_header_cloned(skb) &&
6914 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6915 goto drop;
6917 iph = ip_hdr(skb);
6918 tcp_opt_len = tcp_optlen(skb);
6920 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
6922 if (!skb_is_gso_v6(skb)) {
6923 iph->check = 0;
6924 iph->tot_len = htons(mss + hdr_len);
6927 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6928 tg3_flag(tp, TSO_BUG))
6929 return tg3_tso_bug(tp, skb);
6931 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6932 TXD_FLAG_CPU_POST_DMA);
6934 if (tg3_flag(tp, HW_TSO_1) ||
6935 tg3_flag(tp, HW_TSO_2) ||
6936 tg3_flag(tp, HW_TSO_3)) {
6937 tcp_hdr(skb)->check = 0;
6938 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6939 } else
6940 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6941 iph->daddr, 0,
6942 IPPROTO_TCP,
6945 if (tg3_flag(tp, HW_TSO_3)) {
6946 mss |= (hdr_len & 0xc) << 12;
6947 if (hdr_len & 0x10)
6948 base_flags |= 0x00000010;
6949 base_flags |= (hdr_len & 0x3e0) << 5;
6950 } else if (tg3_flag(tp, HW_TSO_2))
6951 mss |= hdr_len << 9;
6952 else if (tg3_flag(tp, HW_TSO_1) ||
6953 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6954 if (tcp_opt_len || iph->ihl > 5) {
6955 int tsflags;
6957 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6958 mss |= (tsflags << 11);
6960 } else {
6961 if (tcp_opt_len || iph->ihl > 5) {
6962 int tsflags;
6964 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6965 base_flags |= tsflags << 12;
6970 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6971 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6972 base_flags |= TXD_FLAG_JMB_PKT;
6974 if (vlan_tx_tag_present(skb)) {
6975 base_flags |= TXD_FLAG_VLAN;
6976 vlan = vlan_tx_tag_get(skb);
6979 len = skb_headlen(skb);
6981 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6982 if (pci_dma_mapping_error(tp->pdev, mapping))
6983 goto drop;
6986 tnapi->tx_buffers[entry].skb = skb;
6987 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6989 would_hit_hwbug = 0;
6991 if (tg3_flag(tp, 5701_DMA_BUG))
6992 would_hit_hwbug = 1;
6994 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6995 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6996 mss, vlan)) {
6997 would_hit_hwbug = 1;
6998 } else if (skb_shinfo(skb)->nr_frags > 0) {
6999 u32 tmp_mss = mss;
7001 if (!tg3_flag(tp, HW_TSO_1) &&
7002 !tg3_flag(tp, HW_TSO_2) &&
7003 !tg3_flag(tp, HW_TSO_3))
7004 tmp_mss = 0;
7006 /* Now loop through additional data
7007 * fragments, and queue them.
7009 last = skb_shinfo(skb)->nr_frags - 1;
7010 for (i = 0; i <= last; i++) {
7011 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7013 len = skb_frag_size(frag);
7014 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7015 len, DMA_TO_DEVICE);
7017 tnapi->tx_buffers[entry].skb = NULL;
7018 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7019 mapping);
7020 if (dma_mapping_error(&tp->pdev->dev, mapping))
7021 goto dma_error;
7023 if (!budget ||
7024 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7025 len, base_flags |
7026 ((i == last) ? TXD_FLAG_END : 0),
7027 tmp_mss, vlan)) {
7028 would_hit_hwbug = 1;
7029 break;
7034 if (would_hit_hwbug) {
7035 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7037 /* If the workaround fails due to memory/mapping
7038 * failure, silently drop this packet.
7040 entry = tnapi->tx_prod;
7041 budget = tg3_tx_avail(tnapi);
7042 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7043 base_flags, mss, vlan))
7044 goto drop_nofree;
7047 skb_tx_timestamp(skb);
7048 netdev_tx_sent_queue(txq, skb->len);
7050 /* Sync BD data before updating mailbox */
7051 wmb();
7053 /* Packets are ready, update Tx producer idx local and on card. */
7054 tw32_tx_mbox(tnapi->prodmbox, entry);
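/* Doorbell ordering sketch: the BD stores target coherent memory
 * while the producer mailbox is MMIO, so the wmb() above keeps
 * the chip from fetching descriptors that are not yet visible:
 *
 *   tx_ring[entry] = ...;       // fill BDs (normal stores)
 *   wmb();                      // BDs visible before doorbell
 *   tw32_tx_mbox(mbox, entry);  // chip may DMA from tx_ring now
 */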
7056 tnapi->tx_prod = entry;
7057 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7058 netif_tx_stop_queue(txq);
7060 /* netif_tx_stop_queue() must be done before checking
7061 * tx index in tg3_tx_avail() below, because in
7062 * tg3_tx(), we update tx index before checking for
7063 * netif_tx_queue_stopped().
7064 */
7065 smp_mb();
7066 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7067 netif_tx_wake_queue(txq);
7070 mmiowb();
7071 return NETDEV_TX_OK;
7073 dma_error:
7074 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7075 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7076 drop:
7077 dev_kfree_skb(skb);
7078 drop_nofree:
7079 tp->tx_dropped++;
7080 return NETDEV_TX_OK;
7083 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7085 if (enable) {
7086 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7087 MAC_MODE_PORT_MODE_MASK);
7089 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7091 if (!tg3_flag(tp, 5705_PLUS))
7092 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7094 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7095 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7096 else
7097 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7098 } else {
7099 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7101 if (tg3_flag(tp, 5705_PLUS) ||
7102 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7103 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7104 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7107 tw32(MAC_MODE, tp->mac_mode);
7108 udelay(40);
7111 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7113 u32 val, bmcr, mac_mode, ptest = 0;
7115 tg3_phy_toggle_apd(tp, false);
7116 tg3_phy_toggle_automdix(tp, 0);
7118 if (extlpbk && tg3_phy_set_extloopbk(tp))
7119 return -EIO;
7121 bmcr = BMCR_FULLDPLX;
7122 switch (speed) {
7123 case SPEED_10:
7124 break;
7125 case SPEED_100:
7126 bmcr |= BMCR_SPEED100;
7127 break;
7128 case SPEED_1000:
7129 default:
7130 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7131 speed = SPEED_100;
7132 bmcr |= BMCR_SPEED100;
7133 } else {
7134 speed = SPEED_1000;
7135 bmcr |= BMCR_SPEED1000;
7139 if (extlpbk) {
7140 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7141 tg3_readphy(tp, MII_CTRL1000, &val);
7142 val |= CTL1000_AS_MASTER |
7143 CTL1000_ENABLE_MASTER;
7144 tg3_writephy(tp, MII_CTRL1000, val);
7145 } else {
7146 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7147 MII_TG3_FET_PTEST_TRIM_2;
7148 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7150 } else
7151 bmcr |= BMCR_LOOPBACK;
7153 tg3_writephy(tp, MII_BMCR, bmcr);
7155 /* The write needs to be flushed for the FETs */
7156 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7157 tg3_readphy(tp, MII_BMCR, &bmcr);
7159 udelay(40);
7161 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7162 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7163 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7164 MII_TG3_FET_PTEST_FRC_TX_LINK |
7165 MII_TG3_FET_PTEST_FRC_TX_LOCK);
7167 /* The write needs to be flushed for the AC131 */
7168 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7171 /* Reset to prevent losing 1st rx packet intermittently */
7172 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7173 tg3_flag(tp, 5780_CLASS)) {
7174 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7175 udelay(10);
7176 tw32_f(MAC_RX_MODE, tp->rx_mode);
7179 mac_mode = tp->mac_mode &
7180 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7181 if (speed == SPEED_1000)
7182 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7183 else
7184 mac_mode |= MAC_MODE_PORT_MODE_MII;
7186 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7187 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7189 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7190 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7191 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7192 mac_mode |= MAC_MODE_LINK_POLARITY;
7194 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7195 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7198 tw32(MAC_MODE, mac_mode);
7199 udelay(40);
7201 return 0;
7204 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7206 struct tg3 *tp = netdev_priv(dev);
7208 if (features & NETIF_F_LOOPBACK) {
7209 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7210 return;
7212 spin_lock_bh(&tp->lock);
7213 tg3_mac_loopback(tp, true);
7214 netif_carrier_on(tp->dev);
7215 spin_unlock_bh(&tp->lock);
7216 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7217 } else {
7218 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7219 return;
7221 spin_lock_bh(&tp->lock);
7222 tg3_mac_loopback(tp, false);
7223 /* Force link status check */
7224 tg3_setup_phy(tp, 1);
7225 spin_unlock_bh(&tp->lock);
7226 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7230 static netdev_features_t tg3_fix_features(struct net_device *dev,
7231 netdev_features_t features)
7233 struct tg3 *tp = netdev_priv(dev);
7235 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7236 features &= ~NETIF_F_ALL_TSO;
7238 return features;
7241 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7243 netdev_features_t changed = dev->features ^ features;
7245 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7246 tg3_set_loopback(dev, features);
7248 return 0;
7251 static void tg3_rx_prodring_free(struct tg3 *tp,
7252 struct tg3_rx_prodring_set *tpr)
7254 int i;
7256 if (tpr != &tp->napi[0].prodring) {
7257 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7258 i = (i + 1) & tp->rx_std_ring_mask)
7259 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7260 tp->rx_pkt_map_sz);
7262 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7263 for (i = tpr->rx_jmb_cons_idx;
7264 i != tpr->rx_jmb_prod_idx;
7265 i = (i + 1) & tp->rx_jmb_ring_mask) {
7266 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7267 TG3_RX_JMB_MAP_SZ);
7271 return;
7274 for (i = 0; i <= tp->rx_std_ring_mask; i++)
7275 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7276 tp->rx_pkt_map_sz);
7278 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7279 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7280 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7281 TG3_RX_JMB_MAP_SZ);
7285 /* Initialize rx rings for packet processing.
7287 * The chip has been shut down and the driver detached from
7288 * the networking, so no interrupts or new tx packets will
7289 * end up in the driver. tp->{tx,}lock are held and thus
7290 * we may not sleep.
7292 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7293 struct tg3_rx_prodring_set *tpr)
7295 u32 i, rx_pkt_dma_sz;
7297 tpr->rx_std_cons_idx = 0;
7298 tpr->rx_std_prod_idx = 0;
7299 tpr->rx_jmb_cons_idx = 0;
7300 tpr->rx_jmb_prod_idx = 0;
7302 if (tpr != &tp->napi[0].prodring) {
7303 memset(&tpr->rx_std_buffers[0], 0,
7304 TG3_RX_STD_BUFF_RING_SIZE(tp));
7305 if (tpr->rx_jmb_buffers)
7306 memset(&tpr->rx_jmb_buffers[0], 0,
7307 TG3_RX_JMB_BUFF_RING_SIZE(tp));
7308 goto done;
7311 /* Zero out all descriptors. */
7312 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7314 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7315 if (tg3_flag(tp, 5780_CLASS) &&
7316 tp->dev->mtu > ETH_DATA_LEN)
7317 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7318 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7320 /* Initialize invariants of the rings, we only set this
7321 * stuff once. This works because the card does not
7322 * write into the rx buffer posting rings.
7324 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7325 struct tg3_rx_buffer_desc *rxd;
7327 rxd = &tpr->rx_std[i];
7328 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7329 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7330 rxd->opaque = (RXD_OPAQUE_RING_STD |
7331 (i << RXD_OPAQUE_INDEX_SHIFT));
7334 /* Now allocate fresh SKBs for each rx ring. */
7335 for (i = 0; i < tp->rx_pending; i++) {
7336 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7337 netdev_warn(tp->dev,
7338 "Using a smaller RX standard ring. Only "
7339 "%d out of %d buffers were allocated "
7340 "successfully\n", i, tp->rx_pending);
7341 if (i == 0)
7342 goto initfail;
7343 tp->rx_pending = i;
7344 break;
7348 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7349 goto done;
7351 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7353 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7354 goto done;
7356 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7357 struct tg3_rx_buffer_desc *rxd;
7359 rxd = &tpr->rx_jmb[i].std;
7360 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7361 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7362 RXD_FLAG_JUMBO;
7363 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7364 (i << RXD_OPAQUE_INDEX_SHIFT));
7367 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7368 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7369 netdev_warn(tp->dev,
7370 "Using a smaller RX jumbo ring. Only %d "
7371 "out of %d buffers were allocated "
7372 "successfully\n", i, tp->rx_jumbo_pending);
7373 if (i == 0)
7374 goto initfail;
7375 tp->rx_jumbo_pending = i;
7376 break;
7380 done:
7381 return 0;
7383 initfail:
7384 tg3_rx_prodring_free(tp, tpr);
7385 return -ENOMEM;
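/* Note the degraded-mode policy above: a partial buffer
 * allocation shrinks tp->rx_pending / tp->rx_jumbo_pending to
 * whatever succeeded and continues with a shorter ring; only a
 * completely empty ring (i == 0) is treated as fatal.
 */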
7388 static void tg3_rx_prodring_fini(struct tg3 *tp,
7389 struct tg3_rx_prodring_set *tpr)
7391 kfree(tpr->rx_std_buffers);
7392 tpr->rx_std_buffers = NULL;
7393 kfree(tpr->rx_jmb_buffers);
7394 tpr->rx_jmb_buffers = NULL;
7395 if (tpr->rx_std) {
7396 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7397 tpr->rx_std, tpr->rx_std_mapping);
7398 tpr->rx_std = NULL;
7400 if (tpr->rx_jmb) {
7401 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7402 tpr->rx_jmb, tpr->rx_jmb_mapping);
7403 tpr->rx_jmb = NULL;
7407 static int tg3_rx_prodring_init(struct tg3 *tp,
7408 struct tg3_rx_prodring_set *tpr)
7410 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7411 GFP_KERNEL);
7412 if (!tpr->rx_std_buffers)
7413 return -ENOMEM;
7415 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7416 TG3_RX_STD_RING_BYTES(tp),
7417 &tpr->rx_std_mapping,
7418 GFP_KERNEL);
7419 if (!tpr->rx_std)
7420 goto err_out;
7422 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7423 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7424 GFP_KERNEL);
7425 if (!tpr->rx_jmb_buffers)
7426 goto err_out;
7428 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7429 TG3_RX_JMB_RING_BYTES(tp),
7430 &tpr->rx_jmb_mapping,
7431 GFP_KERNEL);
7432 if (!tpr->rx_jmb)
7433 goto err_out;
7436 return 0;
7438 err_out:
7439 tg3_rx_prodring_fini(tp, tpr);
7440 return -ENOMEM;
7443 /* Free up pending packets in all rx/tx rings.
7445 * The chip has been shut down and the driver detached from
7446 * the networking, so no interrupts or new tx packets will
7447 * end up in the driver. tp->{tx,}lock is not held and we are not
7448 * in an interrupt context and thus may sleep.
7450 static void tg3_free_rings(struct tg3 *tp)
7452 int i, j;
7454 for (j = 0; j < tp->irq_cnt; j++) {
7455 struct tg3_napi *tnapi = &tp->napi[j];
7457 tg3_rx_prodring_free(tp, &tnapi->prodring);
7459 if (!tnapi->tx_buffers)
7460 continue;
7462 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7463 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7465 if (!skb)
7466 continue;
7468 tg3_tx_skb_unmap(tnapi, i,
7469 skb_shinfo(skb)->nr_frags - 1);
7471 dev_kfree_skb_any(skb);
7473 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7477 /* Initialize tx/rx rings for packet processing.
7479 * The chip has been shut down and the driver detached from
7480 * the networking, so no interrupts or new tx packets will
7481 * end up in the driver. tp->{tx,}lock are held and thus
7482 * we may not sleep.
7484 static int tg3_init_rings(struct tg3 *tp)
7486 int i;
7488 /* Free up all the SKBs. */
7489 tg3_free_rings(tp);
7491 for (i = 0; i < tp->irq_cnt; i++) {
7492 struct tg3_napi *tnapi = &tp->napi[i];
7494 tnapi->last_tag = 0;
7495 tnapi->last_irq_tag = 0;
7496 tnapi->hw_status->status = 0;
7497 tnapi->hw_status->status_tag = 0;
7498 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7500 tnapi->tx_prod = 0;
7501 tnapi->tx_cons = 0;
7502 if (tnapi->tx_ring)
7503 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7505 tnapi->rx_rcb_ptr = 0;
7506 if (tnapi->rx_rcb)
7507 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7509 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7510 tg3_free_rings(tp);
7511 return -ENOMEM;
7515 return 0;
7518 /*
7519 * Must not be invoked with interrupt sources disabled and
7520 * the hardware shut down.
7521 */
7522 static void tg3_free_consistent(struct tg3 *tp)
7524 int i;
7526 for (i = 0; i < tp->irq_cnt; i++) {
7527 struct tg3_napi *tnapi = &tp->napi[i];
7529 if (tnapi->tx_ring) {
7530 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7531 tnapi->tx_ring, tnapi->tx_desc_mapping);
7532 tnapi->tx_ring = NULL;
7535 kfree(tnapi->tx_buffers);
7536 tnapi->tx_buffers = NULL;
7538 if (tnapi->rx_rcb) {
7539 dma_free_coherent(&tp->pdev->dev,
7540 TG3_RX_RCB_RING_BYTES(tp),
7541 tnapi->rx_rcb,
7542 tnapi->rx_rcb_mapping);
7543 tnapi->rx_rcb = NULL;
7546 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7548 if (tnapi->hw_status) {
7549 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7550 tnapi->hw_status,
7551 tnapi->status_mapping);
7552 tnapi->hw_status = NULL;
7556 if (tp->hw_stats) {
7557 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7558 tp->hw_stats, tp->stats_mapping);
7559 tp->hw_stats = NULL;
7563 /*
7564 * Must not be invoked with interrupt sources disabled and
7565 * the hardware shut down. Can sleep.
7566 */
7567 static int tg3_alloc_consistent(struct tg3 *tp)
7569 int i;
7571 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7572 sizeof(struct tg3_hw_stats),
7573 &tp->stats_mapping,
7574 GFP_KERNEL);
7575 if (!tp->hw_stats)
7576 goto err_out;
7578 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7580 for (i = 0; i < tp->irq_cnt; i++) {
7581 struct tg3_napi *tnapi = &tp->napi[i];
7582 struct tg3_hw_status *sblk;
7584 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7585 TG3_HW_STATUS_SIZE,
7586 &tnapi->status_mapping,
7587 GFP_KERNEL);
7588 if (!tnapi->hw_status)
7589 goto err_out;
7591 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7592 sblk = tnapi->hw_status;
7594 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7595 goto err_out;
7597 /* If multivector TSS is enabled, vector 0 does not handle
7598 * tx interrupts. Don't allocate any resources for it.
7599 */
7600 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7601 (i && tg3_flag(tp, ENABLE_TSS))) {
7602 tnapi->tx_buffers = kzalloc(
7603 sizeof(struct tg3_tx_ring_info) *
7604 TG3_TX_RING_SIZE, GFP_KERNEL);
7605 if (!tnapi->tx_buffers)
7606 goto err_out;
7608 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7609 TG3_TX_RING_BYTES,
7610 &tnapi->tx_desc_mapping,
7611 GFP_KERNEL);
7612 if (!tnapi->tx_ring)
7613 goto err_out;
7616 /*
7617 * When RSS is enabled, the status block format changes
7618 * slightly. The "rx_jumbo_consumer", "reserved",
7619 * and "rx_mini_consumer" members get mapped to the
7620 * other three rx return ring producer indexes.
7621 */
7622 switch (i) {
7623 default:
7624 if (tg3_flag(tp, ENABLE_RSS)) {
7625 tnapi->rx_rcb_prod_idx = NULL;
7626 break;
7628 /* Fall through */
7629 case 1:
7630 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7631 break;
7632 case 2:
7633 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7634 break;
7635 case 3:
7636 tnapi->rx_rcb_prod_idx = &sblk->reserved;
7637 break;
7638 case 4:
7639 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7640 break;
7643 /*
7644 * If multivector RSS is enabled, vector 0 does not handle
7645 * rx or tx interrupts. Don't allocate any resources for it.
7646 */
7647 if (!i && tg3_flag(tp, ENABLE_RSS))
7648 continue;
7650 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7651 TG3_RX_RCB_RING_BYTES(tp),
7652 &tnapi->rx_rcb_mapping,
7653 GFP_KERNEL);
7654 if (!tnapi->rx_rcb)
7655 goto err_out;
7657 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7660 return 0;
7662 err_out:
7663 tg3_free_consistent(tp);
7664 return -ENOMEM;
7667 #define MAX_WAIT_CNT 1000
7669 /* To stop a block, clear the enable bit and poll till it
7670 * clears. tp->lock is held.
7672 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7674 unsigned int i;
7675 u32 val;
7677 if (tg3_flag(tp, 5705_PLUS)) {
7678 switch (ofs) {
7679 case RCVLSC_MODE:
7680 case DMAC_MODE:
7681 case MBFREE_MODE:
7682 case BUFMGR_MODE:
7683 case MEMARB_MODE:
7684 /* We can't enable/disable these bits of the
7685 * 5705/5750, just say success.
7687 return 0;
7689 default:
7690 break;
7694 val = tr32(ofs);
7695 val &= ~enable_bit;
7696 tw32_f(ofs, val);
7698 for (i = 0; i < MAX_WAIT_CNT; i++) {
7699 udelay(100);
7700 val = tr32(ofs);
7701 if ((val & enable_bit) == 0)
7702 break;
7705 if (i == MAX_WAIT_CNT && !silent) {
7706 dev_err(&tp->pdev->dev,
7707 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7708 ofs, enable_bit);
7709 return -ENODEV;
7712 return 0;
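/* tg3_stop_block() is the generic clear-enable-and-poll idiom:
 * with MAX_WAIT_CNT = 1000 iterations of udelay(100), a block
 * gets up to 100ms to quiesce before -ENODEV is returned. The
 * same bounded-poll shape recurs below for MAC_TX_MODE.
 */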
7715 /* tp->lock is held. */
7716 static int tg3_abort_hw(struct tg3 *tp, int silent)
7718 int i, err;
7720 tg3_disable_ints(tp);
7722 tp->rx_mode &= ~RX_MODE_ENABLE;
7723 tw32_f(MAC_RX_MODE, tp->rx_mode);
7724 udelay(10);
7726 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7727 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7728 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7729 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7730 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7731 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7733 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7734 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7735 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7736 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7737 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7738 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7739 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7741 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7742 tw32_f(MAC_MODE, tp->mac_mode);
7743 udelay(40);
7745 tp->tx_mode &= ~TX_MODE_ENABLE;
7746 tw32_f(MAC_TX_MODE, tp->tx_mode);
7748 for (i = 0; i < MAX_WAIT_CNT; i++) {
7749 udelay(100);
7750 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7751 break;
7753 if (i >= MAX_WAIT_CNT) {
7754 dev_err(&tp->pdev->dev,
7755 "%s timed out, TX_MODE_ENABLE will not clear "
7756 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7757 err |= -ENODEV;
7760 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7761 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7762 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7764 tw32(FTQ_RESET, 0xffffffff);
7765 tw32(FTQ_RESET, 0x00000000);
7767 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7768 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7770 for (i = 0; i < tp->irq_cnt; i++) {
7771 struct tg3_napi *tnapi = &tp->napi[i];
7772 if (tnapi->hw_status)
7773 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7776 return err;
7779 /* Save PCI command register before chip reset */
7780 static void tg3_save_pci_state(struct tg3 *tp)
7782 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7785 /* Restore PCI state after chip reset */
7786 static void tg3_restore_pci_state(struct tg3 *tp)
7788 u32 val;
7790 /* Re-enable indirect register accesses. */
7791 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7792 tp->misc_host_ctrl);
7794 /* Set MAX PCI retry to zero. */
7795 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7796 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7797 tg3_flag(tp, PCIX_MODE))
7798 val |= PCISTATE_RETRY_SAME_DMA;
7799 /* Allow reads and writes to the APE register and memory space. */
7800 if (tg3_flag(tp, ENABLE_APE))
7801 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7802 PCISTATE_ALLOW_APE_SHMEM_WR |
7803 PCISTATE_ALLOW_APE_PSPACE_WR;
7804 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7806 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7808 if (!tg3_flag(tp, PCI_EXPRESS)) {
7809 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7810 tp->pci_cacheline_sz);
7811 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7812 tp->pci_lat_timer);
7815 /* Make sure PCI-X relaxed ordering bit is clear. */
7816 if (tg3_flag(tp, PCIX_MODE)) {
7817 u16 pcix_cmd;
7819 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7820 &pcix_cmd);
7821 pcix_cmd &= ~PCI_X_CMD_ERO;
7822 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7823 pcix_cmd);
7826 if (tg3_flag(tp, 5780_CLASS)) {
7828 /* Chip reset on 5780 will reset MSI enable bit,
7829 * so need to restore it.
7831 if (tg3_flag(tp, USING_MSI)) {
7832 u16 ctrl;
7834 pci_read_config_word(tp->pdev,
7835 tp->msi_cap + PCI_MSI_FLAGS,
7836 &ctrl);
7837 pci_write_config_word(tp->pdev,
7838 tp->msi_cap + PCI_MSI_FLAGS,
7839 ctrl | PCI_MSI_FLAGS_ENABLE);
7840 val = tr32(MSGINT_MODE);
7841 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7846 /* tp->lock is held. */
7847 static int tg3_chip_reset(struct tg3 *tp)
7849 u32 val;
7850 void (*write_op)(struct tg3 *, u32, u32);
7851 int i, err;
7853 tg3_nvram_lock(tp);
7855 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7857 /* No matching tg3_nvram_unlock() after this because
7858 * chip reset below will undo the nvram lock.
7860 tp->nvram_lock_cnt = 0;
7862 /* GRC_MISC_CFG core clock reset will clear the memory
7863 * enable bit in PCI register 4 and the MSI enable bit
7864 * on some chips, so we save relevant registers here.
7866 tg3_save_pci_state(tp);
7868 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7869 tg3_flag(tp, 5755_PLUS))
7870 tw32(GRC_FASTBOOT_PC, 0);
7872 /*
7873 * We must avoid the readl() that normally takes place.
7874 * It locks machines, causes machine checks, and other
7875 * fun things. So, temporarily disable the 5701
7876 * hardware workaround, while we do the reset.
7877 */
7878 write_op = tp->write32;
7879 if (write_op == tg3_write_flush_reg32)
7880 tp->write32 = tg3_write32;
7882 /* Prevent the irq handler from reading or writing PCI registers
7883 * during chip reset when the memory enable bit in the PCI command
7884 * register may be cleared. The chip does not generate interrupt
7885 * at this time, but the irq handler may still be called due to irq
7886 * sharing or irqpoll.
7888 tg3_flag_set(tp, CHIP_RESETTING);
7889 for (i = 0; i < tp->irq_cnt; i++) {
7890 struct tg3_napi *tnapi = &tp->napi[i];
7891 if (tnapi->hw_status) {
7892 tnapi->hw_status->status = 0;
7893 tnapi->hw_status->status_tag = 0;
7895 tnapi->last_tag = 0;
7896 tnapi->last_irq_tag = 0;
7898 smp_mb();
7900 for (i = 0; i < tp->irq_cnt; i++)
7901 synchronize_irq(tp->napi[i].irq_vec);
7903 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7904 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7905 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7908 /* do the reset */
7909 val = GRC_MISC_CFG_CORECLK_RESET;
7911 if (tg3_flag(tp, PCI_EXPRESS)) {
7912 /* Force PCIe 1.0a mode */
7913 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7914 !tg3_flag(tp, 57765_PLUS) &&
7915 tr32(TG3_PCIE_PHY_TSTCTL) ==
7916 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7917 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7919 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7920 tw32(GRC_MISC_CFG, (1 << 29));
7921 val |= (1 << 29);
7925 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7926 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7927 tw32(GRC_VCPU_EXT_CTRL,
7928 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7931 /* Manage gphy power for all CPMU absent PCIe devices. */
7932 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7933 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7935 tw32(GRC_MISC_CFG, val);
7937 /* restore 5701 hardware bug workaround write method */
7938 tp->write32 = write_op;
7940 /* Unfortunately, we have to delay before the PCI read back.
7941 * Some 575X chips even will not respond to a PCI cfg access
7942 * when the reset command is given to the chip.
7944 * How do these hardware designers expect things to work
7945 * properly if the PCI write is posted for a long period
7946 * of time? It is always necessary to have some method by
7947 * which a register read back can occur to push the write
7948 * out which does the reset.
7950 * For most tg3 variants the trick below was working.
7951 * Ho hum...
7953 udelay(120);
7955 /* Flush PCI posted writes. The normal MMIO registers
7956 * are inaccessible at this time so this is the only
7957 * way to make this reliably (actually, this is no longer
7958 * the case, see above). I tried to use indirect
7959 * register read/write but this upset some 5701 variants.
7961 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7963 udelay(120);
7965 if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7966 u16 val16;
7968 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7969 int i;
7970 u32 cfg_val;
7972 /* Wait for link training to complete. */
7973 for (i = 0; i < 5000; i++)
7974 udelay(100);
7976 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7977 pci_write_config_dword(tp->pdev, 0xc4,
7978 cfg_val | (1 << 15));
7981 /* Clear the "no snoop" and "relaxed ordering" bits. */
7982 pci_read_config_word(tp->pdev,
7983 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7984 &val16);
7985 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7986 PCI_EXP_DEVCTL_NOSNOOP_EN);
7987 /*
7988 * Older PCIe devices only support the 128 byte
7989 * MPS setting. Enforce the restriction.
7990 */
7991 if (!tg3_flag(tp, CPMU_PRESENT))
7992 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7993 pci_write_config_word(tp->pdev,
7994 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7995 val16);
7997 /* Clear error status */
7998 pci_write_config_word(tp->pdev,
7999 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
8000 PCI_EXP_DEVSTA_CED |
8001 PCI_EXP_DEVSTA_NFED |
8002 PCI_EXP_DEVSTA_FED |
8003 PCI_EXP_DEVSTA_URD);
8006 tg3_restore_pci_state(tp);
8008 tg3_flag_clear(tp, CHIP_RESETTING);
8009 tg3_flag_clear(tp, ERROR_PROCESSED);
8011 val = 0;
8012 if (tg3_flag(tp, 5780_CLASS))
8013 val = tr32(MEMARB_MODE);
8014 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8016 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
8017 tg3_stop_fw(tp);
8018 tw32(0x5000, 0x400);
8021 tw32(GRC_MODE, tp->grc_mode);
8023 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
8024 val = tr32(0xc4);
8026 tw32(0xc4, val | (1 << 15));
8029 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8030 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8031 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8032 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
8033 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8034 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8037 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8038 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8039 val = tp->mac_mode;
8040 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8041 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8042 val = tp->mac_mode;
8043 } else
8044 val = 0;
8046 tw32_f(MAC_MODE, val);
8047 udelay(40);
8049 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8051 err = tg3_poll_fw(tp);
8052 if (err)
8053 return err;
8055 tg3_mdio_start(tp);
8057 if (tg3_flag(tp, PCI_EXPRESS) &&
8058 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8059 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8060 !tg3_flag(tp, 57765_PLUS)) {
8061 val = tr32(0x7c00);
8063 tw32(0x7c00, val | (1 << 25));
8066 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8067 val = tr32(TG3_CPMU_CLCK_ORIDE);
8068 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8071 /* Reprobe ASF enable state. */
8072 tg3_flag_clear(tp, ENABLE_ASF);
8073 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8074 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8075 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8076 u32 nic_cfg;
8078 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8079 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8080 tg3_flag_set(tp, ENABLE_ASF);
8081 tp->last_event_jiffies = jiffies;
8082 if (tg3_flag(tp, 5750_PLUS))
8083 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8087 return 0;
8090 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8091 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8093 /* tp->lock is held. */
8094 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8096 int err;
8098 tg3_stop_fw(tp);
8100 tg3_write_sig_pre_reset(tp, kind);
8102 tg3_abort_hw(tp, silent);
8103 err = tg3_chip_reset(tp);
8105 __tg3_set_mac_addr(tp, 0);
8107 tg3_write_sig_legacy(tp, kind);
8108 tg3_write_sig_post_reset(tp, kind);
8110 if (tp->hw_stats) {
8111 /* Save the stats across chip resets... */
8112 tg3_get_nstats(tp, &tp->net_stats_prev);
8113 tg3_get_estats(tp, &tp->estats_prev);
8115 /* And make sure the next sample is new data */
8116 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8119 if (err)
8120 return err;
8122 return 0;
8125 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8127 struct tg3 *tp = netdev_priv(dev);
8128 struct sockaddr *addr = p;
8129 int err = 0, skip_mac_1 = 0;
8131 if (!is_valid_ether_addr(addr->sa_data))
8132 return -EADDRNOTAVAIL;
8134 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8136 if (!netif_running(dev))
8137 return 0;
8139 if (tg3_flag(tp, ENABLE_ASF)) {
8140 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8142 addr0_high = tr32(MAC_ADDR_0_HIGH);
8143 addr0_low = tr32(MAC_ADDR_0_LOW);
8144 addr1_high = tr32(MAC_ADDR_1_HIGH);
8145 addr1_low = tr32(MAC_ADDR_1_LOW);
8147 /* Skip MAC addr 1 if ASF is using it. */
8148 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8149 !(addr1_high == 0 && addr1_low == 0))
8150 skip_mac_1 = 1;
8152 spin_lock_bh(&tp->lock);
8153 __tg3_set_mac_addr(tp, skip_mac_1);
8154 spin_unlock_bh(&tp->lock);
8156 return err;
8159 /* tp->lock is held. */
8160 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8161 dma_addr_t mapping, u32 maxlen_flags,
8162 u32 nic_addr)
8164 tg3_write_mem(tp,
8165 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8166 ((u64) mapping >> 32));
8167 tg3_write_mem(tp,
8168 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8169 ((u64) mapping & 0xffffffff));
8170 tg3_write_mem(tp,
8171 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8172 maxlen_flags);
8174 if (!tg3_flag(tp, 5705_PLUS))
8175 tg3_write_mem(tp,
8176 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8177 nic_addr);
8180 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8182 int i;
8184 if (!tg3_flag(tp, ENABLE_TSS)) {
8185 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8186 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8187 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8188 } else {
8189 tw32(HOSTCC_TXCOL_TICKS, 0);
8190 tw32(HOSTCC_TXMAX_FRAMES, 0);
8191 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8194 if (!tg3_flag(tp, ENABLE_RSS)) {
8195 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8196 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8197 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8198 } else {
8199 tw32(HOSTCC_RXCOL_TICKS, 0);
8200 tw32(HOSTCC_RXMAX_FRAMES, 0);
8201 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8204 if (!tg3_flag(tp, 5705_PLUS)) {
8205 u32 val = ec->stats_block_coalesce_usecs;
8207 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8208 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8210 if (!netif_carrier_ok(tp->dev))
8211 val = 0;
8213 tw32(HOSTCC_STAT_COAL_TICKS, val);
8216 for (i = 0; i < tp->irq_cnt - 1; i++) {
8217 u32 reg;
8219 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8220 tw32(reg, ec->rx_coalesce_usecs);
8221 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8222 tw32(reg, ec->rx_max_coalesced_frames);
8223 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8224 tw32(reg, ec->rx_max_coalesced_frames_irq);
8226 if (tg3_flag(tp, ENABLE_TSS)) {
8227 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8228 tw32(reg, ec->tx_coalesce_usecs);
8229 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8230 tw32(reg, ec->tx_max_coalesced_frames);
8231 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8232 tw32(reg, ec->tx_max_coalesced_frames_irq);
8236 for (; i < tp->irq_max - 1; i++) {
8237 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8238 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8239 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8241 if (tg3_flag(tp, ENABLE_TSS)) {
8242 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8243 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8244 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8249 /* tp->lock is held. */
8250 static void tg3_rings_reset(struct tg3 *tp)
8252 int i;
8253 u32 stblk, txrcb, rxrcb, limit;
8254 struct tg3_napi *tnapi = &tp->napi[0];
8256 /* Disable all transmit rings but the first. */
8257 if (!tg3_flag(tp, 5705_PLUS))
8258 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8259 else if (tg3_flag(tp, 5717_PLUS))
8260 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8261 else if (tg3_flag(tp, 57765_CLASS))
8262 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8263 else
8264 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8266 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8267 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8268 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8269 BDINFO_FLAGS_DISABLED);
8272 /* Disable all receive return rings but the first. */
8273 if (tg3_flag(tp, 5717_PLUS))
8274 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8275 else if (!tg3_flag(tp, 5705_PLUS))
8276 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8277 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8278 tg3_flag(tp, 57765_CLASS))
8279 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8280 else
8281 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8283 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8284 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8285 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8286 BDINFO_FLAGS_DISABLED);
8288 /* Disable interrupts */
8289 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8290 tp->napi[0].chk_msi_cnt = 0;
8291 tp->napi[0].last_rx_cons = 0;
8292 tp->napi[0].last_tx_cons = 0;
8294 /* Zero mailbox registers. */
8295 if (tg3_flag(tp, SUPPORT_MSIX)) {
8296 for (i = 1; i < tp->irq_max; i++) {
8297 tp->napi[i].tx_prod = 0;
8298 tp->napi[i].tx_cons = 0;
8299 if (tg3_flag(tp, ENABLE_TSS))
8300 tw32_mailbox(tp->napi[i].prodmbox, 0);
8301 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8302 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8303 tp->napi[i].chk_msi_cnt = 0;
8304 tp->napi[i].last_rx_cons = 0;
8305 tp->napi[i].last_tx_cons = 0;
8307 if (!tg3_flag(tp, ENABLE_TSS))
8308 tw32_mailbox(tp->napi[0].prodmbox, 0);
8309 } else {
8310 tp->napi[0].tx_prod = 0;
8311 tp->napi[0].tx_cons = 0;
8312 tw32_mailbox(tp->napi[0].prodmbox, 0);
8313 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8316 /* Make sure the NIC-based send BD rings are disabled. */
8317 if (!tg3_flag(tp, 5705_PLUS)) {
8318 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8319 for (i = 0; i < 16; i++)
8320 tw32_tx_mbox(mbox + i * 8, 0);
8323 txrcb = NIC_SRAM_SEND_RCB;
8324 rxrcb = NIC_SRAM_RCV_RET_RCB;
8326 /* Clear status block in ram. */
8327 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8329 /* Set status block DMA address */
8330 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8331 ((u64) tnapi->status_mapping >> 32));
8332 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8333 ((u64) tnapi->status_mapping & 0xffffffff));
8335 if (tnapi->tx_ring) {
8336 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8337 (TG3_TX_RING_SIZE <<
8338 BDINFO_FLAGS_MAXLEN_SHIFT),
8339 NIC_SRAM_TX_BUFFER_DESC);
8340 txrcb += TG3_BDINFO_SIZE;
8343 if (tnapi->rx_rcb) {
8344 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8345 (tp->rx_ret_ring_mask + 1) <<
8346 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8347 rxrcb += TG3_BDINFO_SIZE;
8350 stblk = HOSTCC_STATBLCK_RING1;
8352 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8353 u64 mapping = (u64)tnapi->status_mapping;
8354 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8355 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8357 /* Clear status block in ram. */
8358 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8360 if (tnapi->tx_ring) {
8361 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8362 (TG3_TX_RING_SIZE <<
8363 BDINFO_FLAGS_MAXLEN_SHIFT),
8364 NIC_SRAM_TX_BUFFER_DESC);
8365 txrcb += TG3_BDINFO_SIZE;
8368 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8369 ((tp->rx_ret_ring_mask + 1) <<
8370 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8372 stblk += 8;
8373 rxrcb += TG3_BDINFO_SIZE;
8377 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8379 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8381 if (!tg3_flag(tp, 5750_PLUS) ||
8382 tg3_flag(tp, 5780_CLASS) ||
8383 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8384 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8385 tg3_flag(tp, 57765_PLUS))
8386 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8387 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8388 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8389 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8390 else
8391 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8393 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8394 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8396 val = min(nic_rep_thresh, host_rep_thresh);
8397 tw32(RCVBDI_STD_THRESH, val);
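/* The standard-ring replenish threshold written above is the smaller of
 * what the on-chip BD cache can absorb (bdcache_maxcnt / 2, capped by
 * rx_std_max_post) and one eighth of the host ring; e.g. an rx_pending
 * of 200 gives a host-side threshold of 25 descriptors.
 */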
8399 if (tg3_flag(tp, 57765_PLUS))
8400 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8402 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8403 return;
8405 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8407 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8409 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8410 tw32(RCVBDI_JUMBO_THRESH, val);
8412 if (tg3_flag(tp, 57765_PLUS))
8413 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8416 static inline u32 calc_crc(unsigned char *buf, int len)
8418 u32 reg;
8419 u32 tmp;
8420 int j, k;
8422 reg = 0xffffffff;
8424 for (j = 0; j < len; j++) {
8425 reg ^= buf[j];
8427 for (k = 0; k < 8; k++) {
8428 tmp = reg & 0x01;
8430 reg >>= 1;
8432 if (tmp)
8433 reg ^= 0xedb88320;
8437 return ~reg;
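/* calc_crc() is the bit-reflected CRC-32 (polynomial 0xedb88320) with the
 * usual 0xffffffff preset and a final complement. A minimal sketch of how
 * the multicast hash below could be derived with the generic helper
 * instead, assuming CONFIG_CRC32 and ether_crc_le() from <linux/crc32.h>
 * (defined as crc32_le(~0, data, len)) are available:
 *
 *	bit = ether_crc_le(ETH_ALEN, ha->addr) & 0x7f;
 *
 * ether_crc_le() omits the final complement, which cancels against the
 * `~crc` taken in __tg3_set_rx_mode(), so the hash index matches.
 */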
8440 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8442 /* accept or reject all multicast frames */
8443 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8444 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8445 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8446 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8449 static void __tg3_set_rx_mode(struct net_device *dev)
8451 struct tg3 *tp = netdev_priv(dev);
8452 u32 rx_mode;
8454 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8455 RX_MODE_KEEP_VLAN_TAG);
8457 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8458 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8459 * flag clear.
8460 */
8461 if (!tg3_flag(tp, ENABLE_ASF))
8462 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8463 #endif
8465 if (dev->flags & IFF_PROMISC) {
8466 /* Promiscuous mode. */
8467 rx_mode |= RX_MODE_PROMISC;
8468 } else if (dev->flags & IFF_ALLMULTI) {
8469 /* Accept all multicast. */
8470 tg3_set_multi(tp, 1);
8471 } else if (netdev_mc_empty(dev)) {
8472 /* Reject all multicast. */
8473 tg3_set_multi(tp, 0);
8474 } else {
8475 /* Accept one or more multicast(s). */
8476 struct netdev_hw_addr *ha;
8477 u32 mc_filter[4] = { 0, };
8478 u32 regidx;
8479 u32 bit;
8480 u32 crc;
8482 netdev_for_each_mc_addr(ha, dev) {
8483 crc = calc_crc(ha->addr, ETH_ALEN);
8484 bit = ~crc & 0x7f;
8485 regidx = (bit & 0x60) >> 5;
8486 bit &= 0x1f;
8487 mc_filter[regidx] |= (1 << bit);
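/* The low 7 bits of the complemented CRC index a 128-bit hash table
 * split across four 32-bit registers: bits 6:5 select the register,
 * bits 4:0 the bit within it. For example, a hash of 0x6b sets bit 11
 * of MAC_HASH_REG_3.
 */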
8490 tw32(MAC_HASH_REG_0, mc_filter[0]);
8491 tw32(MAC_HASH_REG_1, mc_filter[1]);
8492 tw32(MAC_HASH_REG_2, mc_filter[2]);
8493 tw32(MAC_HASH_REG_3, mc_filter[3]);
8496 if (rx_mode != tp->rx_mode) {
8497 tp->rx_mode = rx_mode;
8498 tw32_f(MAC_RX_MODE, rx_mode);
8499 udelay(10);
8503 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8505 int i;
8507 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8508 tp->rss_ind_tbl[i] =
8509 ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8512 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8514 int i;
8516 if (!tg3_flag(tp, SUPPORT_MSIX))
8517 return;
8519 if (tp->irq_cnt <= 2) {
8520 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8521 return;
8524 /* Validate table against current IRQ count */
8525 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8526 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8527 break;
8530 if (i != TG3_RSS_INDIR_TBL_SIZE)
8531 tg3_rss_init_dflt_indir_tbl(tp);
8534 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8536 int i = 0;
8537 u32 reg = MAC_RSS_INDIR_TBL_0;
8539 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8540 u32 val = tp->rss_ind_tbl[i];
8541 i++;
8542 for (; i % 8; i++) {
8543 val <<= 4;
8544 val |= tp->rss_ind_tbl[i];
8546 tw32(reg, val);
8547 reg += 4;
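/* The loop above packs eight 4-bit queue indices into each 32-bit
 * indirection register, first entry in the most significant nibble.
 * For instance, table entries { 1, 2, 3, 0, 1, 2, 3, 0 } are written
 * as the single value 0x12301230.
 */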
8551 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
8553 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8554 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
8555 else
8556 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
8559 /* tp->lock is held. */
8560 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8562 u32 val, rdmac_mode;
8563 int i, err, limit;
8564 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8566 tg3_disable_ints(tp);
8568 tg3_stop_fw(tp);
8570 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8572 if (tg3_flag(tp, INIT_COMPLETE))
8573 tg3_abort_hw(tp, 1);
8575 /* Enable MAC control of LPI */
8576 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8577 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8578 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8579 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8581 tw32_f(TG3_CPMU_EEE_CTRL,
8582 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8584 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8585 TG3_CPMU_EEEMD_LPI_IN_TX |
8586 TG3_CPMU_EEEMD_LPI_IN_RX |
8587 TG3_CPMU_EEEMD_EEE_ENABLE;
8589 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8590 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8592 if (tg3_flag(tp, ENABLE_APE))
8593 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8595 tw32_f(TG3_CPMU_EEE_MODE, val);
8597 tw32_f(TG3_CPMU_EEE_DBTMR1,
8598 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8599 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8601 tw32_f(TG3_CPMU_EEE_DBTMR2,
8602 TG3_CPMU_DBTMR2_APE_TX_2047US |
8603 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
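/* tw32_f() is the write-and-flush variant of tw32(): it posts the
 * register write and immediately reads the register back, so the write
 * is guaranteed to have reached the chip before execution continues.
 */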
8606 if (reset_phy)
8607 tg3_phy_reset(tp);
8609 err = tg3_chip_reset(tp);
8610 if (err)
8611 return err;
8613 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8615 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8616 val = tr32(TG3_CPMU_CTRL);
8617 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8618 tw32(TG3_CPMU_CTRL, val);
8620 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8621 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8622 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8623 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8625 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8626 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8627 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8628 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8630 val = tr32(TG3_CPMU_HST_ACC);
8631 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8632 val |= CPMU_HST_ACC_MACCLK_6_25;
8633 tw32(TG3_CPMU_HST_ACC, val);
8636 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8637 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8638 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8639 PCIE_PWR_MGMT_L1_THRESH_4MS;
8640 tw32(PCIE_PWR_MGMT_THRESH, val);
8642 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8643 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8645 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8647 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8648 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8651 if (tg3_flag(tp, L1PLLPD_EN)) {
8652 u32 grc_mode = tr32(GRC_MODE);
8654 /* Access the lower 1K of PL PCIE block registers. */
8655 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8656 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8658 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8659 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8660 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8662 tw32(GRC_MODE, grc_mode);
8665 if (tg3_flag(tp, 57765_CLASS)) {
8666 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8667 u32 grc_mode = tr32(GRC_MODE);
8669 /* Access the lower 1K of PL PCIE block registers. */
8670 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8671 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8673 val = tr32(TG3_PCIE_TLDLPL_PORT +
8674 TG3_PCIE_PL_LO_PHYCTL5);
8675 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8676 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8678 tw32(GRC_MODE, grc_mode);
8681 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8682 u32 grc_mode = tr32(GRC_MODE);
8684 /* Access the lower 1K of DL PCIE block registers. */
8685 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8686 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8688 val = tr32(TG3_PCIE_TLDLPL_PORT +
8689 TG3_PCIE_DL_LO_FTSMAX);
8690 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8691 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8692 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8694 tw32(GRC_MODE, grc_mode);
8697 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8698 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8699 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8700 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8703 /* This works around an issue with Athlon chipsets on
8704 * B3 tigon3 silicon. This bit has no effect on any
8705 * other revision. But do not set this on PCI Express
8706 * chips and don't even touch the clocks if the CPMU is present.
8707 */
8708 if (!tg3_flag(tp, CPMU_PRESENT)) {
8709 if (!tg3_flag(tp, PCI_EXPRESS))
8710 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8711 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8714 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8715 tg3_flag(tp, PCIX_MODE)) {
8716 val = tr32(TG3PCI_PCISTATE);
8717 val |= PCISTATE_RETRY_SAME_DMA;
8718 tw32(TG3PCI_PCISTATE, val);
8721 if (tg3_flag(tp, ENABLE_APE)) {
8722 /* Allow reads and writes to the
8723 * APE register and memory space.
8724 */
8725 val = tr32(TG3PCI_PCISTATE);
8726 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8727 PCISTATE_ALLOW_APE_SHMEM_WR |
8728 PCISTATE_ALLOW_APE_PSPACE_WR;
8729 tw32(TG3PCI_PCISTATE, val);
8732 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8733 /* Enable some hw fixes. */
8734 val = tr32(TG3PCI_MSI_DATA);
8735 val |= (1 << 26) | (1 << 28) | (1 << 29);
8736 tw32(TG3PCI_MSI_DATA, val);
8739 /* Descriptor ring init may make accesses to the
8740 * NIC SRAM area to setup the TX descriptors, so we
8741 * can only do this after the hardware has been
8742 * successfully reset.
8743 */
8744 err = tg3_init_rings(tp);
8745 if (err)
8746 return err;
8748 if (tg3_flag(tp, 57765_PLUS)) {
8749 val = tr32(TG3PCI_DMA_RW_CTRL) &
8750 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8751 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8752 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8753 if (!tg3_flag(tp, 57765_CLASS) &&
8754 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8755 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8756 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8757 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8758 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8759 /* This value is determined during the probe time DMA
8760 * engine test, tg3_test_dma.
8761 */
8762 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8765 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8766 GRC_MODE_4X_NIC_SEND_RINGS |
8767 GRC_MODE_NO_TX_PHDR_CSUM |
8768 GRC_MODE_NO_RX_PHDR_CSUM);
8769 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8771 /* Pseudo-header checksum is done by hardware logic and not
8772 * the offload processors, so make the chip do the pseudo-
8773 * header checksums on receive. For transmit it is more
8774 * convenient to do the pseudo-header checksum in software,
8775 * as Linux does that on transmit for us in all cases.
8776 */
8777 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8779 tw32(GRC_MODE,
8780 tp->grc_mode |
8781 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8783 /* Setup the timer prescaler register. Clock is always 66 MHz. */
8784 val = tr32(GRC_MISC_CFG);
8785 val &= ~0xff;
8786 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8787 tw32(GRC_MISC_CFG, val);
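/* The value 65 programmed above yields a 1 MHz (1 usec granularity)
 * local timer tick, assuming the prescaler divides the 66 MHz core
 * clock by N + 1.
 */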
8789 /* Initialize MBUF/DESC pool. */
8790 if (tg3_flag(tp, 5750_PLUS)) {
8791 /* Do nothing. */
8792 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8793 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8794 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8795 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8796 else
8797 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8798 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8799 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8800 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8801 int fw_len;
8803 fw_len = tp->fw_len;
8804 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8805 tw32(BUFMGR_MB_POOL_ADDR,
8806 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8807 tw32(BUFMGR_MB_POOL_SIZE,
8808 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
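/* On these TSO-capable 5705-class chips the TSO firmware is resident at
 * the bottom of the MBUF pool SRAM, so the pool base is bumped up by the
 * firmware length (rounded up to a 128-byte boundary by the arithmetic
 * above, e.g. 0x1234 -> 0x1280) and the pool size shrunk to match; the
 * extra 0xa00 bytes appear to be reserved scratch space.
 */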
8811 if (tp->dev->mtu <= ETH_DATA_LEN) {
8812 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8813 tp->bufmgr_config.mbuf_read_dma_low_water);
8814 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8815 tp->bufmgr_config.mbuf_mac_rx_low_water);
8816 tw32(BUFMGR_MB_HIGH_WATER,
8817 tp->bufmgr_config.mbuf_high_water);
8818 } else {
8819 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8820 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8821 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8822 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8823 tw32(BUFMGR_MB_HIGH_WATER,
8824 tp->bufmgr_config.mbuf_high_water_jumbo);
8826 tw32(BUFMGR_DMA_LOW_WATER,
8827 tp->bufmgr_config.dma_low_water);
8828 tw32(BUFMGR_DMA_HIGH_WATER,
8829 tp->bufmgr_config.dma_high_water);
8831 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8832 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8833 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8834 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8835 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8836 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8837 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8838 tw32(BUFMGR_MODE, val);
8839 for (i = 0; i < 2000; i++) {
8840 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8841 break;
8842 udelay(10);
8844 if (i >= 2000) {
8845 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8846 return -ENODEV;
8849 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8850 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8852 tg3_setup_rxbd_thresholds(tp);
8854 /* Initialize TG3_BDINFO's at:
8855 * RCVDBDI_STD_BD: standard eth size rx ring
8856 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8857 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8859 * like so:
8860 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8861 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8862 * ring attribute flags
8863 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8865 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8866 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8868 * The size of each ring is fixed in the firmware, but the location is
8869 * configurable.
8870 */
8871 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8872 ((u64) tpr->rx_std_mapping >> 32));
8873 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8874 ((u64) tpr->rx_std_mapping & 0xffffffff));
8875 if (!tg3_flag(tp, 5717_PLUS))
8876 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8877 NIC_SRAM_RX_BUFFER_DESC);
8879 /* Disable the mini ring */
8880 if (!tg3_flag(tp, 5705_PLUS))
8881 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8882 BDINFO_FLAGS_DISABLED);
8884 /* Program the jumbo buffer descriptor ring control
8885 * blocks on those devices that have them.
8886 */
8887 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8888 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8890 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8891 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8892 ((u64) tpr->rx_jmb_mapping >> 32));
8893 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8894 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8895 val = TG3_RX_JMB_RING_SIZE(tp) <<
8896 BDINFO_FLAGS_MAXLEN_SHIFT;
8897 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8898 val | BDINFO_FLAGS_USE_EXT_RECV);
8899 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8900 tg3_flag(tp, 57765_CLASS))
8901 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8902 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8903 } else {
8904 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8905 BDINFO_FLAGS_DISABLED);
8908 if (tg3_flag(tp, 57765_PLUS)) {
8909 val = TG3_RX_STD_RING_SIZE(tp);
8910 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8911 val |= (TG3_RX_STD_DMA_SZ << 2);
8912 } else
8913 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8914 } else
8915 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8917 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8919 tpr->rx_std_prod_idx = tp->rx_pending;
8920 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8922 tpr->rx_jmb_prod_idx =
8923 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8924 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8926 tg3_rings_reset(tp);
8928 /* Initialize MAC address and backoff seed. */
8929 __tg3_set_mac_addr(tp, 0);
8931 /* MTU + ethernet header + FCS + optional VLAN tag */
8932 tw32(MAC_RX_MTU_SIZE,
8933 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8935 /* The slot time is changed by tg3_setup_phy if we
8936 * run at gigabit with half duplex.
8937 */
8938 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8939 (6 << TX_LENGTHS_IPG_SHIFT) |
8940 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8942 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8943 val |= tr32(MAC_TX_LENGTHS) &
8944 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8945 TX_LENGTHS_CNT_DWN_VAL_MSK);
8947 tw32(MAC_TX_LENGTHS, val);
8949 /* Receive rules. */
8950 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8951 tw32(RCVLPC_CONFIG, 0x0181);
8953 /* Calculate RDMAC_MODE setting early, we need it to determine
8954 * the RCVLPC_STATE_ENABLE mask.
8955 */
8956 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8957 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8958 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8959 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8960 RDMAC_MODE_LNGREAD_ENAB);
8962 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8963 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8965 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8966 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8967 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8968 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8969 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8970 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8972 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8973 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8974 if (tg3_flag(tp, TSO_CAPABLE) &&
8975 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8976 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8977 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8978 !tg3_flag(tp, IS_5788)) {
8979 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8983 if (tg3_flag(tp, PCI_EXPRESS))
8984 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8986 if (tg3_flag(tp, HW_TSO_1) ||
8987 tg3_flag(tp, HW_TSO_2) ||
8988 tg3_flag(tp, HW_TSO_3))
8989 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8991 if (tg3_flag(tp, 57765_PLUS) ||
8992 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8993 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8994 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8996 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8997 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8999 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9000 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9001 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9002 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
9003 tg3_flag(tp, 57765_PLUS)) {
9004 val = tr32(TG3_RDMA_RSRVCTRL_REG);
9005 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
9006 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9007 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9008 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9009 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9010 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9011 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9013 tw32(TG3_RDMA_RSRVCTRL_REG,
9014 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9017 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9018 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9019 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9020 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
9021 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9022 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9025 /* Receive/send statistics. */
9026 if (tg3_flag(tp, 5750_PLUS)) {
9027 val = tr32(RCVLPC_STATS_ENABLE);
9028 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9029 tw32(RCVLPC_STATS_ENABLE, val);
9030 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9031 tg3_flag(tp, TSO_CAPABLE)) {
9032 val = tr32(RCVLPC_STATS_ENABLE);
9033 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9034 tw32(RCVLPC_STATS_ENABLE, val);
9035 } else {
9036 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9038 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9039 tw32(SNDDATAI_STATSENAB, 0xffffff);
9040 tw32(SNDDATAI_STATSCTRL,
9041 (SNDDATAI_SCTRL_ENABLE |
9042 SNDDATAI_SCTRL_FASTUPD));
9044 /* Setup host coalescing engine. */
9045 tw32(HOSTCC_MODE, 0);
9046 for (i = 0; i < 2000; i++) {
9047 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9048 break;
9049 udelay(10);
9052 __tg3_set_coalesce(tp, &tp->coal);
9054 if (!tg3_flag(tp, 5705_PLUS)) {
9055 /* Status/statistics block address. See tg3_timer,
9056 * the tg3_periodic_fetch_stats call there, and
9057 * tg3_get_stats to see how this works for 5705/5750 chips.
9058 */
9059 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9060 ((u64) tp->stats_mapping >> 32));
9061 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9062 ((u64) tp->stats_mapping & 0xffffffff));
9063 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9065 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9067 /* Clear statistics and status block memory areas */
9068 for (i = NIC_SRAM_STATS_BLK;
9069 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9070 i += sizeof(u32)) {
9071 tg3_write_mem(tp, i, 0);
9072 udelay(40);
9076 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9078 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9079 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9080 if (!tg3_flag(tp, 5705_PLUS))
9081 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9083 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9084 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9085 /* reset to prevent losing 1st rx packet intermittently */
9086 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9087 udelay(10);
9090 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9091 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9092 MAC_MODE_FHDE_ENABLE;
9093 if (tg3_flag(tp, ENABLE_APE))
9094 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9095 if (!tg3_flag(tp, 5705_PLUS) &&
9096 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9097 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9098 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9099 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9100 udelay(40);
9102 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9103 * If TG3_FLAG_IS_NIC is zero, we should read the
9104 * register to preserve the GPIO settings for LOMs. The GPIOs,
9105 * whether used as inputs or outputs, are set by boot code after
9106 * reset.
9107 */
9108 if (!tg3_flag(tp, IS_NIC)) {
9109 u32 gpio_mask;
9111 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9112 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9113 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9115 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9116 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9117 GRC_LCLCTRL_GPIO_OUTPUT3;
9119 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9120 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9122 tp->grc_local_ctrl &= ~gpio_mask;
9123 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9125 /* GPIO1 must be driven high for eeprom write protect */
9126 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9127 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9128 GRC_LCLCTRL_GPIO_OUTPUT1);
9130 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9131 udelay(100);
9133 if (tg3_flag(tp, USING_MSIX)) {
9134 val = tr32(MSGINT_MODE);
9135 val |= MSGINT_MODE_ENABLE;
9136 if (tp->irq_cnt > 1)
9137 val |= MSGINT_MODE_MULTIVEC_EN;
9138 if (!tg3_flag(tp, 1SHOT_MSI))
9139 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9140 tw32(MSGINT_MODE, val);
9143 if (!tg3_flag(tp, 5705_PLUS)) {
9144 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9145 udelay(40);
9148 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9149 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9150 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9151 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9152 WDMAC_MODE_LNGREAD_ENAB);
9154 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9155 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9156 if (tg3_flag(tp, TSO_CAPABLE) &&
9157 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9158 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9159 /* nothing */
9160 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9161 !tg3_flag(tp, IS_5788)) {
9162 val |= WDMAC_MODE_RX_ACCEL;
9166 /* Enable host coalescing bug fix */
9167 if (tg3_flag(tp, 5755_PLUS))
9168 val |= WDMAC_MODE_STATUS_TAG_FIX;
9170 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9171 val |= WDMAC_MODE_BURST_ALL_DATA;
9173 tw32_f(WDMAC_MODE, val);
9174 udelay(40);
9176 if (tg3_flag(tp, PCIX_MODE)) {
9177 u16 pcix_cmd;
9179 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9180 &pcix_cmd);
9181 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9182 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9183 pcix_cmd |= PCI_X_CMD_READ_2K;
9184 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9185 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9186 pcix_cmd |= PCI_X_CMD_READ_2K;
9188 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9189 pcix_cmd);
9192 tw32_f(RDMAC_MODE, rdmac_mode);
9193 udelay(40);
9195 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9196 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9197 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9198 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9199 break;
9201 if (i < TG3_NUM_RDMA_CHANNELS) {
9202 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9203 val |= tg3_lso_rd_dma_workaround_bit(tp);
9204 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9205 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
9209 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9210 if (!tg3_flag(tp, 5705_PLUS))
9211 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9213 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9214 tw32(SNDDATAC_MODE,
9215 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9216 else
9217 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9219 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9220 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9221 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9222 if (tg3_flag(tp, LRG_PROD_RING_CAP))
9223 val |= RCVDBDI_MODE_LRG_RING_SZ;
9224 tw32(RCVDBDI_MODE, val);
9225 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9226 if (tg3_flag(tp, HW_TSO_1) ||
9227 tg3_flag(tp, HW_TSO_2) ||
9228 tg3_flag(tp, HW_TSO_3))
9229 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9230 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9231 if (tg3_flag(tp, ENABLE_TSS))
9232 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9233 tw32(SNDBDI_MODE, val);
9234 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9236 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9237 err = tg3_load_5701_a0_firmware_fix(tp);
9238 if (err)
9239 return err;
9242 if (tg3_flag(tp, TSO_CAPABLE)) {
9243 err = tg3_load_tso_firmware(tp);
9244 if (err)
9245 return err;
9248 tp->tx_mode = TX_MODE_ENABLE;
9250 if (tg3_flag(tp, 5755_PLUS) ||
9251 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9252 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9254 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9255 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9256 tp->tx_mode &= ~val;
9257 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9260 tw32_f(MAC_TX_MODE, tp->tx_mode);
9261 udelay(100);
9263 if (tg3_flag(tp, ENABLE_RSS)) {
9264 tg3_rss_write_indir_tbl(tp);
9266 /* Setup the "secret" hash key. */
9267 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9268 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9269 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9270 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9271 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9272 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9273 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9274 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9275 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9276 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9279 tp->rx_mode = RX_MODE_ENABLE;
9280 if (tg3_flag(tp, 5755_PLUS))
9281 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9283 if (tg3_flag(tp, ENABLE_RSS))
9284 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9285 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9286 RX_MODE_RSS_IPV6_HASH_EN |
9287 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9288 RX_MODE_RSS_IPV4_HASH_EN |
9289 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9291 tw32_f(MAC_RX_MODE, tp->rx_mode);
9292 udelay(10);
9294 tw32(MAC_LED_CTRL, tp->led_ctrl);
9296 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9297 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9298 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9299 udelay(10);
9301 tw32_f(MAC_RX_MODE, tp->rx_mode);
9302 udelay(10);
9304 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9305 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9306 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9307 /* Set drive transmission level to 1.2V */
9308 /* only if the signal pre-emphasis bit is not set */
9309 val = tr32(MAC_SERDES_CFG);
9310 val &= 0xfffff000;
9311 val |= 0x880;
9312 tw32(MAC_SERDES_CFG, val);
9314 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9315 tw32(MAC_SERDES_CFG, 0x616000);
9318 /* Prevent chip from dropping frames when flow control
9319 * is enabled.
9320 */
9321 if (tg3_flag(tp, 57765_CLASS))
9322 val = 1;
9323 else
9324 val = 2;
9325 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9327 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9328 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9329 /* Use hardware link auto-negotiation */
9330 tg3_flag_set(tp, HW_AUTONEG);
9333 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9334 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9335 u32 tmp;
9337 tmp = tr32(SERDES_RX_CTRL);
9338 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9339 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9340 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9341 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9344 if (!tg3_flag(tp, USE_PHYLIB)) {
9345 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9346 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9348 err = tg3_setup_phy(tp, 0);
9349 if (err)
9350 return err;
9352 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9353 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9354 u32 tmp;
9356 /* Clear CRC stats. */
9357 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9358 tg3_writephy(tp, MII_TG3_TEST1,
9359 tmp | MII_TG3_TEST1_CRC_EN);
9360 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9365 __tg3_set_rx_mode(tp->dev);
9367 /* Initialize receive rules. */
9368 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9369 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9370 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9371 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9373 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9374 limit = 8;
9375 else
9376 limit = 16;
9377 if (tg3_flag(tp, ENABLE_ASF))
9378 limit -= 4;
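/* The switch below relies on case fall-through: starting at `limit`,
 * each case clears one rule/value register pair, walking down through
 * rule 4. Rules 3 and 2 are deliberately left alone (see the
 * commented-out writes), and rules 1 and 0 were programmed above.
 */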
9379 switch (limit) {
9380 case 16:
9381 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9382 case 15:
9383 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9384 case 14:
9385 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9386 case 13:
9387 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9388 case 12:
9389 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9390 case 11:
9391 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9392 case 10:
9393 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9394 case 9:
9395 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9396 case 8:
9397 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9398 case 7:
9399 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9400 case 6:
9401 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9402 case 5:
9403 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9404 case 4:
9405 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9406 case 3:
9407 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9408 case 2:
9409 case 1:
9411 default:
9412 break;
9415 if (tg3_flag(tp, ENABLE_APE))
9416 /* Write our heartbeat update interval to APE. */
9417 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9418 APE_HOST_HEARTBEAT_INT_DISABLE);
9420 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9422 return 0;
9425 /* Called at device open time to get the chip ready for
9426 * packet processing. Invoked with tp->lock held.
9427 */
9428 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9430 /* Chip may have been just powered on. If so, the boot code may still
9431 * be running initialization. Wait for it to finish to avoid races in
9432 * accessing the hardware.
9433 */
9434 tg3_enable_register_access(tp);
9435 tg3_poll_fw(tp);
9437 tg3_switch_clocks(tp);
9439 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9441 return tg3_reset_hw(tp, reset_phy);
9444 #define TG3_STAT_ADD32(PSTAT, REG) \
9445 do { u32 __val = tr32(REG); \
9446 (PSTAT)->low += __val; \
9447 if ((PSTAT)->low < __val) \
9448 (PSTAT)->high += 1; \
9449 } while (0)
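/* The MAC statistics counters are only 32 bits wide, so each one is
 * accumulated into a 64-bit software counter. The unsigned compare
 * (PSTAT)->low < __val is true exactly when the addition wrapped past
 * 2^32, in which case the carry is propagated into ->high.
 */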
9451 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9453 struct tg3_hw_stats *sp = tp->hw_stats;
9455 if (!netif_carrier_ok(tp->dev))
9456 return;
9458 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9459 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9460 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9461 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9462 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9463 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9464 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9465 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9466 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9467 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9468 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9469 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9470 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9471 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
9472 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
9473 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
9474 u32 val;
9476 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9477 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
9478 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9479 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
9482 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9483 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9484 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9485 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9486 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9487 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9488 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9489 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9490 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9491 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9492 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9493 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9494 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9495 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9497 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9498 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9499 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9500 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9501 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9502 } else {
9503 u32 val = tr32(HOSTCC_FLOW_ATTN);
9504 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9505 if (val) {
9506 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9507 sp->rx_discards.low += val;
9508 if (sp->rx_discards.low < val)
9509 sp->rx_discards.high += 1;
9511 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9513 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
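/* Workaround for occasionally dropped MSIs: if a vector reports pending
 * work but neither its rx nor tx consumer index has advanced since the
 * previous timer tick, the interrupt is presumed lost and the handler
 * is invoked by hand. A single grace tick (chk_msi_cnt) avoids false
 * positives when the NAPI poll is merely still in flight.
 */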
9516 static void tg3_chk_missed_msi(struct tg3 *tp)
9518 u32 i;
9520 for (i = 0; i < tp->irq_cnt; i++) {
9521 struct tg3_napi *tnapi = &tp->napi[i];
9523 if (tg3_has_work(tnapi)) {
9524 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9525 tnapi->last_tx_cons == tnapi->tx_cons) {
9526 if (tnapi->chk_msi_cnt < 1) {
9527 tnapi->chk_msi_cnt++;
9528 return;
9530 tg3_msi(0, tnapi);
9533 tnapi->chk_msi_cnt = 0;
9534 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9535 tnapi->last_tx_cons = tnapi->tx_cons;
9539 static void tg3_timer(unsigned long __opaque)
9541 struct tg3 *tp = (struct tg3 *) __opaque;
9543 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9544 goto restart_timer;
9546 spin_lock(&tp->lock);
9548 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9549 tg3_flag(tp, 57765_CLASS))
9550 tg3_chk_missed_msi(tp);
9552 if (!tg3_flag(tp, TAGGED_STATUS)) {
9553 /* All of this is necessary because, with non-tagged IRQ status,
9554 * the mailbox/status_block protocol the chip uses with the CPU
9555 * is race prone.
9556 */
9557 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9558 tw32(GRC_LOCAL_CTRL,
9559 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9560 } else {
9561 tw32(HOSTCC_MODE, tp->coalesce_mode |
9562 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9565 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9566 spin_unlock(&tp->lock);
9567 tg3_reset_task_schedule(tp);
9568 goto restart_timer;
9572 /* This part only runs once per second. */
9573 if (!--tp->timer_counter) {
9574 if (tg3_flag(tp, 5705_PLUS))
9575 tg3_periodic_fetch_stats(tp);
9577 if (tp->setlpicnt && !--tp->setlpicnt)
9578 tg3_phy_eee_enable(tp);
9580 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9581 u32 mac_stat;
9582 int phy_event;
9584 mac_stat = tr32(MAC_STATUS);
9586 phy_event = 0;
9587 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9588 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9589 phy_event = 1;
9590 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9591 phy_event = 1;
9593 if (phy_event)
9594 tg3_setup_phy(tp, 0);
9595 } else if (tg3_flag(tp, POLL_SERDES)) {
9596 u32 mac_stat = tr32(MAC_STATUS);
9597 int need_setup = 0;
9599 if (netif_carrier_ok(tp->dev) &&
9600 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9601 need_setup = 1;
9603 if (!netif_carrier_ok(tp->dev) &&
9604 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9605 MAC_STATUS_SIGNAL_DET))) {
9606 need_setup = 1;
9608 if (need_setup) {
9609 if (!tp->serdes_counter) {
9610 tw32_f(MAC_MODE,
9611 (tp->mac_mode &
9612 ~MAC_MODE_PORT_MODE_MASK));
9613 udelay(40);
9614 tw32_f(MAC_MODE, tp->mac_mode);
9615 udelay(40);
9617 tg3_setup_phy(tp, 0);
9619 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9620 tg3_flag(tp, 5780_CLASS)) {
9621 tg3_serdes_parallel_detect(tp);
9624 tp->timer_counter = tp->timer_multiplier;
9627 /* Heartbeat is only sent once every 2 seconds.
9629 * The heartbeat is to tell the ASF firmware that the host
9630 * driver is still alive. In the event that the OS crashes,
9631 * ASF needs to reset the hardware to free up the FIFO space
9632 * that may be filled with rx packets destined for the host.
9633 * If the FIFO is full, ASF will no longer function properly.
9635 * Unintended resets have been reported on real time kernels
9636 * where the timer doesn't run on time. Netpoll will also have
9637 * the same problem.
9639 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9640 * to check the ring condition when the heartbeat is expiring
9641 * before doing the reset. This will prevent most unintended
9642 * resets.
9643 */
9644 if (!--tp->asf_counter) {
9645 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9646 tg3_wait_for_event_ack(tp);
9648 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9649 FWCMD_NICDRV_ALIVE3);
9650 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9651 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9652 TG3_FW_UPDATE_TIMEOUT_SEC);
9654 tg3_generate_fw_event(tp);
9656 tp->asf_counter = tp->asf_multiplier;
9659 spin_unlock(&tp->lock);
9661 restart_timer:
9662 tp->timer.expires = jiffies + tp->timer_offset;
9663 add_timer(&tp->timer);
9666 static void __devinit tg3_timer_init(struct tg3 *tp)
9668 if (tg3_flag(tp, TAGGED_STATUS) &&
9669 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9670 !tg3_flag(tp, 57765_CLASS))
9671 tp->timer_offset = HZ;
9672 else
9673 tp->timer_offset = HZ / 10;
9675 BUG_ON(tp->timer_offset > HZ);
9677 tp->timer_multiplier = (HZ / tp->timer_offset);
9678 tp->asf_multiplier = (HZ / tp->timer_offset) *
9679 TG3_FW_UPDATE_FREQ_SEC;
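/* Worked example: with HZ = 1000 and a 100 ms timer_offset, the timer
 * fires ten times per second and timer_multiplier is 10, so the
 * once-per-second block in tg3_timer still runs once per second;
 * asf_multiplier is then 10 * TG3_FW_UPDATE_FREQ_SEC ticks between
 * ASF heartbeats.
 */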
9681 init_timer(&tp->timer);
9682 tp->timer.data = (unsigned long) tp;
9683 tp->timer.function = tg3_timer;
9686 static void tg3_timer_start(struct tg3 *tp)
9688 tp->asf_counter = tp->asf_multiplier;
9689 tp->timer_counter = tp->timer_multiplier;
9691 tp->timer.expires = jiffies + tp->timer_offset;
9692 add_timer(&tp->timer);
9695 static void tg3_timer_stop(struct tg3 *tp)
9697 del_timer_sync(&tp->timer);
9700 /* Restart hardware after configuration changes, self-test, etc.
9701 * Invoked with tp->lock held.
9702 */
9703 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9704 __releases(tp->lock)
9705 __acquires(tp->lock)
9707 int err;
9709 err = tg3_init_hw(tp, reset_phy);
9710 if (err) {
9711 netdev_err(tp->dev,
9712 "Failed to re-initialize device, aborting\n");
9713 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9714 tg3_full_unlock(tp);
9715 tg3_timer_stop(tp);
9716 tp->irq_sync = 0;
9717 tg3_napi_enable(tp);
9718 dev_close(tp->dev);
9719 tg3_full_lock(tp, 0);
9721 return err;
9724 static void tg3_reset_task(struct work_struct *work)
9726 struct tg3 *tp = container_of(work, struct tg3, reset_task);
9727 int err;
9729 tg3_full_lock(tp, 0);
9731 if (!netif_running(tp->dev)) {
9732 tg3_flag_clear(tp, RESET_TASK_PENDING);
9733 tg3_full_unlock(tp);
9734 return;
9737 tg3_full_unlock(tp);
9739 tg3_phy_stop(tp);
9741 tg3_netif_stop(tp);
9743 tg3_full_lock(tp, 1);
9745 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9746 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9747 tp->write32_rx_mbox = tg3_write_flush_reg32;
9748 tg3_flag_set(tp, MBOX_WRITE_REORDER);
9749 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9752 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9753 err = tg3_init_hw(tp, 1);
9754 if (err)
9755 goto out;
9757 tg3_netif_start(tp);
9759 out:
9760 tg3_full_unlock(tp);
9762 if (!err)
9763 tg3_phy_start(tp);
9765 tg3_flag_clear(tp, RESET_TASK_PENDING);
9768 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9770 irq_handler_t fn;
9771 unsigned long flags;
9772 char *name;
9773 struct tg3_napi *tnapi = &tp->napi[irq_num];
9775 if (tp->irq_cnt == 1)
9776 name = tp->dev->name;
9777 else {
9778 name = &tnapi->irq_lbl[0];
9779 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9780 name[IFNAMSIZ-1] = 0;
9783 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9784 fn = tg3_msi;
9785 if (tg3_flag(tp, 1SHOT_MSI))
9786 fn = tg3_msi_1shot;
9787 flags = 0;
9788 } else {
9789 fn = tg3_interrupt;
9790 if (tg3_flag(tp, TAGGED_STATUS))
9791 fn = tg3_interrupt_tagged;
9792 flags = IRQF_SHARED;
9795 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9798 static int tg3_test_interrupt(struct tg3 *tp)
9800 struct tg3_napi *tnapi = &tp->napi[0];
9801 struct net_device *dev = tp->dev;
9802 int err, i, intr_ok = 0;
9803 u32 val;
9805 if (!netif_running(dev))
9806 return -ENODEV;
9808 tg3_disable_ints(tp);
9810 free_irq(tnapi->irq_vec, tnapi);
9812 /*
9813 * Turn off MSI one shot mode. Otherwise this test has no
9814 * observable way to know whether the interrupt was delivered.
9815 */
9816 if (tg3_flag(tp, 57765_PLUS)) {
9817 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9818 tw32(MSGINT_MODE, val);
9821 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9822 IRQF_SHARED, dev->name, tnapi);
9823 if (err)
9824 return err;
9826 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9827 tg3_enable_ints(tp);
9829 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9830 tnapi->coal_now);
9832 for (i = 0; i < 5; i++) {
9833 u32 int_mbox, misc_host_ctrl;
9835 int_mbox = tr32_mailbox(tnapi->int_mbox);
9836 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9838 if ((int_mbox != 0) ||
9839 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9840 intr_ok = 1;
9841 break;
9844 if (tg3_flag(tp, 57765_PLUS) &&
9845 tnapi->hw_status->status_tag != tnapi->last_tag)
9846 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9848 msleep(10);
9851 tg3_disable_ints(tp);
9853 free_irq(tnapi->irq_vec, tnapi);
9855 err = tg3_request_irq(tp, 0);
9857 if (err)
9858 return err;
9860 if (intr_ok) {
9861 /* Reenable MSI one shot mode. */
9862 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9863 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9864 tw32(MSGINT_MODE, val);
9866 return 0;
9869 return -EIO;
9872 /* Returns 0 if the MSI test succeeds, or if the MSI test fails
9873 * but INTx mode is successfully restored.
9874 */
9875 static int tg3_test_msi(struct tg3 *tp)
9877 int err;
9878 u16 pci_cmd;
9880 if (!tg3_flag(tp, USING_MSI))
9881 return 0;
9883 /* Turn off SERR reporting in case MSI terminates with Master
9884 * Abort.
9885 */
9886 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9887 pci_write_config_word(tp->pdev, PCI_COMMAND,
9888 pci_cmd & ~PCI_COMMAND_SERR);
9890 err = tg3_test_interrupt(tp);
9892 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9894 if (!err)
9895 return 0;
9897 /* other failures */
9898 if (err != -EIO)
9899 return err;
9901 /* MSI test failed, go back to INTx mode */
9902 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9903 "to INTx mode. Please report this failure to the PCI "
9904 "maintainer and include system chipset information\n");
9906 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9908 pci_disable_msi(tp->pdev);
9910 tg3_flag_clear(tp, USING_MSI);
9911 tp->napi[0].irq_vec = tp->pdev->irq;
9913 err = tg3_request_irq(tp, 0);
9914 if (err)
9915 return err;
9917 /* Need to reset the chip because the MSI cycle may have terminated
9918 * with Master Abort.
9919 */
9920 tg3_full_lock(tp, 1);
9922 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9923 err = tg3_init_hw(tp, 1);
9925 tg3_full_unlock(tp);
9927 if (err)
9928 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9930 return err;
9933 static int tg3_request_firmware(struct tg3 *tp)
9935 const __be32 *fw_data;
9937 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9938 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9939 tp->fw_needed);
9940 return -ENOENT;
9943 fw_data = (void *)tp->fw->data;
9945 /* Firmware blob starts with version numbers, followed by
9946 * start address and _full_ length including BSS sections
9947 * (which must be longer than the actual data, of course).
9948 */
9950 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
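/* The header is three big-endian 32-bit words -- version, start
 * address, length -- hence the 12-byte adjustment in the sanity
 * check below.
 */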
9951 if (tp->fw_len < (tp->fw->size - 12)) {
9952 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9953 tp->fw_len, tp->fw_needed);
9954 release_firmware(tp->fw);
9955 tp->fw = NULL;
9956 return -EINVAL;
9959 /* We no longer need firmware; we have it. */
9960 tp->fw_needed = NULL;
9961 return 0;
9964 static bool tg3_enable_msix(struct tg3 *tp)
9966 int i, rc;
9967 struct msix_entry msix_ent[tp->irq_max];
9969 tp->irq_cnt = num_online_cpus();
9970 if (tp->irq_cnt > 1) {
9971 /* We want as many rx rings enabled as there are cpus.
9972 * In multiqueue MSI-X mode, the first MSI-X vector
9973 * only deals with link interrupts, etc, so we add
9974 * one to the number of vectors we are requesting.
9975 */
9976 tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
9979 for (i = 0; i < tp->irq_max; i++) {
9980 msix_ent[i].entry = i;
9981 msix_ent[i].vector = 0;
9984 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9985 if (rc < 0) {
9986 return false;
9987 } else if (rc != 0) {
9988 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9989 return false;
9990 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9991 tp->irq_cnt, rc);
9992 tp->irq_cnt = rc;
9995 for (i = 0; i < tp->irq_max; i++)
9996 tp->napi[i].irq_vec = msix_ent[i].vector;
9998 netif_set_real_num_tx_queues(tp->dev, 1);
9999 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
10000 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
10001 pci_disable_msix(tp->pdev);
10002 return false;
10005 if (tp->irq_cnt > 1) {
10006 tg3_flag_set(tp, ENABLE_RSS);
10008 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
10009 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
10010 tg3_flag_set(tp, ENABLE_TSS);
10011 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
10015 return true;
10018 static void tg3_ints_init(struct tg3 *tp)
10020 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10021 !tg3_flag(tp, TAGGED_STATUS)) {
10022 /* All MSI supporting chips should support tagged
10023 * status. Assert that this is the case.
10024 */
10025 netdev_warn(tp->dev,
10026 "MSI without TAGGED_STATUS? Not using MSI\n");
10027 goto defcfg;
10030 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10031 tg3_flag_set(tp, USING_MSIX);
10032 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10033 tg3_flag_set(tp, USING_MSI);
10035 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10036 u32 msi_mode = tr32(MSGINT_MODE);
10037 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10038 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10039 if (!tg3_flag(tp, 1SHOT_MSI))
10040 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10041 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10043 defcfg:
10044 if (!tg3_flag(tp, USING_MSIX)) {
10045 tp->irq_cnt = 1;
10046 tp->napi[0].irq_vec = tp->pdev->irq;
10047 netif_set_real_num_tx_queues(tp->dev, 1);
10048 netif_set_real_num_rx_queues(tp->dev, 1);
10052 static void tg3_ints_fini(struct tg3 *tp)
10054 if (tg3_flag(tp, USING_MSIX))
10055 pci_disable_msix(tp->pdev);
10056 else if (tg3_flag(tp, USING_MSI))
10057 pci_disable_msi(tp->pdev);
10058 tg3_flag_clear(tp, USING_MSI);
10059 tg3_flag_clear(tp, USING_MSIX);
10060 tg3_flag_clear(tp, ENABLE_RSS);
10061 tg3_flag_clear(tp, ENABLE_TSS);
10064 static int tg3_open(struct net_device *dev)
10066 struct tg3 *tp = netdev_priv(dev);
10067 int i, err;
10069 if (tp->fw_needed) {
10070 err = tg3_request_firmware(tp);
10071 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10072 if (err)
10073 return err;
10074 } else if (err) {
10075 netdev_warn(tp->dev, "TSO capability disabled\n");
10076 tg3_flag_clear(tp, TSO_CAPABLE);
10077 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10078 netdev_notice(tp->dev, "TSO capability restored\n");
10079 tg3_flag_set(tp, TSO_CAPABLE);
10083 netif_carrier_off(tp->dev);
10085 err = tg3_power_up(tp);
10086 if (err)
10087 return err;
10089 tg3_full_lock(tp, 0);
10091 tg3_disable_ints(tp);
10092 tg3_flag_clear(tp, INIT_COMPLETE);
10094 tg3_full_unlock(tp);
10096 /*
10097 * Setup interrupts first so we know how
10098 * many NAPI resources to allocate
10099 */
10100 tg3_ints_init(tp);
10102 tg3_rss_check_indir_tbl(tp);
10104 /* The placement of this call is tied
10105 * to the setup and use of Host TX descriptors.
10106 */
10107 err = tg3_alloc_consistent(tp);
10108 if (err)
10109 goto err_out1;
10111 tg3_napi_init(tp);
10113 tg3_napi_enable(tp);
10115 for (i = 0; i < tp->irq_cnt; i++) {
10116 struct tg3_napi *tnapi = &tp->napi[i];
10117 err = tg3_request_irq(tp, i);
10118 if (err) {
10119 for (i--; i >= 0; i--) {
10120 tnapi = &tp->napi[i];
10121 free_irq(tnapi->irq_vec, tnapi);
10123 goto err_out2;
10127 tg3_full_lock(tp, 0);
10129 err = tg3_init_hw(tp, 1);
10130 if (err) {
10131 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10132 tg3_free_rings(tp);
10135 tg3_full_unlock(tp);
10137 if (err)
10138 goto err_out3;
10140 if (tg3_flag(tp, USING_MSI)) {
10141 err = tg3_test_msi(tp);
10143 if (err) {
10144 tg3_full_lock(tp, 0);
10145 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10146 tg3_free_rings(tp);
10147 tg3_full_unlock(tp);
10149 goto err_out2;
10152 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10153 u32 val = tr32(PCIE_TRANSACTION_CFG);
10155 tw32(PCIE_TRANSACTION_CFG,
10156 val | PCIE_TRANS_CFG_1SHOT_MSI);
10160 tg3_phy_start(tp);
10162 tg3_full_lock(tp, 0);
10164 tg3_timer_start(tp);
10165 tg3_flag_set(tp, INIT_COMPLETE);
10166 tg3_enable_ints(tp);
10168 tg3_full_unlock(tp);
10170 netif_tx_start_all_queues(dev);
10172 /*
10173 * Reset the loopback feature if it was turned on while the device was
10174 * down; make sure it is reinstated properly now.
10175 */
10176 if (dev->features & NETIF_F_LOOPBACK)
10177 tg3_set_loopback(dev, dev->features);
10179 return 0;
10181 err_out3:
10182 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10183 struct tg3_napi *tnapi = &tp->napi[i];
10184 free_irq(tnapi->irq_vec, tnapi);
10187 err_out2:
10188 tg3_napi_disable(tp);
10189 tg3_napi_fini(tp);
10190 tg3_free_consistent(tp);
10192 err_out1:
10193 tg3_ints_fini(tp);
10194 tg3_frob_aux_power(tp, false);
10195 pci_set_power_state(tp->pdev, PCI_D3hot);
10196 return err;
10199 static int tg3_close(struct net_device *dev)
10201 int i;
10202 struct tg3 *tp = netdev_priv(dev);
10204 tg3_napi_disable(tp);
10205 tg3_reset_task_cancel(tp);
10207 netif_tx_stop_all_queues(dev);
10209 tg3_timer_stop(tp);
10211 tg3_phy_stop(tp);
10213 tg3_full_lock(tp, 1);
10215 tg3_disable_ints(tp);
10217 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10218 tg3_free_rings(tp);
10219 tg3_flag_clear(tp, INIT_COMPLETE);
10221 tg3_full_unlock(tp);
10223 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10224 struct tg3_napi *tnapi = &tp->napi[i];
10225 free_irq(tnapi->irq_vec, tnapi);
10228 tg3_ints_fini(tp);
10230 /* Clear stats across close / open calls */
10231 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10232 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10234 tg3_napi_fini(tp);
10236 tg3_free_consistent(tp);
10238 tg3_power_down(tp);
10240 netif_carrier_off(tp->dev);
10242 return 0;
10245 static inline u64 get_stat64(tg3_stat64_t *val)
10247 return ((u64)val->high << 32) | ((u64)val->low);
10250 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10252 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10254 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10255 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10256 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10257 u32 val;
10259 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10260 tg3_writephy(tp, MII_TG3_TEST1,
10261 val | MII_TG3_TEST1_CRC_EN);
10262 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10263 } else
10264 val = 0;
10266 tp->phy_crc_errors += val;
10268 return tp->phy_crc_errors;
10271 return get_stat64(&hw_stats->rx_fcs_errors);
10274 #define ESTAT_ADD(member) \
10275 estats->member = old_estats->member + \
10276 get_stat64(&hw_stats->member)
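/* The chip's counters are zeroed by a chip reset, so tg3_halt() saves a
 * snapshot into estats_prev before each reset; ESTAT_ADD() folds the
 * chip's current 64-bit value on top of that snapshot, keeping the
 * ethtool totals monotonic while the interface stays up. The snapshots
 * are cleared across close/open (see tg3_close).
 */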
10278 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10280 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10281 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10283 ESTAT_ADD(rx_octets);
10284 ESTAT_ADD(rx_fragments);
10285 ESTAT_ADD(rx_ucast_packets);
10286 ESTAT_ADD(rx_mcast_packets);
10287 ESTAT_ADD(rx_bcast_packets);
10288 ESTAT_ADD(rx_fcs_errors);
10289 ESTAT_ADD(rx_align_errors);
10290 ESTAT_ADD(rx_xon_pause_rcvd);
10291 ESTAT_ADD(rx_xoff_pause_rcvd);
10292 ESTAT_ADD(rx_mac_ctrl_rcvd);
10293 ESTAT_ADD(rx_xoff_entered);
10294 ESTAT_ADD(rx_frame_too_long_errors);
10295 ESTAT_ADD(rx_jabbers);
10296 ESTAT_ADD(rx_undersize_packets);
10297 ESTAT_ADD(rx_in_length_errors);
10298 ESTAT_ADD(rx_out_length_errors);
10299 ESTAT_ADD(rx_64_or_less_octet_packets);
10300 ESTAT_ADD(rx_65_to_127_octet_packets);
10301 ESTAT_ADD(rx_128_to_255_octet_packets);
10302 ESTAT_ADD(rx_256_to_511_octet_packets);
10303 ESTAT_ADD(rx_512_to_1023_octet_packets);
10304 ESTAT_ADD(rx_1024_to_1522_octet_packets);
10305 ESTAT_ADD(rx_1523_to_2047_octet_packets);
10306 ESTAT_ADD(rx_2048_to_4095_octet_packets);
10307 ESTAT_ADD(rx_4096_to_8191_octet_packets);
10308 ESTAT_ADD(rx_8192_to_9022_octet_packets);
10310 ESTAT_ADD(tx_octets);
10311 ESTAT_ADD(tx_collisions);
10312 ESTAT_ADD(tx_xon_sent);
10313 ESTAT_ADD(tx_xoff_sent);
10314 ESTAT_ADD(tx_flow_control);
10315 ESTAT_ADD(tx_mac_errors);
10316 ESTAT_ADD(tx_single_collisions);
10317 ESTAT_ADD(tx_mult_collisions);
10318 ESTAT_ADD(tx_deferred);
10319 ESTAT_ADD(tx_excessive_collisions);
10320 ESTAT_ADD(tx_late_collisions);
10321 ESTAT_ADD(tx_collide_2times);
10322 ESTAT_ADD(tx_collide_3times);
10323 ESTAT_ADD(tx_collide_4times);
10324 ESTAT_ADD(tx_collide_5times);
10325 ESTAT_ADD(tx_collide_6times);
10326 ESTAT_ADD(tx_collide_7times);
10327 ESTAT_ADD(tx_collide_8times);
10328 ESTAT_ADD(tx_collide_9times);
10329 ESTAT_ADD(tx_collide_10times);
10330 ESTAT_ADD(tx_collide_11times);
10331 ESTAT_ADD(tx_collide_12times);
10332 ESTAT_ADD(tx_collide_13times);
10333 ESTAT_ADD(tx_collide_14times);
10334 ESTAT_ADD(tx_collide_15times);
10335 ESTAT_ADD(tx_ucast_packets);
10336 ESTAT_ADD(tx_mcast_packets);
10337 ESTAT_ADD(tx_bcast_packets);
10338 ESTAT_ADD(tx_carrier_sense_errors);
10339 ESTAT_ADD(tx_discards);
10340 ESTAT_ADD(tx_errors);
10342 ESTAT_ADD(dma_writeq_full);
10343 ESTAT_ADD(dma_write_prioq_full);
10344 ESTAT_ADD(rxbds_empty);
10345 ESTAT_ADD(rx_discards);
10346 ESTAT_ADD(rx_errors);
10347 ESTAT_ADD(rx_threshold_hit);
10349 ESTAT_ADD(dma_readq_full);
10350 ESTAT_ADD(dma_read_prioq_full);
10351 ESTAT_ADD(tx_comp_queue_full);
10353 ESTAT_ADD(ring_set_send_prod_index);
10354 ESTAT_ADD(ring_status_update);
10355 ESTAT_ADD(nic_irqs);
10356 ESTAT_ADD(nic_avoided_irqs);
10357 ESTAT_ADD(nic_tx_threshold_hit);
10359 ESTAT_ADD(mbuf_lwm_thresh_hit);
10362 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10364 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10365 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10367 stats->rx_packets = old_stats->rx_packets +
10368 get_stat64(&hw_stats->rx_ucast_packets) +
10369 get_stat64(&hw_stats->rx_mcast_packets) +
10370 get_stat64(&hw_stats->rx_bcast_packets);
10372 stats->tx_packets = old_stats->tx_packets +
10373 get_stat64(&hw_stats->tx_ucast_packets) +
10374 get_stat64(&hw_stats->tx_mcast_packets) +
10375 get_stat64(&hw_stats->tx_bcast_packets);
10377 stats->rx_bytes = old_stats->rx_bytes +
10378 get_stat64(&hw_stats->rx_octets);
10379 stats->tx_bytes = old_stats->tx_bytes +
10380 get_stat64(&hw_stats->tx_octets);
10382 stats->rx_errors = old_stats->rx_errors +
10383 get_stat64(&hw_stats->rx_errors);
10384 stats->tx_errors = old_stats->tx_errors +
10385 get_stat64(&hw_stats->tx_errors) +
10386 get_stat64(&hw_stats->tx_mac_errors) +
10387 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10388 get_stat64(&hw_stats->tx_discards);
10390 stats->multicast = old_stats->multicast +
10391 get_stat64(&hw_stats->rx_mcast_packets);
10392 stats->collisions = old_stats->collisions +
10393 get_stat64(&hw_stats->tx_collisions);
10395 stats->rx_length_errors = old_stats->rx_length_errors +
10396 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10397 get_stat64(&hw_stats->rx_undersize_packets);
10399 stats->rx_over_errors = old_stats->rx_over_errors +
10400 get_stat64(&hw_stats->rxbds_empty);
10401 stats->rx_frame_errors = old_stats->rx_frame_errors +
10402 get_stat64(&hw_stats->rx_align_errors);
10403 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10404 get_stat64(&hw_stats->tx_discards);
10405 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10406 get_stat64(&hw_stats->tx_carrier_sense_errors);
10408 stats->rx_crc_errors = old_stats->rx_crc_errors +
10409 tg3_calc_crc_errors(tp);
10411 stats->rx_missed_errors = old_stats->rx_missed_errors +
10412 get_stat64(&hw_stats->rx_discards);
10414 stats->rx_dropped = tp->rx_dropped;
10415 stats->tx_dropped = tp->tx_dropped;
10418 static int tg3_get_regs_len(struct net_device *dev)
10420 return TG3_REG_BLK_SIZE;
10423 static void tg3_get_regs(struct net_device *dev,
10424 struct ethtool_regs *regs, void *_p)
10426 struct tg3 *tp = netdev_priv(dev);
10428 regs->version = 0;
10430 memset(_p, 0, TG3_REG_BLK_SIZE);
10432 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10433 return;
10435 tg3_full_lock(tp, 0);
10437 tg3_dump_legacy_regs(tp, (u32 *)_p);
10439 tg3_full_unlock(tp);
10442 static int tg3_get_eeprom_len(struct net_device *dev)
10444 struct tg3 *tp = netdev_priv(dev);
10446 return tp->nvram_size;
10449 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10451 struct tg3 *tp = netdev_priv(dev);
10452 int ret;
10453 u8 *pd;
10454 u32 i, offset, len, b_offset, b_count;
10455 __be32 val;
10457 if (tg3_flag(tp, NO_NVRAM))
10458 return -EINVAL;
10460 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10461 return -EAGAIN;
10463 offset = eeprom->offset;
10464 len = eeprom->len;
10465 eeprom->len = 0;
10467 eeprom->magic = TG3_EEPROM_MAGIC;
10469 if (offset & 3) {
10470 /* adjustments to start on required 4 byte boundary */
10471 b_offset = offset & 3;
10472 b_count = 4 - b_offset;
10473 if (b_count > len) {
10474 /* e.g. offset=1 len=2: the request ends inside this word */
10475 b_count = len;
10477 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10478 if (ret)
10479 return ret;
10480 memcpy(data, ((char *)&val) + b_offset, b_count);
10481 len -= b_count;
10482 offset += b_count;
10483 eeprom->len += b_count;
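/* Worked example for the block above: offset=5, len=10 gives
 * b_offset=1 and b_count=3, so the word at offset 4 is read and the
 * bytes for offsets 5..7 are copied out; the loop below then resumes
 * at the aligned offset 8 with len=7, and the tail code picks up the
 * final three unaligned bytes.
 */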
10486 /* read bytes up to the last 4 byte boundary */
10487 pd = &data[eeprom->len];
10488 for (i = 0; i < (len - (len & 3)); i += 4) {
10489 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10490 if (ret) {
10491 eeprom->len += i;
10492 return ret;
10494 memcpy(pd + i, &val, 4);
10496 eeprom->len += i;
10498 if (len & 3) {
10499 /* read last bytes not ending on 4 byte boundary */
10500 pd = &data[eeprom->len];
10501 b_count = len & 3;
10502 b_offset = offset + len - b_count;
10503 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10504 if (ret)
10505 return ret;
10506 memcpy(pd, &val, b_count);
10507 eeprom->len += b_count;
10509 return 0;
10512 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10514 struct tg3 *tp = netdev_priv(dev);
10515 int ret;
10516 u32 offset, len, b_offset, odd_len;
10517 u8 *buf;
10518 __be32 start, end;
10520 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10521 return -EAGAIN;
10523 if (tg3_flag(tp, NO_NVRAM) ||
10524 eeprom->magic != TG3_EEPROM_MAGIC)
10525 return -EINVAL;
10527 offset = eeprom->offset;
10528 len = eeprom->len;
10530 if ((b_offset = (offset & 3))) {
10531 /* adjustments to start on required 4 byte boundary */
10532 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10533 if (ret)
10534 return ret;
10535 len += b_offset;
10536 offset &= ~3;
10537 if (len < 4)
10538 len = 4;
10541 odd_len = 0;
10542 if (len & 3) {
10543 /* adjustments to end on required 4 byte boundary */
10544 odd_len = 1;
10545 len = (len + 3) & ~3;
10546 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10547 if (ret)
10548 return ret;
10551 buf = data;
10552 if (b_offset || odd_len) {
10553 buf = kmalloc(len, GFP_KERNEL);
10554 if (!buf)
10555 return -ENOMEM;
10556 if (b_offset)
10557 memcpy(buf, &start, 4);
10558 if (odd_len)
10559 memcpy(buf+len-4, &end, 4);
10560 memcpy(buf + b_offset, data, eeprom->len);
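/* When either end of the request is unaligned, the bounce buffer
 * assembled above performs a read-modify-write: the first and/or
 * last NVRAM words are read back, the caller's bytes are laid over
 * them at b_offset, and the whole aligned region is written below in
 * a single tg3_nvram_write_block() call.
 */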
10563 ret = tg3_nvram_write_block(tp, offset, len, buf);
10565 if (buf != data)
10566 kfree(buf);
10568 return ret;
10571 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10573 struct tg3 *tp = netdev_priv(dev);
10575 if (tg3_flag(tp, USE_PHYLIB)) {
10576 struct phy_device *phydev;
10577 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10578 return -EAGAIN;
10579 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10580 return phy_ethtool_gset(phydev, cmd);
10583 cmd->supported = (SUPPORTED_Autoneg);
10585 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10586 cmd->supported |= (SUPPORTED_1000baseT_Half |
10587 SUPPORTED_1000baseT_Full);
10589 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10590 cmd->supported |= (SUPPORTED_100baseT_Half |
10591 SUPPORTED_100baseT_Full |
10592 SUPPORTED_10baseT_Half |
10593 SUPPORTED_10baseT_Full |
10594 SUPPORTED_TP);
10595 cmd->port = PORT_TP;
10596 } else {
10597 cmd->supported |= SUPPORTED_FIBRE;
10598 cmd->port = PORT_FIBRE;
10601 cmd->advertising = tp->link_config.advertising;
10602 if (tg3_flag(tp, PAUSE_AUTONEG)) {
10603 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10604 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10605 cmd->advertising |= ADVERTISED_Pause;
10606 } else {
10607 cmd->advertising |= ADVERTISED_Pause |
10608 ADVERTISED_Asym_Pause;
10610 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10611 cmd->advertising |= ADVERTISED_Asym_Pause;
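/* The chain above follows the standard PAUSE/ASM_DIR advertisement
 * encoding (IEEE 802.3 Annex 28B): RX+TX -> Pause, RX only ->
 * Pause | Asym_Pause, TX only -> Asym_Pause.
 */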
10614 if (netif_running(dev) && netif_carrier_ok(dev)) {
10615 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10616 cmd->duplex = tp->link_config.active_duplex;
10617 cmd->lp_advertising = tp->link_config.rmt_adv;
10618 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10619 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10620 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10621 else
10622 cmd->eth_tp_mdix = ETH_TP_MDI;
10624 } else {
10625 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10626 cmd->duplex = DUPLEX_UNKNOWN;
10627 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10629 cmd->phy_address = tp->phy_addr;
10630 cmd->transceiver = XCVR_INTERNAL;
10631 cmd->autoneg = tp->link_config.autoneg;
10632 cmd->maxtxpkt = 0;
10633 cmd->maxrxpkt = 0;
10634 return 0;
10637 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10639 struct tg3 *tp = netdev_priv(dev);
10640 u32 speed = ethtool_cmd_speed(cmd);
10642 if (tg3_flag(tp, USE_PHYLIB)) {
10643 struct phy_device *phydev;
10644 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10645 return -EAGAIN;
10646 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10647 return phy_ethtool_sset(phydev, cmd);
10650 if (cmd->autoneg != AUTONEG_ENABLE &&
10651 cmd->autoneg != AUTONEG_DISABLE)
10652 return -EINVAL;
10654 if (cmd->autoneg == AUTONEG_DISABLE &&
10655 cmd->duplex != DUPLEX_FULL &&
10656 cmd->duplex != DUPLEX_HALF)
10657 return -EINVAL;
10659 if (cmd->autoneg == AUTONEG_ENABLE) {
10660 u32 mask = ADVERTISED_Autoneg |
10661 ADVERTISED_Pause |
10662 ADVERTISED_Asym_Pause;
10664 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10665 mask |= ADVERTISED_1000baseT_Half |
10666 ADVERTISED_1000baseT_Full;
10668 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10669 mask |= ADVERTISED_100baseT_Half |
10670 ADVERTISED_100baseT_Full |
10671 ADVERTISED_10baseT_Half |
10672 ADVERTISED_10baseT_Full |
10673 ADVERTISED_TP;
10674 else
10675 mask |= ADVERTISED_FIBRE;
10677 if (cmd->advertising & ~mask)
10678 return -EINVAL;
10680 mask &= (ADVERTISED_1000baseT_Half |
10681 ADVERTISED_1000baseT_Full |
10682 ADVERTISED_100baseT_Half |
10683 ADVERTISED_100baseT_Full |
10684 ADVERTISED_10baseT_Half |
10685 ADVERTISED_10baseT_Full);
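/* Pause and Asym_Pause pass the validation mask above but are
 * stripped here before the advertisement is stored; flow control is
 * tracked separately through tp->link_config.flowctrl and the
 * PAUSE_AUTONEG flag (see tg3_get_pauseparam()/tg3_set_pauseparam()).
 */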
10687 cmd->advertising &= mask;
10688 } else {
10689 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10690 if (speed != SPEED_1000)
10691 return -EINVAL;
10693 if (cmd->duplex != DUPLEX_FULL)
10694 return -EINVAL;
10695 } else {
10696 if (speed != SPEED_100 &&
10697 speed != SPEED_10)
10698 return -EINVAL;
10702 tg3_full_lock(tp, 0);
10704 tp->link_config.autoneg = cmd->autoneg;
10705 if (cmd->autoneg == AUTONEG_ENABLE) {
10706 tp->link_config.advertising = (cmd->advertising |
10707 ADVERTISED_Autoneg);
10708 tp->link_config.speed = SPEED_UNKNOWN;
10709 tp->link_config.duplex = DUPLEX_UNKNOWN;
10710 } else {
10711 tp->link_config.advertising = 0;
10712 tp->link_config.speed = speed;
10713 tp->link_config.duplex = cmd->duplex;
10716 if (netif_running(dev))
10717 tg3_setup_phy(tp, 1);
10719 tg3_full_unlock(tp);
10721 return 0;
10724 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10726 struct tg3 *tp = netdev_priv(dev);
10728 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10729 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10730 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10731 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10734 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10736 struct tg3 *tp = netdev_priv(dev);
10738 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10739 wol->supported = WAKE_MAGIC;
10740 else
10741 wol->supported = 0;
10742 wol->wolopts = 0;
10743 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10744 wol->wolopts = WAKE_MAGIC;
10745 memset(&wol->sopass, 0, sizeof(wol->sopass));
10748 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10750 struct tg3 *tp = netdev_priv(dev);
10751 struct device *dp = &tp->pdev->dev;
10753 if (wol->wolopts & ~WAKE_MAGIC)
10754 return -EINVAL;
10755 if ((wol->wolopts & WAKE_MAGIC) &&
10756 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10757 return -EINVAL;
10759 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10761 spin_lock_bh(&tp->lock);
10762 if (device_may_wakeup(dp))
10763 tg3_flag_set(tp, WOL_ENABLE);
10764 else
10765 tg3_flag_clear(tp, WOL_ENABLE);
10766 spin_unlock_bh(&tp->lock);
10768 return 0;
10771 static u32 tg3_get_msglevel(struct net_device *dev)
10773 struct tg3 *tp = netdev_priv(dev);
10774 return tp->msg_enable;
10777 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10779 struct tg3 *tp = netdev_priv(dev);
10780 tp->msg_enable = value;
10783 static int tg3_nway_reset(struct net_device *dev)
10785 struct tg3 *tp = netdev_priv(dev);
10786 int r;
10788 if (!netif_running(dev))
10789 return -EAGAIN;
10791 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10792 return -EINVAL;
10794 if (tg3_flag(tp, USE_PHYLIB)) {
10795 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10796 return -EAGAIN;
10797 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10798 } else {
10799 u32 bmcr;
10801 spin_lock_bh(&tp->lock);
10802 r = -EINVAL;
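/* The BMCR is read twice on purpose: the first read presumably
 * discards a stale value so that the checked read below sees current
 * state. The driver itself does not document the reason, so treat
 * this rationale as an inference.
 */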
10803 tg3_readphy(tp, MII_BMCR, &bmcr);
10804 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10805 ((bmcr & BMCR_ANENABLE) ||
10806 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10807 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10808 BMCR_ANENABLE);
10809 r = 0;
10811 spin_unlock_bh(&tp->lock);
10814 return r;
10817 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10819 struct tg3 *tp = netdev_priv(dev);
10821 ering->rx_max_pending = tp->rx_std_ring_mask;
10822 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10823 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10824 else
10825 ering->rx_jumbo_max_pending = 0;
10827 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10829 ering->rx_pending = tp->rx_pending;
10830 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10831 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10832 else
10833 ering->rx_jumbo_pending = 0;
10835 ering->tx_pending = tp->napi[0].tx_pending;
10838 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10840 struct tg3 *tp = netdev_priv(dev);
10841 int i, irq_sync = 0, err = 0;
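/* The checks below reject ring sizes the hardware cannot index and
 * additionally require the tx ring to hold at least one maximally
 * fragmented packet (MAX_SKB_FRAGS descriptors), with a 3x margin on
 * TSO_BUG parts, which may segment large packets in software.
 */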
10843 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10844 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10845 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10846 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10847 (tg3_flag(tp, TSO_BUG) &&
10848 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10849 return -EINVAL;
10851 if (netif_running(dev)) {
10852 tg3_phy_stop(tp);
10853 tg3_netif_stop(tp);
10854 irq_sync = 1;
10857 tg3_full_lock(tp, irq_sync);
10859 tp->rx_pending = ering->rx_pending;
10861 if (tg3_flag(tp, MAX_RXPEND_64) &&
10862 tp->rx_pending > 63)
10863 tp->rx_pending = 63;
10865 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10866 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10868 for (i = 0; i < tp->irq_max; i++)
10869 tp->napi[i].tx_pending = ering->tx_pending;
10871 if (netif_running(dev)) {
10872 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10873 err = tg3_restart_hw(tp, 1);
10874 if (!err)
10875 tg3_netif_start(tp);
10878 tg3_full_unlock(tp);
10880 if (irq_sync && !err)
10881 tg3_phy_start(tp);
10883 return err;
10886 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10888 struct tg3 *tp = netdev_priv(dev);
10890 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10892 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
10893 epause->rx_pause = 1;
10894 else
10895 epause->rx_pause = 0;
10897 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
10898 epause->tx_pause = 1;
10899 else
10900 epause->tx_pause = 0;
10903 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10905 struct tg3 *tp = netdev_priv(dev);
10906 int err = 0;
10908 if (tg3_flag(tp, USE_PHYLIB)) {
10909 u32 newadv;
10910 struct phy_device *phydev;
10912 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10914 if (!(phydev->supported & SUPPORTED_Pause) ||
10915 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10916 (epause->rx_pause != epause->tx_pause)))
10917 return -EINVAL;
10919 tp->link_config.flowctrl = 0;
10920 if (epause->rx_pause) {
10921 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10923 if (epause->tx_pause) {
10924 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10925 newadv = ADVERTISED_Pause;
10926 } else
10927 newadv = ADVERTISED_Pause |
10928 ADVERTISED_Asym_Pause;
10929 } else if (epause->tx_pause) {
10930 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10931 newadv = ADVERTISED_Asym_Pause;
10932 } else
10933 newadv = 0;
10935 if (epause->autoneg)
10936 tg3_flag_set(tp, PAUSE_AUTONEG);
10937 else
10938 tg3_flag_clear(tp, PAUSE_AUTONEG);
10940 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10941 u32 oldadv = phydev->advertising &
10942 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10943 if (oldadv != newadv) {
10944 phydev->advertising &=
10945 ~(ADVERTISED_Pause |
10946 ADVERTISED_Asym_Pause);
10947 phydev->advertising |= newadv;
10948 if (phydev->autoneg) {
10950 * Always renegotiate the link to
10951 * inform our link partner of our
10952 * flow control settings, even if the
10953 * flow control is forced. Let
10954 * tg3_adjust_link() do the final
10955 * flow control setup.
10957 return phy_start_aneg(phydev);
10961 if (!epause->autoneg)
10962 tg3_setup_flow_control(tp, 0, 0);
10963 } else {
10964 tp->link_config.advertising &=
10965 ~(ADVERTISED_Pause |
10966 ADVERTISED_Asym_Pause);
10967 tp->link_config.advertising |= newadv;
10969 } else {
10970 int irq_sync = 0;
10972 if (netif_running(dev)) {
10973 tg3_netif_stop(tp);
10974 irq_sync = 1;
10977 tg3_full_lock(tp, irq_sync);
10979 if (epause->autoneg)
10980 tg3_flag_set(tp, PAUSE_AUTONEG);
10981 else
10982 tg3_flag_clear(tp, PAUSE_AUTONEG);
10983 if (epause->rx_pause)
10984 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10985 else
10986 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10987 if (epause->tx_pause)
10988 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10989 else
10990 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10992 if (netif_running(dev)) {
10993 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10994 err = tg3_restart_hw(tp, 1);
10995 if (!err)
10996 tg3_netif_start(tp);
10999 tg3_full_unlock(tp);
11002 return err;
11005 static int tg3_get_sset_count(struct net_device *dev, int sset)
11007 switch (sset) {
11008 case ETH_SS_TEST:
11009 return TG3_NUM_TEST;
11010 case ETH_SS_STATS:
11011 return TG3_NUM_STATS;
11012 default:
11013 return -EOPNOTSUPP;
11017 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11018 u32 *rules __always_unused)
11020 struct tg3 *tp = netdev_priv(dev);
11022 if (!tg3_flag(tp, SUPPORT_MSIX))
11023 return -EOPNOTSUPP;
11025 switch (info->cmd) {
11026 case ETHTOOL_GRXRINGS:
11027 if (netif_running(tp->dev))
11028 info->data = tp->irq_cnt;
11029 else {
11030 info->data = num_online_cpus();
11031 if (info->data > TG3_IRQ_MAX_VECS_RSS)
11032 info->data = TG3_IRQ_MAX_VECS_RSS;
11035 /* The first interrupt vector only
11036 * handles link interrupts.
11038 info->data -= 1;
11039 return 0;
11041 default:
11042 return -EOPNOTSUPP;
11046 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11048 u32 size = 0;
11049 struct tg3 *tp = netdev_priv(dev);
11051 if (tg3_flag(tp, SUPPORT_MSIX))
11052 size = TG3_RSS_INDIR_TBL_SIZE;
11054 return size;
11057 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11059 struct tg3 *tp = netdev_priv(dev);
11060 int i;
11062 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11063 indir[i] = tp->rss_ind_tbl[i];
11065 return 0;
11068 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11070 struct tg3 *tp = netdev_priv(dev);
11071 size_t i;
11073 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11074 tp->rss_ind_tbl[i] = indir[i];
11076 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11077 return 0;
11079 /* It is legal to write the indirection
11080 * table while the device is running.
11082 tg3_full_lock(tp, 0);
11083 tg3_rss_write_indir_tbl(tp);
11084 tg3_full_unlock(tp);
11086 return 0;
11089 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11091 switch (stringset) {
11092 case ETH_SS_STATS:
11093 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11094 break;
11095 case ETH_SS_TEST:
11096 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11097 break;
11098 default:
11099 WARN_ON(1); /* we need a WARN() */
11100 break;
11104 static int tg3_set_phys_id(struct net_device *dev,
11105 enum ethtool_phys_id_state state)
11107 struct tg3 *tp = netdev_priv(dev);
11109 if (!netif_running(tp->dev))
11110 return -EAGAIN;
11112 switch (state) {
11113 case ETHTOOL_ID_ACTIVE:
11114 return 1; /* cycle on/off once per second */
11116 case ETHTOOL_ID_ON:
11117 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11118 LED_CTRL_1000MBPS_ON |
11119 LED_CTRL_100MBPS_ON |
11120 LED_CTRL_10MBPS_ON |
11121 LED_CTRL_TRAFFIC_OVERRIDE |
11122 LED_CTRL_TRAFFIC_BLINK |
11123 LED_CTRL_TRAFFIC_LED);
11124 break;
11126 case ETHTOOL_ID_OFF:
11127 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11128 LED_CTRL_TRAFFIC_OVERRIDE);
11129 break;
11131 case ETHTOOL_ID_INACTIVE:
11132 tw32(MAC_LED_CTRL, tp->led_ctrl);
11133 break;
11136 return 0;
11139 static void tg3_get_ethtool_stats(struct net_device *dev,
11140 struct ethtool_stats *estats, u64 *tmp_stats)
11142 struct tg3 *tp = netdev_priv(dev);
11144 if (tp->hw_stats)
11145 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11146 else
11147 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11150 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11152 int i;
11153 __be32 *buf;
11154 u32 offset = 0, len = 0;
11155 u32 magic, val;
11157 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11158 return NULL;
11160 if (magic == TG3_EEPROM_MAGIC) {
11161 for (offset = TG3_NVM_DIR_START;
11162 offset < TG3_NVM_DIR_END;
11163 offset += TG3_NVM_DIRENT_SIZE) {
11164 if (tg3_nvram_read(tp, offset, &val))
11165 return NULL;
11167 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11168 TG3_NVM_DIRTYPE_EXTVPD)
11169 break;
11172 if (offset != TG3_NVM_DIR_END) {
11173 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11174 if (tg3_nvram_read(tp, offset + 4, &offset))
11175 return NULL;
11177 offset = tg3_nvram_logical_addr(tp, offset);
11181 if (!offset || !len) {
11182 offset = TG3_NVM_VPD_OFF;
11183 len = TG3_NVM_VPD_LEN;
11186 buf = kmalloc(len, GFP_KERNEL);
11187 if (buf == NULL)
11188 return NULL;
11190 if (magic == TG3_EEPROM_MAGIC) {
11191 for (i = 0; i < len; i += 4) {
11192 /* The data is stored as a little-endian byte stream in NVRAM.
11193 * Using the big-endian read routines keeps the bytes in memory
11194 * in the order they appear in NVRAM, whatever the host endianness.
11196 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11197 goto error;
11199 } else {
11200 u8 *ptr;
11201 ssize_t cnt;
11202 unsigned int pos = 0;
11204 ptr = (u8 *)&buf[0];
11205 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11206 cnt = pci_read_vpd(tp->pdev, pos,
11207 len - pos, ptr);
11208 if (cnt == -ETIMEDOUT || cnt == -EINTR)
11209 cnt = 0;
11210 else if (cnt < 0)
11211 goto error;
11213 if (pos != len)
11214 goto error;
11217 *vpdlen = len;
11219 return buf;
11221 error:
11222 kfree(buf);
11223 return NULL;
11226 #define NVRAM_TEST_SIZE 0x100
11227 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
11228 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
11229 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
11230 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
11231 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
11232 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
11233 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11234 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11236 static int tg3_test_nvram(struct tg3 *tp)
11238 u32 csum, magic, len;
11239 __be32 *buf;
11240 int i, j, k, err = 0, size;
11242 if (tg3_flag(tp, NO_NVRAM))
11243 return 0;
11245 if (tg3_nvram_read(tp, 0, &magic) != 0)
11246 return -EIO;
11248 if (magic == TG3_EEPROM_MAGIC)
11249 size = NVRAM_TEST_SIZE;
11250 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11251 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11252 TG3_EEPROM_SB_FORMAT_1) {
11253 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11254 case TG3_EEPROM_SB_REVISION_0:
11255 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11256 break;
11257 case TG3_EEPROM_SB_REVISION_2:
11258 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11259 break;
11260 case TG3_EEPROM_SB_REVISION_3:
11261 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11262 break;
11263 case TG3_EEPROM_SB_REVISION_4:
11264 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11265 break;
11266 case TG3_EEPROM_SB_REVISION_5:
11267 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11268 break;
11269 case TG3_EEPROM_SB_REVISION_6:
11270 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11271 break;
11272 default:
11273 return -EIO;
11275 } else
11276 return 0;
11277 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11278 size = NVRAM_SELFBOOT_HW_SIZE;
11279 else
11280 return -EIO;
11282 buf = kmalloc(size, GFP_KERNEL);
11283 if (buf == NULL)
11284 return -ENOMEM;
11286 err = -EIO;
11287 for (i = 0, j = 0; i < size; i += 4, j++) {
11288 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11289 if (err)
11290 break;
11292 if (i < size)
11293 goto out;
11295 /* Selfboot format */
11296 magic = be32_to_cpu(buf[0]);
11297 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11298 TG3_EEPROM_MAGIC_FW) {
11299 u8 *buf8 = (u8 *) buf, csum8 = 0;
11301 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11302 TG3_EEPROM_SB_REVISION_2) {
11303 /* For rev 2, the csum doesn't include the MBA. */
11304 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11305 csum8 += buf8[i];
11306 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11307 csum8 += buf8[i];
11308 } else {
11309 for (i = 0; i < size; i++)
11310 csum8 += buf8[i];
11313 if (csum8 == 0) {
11314 err = 0;
11315 goto out;
11318 err = -EIO;
11319 goto out;
11322 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11323 TG3_EEPROM_MAGIC_HW) {
11324 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11325 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11326 u8 *buf8 = (u8 *) buf;
11328 /* Separate the parity bits and the data bytes. */
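/* Layout, as decoded from the loop below: bytes 0 and 8 each carry
 * seven parity bits and bytes 16 and 17 carry six and eight, for 28
 * parity bits guarding the 28 data bytes (NVRAM_SELFBOOT_DATA_SIZE)
 * that make up the rest of the 32-byte image.
 */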
11329 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11330 if ((i == 0) || (i == 8)) {
11331 int l;
11332 u8 msk;
11334 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11335 parity[k++] = buf8[i] & msk;
11336 i++;
11337 } else if (i == 16) {
11338 int l;
11339 u8 msk;
11341 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11342 parity[k++] = buf8[i] & msk;
11343 i++;
11345 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11346 parity[k++] = buf8[i] & msk;
11347 i++;
11349 data[j++] = buf8[i];
11352 err = -EIO;
11353 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11354 u8 hw8 = hweight8(data[i]);
11356 if ((hw8 & 0x1) && parity[i])
11357 goto out;
11358 else if (!(hw8 & 0x1) && !parity[i])
11359 goto out;
11361 err = 0;
11362 goto out;
11365 err = -EIO;
11367 /* Bootstrap checksum at offset 0x10 */
11368 csum = calc_crc((unsigned char *) buf, 0x10);
11369 if (csum != le32_to_cpu(buf[0x10/4]))
11370 goto out;
11372 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11373 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11374 if (csum != le32_to_cpu(buf[0xfc/4]))
11375 goto out;
11377 kfree(buf);
11379 buf = tg3_vpd_readblock(tp, &len);
11380 if (!buf)
11381 return -ENOMEM;
11383 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11384 if (i > 0) {
11385 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11386 if (j < 0)
11387 goto out;
11389 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11390 goto out;
11392 i += PCI_VPD_LRDT_TAG_SIZE;
11393 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11394 PCI_VPD_RO_KEYWORD_CHKSUM);
11395 if (j > 0) {
11396 u8 csum8 = 0;
11398 j += PCI_VPD_INFO_FLD_HDR_SIZE;
11400 for (i = 0; i <= j; i++)
11401 csum8 += ((u8 *)buf)[i];
11403 if (csum8)
11404 goto out;
11408 err = 0;
11410 out:
11411 kfree(buf);
11412 return err;
11415 #define TG3_SERDES_TIMEOUT_SEC 2
11416 #define TG3_COPPER_TIMEOUT_SEC 6
11418 static int tg3_test_link(struct tg3 *tp)
11420 int i, max;
11422 if (!netif_running(tp->dev))
11423 return -ENODEV;
11425 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11426 max = TG3_SERDES_TIMEOUT_SEC;
11427 else
11428 max = TG3_COPPER_TIMEOUT_SEC;
11430 for (i = 0; i < max; i++) {
11431 if (netif_carrier_ok(tp->dev))
11432 return 0;
11434 if (msleep_interruptible(1000))
11435 break;
11438 return -EIO;
11441 /* Only test the commonly used registers */
11442 static int tg3_test_registers(struct tg3 *tp)
11444 int i, is_5705, is_5750;
11445 u32 offset, read_mask, write_mask, val, save_val, read_val;
11446 static struct {
11447 u16 offset;
11448 u16 flags;
11449 #define TG3_FL_5705 0x1
11450 #define TG3_FL_NOT_5705 0x2
11451 #define TG3_FL_NOT_5788 0x4
11452 #define TG3_FL_NOT_5750 0x8
11453 u32 read_mask;
11454 u32 write_mask;
11455 } reg_tbl[] = {
11456 /* MAC Control Registers */
11457 { MAC_MODE, TG3_FL_NOT_5705,
11458 0x00000000, 0x00ef6f8c },
11459 { MAC_MODE, TG3_FL_5705,
11460 0x00000000, 0x01ef6b8c },
11461 { MAC_STATUS, TG3_FL_NOT_5705,
11462 0x03800107, 0x00000000 },
11463 { MAC_STATUS, TG3_FL_5705,
11464 0x03800100, 0x00000000 },
11465 { MAC_ADDR_0_HIGH, 0x0000,
11466 0x00000000, 0x0000ffff },
11467 { MAC_ADDR_0_LOW, 0x0000,
11468 0x00000000, 0xffffffff },
11469 { MAC_RX_MTU_SIZE, 0x0000,
11470 0x00000000, 0x0000ffff },
11471 { MAC_TX_MODE, 0x0000,
11472 0x00000000, 0x00000070 },
11473 { MAC_TX_LENGTHS, 0x0000,
11474 0x00000000, 0x00003fff },
11475 { MAC_RX_MODE, TG3_FL_NOT_5705,
11476 0x00000000, 0x000007fc },
11477 { MAC_RX_MODE, TG3_FL_5705,
11478 0x00000000, 0x000007dc },
11479 { MAC_HASH_REG_0, 0x0000,
11480 0x00000000, 0xffffffff },
11481 { MAC_HASH_REG_1, 0x0000,
11482 0x00000000, 0xffffffff },
11483 { MAC_HASH_REG_2, 0x0000,
11484 0x00000000, 0xffffffff },
11485 { MAC_HASH_REG_3, 0x0000,
11486 0x00000000, 0xffffffff },
11488 /* Receive Data and Receive BD Initiator Control Registers. */
11489 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11490 0x00000000, 0xffffffff },
11491 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11492 0x00000000, 0xffffffff },
11493 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11494 0x00000000, 0x00000003 },
11495 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11496 0x00000000, 0xffffffff },
11497 { RCVDBDI_STD_BD+0, 0x0000,
11498 0x00000000, 0xffffffff },
11499 { RCVDBDI_STD_BD+4, 0x0000,
11500 0x00000000, 0xffffffff },
11501 { RCVDBDI_STD_BD+8, 0x0000,
11502 0x00000000, 0xffff0002 },
11503 { RCVDBDI_STD_BD+0xc, 0x0000,
11504 0x00000000, 0xffffffff },
11506 /* Receive BD Initiator Control Registers. */
11507 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11508 0x00000000, 0xffffffff },
11509 { RCVBDI_STD_THRESH, TG3_FL_5705,
11510 0x00000000, 0x000003ff },
11511 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11512 0x00000000, 0xffffffff },
11514 /* Host Coalescing Control Registers. */
11515 { HOSTCC_MODE, TG3_FL_NOT_5705,
11516 0x00000000, 0x00000004 },
11517 { HOSTCC_MODE, TG3_FL_5705,
11518 0x00000000, 0x000000f6 },
11519 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11520 0x00000000, 0xffffffff },
11521 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11522 0x00000000, 0x000003ff },
11523 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11524 0x00000000, 0xffffffff },
11525 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11526 0x00000000, 0x000003ff },
11527 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11528 0x00000000, 0xffffffff },
11529 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11530 0x00000000, 0x000000ff },
11531 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11532 0x00000000, 0xffffffff },
11533 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11534 0x00000000, 0x000000ff },
11535 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11536 0x00000000, 0xffffffff },
11537 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11538 0x00000000, 0xffffffff },
11539 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11540 0x00000000, 0xffffffff },
11541 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11542 0x00000000, 0x000000ff },
11543 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11544 0x00000000, 0xffffffff },
11545 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11546 0x00000000, 0x000000ff },
11547 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11548 0x00000000, 0xffffffff },
11549 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11550 0x00000000, 0xffffffff },
11551 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11552 0x00000000, 0xffffffff },
11553 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11554 0x00000000, 0xffffffff },
11555 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11556 0x00000000, 0xffffffff },
11557 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11558 0xffffffff, 0x00000000 },
11559 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11560 0xffffffff, 0x00000000 },
11562 /* Buffer Manager Control Registers. */
11563 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11564 0x00000000, 0x007fff80 },
11565 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11566 0x00000000, 0x007fffff },
11567 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11568 0x00000000, 0x0000003f },
11569 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11570 0x00000000, 0x000001ff },
11571 { BUFMGR_MB_HIGH_WATER, 0x0000,
11572 0x00000000, 0x000001ff },
11573 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11574 0xffffffff, 0x00000000 },
11575 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11576 0xffffffff, 0x00000000 },
11578 /* Mailbox Registers */
11579 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11580 0x00000000, 0x000001ff },
11581 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11582 0x00000000, 0x000001ff },
11583 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11584 0x00000000, 0x000007ff },
11585 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11586 0x00000000, 0x000001ff },
11588 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11591 is_5705 = is_5750 = 0;
11592 if (tg3_flag(tp, 5705_PLUS)) {
11593 is_5705 = 1;
11594 if (tg3_flag(tp, 5750_PLUS))
11595 is_5750 = 1;
11598 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11599 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11600 continue;
11602 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11603 continue;
11605 if (tg3_flag(tp, IS_5788) &&
11606 (reg_tbl[i].flags & TG3_FL_NOT_5788))
11607 continue;
11609 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11610 continue;
11612 offset = (u32) reg_tbl[i].offset;
11613 read_mask = reg_tbl[i].read_mask;
11614 write_mask = reg_tbl[i].write_mask;
11616 /* Save the original register content */
11617 save_val = tr32(offset);
11619 /* Determine the read-only value. */
11620 read_val = save_val & read_mask;
11622 /* Write zero to the register, then make sure the read-only bits
11623 * are not changed and the read/write bits are all zeros.
11625 tw32(offset, 0);
11627 val = tr32(offset);
11629 /* Test the read-only and read/write bits. */
11630 if (((val & read_mask) != read_val) || (val & write_mask))
11631 goto out;
11633 /* Write ones to all the bits defined by RdMask and WrMask, then
11634 * make sure the read-only bits are not changed and the
11635 * read/write bits are all ones.
11637 tw32(offset, read_mask | write_mask);
11639 val = tr32(offset);
11641 /* Test the read-only bits. */
11642 if ((val & read_mask) != read_val)
11643 goto out;
11645 /* Test the read/write bits. */
11646 if ((val & write_mask) != write_mask)
11647 goto out;
11649 tw32(offset, save_val);
11652 return 0;
11654 out:
11655 if (netif_msg_hw(tp))
11656 netdev_err(tp->dev,
11657 "Register test failed at offset %x\n", offset);
11658 tw32(offset, save_val);
11659 return -EIO;
11662 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11664 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
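/* The all-zeros and all-ones patterns catch stuck-at bits; the mixed
 * 0xaa55a55a pattern alternates adjacent bits, presumably to expose
 * coupling between neighboring cells.
 */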
11665 int i;
11666 u32 j;
11668 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11669 for (j = 0; j < len; j += 4) {
11670 u32 val;
11672 tg3_write_mem(tp, offset + j, test_pattern[i]);
11673 tg3_read_mem(tp, offset + j, &val);
11674 if (val != test_pattern[i])
11675 return -EIO;
11678 return 0;
11681 static int tg3_test_memory(struct tg3 *tp)
11683 static struct mem_entry {
11684 u32 offset;
11685 u32 len;
11686 } mem_tbl_570x[] = {
11687 { 0x00000000, 0x00b50},
11688 { 0x00002000, 0x1c000},
11689 { 0xffffffff, 0x00000}
11690 }, mem_tbl_5705[] = {
11691 { 0x00000100, 0x0000c},
11692 { 0x00000200, 0x00008},
11693 { 0x00004000, 0x00800},
11694 { 0x00006000, 0x01000},
11695 { 0x00008000, 0x02000},
11696 { 0x00010000, 0x0e000},
11697 { 0xffffffff, 0x00000}
11698 }, mem_tbl_5755[] = {
11699 { 0x00000200, 0x00008},
11700 { 0x00004000, 0x00800},
11701 { 0x00006000, 0x00800},
11702 { 0x00008000, 0x02000},
11703 { 0x00010000, 0x0c000},
11704 { 0xffffffff, 0x00000}
11705 }, mem_tbl_5906[] = {
11706 { 0x00000200, 0x00008},
11707 { 0x00004000, 0x00400},
11708 { 0x00006000, 0x00400},
11709 { 0x00008000, 0x01000},
11710 { 0x00010000, 0x01000},
11711 { 0xffffffff, 0x00000}
11712 }, mem_tbl_5717[] = {
11713 { 0x00000200, 0x00008},
11714 { 0x00010000, 0x0a000},
11715 { 0x00020000, 0x13c00},
11716 { 0xffffffff, 0x00000}
11717 }, mem_tbl_57765[] = {
11718 { 0x00000200, 0x00008},
11719 { 0x00004000, 0x00800},
11720 { 0x00006000, 0x09800},
11721 { 0x00010000, 0x0a000},
11722 { 0xffffffff, 0x00000}
11724 struct mem_entry *mem_tbl;
11725 int err = 0;
11726 int i;
11728 if (tg3_flag(tp, 5717_PLUS))
11729 mem_tbl = mem_tbl_5717;
11730 else if (tg3_flag(tp, 57765_CLASS))
11731 mem_tbl = mem_tbl_57765;
11732 else if (tg3_flag(tp, 5755_PLUS))
11733 mem_tbl = mem_tbl_5755;
11734 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11735 mem_tbl = mem_tbl_5906;
11736 else if (tg3_flag(tp, 5705_PLUS))
11737 mem_tbl = mem_tbl_5705;
11738 else
11739 mem_tbl = mem_tbl_570x;
11741 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11742 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11743 if (err)
11744 break;
11747 return err;
11750 #define TG3_TSO_MSS 500
11752 #define TG3_TSO_IP_HDR_LEN 20
11753 #define TG3_TSO_TCP_HDR_LEN 20
11754 #define TG3_TSO_TCP_OPT_LEN 12
11756 static const u8 tg3_tso_header[] = {
11757 0x08, 0x00,
11758 0x45, 0x00, 0x00, 0x00,
11759 0x00, 0x00, 0x40, 0x00,
11760 0x40, 0x06, 0x00, 0x00,
11761 0x0a, 0x00, 0x00, 0x01,
11762 0x0a, 0x00, 0x00, 0x02,
11763 0x0d, 0x00, 0xe0, 0x00,
11764 0x00, 0x00, 0x01, 0x00,
11765 0x00, 0x00, 0x02, 0x00,
11766 0x80, 0x10, 0x10, 0x00,
11767 0x14, 0x09, 0x00, 0x00,
11768 0x01, 0x01, 0x08, 0x0a,
11769 0x11, 0x11, 0x11, 0x11,
11770 0x11, 0x11, 0x11, 0x11,
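/* The canned header decodes as: Ethertype 0x0800, a 20-byte IPv4
 * header (10.0.0.1 -> 10.0.0.2, DF set, protocol TCP) and a 32-byte
 * TCP header with ACK set, whose 12 option bytes (NOP, NOP,
 * timestamp) account for TG3_TSO_TCP_OPT_LEN. The IP total length is
 * left zero and filled in by tg3_run_loopback().
 */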
11773 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11775 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11776 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11777 u32 budget;
11778 struct sk_buff *skb;
11779 u8 *tx_data, *rx_data;
11780 dma_addr_t map;
11781 int num_pkts, tx_len, rx_len, i, err;
11782 struct tg3_rx_buffer_desc *desc;
11783 struct tg3_napi *tnapi, *rnapi;
11784 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11786 tnapi = &tp->napi[0];
11787 rnapi = &tp->napi[0];
11788 if (tp->irq_cnt > 1) {
11789 if (tg3_flag(tp, ENABLE_RSS))
11790 rnapi = &tp->napi[1];
11791 if (tg3_flag(tp, ENABLE_TSS))
11792 tnapi = &tp->napi[1];
11794 coal_now = tnapi->coal_now | rnapi->coal_now;
11796 err = -EIO;
11798 tx_len = pktsz;
11799 skb = netdev_alloc_skb(tp->dev, tx_len);
11800 if (!skb)
11801 return -ENOMEM;
11803 tx_data = skb_put(skb, tx_len);
11804 memcpy(tx_data, tp->dev->dev_addr, 6);
11805 memset(tx_data + 6, 0x0, 8);
11807 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11809 if (tso_loopback) {
11810 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11812 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11813 TG3_TSO_TCP_OPT_LEN;
11815 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11816 sizeof(tg3_tso_header));
11817 mss = TG3_TSO_MSS;
11819 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11820 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11822 /* Set the total length field in the IP header */
11823 iph->tot_len = htons((u16)(mss + hdr_len));
11825 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11826 TXD_FLAG_CPU_POST_DMA);
11828 if (tg3_flag(tp, HW_TSO_1) ||
11829 tg3_flag(tp, HW_TSO_2) ||
11830 tg3_flag(tp, HW_TSO_3)) {
11831 struct tcphdr *th;
11832 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11833 th = (struct tcphdr *)&tx_data[val];
11834 th->check = 0;
11835 } else
11836 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11838 if (tg3_flag(tp, HW_TSO_3)) {
11839 mss |= (hdr_len & 0xc) << 12;
11840 if (hdr_len & 0x10)
11841 base_flags |= 0x00000010;
11842 base_flags |= (hdr_len & 0x3e0) << 5;
11843 } else if (tg3_flag(tp, HW_TSO_2))
11844 mss |= hdr_len << 9;
11845 else if (tg3_flag(tp, HW_TSO_1) ||
11846 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11847 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11848 } else {
11849 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11852 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11853 } else {
11854 num_pkts = 1;
11855 data_off = ETH_HLEN;
11857 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
11858 tx_len > VLAN_ETH_FRAME_LEN)
11859 base_flags |= TXD_FLAG_JMB_PKT;
11862 for (i = data_off; i < tx_len; i++)
11863 tx_data[i] = (u8) (i & 0xff);
11865 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11866 if (pci_dma_mapping_error(tp->pdev, map)) {
11867 dev_kfree_skb(skb);
11868 return -EIO;
11871 val = tnapi->tx_prod;
11872 tnapi->tx_buffers[val].skb = skb;
11873 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11875 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11876 rnapi->coal_now);
11878 udelay(10);
11880 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11882 budget = tg3_tx_avail(tnapi);
11883 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11884 base_flags | TXD_FLAG_END, mss, 0)) {
11885 tnapi->tx_buffers[val].skb = NULL;
11886 dev_kfree_skb(skb);
11887 return -EIO;
11890 tnapi->tx_prod++;
11892 /* Sync BD data before updating mailbox */
11893 wmb();
11895 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11896 tr32_mailbox(tnapi->prodmbox);
11898 udelay(10);
11900 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11901 for (i = 0; i < 35; i++) {
11902 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11903 coal_now);
11905 udelay(10);
11907 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11908 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11909 if ((tx_idx == tnapi->tx_prod) &&
11910 (rx_idx == (rx_start_idx + num_pkts)))
11911 break;
11914 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11915 dev_kfree_skb(skb);
11917 if (tx_idx != tnapi->tx_prod)
11918 goto out;
11920 if (rx_idx != rx_start_idx + num_pkts)
11921 goto out;
11923 val = data_off;
11924 while (rx_idx != rx_start_idx) {
11925 desc = &rnapi->rx_rcb[rx_start_idx++];
11926 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11927 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11929 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11930 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11931 goto out;
11933 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11934 - ETH_FCS_LEN;
11936 if (!tso_loopback) {
11937 if (rx_len != tx_len)
11938 goto out;
11940 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11941 if (opaque_key != RXD_OPAQUE_RING_STD)
11942 goto out;
11943 } else {
11944 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11945 goto out;
11947 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11948 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11949 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11950 goto out;
11953 if (opaque_key == RXD_OPAQUE_RING_STD) {
11954 rx_data = tpr->rx_std_buffers[desc_idx].data;
11955 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11956 mapping);
11957 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11958 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
11959 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11960 mapping);
11961 } else
11962 goto out;
11964 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11965 PCI_DMA_FROMDEVICE);
11967 rx_data += TG3_RX_OFFSET(tp);
11968 for (i = data_off; i < rx_len; i++, val++) {
11969 if (*(rx_data + i) != (u8) (val & 0xff))
11970 goto out;
11974 err = 0;
11976 /* tg3_free_rings will unmap and free the rx_data */
11977 out:
11978 return err;
11981 #define TG3_STD_LOOPBACK_FAILED 1
11982 #define TG3_JMB_LOOPBACK_FAILED 2
11983 #define TG3_TSO_LOOPBACK_FAILED 4
11984 #define TG3_LOOPBACK_FAILED \
11985 (TG3_STD_LOOPBACK_FAILED | \
11986 TG3_JMB_LOOPBACK_FAILED | \
11987 TG3_TSO_LOOPBACK_FAILED)
11989 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11991 int err = -EIO;
11992 u32 eee_cap;
11993 u32 jmb_pkt_sz = 9000;
11995 if (tp->dma_limit)
11996 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
11998 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11999 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12001 if (!netif_running(tp->dev)) {
12002 data[0] = TG3_LOOPBACK_FAILED;
12003 data[1] = TG3_LOOPBACK_FAILED;
12004 if (do_extlpbk)
12005 data[2] = TG3_LOOPBACK_FAILED;
12006 goto done;
12009 err = tg3_reset_hw(tp, 1);
12010 if (err) {
12011 data[0] = TG3_LOOPBACK_FAILED;
12012 data[1] = TG3_LOOPBACK_FAILED;
12013 if (do_extlpbk)
12014 data[2] = TG3_LOOPBACK_FAILED;
12015 goto done;
12018 if (tg3_flag(tp, ENABLE_RSS)) {
12019 int i;
12021 /* Reroute all rx packets to the 1st queue */
12022 for (i = MAC_RSS_INDIR_TBL_0;
12023 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12024 tw32(i, 0x0);
12027 /* HW errata - MAC loopback fails in some cases on 5780.
12028 * Normal traffic and PHY loopback are not affected by this
12029 * erratum. Also, the MAC loopback test is deprecated for
12030 * all newer ASIC revisions.
12032 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
12033 !tg3_flag(tp, CPMU_PRESENT)) {
12034 tg3_mac_loopback(tp, true);
12036 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12037 data[0] |= TG3_STD_LOOPBACK_FAILED;
12039 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12040 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12041 data[0] |= TG3_JMB_LOOPBACK_FAILED;
12043 tg3_mac_loopback(tp, false);
12046 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12047 !tg3_flag(tp, USE_PHYLIB)) {
12048 int i;
12050 tg3_phy_lpbk_set(tp, 0, false);
12052 /* Wait for link */
12053 for (i = 0; i < 100; i++) {
12054 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12055 break;
12056 mdelay(1);
12059 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12060 data[1] |= TG3_STD_LOOPBACK_FAILED;
12061 if (tg3_flag(tp, TSO_CAPABLE) &&
12062 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12063 data[1] |= TG3_TSO_LOOPBACK_FAILED;
12064 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12065 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12066 data[1] |= TG3_JMB_LOOPBACK_FAILED;
12068 if (do_extlpbk) {
12069 tg3_phy_lpbk_set(tp, 0, true);
12071 /* All link indications report up, but the hardware
12072 * isn't really ready for about 20 msec. Double it
12073 * to be sure.
12075 mdelay(40);
12077 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12078 data[2] |= TG3_STD_LOOPBACK_FAILED;
12079 if (tg3_flag(tp, TSO_CAPABLE) &&
12080 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12081 data[2] |= TG3_TSO_LOOPBACK_FAILED;
12082 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12083 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12084 data[2] |= TG3_JMB_LOOPBACK_FAILED;
12087 /* Re-enable gphy autopowerdown. */
12088 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12089 tg3_phy_toggle_apd(tp, true);
12092 err = (data[0] | data[1] | data[2]) ? -EIO : 0;
12094 done:
12095 tp->phy_flags |= eee_cap;
12097 return err;
12100 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12101 u64 *data)
12103 struct tg3 *tp = netdev_priv(dev);
12104 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12106 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12107 tg3_power_up(tp)) {
12108 etest->flags |= ETH_TEST_FL_FAILED;
12109 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
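/* memset() writes the byte 0x01 into every byte of each u64, so the
 * reported results are 0x0101010101010101 rather than 1 -- still
 * nonzero, which is what marks a test as failed.
 */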
12110 return;
12113 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12115 if (tg3_test_nvram(tp) != 0) {
12116 etest->flags |= ETH_TEST_FL_FAILED;
12117 data[0] = 1;
12119 if (!doextlpbk && tg3_test_link(tp)) {
12120 etest->flags |= ETH_TEST_FL_FAILED;
12121 data[1] = 1;
12123 if (etest->flags & ETH_TEST_FL_OFFLINE) {
12124 int err, err2 = 0, irq_sync = 0;
12126 if (netif_running(dev)) {
12127 tg3_phy_stop(tp);
12128 tg3_netif_stop(tp);
12129 irq_sync = 1;
12132 tg3_full_lock(tp, irq_sync);
12134 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12135 err = tg3_nvram_lock(tp);
12136 tg3_halt_cpu(tp, RX_CPU_BASE);
12137 if (!tg3_flag(tp, 5705_PLUS))
12138 tg3_halt_cpu(tp, TX_CPU_BASE);
12139 if (!err)
12140 tg3_nvram_unlock(tp);
12142 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12143 tg3_phy_reset(tp);
12145 if (tg3_test_registers(tp) != 0) {
12146 etest->flags |= ETH_TEST_FL_FAILED;
12147 data[2] = 1;
12150 if (tg3_test_memory(tp) != 0) {
12151 etest->flags |= ETH_TEST_FL_FAILED;
12152 data[3] = 1;
12155 if (doextlpbk)
12156 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12158 if (tg3_test_loopback(tp, &data[4], doextlpbk))
12159 etest->flags |= ETH_TEST_FL_FAILED;
12161 tg3_full_unlock(tp);
12163 if (tg3_test_interrupt(tp) != 0) {
12164 etest->flags |= ETH_TEST_FL_FAILED;
12165 data[7] = 1;
12168 tg3_full_lock(tp, 0);
12170 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12171 if (netif_running(dev)) {
12172 tg3_flag_set(tp, INIT_COMPLETE);
12173 err2 = tg3_restart_hw(tp, 1);
12174 if (!err2)
12175 tg3_netif_start(tp);
12178 tg3_full_unlock(tp);
12180 if (irq_sync && !err2)
12181 tg3_phy_start(tp);
12183 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12184 tg3_power_down(tp);
12188 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12190 struct mii_ioctl_data *data = if_mii(ifr);
12191 struct tg3 *tp = netdev_priv(dev);
12192 int err;
12194 if (tg3_flag(tp, USE_PHYLIB)) {
12195 struct phy_device *phydev;
12196 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12197 return -EAGAIN;
12198 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12199 return phy_mii_ioctl(phydev, ifr, cmd);
12202 switch (cmd) {
12203 case SIOCGMIIPHY:
12204 data->phy_id = tp->phy_addr;
12206 /* fall through */
12207 case SIOCGMIIREG: {
12208 u32 mii_regval;
12210 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12211 break; /* We have no PHY */
12213 if (!netif_running(dev))
12214 return -EAGAIN;
12216 spin_lock_bh(&tp->lock);
12217 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12218 spin_unlock_bh(&tp->lock);
12220 data->val_out = mii_regval;
12222 return err;
12225 case SIOCSMIIREG:
12226 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12227 break; /* We have no PHY */
12229 if (!netif_running(dev))
12230 return -EAGAIN;
12232 spin_lock_bh(&tp->lock);
12233 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12234 spin_unlock_bh(&tp->lock);
12236 return err;
12238 default:
12239 /* do nothing */
12240 break;
12242 return -EOPNOTSUPP;
12245 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12247 struct tg3 *tp = netdev_priv(dev);
12249 memcpy(ec, &tp->coal, sizeof(*ec));
12250 return 0;
12253 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12255 struct tg3 *tp = netdev_priv(dev);
12256 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12257 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12259 if (!tg3_flag(tp, 5705_PLUS)) {
12260 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12261 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12262 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12263 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12266 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12267 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12268 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12269 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12270 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12271 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12272 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12273 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12274 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12275 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12276 return -EINVAL;
12278 /* No rx interrupts will be generated if both are zero */
12279 if ((ec->rx_coalesce_usecs == 0) &&
12280 (ec->rx_max_coalesced_frames == 0))
12281 return -EINVAL;
12283 /* No tx interrupts will be generated if both are zero */
12284 if ((ec->tx_coalesce_usecs == 0) &&
12285 (ec->tx_max_coalesced_frames == 0))
12286 return -EINVAL;
12288 /* Only copy relevant parameters, ignore all others. */
12289 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12290 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12291 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12292 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12293 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12294 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12295 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12296 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12297 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12299 if (netif_running(dev)) {
12300 tg3_full_lock(tp, 0);
12301 __tg3_set_coalesce(tp, &tp->coal);
12302 tg3_full_unlock(tp);
12304 return 0;
12307 static const struct ethtool_ops tg3_ethtool_ops = {
12308 .get_settings = tg3_get_settings,
12309 .set_settings = tg3_set_settings,
12310 .get_drvinfo = tg3_get_drvinfo,
12311 .get_regs_len = tg3_get_regs_len,
12312 .get_regs = tg3_get_regs,
12313 .get_wol = tg3_get_wol,
12314 .set_wol = tg3_set_wol,
12315 .get_msglevel = tg3_get_msglevel,
12316 .set_msglevel = tg3_set_msglevel,
12317 .nway_reset = tg3_nway_reset,
12318 .get_link = ethtool_op_get_link,
12319 .get_eeprom_len = tg3_get_eeprom_len,
12320 .get_eeprom = tg3_get_eeprom,
12321 .set_eeprom = tg3_set_eeprom,
12322 .get_ringparam = tg3_get_ringparam,
12323 .set_ringparam = tg3_set_ringparam,
12324 .get_pauseparam = tg3_get_pauseparam,
12325 .set_pauseparam = tg3_set_pauseparam,
12326 .self_test = tg3_self_test,
12327 .get_strings = tg3_get_strings,
12328 .set_phys_id = tg3_set_phys_id,
12329 .get_ethtool_stats = tg3_get_ethtool_stats,
12330 .get_coalesce = tg3_get_coalesce,
12331 .set_coalesce = tg3_set_coalesce,
12332 .get_sset_count = tg3_get_sset_count,
12333 .get_rxnfc = tg3_get_rxnfc,
12334 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
12335 .get_rxfh_indir = tg3_get_rxfh_indir,
12336 .set_rxfh_indir = tg3_set_rxfh_indir,
12339 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12340 struct rtnl_link_stats64 *stats)
12342 struct tg3 *tp = netdev_priv(dev);
12344 spin_lock_bh(&tp->lock);
12345 if (!tp->hw_stats) {
12346 spin_unlock_bh(&tp->lock);
12347 return &tp->net_stats_prev;
12350 tg3_get_nstats(tp, stats);
12351 spin_unlock_bh(&tp->lock);
12353 return stats;
12356 static void tg3_set_rx_mode(struct net_device *dev)
12358 struct tg3 *tp = netdev_priv(dev);
12360 if (!netif_running(dev))
12361 return;
12363 tg3_full_lock(tp, 0);
12364 __tg3_set_rx_mode(dev);
12365 tg3_full_unlock(tp);
12368 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12369 int new_mtu)
12371 dev->mtu = new_mtu;
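/* On 5780-class parts jumbo frames and TSO appear to be mutually
 * exclusive: JUMBO_RING_ENABLE is never set for them below, and
 * TSO_CAPABLE is toggled instead, with the feature set recomputed
 * via netdev_update_features().
 */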
12373 if (new_mtu > ETH_DATA_LEN) {
12374 if (tg3_flag(tp, 5780_CLASS)) {
12375 netdev_update_features(dev);
12376 tg3_flag_clear(tp, TSO_CAPABLE);
12377 } else {
12378 tg3_flag_set(tp, JUMBO_RING_ENABLE);
12380 } else {
12381 if (tg3_flag(tp, 5780_CLASS)) {
12382 tg3_flag_set(tp, TSO_CAPABLE);
12383 netdev_update_features(dev);
12385 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12389 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12391 struct tg3 *tp = netdev_priv(dev);
12392 int err, reset_phy = 0;
12394 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12395 return -EINVAL;
12397 if (!netif_running(dev)) {
12398 /* We'll just catch it later when the
12399 * device is brought up.
12401 tg3_set_mtu(dev, tp, new_mtu);
12402 return 0;
12405 tg3_phy_stop(tp);
12407 tg3_netif_stop(tp);
12409 tg3_set_mtu(dev, tp, new_mtu);
12411 tg3_full_lock(tp, 1);
12413 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12415 /* Reset PHY, otherwise the read DMA engine will be in a mode that
12416 * breaks all requests to 256 bytes.
12418 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
12419 reset_phy = 1;
12421 err = tg3_restart_hw(tp, reset_phy);
12423 if (!err)
12424 tg3_netif_start(tp);
12426 tg3_full_unlock(tp);
12428 if (!err)
12429 tg3_phy_start(tp);
12431 return err;
12434 static const struct net_device_ops tg3_netdev_ops = {
12435 .ndo_open = tg3_open,
12436 .ndo_stop = tg3_close,
12437 .ndo_start_xmit = tg3_start_xmit,
12438 .ndo_get_stats64 = tg3_get_stats64,
12439 .ndo_validate_addr = eth_validate_addr,
12440 .ndo_set_rx_mode = tg3_set_rx_mode,
12441 .ndo_set_mac_address = tg3_set_mac_addr,
12442 .ndo_do_ioctl = tg3_ioctl,
12443 .ndo_tx_timeout = tg3_tx_timeout,
12444 .ndo_change_mtu = tg3_change_mtu,
12445 .ndo_fix_features = tg3_fix_features,
12446 .ndo_set_features = tg3_set_features,
12447 #ifdef CONFIG_NET_POLL_CONTROLLER
12448 .ndo_poll_controller = tg3_poll_controller,
12449 #endif
12452 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12454 u32 cursize, val, magic;
12456 tp->nvram_size = EEPROM_CHIP_SIZE;
12458 if (tg3_nvram_read(tp, 0, &magic) != 0)
12459 return;
12461 if ((magic != TG3_EEPROM_MAGIC) &&
12462 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12463 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12464 return;
12466 /*
12467 * Size the chip by reading offsets at increasing powers of two.
12468 * When we encounter our validation signature, we know the addressing
12469 * has wrapped around, and thus have our chip size.
12471 cursize = 0x10;
12473 while (cursize < tp->nvram_size) {
12474 if (tg3_nvram_read(tp, cursize, &val) != 0)
12475 return;
12477 if (val == magic)
12478 break;
12480 cursize <<= 1;
12483 tp->nvram_size = cursize;
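/* e.g. on a 32 KiB part the loop above reads 0x10, 0x20, ..., 0x4000
 * without seeing the signature; the read at 0x8000 wraps back to
 * offset 0, returns the magic, and nvram_size settles at 0x8000.
 */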
12486 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12488 u32 val;
12490 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12491 return;
12493 /* Selfboot format */
12494 if (val != TG3_EEPROM_MAGIC) {
12495 tg3_get_eeprom_size(tp);
12496 return;
12499 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12500 if (val != 0) {
12501 /* This is confusing. We want to operate on the
12502 * 16-bit value at offset 0xf2. The tg3_nvram_read()
12503 * call will read from NVRAM and byteswap the data
12504 * according to the byteswapping settings for all
12505 * other register accesses. This ensures the data we
12506 * want will always reside in the lower 16-bits.
12507 * However, the data in NVRAM is in LE format, which
12508 * means the data from the NVRAM read will always be
12509 * opposite the endianness of the CPU. The 16-bit
12510 * byteswap then brings the data to CPU endianness.
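 * For instance, if the halfword at 0xf2 holds 512 (0x0200), a
 * little-endian CPU reads back 0x0002 in the low 16 bits and swab16()
 * restores 0x0200, giving an nvram_size of 512 * 1024 bytes.
 */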
12512 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12513 return;
12516 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12519 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12521 u32 nvcfg1;
12523 nvcfg1 = tr32(NVRAM_CFG1);
12524 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12525 tg3_flag_set(tp, FLASH);
12526 } else {
12527 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12528 tw32(NVRAM_CFG1, nvcfg1);
12531 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12532 tg3_flag(tp, 5780_CLASS)) {
12533 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12534 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12535 tp->nvram_jedecnum = JEDEC_ATMEL;
12536 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12537 tg3_flag_set(tp, NVRAM_BUFFERED);
12538 break;
12539 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12540 tp->nvram_jedecnum = JEDEC_ATMEL;
12541 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12542 break;
12543 case FLASH_VENDOR_ATMEL_EEPROM:
12544 tp->nvram_jedecnum = JEDEC_ATMEL;
12545 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12546 tg3_flag_set(tp, NVRAM_BUFFERED);
12547 break;
12548 case FLASH_VENDOR_ST:
12549 tp->nvram_jedecnum = JEDEC_ST;
12550 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12551 tg3_flag_set(tp, NVRAM_BUFFERED);
12552 break;
12553 case FLASH_VENDOR_SAIFUN:
12554 tp->nvram_jedecnum = JEDEC_SAIFUN;
12555 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12556 break;
12557 case FLASH_VENDOR_SST_SMALL:
12558 case FLASH_VENDOR_SST_LARGE:
12559 tp->nvram_jedecnum = JEDEC_SST;
12560 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12561 break;
12563 } else {
12564 tp->nvram_jedecnum = JEDEC_ATMEL;
12565 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12566 tg3_flag_set(tp, NVRAM_BUFFERED);
12570 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12572 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12573 case FLASH_5752PAGE_SIZE_256:
12574 tp->nvram_pagesize = 256;
12575 break;
12576 case FLASH_5752PAGE_SIZE_512:
12577 tp->nvram_pagesize = 512;
12578 break;
12579 case FLASH_5752PAGE_SIZE_1K:
12580 tp->nvram_pagesize = 1024;
12581 break;
12582 case FLASH_5752PAGE_SIZE_2K:
12583 tp->nvram_pagesize = 2048;
12584 break;
12585 case FLASH_5752PAGE_SIZE_4K:
12586 tp->nvram_pagesize = 4096;
12587 break;
12588 case FLASH_5752PAGE_SIZE_264:
12589 tp->nvram_pagesize = 264;
12590 break;
12591 case FLASH_5752PAGE_SIZE_528:
12592 tp->nvram_pagesize = 528;
12593 break;
12597 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12599 u32 nvcfg1;
12601 nvcfg1 = tr32(NVRAM_CFG1);
12603 /* NVRAM protection for TPM */
12604 if (nvcfg1 & (1 << 27))
12605 tg3_flag_set(tp, PROTECTED_NVRAM);
12607 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12608 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12609 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12610 tp->nvram_jedecnum = JEDEC_ATMEL;
12611 tg3_flag_set(tp, NVRAM_BUFFERED);
12612 break;
12613 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12614 tp->nvram_jedecnum = JEDEC_ATMEL;
12615 tg3_flag_set(tp, NVRAM_BUFFERED);
12616 tg3_flag_set(tp, FLASH);
12617 break;
12618 case FLASH_5752VENDOR_ST_M45PE10:
12619 case FLASH_5752VENDOR_ST_M45PE20:
12620 case FLASH_5752VENDOR_ST_M45PE40:
12621 tp->nvram_jedecnum = JEDEC_ST;
12622 tg3_flag_set(tp, NVRAM_BUFFERED);
12623 tg3_flag_set(tp, FLASH);
12624 break;
12627 if (tg3_flag(tp, FLASH)) {
12628 tg3_nvram_get_pagesize(tp, nvcfg1);
12629 } else {
12630 /* For eeprom, set pagesize to maximum eeprom size */
12631 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12633 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12634 tw32(NVRAM_CFG1, nvcfg1);
12638 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12640 u32 nvcfg1, protect = 0;
12642 nvcfg1 = tr32(NVRAM_CFG1);
12644 /* NVRAM protection for TPM */
12645 if (nvcfg1 & (1 << 27)) {
12646 tg3_flag_set(tp, PROTECTED_NVRAM);
12647 protect = 1;
12650 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12651 switch (nvcfg1) {
12652 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12653 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12654 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12655 case FLASH_5755VENDOR_ATMEL_FLASH_5:
12656 tp->nvram_jedecnum = JEDEC_ATMEL;
12657 tg3_flag_set(tp, NVRAM_BUFFERED);
12658 tg3_flag_set(tp, FLASH);
12659 tp->nvram_pagesize = 264;
12660 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12661 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12662 tp->nvram_size = (protect ? 0x3e200 :
12663 TG3_NVRAM_SIZE_512KB);
12664 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12665 tp->nvram_size = (protect ? 0x1f200 :
12666 TG3_NVRAM_SIZE_256KB);
12667 else
12668 tp->nvram_size = (protect ? 0x1f200 :
12669 TG3_NVRAM_SIZE_128KB);
12670 break;
12671 case FLASH_5752VENDOR_ST_M45PE10:
12672 case FLASH_5752VENDOR_ST_M45PE20:
12673 case FLASH_5752VENDOR_ST_M45PE40:
12674 tp->nvram_jedecnum = JEDEC_ST;
12675 tg3_flag_set(tp, NVRAM_BUFFERED);
12676 tg3_flag_set(tp, FLASH);
12677 tp->nvram_pagesize = 256;
12678 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12679 tp->nvram_size = (protect ?
12680 TG3_NVRAM_SIZE_64KB :
12681 TG3_NVRAM_SIZE_128KB);
12682 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12683 tp->nvram_size = (protect ?
12684 TG3_NVRAM_SIZE_64KB :
12685 TG3_NVRAM_SIZE_256KB);
12686 else
12687 tp->nvram_size = (protect ?
12688 TG3_NVRAM_SIZE_128KB :
12689 TG3_NVRAM_SIZE_512KB);
12690 break;
12694 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12696 u32 nvcfg1;
12698 nvcfg1 = tr32(NVRAM_CFG1);
12700 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12701 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12702 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12703 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12704 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12705 tp->nvram_jedecnum = JEDEC_ATMEL;
12706 tg3_flag_set(tp, NVRAM_BUFFERED);
12707 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12709 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12710 tw32(NVRAM_CFG1, nvcfg1);
12711 break;
12712 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12713 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12714 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12715 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12716 tp->nvram_jedecnum = JEDEC_ATMEL;
12717 tg3_flag_set(tp, NVRAM_BUFFERED);
12718 tg3_flag_set(tp, FLASH);
12719 tp->nvram_pagesize = 264;
12720 break;
12721 case FLASH_5752VENDOR_ST_M45PE10:
12722 case FLASH_5752VENDOR_ST_M45PE20:
12723 case FLASH_5752VENDOR_ST_M45PE40:
12724 tp->nvram_jedecnum = JEDEC_ST;
12725 tg3_flag_set(tp, NVRAM_BUFFERED);
12726 tg3_flag_set(tp, FLASH);
12727 tp->nvram_pagesize = 256;
12728 break;
12732 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12734 u32 nvcfg1, protect = 0;
12736 nvcfg1 = tr32(NVRAM_CFG1);
12738 /* NVRAM protection for TPM */
12739 if (nvcfg1 & (1 << 27)) {
12740 tg3_flag_set(tp, PROTECTED_NVRAM);
12741 protect = 1;
12744 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12745 switch (nvcfg1) {
12746 case FLASH_5761VENDOR_ATMEL_ADB021D:
12747 case FLASH_5761VENDOR_ATMEL_ADB041D:
12748 case FLASH_5761VENDOR_ATMEL_ADB081D:
12749 case FLASH_5761VENDOR_ATMEL_ADB161D:
12750 case FLASH_5761VENDOR_ATMEL_MDB021D:
12751 case FLASH_5761VENDOR_ATMEL_MDB041D:
12752 case FLASH_5761VENDOR_ATMEL_MDB081D:
12753 case FLASH_5761VENDOR_ATMEL_MDB161D:
12754 tp->nvram_jedecnum = JEDEC_ATMEL;
12755 tg3_flag_set(tp, NVRAM_BUFFERED);
12756 tg3_flag_set(tp, FLASH);
12757 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12758 tp->nvram_pagesize = 256;
12759 break;
12760 case FLASH_5761VENDOR_ST_A_M45PE20:
12761 case FLASH_5761VENDOR_ST_A_M45PE40:
12762 case FLASH_5761VENDOR_ST_A_M45PE80:
12763 case FLASH_5761VENDOR_ST_A_M45PE16:
12764 case FLASH_5761VENDOR_ST_M_M45PE20:
12765 case FLASH_5761VENDOR_ST_M_M45PE40:
12766 case FLASH_5761VENDOR_ST_M_M45PE80:
12767 case FLASH_5761VENDOR_ST_M_M45PE16:
12768 tp->nvram_jedecnum = JEDEC_ST;
12769 tg3_flag_set(tp, NVRAM_BUFFERED);
12770 tg3_flag_set(tp, FLASH);
12771 tp->nvram_pagesize = 256;
12772 break;
12775 if (protect) {
12776 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12777 } else {
12778 switch (nvcfg1) {
12779 case FLASH_5761VENDOR_ATMEL_ADB161D:
12780 case FLASH_5761VENDOR_ATMEL_MDB161D:
12781 case FLASH_5761VENDOR_ST_A_M45PE16:
12782 case FLASH_5761VENDOR_ST_M_M45PE16:
12783 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12784 break;
12785 case FLASH_5761VENDOR_ATMEL_ADB081D:
12786 case FLASH_5761VENDOR_ATMEL_MDB081D:
12787 case FLASH_5761VENDOR_ST_A_M45PE80:
12788 case FLASH_5761VENDOR_ST_M_M45PE80:
12789 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12790 break;
12791 case FLASH_5761VENDOR_ATMEL_ADB041D:
12792 case FLASH_5761VENDOR_ATMEL_MDB041D:
12793 case FLASH_5761VENDOR_ST_A_M45PE40:
12794 case FLASH_5761VENDOR_ST_M_M45PE40:
12795 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12796 break;
12797 case FLASH_5761VENDOR_ATMEL_ADB021D:
12798 case FLASH_5761VENDOR_ATMEL_MDB021D:
12799 case FLASH_5761VENDOR_ST_A_M45PE20:
12800 case FLASH_5761VENDOR_ST_M_M45PE20:
12801 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12802 break;
12807 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12809 tp->nvram_jedecnum = JEDEC_ATMEL;
12810 tg3_flag_set(tp, NVRAM_BUFFERED);
12811 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12814 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12816 u32 nvcfg1;
12818 nvcfg1 = tr32(NVRAM_CFG1);
12820 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12821 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12822 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12823 tp->nvram_jedecnum = JEDEC_ATMEL;
12824 tg3_flag_set(tp, NVRAM_BUFFERED);
12825 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12827 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12828 tw32(NVRAM_CFG1, nvcfg1);
12829 return;
12830 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12831 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12832 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12833 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12834 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12835 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12836 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12837 tp->nvram_jedecnum = JEDEC_ATMEL;
12838 tg3_flag_set(tp, NVRAM_BUFFERED);
12839 tg3_flag_set(tp, FLASH);
12841 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12842 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12843 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12844 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12845 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12846 break;
12847 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12848 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12849 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12850 break;
12851 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12852 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12853 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12854 break;
12856 break;
12857 case FLASH_5752VENDOR_ST_M45PE10:
12858 case FLASH_5752VENDOR_ST_M45PE20:
12859 case FLASH_5752VENDOR_ST_M45PE40:
12860 tp->nvram_jedecnum = JEDEC_ST;
12861 tg3_flag_set(tp, NVRAM_BUFFERED);
12862 tg3_flag_set(tp, FLASH);
12864 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12865 case FLASH_5752VENDOR_ST_M45PE10:
12866 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12867 break;
12868 case FLASH_5752VENDOR_ST_M45PE20:
12869 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12870 break;
12871 case FLASH_5752VENDOR_ST_M45PE40:
12872 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12873 break;
12875 break;
12876 default:
12877 tg3_flag_set(tp, NO_NVRAM);
12878 return;
12881 tg3_nvram_get_pagesize(tp, nvcfg1);
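/* Pages of 264 or 528 bytes indicate Atmel DataFlash parts whose
 * addressing is page-based rather than linear; all other parts use a
 * flat address space and can skip the address translation.
 */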
12882 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12883 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12887 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12889 u32 nvcfg1;
12891 nvcfg1 = tr32(NVRAM_CFG1);
12893 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12894 case FLASH_5717VENDOR_ATMEL_EEPROM:
12895 case FLASH_5717VENDOR_MICRO_EEPROM:
12896 tp->nvram_jedecnum = JEDEC_ATMEL;
12897 tg3_flag_set(tp, NVRAM_BUFFERED);
12898 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12900 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12901 tw32(NVRAM_CFG1, nvcfg1);
12902 return;
12903 case FLASH_5717VENDOR_ATMEL_MDB011D:
12904 case FLASH_5717VENDOR_ATMEL_ADB011B:
12905 case FLASH_5717VENDOR_ATMEL_ADB011D:
12906 case FLASH_5717VENDOR_ATMEL_MDB021D:
12907 case FLASH_5717VENDOR_ATMEL_ADB021B:
12908 case FLASH_5717VENDOR_ATMEL_ADB021D:
12909 case FLASH_5717VENDOR_ATMEL_45USPT:
12910 tp->nvram_jedecnum = JEDEC_ATMEL;
12911 tg3_flag_set(tp, NVRAM_BUFFERED);
12912 tg3_flag_set(tp, FLASH);
12914 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12915 case FLASH_5717VENDOR_ATMEL_MDB021D:
12916 /* Detect size with tg3_get_nvram_size() */
12917 break;
12918 case FLASH_5717VENDOR_ATMEL_ADB021B:
12919 case FLASH_5717VENDOR_ATMEL_ADB021D:
12920 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12921 break;
12922 default:
12923 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12924 break;
12926 break;
12927 case FLASH_5717VENDOR_ST_M_M25PE10:
12928 case FLASH_5717VENDOR_ST_A_M25PE10:
12929 case FLASH_5717VENDOR_ST_M_M45PE10:
12930 case FLASH_5717VENDOR_ST_A_M45PE10:
12931 case FLASH_5717VENDOR_ST_M_M25PE20:
12932 case FLASH_5717VENDOR_ST_A_M25PE20:
12933 case FLASH_5717VENDOR_ST_M_M45PE20:
12934 case FLASH_5717VENDOR_ST_A_M45PE20:
12935 case FLASH_5717VENDOR_ST_25USPT:
12936 case FLASH_5717VENDOR_ST_45USPT:
12937 tp->nvram_jedecnum = JEDEC_ST;
12938 tg3_flag_set(tp, NVRAM_BUFFERED);
12939 tg3_flag_set(tp, FLASH);
12941 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12942 case FLASH_5717VENDOR_ST_M_M25PE20:
12943 case FLASH_5717VENDOR_ST_M_M45PE20:
12944 /* Detect size with tg3_get_nvram_size() */
12945 break;
12946 case FLASH_5717VENDOR_ST_A_M25PE20:
12947 case FLASH_5717VENDOR_ST_A_M45PE20:
12948 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12949 break;
12950 default:
12951 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12952 break;
12954 break;
12955 default:
12956 tg3_flag_set(tp, NO_NVRAM);
12957 return;
12960 tg3_nvram_get_pagesize(tp, nvcfg1);
12961 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12962 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12965 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12967 u32 nvcfg1, nvmpinstrp;
12969 nvcfg1 = tr32(NVRAM_CFG1);
12970 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12972 switch (nvmpinstrp) {
12973 case FLASH_5720_EEPROM_HD:
12974 case FLASH_5720_EEPROM_LD:
12975 tp->nvram_jedecnum = JEDEC_ATMEL;
12976 tg3_flag_set(tp, NVRAM_BUFFERED);
12978 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12979 tw32(NVRAM_CFG1, nvcfg1);
12980 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12981 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12982 else
12983 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12984 return;
12985 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12986 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12987 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12988 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12989 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12990 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12991 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12992 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12993 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12994 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12995 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12996 case FLASH_5720VENDOR_ATMEL_45USPT:
12997 tp->nvram_jedecnum = JEDEC_ATMEL;
12998 tg3_flag_set(tp, NVRAM_BUFFERED);
12999 tg3_flag_set(tp, FLASH);
13001 switch (nvmpinstrp) {
13002 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13003 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13004 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13005 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13006 break;
13007 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13008 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13009 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13010 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13011 break;
13012 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13013 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13014 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13015 break;
13016 default:
13017 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13018 break;
13020 break;
13021 case FLASH_5720VENDOR_M_ST_M25PE10:
13022 case FLASH_5720VENDOR_M_ST_M45PE10:
13023 case FLASH_5720VENDOR_A_ST_M25PE10:
13024 case FLASH_5720VENDOR_A_ST_M45PE10:
13025 case FLASH_5720VENDOR_M_ST_M25PE20:
13026 case FLASH_5720VENDOR_M_ST_M45PE20:
13027 case FLASH_5720VENDOR_A_ST_M25PE20:
13028 case FLASH_5720VENDOR_A_ST_M45PE20:
13029 case FLASH_5720VENDOR_M_ST_M25PE40:
13030 case FLASH_5720VENDOR_M_ST_M45PE40:
13031 case FLASH_5720VENDOR_A_ST_M25PE40:
13032 case FLASH_5720VENDOR_A_ST_M45PE40:
13033 case FLASH_5720VENDOR_M_ST_M25PE80:
13034 case FLASH_5720VENDOR_M_ST_M45PE80:
13035 case FLASH_5720VENDOR_A_ST_M25PE80:
13036 case FLASH_5720VENDOR_A_ST_M45PE80:
13037 case FLASH_5720VENDOR_ST_25USPT:
13038 case FLASH_5720VENDOR_ST_45USPT:
13039 tp->nvram_jedecnum = JEDEC_ST;
13040 tg3_flag_set(tp, NVRAM_BUFFERED);
13041 tg3_flag_set(tp, FLASH);
13043 switch (nvmpinstrp) {
13044 case FLASH_5720VENDOR_M_ST_M25PE20:
13045 case FLASH_5720VENDOR_M_ST_M45PE20:
13046 case FLASH_5720VENDOR_A_ST_M25PE20:
13047 case FLASH_5720VENDOR_A_ST_M45PE20:
13048 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13049 break;
13050 case FLASH_5720VENDOR_M_ST_M25PE40:
13051 case FLASH_5720VENDOR_M_ST_M45PE40:
13052 case FLASH_5720VENDOR_A_ST_M25PE40:
13053 case FLASH_5720VENDOR_A_ST_M45PE40:
13054 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13055 break;
13056 case FLASH_5720VENDOR_M_ST_M25PE80:
13057 case FLASH_5720VENDOR_M_ST_M45PE80:
13058 case FLASH_5720VENDOR_A_ST_M25PE80:
13059 case FLASH_5720VENDOR_A_ST_M45PE80:
13060 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13061 break;
13062 default:
13063 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13064 break;
13066 break;
13067 default:
13068 tg3_flag_set(tp, NO_NVRAM);
13069 return;
13072 tg3_nvram_get_pagesize(tp, nvcfg1);
13073 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13074 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13077 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13078 static void __devinit tg3_nvram_init(struct tg3 *tp)
13080 tw32_f(GRC_EEPROM_ADDR,
13081 (EEPROM_ADDR_FSM_RESET |
13082 (EEPROM_DEFAULT_CLOCK_PERIOD <<
13083 EEPROM_ADDR_CLKPERD_SHIFT)));
13085 msleep(1);
13087 /* Enable seeprom accesses. */
13088 tw32_f(GRC_LOCAL_CTRL,
13089 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13090 udelay(100);
13092 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13093 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13094 tg3_flag_set(tp, NVRAM);
13096 if (tg3_nvram_lock(tp)) {
13097 netdev_warn(tp->dev,
13098 "Cannot get nvram lock, %s failed\n",
13099 __func__);
13100 return;
13102 tg3_enable_nvram_access(tp);
13104 tp->nvram_size = 0;
13106 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13107 tg3_get_5752_nvram_info(tp);
13108 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13109 tg3_get_5755_nvram_info(tp);
13110 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13111 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13112 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13113 tg3_get_5787_nvram_info(tp);
13114 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13115 tg3_get_5761_nvram_info(tp);
13116 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13117 tg3_get_5906_nvram_info(tp);
13118 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13119 tg3_flag(tp, 57765_CLASS))
13120 tg3_get_57780_nvram_info(tp);
13121 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13122 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13123 tg3_get_5717_nvram_info(tp);
13124 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13125 tg3_get_5720_nvram_info(tp);
13126 else
13127 tg3_get_nvram_info(tp);
13129 if (tp->nvram_size == 0)
13130 tg3_get_nvram_size(tp);
13132 tg3_disable_nvram_access(tp);
13133 tg3_nvram_unlock(tp);
13135 } else {
13136 tg3_flag_clear(tp, NVRAM);
13137 tg3_flag_clear(tp, NVRAM_BUFFERED);
13139 tg3_get_eeprom_size(tp);
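/* Fallback PHY identification for boards whose EEPROM carries no
 * valid signature: map the PCI subsystem vendor/device IDs to known
 * PHY IDs. A phy_id of 0 denotes a serdes (fiber) board, which
 * tg3_phy_probe() translates into TG3_PHYFLG_PHY_SERDES.
 */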
13143 struct subsys_tbl_ent {
13144 u16 subsys_vendor, subsys_devid;
13145 u32 phy_id;
13148 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13149 /* Broadcom boards. */
13150 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13151 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13152 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13153 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13154 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13155 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13156 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13157 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13158 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13159 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13160 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13161 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13162 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13163 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13164 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13165 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13166 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13167 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13168 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13169 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13170 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13171 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13173 /* 3com boards. */
13174 { TG3PCI_SUBVENDOR_ID_3COM,
13175 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13176 { TG3PCI_SUBVENDOR_ID_3COM,
13177 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13178 { TG3PCI_SUBVENDOR_ID_3COM,
13179 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13180 { TG3PCI_SUBVENDOR_ID_3COM,
13181 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13182 { TG3PCI_SUBVENDOR_ID_3COM,
13183 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13185 /* DELL boards. */
13186 { TG3PCI_SUBVENDOR_ID_DELL,
13187 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13188 { TG3PCI_SUBVENDOR_ID_DELL,
13189 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13190 { TG3PCI_SUBVENDOR_ID_DELL,
13191 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13192 { TG3PCI_SUBVENDOR_ID_DELL,
13193 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13195 /* Compaq boards. */
13196 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13197 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13198 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13199 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13200 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13201 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13202 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13203 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13204 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13205 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13207 /* IBM boards. */
13208 { TG3PCI_SUBVENDOR_ID_IBM,
13209 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13212 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13214 int i;
13216 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13217 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13218 tp->pdev->subsystem_vendor) &&
13219 (subsys_id_to_phy_id[i].subsys_devid ==
13220 tp->pdev->subsystem_device))
13221 return &subsys_id_to_phy_id[i];
13223 return NULL;
13226 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13228 u32 val;
13230 tp->phy_id = TG3_PHY_ID_INVALID;
13231 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13233 /* Assume an onboard device and WOL capable by default. */
13234 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13235 tg3_flag_set(tp, WOL_CAP);
13237 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13238 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13239 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13240 tg3_flag_set(tp, IS_NIC);
13242 val = tr32(VCPU_CFGSHDW);
13243 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13244 tg3_flag_set(tp, ASPM_WORKAROUND);
13245 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13246 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13247 tg3_flag_set(tp, WOL_ENABLE);
13248 device_set_wakeup_enable(&tp->pdev->dev, true);
13250 goto done;
13253 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13254 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13255 u32 nic_cfg, led_cfg;
13256 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13257 int eeprom_phy_serdes = 0;
13259 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13260 tp->nic_sram_data_cfg = nic_cfg;
13262 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13263 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13264 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13265 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13266 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13267 (ver > 0) && (ver < 0x100))
13268 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13270 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13271 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13273 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13274 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13275 eeprom_phy_serdes = 1;
13277 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13278 if (nic_phy_id != 0) {
13279 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13280 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13282 eeprom_phy_id = (id1 >> 16) << 10;
13283 eeprom_phy_id |= (id2 & 0xfc00) << 16;
13284 eeprom_phy_id |= (id2 & 0x03ff) << 0;
13285 } else
13286 eeprom_phy_id = 0;
13288 tp->phy_id = eeprom_phy_id;
13289 if (eeprom_phy_serdes) {
13290 if (!tg3_flag(tp, 5705_PLUS))
13291 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13292 else
13293 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13296 if (tg3_flag(tp, 5750_PLUS))
13297 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13298 SHASTA_EXT_LED_MODE_MASK);
13299 else
13300 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13302 switch (led_cfg) {
13303 default:
13304 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13305 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13306 break;
13308 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13309 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13310 break;
13312 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13313 tp->led_ctrl = LED_CTRL_MODE_MAC;
13315 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13316 * read, as happens with some older 5700/5701 bootcode.
13318 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13319 ASIC_REV_5700 ||
13320 GET_ASIC_REV(tp->pci_chip_rev_id) ==
13321 ASIC_REV_5701)
13322 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13324 break;
13326 case SHASTA_EXT_LED_SHARED:
13327 tp->led_ctrl = LED_CTRL_MODE_SHARED;
13328 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13329 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13330 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13331 LED_CTRL_MODE_PHY_2);
13332 break;
13334 case SHASTA_EXT_LED_MAC:
13335 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13336 break;
13338 case SHASTA_EXT_LED_COMBO:
13339 tp->led_ctrl = LED_CTRL_MODE_COMBO;
13340 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13341 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13342 LED_CTRL_MODE_PHY_2);
13343 break;
13347 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13348 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13349 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13350 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13352 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13353 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13355 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13356 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13357 if ((tp->pdev->subsystem_vendor ==
13358 PCI_VENDOR_ID_ARIMA) &&
13359 (tp->pdev->subsystem_device == 0x205a ||
13360 tp->pdev->subsystem_device == 0x2063))
13361 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13362 } else {
13363 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13364 tg3_flag_set(tp, IS_NIC);
13367 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13368 tg3_flag_set(tp, ENABLE_ASF);
13369 if (tg3_flag(tp, 5750_PLUS))
13370 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13373 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13374 tg3_flag(tp, 5750_PLUS))
13375 tg3_flag_set(tp, ENABLE_APE);
13377 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13378 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13379 tg3_flag_clear(tp, WOL_CAP);
13381 if (tg3_flag(tp, WOL_CAP) &&
13382 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13383 tg3_flag_set(tp, WOL_ENABLE);
13384 device_set_wakeup_enable(&tp->pdev->dev, true);
13387 if (cfg2 & (1 << 17))
13388 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13390 /* serdes signal pre-emphasis in register 0x590 set by */
13391 /* bootcode if bit 18 is set */
13392 if (cfg2 & (1 << 18))
13393 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13395 if ((tg3_flag(tp, 57765_PLUS) ||
13396 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13397 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13398 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13399 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13401 if (tg3_flag(tp, PCI_EXPRESS) &&
13402 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13403 !tg3_flag(tp, 57765_PLUS)) {
13404 u32 cfg3;
13406 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13407 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13408 tg3_flag_set(tp, ASPM_WORKAROUND);
13411 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13412 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13413 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13414 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13415 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13416 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13418 done:
13419 if (tg3_flag(tp, WOL_CAP))
13420 device_set_wakeup_enable(&tp->pdev->dev,
13421 tg3_flag(tp, WOL_ENABLE));
13422 else
13423 device_set_wakeup_capable(&tp->pdev->dev, false);
13426 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13428 int i;
13429 u32 val;
13431 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13432 tw32(OTP_CTRL, cmd);
13434 /* Wait for up to 1 ms for command to execute. */
13435 for (i = 0; i < 100; i++) {
13436 val = tr32(OTP_STATUS);
13437 if (val & OTP_STATUS_CMD_DONE)
13438 break;
13439 udelay(10);
13442 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13445 /* Read the gphy configuration from the OTP region of the chip. The gphy
13446 * configuration is a 32-bit value that straddles the alignment boundary.
13447 * We do two 32-bit reads and then shift and merge the results.
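 * The top half of the value lives in the low 16 bits of the word at
 * OTP_ADDRESS_MAGIC1; the bottom half lives in the high 16 bits of
 * the word at OTP_ADDRESS_MAGIC2, hence the shift-and-merge below.
 */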
13449 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13451 u32 bhalf_otp, thalf_otp;
13453 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13455 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13456 return 0;
13458 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13460 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13461 return 0;
13463 thalf_otp = tr32(OTP_READ_DATA);
13465 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13467 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13468 return 0;
13470 bhalf_otp = tr32(OTP_READ_DATA);
13472 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13475 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13477 u32 adv = ADVERTISED_Autoneg;
13479 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13480 adv |= ADVERTISED_1000baseT_Half |
13481 ADVERTISED_1000baseT_Full;
13483 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13484 adv |= ADVERTISED_100baseT_Half |
13485 ADVERTISED_100baseT_Full |
13486 ADVERTISED_10baseT_Half |
13487 ADVERTISED_10baseT_Full |
13488 ADVERTISED_TP;
13489 else
13490 adv |= ADVERTISED_FIBRE;
13492 tp->link_config.advertising = adv;
13493 tp->link_config.speed = SPEED_UNKNOWN;
13494 tp->link_config.duplex = DUPLEX_UNKNOWN;
13495 tp->link_config.autoneg = AUTONEG_ENABLE;
13496 tp->link_config.active_speed = SPEED_UNKNOWN;
13497 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
13499 tp->old_link = -1;
13502 static int __devinit tg3_phy_probe(struct tg3 *tp)
13504 u32 hw_phy_id_1, hw_phy_id_2;
13505 u32 hw_phy_id, hw_phy_id_masked;
13506 int err;
13508 /* flow control autonegotiation is default behavior */
13509 tg3_flag_set(tp, PAUSE_AUTONEG);
13510 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13512 if (tg3_flag(tp, USE_PHYLIB))
13513 return tg3_phy_init(tp);
13515 /* Reading the PHY ID register can conflict with ASF
13516 * firmware access to the PHY hardware.
13518 err = 0;
13519 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13520 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13521 } else {
13522 /* Now read the physical PHY_ID from the chip and verify
13523 * that it is sane. If it doesn't look good, we fall back
13524 * to the value found in the eeprom area and, failing
13525 * that, the hard-coded subsystem-ID table.
13527 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13528 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
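/* Pack the OUI and model/revision bits from MII_PHYSID1/2 into the
 * driver's phy_id layout so the result can be masked and compared
 * against the TG3_PHY_ID_* constants.
 */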
13530 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13531 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13532 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13534 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13537 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13538 tp->phy_id = hw_phy_id;
13539 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13540 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13541 else
13542 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13543 } else {
13544 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13545 /* Do nothing, phy ID already set up in
13546 * tg3_get_eeprom_hw_cfg().
13548 } else {
13549 struct subsys_tbl_ent *p;
13551 /* No eeprom signature? Try the hardcoded
13552 * subsys device table.
13554 p = tg3_lookup_by_subsys(tp);
13555 if (!p)
13556 return -ENODEV;
13558 tp->phy_id = p->phy_id;
13559 if (!tp->phy_id ||
13560 tp->phy_id == TG3_PHY_ID_BCM8002)
13561 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13565 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13566 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13567 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13568 (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13569 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13570 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13571 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13572 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13574 tg3_phy_init_link_config(tp);
13576 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13577 !tg3_flag(tp, ENABLE_APE) &&
13578 !tg3_flag(tp, ENABLE_ASF)) {
13579 u32 bmsr, dummy;
13581 tg3_readphy(tp, MII_BMSR, &bmsr);
13582 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13583 (bmsr & BMSR_LSTATUS))
13584 goto skip_phy_reset;
13586 err = tg3_phy_reset(tp);
13587 if (err)
13588 return err;
13590 tg3_phy_set_wirespeed(tp);
13592 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13593 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13594 tp->link_config.flowctrl);
13596 tg3_writephy(tp, MII_BMCR,
13597 BMCR_ANENABLE | BMCR_ANRESTART);
13601 skip_phy_reset:
13602 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13603 err = tg3_init_5401phy_dsp(tp);
13604 if (err)
13605 return err;
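/* Run the DSP init a second time; the repeated call appears to be
 * deliberate for the 5401.
 */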
13607 err = tg3_init_5401phy_dsp(tp);
13610 return err;
13613 static void __devinit tg3_read_vpd(struct tg3 *tp)
13615 u8 *vpd_data;
13616 unsigned int block_end, rosize, len;
13617 u32 vpdlen;
13618 int j, i = 0;
13620 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13621 if (!vpd_data)
13622 goto out_no_vpd;
13624 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13625 if (i < 0)
13626 goto out_not_found;
13628 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13629 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13630 i += PCI_VPD_LRDT_TAG_SIZE;
13632 if (block_end > vpdlen)
13633 goto out_not_found;
13635 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13636 PCI_VPD_RO_KEYWORD_MFR_ID);
13637 if (j > 0) {
13638 len = pci_vpd_info_field_size(&vpd_data[j]);
13640 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13641 if (j + len > block_end || len != 4 ||
13642 memcmp(&vpd_data[j], "1028", 4))
13643 goto partno;
13645 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13646 PCI_VPD_RO_KEYWORD_VENDOR0);
13647 if (j < 0)
13648 goto partno;
13650 len = pci_vpd_info_field_size(&vpd_data[j]);
13652 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13653 if (j + len > block_end)
13654 goto partno;
13656 if (len >= sizeof(tp->fw_ver))
13657 len = sizeof(tp->fw_ver) - 1;
13658 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
13659 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
13660 &vpd_data[j]);
13663 partno:
13664 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13665 PCI_VPD_RO_KEYWORD_PARTNO);
13666 if (i < 0)
13667 goto out_not_found;
13669 len = pci_vpd_info_field_size(&vpd_data[i]);
13671 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13672 if (len > TG3_BPN_SIZE ||
13673 (len + i) > vpdlen)
13674 goto out_not_found;
13676 memcpy(tp->board_part_number, &vpd_data[i], len);
13678 out_not_found:
13679 kfree(vpd_data);
13680 if (tp->board_part_number[0])
13681 return;
13683 out_no_vpd:
13684 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13685 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13686 strcpy(tp->board_part_number, "BCM5717");
13687 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13688 strcpy(tp->board_part_number, "BCM5718");
13689 else
13690 goto nomatch;
13691 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13692 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13693 strcpy(tp->board_part_number, "BCM57780");
13694 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13695 strcpy(tp->board_part_number, "BCM57760");
13696 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13697 strcpy(tp->board_part_number, "BCM57790");
13698 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13699 strcpy(tp->board_part_number, "BCM57788");
13700 else
13701 goto nomatch;
13702 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13703 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13704 strcpy(tp->board_part_number, "BCM57761");
13705 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13706 strcpy(tp->board_part_number, "BCM57765");
13707 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13708 strcpy(tp->board_part_number, "BCM57781");
13709 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13710 strcpy(tp->board_part_number, "BCM57785");
13711 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13712 strcpy(tp->board_part_number, "BCM57791");
13713 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13714 strcpy(tp->board_part_number, "BCM57795");
13715 else
13716 goto nomatch;
13717 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13718 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13719 strcpy(tp->board_part_number, "BCM57762");
13720 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13721 strcpy(tp->board_part_number, "BCM57766");
13722 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13723 strcpy(tp->board_part_number, "BCM57782");
13724 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13725 strcpy(tp->board_part_number, "BCM57786");
13726 else
13727 goto nomatch;
13728 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13729 strcpy(tp->board_part_number, "BCM95906");
13730 } else {
13731 nomatch:
13732 strcpy(tp->board_part_number, "none");
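/* A bootcode image is treated as valid when the top six bits of its
 * first word decode to the 0x0c000000 signature and the following
 * word is zero.
 */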
13736 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13738 u32 val;
13740 if (tg3_nvram_read(tp, offset, &val) ||
13741 (val & 0xfc000000) != 0x0c000000 ||
13742 tg3_nvram_read(tp, offset + 4, &val) ||
13743 val != 0)
13744 return 0;
13746 return 1;
13749 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13751 u32 val, offset, start, ver_offset;
13752 int i, dst_off;
13753 bool newver = false;
13755 if (tg3_nvram_read(tp, 0xc, &offset) ||
13756 tg3_nvram_read(tp, 0x4, &start))
13757 return;
13759 offset = tg3_nvram_logical_addr(tp, offset);
13761 if (tg3_nvram_read(tp, offset, &val))
13762 return;
13764 if ((val & 0xfc000000) == 0x0c000000) {
13765 if (tg3_nvram_read(tp, offset + 4, &val))
13766 return;
13768 if (val == 0)
13769 newver = true;
13772 dst_off = strlen(tp->fw_ver);
13774 if (newver) {
13775 if (TG3_VER_SIZE - dst_off < 16 ||
13776 tg3_nvram_read(tp, offset + 8, &ver_offset))
13777 return;
13779 offset = offset + ver_offset - start;
13780 for (i = 0; i < 16; i += 4) {
13781 __be32 v;
13782 if (tg3_nvram_read_be32(tp, offset + i, &v))
13783 return;
13785 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13787 } else {
13788 u32 major, minor;
13790 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13791 return;
13793 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13794 TG3_NVM_BCVER_MAJSFT;
13795 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13796 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13797 "v%d.%02d", major, minor);
13801 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13803 u32 val, major, minor;
13805 /* Use native endian representation */
13806 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13807 return;
13809 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13810 TG3_NVM_HWSB_CFG1_MAJSFT;
13811 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13812 TG3_NVM_HWSB_CFG1_MINSFT;
13814 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13817 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13819 u32 offset, major, minor, build;
13821 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13823 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13824 return;
13826 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13827 case TG3_EEPROM_SB_REVISION_0:
13828 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13829 break;
13830 case TG3_EEPROM_SB_REVISION_2:
13831 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13832 break;
13833 case TG3_EEPROM_SB_REVISION_3:
13834 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13835 break;
13836 case TG3_EEPROM_SB_REVISION_4:
13837 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13838 break;
13839 case TG3_EEPROM_SB_REVISION_5:
13840 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13841 break;
13842 case TG3_EEPROM_SB_REVISION_6:
13843 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13844 break;
13845 default:
13846 return;
13849 if (tg3_nvram_read(tp, offset, &val))
13850 return;
13852 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13853 TG3_EEPROM_SB_EDH_BLD_SHFT;
13854 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13855 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13856 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13858 if (minor > 99 || build > 26)
13859 return;
13861 offset = strlen(tp->fw_ver);
13862 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13863 " v%d.%02d", major, minor);
13865 if (build > 0) {
13866 offset = strlen(tp->fw_ver);
13867 if (offset < TG3_VER_SIZE - 1)
13868 tp->fw_ver[offset] = 'a' + build - 1;
13872 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13874 u32 val, offset, start;
13875 int i, vlen;
13877 for (offset = TG3_NVM_DIR_START;
13878 offset < TG3_NVM_DIR_END;
13879 offset += TG3_NVM_DIRENT_SIZE) {
13880 if (tg3_nvram_read(tp, offset, &val))
13881 return;
13883 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13884 break;
13887 if (offset == TG3_NVM_DIR_END)
13888 return;
13890 if (!tg3_flag(tp, 5705_PLUS))
13891 start = 0x08000000;
13892 else if (tg3_nvram_read(tp, offset - 4, &start))
13893 return;
13895 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13896 !tg3_fw_img_is_valid(tp, offset) ||
13897 tg3_nvram_read(tp, offset + 8, &val))
13898 return;
13900 offset += val - start;
13902 vlen = strlen(tp->fw_ver);
13904 tp->fw_ver[vlen++] = ',';
13905 tp->fw_ver[vlen++] = ' ';
13907 for (i = 0; i < 4; i++) {
13908 __be32 v;
13909 if (tg3_nvram_read_be32(tp, offset, &v))
13910 return;
13912 offset += sizeof(v);
13914 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13915 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13916 break;
13919 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13920 vlen += sizeof(v);
13924 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13926 int vlen;
13927 u32 apedata;
13928 char *fwtype;
13930 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13931 return;
13933 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13934 if (apedata != APE_SEG_SIG_MAGIC)
13935 return;
13937 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13938 if (!(apedata & APE_FW_STATUS_READY))
13939 return;
13941 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13943 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13944 tg3_flag_set(tp, APE_HAS_NCSI);
13945 fwtype = "NCSI";
13946 } else {
13947 fwtype = "DASH";
13950 vlen = strlen(tp->fw_ver);
13952 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13953 fwtype,
13954 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13955 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13956 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13957 (apedata & APE_FW_VERSION_BLDMSK));
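/* tg3_read_fw_ver() below assembles tp->fw_ver: any VPD-derived prefix
 * is kept, a bootcode/selfboot version is appended according to the
 * NVRAM magic, and, when no VPD version was found, a DASH/NCSI or
 * management firmware version is added for APE/ASF setups.
 */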
13960 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13962 u32 val;
13963 bool vpd_vers = false;
13965 if (tp->fw_ver[0] != 0)
13966 vpd_vers = true;
13968 if (tg3_flag(tp, NO_NVRAM)) {
13969 strcat(tp->fw_ver, "sb");
13970 return;
13973 if (tg3_nvram_read(tp, 0, &val))
13974 return;
13976 if (val == TG3_EEPROM_MAGIC)
13977 tg3_read_bc_ver(tp);
13978 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13979 tg3_read_sb_ver(tp, val);
13980 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13981 tg3_read_hwsb_ver(tp);
13982 else
13983 return;
13985 if (vpd_vers)
13986 goto done;
13988 if (tg3_flag(tp, ENABLE_APE)) {
13989 if (tg3_flag(tp, ENABLE_ASF))
13990 tg3_read_dash_ver(tp);
13991 } else if (tg3_flag(tp, ENABLE_ASF)) {
13992 tg3_read_mgmtfw_ver(tp);
13995 done:
13996 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13999 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14001 if (tg3_flag(tp, LRG_PROD_RING_CAP))
14002 return TG3_RX_RET_MAX_SIZE_5717;
14003 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
14004 return TG3_RX_RET_MAX_SIZE_5700;
14005 else
14006 return TG3_RX_RET_MAX_SIZE_5705;
14009 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
14010 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
14011 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
14012 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
14013 { },
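/* The 5704 and 5714 are dual-port parts; the mate is just another PCI
 * function in the same slot, so scan the eight functions sharing this
 * devfn for a tg3 device other than ourselves.
 */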
14016 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14018 struct pci_dev *peer;
14019 unsigned int func, devnr = tp->pdev->devfn & ~7;
14021 for (func = 0; func < 8; func++) {
14022 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14023 if (peer && peer != tp->pdev)
14024 break;
14025 pci_dev_put(peer);
14027 /* 5704 can be configured in single-port mode, set peer to
14028 * tp->pdev in that case.
14030 if (!peer) {
14031 peer = tp->pdev;
14032 return peer;
14035 /*
14036 * We don't need to keep the refcount elevated; there's no way
14037 * to remove one half of this device without removing the other
14039 pci_dev_put(peer);
14041 return peer;
14044 static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
14046 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
14047 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
14048 u32 reg;
14050 /* All devices that use the alternate
14051 * ASIC REV location have a CPMU.
14053 tg3_flag_set(tp, CPMU_PRESENT);
14055 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14056 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
14057 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
14058 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
14059 reg = TG3PCI_GEN2_PRODID_ASICREV;
14060 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
14061 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
14062 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
14063 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
14064 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14065 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14066 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
14067 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
14068 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
14069 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14070 reg = TG3PCI_GEN15_PRODID_ASICREV;
14071 else
14072 reg = TG3PCI_PRODID_ASICREV;
14074 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
14077 /* Wrong chip ID in 5752 A0. This code can be removed later
14078 * as A0 is not in production.
14080 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
14081 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
14083 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14084 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14085 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14086 tg3_flag_set(tp, 5717_PLUS);
14088 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14089 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14090 tg3_flag_set(tp, 57765_CLASS);
14092 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14093 tg3_flag_set(tp, 57765_PLUS);
14095 /* Intentionally exclude ASIC_REV_5906 */
14096 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14097 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14098 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14099 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14100 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14101 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14102 tg3_flag(tp, 57765_PLUS))
14103 tg3_flag_set(tp, 5755_PLUS);
14105 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14106 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14107 tg3_flag_set(tp, 5780_CLASS);
14109 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14110 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14111 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14112 tg3_flag(tp, 5755_PLUS) ||
14113 tg3_flag(tp, 5780_CLASS))
14114 tg3_flag_set(tp, 5750_PLUS);
14116 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14117 tg3_flag(tp, 5750_PLUS))
14118 tg3_flag_set(tp, 5705_PLUS);
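/* The generation flags set above nest: 5717_PLUS and 57765_CLASS imply
 * 57765_PLUS, which implies 5755_PLUS, which together with 5780_CLASS
 * and a few standalone revs implies 5750_PLUS, which in turn implies
 * 5705_PLUS. Code elsewhere tests the broadest flag that applies.
 */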
14121 static int __devinit tg3_get_invariants(struct tg3 *tp)
14123 u32 misc_ctrl_reg;
14124 u32 pci_state_reg, grc_misc_cfg;
14125 u32 val;
14126 u16 pci_cmd;
14127 int err;
14129 /* Force memory write invalidate off. If we leave it on,
14130 * then on 5700_BX chips we have to enable a workaround.
14131 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14132 * to match the cacheline size. The Broadcom driver has this
14133 * workaround but turns MWI off at all times and so never uses
14134 * it. This seems to suggest that the workaround is insufficient.
14136 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14137 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14138 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14140 /* Important! -- Make sure register accesses are byteswapped
14141 * correctly. Also, for those chips that require it, make
14142 * sure that indirect register accesses are enabled before
14143 * the first operation.
14145 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14146 &misc_ctrl_reg);
14147 tp->misc_host_ctrl |= (misc_ctrl_reg &
14148 MISC_HOST_CTRL_CHIPREV);
14149 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14150 tp->misc_host_ctrl);
14152 tg3_detect_asic_rev(tp, misc_ctrl_reg);
14154 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14155 * we need to disable memory and use config. cycles
14156 * only to access all registers. The 5702/03 chips
14157 * can mistakenly decode the special cycles from the
14158 * ICH chipsets as memory write cycles, causing corruption
14159 * of register and memory space. Only certain ICH bridges
14160 * will drive special cycles with non-zero data during the
14161 * address phase which can fall within the 5703's address
14162 * range. This is not an ICH bug as the PCI spec allows
14163 * non-zero address during special cycles. However, only
14164 * these ICH bridges are known to drive non-zero addresses
14165 * during special cycles.
14167 * Since special cycles do not cross PCI bridges, we only
14168 * enable this workaround if the 5703 is on the secondary
14169 * bus of these ICH bridges.
14171 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14172 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14173 static struct tg3_dev_id {
14174 u32 vendor;
14175 u32 device;
14176 u32 rev;
14177 } ich_chipsets[] = {
14178 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14179 PCI_ANY_ID },
14180 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14181 PCI_ANY_ID },
14182 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14183 0xa },
14184 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14185 PCI_ANY_ID },
14186 { },
14188 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14189 struct pci_dev *bridge = NULL;
14191 while (pci_id->vendor != 0) {
14192 bridge = pci_get_device(pci_id->vendor, pci_id->device,
14193 bridge);
14194 if (!bridge) {
14195 pci_id++;
14196 continue;
14198 if (pci_id->rev != PCI_ANY_ID) {
14199 if (bridge->revision > pci_id->rev)
14200 continue;
14202 if (bridge->subordinate &&
14203 (bridge->subordinate->number ==
14204 tp->pdev->bus->number)) {
14205 tg3_flag_set(tp, ICH_WORKAROUND);
14206 pci_dev_put(bridge);
14207 break;
14212 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14213 static struct tg3_dev_id {
14214 u32 vendor;
14215 u32 device;
14216 } bridge_chipsets[] = {
14217 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14218 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14219 { },
14221 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14222 struct pci_dev *bridge = NULL;
14224 while (pci_id->vendor != 0) {
14225 bridge = pci_get_device(pci_id->vendor,
14226 pci_id->device,
14227 bridge);
14228 if (!bridge) {
14229 pci_id++;
14230 continue;
14232 if (bridge->subordinate &&
14233 (bridge->subordinate->number <=
14234 tp->pdev->bus->number) &&
14235 (bridge->subordinate->subordinate >=
14236 tp->pdev->bus->number)) {
14237 tg3_flag_set(tp, 5701_DMA_BUG);
14238 pci_dev_put(bridge);
14239 break;
14244 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14245 * DMA addresses > 40-bit. This bridge may have other additional
14246 * 57xx devices behind it in some 4-port NIC designs for example.
14247 * Any tg3 device found behind the bridge will also need the 40-bit
14248 * DMA workaround.
14250 if (tg3_flag(tp, 5780_CLASS)) {
14251 tg3_flag_set(tp, 40BIT_DMA_BUG);
14252 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14253 } else {
14254 struct pci_dev *bridge = NULL;
14256 do {
14257 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14258 PCI_DEVICE_ID_SERVERWORKS_EPB,
14259 bridge);
14260 if (bridge && bridge->subordinate &&
14261 (bridge->subordinate->number <=
14262 tp->pdev->bus->number) &&
14263 (bridge->subordinate->subordinate >=
14264 tp->pdev->bus->number)) {
14265 tg3_flag_set(tp, 40BIT_DMA_BUG);
14266 pci_dev_put(bridge);
14267 break;
14269 } while (bridge);
14272 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14273 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14274 tp->pdev_peer = tg3_find_peer(tp);
14276 /* Determine TSO capabilities */
14277 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14278 ; /* Do nothing. HW bug. */
14279 else if (tg3_flag(tp, 57765_PLUS))
14280 tg3_flag_set(tp, HW_TSO_3);
14281 else if (tg3_flag(tp, 5755_PLUS) ||
14282 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14283 tg3_flag_set(tp, HW_TSO_2);
14284 else if (tg3_flag(tp, 5750_PLUS)) {
14285 tg3_flag_set(tp, HW_TSO_1);
14286 tg3_flag_set(tp, TSO_BUG);
14287 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14288 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14289 tg3_flag_clear(tp, TSO_BUG);
14290 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14291 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14292 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14293 tg3_flag_set(tp, TSO_BUG);
14294 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14295 tp->fw_needed = FIRMWARE_TG3TSO5;
14296 else
14297 tp->fw_needed = FIRMWARE_TG3TSO;
14298 }
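/* To summarize the ladder above: 57765_PLUS parts get HW_TSO_3, 5755_PLUS
 * parts and the 5906 get HW_TSO_2, the remaining 5750_PLUS parts get
 * HW_TSO_1, and older chips fall back to firmware-assisted TSO
 * (FIRMWARE_TG3TSO5 on the 5705, FIRMWARE_TG3TSO otherwise); the 5719 A0
 * gets no TSO at all because of a hardware bug.
 */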
14300 /* Selectively allow TSO based on operating conditions */
14301 if (tg3_flag(tp, HW_TSO_1) ||
14302 tg3_flag(tp, HW_TSO_2) ||
14303 tg3_flag(tp, HW_TSO_3) ||
14304 tp->fw_needed) {
14305 /* For firmware TSO, assume ASF is disabled.
14306 * We'll disable TSO later if we discover ASF
14307 * is enabled in tg3_get_eeprom_hw_cfg().
14308 */
14309 tg3_flag_set(tp, TSO_CAPABLE);
14310 } else {
14311 tg3_flag_clear(tp, TSO_CAPABLE);
14312 tg3_flag_clear(tp, TSO_BUG);
14313 tp->fw_needed = NULL;
14314 }
14316 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14317 tp->fw_needed = FIRMWARE_TG3;
14319 tp->irq_max = 1;
14321 if (tg3_flag(tp, 5750_PLUS)) {
14322 tg3_flag_set(tp, SUPPORT_MSI);
14323 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14324 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14325 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14326 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14327 tp->pdev_peer == tp->pdev))
14328 tg3_flag_clear(tp, SUPPORT_MSI);
14330 if (tg3_flag(tp, 5755_PLUS) ||
14331 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14332 tg3_flag_set(tp, 1SHOT_MSI);
14333 }
14335 if (tg3_flag(tp, 57765_PLUS)) {
14336 tg3_flag_set(tp, SUPPORT_MSIX);
14337 tp->irq_max = TG3_IRQ_MAX_VECS;
14338 tg3_rss_init_dflt_indir_tbl(tp);
14339 }
14340 }
14342 if (tg3_flag(tp, 5755_PLUS) ||
14343 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14344 tg3_flag_set(tp, SHORT_DMA_BUG);
14346 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14347 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14349 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14350 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14351 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14352 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14354 if (tg3_flag(tp, 57765_PLUS) &&
14355 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14356 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14358 if (!tg3_flag(tp, 5705_PLUS) ||
14359 tg3_flag(tp, 5780_CLASS) ||
14360 tg3_flag(tp, USE_JUMBO_BDFLAG))
14361 tg3_flag_set(tp, JUMBO_CAPABLE);
14363 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14364 &pci_state_reg);
14366 if (pci_is_pcie(tp->pdev)) {
14367 u16 lnkctl;
14369 tg3_flag_set(tp, PCI_EXPRESS);
14371 pci_read_config_word(tp->pdev,
14372 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14373 &lnkctl);
14374 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14375 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14376 ASIC_REV_5906) {
14377 tg3_flag_clear(tp, HW_TSO_2);
14378 tg3_flag_clear(tp, TSO_CAPABLE);
14379 }
14380 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14381 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14382 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14383 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14384 tg3_flag_set(tp, CLKREQ_BUG);
14385 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14386 tg3_flag_set(tp, L1PLLPD_EN);
14387 }
14388 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14389 /* BCM5785 devices are effectively PCIe devices, and should
14390 * follow PCIe codepaths, but do not have a PCIe capabilities
14391 * section.
14392 */
14393 tg3_flag_set(tp, PCI_EXPRESS);
14394 } else if (!tg3_flag(tp, 5705_PLUS) ||
14395 tg3_flag(tp, 5780_CLASS)) {
14396 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14397 if (!tp->pcix_cap) {
14398 dev_err(&tp->pdev->dev,
14399 "Cannot find PCI-X capability, aborting\n");
14400 return -EIO;
14401 }
14403 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14404 tg3_flag_set(tp, PCIX_MODE);
14405 }
14407 /* If we have an AMD 762 or VIA K8T800 chipset, write
14408 * reordering to the mailbox registers done by the host
14409 * controller can cause major troubles. We read back from
14410 * every mailbox register write to force the writes to be
14411 * posted to the chip in order.
14412 */
14413 if (pci_dev_present(tg3_write_reorder_chipsets) &&
14414 !tg3_flag(tp, PCI_EXPRESS))
14415 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14417 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14418 &tp->pci_cacheline_sz);
14419 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14420 &tp->pci_lat_timer);
14421 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14422 tp->pci_lat_timer < 64) {
14423 tp->pci_lat_timer = 64;
14424 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14425 tp->pci_lat_timer);
14426 }
14428 /* Important! -- It is critical that the PCI-X hw workaround
14429 * situation is decided before the first MMIO register access.
14430 */
14431 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14432 /* 5700 BX chips need to have their TX producer index
14433 * mailboxes written twice to work around a bug.
14434 */
14435 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14437 /* If we are in PCI-X mode, enable register write workaround.
14438 *
14439 * The workaround is to use indirect register accesses
14440 * for all chip writes not to mailbox registers.
14441 */
14442 if (tg3_flag(tp, PCIX_MODE)) {
14443 u32 pm_reg;
14445 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14447 /* The chip can have its power management PCI config
14448 * space registers clobbered due to this bug.
14449 * So explicitly force the chip into D0 here.
14450 */
14451 pci_read_config_dword(tp->pdev,
14452 tp->pm_cap + PCI_PM_CTRL,
14453 &pm_reg);
14454 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14455 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14456 pci_write_config_dword(tp->pdev,
14457 tp->pm_cap + PCI_PM_CTRL,
14458 pm_reg);
14460 /* Also, force SERR#/PERR# in PCI command. */
14461 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14462 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14463 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14464 }
14465 }
14467 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14468 tg3_flag_set(tp, PCI_HIGH_SPEED);
14469 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14470 tg3_flag_set(tp, PCI_32BIT);
14472 /* Chip-specific fixup from Broadcom driver */
14473 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14474 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14475 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14476 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14477 }
14479 /* Default fast path register access methods */
14480 tp->read32 = tg3_read32;
14481 tp->write32 = tg3_write32;
14482 tp->read32_mbox = tg3_read32;
14483 tp->write32_mbox = tg3_write32;
14484 tp->write32_tx_mbox = tg3_write32;
14485 tp->write32_rx_mbox = tg3_write32;
14487 /* Various workaround register access methods */
14488 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14489 tp->write32 = tg3_write_indirect_reg32;
14490 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14491 (tg3_flag(tp, PCI_EXPRESS) &&
14492 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14493 /*
14494 * Back to back register writes can cause problems on these
14495 * chips; the workaround is to read back all reg writes
14496 * except those to mailbox regs.
14497 *
14498 * See tg3_write_indirect_reg32().
14499 */
14500 tp->write32 = tg3_write_flush_reg32;
14501 }
14503 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14504 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14505 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14506 tp->write32_rx_mbox = tg3_write_flush_reg32;
14507 }
14509 if (tg3_flag(tp, ICH_WORKAROUND)) {
14510 tp->read32 = tg3_read_indirect_reg32;
14511 tp->write32 = tg3_write_indirect_reg32;
14512 tp->read32_mbox = tg3_read_indirect_mbox;
14513 tp->write32_mbox = tg3_write_indirect_mbox;
14514 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14515 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14517 iounmap(tp->regs);
14518 tp->regs = NULL;
14520 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14521 pci_cmd &= ~PCI_COMMAND_MEMORY;
14522 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14523 }
14524 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14525 tp->read32_mbox = tg3_read32_mbox_5906;
14526 tp->write32_mbox = tg3_write32_mbox_5906;
14527 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14528 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14529 }
14531 if (tp->write32 == tg3_write_indirect_reg32 ||
14532 (tg3_flag(tp, PCIX_MODE) &&
14533 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14534 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14535 tg3_flag_set(tp, SRAM_USE_CONFIG);
14537 /* The memory arbiter has to be enabled in order for SRAM accesses
14538 * to succeed. Normally on powerup the tg3 chip firmware will make
14539 * sure it is enabled, but other entities such as system netboot
14540 * code might disable it.
14541 */
14542 val = tr32(MEMARB_MODE);
14543 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14545 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14546 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14547 tg3_flag(tp, 5780_CLASS)) {
14548 if (tg3_flag(tp, PCIX_MODE)) {
14549 pci_read_config_dword(tp->pdev,
14550 tp->pcix_cap + PCI_X_STATUS,
14551 &val);
14552 tp->pci_fn = val & 0x7;
14553 }
14554 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14555 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14556 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14557 NIC_SRAM_CPMUSTAT_SIG) {
14558 tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14559 tp->pci_fn = tp->pci_fn ? 1 : 0;
14560 }
14561 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14562 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14563 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14564 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14565 NIC_SRAM_CPMUSTAT_SIG) {
14566 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14567 TG3_CPMU_STATUS_FSHFT_5719;
14568 }
14569 }
14571 /* Get eeprom hw config before calling tg3_set_power_state().
14572 * In particular, the TG3_FLAG_IS_NIC flag must be
14573 * determined before calling tg3_set_power_state() so that
14574 * we know whether or not to switch out of Vaux power.
14575 * When the flag is set, it means that GPIO1 is used for eeprom
14576 * write protect and also implies that it is a LOM where GPIOs
14577 * are not used to switch power.
14578 */
14579 tg3_get_eeprom_hw_cfg(tp);
14581 if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14582 tg3_flag_clear(tp, TSO_CAPABLE);
14583 tg3_flag_clear(tp, TSO_BUG);
14584 tp->fw_needed = NULL;
14585 }
14587 if (tg3_flag(tp, ENABLE_APE)) {
14588 /* Allow reads and writes to the
14589 * APE register and memory space.
14590 */
14591 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14592 PCISTATE_ALLOW_APE_SHMEM_WR |
14593 PCISTATE_ALLOW_APE_PSPACE_WR;
14594 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14595 pci_state_reg);
14597 tg3_ape_lock_init(tp);
14598 }
14600 /* Set up tp->grc_local_ctrl before calling
14601 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
14602 * will bring 5700's external PHY out of reset.
14603 * It is also used as eeprom write protect on LOMs.
14604 */
14605 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14606 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14607 tg3_flag(tp, EEPROM_WRITE_PROT))
14608 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14609 GRC_LCLCTRL_GPIO_OUTPUT1);
14610 /* Unused GPIO3 must be driven as output on 5752 because there
14611 * are no pull-up resistors on unused GPIO pins.
14612 */
14613 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14614 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14616 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14617 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14618 tg3_flag(tp, 57765_CLASS))
14619 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14621 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14622 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14623 /* Turn off the debug UART. */
14624 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14625 if (tg3_flag(tp, IS_NIC))
14626 /* Keep VMain power. */
14627 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14628 GRC_LCLCTRL_GPIO_OUTPUT0;
14629 }
14631 /* Switch out of Vaux if it is a NIC */
14632 tg3_pwrsrc_switch_to_vmain(tp);
14634 /* Derive initial jumbo mode from MTU assigned in
14635 * ether_setup() via the alloc_etherdev() call
14636 */
14637 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14638 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14640 /* Determine WakeOnLan speed to use. */
14641 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14642 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14643 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14644 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14645 tg3_flag_clear(tp, WOL_SPEED_100MB);
14646 } else {
14647 tg3_flag_set(tp, WOL_SPEED_100MB);
14648 }
14650 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14651 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14653 /* A few boards don't want the Ethernet@WireSpeed PHY feature */
14654 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14655 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14656 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14657 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14658 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14659 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14660 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14662 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14663 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14664 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14665 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14666 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14668 if (tg3_flag(tp, 5705_PLUS) &&
14669 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14670 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14671 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14672 !tg3_flag(tp, 57765_PLUS)) {
14673 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14674 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14675 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14676 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14677 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14678 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14679 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14680 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14681 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14682 } else
14683 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14684 }
14686 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14687 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14688 tp->phy_otp = tg3_read_otp_phycfg(tp);
14689 if (tp->phy_otp == 0)
14690 tp->phy_otp = TG3_OTP_DEFAULT;
14691 }
14693 if (tg3_flag(tp, CPMU_PRESENT))
14694 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14695 else
14696 tp->mi_mode = MAC_MI_MODE_BASE;
14698 tp->coalesce_mode = 0;
14699 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14700 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14701 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14703 /* Set these bits to enable statistics workaround. */
14704 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14705 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14706 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14707 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14708 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14709 }
14711 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14712 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14713 tg3_flag_set(tp, USE_PHYLIB);
14715 err = tg3_mdio_init(tp);
14716 if (err)
14717 return err;
14719 /* Initialize data/descriptor byte/word swapping. */
14720 val = tr32(GRC_MODE);
14721 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14722 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14723 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14724 GRC_MODE_B2HRX_ENABLE |
14725 GRC_MODE_HTX2B_ENABLE |
14726 GRC_MODE_HOST_STACKUP);
14727 else
14728 val &= GRC_MODE_HOST_STACKUP;
14730 tw32(GRC_MODE, val | tp->grc_mode);
14732 tg3_switch_clocks(tp);
14734 /* Clear this out for sanity. */
14735 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14737 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
14738 tw32(TG3PCI_REG_BASE_ADDR, 0);
14740 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14741 &pci_state_reg);
14742 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14743 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14744 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14746 if (chiprevid == CHIPREV_ID_5701_A0 ||
14747 chiprevid == CHIPREV_ID_5701_B0 ||
14748 chiprevid == CHIPREV_ID_5701_B2 ||
14749 chiprevid == CHIPREV_ID_5701_B5) {
14750 void __iomem *sram_base;
14752 /* Write some dummy words into the SRAM status block
14753 * area, see if it reads back correctly. If the return
14754 * value is bad, force enable the PCIX workaround.
14755 */
14756 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14758 writel(0x00000000, sram_base);
14759 writel(0x00000000, sram_base + 4);
14760 writel(0xffffffff, sram_base + 4);
14761 if (readl(sram_base) != 0x00000000)
14762 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14763 }
14764 }
14766 udelay(50);
14767 tg3_nvram_init(tp);
14769 grc_misc_cfg = tr32(GRC_MISC_CFG);
14770 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14772 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14773 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14774 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14775 tg3_flag_set(tp, IS_5788);
14777 if (!tg3_flag(tp, IS_5788) &&
14778 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14779 tg3_flag_set(tp, TAGGED_STATUS);
14780 if (tg3_flag(tp, TAGGED_STATUS)) {
14781 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14782 HOSTCC_MODE_CLRTICK_TXBD);
14784 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14785 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14786 tp->misc_host_ctrl);
14787 }
14789 /* Preserve the APE MAC_MODE bits */
14790 if (tg3_flag(tp, ENABLE_APE))
14791 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14792 else
14793 tp->mac_mode = 0;
14795 /* these are limited to 10/100 only */
14796 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14797 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14798 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14799 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14800 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14801 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14802 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14803 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14804 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14805 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14806 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14807 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14808 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14809 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14810 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14811 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14813 err = tg3_phy_probe(tp);
14814 if (err) {
14815 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14816 /* ... but do not return immediately ... */
14817 tg3_mdio_fini(tp);
14818 }
14820 tg3_read_vpd(tp);
14821 tg3_read_fw_ver(tp);
14823 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14824 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14825 } else {
14826 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14827 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14828 else
14829 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14830 }
14832 /* 5700 {AX,BX} chips have a broken status block link
14833 * change bit implementation, so we must use the
14834 * status register in those cases.
14835 */
14836 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14837 tg3_flag_set(tp, USE_LINKCHG_REG);
14838 else
14839 tg3_flag_clear(tp, USE_LINKCHG_REG);
14841 /* The led_ctrl is set during tg3_phy_probe; here we might
14842 * have to force the link status polling mechanism based
14843 * upon subsystem IDs.
14844 */
14845 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14846 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14847 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14848 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14849 tg3_flag_set(tp, USE_LINKCHG_REG);
14850 }
14852 /* For all SERDES we poll the MAC status register. */
14853 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14854 tg3_flag_set(tp, POLL_SERDES);
14855 else
14856 tg3_flag_clear(tp, POLL_SERDES);
14858 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14859 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14860 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14861 tg3_flag(tp, PCIX_MODE)) {
14862 tp->rx_offset = NET_SKB_PAD;
14863 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14864 tp->rx_copy_thresh = ~(u16)0;
14865 #endif
14866 }
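/* With rx_copy_thresh forced to the u16 maximum, every received frame is
 * copied into a freshly allocated skb. The 5701 in PCI-X mode has to DMA
 * at rx_offset == NET_SKB_PAD (no NET_IP_ALIGN), so on architectures
 * without efficient unaligned access the copy is presumably what realigns
 * the headers for the stack.
 */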
14868 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14869 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14870 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14872 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14874 /* Increment the rx prod index on the rx std ring by at most
14875 * 8 for these chips to work around hw errata.
14876 */
14877 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14878 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14879 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14880 tp->rx_std_max_post = 8;
14882 if (tg3_flag(tp, ASPM_WORKAROUND))
14883 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14884 PCIE_PWR_MGMT_L1_THRESH_MSK;
14886 return err;
14887 }
14889 #ifdef CONFIG_SPARC
14890 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14891 {
14892 struct net_device *dev = tp->dev;
14893 struct pci_dev *pdev = tp->pdev;
14894 struct device_node *dp = pci_device_to_OF_node(pdev);
14895 const unsigned char *addr;
14896 int len;
14898 addr = of_get_property(dp, "local-mac-address", &len);
14899 if (addr && len == 6) {
14900 memcpy(dev->dev_addr, addr, 6);
14901 memcpy(dev->perm_addr, dev->dev_addr, 6);
14902 return 0;
14903 }
14904 return -ENODEV;
14905 }
14907 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14908 {
14909 struct net_device *dev = tp->dev;
14911 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14912 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14913 return 0;
14914 }
14915 #endif
14917 static int __devinit tg3_get_device_address(struct tg3 *tp)
14918 {
14919 struct net_device *dev = tp->dev;
14920 u32 hi, lo, mac_offset;
14921 int addr_ok = 0;
14923 #ifdef CONFIG_SPARC
14924 if (!tg3_get_macaddr_sparc(tp))
14925 return 0;
14926 #endif
14928 mac_offset = 0x7c;
14929 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14930 tg3_flag(tp, 5780_CLASS)) {
14931 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14932 mac_offset = 0xcc;
14933 if (tg3_nvram_lock(tp))
14934 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14935 else
14936 tg3_nvram_unlock(tp);
14937 } else if (tg3_flag(tp, 5717_PLUS)) {
14938 if (tp->pci_fn & 1)
14939 mac_offset = 0xcc;
14940 if (tp->pci_fn > 1)
14941 mac_offset += 0x18c;
14942 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14943 mac_offset = 0x10;
14945 /* First try to get it from MAC address mailbox. */
14946 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14947 if ((hi >> 16) == 0x484b) {
14948 dev->dev_addr[0] = (hi >> 8) & 0xff;
14949 dev->dev_addr[1] = (hi >> 0) & 0xff;
14951 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14952 dev->dev_addr[2] = (lo >> 24) & 0xff;
14953 dev->dev_addr[3] = (lo >> 16) & 0xff;
14954 dev->dev_addr[4] = (lo >> 8) & 0xff;
14955 dev->dev_addr[5] = (lo >> 0) & 0xff;
14957 /* Some old bootcode may report a 0 MAC address in SRAM */
14958 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14959 }
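/* The 0x484b signature checked above is the ASCII pair "HK". As a worked
 * example of the unpacking, hi = 0x484b0a0b and lo = 0x0c0d0e0f yield the
 * station address 0a:0b:0c:0d:0e:0f.
 */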
14960 if (!addr_ok) {
14961 /* Next, try NVRAM. */
14962 if (!tg3_flag(tp, NO_NVRAM) &&
14963 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14964 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14965 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14966 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14967 }
14968 /* Finally just fetch it out of the MAC control regs. */
14969 else {
14970 hi = tr32(MAC_ADDR_0_HIGH);
14971 lo = tr32(MAC_ADDR_0_LOW);
14973 dev->dev_addr[5] = lo & 0xff;
14974 dev->dev_addr[4] = (lo >> 8) & 0xff;
14975 dev->dev_addr[3] = (lo >> 16) & 0xff;
14976 dev->dev_addr[2] = (lo >> 24) & 0xff;
14977 dev->dev_addr[1] = hi & 0xff;
14978 dev->dev_addr[0] = (hi >> 8) & 0xff;
14979 }
14980 }
14982 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14983 #ifdef CONFIG_SPARC
14984 if (!tg3_get_default_macaddr_sparc(tp))
14985 return 0;
14986 #endif
14987 return -EINVAL;
14988 }
14989 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14990 return 0;
14991 }
14993 #define BOUNDARY_SINGLE_CACHELINE 1
14994 #define BOUNDARY_MULTI_CACHELINE 2
14996 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14997 {
14998 int cacheline_size;
14999 u8 byte;
15000 int goal;
15002 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
15003 if (byte == 0)
15004 cacheline_size = 1024;
15005 else
15006 cacheline_size = (int) byte * 4;
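/* PCI_CACHE_LINE_SIZE is expressed in 32-bit words, so e.g. a register
 * value of 0x10 means a 64-byte cache line; 0 (unset) is pessimistically
 * treated as 1024 bytes.
 */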
15008 /* On 5703 and later chips, the boundary bits have no
15009 * effect.
15010 */
15011 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15012 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15013 !tg3_flag(tp, PCI_EXPRESS))
15014 goto out;
15016 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
15017 goal = BOUNDARY_MULTI_CACHELINE;
15018 #else
15019 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
15020 goal = BOUNDARY_SINGLE_CACHELINE;
15021 #else
15022 goal = 0;
15023 #endif
15024 #endif
15026 if (tg3_flag(tp, 57765_PLUS)) {
15027 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
15028 goto out;
15029 }
15031 if (!goal)
15032 goto out;
15034 /* PCI controllers on most RISC systems tend to disconnect
15035 * when a device tries to burst across a cache-line boundary.
15036 * Therefore, letting tg3 do so just wastes PCI bandwidth.
15037 *
15038 * Unfortunately, for PCI-E there are only limited
15039 * write-side controls for this, and thus for reads
15040 * we will still get the disconnects. We'll also waste
15041 * these PCI cycles for both read and write for chips
15042 * other than 5700 and 5701 which do not implement the
15043 * boundary bits.
15044 */
15045 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
15046 switch (cacheline_size) {
15047 case 16:
15048 case 32:
15049 case 64:
15050 case 128:
15051 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15052 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
15053 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
15054 } else {
15055 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15056 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15057 }
15058 break;
15060 case 256:
15061 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
15062 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
15063 break;
15065 default:
15066 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15067 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15068 break;
15069 }
15070 } else if (tg3_flag(tp, PCI_EXPRESS)) {
15071 switch (cacheline_size) {
15072 case 16:
15073 case 32:
15074 case 64:
15075 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15076 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15077 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
15078 break;
15079 }
15080 /* fallthrough */
15081 case 128:
15082 default:
15083 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15084 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
15085 break;
15086 }
15087 } else {
15088 switch (cacheline_size) {
15089 case 16:
15090 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15091 val |= (DMA_RWCTRL_READ_BNDRY_16 |
15092 DMA_RWCTRL_WRITE_BNDRY_16);
15093 break;
15094 }
15095 /* fallthrough */
15096 case 32:
15097 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15098 val |= (DMA_RWCTRL_READ_BNDRY_32 |
15099 DMA_RWCTRL_WRITE_BNDRY_32);
15100 break;
15101 }
15102 /* fallthrough */
15103 case 64:
15104 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15105 val |= (DMA_RWCTRL_READ_BNDRY_64 |
15106 DMA_RWCTRL_WRITE_BNDRY_64);
15107 break;
15108 }
15109 /* fallthrough */
15110 case 128:
15111 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15112 val |= (DMA_RWCTRL_READ_BNDRY_128 |
15113 DMA_RWCTRL_WRITE_BNDRY_128);
15114 break;
15115 }
15116 /* fallthrough */
15117 case 256:
15118 val |= (DMA_RWCTRL_READ_BNDRY_256 |
15119 DMA_RWCTRL_WRITE_BNDRY_256);
15120 break;
15121 case 512:
15122 val |= (DMA_RWCTRL_READ_BNDRY_512 |
15123 DMA_RWCTRL_WRITE_BNDRY_512);
15124 break;
15125 case 1024:
15126 default:
15127 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15128 DMA_RWCTRL_WRITE_BNDRY_1024);
15129 break;
15130 }
15131 }
15133 out:
15134 return val;
15135 }
15137 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
15138 {
15139 struct tg3_internal_buffer_desc test_desc;
15140 u32 sram_dma_descs;
15141 int i, ret;
15143 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15145 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15146 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15147 tw32(RDMAC_STATUS, 0);
15148 tw32(WDMAC_STATUS, 0);
15150 tw32(BUFMGR_MODE, 0);
15151 tw32(FTQ_RESET, 0);
15153 test_desc.addr_hi = ((u64) buf_dma) >> 32;
15154 test_desc.addr_lo = buf_dma & 0xffffffff;
15155 test_desc.nic_mbuf = 0x00002100;
15156 test_desc.len = size;
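/* The 64-bit bus address of the test buffer is split across the two
 * 32-bit halves of the internal descriptor above; nic_mbuf aims the
 * transfer at NIC SRAM offset 0x2100, the same window the #if 0
 * validation code below reads back.
 */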
15158 /*
15159 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
15160 * the *second* time the tg3 driver was getting loaded after an
15161 * initial scan.
15163 * Broadcom tells me:
15164 * ...the DMA engine is connected to the GRC block and a DMA
15165 * reset may affect the GRC block in some unpredictable way...
15166 * The behavior of resets to individual blocks has not been tested.
15168 * Broadcom noted the GRC reset will also reset all sub-components.
15169 */
15170 if (to_device) {
15171 test_desc.cqid_sqid = (13 << 8) | 2;
15173 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15174 udelay(40);
15175 } else {
15176 test_desc.cqid_sqid = (16 << 8) | 7;
15178 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15179 udelay(40);
15180 }
15181 test_desc.flags = 0x00000005;
15183 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15184 u32 val;
15186 val = *(((u32 *)&test_desc) + i);
15187 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15188 sram_dma_descs + (i * sizeof(u32)));
15189 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15190 }
15191 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15193 if (to_device)
15194 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15195 else
15196 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15198 ret = -ENODEV;
15199 for (i = 0; i < 40; i++) {
15200 u32 val;
15202 if (to_device)
15203 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15204 else
15205 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15206 if ((val & 0xffff) == sram_dma_descs) {
15207 ret = 0;
15208 break;
15209 }
15211 udelay(100);
15212 }
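/* The completion poll above gives the DMA engine roughly 4 ms
 * (40 iterations of udelay(100)) before giving up with -ENODEV.
 */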
15214 return ret;
15215 }
15217 #define TEST_BUFFER_SIZE 0x2000
15219 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15220 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15221 { },
15222 };
15224 static int __devinit tg3_test_dma(struct tg3 *tp)
15225 {
15226 dma_addr_t buf_dma;
15227 u32 *buf, saved_dma_rwctrl;
15228 int ret = 0;
15230 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15231 &buf_dma, GFP_KERNEL);
15232 if (!buf) {
15233 ret = -ENOMEM;
15234 goto out_nofree;
15235 }
15237 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15238 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15240 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15242 if (tg3_flag(tp, 57765_PLUS))
15243 goto out;
15245 if (tg3_flag(tp, PCI_EXPRESS)) {
15246 /* DMA read watermark not used on PCIE */
15247 tp->dma_rwctrl |= 0x00180000;
15248 } else if (!tg3_flag(tp, PCIX_MODE)) {
15249 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15250 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15251 tp->dma_rwctrl |= 0x003f0000;
15252 else
15253 tp->dma_rwctrl |= 0x003f000f;
15254 } else {
15255 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15256 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15257 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15258 u32 read_water = 0x7;
15260 /* If the 5704 is behind the EPB bridge, we can
15261 * do the less restrictive ONE_DMA workaround for
15262 * better performance.
15263 */
15264 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15265 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15266 tp->dma_rwctrl |= 0x8000;
15267 else if (ccval == 0x6 || ccval == 0x7)
15268 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15270 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15271 read_water = 4;
15272 /* Set bit 23 to enable PCIX hw bug fix */
15273 tp->dma_rwctrl |=
15274 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15275 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15276 (1 << 23);
15277 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15278 /* 5780 always in PCIX mode */
15279 tp->dma_rwctrl |= 0x00144000;
15280 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15281 /* 5714 always in PCIX mode */
15282 tp->dma_rwctrl |= 0x00148000;
15283 } else {
15284 tp->dma_rwctrl |= 0x001b000f;
15285 }
15286 }
15288 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15289 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15290 tp->dma_rwctrl &= 0xfffffff0;
15292 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15293 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15294 /* Remove this if it causes problems for some boards. */
15295 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15297 /* On 5700/5701 chips, we need to set this bit.
15298 * Otherwise the chip will issue cacheline transactions
15299 * to streamable DMA memory with not all the byte
15300 * enables turned on. This is an error on several
15301 * RISC PCI controllers, in particular sparc64.
15303 * On 5703/5704 chips, this bit has been reassigned
15304 * a different meaning. In particular, it is used
15305 * on those chips to enable a PCI-X workaround.
15306 */
15307 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15308 }
15310 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15312 #if 0
15313 /* Unneeded, already done by tg3_get_invariants. */
15314 tg3_switch_clocks(tp);
15315 #endif
15317 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15318 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15319 goto out;
15321 /* It is best to perform DMA test with maximum write burst size
15322 * to expose the 5700/5701 write DMA bug.
15323 */
15324 saved_dma_rwctrl = tp->dma_rwctrl;
15325 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15326 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15328 while (1) {
15329 u32 *p = buf, i;
15331 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15332 p[i] = i;
15334 /* Send the buffer to the chip. */
15335 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15336 if (ret) {
15337 dev_err(&tp->pdev->dev,
15338 "%s: Buffer write failed. err = %d\n",
15339 __func__, ret);
15340 break;
15341 }
15343 #if 0
15344 /* validate data reached card RAM correctly. */
15345 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15346 u32 val;
15347 tg3_read_mem(tp, 0x2100 + (i*4), &val);
15348 if (le32_to_cpu(val) != p[i]) {
15349 dev_err(&tp->pdev->dev,
15350 "%s: Buffer corrupted on device! "
15351 "(%d != %d)\n", __func__, val, i);
15352 /* ret = -ENODEV here? */
15353 }
15354 p[i] = 0;
15355 }
15356 #endif
15357 /* Now read it back. */
15358 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15359 if (ret) {
15360 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15361 "err = %d\n", __func__, ret);
15362 break;
15363 }
15365 /* Verify it. */
15366 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15367 if (p[i] == i)
15368 continue;
15370 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15371 DMA_RWCTRL_WRITE_BNDRY_16) {
15372 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15373 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15374 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15375 break;
15376 } else {
15377 dev_err(&tp->pdev->dev,
15378 "%s: Buffer corrupted on read back! "
15379 "(%d != %d)\n", __func__, p[i], i);
15380 ret = -ENODEV;
15381 goto out;
15382 }
15383 }
15385 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15386 /* Success. */
15387 ret = 0;
15388 break;
15389 }
15391 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15392 DMA_RWCTRL_WRITE_BNDRY_16) {
15393 /* DMA test passed without adjusting DMA boundary,
15394 * now look for chipsets that are known to expose the
15395 * DMA bug without failing the test.
15396 */
15397 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15398 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15399 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15400 } else {
15401 /* Safe to use the calculated DMA boundary. */
15402 tp->dma_rwctrl = saved_dma_rwctrl;
15403 }
15405 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15406 }
15408 out:
15409 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15410 out_nofree:
15411 return ret;
15412 }
15414 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15415 {
15416 if (tg3_flag(tp, 57765_PLUS)) {
15417 tp->bufmgr_config.mbuf_read_dma_low_water =
15418 DEFAULT_MB_RDMA_LOW_WATER_5705;
15419 tp->bufmgr_config.mbuf_mac_rx_low_water =
15420 DEFAULT_MB_MACRX_LOW_WATER_57765;
15421 tp->bufmgr_config.mbuf_high_water =
15422 DEFAULT_MB_HIGH_WATER_57765;
15424 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15425 DEFAULT_MB_RDMA_LOW_WATER_5705;
15426 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15427 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15428 tp->bufmgr_config.mbuf_high_water_jumbo =
15429 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15430 } else if (tg3_flag(tp, 5705_PLUS)) {
15431 tp->bufmgr_config.mbuf_read_dma_low_water =
15432 DEFAULT_MB_RDMA_LOW_WATER_5705;
15433 tp->bufmgr_config.mbuf_mac_rx_low_water =
15434 DEFAULT_MB_MACRX_LOW_WATER_5705;
15435 tp->bufmgr_config.mbuf_high_water =
15436 DEFAULT_MB_HIGH_WATER_5705;
15437 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15438 tp->bufmgr_config.mbuf_mac_rx_low_water =
15439 DEFAULT_MB_MACRX_LOW_WATER_5906;
15440 tp->bufmgr_config.mbuf_high_water =
15441 DEFAULT_MB_HIGH_WATER_5906;
15442 }
15444 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15445 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15446 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15447 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15448 tp->bufmgr_config.mbuf_high_water_jumbo =
15449 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15450 } else {
15451 tp->bufmgr_config.mbuf_read_dma_low_water =
15452 DEFAULT_MB_RDMA_LOW_WATER;
15453 tp->bufmgr_config.mbuf_mac_rx_low_water =
15454 DEFAULT_MB_MACRX_LOW_WATER;
15455 tp->bufmgr_config.mbuf_high_water =
15456 DEFAULT_MB_HIGH_WATER;
15458 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15459 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15460 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15461 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15462 tp->bufmgr_config.mbuf_high_water_jumbo =
15463 DEFAULT_MB_HIGH_WATER_JUMBO;
15464 }
15466 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15467 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15468 }
15470 static char * __devinit tg3_phy_string(struct tg3 *tp)
15471 {
15472 switch (tp->phy_id & TG3_PHY_ID_MASK) {
15473 case TG3_PHY_ID_BCM5400: return "5400";
15474 case TG3_PHY_ID_BCM5401: return "5401";
15475 case TG3_PHY_ID_BCM5411: return "5411";
15476 case TG3_PHY_ID_BCM5701: return "5701";
15477 case TG3_PHY_ID_BCM5703: return "5703";
15478 case TG3_PHY_ID_BCM5704: return "5704";
15479 case TG3_PHY_ID_BCM5705: return "5705";
15480 case TG3_PHY_ID_BCM5750: return "5750";
15481 case TG3_PHY_ID_BCM5752: return "5752";
15482 case TG3_PHY_ID_BCM5714: return "5714";
15483 case TG3_PHY_ID_BCM5780: return "5780";
15484 case TG3_PHY_ID_BCM5755: return "5755";
15485 case TG3_PHY_ID_BCM5787: return "5787";
15486 case TG3_PHY_ID_BCM5784: return "5784";
15487 case TG3_PHY_ID_BCM5756: return "5722/5756";
15488 case TG3_PHY_ID_BCM5906: return "5906";
15489 case TG3_PHY_ID_BCM5761: return "5761";
15490 case TG3_PHY_ID_BCM5718C: return "5718C";
15491 case TG3_PHY_ID_BCM5718S: return "5718S";
15492 case TG3_PHY_ID_BCM57765: return "57765";
15493 case TG3_PHY_ID_BCM5719C: return "5719C";
15494 case TG3_PHY_ID_BCM5720C: return "5720C";
15495 case TG3_PHY_ID_BCM8002: return "8002/serdes";
15496 case 0: return "serdes";
15497 default: return "unknown";
15501 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15502 {
15503 if (tg3_flag(tp, PCI_EXPRESS)) {
15504 strcpy(str, "PCI Express");
15505 return str;
15506 } else if (tg3_flag(tp, PCIX_MODE)) {
15507 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15509 strcpy(str, "PCIX:");
15511 if ((clock_ctrl == 7) ||
15512 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15513 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15514 strcat(str, "133MHz");
15515 else if (clock_ctrl == 0)
15516 strcat(str, "33MHz");
15517 else if (clock_ctrl == 2)
15518 strcat(str, "50MHz");
15519 else if (clock_ctrl == 4)
15520 strcat(str, "66MHz");
15521 else if (clock_ctrl == 6)
15522 strcat(str, "100MHz");
15523 } else {
15524 strcpy(str, "PCI:");
15525 if (tg3_flag(tp, PCI_HIGH_SPEED))
15526 strcat(str, "66MHz");
15527 else
15528 strcat(str, "33MHz");
15530 if (tg3_flag(tp, PCI_32BIT))
15531 strcat(str, ":32-bit");
15532 else
15533 strcat(str, ":64-bit");
15534 return str;
15535 }
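/* Typical strings produced here: "PCI Express", "PCIX:133MHz:64-bit",
 * or "PCI:66MHz:32-bit".
 */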
15537 static void __devinit tg3_init_coal(struct tg3 *tp)
15538 {
15539 struct ethtool_coalesce *ec = &tp->coal;
15541 memset(ec, 0, sizeof(*ec));
15542 ec->cmd = ETHTOOL_GCOALESCE;
15543 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15544 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15545 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15546 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15547 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15548 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15549 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15550 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15551 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15553 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15554 HOSTCC_MODE_CLRTICK_TXBD)) {
15555 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15556 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15557 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15558 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15559 }
15561 if (tg3_flag(tp, 5705_PLUS)) {
15562 ec->rx_coalesce_usecs_irq = 0;
15563 ec->tx_coalesce_usecs_irq = 0;
15564 ec->stats_block_coalesce_usecs = 0;
15565 }
15566 }
15568 static int __devinit tg3_init_one(struct pci_dev *pdev,
15569 const struct pci_device_id *ent)
15570 {
15571 struct net_device *dev;
15572 struct tg3 *tp;
15573 int i, err, pm_cap;
15574 u32 sndmbx, rcvmbx, intmbx;
15575 char str[40];
15576 u64 dma_mask, persist_dma_mask;
15577 netdev_features_t features = 0;
15579 printk_once(KERN_INFO "%s\n", version);
15581 err = pci_enable_device(pdev);
15582 if (err) {
15583 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15584 return err;
15585 }
15587 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15588 if (err) {
15589 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15590 goto err_out_disable_pdev;
15591 }
15593 pci_set_master(pdev);
15595 /* Find power-management capability. */
15596 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15597 if (pm_cap == 0) {
15598 dev_err(&pdev->dev,
15599 "Cannot find Power Management capability, aborting\n");
15600 err = -EIO;
15601 goto err_out_free_res;
15602 }
15604 err = pci_set_power_state(pdev, PCI_D0);
15605 if (err) {
15606 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15607 goto err_out_free_res;
15608 }
15610 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15611 if (!dev) {
15612 err = -ENOMEM;
15613 goto err_out_power_down;
15614 }
15616 SET_NETDEV_DEV(dev, &pdev->dev);
15618 tp = netdev_priv(dev);
15619 tp->pdev = pdev;
15620 tp->dev = dev;
15621 tp->pm_cap = pm_cap;
15622 tp->rx_mode = TG3_DEF_RX_MODE;
15623 tp->tx_mode = TG3_DEF_TX_MODE;
15624 tp->irq_sync = 1;
15626 if (tg3_debug > 0)
15627 tp->msg_enable = tg3_debug;
15628 else
15629 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15631 /* The word/byte swap controls here control register access byte
15632 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15633 * setting below.
15634 */
15635 tp->misc_host_ctrl =
15636 MISC_HOST_CTRL_MASK_PCI_INT |
15637 MISC_HOST_CTRL_WORD_SWAP |
15638 MISC_HOST_CTRL_INDIR_ACCESS |
15639 MISC_HOST_CTRL_PCISTATE_RW;
15641 /* The NONFRM (non-frame) byte/word swap controls take effect
15642 * on descriptor entries, anything which isn't packet data.
15644 * The StrongARM chips on the board (one for tx, one for rx)
15645 * are running in big-endian mode.
15646 */
15647 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15648 GRC_MODE_WSWAP_NONFRM_DATA);
15649 #ifdef __BIG_ENDIAN
15650 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15651 #endif
15652 spin_lock_init(&tp->lock);
15653 spin_lock_init(&tp->indirect_lock);
15654 INIT_WORK(&tp->reset_task, tg3_reset_task);
15656 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15657 if (!tp->regs) {
15658 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15659 err = -ENOMEM;
15660 goto err_out_free_dev;
15661 }
15663 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15664 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15665 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15666 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15667 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15668 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15669 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15670 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15671 tg3_flag_set(tp, ENABLE_APE);
15672 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15673 if (!tp->aperegs) {
15674 dev_err(&pdev->dev,
15675 "Cannot map APE registers, aborting\n");
15676 err = -ENOMEM;
15677 goto err_out_iounmap;
15678 }
15679 }
15681 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15682 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15684 dev->ethtool_ops = &tg3_ethtool_ops;
15685 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15686 dev->netdev_ops = &tg3_netdev_ops;
15687 dev->irq = pdev->irq;
15689 err = tg3_get_invariants(tp);
15690 if (err) {
15691 dev_err(&pdev->dev,
15692 "Problem fetching invariants of chip, aborting\n");
15693 goto err_out_apeunmap;
15694 }
15696 /* The EPB bridge inside 5714, 5715, and 5780 and any
15697 * device behind the EPB cannot support DMA addresses > 40-bit.
15698 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15699 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15700 * do DMA address check in tg3_start_xmit().
15701 */
15702 if (tg3_flag(tp, IS_5788))
15703 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15704 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15705 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15706 #ifdef CONFIG_HIGHMEM
15707 dma_mask = DMA_BIT_MASK(64);
15708 #endif
15709 } else
15710 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
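/* Note that in the 40BIT_DMA_BUG case with CONFIG_HIGHMEM, the streaming
 * mask is raised to 64 bits while coherent allocations stay within
 * 40 bits; buffers above the 40-bit limit are then handled by the
 * tg3_start_xmit() address check mentioned above.
 */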
15712 /* Configure DMA attributes. */
15713 if (dma_mask > DMA_BIT_MASK(32)) {
15714 err = pci_set_dma_mask(pdev, dma_mask);
15715 if (!err) {
15716 features |= NETIF_F_HIGHDMA;
15717 err = pci_set_consistent_dma_mask(pdev,
15718 persist_dma_mask);
15719 if (err < 0) {
15720 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15721 "DMA for consistent allocations\n");
15722 goto err_out_apeunmap;
15723 }
15724 }
15725 }
15726 if (err || dma_mask == DMA_BIT_MASK(32)) {
15727 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15728 if (err) {
15729 dev_err(&pdev->dev,
15730 "No usable DMA configuration, aborting\n");
15731 goto err_out_apeunmap;
15732 }
15733 }
15735 tg3_init_bufmgr_config(tp);
15737 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15739 /* 5700 B0 chips do not support checksumming correctly due
15740 * to hardware bugs.
15741 */
15742 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15743 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15745 if (tg3_flag(tp, 5755_PLUS))
15746 features |= NETIF_F_IPV6_CSUM;
15747 }
15749 /* TSO is on by default on chips that support hardware TSO.
15750 * Firmware TSO on older chips gives lower performance, so it
15751 * is off by default, but can be enabled using ethtool.
15752 */
15753 if ((tg3_flag(tp, HW_TSO_1) ||
15754 tg3_flag(tp, HW_TSO_2) ||
15755 tg3_flag(tp, HW_TSO_3)) &&
15756 (features & NETIF_F_IP_CSUM))
15757 features |= NETIF_F_TSO;
15758 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15759 if (features & NETIF_F_IPV6_CSUM)
15760 features |= NETIF_F_TSO6;
15761 if (tg3_flag(tp, HW_TSO_3) ||
15762 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15763 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15764 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15765 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15766 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15767 features |= NETIF_F_TSO_ECN;
15768 }
15770 dev->features |= features;
15771 dev->vlan_features |= features;
15773 /*
15774 * Add loopback capability only for a subset of devices that support
15775 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15776 * loopback for the remaining devices.
15777 */
15778 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15779 !tg3_flag(tp, CPMU_PRESENT))
15780 /* Add the loopback capability */
15781 features |= NETIF_F_LOOPBACK;
15783 dev->hw_features |= features;
15785 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15786 !tg3_flag(tp, TSO_CAPABLE) &&
15787 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15788 tg3_flag_set(tp, MAX_RXPEND_64);
15789 tp->rx_pending = 63;
15790 }
15792 err = tg3_get_device_address(tp);
15793 if (err) {
15794 dev_err(&pdev->dev,
15795 "Could not obtain valid ethernet address, aborting\n");
15796 goto err_out_apeunmap;
15797 }
15799 /*
15800 * Reset chip in case UNDI or EFI driver did not shut down
15801 * DMA. The DMA self test will enable WDMAC and we'll see (spurious)
15802 * pending DMA on the PCI bus at that point.
15803 */
15804 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15805 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15806 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15807 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15808 }
15810 err = tg3_test_dma(tp);
15811 if (err) {
15812 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15813 goto err_out_apeunmap;
15814 }
15816 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15817 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15818 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15819 for (i = 0; i < tp->irq_max; i++) {
15820 struct tg3_napi *tnapi = &tp->napi[i];
15822 tnapi->tp = tp;
15823 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15825 tnapi->int_mbox = intmbx;
15826 if (i <= 4)
15827 intmbx += 0x8;
15828 else
15829 intmbx += 0x4;
15831 tnapi->consmbox = rcvmbx;
15832 tnapi->prodmbox = sndmbx;
15834 if (i)
15835 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15836 else
15837 tnapi->coal_now = HOSTCC_MODE_NOW;
15839 if (!tg3_flag(tp, SUPPORT_MSIX))
15840 break;
15842 /*
15843 * If we support MSIX, we'll be using RSS. If we're using
15844 * RSS, the first vector only handles link interrupts and the
15845 * remaining vectors handle rx and tx interrupts. Reuse the
15846 * mailbox values for the next iteration. The values we set up
15847 * above are still useful for the single vectored mode.
15848 */
15849 if (!i)
15850 continue;
15852 rcvmbx += 0x8;
15854 if (sndmbx & 0x4)
15855 sndmbx -= 0x4;
15856 else
15857 sndmbx += 0xc;
15858 }
15860 tg3_init_coal(tp);
15862 pci_set_drvdata(pdev, dev);
15864 if (tg3_flag(tp, 5717_PLUS)) {
15865 /* Resume a low-power mode */
15866 tg3_frob_aux_power(tp, false);
15867 }
15869 tg3_timer_init(tp);
15871 err = register_netdev(dev);
15872 if (err) {
15873 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15874 goto err_out_apeunmap;
15875 }
15877 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15878 tp->board_part_number,
15879 tp->pci_chip_rev_id,
15880 tg3_bus_string(tp, str),
15881 dev->dev_addr);
15883 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15884 struct phy_device *phydev;
15885 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15886 netdev_info(dev,
15887 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15888 phydev->drv->name, dev_name(&phydev->dev));
15889 } else {
15890 char *ethtype;
15892 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15893 ethtype = "10/100Base-TX";
15894 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15895 ethtype = "1000Base-SX";
15896 else
15897 ethtype = "10/100/1000Base-T";
15899 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15900 "(WireSpeed[%d], EEE[%d])\n",
15901 tg3_phy_string(tp), ethtype,
15902 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15903 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15904 }
15906 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15907 (dev->features & NETIF_F_RXCSUM) != 0,
15908 tg3_flag(tp, USE_LINKCHG_REG) != 0,
15909 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15910 tg3_flag(tp, ENABLE_ASF) != 0,
15911 tg3_flag(tp, TSO_CAPABLE) != 0);
15912 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15913 tp->dma_rwctrl,
15914 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15915 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15917 pci_save_state(pdev);
15919 return 0;
15921 err_out_apeunmap:
15922 if (tp->aperegs) {
15923 iounmap(tp->aperegs);
15924 tp->aperegs = NULL;
15925 }
15927 err_out_iounmap:
15928 if (tp->regs) {
15929 iounmap(tp->regs);
15930 tp->regs = NULL;
15931 }
15933 err_out_free_dev:
15934 free_netdev(dev);
15936 err_out_power_down:
15937 pci_set_power_state(pdev, PCI_D3hot);
15939 err_out_free_res:
15940 pci_release_regions(pdev);
15942 err_out_disable_pdev:
15943 pci_disable_device(pdev);
15944 pci_set_drvdata(pdev, NULL);
15945 return err;
15946 }
15948 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15949 {
15950 struct net_device *dev = pci_get_drvdata(pdev);
15952 if (dev) {
15953 struct tg3 *tp = netdev_priv(dev);
15955 if (tp->fw)
15956 release_firmware(tp->fw);
15958 tg3_reset_task_cancel(tp);
15960 if (tg3_flag(tp, USE_PHYLIB)) {
15961 tg3_phy_fini(tp);
15962 tg3_mdio_fini(tp);
15963 }
15965 unregister_netdev(dev);
15966 if (tp->aperegs) {
15967 iounmap(tp->aperegs);
15968 tp->aperegs = NULL;
15969 }
15970 if (tp->regs) {
15971 iounmap(tp->regs);
15972 tp->regs = NULL;
15973 }
15974 free_netdev(dev);
15975 pci_release_regions(pdev);
15976 pci_disable_device(pdev);
15977 pci_set_drvdata(pdev, NULL);
15978 }
15979 }
15981 #ifdef CONFIG_PM_SLEEP
15982 static int tg3_suspend(struct device *device)
15983 {
15984 struct pci_dev *pdev = to_pci_dev(device);
15985 struct net_device *dev = pci_get_drvdata(pdev);
15986 struct tg3 *tp = netdev_priv(dev);
15987 int err;
15989 if (!netif_running(dev))
15990 return 0;
15992 tg3_reset_task_cancel(tp);
15993 tg3_phy_stop(tp);
15994 tg3_netif_stop(tp);
15996 tg3_timer_stop(tp);
15998 tg3_full_lock(tp, 1);
15999 tg3_disable_ints(tp);
16000 tg3_full_unlock(tp);
16002 netif_device_detach(dev);
16004 tg3_full_lock(tp, 0);
16005 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16006 tg3_flag_clear(tp, INIT_COMPLETE);
16007 tg3_full_unlock(tp);
16009 err = tg3_power_down_prepare(tp);
16010 if (err) {
16011 int err2;
16013 tg3_full_lock(tp, 0);
16015 tg3_flag_set(tp, INIT_COMPLETE);
16016 err2 = tg3_restart_hw(tp, 1);
16017 if (err2)
16018 goto out;
16020 tg3_timer_start(tp);
16022 netif_device_attach(dev);
16023 tg3_netif_start(tp);
16025 out:
16026 tg3_full_unlock(tp);
16028 if (!err2)
16029 tg3_phy_start(tp);
16030 }
16032 return err;
16033 }
16035 static int tg3_resume(struct device *device)
16036 {
16037 struct pci_dev *pdev = to_pci_dev(device);
16038 struct net_device *dev = pci_get_drvdata(pdev);
16039 struct tg3 *tp = netdev_priv(dev);
16040 int err;
16042 if (!netif_running(dev))
16043 return 0;
16045 netif_device_attach(dev);
16047 tg3_full_lock(tp, 0);
16049 tg3_flag_set(tp, INIT_COMPLETE);
16050 err = tg3_restart_hw(tp, 1);
16051 if (err)
16052 goto out;
16054 tg3_timer_start(tp);
16056 tg3_netif_start(tp);
16058 out:
16059 tg3_full_unlock(tp);
16061 if (!err)
16062 tg3_phy_start(tp);
16064 return err;
16065 }
16067 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
16068 #define TG3_PM_OPS (&tg3_pm_ops)
16070 #else
16072 #define TG3_PM_OPS NULL
16074 #endif /* CONFIG_PM_SLEEP */
16076 /**
16077 * tg3_io_error_detected - called when PCI error is detected
16078 * @pdev: Pointer to PCI device
16079 * @state: The current pci connection state
16080 *
16081 * This function is called after a PCI bus error affecting
16082 * this device has been detected.
16083 */
16084 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
16085 pci_channel_state_t state)
16086 {
16087 struct net_device *netdev = pci_get_drvdata(pdev);
16088 struct tg3 *tp = netdev_priv(netdev);
16089 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
16091 netdev_info(netdev, "PCI I/O error detected\n");
16093 rtnl_lock();
16095 if (!netif_running(netdev))
16096 goto done;
16098 tg3_phy_stop(tp);
16100 tg3_netif_stop(tp);
16102 tg3_timer_stop(tp);
16104 /* Want to make sure that the reset task doesn't run */
16105 tg3_reset_task_cancel(tp);
16107 netif_device_detach(netdev);
16109 /* Clean up software state, even if MMIO is blocked */
16110 tg3_full_lock(tp, 0);
16111 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
16112 tg3_full_unlock(tp);
16114 done:
16115 if (state == pci_channel_io_perm_failure)
16116 err = PCI_ERS_RESULT_DISCONNECT;
16117 else
16118 pci_disable_device(pdev);
16120 rtnl_unlock();
16122 return err;
16123 }
16125 /**
16126 * tg3_io_slot_reset - called after the pci bus has been reset.
16127 * @pdev: Pointer to PCI device
16128 *
16129 * Restart the card from scratch, as if from a cold-boot.
16130 * At this point, the card has experienced a hard reset,
16131 * followed by fixups by BIOS, and has its config space
16132 * set up identically to what it was at cold boot.
16133 */
16134 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
16135 {
16136 struct net_device *netdev = pci_get_drvdata(pdev);
16137 struct tg3 *tp = netdev_priv(netdev);
16138 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16139 int err;
16141 rtnl_lock();
16143 if (pci_enable_device(pdev)) {
16144 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16145 goto done;
16146 }
16148 pci_set_master(pdev);
16149 pci_restore_state(pdev);
16150 pci_save_state(pdev);
16152 if (!netif_running(netdev)) {
16153 rc = PCI_ERS_RESULT_RECOVERED;
16154 goto done;
16155 }
16157 err = tg3_power_up(tp);
16158 if (err)
16159 goto done;
16161 rc = PCI_ERS_RESULT_RECOVERED;
16163 done:
16164 rtnl_unlock();
16166 return rc;
16167 }
16169 /**
16170 * tg3_io_resume - called when traffic can start flowing again.
16171 * @pdev: Pointer to PCI device
16172 *
16173 * This callback is called when the error recovery driver tells
16174 * us that it's OK to resume normal operation.
16175 */
16176 static void tg3_io_resume(struct pci_dev *pdev)
16177 {
16178 struct net_device *netdev = pci_get_drvdata(pdev);
16179 struct tg3 *tp = netdev_priv(netdev);
16180 int err;
16182 rtnl_lock();
16184 if (!netif_running(netdev))
16185 goto done;
16187 tg3_full_lock(tp, 0);
16188 tg3_flag_set(tp, INIT_COMPLETE);
16189 err = tg3_restart_hw(tp, 1);
16190 tg3_full_unlock(tp);
16191 if (err) {
16192 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16193 goto done;
16194 }
16196 netif_device_attach(netdev);
16198 tg3_timer_start(tp);
16200 tg3_netif_start(tp);
16202 tg3_phy_start(tp);
16204 done:
16205 rtnl_unlock();
16206 }
16208 static struct pci_error_handlers tg3_err_handler = {
16209 .error_detected = tg3_io_error_detected,
16210 .slot_reset = tg3_io_slot_reset,
16211 .resume = tg3_io_resume
16212 };
16214 static struct pci_driver tg3_driver = {
16215 .name = DRV_MODULE_NAME,
16216 .id_table = tg3_pci_tbl,
16217 .probe = tg3_init_one,
16218 .remove = __devexit_p(tg3_remove_one),
16219 .err_handler = &tg3_err_handler,
16220 .driver.pm = TG3_PM_OPS,
16221 };
16223 static int __init tg3_init(void)
16224 {
16225 return pci_register_driver(&tg3_driver);
16226 }
16228 static void __exit tg3_cleanup(void)
16229 {
16230 pci_unregister_driver(&tg3_driver);
16231 }
16233 module_init(tg3_init);
16234 module_exit(tg3_cleanup);